1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-6, 1997 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where a register is set within the loop by
30 zero-extending a narrower value, and changes these to zero the entire
31 register once before the loop and merely copy the low part within
32 the loop.
33
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
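/* As an illustrative sketch (not code from this pass): given

     for (i = 0; i < n; i++)
       a[i] = x * y;

   the invariant product x * y is hoisted so that it is computed once:

     t = x * y;
     for (i = 0; i < n; i++)
       a[i] = t;

   and strength reduction replaces the induction expression a + i*4 used
   to address a[i] with a pointer that is bumped by 4 each iteration. */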
36
37 #include <stdio.h>
38 #include "config.h"
39 #include "rtl.h"
40 #include "obstack.h"
41 #include "expr.h"
42 #include "insn-config.h"
43 #include "insn-flags.h"
44 #include "regs.h"
45 #include "hard-reg-set.h"
46 #include "recog.h"
47 #include "flags.h"
48 #include "real.h"
49 #include "loop.h"
50 #include "except.h"
51
52 /* Vector mapping INSN_UIDs to luids.
53 The luids are like uids but always increase monotonically.
54 We use them to see whether a jump comes from outside a given loop. */
55
56 int *uid_luid;
57
58 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
59 number the insn is contained in. */
60
61 int *uid_loop_num;
62
63 /* 1 + largest uid of any insn. */
64
65 int max_uid_for_loop;
66
67 /* 1 + luid of last insn. */
68
69 static int max_luid;
70
71 /* Number of loops detected in current function. Used as index to the
72 next few tables. */
73
74 static int max_loop_num;
75
76 /* Indexed by loop number, contains the first and last insn of each loop. */
77
78 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
79
80 /* For each loop, gives the containing loop number, -1 if none. */
81
82 int *loop_outer_loop;
83
84 #ifdef HAIFA
85 /* The main output of analyze_loop_iterations is placed here. */
86
87 int *loop_can_insert_bct;
88
89 /* For each loop, determines whether any of its inner loops uses
90 the count register. */
91
92 int *loop_used_count_register;
93
94 /* For each loop, remember its unrolling factor, if any.
95 Contents of the array:
96 0/1: not unrolled.
97 -1: completely unrolled; no further instrumentation is needed.
98 >1: holds the exact unrolling factor. */
99
100 int *loop_unroll_factor;
101 int *loop_unroll_iter;
102
103 /* Loop parameters for arithmetic loops. These loops have a loop variable
104 which is initialized to loop_start_value and incremented in each iteration
105 by loop_increment. At the end of each iteration the loop variable is
106 compared to loop_comparison_value (using loop_comparison_code). */
107
108 rtx *loop_increment;
109 rtx *loop_comparison_value;
110 rtx *loop_start_value;
111 enum rtx_code *loop_comparison_code;
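/* For instance, a source loop of the form `for (i = 0; i < n; i += 4)'
   would, for loop number N, record loop_start_value[N] = 0,
   loop_increment[N] = 4, loop_comparison_value[N] = n and
   loop_comparison_code[N] = LT. (Illustrative values only.) */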
112 #endif /* HAIFA */
113
114
115 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
116 really a loop (an insn outside the loop branches into it). */
117
118 static char *loop_invalid;
119
120 /* Indexed by loop number, links together all LABEL_REFs which refer to
121 code labels outside the loop. Used by routines that need to know all
122 loop exits, such as final_biv_value and final_giv_value.
123
124 This does not include loop exits due to return instructions. This is
125 because all bivs and givs are pseudos, and hence must be dead after a
126 return, so the presence of a return does not affect any of the
127 optimizations that use this info. It is simpler to just not include return
128 instructions on this list. */
129
130 rtx *loop_number_exit_labels;
131
132 /* Indexed by loop number, counts the number of LABEL_REFs on
133 loop_number_exit_labels for this loop and all loops nested inside it. */
134
135 int *loop_number_exit_count;
136
137 /* Holds the number of loop iterations. It is zero if the number could not be
138 calculated. Must be unsigned since the number of iterations can
139 be as high as 2^wordsize-1. For loops with a wider iterator, this number
140 will be zero if the number of loop iterations is too large for an
141 unsigned integer to hold. */
142
143 unsigned HOST_WIDE_INT loop_n_iterations;
144
145 /* Nonzero if there is a subroutine call in the current loop. */
146
147 static int loop_has_call;
148
149 /* Nonzero if there is a volatile memory reference in the current
150 loop. */
151
152 static int loop_has_volatile;
153
154 /* loop_continue is the NOTE_INSN_LOOP_CONT of the
155 current loop. A continue statement will generate a branch to
156 NEXT_INSN (loop_continue). */
157
158 static rtx loop_continue;
159
160 /* Indexed by register number, contains the number of times the reg
161 is set during the loop being scanned.
162 During code motion, a negative value indicates a reg that has been
163 made a candidate; in particular -2 means that it is a candidate that
164 we know is equal to a constant, and -1 means that it is a candidate
165 not known equal to a constant.
166 After code motion, regs moved have 0 (which is accurate now)
167 while the failed candidates have the original number of times set.
168
169 Therefore, at all times, == 0 indicates an invariant register;
170 < 0 a conditionally invariant one. */
171
172 static int *n_times_set;
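/* An illustrative life cycle for one entry of a reg set once: 1 after
   the loop is scanned, -1 or -2 while its set is a candidate for
   motion, and finally 0 if the insn is actually moved out of the loop
   (or the original count again if the candidate failed). */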
173
174 /* Original value of n_times_set; same except that this value
175 is not set negative for a reg whose sets have been made candidates
176 and not set to 0 for a reg that is moved. */
177
178 static int *n_times_used;
179
180 /* Indexed by register number; 1 indicates that the register
181 cannot be moved or strength reduced. */
182
183 static char *may_not_optimize;
184
185 /* Nonzero means reg N has already been moved out of one loop.
186 This reduces the desire to move it out of another. */
187
188 static char *moved_once;
189
190 /* Array of MEMs that are stored in this loop. If there are too many to fit
191 here, we just turn on unknown_address_altered. */
192
193 #define NUM_STORES 30
194 static rtx loop_store_mems[NUM_STORES];
195
196 /* Index of first available slot in above array. */
197 static int loop_store_mems_idx;
198
199 /* Nonzero if we don't know what MEMs were changed in the current loop.
200 This happens if the loop contains a call (in which case `loop_has_call'
201 will also be set) or if we store into more than NUM_STORES MEMs. */
202
203 static int unknown_address_altered;
204
205 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
206 static int num_movables;
207
208 /* Count of memory write instructions discovered in the loop. */
209 static int num_mem_sets;
210
211 /* Number of loops contained within the current one, including itself. */
212 static int loops_enclosed;
213
214 /* Bound on pseudo register number before loop optimization.
215 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
216 int max_reg_before_loop;
217
218 /* This obstack is used in product_cheap_p to allocate its rtl. It
219 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
220 If we used the same obstack that it did, we would be deallocating
221 that array. */
222
223 static struct obstack temp_obstack;
224
225 /* This is where the pointer to the obstack being used for RTL is stored. */
226
227 extern struct obstack *rtl_obstack;
228
229 #define obstack_chunk_alloc xmalloc
230 #define obstack_chunk_free free
231
232 extern char *oballoc ();
233 \f
234 /* During the analysis of a loop, a chain of `struct movable's
235 is made to record all the movable insns found.
236 Then the entire chain can be scanned to decide which to move. */
237
238 struct movable
239 {
240 rtx insn; /* A movable insn */
241 rtx set_src; /* The expression this reg is set from. */
242 rtx set_dest; /* The destination of this SET. */
243 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
244 of any registers used within the LIBCALL. */
245 int consec; /* Number of consecutive following insns
246 that must be moved with this one. */
247 int regno; /* The register it sets */
248 short lifetime; /* lifetime of that register;
249 may be adjusted when matching movables
250 that load the same value are found. */
251 short savings; /* Number of insns we can move for this reg,
252 including other movables that force this
253 or match this one. */
254 unsigned int cond : 1; /* 1 if only conditionally movable */
255 unsigned int force : 1; /* 1 means MUST move this insn */
256 unsigned int global : 1; /* 1 means reg is live outside this loop */
257 /* If PARTIAL is 1, GLOBAL means something different:
258 that the reg is live outside the range from where it is set
259 to the following label. */
260 unsigned int done : 1; /* 1 inhibits further processing of this */
261
262 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
263 In particular, moving it does not make it
264 invariant. */
265 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
266 load SRC, rather than copying INSN. */
267 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
268 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
269 that we should avoid changing when clearing
270 the rest of the reg. */
271 struct movable *match; /* First entry for same value */
272 struct movable *forces; /* An insn that must be moved if this is */
273 struct movable *next;
274 };
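/* A sketch of how the chain is consumed (compare move_movables below):

     struct movable *m;
     for (m = movables; m; m = m->next)
       if (! m->done && (! m->cond || ...))
         ...decide whether to move m->insn...  */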
275
276 FILE *loop_dump_stream;
277
278 /* Forward declarations. */
279
280 static void find_and_verify_loops ();
281 static void mark_loop_jump ();
282 static void prescan_loop ();
283 static int reg_in_basic_block_p ();
284 static int consec_sets_invariant_p ();
285 static rtx libcall_other_reg ();
286 static int labels_in_range_p ();
287 static void count_loop_regs_set ();
288 static void note_addr_stored ();
289 static int loop_reg_used_before_p ();
290 static void scan_loop ();
291 static void replace_call_address ();
292 static rtx skip_consec_insns ();
293 static int libcall_benefit ();
294 static void ignore_some_movables ();
295 static void force_movables ();
296 static void combine_movables ();
297 static int rtx_equal_for_loop_p ();
298 static void move_movables ();
299 static void strength_reduce ();
300 static int valid_initial_value_p ();
301 static void find_mem_givs ();
302 static void record_biv ();
303 static void check_final_value ();
304 static void record_giv ();
305 static void update_giv_derive ();
306 static int basic_induction_var ();
307 static rtx simplify_giv_expr ();
308 static int general_induction_var ();
309 static int consec_sets_giv ();
310 static int check_dbra_loop ();
311 static rtx express_from ();
312 static int combine_givs_p ();
313 static void combine_givs ();
314 static int product_cheap_p ();
315 static int maybe_eliminate_biv ();
316 static int maybe_eliminate_biv_1 ();
317 static int last_use_this_basic_block ();
318 static void record_initial ();
319 static void update_reg_last_use ();
320
321 #ifdef HAIFA
322 /* This is defined in unroll.c. */
323 void iteration_info ();
324
325 /* Two main functions for implementing bct:
326 the first to be called before loop unrolling, and the second after. */
327 static void analyze_loop_iterations ();
328 static void insert_bct ();
329
330 /* Auxiliary function that inserts the bct pattern into the loop. */
331 static void instrument_loop_bct ();
332
333
334 int loop_number ();
335 #endif /* HAIFA */
336
337 /* Indirect_jump_in_function is computed once per function. */
338 int indirect_jump_in_function = 0;
339 static int indirect_jump_in_function_p ();
340
341 \f
342 /* Relative gain of eliminating various kinds of operations. */
343 int add_cost;
344 #if 0
345 int shift_cost;
346 int mult_cost;
347 #endif
348
349 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
350 copy the value of the strength reduced giv to its original register. */
351 int copy_cost;
352
353 void
354 init_loop ()
355 {
356 char *free_point = (char *) oballoc (1);
357 rtx reg = gen_rtx (REG, word_mode, LAST_VIRTUAL_REGISTER + 1);
358
359 add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
360
361 /* We multiply by 2 to reconcile the difference in scale between
362 these two ways of computing costs. Otherwise the cost of a copy
363 will be far less than the cost of an add. */
364
365 copy_cost = 2 * 2;
366
367 /* Free the objects we just allocated. */
368 obfree (free_point);
369
370 /* Initialize the obstack used for rtl in product_cheap_p. */
371 gcc_obstack_init (&temp_obstack);
372 }
373 \f
374 /* Entry point of this file. Perform loop optimization
375 on the current function. F is the first insn of the function
376 and DUMPFILE is a stream for output of a trace of actions taken
377 (or 0 if none should be output). */
378
379 void
380 loop_optimize (f, dumpfile)
381 /* f is the first instruction of a chain of insns for one function */
382 rtx f;
383 FILE *dumpfile;
384 {
385 register rtx insn;
386 register int i;
387 rtx last_insn;
388
389 loop_dump_stream = dumpfile;
390
391 init_recog_no_volatile ();
392 init_alias_analysis ();
393
394 max_reg_before_loop = max_reg_num ();
395
396 moved_once = (char *) alloca (max_reg_before_loop);
397 bzero (moved_once, max_reg_before_loop);
398
399 regs_may_share = 0;
400
401 /* Count the number of loops. */
402
403 max_loop_num = 0;
404 for (insn = f; insn; insn = NEXT_INSN (insn))
405 {
406 if (GET_CODE (insn) == NOTE
407 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
408 max_loop_num++;
409 }
410
411 /* Don't waste time if no loops. */
412 if (max_loop_num == 0)
413 return;
414
415 /* Get size to use for tables indexed by uids.
416 Leave some space for labels allocated by find_and_verify_loops. */
417 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
418
419 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
420 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
421
422 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
423 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
424
425 /* Allocate tables for recording each loop. We set each entry, so they need
426 not be zeroed. */
427 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
428 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
429 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
430 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
431 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
432 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
433
434 #ifdef HAIFA
435 /* Allocate for BCT optimization. */
436 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
437 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
438
439 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
440 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
441
442 loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
443 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
444
445 loop_unroll_iter = (int *) alloca (max_loop_num * sizeof (int));
446 bzero ((char *) loop_unroll_iter, max_loop_num * sizeof (int));
447
448 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
449 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
450 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
451 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
452 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
453 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
454
455 loop_comparison_code
456 = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
457 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
458 #endif /* HAIFA */
459
460 /* Find and process each loop.
461 First, find them, and record them in order of their beginnings. */
462 find_and_verify_loops (f);
463
464 /* Now find all register lifetimes. This must be done after
465 find_and_verify_loops, because it might reorder the insns in the
466 function. */
467 reg_scan (f, max_reg_num (), 1);
468
469 /* See if we went too far. */
470 if (get_max_uid () > max_uid_for_loop)
471 abort ();
472
473 /* Compute the mapping from uids to luids.
474 LUIDs are numbers assigned to insns, like uids,
475 except that luids increase monotonically through the code.
476 Don't assign luids to line-number NOTEs, so that the distance in luids
477 between two insns is not affected by -g. */
478
479 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
480 {
481 last_insn = insn;
482 if (GET_CODE (insn) != NOTE
483 || NOTE_LINE_NUMBER (insn) <= 0)
484 uid_luid[INSN_UID (insn)] = ++i;
485 else
486 /* Give a line-number note the same luid as the preceding insn. */
487 uid_luid[INSN_UID (insn)] = i;
488 }
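/* Illustrative result: if insns with uids 7, 3 and 9 appear in that
   order, they receive luids 1, 2 and 3; a line-number note between two
   of them would share the luid of the insn preceding it. */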
489
490 max_luid = i + 1;
491
492 /* Don't leave gaps in uid_luid for insns that have been
493 deleted. It is possible that the first or last insn
494 using some register has been deleted by cross-jumping.
495 Make sure that uid_luid for that former insn's uid
496 points to the general area where that insn used to be. */
497 for (i = 0; i < max_uid_for_loop; i++)
498 {
499 uid_luid[0] = uid_luid[i];
500 if (uid_luid[0] != 0)
501 break;
502 }
503 for (i = 0; i < max_uid_for_loop; i++)
504 if (uid_luid[i] == 0)
505 uid_luid[i] = uid_luid[i - 1];
506
507 /* Create a mapping from loops to BLOCK tree nodes. */
508 if (flag_unroll_loops && write_symbols != NO_DEBUG)
509 find_loop_tree_blocks ();
510
511 /* Determine if the function has an indirect jump. On some systems
512 this prevents low overhead loop instructions from being used. */
513 indirect_jump_in_function = indirect_jump_in_function_p (f);
514
515 /* Now scan the loops, last ones first, since this means inner ones are done
516 before outer ones. */
517 for (i = max_loop_num-1; i >= 0; i--)
518 if (! loop_invalid[i] && loop_number_loop_ends[i])
519 scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
520 max_reg_num ());
521
522 /* If debugging and unrolling loops, we must replicate the tree nodes
523 corresponding to the blocks inside the loop, so that the original
524 one-to-one mapping will remain. */
525 if (flag_unroll_loops && write_symbols != NO_DEBUG)
526 unroll_block_trees ();
527 }
528 \f
529 /* Optimize one loop whose start is LOOP_START and end is END.
530 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
531 NOTE_INSN_LOOP_END. */
532
533 /* ??? Could also move memory writes out of loops if the destination address
534 is invariant, the source is invariant, the memory write is not volatile,
535 and if we can prove that no read inside the loop can read this address
536 before the write occurs. If there is a read of this address after the
537 write, then we can also mark the memory read as invariant. */
538
539 static void
540 scan_loop (loop_start, end, nregs)
541 rtx loop_start, end;
542 int nregs;
543 {
544 register int i;
545 register rtx p;
546 /* 1 if we are scanning insns that could be executed zero times. */
547 int maybe_never = 0;
548 /* 1 if we are scanning insns that might never be executed
549 due to a subroutine call which might exit before they are reached. */
550 int call_passed = 0;
551 /* For a rotated loop that is entered near the bottom,
552 this is the label at the top. Otherwise it is zero. */
553 rtx loop_top = 0;
554 /* Jump insn that enters the loop, or 0 if control drops in. */
555 rtx loop_entry_jump = 0;
556 /* Place in the loop where control enters. */
557 rtx scan_start;
558 /* Number of insns in the loop. */
559 int insn_count;
560 int in_libcall = 0;
561 int tem;
562 rtx temp;
563 /* The SET from an insn, if it is the only SET in the insn. */
564 rtx set, set1;
565 /* Chain describing insns movable in current loop. */
566 struct movable *movables = 0;
567 /* Last element in `movables' -- so we can add elements at the end. */
568 struct movable *last_movable = 0;
569 /* Ratio of extra register life span we can justify
570 for saving an instruction. More if loop doesn't call subroutines
571 since in that case saving an insn makes more difference
572 and more registers are available. */
573 int threshold;
574 /* If we have calls, contains the insn in which a register was used
575 if it was used exactly once; contains const0_rtx if it was used more
576 than once. */
577 rtx *reg_single_usage = 0;
578 /* Nonzero if we are scanning instructions in a sub-loop. */
579 int loop_depth = 0;
580
581 n_times_set = (int *) alloca (nregs * sizeof (int));
582 n_times_used = (int *) alloca (nregs * sizeof (int));
583 may_not_optimize = (char *) alloca (nregs);
584
585 /* Determine whether this loop starts with a jump down to a test at
586 the end. This will occur for a small number of loops with a test
587 that is too complex to duplicate in front of the loop.
588
589 We search for the first insn or label in the loop, skipping NOTEs.
590 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
591 (because we might have a loop executed only once that contains a
592 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
593 (in case we have a degenerate loop).
594
595 Note that if we mistakenly think that a loop is entered at the top
596 when, in fact, it is entered at the exit test, the only effect will be
597 slightly poorer optimization. Making the opposite error can generate
598 incorrect code. Since very few loops now start with a jump to the
599 exit test, the code here to detect that case is very conservative. */
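/* A sketch of the rotated layout this detects:

     NOTE_INSN_LOOP_BEG
     jump L2 ; loop_entry_jump
     L1: ; becomes LOOP_TOP
     ...loop body...
     L2: ; becomes SCAN_START
     exit test; conditional jump back to L1
     NOTE_INSN_LOOP_END  */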
600
601 for (p = NEXT_INSN (loop_start);
602 p != end
603 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
604 && (GET_CODE (p) != NOTE
605 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
606 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
607 p = NEXT_INSN (p))
608 ;
609
610 scan_start = p;
611
612 /* Set up variables describing this loop. */
613 prescan_loop (loop_start, end);
614 threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
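/* For example, on a hypothetical target with 29 non-fixed registers
   this yields a threshold of 2 * 30 = 60 for a loop without calls and
   1 * 30 = 30 for one with calls; n_non_fixed_regs is target-dependent,
   so the actual figures vary. */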
615
616 /* If loop has a jump before the first label,
617 the true entry is the target of that jump.
618 Start scan from there.
619 But record in LOOP_TOP the place where the end-test jumps
620 back to so we can scan that after the end of the loop. */
621 if (GET_CODE (p) == JUMP_INSN)
622 {
623 loop_entry_jump = p;
624
625 /* Loop entry must be an unconditional jump (and not a RETURN). */
626 if (simplejump_p (p)
627 && JUMP_LABEL (p) != 0
628 /* Check to see whether the jump actually
629 jumps out of the loop (meaning it is not a loop).
630 This case can happen for things like
631 do {..} while (0). If this label was generated previously
632 by loop, we can't tell anything about it and have to reject
633 the loop. */
634 && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
635 && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
636 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
637 {
638 loop_top = next_label (scan_start);
639 scan_start = JUMP_LABEL (p);
640 }
641 }
642
643 /* If SCAN_START was an insn created by loop, we don't know its luid
644 as required by loop_reg_used_before_p. So skip such loops. (This
645 test may never be true, but it's best to play it safe.)
646
647 Also, skip loops where we do not start scanning at a label. This
648 test also rejects loops starting with a JUMP_INSN that failed the
649 test above. */
650
651 if (INSN_UID (scan_start) >= max_uid_for_loop
652 || GET_CODE (scan_start) != CODE_LABEL)
653 {
654 if (loop_dump_stream)
655 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
656 INSN_UID (loop_start), INSN_UID (end));
657 return;
658 }
659
660 /* Count number of times each reg is set during this loop.
661 Set may_not_optimize[I] if it is not safe to move out
662 the setting of register I. If this loop has calls, set
663 reg_single_usage[I]. */
664
665 bzero ((char *) n_times_set, nregs * sizeof (int));
666 bzero (may_not_optimize, nregs);
667
668 if (loop_has_call)
669 {
670 reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
671 bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
672 }
673
674 count_loop_regs_set (loop_top ? loop_top : loop_start, end,
675 may_not_optimize, reg_single_usage, &insn_count, nregs);
676
677 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
678 may_not_optimize[i] = 1, n_times_set[i] = 1;
679 bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
680
681 if (loop_dump_stream)
682 {
683 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
684 INSN_UID (loop_start), INSN_UID (end), insn_count);
685 if (loop_continue)
686 fprintf (loop_dump_stream, "Continue at insn %d.\n",
687 INSN_UID (loop_continue));
688 }
689
690 /* Scan through the loop finding insns that are safe to move.
691 Set n_times_set negative for the reg being set, so that
692 this reg will be considered invariant for subsequent insns.
693 We consider whether subsequent insns use the reg
694 in deciding whether it is worth actually moving.
695
696 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
697 and therefore it is possible that the insns we are scanning
698 would never be executed. At such times, we must make sure
699 that it is safe to execute the insn once instead of zero times.
700 When MAYBE_NEVER is 0, all insns will be executed at least once
701 so that is not a problem. */
702
703 p = scan_start;
704 while (1)
705 {
706 p = NEXT_INSN (p);
707 /* At end of a straight-in loop, we are done.
708 At end of a loop entered at the bottom, scan the top. */
709 if (p == scan_start)
710 break;
711 if (p == end)
712 {
713 if (loop_top != 0)
714 p = loop_top;
715 else
716 break;
717 if (p == scan_start)
718 break;
719 }
720
721 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
722 && find_reg_note (p, REG_LIBCALL, NULL_RTX))
723 in_libcall = 1;
724 else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
725 && find_reg_note (p, REG_RETVAL, NULL_RTX))
726 in_libcall = 0;
727
728 if (GET_CODE (p) == INSN
729 && (set = single_set (p))
730 && GET_CODE (SET_DEST (set)) == REG
731 && ! may_not_optimize[REGNO (SET_DEST (set))])
732 {
733 int tem1 = 0;
734 int tem2 = 0;
735 int move_insn = 0;
736 rtx src = SET_SRC (set);
737 rtx dependencies = 0;
738
739 /* Figure out what to use as a source of this insn. If a REG_EQUIV
740 note is given or if a REG_EQUAL note with a constant operand is
741 specified, use it as the source and mark that we should move
742 this insn by calling emit_move_insn rather than duplicating the
743 insn.
744
745 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
746 is present. */
747 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
748 if (temp)
749 src = XEXP (temp, 0), move_insn = 1;
750 else
751 {
752 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
753 if (temp && CONSTANT_P (XEXP (temp, 0)))
754 src = XEXP (temp, 0), move_insn = 1;
755 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
756 {
757 src = XEXP (temp, 0);
758 /* A libcall block can use regs that don't appear in
759 the equivalent expression. To move the libcall,
760 we must move those regs too. */
761 dependencies = libcall_other_reg (p, src);
762 }
763 }
764
765 /* Don't try to optimize a register that was made
766 by loop-optimization for an inner loop.
767 We don't know its life-span, so we can't compute the benefit. */
768 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
769 ;
770 /* In order to move a register, we need to have one of three cases:
771 (1) it is used only in the same basic block as the set
772 (2) it is not a user variable and it is not used in the
773 exit test (this can cause the variable to be used
774 before it is set just like a user-variable).
775 (3) the set is guaranteed to be executed once the loop starts,
776 and the reg is not used until after that. */
777 else if (! ((! maybe_never
778 && ! loop_reg_used_before_p (set, p, loop_start,
779 scan_start, end))
780 || (! REG_USERVAR_P (SET_DEST (set))
781 && ! REG_LOOP_TEST_P (SET_DEST (set)))
782 || reg_in_basic_block_p (p, SET_DEST (set))))
783 ;
784 else if ((tem = invariant_p (src))
785 && (dependencies == 0
786 || (tem2 = invariant_p (dependencies)) != 0)
787 && (n_times_set[REGNO (SET_DEST (set))] == 1
788 || (tem1
789 = consec_sets_invariant_p (SET_DEST (set),
790 n_times_set[REGNO (SET_DEST (set))],
791 p)))
792 /* If the insn can cause a trap (such as divide by zero),
793 can't move it unless it's guaranteed to be executed
794 once loop is entered. Even a function call might
795 prevent the trap insn from being reached
796 (since it might exit!) */
797 && ! ((maybe_never || call_passed)
798 && may_trap_p (src)))
799 {
800 register struct movable *m;
801 register int regno = REGNO (SET_DEST (set));
802
803 /* A potential lossage is a case where two insns
804 can be combined as long as they are both in the loop, but
805 we move one of them outside the loop. For large loops,
806 this can lose. The most common case of this is the address
807 of a function being called.
808
809 Therefore, if this register is marked as being used exactly
810 once if we are in a loop with calls (a "large loop"), see if
811 we can replace the usage of this register with the source
812 of this SET. If we can, delete this insn.
813
814 Don't do this if P has a REG_RETVAL note or if we have
815 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
816
817 if (reg_single_usage && reg_single_usage[regno] != 0
818 && reg_single_usage[regno] != const0_rtx
819 && REGNO_FIRST_UID (regno) == INSN_UID (p)
820 && (REGNO_LAST_UID (regno)
821 == INSN_UID (reg_single_usage[regno]))
822 && n_times_set[REGNO (SET_DEST (set))] == 1
823 && ! side_effects_p (SET_SRC (set))
824 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
825 #ifdef SMALL_REGISTER_CLASSES
826 && ! (SMALL_REGISTER_CLASSES
827 && GET_CODE (SET_SRC (set)) == REG
828 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
829 #endif
830 /* This test is not redundant; SET_SRC (set) might be
831 a call-clobbered register and the life of REGNO
832 might span a call. */
833 && ! modified_between_p (SET_SRC (set), p,
834 reg_single_usage[regno])
835 && no_labels_between_p (p, reg_single_usage[regno])
836 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
837 reg_single_usage[regno]))
838 {
839 /* Replace any usage in a REG_EQUAL note. Must copy the
840 new source, so that we don't get rtx sharing between the
841 SET_SOURCE and REG_NOTES of insn p. */
842 REG_NOTES (reg_single_usage[regno])
843 = replace_rtx (REG_NOTES (reg_single_usage[regno]),
844 SET_DEST (set), copy_rtx (SET_SRC (set)));
845
846 PUT_CODE (p, NOTE);
847 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
848 NOTE_SOURCE_FILE (p) = 0;
849 n_times_set[regno] = 0;
850 continue;
851 }
852
853 m = (struct movable *) alloca (sizeof (struct movable));
854 m->next = 0;
855 m->insn = p;
856 m->set_src = src;
857 m->dependencies = dependencies;
858 m->set_dest = SET_DEST (set);
859 m->force = 0;
860 m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
861 m->done = 0;
862 m->forces = 0;
863 m->partial = 0;
864 m->move_insn = move_insn;
865 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
866 m->savemode = VOIDmode;
867 m->regno = regno;
868 /* Set M->cond if either invariant_p or consec_sets_invariant_p
869 returned 2 (only conditionally invariant). */
870 m->cond = ((tem | tem1 | tem2) > 1);
871 m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
872 || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
873 m->match = 0;
874 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
875 - uid_luid[REGNO_FIRST_UID (regno)]);
876 m->savings = n_times_used[regno];
877 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
878 m->savings += libcall_benefit (p);
879 n_times_set[regno] = move_insn ? -2 : -1;
880 /* Add M to the end of the chain MOVABLES. */
881 if (movables == 0)
882 movables = m;
883 else
884 last_movable->next = m;
885 last_movable = m;
886
887 if (m->consec > 0)
888 {
889 /* Skip this insn, not checking REG_LIBCALL notes. */
890 p = next_nonnote_insn (p);
891 /* Skip the consecutive insns, if there are any. */
892 p = skip_consec_insns (p, m->consec);
893 /* Back up to the last insn of the consecutive group. */
894 p = prev_nonnote_insn (p);
895
896 /* We must now reset m->move_insn, m->is_equiv, and possibly
897 m->set_src to correspond to the effects of all the
898 insns. */
899 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
900 if (temp)
901 m->set_src = XEXP (temp, 0), m->move_insn = 1;
902 else
903 {
904 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
905 if (temp && CONSTANT_P (XEXP (temp, 0)))
906 m->set_src = XEXP (temp, 0), m->move_insn = 1;
907 else
908 m->move_insn = 0;
909
910 }
911 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
912 }
913 }
914 /* If this register is always set within a STRICT_LOW_PART
915 or set to zero, then its high bytes are constant.
916 So clear them outside the loop and within the loop
917 just load the low bytes.
918 We must check that the machine has an instruction to do so.
919 Also, if the value loaded into the register
920 depends on the same register, this cannot be done. */
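/* The insn pair recognized below looks roughly like this (sketch):

     (set (reg:SI r) (const_int 0))
     (set (strict_low_part (subreg:HI (reg:SI r) 0)) (mem:HI ...))

   i.e. a clear of the full reg followed by a narrow load into its
   low part. */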
921 else if (SET_SRC (set) == const0_rtx
922 && GET_CODE (NEXT_INSN (p)) == INSN
923 && (set1 = single_set (NEXT_INSN (p)))
924 && GET_CODE (set1) == SET
925 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
926 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
927 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
928 == SET_DEST (set))
929 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
930 {
931 register int regno = REGNO (SET_DEST (set));
932 if (n_times_set[regno] == 2)
933 {
934 register struct movable *m;
935 m = (struct movable *) alloca (sizeof (struct movable));
936 m->next = 0;
937 m->insn = p;
938 m->set_dest = SET_DEST (set);
939 m->dependencies = 0;
940 m->force = 0;
941 m->consec = 0;
942 m->done = 0;
943 m->forces = 0;
944 m->move_insn = 0;
945 m->partial = 1;
946 /* If the insn may not be executed on some cycles,
947 we can't clear the whole reg; clear just high part.
948 Not even if the reg is used only within this loop.
949 Consider this:
950 while (1)
951 while (s != t) {
952 if (foo ()) x = *s;
953 use (x);
954 }
955 Clearing x before the inner loop could clobber a value
956 being saved from the last time around the outer loop.
957 However, if the reg is not used outside this loop
958 and all uses of the register are in the same
959 basic block as the store, there is no problem.
960
961 If this insn was made by loop, we don't know its
962 INSN_LUID and hence must make a conservative
963 assumption. */
964 m->global = (INSN_UID (p) >= max_uid_for_loop
965 || (uid_luid[REGNO_LAST_UID (regno)]
966 > INSN_LUID (end))
967 || (uid_luid[REGNO_FIRST_UID (regno)]
968 < INSN_LUID (p))
969 || (labels_in_range_p
970 (p, uid_luid[REGNO_FIRST_UID (regno)])));
971 if (maybe_never && m->global)
972 m->savemode = GET_MODE (SET_SRC (set1));
973 else
974 m->savemode = VOIDmode;
975 m->regno = regno;
976 m->cond = 0;
977 m->match = 0;
978 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
979 - uid_luid[REGNO_FIRST_UID (regno)]);
980 m->savings = 1;
981 n_times_set[regno] = -1;
982 /* Add M to the end of the chain MOVABLES. */
983 if (movables == 0)
984 movables = m;
985 else
986 last_movable->next = m;
987 last_movable = m;
988 }
989 }
990 }
991 /* Past a call insn, we get to insns which might not be executed
992 because the call might exit. This matters for insns that trap.
993 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
994 so they don't count. */
995 else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
996 call_passed = 1;
997 /* Past a label or a jump, we get to insns for which we
998 can't count on whether or how many times they will be
999 executed during each iteration. Therefore, we can
1000 only move out sets of trivial variables
1001 (those not used after the loop). */
1002 /* Similar code appears twice in strength_reduce. */
1003 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1004 /* If we enter the loop in the middle, and scan around to the
1005 beginning, don't set maybe_never for that. This must be an
1006 unconditional jump, otherwise the code at the top of the
1007 loop might never be executed. Unconditional jumps are
1008 followed by a barrier and then the loop end. */
1009 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
1010 && NEXT_INSN (NEXT_INSN (p)) == end
1011 && simplejump_p (p)))
1012 maybe_never = 1;
1013 else if (GET_CODE (p) == NOTE)
1014 {
1015 /* At the virtual top of a converted loop, insns are again known to
1016 be executed: logically, the loop begins here even though the exit
1017 code has been duplicated. */
1018 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1019 maybe_never = call_passed = 0;
1020 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1021 loop_depth++;
1022 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1023 loop_depth--;
1024 }
1025 }
1026
1027 /* If one movable subsumes another, ignore that other. */
1028
1029 ignore_some_movables (movables);
1030
1031 /* For each movable insn, see if the reg that it loads
1032 leads, when it dies, right into another conditionally movable insn.
1033 If so, record that the second insn "forces" the first one,
1034 since the second can be moved only if the first is. */
1035
1036 force_movables (movables);
1037
1038 /* See if there are multiple movable insns that load the same value.
1039 If there are, make all but the first point at the first one
1040 through the `match' field, and add the priorities of them
1041 all together as the priority of the first. */
1042
1043 combine_movables (movables, nregs);
1044
1045 /* Now consider each movable insn to decide whether it is worth moving.
1046 Store 0 in n_times_set for each reg that is moved. */
1047
1048 move_movables (movables, threshold,
1049 insn_count, loop_start, end, nregs);
1050
1051 /* Now candidates that still are negative are those not moved.
1052 Change n_times_set to indicate that those are not actually invariant. */
1053 for (i = 0; i < nregs; i++)
1054 if (n_times_set[i] < 0)
1055 n_times_set[i] = n_times_used[i];
1056
1057 if (flag_strength_reduce)
1058 strength_reduce (scan_start, end, loop_top,
1059 insn_count, loop_start, end);
1060 }
1061 \f
1062 /* Add elements to *OUTPUT to record all the pseudo-regs
1063 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1064
1065 void
1066 record_excess_regs (in_this, not_in_this, output)
1067 rtx in_this, not_in_this;
1068 rtx *output;
1069 {
1070 enum rtx_code code;
1071 char *fmt;
1072 int i;
1073
1074 code = GET_CODE (in_this);
1075
1076 switch (code)
1077 {
1078 case PC:
1079 case CC0:
1080 case CONST_INT:
1081 case CONST_DOUBLE:
1082 case CONST:
1083 case SYMBOL_REF:
1084 case LABEL_REF:
1085 return;
1086
1087 case REG:
1088 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1089 && ! reg_mentioned_p (in_this, not_in_this))
1090 *output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
1091 return;
1092 }
1093
1094 fmt = GET_RTX_FORMAT (code);
1095 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1096 {
1097 int j;
1098
1099 switch (fmt[i])
1100 {
1101 case 'E':
1102 for (j = 0; j < XVECLEN (in_this, i); j++)
1103 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1104 break;
1105
1106 case 'e':
1107 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1108 break;
1109 }
1110 }
1111 }
1112 \f
1113 /* Check what regs are referred to in the libcall block ending with INSN,
1114 aside from those mentioned in the equivalent value.
1115 If there are none, return 0.
1116 If there are one or more, return an EXPR_LIST containing all of them. */
1117
1118 static rtx
1119 libcall_other_reg (insn, equiv)
1120 rtx insn, equiv;
1121 {
1122 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1123 rtx p = XEXP (note, 0);
1124 rtx output = 0;
1125
1126 /* First, find all the regs used in the libcall block
1127 that are not mentioned as inputs to the result. */
1128
1129 while (p != insn)
1130 {
1131 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1132 || GET_CODE (p) == CALL_INSN)
1133 record_excess_regs (PATTERN (p), equiv, &output);
1134 p = NEXT_INSN (p);
1135 }
1136
1137 return output;
1138 }
1139 \f
1140 /* Return 1 if all uses of REG
1141 are between INSN and the end of the basic block. */
1142
1143 static int
1144 reg_in_basic_block_p (insn, reg)
1145 rtx insn, reg;
1146 {
1147 int regno = REGNO (reg);
1148 rtx p;
1149
1150 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1151 return 0;
1152
1153 /* Search this basic block for the already recorded last use of the reg. */
1154 for (p = insn; p; p = NEXT_INSN (p))
1155 {
1156 switch (GET_CODE (p))
1157 {
1158 case NOTE:
1159 break;
1160
1161 case INSN:
1162 case CALL_INSN:
1163 /* Ordinary insn: if this is the last use, we win. */
1164 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1165 return 1;
1166 break;
1167
1168 case JUMP_INSN:
1169 /* Jump insn: if this is the last use, we win. */
1170 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1171 return 1;
1172 /* Otherwise, it's the end of the basic block, so we lose. */
1173 return 0;
1174
1175 case CODE_LABEL:
1176 case BARRIER:
1177 /* It's the end of the basic block, so we lose. */
1178 return 0;
1179 }
1180 }
1181
1182 /* The "last use" doesn't follow the "first use"?? */
1183 abort ();
1184 }
1185 \f
1186 /* Compute the benefit of eliminating the insns in the block whose
1187 last insn is LAST. This may be a group of insns used to compute a
1188 value directly or can contain a library call. */
1189
1190 static int
1191 libcall_benefit (last)
1192 rtx last;
1193 {
1194 rtx insn;
1195 int benefit = 0;
1196
1197 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1198 insn != last; insn = NEXT_INSN (insn))
1199 {
1200 if (GET_CODE (insn) == CALL_INSN)
1201 benefit += 10; /* Assume at least this many insns in a library
1202 routine. */
1203 else if (GET_CODE (insn) == INSN
1204 && GET_CODE (PATTERN (insn)) != USE
1205 && GET_CODE (PATTERN (insn)) != CLOBBER)
1206 benefit++;
1207 }
1208
1209 return benefit;
1210 }
1211 \f
1212 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1213
1214 static rtx
1215 skip_consec_insns (insn, count)
1216 rtx insn;
1217 int count;
1218 {
1219 for (; count > 0; count--)
1220 {
1221 rtx temp;
1222
1223 /* If first insn of libcall sequence, skip to end. */
1224 /* Do this at start of loop, since INSN is guaranteed to
1225 be an insn here. */
1226 if (GET_CODE (insn) != NOTE
1227 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1228 insn = XEXP (temp, 0);
1229
1230 do insn = NEXT_INSN (insn);
1231 while (GET_CODE (insn) == NOTE);
1232 }
1233
1234 return insn;
1235 }
1236
1237 /* Ignore any movable whose insn falls within a libcall
1238 which is part of another movable.
1239 We make use of the fact that the movable for the libcall value
1240 was made later and so appears later on the chain. */
1241
1242 static void
1243 ignore_some_movables (movables)
1244 struct movable *movables;
1245 {
1246 register struct movable *m, *m1;
1247
1248 for (m = movables; m; m = m->next)
1249 {
1250 /* Is this a movable for the value of a libcall? */
1251 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1252 if (note)
1253 {
1254 rtx insn;
1255 /* Check for earlier movables inside that range,
1256 and mark them invalid. We cannot use LUIDs here because
1257 insns created by loop.c for prior loops don't have LUIDs.
1258 Rather than reject all such insns from movables, we just
1259 explicitly check each insn in the libcall (since invariant
1260 libcalls aren't that common). */
1261 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1262 for (m1 = movables; m1 != m; m1 = m1->next)
1263 if (m1->insn == insn)
1264 m1->done = 1;
1265 }
1266 }
1267 }
1268
1269 /* For each movable insn, see if the reg that it loads
1270 leads, when it dies, right into another conditionally movable insn.
1271 If so, record that the second insn "forces" the first one,
1272 since the second can be moved only if the first is. */
1273
1274 static void
1275 force_movables (movables)
1276 struct movable *movables;
1277 {
1278 register struct movable *m, *m1;
1279 for (m1 = movables; m1; m1 = m1->next)
1280 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1281 if (!m1->partial && !m1->done)
1282 {
1283 int regno = m1->regno;
1284 for (m = m1->next; m; m = m->next)
1285 /* ??? Could this be a bug? What if CSE caused the
1286 register of M1 to be used after this insn?
1287 Since CSE does not update regno_last_uid,
1288 this insn M->insn might not be where it dies.
1289 But very likely this doesn't matter; what matters is
1290 that M's reg is computed from M1's reg. */
1291 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1292 && !m->done)
1293 break;
1294 if (m != 0 && m->set_src == m1->set_dest
1295 /* If m->consec, m->set_src isn't valid. */
1296 && m->consec == 0)
1297 m = 0;
1298
1299 /* Increase the priority of moving the first insn
1300 since it permits the second to be moved as well. */
1301 if (m != 0)
1302 {
1303 m->forces = m1;
1304 m1->lifetime += m->lifetime;
1305 m1->savings += m->savings;
1306 }
1307 }
1308 }
1309 \f
1310 /* Find invariant expressions that are equal and can be combined into
1311 one register. */
1312
1313 static void
1314 combine_movables (movables, nregs)
1315 struct movable *movables;
1316 int nregs;
1317 {
1318 register struct movable *m;
1319 char *matched_regs = (char *) alloca (nregs);
1320 enum machine_mode mode;
1321
1322 /* Regs that are set more than once are not allowed to match
1323 or be matched. I'm no longer sure why not. */
1324 /* Perhaps testing m->consec_sets would be more appropriate here? */
1325
1326 for (m = movables; m; m = m->next)
1327 if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
1328 {
1329 register struct movable *m1;
1330 int regno = m->regno;
1331
1332 bzero (matched_regs, nregs);
1333 matched_regs[regno] = 1;
1334
1335 /* We want later insns to match the first one. Don't make the first
1336 one match any later ones. So start this loop at m->next. */
1337 for (m1 = m->next; m1; m1 = m1->next)
1338 if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
1339 /* A reg used outside the loop mustn't be eliminated. */
1340 && !m1->global
1341 /* A reg used for zero-extending mustn't be eliminated. */
1342 && !m1->partial
1343 && (matched_regs[m1->regno]
1344 ||
1345 (
1346 /* Can combine regs with different modes loaded from the
1347 same constant only if the modes are the same or
1348 if both are integer modes with M wider or the same
1349 width as M1. The check for integer is redundant, but
1350 safe, since the only case of differing destination
1351 modes with equal sources is when both sources are
1352 VOIDmode, i.e., CONST_INT. */
1353 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1354 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1355 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1356 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1357 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1358 /* See if the source of M1 says it matches M. */
1359 && ((GET_CODE (m1->set_src) == REG
1360 && matched_regs[REGNO (m1->set_src)])
1361 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1362 movables))))
1363 && ((m->dependencies == m1->dependencies)
1364 || rtx_equal_p (m->dependencies, m1->dependencies)))
1365 {
1366 m->lifetime += m1->lifetime;
1367 m->savings += m1->savings;
1368 m1->done = 1;
1369 m1->match = m;
1370 matched_regs[m1->regno] = 1;
1371 }
1372 }
1373
1374 /* Now combine the regs used for zero-extension.
1375 This can be done for those not marked `global'
1376 provided their lives don't overlap. */
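/* E.g. two pseudos that are each zeroed and then loaded with a HImode
   value can be combined so that one clearing insn before the loop
   serves both, provided the luid ranges of their uses do not intersect.
   (Illustrative.) */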
1377
1378 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1379 mode = GET_MODE_WIDER_MODE (mode))
1380 {
1381 register struct movable *m0 = 0;
1382
1383 /* Combine all the registers for extension from mode MODE.
1384 Don't combine any that are used outside this loop. */
1385 for (m = movables; m; m = m->next)
1386 if (m->partial && ! m->global
1387 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1388 {
1389 register struct movable *m1;
1390 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1391 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1392
1393 if (m0 == 0)
1394 {
1395 /* First one: don't check for overlap, just record it. */
1396 m0 = m;
1397 continue;
1398 }
1399
1400 /* Make sure they extend to the same mode.
1401 (Almost always true.) */
1402 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1403 continue;
1404
1405 /* We already have one: check for overlap with those
1406 already combined together. */
1407 for (m1 = movables; m1 != m; m1 = m1->next)
1408 if (m1 == m0 || (m1->partial && m1->match == m0))
1409 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1410 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1411 goto overlap;
1412
1413 /* No overlap: we can combine this with the others. */
1414 m0->lifetime += m->lifetime;
1415 m0->savings += m->savings;
1416 m->done = 1;
1417 m->match = m0;
1418
1419 overlap: ;
1420 }
1421 }
1422 }
1423 \f
1424 /* Return 1 if regs X and Y will become the same if moved. */
1425
1426 static int
1427 regs_match_p (x, y, movables)
1428 rtx x, y;
1429 struct movable *movables;
1430 {
1431 int xn = REGNO (x);
1432 int yn = REGNO (y);
1433 struct movable *mx, *my;
1434
1435 for (mx = movables; mx; mx = mx->next)
1436 if (mx->regno == xn)
1437 break;
1438
1439 for (my = movables; my; my = my->next)
1440 if (my->regno == yn)
1441 break;
1442
1443 return (mx && my
1444 && ((mx->match == my->match && mx->match != 0)
1445 || mx->match == my
1446 || mx == my->match));
1447 }
1448
1449 /* Return 1 if X and Y are identical-looking rtx's.
1450 This is the Lisp function EQUAL for rtx arguments.
1451
1452 If two registers are matching movables or a movable register and an
1453 equivalent constant, consider them equal. */
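/* For instance (sketch): if some movable M loads (reg 60) from
   (const_int 5) and will be re-emitted as a move, then (reg 60)
   compares equal to (const_int 5) here even though the rtx codes
   differ. */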
1454
1455 static int
1456 rtx_equal_for_loop_p (x, y, movables)
1457 rtx x, y;
1458 struct movable *movables;
1459 {
1460 register int i;
1461 register int j;
1462 register struct movable *m;
1463 register enum rtx_code code;
1464 register char *fmt;
1465
1466 if (x == y)
1467 return 1;
1468 if (x == 0 || y == 0)
1469 return 0;
1470
1471 code = GET_CODE (x);
1472
1473 /* If we have a register and a constant, they may sometimes be
1474 equal. */
1475 if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
1476 && CONSTANT_P (y))
1477 for (m = movables; m; m = m->next)
1478 if (m->move_insn && m->regno == REGNO (x)
1479 && rtx_equal_p (m->set_src, y))
1480 return 1;
1481
1482 else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
1483 && CONSTANT_P (x))
1484 for (m = movables; m; m = m->next)
1485 if (m->move_insn && m->regno == REGNO (y)
1486 && rtx_equal_p (m->set_src, x))
1487 return 1;
1488
1489 /* Otherwise, rtx's of different codes cannot be equal. */
1490 if (code != GET_CODE (y))
1491 return 0;
1492
1493 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1494 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1495
1496 if (GET_MODE (x) != GET_MODE (y))
1497 return 0;
1498
1499 /* These three types of rtx's can be compared nonrecursively. */
1500 if (code == REG)
1501 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1502
1503 if (code == LABEL_REF)
1504 return XEXP (x, 0) == XEXP (y, 0);
1505 if (code == SYMBOL_REF)
1506 return XSTR (x, 0) == XSTR (y, 0);
1507
1508 /* Compare the elements. If any pair of corresponding elements
1509 fail to match, return 0 for the whole thing.
1510
1511 fmt = GET_RTX_FORMAT (code);
1512 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1513 {
1514 switch (fmt[i])
1515 {
1516 case 'w':
1517 if (XWINT (x, i) != XWINT (y, i))
1518 return 0;
1519 break;
1520
1521 case 'i':
1522 if (XINT (x, i) != XINT (y, i))
1523 return 0;
1524 break;
1525
1526 case 'E':
1527 /* Two vectors must have the same length. */
1528 if (XVECLEN (x, i) != XVECLEN (y, i))
1529 return 0;
1530
1531 /* And the corresponding elements must match. */
1532 for (j = 0; j < XVECLEN (x, i); j++)
1533 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1534 return 0;
1535 break;
1536
1537 case 'e':
1538 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1539 return 0;
1540 break;
1541
1542 case 's':
1543 if (strcmp (XSTR (x, i), XSTR (y, i)))
1544 return 0;
1545 break;
1546
1547 case 'u':
1548 /* These are just backpointers, so they don't matter. */
1549 break;
1550
1551 case '0':
1552 break;
1553
1554 /* It is believed that rtx's at this level will never
1555 contain anything but integers and other rtx's,
1556 except for within LABEL_REFs and SYMBOL_REFs. */
1557 default:
1558 abort ();
1559 }
1560 }
1561 return 1;
1562 }
1563 \f
1564 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1565 insns in INSNS which use that reference. */
1566
1567 static void
1568 add_label_notes (x, insns)
1569 rtx x;
1570 rtx insns;
1571 {
1572 enum rtx_code code = GET_CODE (x);
1573 int i, j;
1574 char *fmt;
1575 rtx insn;
1576
1577 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1578 {
1579 rtx next = next_real_insn (XEXP (x, 0));
1580
1581 /* Don't record labels that refer to dispatch tables.
1582 This is not necessary, since the tablejump references the same label.
1583 And if we did record them, flow.c would make worse code. */
1584 if (next == 0
1585 || ! (GET_CODE (next) == JUMP_INSN
1586 && (GET_CODE (PATTERN (next)) == ADDR_VEC
1587 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
1588 {
1589 for (insn = insns; insn; insn = NEXT_INSN (insn))
1590 if (reg_mentioned_p (XEXP (x, 0), insn))
1591 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
1592 REG_NOTES (insn));
1593 }
1594 return;
1595 }
1596
1597 fmt = GET_RTX_FORMAT (code);
1598 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1599 {
1600 if (fmt[i] == 'e')
1601 add_label_notes (XEXP (x, i), insns);
1602 else if (fmt[i] == 'E')
1603 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1604 add_label_notes (XVECEXP (x, i, j), insns);
1605 }
1606 }
1607 \f
1608 /* Scan MOVABLES, and move the insns that deserve to be moved.
1609 If two matching movables are combined, replace one reg with the
1610 other throughout. */
1611
1612 static void
1613 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1614 struct movable *movables;
1615 int threshold;
1616 int insn_count;
1617 rtx loop_start;
1618 rtx end;
1619 int nregs;
1620 {
1621 rtx new_start = 0;
1622 register struct movable *m;
1623 register rtx p;
1624 /* Map of pseudo-register replacements to handle combining
1625 when we move several insns that load the same value
1626 into different pseudo-registers. */
1627 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1628 char *already_moved = (char *) alloca (nregs);
1629
1630 bzero (already_moved, nregs);
1631 bzero ((char *) reg_map, nregs * sizeof (rtx));
1632
1633 num_movables = 0;
1634
1635 for (m = movables; m; m = m->next)
1636 {
1637 /* Describe this movable insn. */
1638
1639 if (loop_dump_stream)
1640 {
1641 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1642 INSN_UID (m->insn), m->regno, m->lifetime);
1643 if (m->consec > 0)
1644 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1645 if (m->cond)
1646 fprintf (loop_dump_stream, "cond ");
1647 if (m->force)
1648 fprintf (loop_dump_stream, "force ");
1649 if (m->global)
1650 fprintf (loop_dump_stream, "global ");
1651 if (m->done)
1652 fprintf (loop_dump_stream, "done ");
1653 if (m->move_insn)
1654 fprintf (loop_dump_stream, "move-insn ");
1655 if (m->match)
1656 fprintf (loop_dump_stream, "matches %d ",
1657 INSN_UID (m->match->insn));
1658 if (m->forces)
1659 fprintf (loop_dump_stream, "forces %d ",
1660 INSN_UID (m->forces->insn));
1661 }
1662
1663 /* Count movables. Value used in heuristics in strength_reduce. */
1664 num_movables++;
1665
1666 /* Ignore the insn if it's already done (it matched something else).
1667 Otherwise, see if it is now safe to move. */
1668
1669 if (!m->done
1670 && (! m->cond
1671 || (1 == invariant_p (m->set_src)
1672 && (m->dependencies == 0
1673 || 1 == invariant_p (m->dependencies))
1674 && (m->consec == 0
1675 || 1 == consec_sets_invariant_p (m->set_dest,
1676 m->consec + 1,
1677 m->insn))))
1678 && (! m->forces || m->forces->done))
1679 {
1680 register int regno;
1681 register rtx p;
1682 int savings = m->savings;
1683
1684 /* We have an insn that is safe to move.
1685 Compute its desirability. */
1686
1687 p = m->insn;
1688 regno = m->regno;
1689
1690 if (loop_dump_stream)
1691 fprintf (loop_dump_stream, "savings %d ", savings);
1692
1693 if (moved_once[regno])
1694 {
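	      /* Doubling INSN_COUNT makes the desirability test below
		 twice as hard to pass, effectively halving the savings
		 credited to a reg already moved out of some other loop.  */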
1695 insn_count *= 2;
1696
1697 if (loop_dump_stream)
1698 fprintf (loop_dump_stream, "halved since already moved ");
1699 }
1700
1701 /* An insn MUST be moved if we already moved something else
1702 which is safe only if this one is moved too: that is,
1703 if already_moved[REGNO] is nonzero. */
1704
1705 /* An insn is desirable to move if the new lifetime of the
1706 register is no more than THRESHOLD times the old lifetime.
1707 If it's not desirable, it means the loop is so big
1708 that moving won't speed things up much,
1709 and it is liable to make register usage worse. */
1710
1711 /* It is also desirable to move if it can be moved at no
1712 extra cost because something else was already moved. */
1713
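	  /* Illustrative numbers only: with THRESHOLD 6, an insn whose
	     SAVINGS is 2 and whose reg's lifetime is 10 passes the test
	     below whenever the loop contains at most 120 insns.  */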
1714 if (already_moved[regno]
1715 || flag_move_all_movables
1716 || (threshold * savings * m->lifetime) >= insn_count
1717 || (m->forces && m->forces->done
1718 && n_times_used[m->forces->regno] == 1))
1719 {
1720 int count;
1721 register struct movable *m1;
1722 rtx first;
1723
1724 /* Now move the insns that set the reg. */
1725
1726 if (m->partial && m->match)
1727 {
1728 rtx newpat, i1;
1729 rtx r1, r2;
1730 /* Find the end of this chain of matching regs.
1731 Thus, we load each reg in the chain from that one reg.
1732 And that reg is loaded with 0 directly,
1733 since it has ->match == 0. */
1734 for (m1 = m; m1->match; m1 = m1->match);
1735 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1736 SET_DEST (PATTERN (m1->insn)));
1737 i1 = emit_insn_before (newpat, loop_start);
1738
1739 /* Mark the moved, invariant reg as being allowed to
1740 share a hard reg with the other matching invariant. */
1741 REG_NOTES (i1) = REG_NOTES (m->insn);
1742 r1 = SET_DEST (PATTERN (m->insn));
1743 r2 = SET_DEST (PATTERN (m1->insn));
1744 regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
1745 gen_rtx (EXPR_LIST, VOIDmode, r2,
1746 regs_may_share));
1747 delete_insn (m->insn);
1748
1749 if (new_start == 0)
1750 new_start = i1;
1751
1752 if (loop_dump_stream)
1753 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1754 }
1755 /* If we are to re-generate the item being moved with a
1756 new move insn, first delete what we have and then emit
1757 the move insn before the loop. */
1758 else if (m->move_insn)
1759 {
1760 rtx i1, temp;
1761
1762 for (count = m->consec; count >= 0; count--)
1763 {
1764 /* If this is the first insn of a library call sequence,
1765 skip to the end. */
1766 if (GET_CODE (p) != NOTE
1767 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1768 p = XEXP (temp, 0);
1769
1770 /* If this is the last insn of a libcall sequence, then
1771 delete every insn in the sequence except the last.
1772 The last insn is handled in the normal manner. */
1773 if (GET_CODE (p) != NOTE
1774 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1775 {
1776 temp = XEXP (temp, 0);
1777 while (temp != p)
1778 temp = delete_insn (temp);
1779 }
1780
1781 p = delete_insn (p);
1782 while (p && GET_CODE (p) == NOTE)
1783 p = NEXT_INSN (p);
1784 }
1785
1786 start_sequence ();
1787 emit_move_insn (m->set_dest, m->set_src);
1788 temp = get_insns ();
1789 end_sequence ();
1790
1791 add_label_notes (m->set_src, temp);
1792
1793 i1 = emit_insns_before (temp, loop_start);
1794 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1795 REG_NOTES (i1)
1796 = gen_rtx (EXPR_LIST,
1797 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1798 m->set_src, REG_NOTES (i1));
1799
1800 if (loop_dump_stream)
1801 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1802
1803 /* The more regs we move, the less we like moving them. */
1804 threshold -= 3;
1805 }
1806 else
1807 {
1808 for (count = m->consec; count >= 0; count--)
1809 {
1810 rtx i1, temp;
1811
1812 /* If first insn of libcall sequence, skip to end. */
1813 /* Do this at start of loop, since p is guaranteed to
1814 be an insn here. */
1815 if (GET_CODE (p) != NOTE
1816 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1817 p = XEXP (temp, 0);
1818
1819 /* If last insn of libcall sequence, move all
1820 insns except the last before the loop. The last
1821 insn is handled in the normal manner. */
1822 if (GET_CODE (p) != NOTE
1823 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1824 {
1825 rtx fn_address = 0;
1826 rtx fn_reg = 0;
1827 rtx fn_address_insn = 0;
1828
1829 first = 0;
1830 for (temp = XEXP (temp, 0); temp != p;
1831 temp = NEXT_INSN (temp))
1832 {
1833 rtx body;
1834 rtx n;
1835 rtx next;
1836
1837 if (GET_CODE (temp) == NOTE)
1838 continue;
1839
1840 body = PATTERN (temp);
1841
1842 /* Find the next insn after TEMP,
1843 not counting USE or NOTE insns. */
1844 for (next = NEXT_INSN (temp); next != p;
1845 next = NEXT_INSN (next))
1846 if (! (GET_CODE (next) == INSN
1847 && GET_CODE (PATTERN (next)) == USE)
1848 && GET_CODE (next) != NOTE)
1849 break;
1850
1851 /* If that is the call, this may be the insn
1852 that loads the function address.
1853
1854 Extract the function address from the insn
1855 that loads it into a register.
1856 If this insn was cse'd, we get incorrect code.
1857
1858 So emit a new move insn that copies the
1859 function address into the register that the
1860 call insn will use. flow.c will delete any
1861 redundant stores that we have created. */
1862 if (GET_CODE (next) == CALL_INSN
1863 && GET_CODE (body) == SET
1864 && GET_CODE (SET_DEST (body)) == REG
1865 && (n = find_reg_note (temp, REG_EQUAL,
1866 NULL_RTX)))
1867 {
1868 fn_reg = SET_SRC (body);
1869 if (GET_CODE (fn_reg) != REG)
1870 fn_reg = SET_DEST (body);
1871 fn_address = XEXP (n, 0);
1872 fn_address_insn = temp;
1873 }
1874 /* We have the call insn.
1875 If it uses the register we suspect it might,
1876 load it with the correct address directly. */
1877 if (GET_CODE (temp) == CALL_INSN
1878 && fn_address != 0
1879 && reg_referenced_p (fn_reg, body))
1880 emit_insn_after (gen_move_insn (fn_reg,
1881 fn_address),
1882 fn_address_insn);
1883
1884 if (GET_CODE (temp) == CALL_INSN)
1885 {
1886 i1 = emit_call_insn_before (body, loop_start);
1887 /* Because the USAGE information potentially
1888 contains objects other than hard registers
1889 we need to copy it. */
1890 if (CALL_INSN_FUNCTION_USAGE (temp))
1891 CALL_INSN_FUNCTION_USAGE (i1)
1892 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1893 }
1894 else
1895 i1 = emit_insn_before (body, loop_start);
1896 if (first == 0)
1897 first = i1;
1898 if (temp == fn_address_insn)
1899 fn_address_insn = i1;
1900 REG_NOTES (i1) = REG_NOTES (temp);
1901 delete_insn (temp);
1902 }
1903 }
1904 if (m->savemode != VOIDmode)
1905 {
1906 /* P sets REG to zero; but we should clear only
1907 the bits that are not covered by the mode
1908 m->savemode. */
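		      /* E.g., if m->savemode is QImode, the mask computed
			 below is (1 << 8) - 1 = 0xff, so the low byte is
			 preserved and all higher bits are cleared.  */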
1909 rtx reg = m->set_dest;
1910 rtx sequence;
1911 rtx tem;
1912
1913 start_sequence ();
1914 tem = expand_binop
1915 (GET_MODE (reg), and_optab, reg,
1916 GEN_INT ((((HOST_WIDE_INT) 1
1917 << GET_MODE_BITSIZE (m->savemode)))
1918 - 1),
1919 reg, 1, OPTAB_LIB_WIDEN);
1920 if (tem == 0)
1921 abort ();
1922 if (tem != reg)
1923 emit_move_insn (reg, tem);
1924 sequence = gen_sequence ();
1925 end_sequence ();
1926 i1 = emit_insn_before (sequence, loop_start);
1927 }
1928 else if (GET_CODE (p) == CALL_INSN)
1929 {
1930 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1931 /* Because the USAGE information potentially
1932 contains objects other than hard registers
1933 we need to copy it. */
1934 if (CALL_INSN_FUNCTION_USAGE (p))
1935 CALL_INSN_FUNCTION_USAGE (i1)
1936 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1937 }
1938 else
1939 i1 = emit_insn_before (PATTERN (p), loop_start);
1940
1941 REG_NOTES (i1) = REG_NOTES (p);
1942
1943 /* If there is a REG_EQUAL note present whose value is
1944 not loop invariant, then delete it, since it may
1945 cause problems with later optimization passes.
1946 It is possible for cse to create such notes
1947 like this as a result of record_jump_cond. */
1948
1949 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1950 && ! invariant_p (XEXP (temp, 0)))
1951 remove_note (i1, temp);
1952
1953 if (new_start == 0)
1954 new_start = i1;
1955
1956 if (loop_dump_stream)
1957 fprintf (loop_dump_stream, " moved to %d",
1958 INSN_UID (i1));
1959
1960 #if 0
1961		      /* This isn't needed because REG_NOTES is copied
1962			 above, and would be wrong since P might be a PARALLEL.  */
1963 if (REG_NOTES (i1) == 0
1964 && ! m->partial /* But not if it's a zero-extend clr. */
1965 && ! m->global /* and not if used outside the loop
1966 (since it might get set outside). */
1967 && CONSTANT_P (SET_SRC (PATTERN (p))))
1968 REG_NOTES (i1)
1969 = gen_rtx (EXPR_LIST, REG_EQUAL,
1970 SET_SRC (PATTERN (p)), REG_NOTES (i1));
1971 #endif
1972
1973 /* If library call, now fix the REG_NOTES that contain
1974 insn pointers, namely REG_LIBCALL on FIRST
1975 and REG_RETVAL on I1. */
1976		  if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
1977 {
1978 XEXP (temp, 0) = first;
1979 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1980 XEXP (temp, 0) = i1;
1981 }
1982
1983 delete_insn (p);
1984 do p = NEXT_INSN (p);
1985 while (p && GET_CODE (p) == NOTE);
1986 }
1987
1988 /* The more regs we move, the less we like moving them. */
1989 threshold -= 3;
1990 }
1991
1992 /* Any other movable that loads the same register
1993 MUST be moved. */
1994 already_moved[regno] = 1;
1995
1996 /* This reg has been moved out of one loop. */
1997 moved_once[regno] = 1;
1998
1999 /* The reg set here is now invariant. */
2000 if (! m->partial)
2001 n_times_set[regno] = 0;
2002
2003 m->done = 1;
2004
2005 /* Change the length-of-life info for the register
2006 to say it lives at least the full length of this loop.
2007 This will help guide optimizations in outer loops. */
2008
2009 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2010 /* This is the old insn before all the moved insns.
2011 We can't use the moved insn because it is out of range
2012 in uid_luid. Only the old insns have luids. */
2013 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2014 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2015 REGNO_LAST_UID (regno) = INSN_UID (end);
2016
2017 /* Combine with this moved insn any other matching movables. */
2018
2019 if (! m->partial)
2020 for (m1 = movables; m1; m1 = m1->next)
2021 if (m1->match == m)
2022 {
2023 rtx temp;
2024
2025 /* Schedule the reg loaded by M1
2026			     for replacement so that it shares the reg of M.
2027			     If the modes differ (only possible in restricted
2028			     circumstances), make a SUBREG.  */
2029 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2030 reg_map[m1->regno] = m->set_dest;
2031 else
2032 reg_map[m1->regno]
2033 = gen_lowpart_common (GET_MODE (m1->set_dest),
2034 m->set_dest);
2035
2036 /* Get rid of the matching insn
2037 and prevent further processing of it. */
2038 m1->done = 1;
2039
2040			  /* If library call, delete all insns except the
2041			     last, which is deleted below.  */
2042			  if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2043						     NULL_RTX)))
2044 {
2045 for (temp = XEXP (temp, 0); temp != m1->insn;
2046 temp = NEXT_INSN (temp))
2047 delete_insn (temp);
2048 }
2049 delete_insn (m1->insn);
2050
2051 /* Any other movable that loads the same register
2052 MUST be moved. */
2053 already_moved[m1->regno] = 1;
2054
2055 /* The reg merged here is now invariant,
2056 if the reg it matches is invariant. */
2057 if (! m->partial)
2058 n_times_set[m1->regno] = 0;
2059 }
2060 }
2061 else if (loop_dump_stream)
2062 fprintf (loop_dump_stream, "not desirable");
2063 }
2064 else if (loop_dump_stream && !m->match)
2065 fprintf (loop_dump_stream, "not safe");
2066
2067 if (loop_dump_stream)
2068 fprintf (loop_dump_stream, "\n");
2069 }
2070
2071 if (new_start == 0)
2072 new_start = loop_start;
2073
2074 /* Go through all the instructions in the loop, making
2075 all the register substitutions scheduled in REG_MAP. */
2076 for (p = new_start; p != end; p = NEXT_INSN (p))
2077 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2078 || GET_CODE (p) == CALL_INSN)
2079 {
2080 replace_regs (PATTERN (p), reg_map, nregs, 0);
2081 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2082 INSN_CODE (p) = -1;
2083 }
2084 }
2085 \f
2086 #if 0
2087 /* Scan X and replace the address of any MEM in it with ADDR.
2088 REG is the address that MEM should have before the replacement. */
2089
2090 static void
2091 replace_call_address (x, reg, addr)
2092 rtx x, reg, addr;
2093 {
2094 register enum rtx_code code;
2095 register int i;
2096 register char *fmt;
2097
2098 if (x == 0)
2099 return;
2100 code = GET_CODE (x);
2101 switch (code)
2102 {
2103 case PC:
2104 case CC0:
2105 case CONST_INT:
2106 case CONST_DOUBLE:
2107 case CONST:
2108 case SYMBOL_REF:
2109 case LABEL_REF:
2110 case REG:
2111 return;
2112
2113 case SET:
2114 /* Short cut for very common case. */
2115 replace_call_address (XEXP (x, 1), reg, addr);
2116 return;
2117
2118 case CALL:
2119 /* Short cut for very common case. */
2120 replace_call_address (XEXP (x, 0), reg, addr);
2121 return;
2122
2123 case MEM:
2124 /* If this MEM uses a reg other than the one we expected,
2125 something is wrong. */
2126 if (XEXP (x, 0) != reg)
2127 abort ();
2128 XEXP (x, 0) = addr;
2129 return;
2130 }
2131
2132 fmt = GET_RTX_FORMAT (code);
2133 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2134 {
2135 if (fmt[i] == 'e')
2136 replace_call_address (XEXP (x, i), reg, addr);
2137 if (fmt[i] == 'E')
2138 {
2139 register int j;
2140 for (j = 0; j < XVECLEN (x, i); j++)
2141 replace_call_address (XVECEXP (x, i, j), reg, addr);
2142 }
2143 }
2144 }
2145 #endif
2146 \f
2147 /* Return the number of memory refs to addresses that vary
2148 in the rtx X. */
2149
2150 static int
2151 count_nonfixed_reads (x)
2152 rtx x;
2153 {
2154 register enum rtx_code code;
2155 register int i;
2156 register char *fmt;
2157 int value;
2158
2159 if (x == 0)
2160 return 0;
2161
2162 code = GET_CODE (x);
2163 switch (code)
2164 {
2165 case PC:
2166 case CC0:
2167 case CONST_INT:
2168 case CONST_DOUBLE:
2169 case CONST:
2170 case SYMBOL_REF:
2171 case LABEL_REF:
2172 case REG:
2173 return 0;
2174
2175 case MEM:
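      /* A MEM counts once if its address is not unconditionally
	 invariant, plus any nonfixed reads within the address itself.  */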
2176 return ((invariant_p (XEXP (x, 0)) != 1)
2177 + count_nonfixed_reads (XEXP (x, 0)));
2178 }
2179
2180 value = 0;
2181 fmt = GET_RTX_FORMAT (code);
2182 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2183 {
2184 if (fmt[i] == 'e')
2185 value += count_nonfixed_reads (XEXP (x, i));
2186 if (fmt[i] == 'E')
2187 {
2188 register int j;
2189 for (j = 0; j < XVECLEN (x, i); j++)
2190 value += count_nonfixed_reads (XVECEXP (x, i, j));
2191 }
2192 }
2193 return value;
2194 }
2195
2196 \f
2197 #if 0
2198 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2199 Replace it with an instruction to load just the low bytes
2200 if the machine supports such an instruction,
2201 and insert above LOOP_START an instruction to clear the register. */
2202
2203 static void
2204 constant_high_bytes (p, loop_start)
2205 rtx p, loop_start;
2206 {
2207 register rtx new;
2208 register int insn_code_number;
2209
2210 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2211 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2212
2213 new = gen_rtx (SET, VOIDmode,
2214 gen_rtx (STRICT_LOW_PART, VOIDmode,
2215 gen_rtx (SUBREG, GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2216 SET_DEST (PATTERN (p)),
2217 0)),
2218 XEXP (SET_SRC (PATTERN (p)), 0));
2219 insn_code_number = recog (new, p);
2220
2221 if (insn_code_number)
2222 {
2223 register int i;
2224
2225 /* Clear destination register before the loop. */
2226 emit_insn_before (gen_rtx (SET, VOIDmode,
2227 SET_DEST (PATTERN (p)),
2228 const0_rtx),
2229 loop_start);
2230
2231 /* Inside the loop, just load the low part. */
2232 PATTERN (p) = new;
2233 }
2234 }
2235 #endif
2236 \f
2237 /* Scan a loop setting the variables `unknown_address_altered',
2238    `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2239 and `loop_has_volatile'.
2240 Also, fill in the array `loop_store_mems'. */
2241
2242 static void
2243 prescan_loop (start, end)
2244 rtx start, end;
2245 {
2246 register int level = 1;
2247 register rtx insn;
2248
2249 unknown_address_altered = 0;
2250 loop_has_call = 0;
2251 loop_has_volatile = 0;
2252 loop_store_mems_idx = 0;
2253
2254 num_mem_sets = 0;
2255 loops_enclosed = 1;
2256 loop_continue = 0;
2257
2258 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2259 insn = NEXT_INSN (insn))
2260 {
2261 if (GET_CODE (insn) == NOTE)
2262 {
2263 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2264 {
2265 ++level;
2266 /* Count number of loops contained in this one. */
2267 loops_enclosed++;
2268 }
2269 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2270 {
2271 --level;
2272 if (level == 0)
2273 {
2274 end = insn;
2275 break;
2276 }
2277 }
2278 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2279 {
2280 if (level == 1)
2281 loop_continue = insn;
2282 }
2283 }
2284 else if (GET_CODE (insn) == CALL_INSN)
2285 {
2286 if (! CONST_CALL_P (insn))
2287 unknown_address_altered = 1;
2288 loop_has_call = 1;
2289 }
2290 else
2291 {
2292 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2293 {
2294 if (volatile_refs_p (PATTERN (insn)))
2295 loop_has_volatile = 1;
2296
2297 note_stores (PATTERN (insn), note_addr_stored);
2298 }
2299 }
2300 }
2301 }
2302 \f
2303 /* Scan the function looking for loops. Record the start and end of each loop.
2304 Also mark as invalid loops any loops that contain a setjmp or are branched
2305 to from outside the loop. */
2306
2307 static void
2308 find_and_verify_loops (f)
2309 rtx f;
2310 {
2311 rtx insn, label;
2312 int current_loop = -1;
2313 int next_loop = -1;
2314 int loop;
2315
2316 /* If there are jumps to undefined labels,
2317 treat them as jumps out of any/all loops.
2318 This also avoids writing past end of tables when there are no loops. */
2319 uid_loop_num[0] = -1;
2320
2321 /* Find boundaries of loops, mark which loops are contained within
2322 loops, and invalidate loops that have setjmp. */
2323
2324 for (insn = f; insn; insn = NEXT_INSN (insn))
2325 {
2326 if (GET_CODE (insn) == NOTE)
2327 switch (NOTE_LINE_NUMBER (insn))
2328 {
2329 case NOTE_INSN_LOOP_BEG:
2330 loop_number_loop_starts[++next_loop] = insn;
2331 loop_number_loop_ends[next_loop] = 0;
2332 loop_outer_loop[next_loop] = current_loop;
2333 loop_invalid[next_loop] = 0;
2334 loop_number_exit_labels[next_loop] = 0;
2335 loop_number_exit_count[next_loop] = 0;
2336 current_loop = next_loop;
2337 break;
2338
2339 case NOTE_INSN_SETJMP:
2340 /* In this case, we must invalidate our current loop and any
2341 enclosing loop. */
2342 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2343 {
2344 loop_invalid[loop] = 1;
2345 if (loop_dump_stream)
2346 fprintf (loop_dump_stream,
2347 "\nLoop at %d ignored due to setjmp.\n",
2348 INSN_UID (loop_number_loop_starts[loop]));
2349 }
2350 break;
2351
2352 case NOTE_INSN_LOOP_END:
2353 if (current_loop == -1)
2354 abort ();
2355
2356 loop_number_loop_ends[current_loop] = insn;
2357 current_loop = loop_outer_loop[current_loop];
2358 break;
2359
2360 }
2361
2362 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2363 enclosing loop, but this doesn't matter. */
2364 uid_loop_num[INSN_UID (insn)] = current_loop;
2365 }
2366
2367 /* Any loop containing a label used in an initializer must be invalidated,
2368 because it can be jumped into from anywhere. */
2369
2370 for (label = forced_labels; label; label = XEXP (label, 1))
2371 {
2372 int loop_num;
2373
2374 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2375 loop_num != -1;
2376 loop_num = loop_outer_loop[loop_num])
2377 loop_invalid[loop_num] = 1;
2378 }
2379
2380 /* Any loop containing a label used for an exception handler must be
2381 invalidated, because it can be jumped into from anywhere. */
2382
2383 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2384 {
2385 int loop_num;
2386
2387 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2388 loop_num != -1;
2389 loop_num = loop_outer_loop[loop_num])
2390 loop_invalid[loop_num] = 1;
2391 }
2392
2393 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2394 loop that it is not contained within, that loop is marked invalid.
2395 If any INSN or CALL_INSN uses a label's address, then the loop containing
2396 that label is marked invalid, because it could be jumped into from
2397 anywhere.
2398
2399 Also look for blocks of code ending in an unconditional branch that
2400 exits the loop. If such a block is surrounded by a conditional
2401 branch around the block, move the block elsewhere (see below) and
2402 invert the jump to point to the code block. This may eliminate a
2403 label in our loop and will simplify processing by both us and a
2404 possible second cse pass. */
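/* Illustrative sketch of that transformation:

	p:  if cond, jump L1		p:  if !cond, jump NEW
	    <block B>		 ==>	    ...code at L1...
	    jump <exit>
	L1: ...				NEW: <block B>
					     jump <exit>

   where <block B> is re-emitted after a BARRIER at the exit target's
   loop depth, so L1 often becomes unused and can be deleted.  */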
2405
2406 for (insn = f; insn; insn = NEXT_INSN (insn))
2407 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2408 {
2409 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2410
2411 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2412 {
2413 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2414 if (note)
2415 {
2416 int loop_num;
2417
2418 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2419 loop_num != -1;
2420 loop_num = loop_outer_loop[loop_num])
2421 loop_invalid[loop_num] = 1;
2422 }
2423 }
2424
2425 if (GET_CODE (insn) != JUMP_INSN)
2426 continue;
2427
2428 mark_loop_jump (PATTERN (insn), this_loop_num);
2429
2430 /* See if this is an unconditional branch outside the loop. */
2431 if (this_loop_num != -1
2432 && (GET_CODE (PATTERN (insn)) == RETURN
2433 || (simplejump_p (insn)
2434 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2435 != this_loop_num)))
2436 && get_max_uid () < max_uid_for_loop)
2437 {
2438 rtx p;
2439 rtx our_next = next_real_insn (insn);
2440 int dest_loop;
2441 int outer_loop = -1;
2442
2443 /* Go backwards until we reach the start of the loop, a label,
2444 or a JUMP_INSN. */
2445 for (p = PREV_INSN (insn);
2446 GET_CODE (p) != CODE_LABEL
2447 && ! (GET_CODE (p) == NOTE
2448 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2449 && GET_CODE (p) != JUMP_INSN;
2450 p = PREV_INSN (p))
2451 ;
2452
2453 /* Check for the case where we have a jump to an inner nested
2454 loop, and do not perform the optimization in that case. */
2455
2456 if (JUMP_LABEL (insn))
2457 {
2458 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2459 if (dest_loop != -1)
2460 {
2461 for (outer_loop = dest_loop; outer_loop != -1;
2462 outer_loop = loop_outer_loop[outer_loop])
2463 if (outer_loop == this_loop_num)
2464 break;
2465 }
2466 }
2467
2468 /* Make sure that the target of P is within the current loop. */
2469
2470 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2471 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2472 outer_loop = this_loop_num;
2473
2474 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2475 we have a block of code to try to move.
2476
2477 We look backward and then forward from the target of INSN
2478 to find a BARRIER at the same loop depth as the target.
2479 If we find such a BARRIER, we make a new label for the start
2480 of the block, invert the jump in P and point it to that label,
2481 and move the block of code to the spot we found. */
2482
2483 if (outer_loop == -1
2484 && GET_CODE (p) == JUMP_INSN
2485 && JUMP_LABEL (p) != 0
2486 /* Just ignore jumps to labels that were never emitted.
2487 These always indicate compilation errors. */
2488 && INSN_UID (JUMP_LABEL (p)) != 0
2489 && condjump_p (p)
2490 && ! simplejump_p (p)
2491 && next_real_insn (JUMP_LABEL (p)) == our_next)
2492 {
2493 rtx target
2494 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2495 int target_loop_num = uid_loop_num[INSN_UID (target)];
2496 rtx loc;
2497
2498 for (loc = target; loc; loc = PREV_INSN (loc))
2499 if (GET_CODE (loc) == BARRIER
2500 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2501 break;
2502
2503 if (loc == 0)
2504 for (loc = target; loc; loc = NEXT_INSN (loc))
2505 if (GET_CODE (loc) == BARRIER
2506 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2507 break;
2508
2509 if (loc)
2510 {
2511 rtx cond_label = JUMP_LABEL (p);
2512 rtx new_label = get_label_after (p);
2513
2514 /* Ensure our label doesn't go away. */
2515 LABEL_NUSES (cond_label)++;
2516
2517 /* Verify that uid_loop_num is large enough and that
2518 we can invert P. */
2519 if (invert_jump (p, new_label))
2520 {
2521 rtx q, r;
2522
2523 /* Include the BARRIER after INSN and copy the
2524 block after LOC. */
2525 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2526 reorder_insns (new_label, NEXT_INSN (insn), loc);
2527
2528 /* All those insns are now in TARGET_LOOP_NUM. */
2529 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2530 q = NEXT_INSN (q))
2531 uid_loop_num[INSN_UID (q)] = target_loop_num;
2532
2533 /* The label jumped to by INSN is no longer a loop exit.
2534 Unless INSN does not have a label (e.g., it is a
2535 RETURN insn), search loop_number_exit_labels to find
2536 its label_ref, and remove it. Also turn off
2537 LABEL_OUTSIDE_LOOP_P bit. */
2538 if (JUMP_LABEL (insn))
2539 {
2540 int loop_num;
2541
2542 for (q = 0,
2543 r = loop_number_exit_labels[this_loop_num];
2544 r; q = r, r = LABEL_NEXTREF (r))
2545 if (XEXP (r, 0) == JUMP_LABEL (insn))
2546 {
2547 LABEL_OUTSIDE_LOOP_P (r) = 0;
2548 if (q)
2549 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2550 else
2551 loop_number_exit_labels[this_loop_num]
2552 = LABEL_NEXTREF (r);
2553 break;
2554 }
2555
2556 for (loop_num = this_loop_num;
2557 loop_num != -1 && loop_num != target_loop_num;
2558 loop_num = loop_outer_loop[loop_num])
2559 loop_number_exit_count[loop_num]--;
2560
2561 /* If we didn't find it, then something is wrong. */
2562 if (! r)
2563 abort ();
2564 }
2565
2566 /* P is now a jump outside the loop, so it must be put
2567 in loop_number_exit_labels, and marked as such.
2568 The easiest way to do this is to just call
2569 mark_loop_jump again for P. */
2570 mark_loop_jump (PATTERN (p), this_loop_num);
2571
2572 /* If INSN now jumps to the insn after it,
2573 delete INSN. */
2574 if (JUMP_LABEL (insn) != 0
2575 && (next_real_insn (JUMP_LABEL (insn))
2576 == next_real_insn (insn)))
2577 delete_insn (insn);
2578 }
2579
2580 /* Continue the loop after where the conditional
2581 branch used to jump, since the only branch insn
2582 in the block (if it still remains) is an inter-loop
2583 branch and hence needs no processing. */
2584 insn = NEXT_INSN (cond_label);
2585
2586 if (--LABEL_NUSES (cond_label) == 0)
2587 delete_insn (cond_label);
2588
2589 /* This loop will be continued with NEXT_INSN (insn). */
2590 insn = PREV_INSN (insn);
2591 }
2592 }
2593 }
2594 }
2595 }
2596
2597 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2598 loops it is contained in, mark the target loop invalid.
2599
2600 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2601
2602 static void
2603 mark_loop_jump (x, loop_num)
2604 rtx x;
2605 int loop_num;
2606 {
2607 int dest_loop;
2608 int outer_loop;
2609 int i;
2610
2611 switch (GET_CODE (x))
2612 {
2613 case PC:
2614 case USE:
2615 case CLOBBER:
2616 case REG:
2617 case MEM:
2618 case CONST_INT:
2619 case CONST_DOUBLE:
2620 case RETURN:
2621 return;
2622
2623 case CONST:
2624 /* There could be a label reference in here. */
2625 mark_loop_jump (XEXP (x, 0), loop_num);
2626 return;
2627
2628 case PLUS:
2629 case MINUS:
2630 case MULT:
2631 mark_loop_jump (XEXP (x, 0), loop_num);
2632 mark_loop_jump (XEXP (x, 1), loop_num);
2633 return;
2634
2635 case SIGN_EXTEND:
2636 case ZERO_EXTEND:
2637 mark_loop_jump (XEXP (x, 0), loop_num);
2638 return;
2639
2640 case LABEL_REF:
2641 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2642
2643 /* Link together all labels that branch outside the loop. This
2644 is used by final_[bg]iv_value and the loop unrolling code. Also
2645 mark this LABEL_REF so we know that this branch should predict
2646 false. */
2647
2648 /* A check to make sure the label is not in an inner nested loop,
2649 since this does not count as a loop exit. */
2650 if (dest_loop != -1)
2651 {
2652 for (outer_loop = dest_loop; outer_loop != -1;
2653 outer_loop = loop_outer_loop[outer_loop])
2654 if (outer_loop == loop_num)
2655 break;
2656 }
2657 else
2658 outer_loop = -1;
2659
2660 if (loop_num != -1 && outer_loop == -1)
2661 {
2662 LABEL_OUTSIDE_LOOP_P (x) = 1;
2663 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2664 loop_number_exit_labels[loop_num] = x;
2665
2666 for (outer_loop = loop_num;
2667 outer_loop != -1 && outer_loop != dest_loop;
2668 outer_loop = loop_outer_loop[outer_loop])
2669 loop_number_exit_count[outer_loop]++;
2670 }
2671
2672 /* If this is inside a loop, but not in the current loop or one enclosed
2673 by it, it invalidates at least one loop. */
2674
2675 if (dest_loop == -1)
2676 return;
2677
2678 /* We must invalidate every nested loop containing the target of this
2679 label, except those that also contain the jump insn. */
2680
2681 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2682 {
2683 /* Stop when we reach a loop that also contains the jump insn. */
2684 for (outer_loop = loop_num; outer_loop != -1;
2685 outer_loop = loop_outer_loop[outer_loop])
2686 if (dest_loop == outer_loop)
2687 return;
2688
2689 /* If we get here, we know we need to invalidate a loop. */
2690 if (loop_dump_stream && ! loop_invalid[dest_loop])
2691 fprintf (loop_dump_stream,
2692 "\nLoop at %d ignored due to multiple entry points.\n",
2693 INSN_UID (loop_number_loop_starts[dest_loop]));
2694
2695 loop_invalid[dest_loop] = 1;
2696 }
2697 return;
2698
2699 case SET:
2700 /* If this is not setting pc, ignore. */
2701 if (SET_DEST (x) == pc_rtx)
2702 mark_loop_jump (SET_SRC (x), loop_num);
2703 return;
2704
2705 case IF_THEN_ELSE:
2706 mark_loop_jump (XEXP (x, 1), loop_num);
2707 mark_loop_jump (XEXP (x, 2), loop_num);
2708 return;
2709
2710 case PARALLEL:
2711 case ADDR_VEC:
2712 for (i = 0; i < XVECLEN (x, 0); i++)
2713 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2714 return;
2715
2716 case ADDR_DIFF_VEC:
2717 for (i = 0; i < XVECLEN (x, 1); i++)
2718 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2719 return;
2720
2721 default:
2722 /* Treat anything else (such as a symbol_ref)
2723 as a branch out of this loop, but not into any loop. */
2724
2725 if (loop_num != -1)
2726 {
2727 #ifdef HAIFA
2728 LABEL_OUTSIDE_LOOP_P (x) = 1;
2729 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2730 #endif /* HAIFA */
2731
2732 loop_number_exit_labels[loop_num] = x;
2733
2734 for (outer_loop = loop_num; outer_loop != -1;
2735 outer_loop = loop_outer_loop[outer_loop])
2736 loop_number_exit_count[outer_loop]++;
2737 }
2738 return;
2739 }
2740 }
2741 \f
2742 /* Return nonzero if there is a label in the range from
2743    insn INSN to and including the insn whose luid is END.
2744    INSN must have an assigned luid (i.e., it must not have
2745 been previously created by loop.c). */
2746
2747 static int
2748 labels_in_range_p (insn, end)
2749 rtx insn;
2750 int end;
2751 {
2752 while (insn && INSN_LUID (insn) <= end)
2753 {
2754 if (GET_CODE (insn) == CODE_LABEL)
2755 return 1;
2756 insn = NEXT_INSN (insn);
2757 }
2758
2759 return 0;
2760 }
2761
2762 /* Record that a memory reference X is being set. */
2763
2764 static void
2765 note_addr_stored (x)
2766 rtx x;
2767 {
2768 register int i;
2769
2770 if (x == 0 || GET_CODE (x) != MEM)
2771 return;
2772
2773 /* Count number of memory writes.
2774 This affects heuristics in strength_reduce. */
2775 num_mem_sets++;
2776
2777 /* BLKmode MEM means all memory is clobbered. */
2778 if (GET_MODE (x) == BLKmode)
2779 unknown_address_altered = 1;
2780
2781 if (unknown_address_altered)
2782 return;
2783
2784 for (i = 0; i < loop_store_mems_idx; i++)
2785 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2786 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2787 {
2788 /* We are storing at the same address as previously noted. Save the
2789 wider reference. */
2790 if (GET_MODE_SIZE (GET_MODE (x))
2791 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2792 loop_store_mems[i] = x;
2793 break;
2794 }
2795
2796 if (i == NUM_STORES)
2797 unknown_address_altered = 1;
2798
2799 else if (i == loop_store_mems_idx)
2800 loop_store_mems[loop_store_mems_idx++] = x;
2801 }
2802 \f
2803 /* Return nonzero if the rtx X is invariant over the current loop.
2804
2805 The value is 2 if we refer to something only conditionally invariant.
2806
2807 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2808 Otherwise, a memory ref is invariant if it does not conflict with
2809 anything stored in `loop_store_mems'. */
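/* For example (illustrative), (plus (reg R) (const_int 4)) is exactly
   as invariant as R itself: the result is 1 if R is never set within
   the loop, 2 when n_times_set[R] < 0 (R is set only by insns that are
   themselves candidates for moving), and 0 otherwise.  */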
2810
2811 int
2812 invariant_p (x)
2813 register rtx x;
2814 {
2815 register int i;
2816 register enum rtx_code code;
2817 register char *fmt;
2818 int conditional = 0;
2819
2820 if (x == 0)
2821 return 1;
2822 code = GET_CODE (x);
2823 switch (code)
2824 {
2825 case CONST_INT:
2826 case CONST_DOUBLE:
2827 case SYMBOL_REF:
2828 case CONST:
2829 return 1;
2830
2831 case LABEL_REF:
2832 /* A LABEL_REF is normally invariant, however, if we are unrolling
2833 loops, and this label is inside the loop, then it isn't invariant.
2834 This is because each unrolled copy of the loop body will have
2835 a copy of this label. If this was invariant, then an insn loading
2836 the address of this label into a register might get moved outside
2837 the loop, and then each loop body would end up using the same label.
2838
2839 We don't know the loop bounds here though, so just fail for all
2840 labels. */
2841 if (flag_unroll_loops)
2842 return 0;
2843 else
2844 return 1;
2845
2846 case PC:
2847 case CC0:
2848 case UNSPEC_VOLATILE:
2849 return 0;
2850
2851 case REG:
2852 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2853 since the reg might be set by initialization within the loop. */
2854
2855 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2856 || x == arg_pointer_rtx)
2857 && ! current_function_has_nonlocal_goto)
2858 return 1;
2859
2860 if (loop_has_call
2861 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2862 return 0;
2863
2864 if (n_times_set[REGNO (x)] < 0)
2865 return 2;
2866
2867 return n_times_set[REGNO (x)] == 0;
2868
2869 case MEM:
2870 /* Volatile memory references must be rejected. Do this before
2871 checking for read-only items, so that volatile read-only items
2872 will be rejected also. */
2873 if (MEM_VOLATILE_P (x))
2874 return 0;
2875
2876 /* Read-only items (such as constants in a constant pool) are
2877 invariant if their address is. */
2878 if (RTX_UNCHANGING_P (x))
2879 break;
2880
2881 /* If we filled the table (or had a subroutine call), any location
2882 in memory could have been clobbered. */
2883 if (unknown_address_altered)
2884 return 0;
2885
2886 /* See if there is any dependence between a store and this load. */
2887 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2888 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2889 return 0;
2890
2891 /* It's not invalidated by a store in memory
2892 but we must still verify the address is invariant. */
2893 break;
2894
2895 case ASM_OPERANDS:
2896 /* Don't mess with insns declared volatile. */
2897 if (MEM_VOLATILE_P (x))
2898 return 0;
2899 }
2900
2901 fmt = GET_RTX_FORMAT (code);
2902 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2903 {
2904 if (fmt[i] == 'e')
2905 {
2906 int tem = invariant_p (XEXP (x, i));
2907 if (tem == 0)
2908 return 0;
2909 if (tem == 2)
2910 conditional = 1;
2911 }
2912 else if (fmt[i] == 'E')
2913 {
2914 register int j;
2915 for (j = 0; j < XVECLEN (x, i); j++)
2916 {
2917 int tem = invariant_p (XVECEXP (x, i, j));
2918 if (tem == 0)
2919 return 0;
2920 if (tem == 2)
2921 conditional = 1;
2922 }
2923
2924 }
2925 }
2926
2927 return 1 + conditional;
2928 }
2929
2930 \f
2931 /* Return nonzero if all the insns in the loop that set REG
2932 are INSN and the immediately following insns,
2933 and if each of those insns sets REG in an invariant way
2934 (not counting uses of REG in them).
2935
2936    The value is greater than 1 if some of these insns are only
	conditionally invariant.
2937
2938 We assume that INSN itself is the first set of REG
2939 and that its source is invariant. */
2940
2941 static int
2942 consec_sets_invariant_p (reg, n_sets, insn)
2943 int n_sets;
2944 rtx reg, insn;
2945 {
2946 register rtx p = insn;
2947 register int regno = REGNO (reg);
2948 rtx temp;
2949 /* Number of sets we have to insist on finding after INSN. */
2950 int count = n_sets - 1;
2951 int old = n_times_set[regno];
2952 int value = 0;
2953 int this;
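  /* Each nonzero invariant_p result is OR'ed into VALUE, so the 2 bit
     records that some set was only conditionally invariant; the return
     value below is then 3 rather than 1.  */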
2954
2955 /* If N_SETS hit the limit, we can't rely on its value. */
2956 if (n_sets == 127)
2957 return 0;
2958
2959 n_times_set[regno] = 0;
2960
2961 while (count > 0)
2962 {
2963 register enum rtx_code code;
2964 rtx set;
2965
2966 p = NEXT_INSN (p);
2967 code = GET_CODE (p);
2968
2969       /* If library call, skip to end of it.  */
2970 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2971 p = XEXP (temp, 0);
2972
2973 this = 0;
2974 if (code == INSN
2975 && (set = single_set (p))
2976 && GET_CODE (SET_DEST (set)) == REG
2977 && REGNO (SET_DEST (set)) == regno)
2978 {
2979 this = invariant_p (SET_SRC (set));
2980 if (this != 0)
2981 value |= this;
2982	  else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
2983 {
2984 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
2985 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
2986 notes are OK. */
2987 this = (CONSTANT_P (XEXP (temp, 0))
2988 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
2989 && invariant_p (XEXP (temp, 0))));
2990 if (this != 0)
2991 value |= this;
2992 }
2993 }
2994 if (this != 0)
2995 count--;
2996 else if (code != NOTE)
2997 {
2998 n_times_set[regno] = old;
2999 return 0;
3000 }
3001 }
3002
3003 n_times_set[regno] = old;
3004   /* If invariant_p ever returned 2, the 2 bit survives in VALUE and we return 3.  */
3005 return 1 + (value & 2);
3006 }
3007
3008 #if 0
3009 /* I don't think this condition is sufficient to allow INSN
3010 to be moved, so we no longer test it. */
3011
3012 /* Return 1 if all insns in the basic block of INSN and following INSN
3013 that set REG are invariant according to TABLE. */
3014
3015 static int
3016 all_sets_invariant_p (reg, insn, table)
3017 rtx reg, insn;
3018 short *table;
3019 {
3020 register rtx p = insn;
3021 register int regno = REGNO (reg);
3022
3023 while (1)
3024 {
3025 register enum rtx_code code;
3026 p = NEXT_INSN (p);
3027 code = GET_CODE (p);
3028 if (code == CODE_LABEL || code == JUMP_INSN)
3029 return 1;
3030 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3031 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3032 && REGNO (SET_DEST (PATTERN (p))) == regno)
3033 {
3034 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3035 return 0;
3036 }
3037 }
3038 }
3039 #endif /* 0 */
3040 \f
3041 /* Look at all uses (not sets) of registers in X. For each, if it is
3042 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3043 a different insn, set USAGE[REGNO] to const0_rtx. */
3044
3045 static void
3046 find_single_use_in_loop (insn, x, usage)
3047 rtx insn;
3048 rtx x;
3049 rtx *usage;
3050 {
3051 enum rtx_code code = GET_CODE (x);
3052 char *fmt = GET_RTX_FORMAT (code);
3053 int i, j;
3054
3055 if (code == REG)
3056 usage[REGNO (x)]
3057 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3058 ? const0_rtx : insn;
3059
3060 else if (code == SET)
3061 {
3062 /* Don't count SET_DEST if it is a REG; otherwise count things
3063 in SET_DEST because if a register is partially modified, it won't
3064 show up as a potential movable so we don't care how USAGE is set
3065 for it. */
3066 if (GET_CODE (SET_DEST (x)) != REG)
3067 find_single_use_in_loop (insn, SET_DEST (x), usage);
3068 find_single_use_in_loop (insn, SET_SRC (x), usage);
3069 }
3070 else
3071 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3072 {
3073 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3074 find_single_use_in_loop (insn, XEXP (x, i), usage);
3075 else if (fmt[i] == 'E')
3076 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3077 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3078 }
3079 }
3080 \f
3081 /* Increment N_TIMES_SET at the index of each register
3082 that is modified by an insn between FROM and TO.
3083 If the value of an element of N_TIMES_SET becomes 127 or more,
3084 stop incrementing it, to avoid overflow.
3085
3086 Store in SINGLE_USAGE[I] the single insn in which register I is
3087 used, if it is only used once. Otherwise, it is set to 0 (for no
3088 uses) or const0_rtx for more than one use. This parameter may be zero,
3089 in which case this processing is not done.
3090
3091    Store in *COUNT_PTR the number of actual instructions
3092 in the loop. We use this to decide what is worth moving out. */
3093
3094 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3095 In that case, it is the insn that last set reg n. */
3096
3097 static void
3098 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3099 register rtx from, to;
3100 char *may_not_move;
3101 rtx *single_usage;
3102 int *count_ptr;
3103 int nregs;
3104 {
3105 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3106 register rtx insn;
3107 register int count = 0;
3108 register rtx dest;
3109
3110 bzero ((char *) last_set, nregs * sizeof (rtx));
3111 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3112 {
3113 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3114 {
3115 ++count;
3116
3117 /* If requested, record registers that have exactly one use. */
3118 if (single_usage)
3119 {
3120 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3121
3122 /* Include uses in REG_EQUAL notes. */
3123 if (REG_NOTES (insn))
3124 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3125 }
3126
3127 if (GET_CODE (PATTERN (insn)) == CLOBBER
3128 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3129 /* Don't move a reg that has an explicit clobber.
3130 We might do so sometimes, but it's not worth the pain. */
3131 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3132
3133 if (GET_CODE (PATTERN (insn)) == SET
3134 || GET_CODE (PATTERN (insn)) == CLOBBER)
3135 {
3136 dest = SET_DEST (PATTERN (insn));
3137 while (GET_CODE (dest) == SUBREG
3138 || GET_CODE (dest) == ZERO_EXTRACT
3139 || GET_CODE (dest) == SIGN_EXTRACT
3140 || GET_CODE (dest) == STRICT_LOW_PART)
3141 dest = XEXP (dest, 0);
3142 if (GET_CODE (dest) == REG)
3143 {
3144 register int regno = REGNO (dest);
3145 /* If this is the first setting of this reg
3146 in current basic block, and it was set before,
3147 it must be set in two basic blocks, so it cannot
3148 be moved out of the loop. */
3149 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3150 may_not_move[regno] = 1;
3151 /* If this is not first setting in current basic block,
3152 see if reg was used in between previous one and this.
3153 If so, neither one can be moved. */
3154 if (last_set[regno] != 0
3155 && reg_used_between_p (dest, last_set[regno], insn))
3156 may_not_move[regno] = 1;
3157 if (n_times_set[regno] < 127)
3158 ++n_times_set[regno];
3159 last_set[regno] = insn;
3160 }
3161 }
3162 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3163 {
3164 register int i;
3165 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3166 {
3167 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3168 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3169 /* Don't move a reg that has an explicit clobber.
3170 It's not worth the pain to try to do it correctly. */
3171 may_not_move[REGNO (XEXP (x, 0))] = 1;
3172
3173 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3174 {
3175 dest = SET_DEST (x);
3176 while (GET_CODE (dest) == SUBREG
3177 || GET_CODE (dest) == ZERO_EXTRACT
3178 || GET_CODE (dest) == SIGN_EXTRACT
3179 || GET_CODE (dest) == STRICT_LOW_PART)
3180 dest = XEXP (dest, 0);
3181 if (GET_CODE (dest) == REG)
3182 {
3183 register int regno = REGNO (dest);
3184 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3185 may_not_move[regno] = 1;
3186 if (last_set[regno] != 0
3187 && reg_used_between_p (dest, last_set[regno], insn))
3188 may_not_move[regno] = 1;
3189 if (n_times_set[regno] < 127)
3190 ++n_times_set[regno];
3191 last_set[regno] = insn;
3192 }
3193 }
3194 }
3195 }
3196 }
3197
3198 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3199 bzero ((char *) last_set, nregs * sizeof (rtx));
3200 }
3201 *count_ptr = count;
3202 }
3203 \f
3204 /* Given a loop that is bounded by LOOP_START and LOOP_END
3205 and that is entered at SCAN_START,
3206 return 1 if the register set in SET contained in insn INSN is used by
3207 any insn that precedes INSN in cyclic order starting
3208 from the loop entry point.
3209
3210 We don't want to use INSN_LUID here because if we restrict INSN to those
3211 that have a valid INSN_LUID, it means we cannot move an invariant out
3212 from an inner loop past two loops. */
3213
3214 static int
3215 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3216 rtx set, insn, loop_start, scan_start, loop_end;
3217 {
3218 rtx reg = SET_DEST (set);
3219 rtx p;
3220
3221 /* Scan forward checking for register usage. If we hit INSN, we
3222 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3223 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3224 {
3225 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3226 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3227 return 1;
3228
3229 if (p == loop_end)
3230 p = loop_start;
3231 }
3232
3233 return 0;
3234 }
3235 \f
3236 /* A "basic induction variable" or biv is a pseudo reg that is set
3237 (within this loop) only by incrementing or decrementing it. */
3238 /* A "general induction variable" or giv is a pseudo reg whose
3239 value is a linear function of a biv. */
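/* For example (illustrative), in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the reg holding I is a biv, since it is set only by the increment;
   a reg computed as I * 4 + &A to address A[I] would be a giv, a
   linear function of that biv with mult_val 4 and add_val &A.  */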
3240
3241 /* Bivs are recognized by `basic_induction_var';
3242    Givs by `general_induction_var'.  */
3243
3244 /* Indexed by register number, indicates whether or not register is an
3245 induction variable, and if so what type. */
3246
3247 enum iv_mode *reg_iv_type;
3248
3249 /* Indexed by register number, contains pointer to `struct induction'
3250 if register is an induction variable. This holds general info for
3251 all induction variables. */
3252
3253 struct induction **reg_iv_info;
3254
3255 /* Indexed by register number, contains pointer to `struct iv_class'
3256 if register is a basic induction variable. This holds info describing
3257 the class (a related group) of induction variables that the biv belongs
3258 to. */
3259
3260 struct iv_class **reg_biv_class;
3261
3262 /* The head of a list which links together (via the next field)
3263 every iv class for the current loop. */
3264
3265 struct iv_class *loop_iv_list;
3266
3267 /* Communication with routines called via `note_stores'. */
3268
3269 static rtx note_insn;
3270
3271 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3272
3273 static rtx addr_placeholder;
3274
3275 /* ??? Unfinished optimizations, and possible future optimizations,
3276 for the strength reduction code. */
3277
3278 /* ??? There is one more optimization you might be interested in doing: to
3279 allocate pseudo registers for frequently-accessed memory locations.
3280 If the same memory location is referenced each time around, it might
3281 be possible to copy it into a register before and out after.
3282 This is especially useful when the memory location is a variable which
3283 is in a stack slot because somewhere its address is taken. If the
3284 loop doesn't contain a function call and the variable isn't volatile,
3285 it is safe to keep the value in a register for the duration of the
3286 loop. One tricky thing is that the copying of the value back from the
3287 register has to be done on all exits from the loop. You need to check that
3288 all the exits from the loop go to the same place. */
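/* An illustrative sketch (names hypothetical): for

	int x;  ...  while (cond) x += *p++;

   where X lives in a stack slot because its address is taken
   elsewhere, one could emit "reg = x" before the loop, use REG inside,
   and emit "x = reg" on every exit from the loop.  */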
3289
3290 /* ??? The interaction of biv elimination, and recognition of 'constant'
3291 bivs, may cause problems. */
3292
3293 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3294 performance problems.
3295
3296 Perhaps don't eliminate things that can be combined with an addressing
3297 mode. Find all givs that have the same biv, mult_val, and add_val;
3298 then for each giv, check to see if its only use dies in a following
3299 memory address. If so, generate a new memory address and check to see
3300 if it is valid. If it is valid, then store the modified memory address,
3301 otherwise, mark the giv as not done so that it will get its own iv. */
3302
3303 /* ??? Could try to optimize branches when it is known that a biv is always
3304 positive. */
3305
3306 /* ??? When replacing a biv in a compare insn, we should replace it with the
3307    closest giv, so that an optimized branch can still be recognized by the combiner,
3308 e.g. the VAX acb insn. */
3309
3310 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3311 was rerun in loop_optimize whenever a register was added or moved.
3312 Also, some of the optimizations could be a little less conservative. */
3313 \f
3314 /* Perform strength reduction and induction variable elimination. */
3315
3316 /* Pseudo registers created during this function will be beyond the last
3317 valid index in several tables including n_times_set and regno_last_uid.
3318 This does not cause a problem here, because the added registers cannot be
3319 givs outside of their loop, and hence will never be reconsidered.
3320 But scan_loop must check regnos to make sure they are in bounds. */
3321
3322 static void
3323 strength_reduce (scan_start, end, loop_top, insn_count,
3324 loop_start, loop_end)
3325 rtx scan_start;
3326 rtx end;
3327 rtx loop_top;
3328 int insn_count;
3329 rtx loop_start;
3330 rtx loop_end;
3331 {
3332 rtx p;
3333 rtx set;
3334 rtx inc_val;
3335 rtx mult_val;
3336 rtx dest_reg;
3337 /* This is 1 if current insn is not executed at least once for every loop
3338 iteration. */
3339 int not_every_iteration = 0;
3340 /* This is 1 if current insn may be executed more than once for every
3341 loop iteration. */
3342 int maybe_multiple = 0;
3343 /* Temporary list pointers for traversing loop_iv_list. */
3344 struct iv_class *bl, **backbl;
3345 /* Ratio of extra register life span we can justify
3346 for saving an instruction. More if loop doesn't call subroutines
3347 since in that case saving an insn makes more difference
3348 and more registers are available. */
3349 /* ??? could set this to last value of threshold in move_movables */
3350 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
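  /* E.g. (illustrative): with 29 non-fixed registers, this is
     2 * (3 + 29) = 64 for a loop without calls, 32 for one with.  */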
3351 /* Map of pseudo-register replacements. */
3352 rtx *reg_map;
3353 int call_seen;
3354 rtx test;
3355 rtx end_insert_before;
3356 int loop_depth = 0;
3357
3358 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3359					 * sizeof (enum iv_mode));
3360   bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3361 reg_iv_info = (struct induction **)
3362 alloca (max_reg_before_loop * sizeof (struct induction *));
3363 bzero ((char *) reg_iv_info, (max_reg_before_loop
3364 * sizeof (struct induction *)));
3365 reg_biv_class = (struct iv_class **)
3366 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3367 bzero ((char *) reg_biv_class, (max_reg_before_loop
3368 * sizeof (struct iv_class *)));
3369
3370 loop_iv_list = 0;
3371 addr_placeholder = gen_reg_rtx (Pmode);
3372
3373 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3374 must be put before this insn, so that they will appear in the right
3375 order (i.e. loop order).
3376
3377 If loop_end is the end of the current function, then emit a
3378 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3379 dummy note insn. */
3380 if (NEXT_INSN (loop_end) != 0)
3381 end_insert_before = NEXT_INSN (loop_end);
3382 else
3383 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3384
3385 /* Scan through loop to find all possible bivs. */
3386
3387 p = scan_start;
3388 while (1)
3389 {
3390 p = NEXT_INSN (p);
3391 /* At end of a straight-in loop, we are done.
3392 At end of a loop entered at the bottom, scan the top. */
3393 if (p == scan_start)
3394 break;
3395 if (p == end)
3396 {
3397 if (loop_top != 0)
3398 p = loop_top;
3399 else
3400 break;
3401 if (p == scan_start)
3402 break;
3403 }
3404
3405 if (GET_CODE (p) == INSN
3406 && (set = single_set (p))
3407 && GET_CODE (SET_DEST (set)) == REG)
3408 {
3409 dest_reg = SET_DEST (set);
3410 if (REGNO (dest_reg) < max_reg_before_loop
3411 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3412 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3413 {
3414 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3415 dest_reg, p, &inc_val, &mult_val))
3416 {
3417 /* It is a possible basic induction variable.
3418 Create and initialize an induction structure for it. */
3419
3420 struct induction *v
3421 = (struct induction *) alloca (sizeof (struct induction));
3422
3423 record_biv (v, p, dest_reg, inc_val, mult_val,
3424 not_every_iteration, maybe_multiple);
3425 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3426 }
3427 else if (REGNO (dest_reg) < max_reg_before_loop)
3428 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3429 }
3430 }
3431
3432 /* Past CODE_LABEL, we get to insns that may be executed multiple
3433	 times.  The only way we can be sure that they can't is if every
3434	 jump insn between here and the end of the loop either
3435 returns, exits the loop, is a forward jump, or is a jump
3436 to the loop start. */
3437
3438 if (GET_CODE (p) == CODE_LABEL)
3439 {
3440 rtx insn = p;
3441
3442 maybe_multiple = 0;
3443
3444 while (1)
3445 {
3446 insn = NEXT_INSN (insn);
3447 if (insn == scan_start)
3448 break;
3449 if (insn == end)
3450 {
3451 if (loop_top != 0)
3452 insn = loop_top;
3453 else
3454 break;
3455 if (insn == scan_start)
3456 break;
3457 }
3458
3459 if (GET_CODE (insn) == JUMP_INSN
3460 && GET_CODE (PATTERN (insn)) != RETURN
3461 && (! condjump_p (insn)
3462 || (JUMP_LABEL (insn) != 0
3463 && JUMP_LABEL (insn) != scan_start
3464 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3465 || INSN_UID (insn) >= max_uid_for_loop
3466 || (INSN_LUID (JUMP_LABEL (insn))
3467 < INSN_LUID (insn))))))
3468 {
3469 maybe_multiple = 1;
3470 break;
3471 }
3472 }
3473 }
3474
3475 /* Past a jump, we get to insns for which we can't count
3476 on whether they will be executed during each iteration. */
3477 /* This code appears twice in strength_reduce. There is also similar
3478 code in scan_loop. */
3479 if (GET_CODE (p) == JUMP_INSN
3480 /* If we enter the loop in the middle, and scan around to the
3481 beginning, don't set not_every_iteration for that.
3482 This can be any kind of jump, since we want to know if insns
3483 will be executed if the loop is executed. */
3484 && ! (JUMP_LABEL (p) == loop_top
3485 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3486 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3487 {
3488 rtx label = 0;
3489
3490 /* If this is a jump outside the loop, then it also doesn't
3491 matter. Check to see if the target of this branch is on the
3492	     loop_number_exit_labels list.  */
3493
3494 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3495 label;
3496 label = LABEL_NEXTREF (label))
3497 if (XEXP (label, 0) == JUMP_LABEL (p))
3498 break;
3499
3500 if (! label)
3501 not_every_iteration = 1;
3502 }
3503
3504 else if (GET_CODE (p) == NOTE)
3505 {
3506 /* At the virtual top of a converted loop, insns are again known to
3507 be executed each iteration: logically, the loop begins here
3508 even though the exit code has been duplicated. */
3509 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3510 not_every_iteration = 0;
3511 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3512 loop_depth++;
3513 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3514 loop_depth--;
3515 }
3516
3517 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3518 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3519 or not an insn is known to be executed each iteration of the
3520 loop, whether or not any iterations are known to occur.
3521
3522 Therefore, if we have just passed a label and have no more labels
3523 between here and the test insn of the loop, we know these insns
3524 will be executed each iteration. */
3525
3526 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3527 && no_labels_between_p (p, loop_end))
3528 not_every_iteration = 0;
3529 }
3530
3531 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3532 Make a sanity check against n_times_set. */
3533 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3534 {
3535 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3536 /* Above happens if register modified by subreg, etc. */
3537 /* Make sure it is not recognized as a basic induction var: */
3538 || n_times_set[bl->regno] != bl->biv_count
3539 /* If never incremented, it is an invariant that we decided not to
3540 move. So leave it alone. */
3541 || ! bl->incremented)
3542 {
3543 if (loop_dump_stream)
3544 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3545 bl->regno,
3546 (reg_iv_type[bl->regno] != BASIC_INDUCT
3547 ? "not induction variable"
3548 : (! bl->incremented ? "never incremented"
3549 : "count error")));
3550
3551 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3552 *backbl = bl->next;
3553 }
3554 else
3555 {
3556 backbl = &bl->next;
3557
3558 if (loop_dump_stream)
3559 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3560 }
3561 }
3562
3563 /* Exit if there are no bivs. */
3564 if (! loop_iv_list)
3565 {
3566 /* Can still unroll the loop anyway, but indicate that there is no
3567 strength reduction info available. */
3568 if (flag_unroll_loops)
3569 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3570
3571 return;
3572 }
3573
3574 /* Find initial value for each biv by searching backwards from loop_start,
3575 halting at first label. Also record any test condition. */
3576
3577 call_seen = 0;
3578 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3579 {
3580 note_insn = p;
3581
3582 if (GET_CODE (p) == CALL_INSN)
3583 call_seen = 1;
3584
3585 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3586 || GET_CODE (p) == CALL_INSN)
3587 note_stores (PATTERN (p), record_initial);
3588
3589 /* Record any test of a biv that branches around the loop if no store
3590 between it and the start of the loop. We only care about tests with
3591 constants and registers and only certain of those. */
3592 if (GET_CODE (p) == JUMP_INSN
3593 && JUMP_LABEL (p) != 0
3594 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3595 && (test = get_condition_for_loop (p)) != 0
3596 && GET_CODE (XEXP (test, 0)) == REG
3597 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3598 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3599 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3600 && bl->init_insn == 0)
3601 {
3602 /* If an NE test, we have an initial value! */
3603 if (GET_CODE (test) == NE)
3604 {
3605 bl->init_insn = p;
3606 bl->init_set = gen_rtx (SET, VOIDmode,
3607 XEXP (test, 0), XEXP (test, 1));
3608 }
3609 else
3610 bl->initial_test = test;
3611 }
3612 }
3613
3614 /* Look at each biv and see if we can say anything better about its
3615 initial value from any initializing insns set up above. (This is done
3616 in two passes to avoid missing SETs in a PARALLEL.) */
3617 for (bl = loop_iv_list; bl; bl = bl->next)
3618 {
3619 rtx src;
3620
3621 if (! bl->init_insn)
3622 continue;
3623
3624 src = SET_SRC (bl->init_set);
3625
3626 if (loop_dump_stream)
3627 fprintf (loop_dump_stream,
3628 "Biv %d initialized at insn %d: initial value ",
3629 bl->regno, INSN_UID (bl->init_insn));
3630
3631 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3632 || GET_MODE (src) == VOIDmode)
3633 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3634 {
3635 bl->initial_value = src;
3636
3637 if (loop_dump_stream)
3638 {
3639 if (GET_CODE (src) == CONST_INT)
3640 fprintf (loop_dump_stream, "%d\n", INTVAL (src));
3641 else
3642 {
3643 print_rtl (loop_dump_stream, src);
3644 fprintf (loop_dump_stream, "\n");
3645 }
3646 }
3647 }
3648 else
3649 {
3650 /* The biv's initial value is not a simple move,
3651 so let it keep the initial value of "itself". */
3652
3653 if (loop_dump_stream)
3654 fprintf (loop_dump_stream, "is complex\n");
3655 }
3656 }
3657
3658 /* Search the loop for general induction variables. */
3659
3660 /* A register is a giv if: it is only set once, it is a function of a
3661 biv and a constant (or invariant), and it is not a biv. */
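/* An illustrative, hypothetical instance: with a biv `i' stepping by 1,
an insn computing `t = i * 4 + 8' makes `t' a giv whose src_reg is
`i', with mult_val == 4 and add_val == 8.  */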
3662
3663 not_every_iteration = 0;
3664 loop_depth = 0;
3665 p = scan_start;
3666 while (1)
3667 {
3668 p = NEXT_INSN (p);
3669 /* At end of a straight-in loop, we are done.
3670 At end of a loop entered at the bottom, scan the top. */
3671 if (p == scan_start)
3672 break;
3673 if (p == end)
3674 {
3675 if (loop_top != 0)
3676 p = loop_top;
3677 else
3678 break;
3679 if (p == scan_start)
3680 break;
3681 }
3682
3683 /* Look for a general induction variable in a register. */
3684 if (GET_CODE (p) == INSN
3685 && (set = single_set (p))
3686 && GET_CODE (SET_DEST (set)) == REG
3687 && ! may_not_optimize[REGNO (SET_DEST (set))])
3688 {
3689 rtx src_reg;
3690 rtx add_val;
3691 rtx mult_val;
3692 int benefit;
3693 rtx regnote = 0;
3694
3695 dest_reg = SET_DEST (set);
3696 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3697 continue;
3698
3699 if (/* SET_SRC is a giv. */
3700 ((benefit = general_induction_var (SET_SRC (set),
3701 &src_reg, &add_val,
3702 &mult_val))
3703 /* Equivalent expression is a giv. */
3704 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3705 && (benefit = general_induction_var (XEXP (regnote, 0),
3706 &src_reg,
3707 &add_val, &mult_val))))
3708 /* Don't try to handle any regs made by loop optimization.
3709 We have nothing on them in regno_first_uid, etc. */
3710 && REGNO (dest_reg) < max_reg_before_loop
3711 /* Don't recognize a BASIC_INDUCT_VAR here. */
3712 && dest_reg != src_reg
3713 /* This must be the only place where the register is set. */
3714 && (n_times_set[REGNO (dest_reg)] == 1
3715 /* or all sets must be consecutive and make a giv. */
3716 || (benefit = consec_sets_giv (benefit, p,
3717 src_reg, dest_reg,
3718 &add_val, &mult_val))))
3719 {
3720 int count;
3721 struct induction *v
3722 = (struct induction *) alloca (sizeof (struct induction));
3723 rtx temp;
3724
3725 /* If this is a library call, increase benefit. */
3726 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3727 benefit += libcall_benefit (p);
3728
3729 /* Skip the consecutive insns, if there are any. */
3730 for (count = n_times_set[REGNO (dest_reg)] - 1;
3731 count > 0; count--)
3732 {
3733 /* If first insn of libcall sequence, skip to end.
3734 Do this at start of loop, since P is guaranteed to
3735 be an insn here. */
3736 if (GET_CODE (p) != NOTE
3737 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3738 p = XEXP (temp, 0);
3739
3740 do p = NEXT_INSN (p);
3741 while (GET_CODE (p) == NOTE);
3742 }
3743
3744 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3745 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3746 loop_end);
3747
3748 }
3749 }
3750
3751 #ifndef DONT_REDUCE_ADDR
3752 /* Look for givs which are memory addresses. */
3753 /* This resulted in worse code on a VAX 8600. I wonder if it
3754 still does. */
3755 if (GET_CODE (p) == INSN)
3756 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3757 loop_end);
3758 #endif
3759
3760 /* Update the status of whether giv can derive other givs. This can
3761 change when we pass a label or an insn that updates a biv. */
3762 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3763 || GET_CODE (p) == CODE_LABEL)
3764 update_giv_derive (p);
3765
3766 /* Past a jump, we get to insns for which we can't count
3767 on whether they will be executed during each iteration. */
3768 /* This code appears twice in strength_reduce. There is also similar
3769 code in scan_loop. */
3770 if (GET_CODE (p) == JUMP_INSN
3771 /* If we enter the loop in the middle, and scan around to the
3772 beginning, don't set not_every_iteration for that.
3773 This can be any kind of jump, since we want to know if insns
3774 will be executed if the loop is executed. */
3775 && ! (JUMP_LABEL (p) == loop_top
3776 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3777 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3778 {
3779 rtx label = 0;
3780
3781 /* If this is a jump outside the loop, then it also doesn't
3782 matter. Check to see if the target of this branch is on the
3783 loop_number_exit_labels list. */
3784
3785 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3786 label;
3787 label = LABEL_NEXTREF (label))
3788 if (XEXP (label, 0) == JUMP_LABEL (p))
3789 break;
3790
3791 if (! label)
3792 not_every_iteration = 1;
3793 }
3794
3795 else if (GET_CODE (p) == NOTE)
3796 {
3797 /* At the virtual top of a converted loop, insns are again known to
3798 be executed each iteration: logically, the loop begins here
3799 even though the exit code has been duplicated. */
3800 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3801 not_every_iteration = 0;
3802 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3803 loop_depth++;
3804 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3805 loop_depth--;
3806 }
3807
3808 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3809 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3810 or not an insn is known to be executed each iteration of the
3811 loop, whether or not any iterations are known to occur.
3812
3813 Therefore, if we have just passed a label and have no more labels
3814 between here and the test insn of the loop, we know these insns
3815 will be executed each iteration. */
3816
3817 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3818 && no_labels_between_p (p, loop_end))
3819 not_every_iteration = 0;
3820 }
3821
3822 /* Try to calculate and save the number of loop iterations. This is
3823 set to zero if the actual number can not be calculated. This must
3824 be called after all giv's have been identified, since otherwise it may
3825 fail if the iteration variable is a giv. */
3826
3827 loop_n_iterations = loop_iterations (loop_start, loop_end);
3828
3829 /* Now for each giv for which we still don't know whether or not it is
3830 replaceable, check to see if it is replaceable because its final value
3831 can be calculated. This must be done after loop_iterations is called,
3832 so that final_giv_value will work correctly. */
3833
3834 for (bl = loop_iv_list; bl; bl = bl->next)
3835 {
3836 struct induction *v;
3837
3838 for (v = bl->giv; v; v = v->next_iv)
3839 if (! v->replaceable && ! v->not_replaceable)
3840 check_final_value (v, loop_start, loop_end);
3841 }
3842
3843 /* Try to prove that the loop counter variable (if any) is always
3844 nonnegative; if so, record that fact with a REG_NONNEG note
3845 so that "decrement and branch until zero" insn can be used. */
3846 check_dbra_loop (loop_end, insn_count, loop_start);
3847
3848 #ifdef HAIFA
3849 /* Record loop variables relevant for BCT optimization before unrolling
3850 the loop. Unrolling may update part of this information, and the
3851 correct data will be used for generating the BCT. */
3852 #ifdef HAVE_decrement_and_branch_on_count
3853 if (HAVE_decrement_and_branch_on_count)
3854 analyze_loop_iterations (loop_start, loop_end);
3855 #endif
3856 #endif /* HAIFA */
3857
3858 /* Create reg_map to hold substitutions for replaceable giv regs. */
3859 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3860 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3861
3862 /* Examine each iv class for feasibility of strength reduction/induction
3863 variable elimination. */
3864
3865 for (bl = loop_iv_list; bl; bl = bl->next)
3866 {
3867 struct induction *v;
3868 int benefit;
3869 int all_reduced;
3870 rtx final_value = 0;
3871
3872 /* Test whether it will be possible to eliminate this biv
3873 provided all givs are reduced. This is possible if either
3874 the reg is not used outside the loop, or we can compute
3875 what its final value will be.
3876
3877 For architectures with a decrement_and_branch_until_zero insn,
3878 don't do this if we put a REG_NONNEG note on the endtest for
3879 this biv. */
3880
3881 /* Compare against bl->init_insn rather than loop_start.
3882 We aren't concerned with any uses of the biv between
3883 init_insn and loop_start since these won't be affected
3884 by the value of the biv elsewhere in the function, so
3885 long as init_insn doesn't use the biv itself.
3886 March 14, 1989 -- self@bayes.arc.nasa.gov */
3887
3888 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3889 && bl->init_insn
3890 && INSN_UID (bl->init_insn) < max_uid_for_loop
3891 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3892 #ifdef HAVE_decrement_and_branch_until_zero
3893 && ! bl->nonneg
3894 #endif
3895 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3896 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3897 #ifdef HAVE_decrement_and_branch_until_zero
3898 && ! bl->nonneg
3899 #endif
3900 ))
3901 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3902 threshold, insn_count);
3903 else
3904 {
3905 if (loop_dump_stream)
3906 {
3907 fprintf (loop_dump_stream,
3908 "Cannot eliminate biv %d.\n",
3909 bl->regno);
3910 fprintf (loop_dump_stream,
3911 "First use: insn %d, last use: insn %d.\n",
3912 REGNO_FIRST_UID (bl->regno),
3913 REGNO_LAST_UID (bl->regno));
3914 }
3915 }
3916
3917 /* Combine all giv's for this iv_class. */
3918 combine_givs (bl);
3919
3920 /* This will be true at the end, if all givs which depend on this
3921 biv have been strength reduced.
3922 We can't (currently) eliminate the biv unless this is so. */
3923 all_reduced = 1;
3924
3925 /* Check each giv in this class to see if we will benefit by reducing
3926 it. Skip giv's combined with others. */
3927 for (v = bl->giv; v; v = v->next_iv)
3928 {
3929 struct induction *tv;
3930
3931 if (v->ignore || v->same)
3932 continue;
3933
3934 benefit = v->benefit;
3935
3936 /* Reduce benefit if not replaceable, since we will insert
3937 a move-insn to replace the insn that calculates this giv.
3938 Don't do this unless the giv is a user variable, since it
3939 will often be marked non-replaceable because of the duplication
3940 of the exit code outside the loop. In such a case, the copies
3941 we insert are dead and will be deleted. So they don't have
3942 a cost. Similar situations exist. */
3943 /* ??? The new final_[bg]iv_value code does a much better job
3944 of finding replaceable giv's, and hence this code may no longer
3945 be necessary. */
3946 if (! v->replaceable && ! bl->eliminable
3947 && REG_USERVAR_P (v->dest_reg))
3948 benefit -= copy_cost;
3949
3950 /* Decrease the benefit to count the add-insns that we will
3951 insert to increment the reduced reg for the giv. */
3952 benefit -= add_cost * bl->biv_count;
3953
3954 /* Decide whether to strength-reduce this giv or to leave the code
3955 unchanged (recompute it from the biv each time it is used).
3956 This decision can be made independently for each giv. */
3957
3958 #ifdef AUTO_INC_DEC
3959 /* Attempt to guess whether autoincrement will handle some of the
3960 new add insns; if so, increase BENEFIT (undo the subtraction of
3961 add_cost that was done above). */
3962 if (v->giv_type == DEST_ADDR
3963 && GET_CODE (v->mult_val) == CONST_INT)
3964 {
3965 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
3966 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
3967 benefit += add_cost * bl->biv_count;
3968 #endif
3969 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
3970 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
3971 benefit += add_cost * bl->biv_count;
3972 #endif
3973 }
3974 #endif
3975
3976 /* If an insn is not to be strength reduced, then set its ignore
3977 flag, and clear all_reduced. */
3978
3979 /* A giv that depends on a reversed biv must be reduced if it is
3980 used after the loop exit; otherwise, it would have the wrong
3981 value after the loop exit. To make it simple, just reduce all
3982 such giv's whether or not we know they are used after the loop
3983 exit. */
3984
3985 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
3986 && ! bl->reversed)
3987 {
3988 if (loop_dump_stream)
3989 fprintf (loop_dump_stream,
3990 "giv of insn %d not worth while, %d vs %d.\n",
3991 INSN_UID (v->insn),
3992 v->lifetime * threshold * benefit, insn_count);
3993 v->ignore = 1;
3994 all_reduced = 0;
3995 }
3996 else
3997 {
3998 /* Check that we can increment the reduced giv without a
3999 multiply insn. If not, reject it. */
4000
4001 for (tv = bl->biv; tv; tv = tv->next_iv)
4002 if (tv->mult_val == const1_rtx
4003 && ! product_cheap_p (tv->add_val, v->mult_val))
4004 {
4005 if (loop_dump_stream)
4006 fprintf (loop_dump_stream,
4007 "giv of insn %d: would need a multiply.\n",
4008 INSN_UID (v->insn));
4009 v->ignore = 1;
4010 all_reduced = 0;
4011 break;
4012 }
4013 }
4014 }
4015
4016 /* Reduce each giv that we decided to reduce. */
4017
4018 for (v = bl->giv; v; v = v->next_iv)
4019 {
4020 struct induction *tv;
4021 if (! v->ignore && v->same == 0)
4022 {
4023 int auto_inc_opt = 0;
4024
4025 v->new_reg = gen_reg_rtx (v->mode);
4026
4027 #ifdef AUTO_INC_DEC
4028 /* If the target has auto-increment addressing modes, and
4029 this is an address giv, then try to put the increment
4030 immediately after its use, so that flow can create an
4031 auto-increment addressing mode. */
4032 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4033 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4034 /* We don't handle reversed biv's because bl->biv->insn
4035 does not have a valid INSN_LUID. */
4036 && ! bl->reversed
4037 && v->always_executed && ! v->maybe_multiple)
4038 {
4039 /* If other giv's have been combined with this one, then
4040 this will work only if all uses of the other giv's occur
4041 before this giv's insn. This is difficult to check.
4042
4043 We simplify this by looking for the common case where
4044 there is one DEST_REG giv, and this giv's insn is the
4045 last use of the dest_reg of that DEST_REG giv. If the
4046 increment occurs after the address giv, then we can
4047 perform the optimization. (Otherwise, the increment
4048 would have to go before other_giv, and we would not be
4049 able to combine it with the address giv to get an
4050 auto-inc address.) */
4051 if (v->combined_with)
4052 {
4053 struct induction *other_giv = 0;
4054
4055 for (tv = bl->giv; tv; tv = tv->next_iv)
4056 if (tv->same == v)
4057 {
4058 if (other_giv)
4059 break;
4060 else
4061 other_giv = tv;
4062 }
4063 if (! tv && other_giv
4064 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4065 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4066 == INSN_UID (v->insn))
4067 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4068 auto_inc_opt = 1;
4069 }
4070 /* Check for the case where the increment is before the address
4071 giv. */
4072 else if (INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn))
4073 auto_inc_opt = -1;
4074 else
4075 auto_inc_opt = 1;
4076
4077 #ifdef HAVE_cc0
4078 {
4079 rtx prev;
4080
4081 /* We can't put an insn immediately after one setting
4082 cc0, or immediately before one using cc0. */
4083 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4084 || (auto_inc_opt == -1
4085 && (prev = prev_nonnote_insn (v->insn)) != 0
4086 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4087 && sets_cc0_p (PATTERN (prev))))
4088 auto_inc_opt = 0;
4089 }
4090 #endif
4091
4092 if (auto_inc_opt)
4093 v->auto_inc_opt = 1;
4094 }
4095 #endif
4096
4097 /* For each place where the biv is incremented, add an insn
4098 to increment the new, reduced reg for the giv. */
4099 for (tv = bl->biv; tv; tv = tv->next_iv)
4100 {
4101 rtx insert_before;
4102
4103 if (! auto_inc_opt)
4104 insert_before = tv->insn;
4105 else if (auto_inc_opt == 1)
4106 insert_before = NEXT_INSN (v->insn);
4107 else
4108 insert_before = v->insn;
4109
4110 if (tv->mult_val == const1_rtx)
4111 emit_iv_add_mult (tv->add_val, v->mult_val,
4112 v->new_reg, v->new_reg, insert_before);
4113 else /* tv->mult_val == const0_rtx */
4114 /* A multiply is acceptable here
4115 since this is presumed to be seldom executed. */
4116 emit_iv_add_mult (tv->add_val, v->mult_val,
4117 v->add_val, v->new_reg, insert_before);
4118 }
4119
4120 /* Add code at loop start to initialize giv's reduced reg. */
4121
4122 emit_iv_add_mult (bl->initial_value, v->mult_val,
4123 v->add_val, v->new_reg, loop_start);
4124 }
4125 }
4126
4127 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4128 as not reduced.
4129
4130 For each giv register that can be reduced now: if replaceable,
4131 substitute reduced reg wherever the old giv occurs;
4132 else add new move insn "giv_reg = reduced_reg".
4133
4134 Also check for givs whose first use is their definition and whose
4135 last use is the definition of another giv. If so, it is likely
4136 dead and should not be used to eliminate a biv. */
4137 for (v = bl->giv; v; v = v->next_iv)
4138 {
4139 if (v->same && v->same->ignore)
4140 v->ignore = 1;
4141
4142 if (v->ignore)
4143 continue;
4144
4145 if (v->giv_type == DEST_REG
4146 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4147 {
4148 struct induction *v1;
4149
4150 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4151 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4152 v->maybe_dead = 1;
4153 }
4154
4155 /* Update expression if this was combined, in case other giv was
4156 replaced. */
4157 if (v->same)
4158 v->new_reg = replace_rtx (v->new_reg,
4159 v->same->dest_reg, v->same->new_reg);
4160
4161 if (v->giv_type == DEST_ADDR)
4162 /* Store reduced reg as the address in the memref where we found
4163 this giv. */
4164 validate_change (v->insn, v->location, v->new_reg, 0);
4165 else if (v->replaceable)
4166 {
4167 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4168
4169 #if 0
4170 /* I can no longer duplicate the original problem. Perhaps
4171 this is unnecessary now? */
4172
4173 /* Replaceable; it isn't strictly necessary to delete the old
4174 insn and emit a new one, because v->dest_reg is now dead.
4175
4176 However, especially when unrolling loops, the special
4177 handling for (set REG0 REG1) in the second cse pass may
4178 make v->dest_reg live again. To avoid this problem, emit
4179 an insn to set the original giv reg from the reduced giv.
4180 We can not delete the original insn, since it may be part
4181 of a LIBCALL, and the code in flow that eliminates dead
4182 libcalls will fail if it is deleted. */
4183 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4184 v->insn);
4185 #endif
4186 }
4187 else
4188 {
4189 /* Not replaceable; emit an insn to set the original giv reg from
4190 the reduced giv, same as above. */
4191 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4192 v->insn);
4193 }
4194
4195 /* When a loop is reversed, givs which depend on the reversed
4196 biv, and which are live outside the loop, must be set to their
4197 correct final value. This insn is only needed if the giv is
4198 not replaceable. The correct final value is the same as the
4199 value that the giv starts the reversed loop with. */
4200 if (bl->reversed && ! v->replaceable)
4201 emit_iv_add_mult (bl->initial_value, v->mult_val,
4202 v->add_val, v->dest_reg, end_insert_before);
4203 else if (v->final_value)
4204 {
4205 rtx insert_before;
4206
4207 /* If the loop has multiple exits, emit the insn before the
4208 loop to ensure that it will always be executed no matter
4209 how the loop exits. Otherwise, emit the insn after the loop,
4210 since this is slightly more efficient. */
4211 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4212 insert_before = loop_start;
4213 else
4214 insert_before = end_insert_before;
4215 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4216 insert_before);
4217
4218 #if 0
4219 /* If the insn to set the final value of the giv was emitted
4220 before the loop, then we must delete the insn inside the loop
4221 that sets it. If this is a LIBCALL, then we must delete
4222 every insn in the libcall. Note, however, that
4223 final_giv_value will only succeed when there are multiple
4224 exits if the giv is dead at each exit, hence it does not
4225 matter that the original insn remains because it is dead
4226 anyway. */
4227 /* Delete the insn inside the loop that sets the giv since
4228 the giv is now set before (or after) the loop. */
4229 delete_insn (v->insn);
4230 #endif
4231 }
4232
4233 if (loop_dump_stream)
4234 {
4235 fprintf (loop_dump_stream, "giv at %d reduced to ",
4236 INSN_UID (v->insn));
4237 print_rtl (loop_dump_stream, v->new_reg);
4238 fprintf (loop_dump_stream, "\n");
4239 }
4240 }
4241
4242 /* All the givs based on the biv bl have been reduced if they
4243 merit it. */
4244
4245 /* For each giv not marked as maybe dead that has been combined with a
4246 second giv, clear any "maybe dead" mark on that second giv.
4247 v->new_reg will either be or refer to the register of the giv it
4248 combined with.
4249
4250 Doing this clearing avoids problems in biv elimination where a
4251 giv's new_reg is a complex value that can't be put in the insn but
4252 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4253 Since the register will be used in either case, we'd prefer it be
4254 used from the simpler giv. */
4255
4256 for (v = bl->giv; v; v = v->next_iv)
4257 if (! v->maybe_dead && v->same)
4258 v->same->maybe_dead = 0;
4259
4260 /* Try to eliminate the biv, if it is a candidate.
4261 This won't work if ! all_reduced,
4262 since the givs we planned to use might not have been reduced.
4263
4264 We have to be careful that we didn't initially think we could eliminate
4265 this biv because of a giv that we now think may be dead and shouldn't
4266 be used as a biv replacement.
4267
4268 Also, there is the possibility that we may have a giv that looks
4269 like it can be used to eliminate a biv, but the resulting insn
4270 isn't valid. This can happen, for example, on the 88k, where a
4271 JUMP_INSN can compare a register only with zero. Attempts to
4272 replace it with a compare with a constant will fail.
4273
4274 Note that in cases where this call fails, we may have replaced some
4275 of the occurrences of the biv with a giv, but no harm was done in
4276 doing so in the rare cases where it can occur. */
4277
4278 if (all_reduced == 1 && bl->eliminable
4279 && maybe_eliminate_biv (bl, loop_start, end, 1,
4280 threshold, insn_count))
4281
4282 {
4283 /* ?? If we created a new test to bypass the loop entirely,
4284 or otherwise drop straight in, based on this test, then
4285 we might want to rewrite it also. This way some later
4286 pass has more hope of removing the initialization of this
4287 biv entirely. */
4288
4289 /* If final_value != 0, then the biv may be used after loop end
4290 and we must emit an insn to set it just in case.
4291
4292 Reversed bivs already have an insn after the loop setting their
4293 value, so we don't need another one. We can't calculate the
4294 proper final value for such a biv here anyways. */
4295 if (final_value != 0 && ! bl->reversed)
4296 {
4297 rtx insert_before;
4298
4299 /* If the loop has multiple exits, emit the insn before the
4300 loop to ensure that it will always be executed no matter
4301 how the loop exits. Otherwise, emit the insn after the
4302 loop, since this is slightly more efficient. */
4303 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4304 insert_before = loop_start;
4305 else
4306 insert_before = end_insert_before;
4307
4308 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4309 insert_before);
4310 }
4311
4312 #if 0
4313 /* Delete all of the instructions inside the loop which set
4314 the biv, as they are all dead. It is safe to delete them,
4315 because an insn setting a biv will never be part of a libcall. */
4316 /* However, deleting them will invalidate the regno_last_uid info,
4317 so keeping them around is more convenient. Final_biv_value
4318 will only succeed when there are multiple exits if the biv
4319 is dead at each exit, hence it does not matter that the original
4320 insn remains, because it is dead anyway. */
4321 for (v = bl->biv; v; v = v->next_iv)
4322 delete_insn (v->insn);
4323 #endif
4324
4325 if (loop_dump_stream)
4326 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4327 bl->regno);
4328 }
4329 }
4330
4331 /* Go through all the instructions in the loop, making all the
4332 register substitutions scheduled in REG_MAP. */
4333
4334 for (p = loop_start; p != end; p = NEXT_INSN (p))
4335 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4336 || GET_CODE (p) == CALL_INSN)
4337 {
4338 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4339 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4340 INSN_CODE (p) = -1;
4341 }
4342
4343 /* Unroll loops from within strength reduction so that we can use the
4344 induction variable information that strength_reduce has already
4345 collected. */
4346
4347 if (flag_unroll_loops)
4348 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4349
4350 #ifdef HAIFA
4351 /* Instrument the loop with a BCT insn. */
4352 #ifdef HAVE_decrement_and_branch_on_count
4353 if (HAVE_decrement_and_branch_on_count)
4354 insert_bct (loop_start, loop_end);
4355 #endif
4356 #endif /* HAIFA */
4357
4358 if (loop_dump_stream)
4359 fprintf (loop_dump_stream, "\n");
4360 }
4361 \f
4362 /* Return 1 if X is a valid source for an initial value (or as value being
4363 compared against in an initial test).
4364
4365 X must be either a register or constant and must not be clobbered between
4366 the current insn and the start of the loop.
4367
4368 INSN is the insn containing X. */
4369
4370 static int
4371 valid_initial_value_p (x, insn, call_seen, loop_start)
4372 rtx x;
4373 rtx insn;
4374 int call_seen;
4375 rtx loop_start;
4376 {
4377 if (CONSTANT_P (x))
4378 return 1;
4379
4380 /* Only consider pseudos we know about initialized in insns whose luids
4381 we know. */
4382 if (GET_CODE (x) != REG
4383 || REGNO (x) >= max_reg_before_loop)
4384 return 0;
4385
4386 /* Don't use a call-clobbered register across a call which clobbers it. On
4387 some machines, don't use any hard registers at all. */
4388 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4389 && (
4390 #ifdef SMALL_REGISTER_CLASSES
4391 SMALL_REGISTER_CLASSES
4392 #else
4393 0
4394 #endif
4395 || (call_used_regs[REGNO (x)] && call_seen))
4396 )
4397 return 0;
4398
4399 /* Don't use registers that have been clobbered before the start of the
4400 loop. */
4401 if (reg_set_between_p (x, insn, loop_start))
4402 return 0;
4403
4404 return 1;
4405 }
4406 \f
4407 /* Scan X for memory refs and check each memory address
4408 as a possible giv. INSN is the insn whose pattern X comes from.
4409 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4410 every loop iteration. */
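/* For example (illustrative only), scanning an insn that contains
(mem:SI (plus:SI (reg i) (const_int 400))), where `i' is a biv,
records the address as a DEST_ADDR giv with add_val == 400.  */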
4411
4412 static void
4413 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4414 rtx x;
4415 rtx insn;
4416 int not_every_iteration;
4417 rtx loop_start, loop_end;
4418 {
4419 register int i, j;
4420 register enum rtx_code code;
4421 register char *fmt;
4422
4423 if (x == 0)
4424 return;
4425
4426 code = GET_CODE (x);
4427 switch (code)
4428 {
4429 case REG:
4430 case CONST_INT:
4431 case CONST:
4432 case CONST_DOUBLE:
4433 case SYMBOL_REF:
4434 case LABEL_REF:
4435 case PC:
4436 case CC0:
4437 case ADDR_VEC:
4438 case ADDR_DIFF_VEC:
4439 case USE:
4440 case CLOBBER:
4441 return;
4442
4443 case MEM:
4444 {
4445 rtx src_reg;
4446 rtx add_val;
4447 rtx mult_val;
4448 int benefit;
4449
4450 benefit = general_induction_var (XEXP (x, 0),
4451 &src_reg, &add_val, &mult_val);
4452
4453 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4454 Such a giv isn't useful. */
4455 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4456 {
4457 /* Found one; record it. */
4458 struct induction *v
4459 = (struct induction *) oballoc (sizeof (struct induction));
4460
4461 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4462 add_val, benefit, DEST_ADDR, not_every_iteration,
4463 &XEXP (x, 0), loop_start, loop_end);
4464
4465 v->mem_mode = GET_MODE (x);
4466 }
4467 return;
4468 }
4469 }
4470
4471 /* Recursively scan the subexpressions for other mem refs. */
4472
4473 fmt = GET_RTX_FORMAT (code);
4474 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4475 if (fmt[i] == 'e')
4476 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4477 loop_end);
4478 else if (fmt[i] == 'E')
4479 for (j = 0; j < XVECLEN (x, i); j++)
4480 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4481 loop_start, loop_end);
4482 }
4483 \f
4484 /* Fill in the data about one biv update.
4485 V is the `struct induction' in which we record the biv. (It is
4486 allocated by the caller, with alloca.)
4487 INSN is the insn that sets it.
4488 DEST_REG is the biv's reg.
4489
4490 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4491 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4492 being set to INC_VAL.
4493
4494 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4495 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4496 can be executed more than once per iteration. If MAYBE_MULTIPLE
4497 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4498 executed exactly once per iteration. */
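/* Illustrative examples (not from the original source):
`i = i + 4' is recorded with MULT_VAL == const1_rtx and INC_VAL == 4;
`i = n', N invariant, with MULT_VAL == const0_rtx and INC_VAL == (reg n).  */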
4499
4500 static void
4501 record_biv (v, insn, dest_reg, inc_val, mult_val,
4502 not_every_iteration, maybe_multiple)
4503 struct induction *v;
4504 rtx insn;
4505 rtx dest_reg;
4506 rtx inc_val;
4507 rtx mult_val;
4508 int not_every_iteration;
4509 int maybe_multiple;
4510 {
4511 struct iv_class *bl;
4512
4513 v->insn = insn;
4514 v->src_reg = dest_reg;
4515 v->dest_reg = dest_reg;
4516 v->mult_val = mult_val;
4517 v->add_val = inc_val;
4518 v->mode = GET_MODE (dest_reg);
4519 v->always_computable = ! not_every_iteration;
4520 v->always_executed = ! not_every_iteration;
4521 v->maybe_multiple = maybe_multiple;
4522
4523 /* Add this to the reg's iv_class, creating a class
4524 if this is the first incrementation of the reg. */
4525
4526 bl = reg_biv_class[REGNO (dest_reg)];
4527 if (bl == 0)
4528 {
4529 /* Create and initialize new iv_class. */
4530
4531 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4532
4533 bl->regno = REGNO (dest_reg);
4534 bl->biv = 0;
4535 bl->giv = 0;
4536 bl->biv_count = 0;
4537 bl->giv_count = 0;
4538
4539 /* Set initial value to the reg itself. */
4540 bl->initial_value = dest_reg;
4541 /* We haven't seen the initializing insn yet. */
4542 bl->init_insn = 0;
4543 bl->init_set = 0;
4544 bl->initial_test = 0;
4545 bl->incremented = 0;
4546 bl->eliminable = 0;
4547 bl->nonneg = 0;
4548 bl->reversed = 0;
4549 bl->total_benefit = 0;
4550
4551 /* Add this class to loop_iv_list. */
4552 bl->next = loop_iv_list;
4553 loop_iv_list = bl;
4554
4555 /* Put it in the array of biv register classes. */
4556 reg_biv_class[REGNO (dest_reg)] = bl;
4557 }
4558
4559 /* Update IV_CLASS entry for this biv. */
4560 v->next_iv = bl->biv;
4561 bl->biv = v;
4562 bl->biv_count++;
4563 if (mult_val == const1_rtx)
4564 bl->incremented = 1;
4565
4566 if (loop_dump_stream)
4567 {
4568 fprintf (loop_dump_stream,
4569 "Insn %d: possible biv, reg %d,",
4570 INSN_UID (insn), REGNO (dest_reg));
4571 if (GET_CODE (inc_val) == CONST_INT)
4572 fprintf (loop_dump_stream, " const = %d\n",
4573 INTVAL (inc_val));
4574 else
4575 {
4576 fprintf (loop_dump_stream, " const = ");
4577 print_rtl (loop_dump_stream, inc_val);
4578 fprintf (loop_dump_stream, "\n");
4579 }
4580 }
4581 }
4582 \f
4583 /* Fill in the data about one giv.
4584 V is the `struct induction' in which we record the giv. (It is
4585 allocated by the caller, with alloca.)
4586 INSN is the insn that sets it.
4587 BENEFIT estimates the savings from deleting this insn.
4588 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4589 into a register or is used as a memory address.
4590
4591 SRC_REG is the biv reg which the giv is computed from.
4592 DEST_REG is the giv's reg (if the giv is stored in a reg).
4593 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4594 LOCATION points to the place where this giv's value appears in INSN. */
4595
4596 static void
4597 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4598 type, not_every_iteration, location, loop_start, loop_end)
4599 struct induction *v;
4600 rtx insn;
4601 rtx src_reg;
4602 rtx dest_reg;
4603 rtx mult_val, add_val;
4604 int benefit;
4605 enum g_types type;
4606 int not_every_iteration;
4607 rtx *location;
4608 rtx loop_start, loop_end;
4609 {
4610 struct induction *b;
4611 struct iv_class *bl;
4612 rtx set = single_set (insn);
4613 rtx p;
4614
4615 v->insn = insn;
4616 v->src_reg = src_reg;
4617 v->giv_type = type;
4618 v->dest_reg = dest_reg;
4619 v->mult_val = mult_val;
4620 v->add_val = add_val;
4621 v->benefit = benefit;
4622 v->location = location;
4623 v->cant_derive = 0;
4624 v->combined_with = 0;
4625 v->maybe_multiple = 0;
4626 v->maybe_dead = 0;
4627 v->derive_adjustment = 0;
4628 v->same = 0;
4629 v->ignore = 0;
4630 v->new_reg = 0;
4631 v->final_value = 0;
4632 v->same_insn = 0;
4633 v->auto_inc_opt = 0;
4634 v->unrolled = 0;
4635 v->shared = 0;
4636
4637 /* The v->always_computable field is used in update_giv_derive, to
4638 determine whether a giv can be used to derive another giv. For a
4639 DEST_REG giv, INSN computes a new value for the giv, so its value
4640 isn't computable if INSN isn't executed every iteration.
4641 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4642 it does not compute a new value. Hence the value is always computable
4643 regardless of whether INSN is executed each iteration. */
4644
4645 if (type == DEST_ADDR)
4646 v->always_computable = 1;
4647 else
4648 v->always_computable = ! not_every_iteration;
4649
4650 v->always_executed = ! not_every_iteration;
4651
4652 if (type == DEST_ADDR)
4653 {
4654 v->mode = GET_MODE (*location);
4655 v->lifetime = 1;
4656 v->times_used = 1;
4657 }
4658 else /* type == DEST_REG */
4659 {
4660 v->mode = GET_MODE (SET_DEST (set));
4661
4662 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4663 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4664
4665 v->times_used = n_times_used[REGNO (dest_reg)];
4666
4667 /* If the lifetime is zero, it means that this register is
4668 really a dead store. So mark this as a giv that can be
4669 ignored. This will not prevent the biv from being eliminated. */
4670 if (v->lifetime == 0)
4671 v->ignore = 1;
4672
4673 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4674 reg_iv_info[REGNO (dest_reg)] = v;
4675 }
4676
4677 /* Add the giv to the class of givs computed from one biv. */
4678
4679 bl = reg_biv_class[REGNO (src_reg)];
4680 if (bl)
4681 {
4682 v->next_iv = bl->giv;
4683 bl->giv = v;
4684 /* Don't count DEST_ADDR. This is supposed to count the number of
4685 insns that calculate givs. */
4686 if (type == DEST_REG)
4687 bl->giv_count++;
4688 bl->total_benefit += benefit;
4689 }
4690 else
4691 /* Fatal error, biv missing for this giv? */
4692 abort ();
4693
4694 if (type == DEST_ADDR)
4695 v->replaceable = 1;
4696 else
4697 {
4698 /* The giv can be replaced outright by the reduced register only if all
4699 of the following conditions are true:
4700 - the insn that sets the giv is always executed on any iteration
4701 on which the giv is used at all
4702 (there are two ways to deduce this:
4703 either the insn is executed on every iteration,
4704 or all uses follow that insn in the same basic block),
4705 - the giv is not used outside the loop
4706 - no assignments to the biv occur during the giv's lifetime. */
4707
4708 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4709 /* Previous line always fails if INSN was moved by loop opt. */
4710 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4711 && (! not_every_iteration
4712 || last_use_this_basic_block (dest_reg, insn)))
4713 {
4714 /* Now check that there are no assignments to the biv within the
4715 giv's lifetime. This requires two separate checks. */
4716
4717 /* Check each biv update, and fail if any are between the first
4718 and last use of the giv.
4719
4720 If this loop contains an inner loop that was unrolled, then
4721 the insn modifying the biv may have been emitted by the loop
4722 unrolling code, and hence does not have a valid luid. Just
4723 mark the biv as not replaceable in this case. It is not very
4724 useful as a biv, because it is used in two different loops.
4725 It is very unlikely that we would be able to optimize the giv
4726 using this biv anyway. */
4727
4728 v->replaceable = 1;
4729 for (b = bl->biv; b; b = b->next_iv)
4730 {
4731 if (INSN_UID (b->insn) >= max_uid_for_loop
4732 || ((uid_luid[INSN_UID (b->insn)]
4733 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4734 && (uid_luid[INSN_UID (b->insn)]
4735 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4736 {
4737 v->replaceable = 0;
4738 v->not_replaceable = 1;
4739 break;
4740 }
4741 }
4742
4743 /* If there are any backwards branches that go from after the
4744 biv update to before it, then this giv is not replaceable. */
4745 if (v->replaceable)
4746 for (b = bl->biv; b; b = b->next_iv)
4747 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4748 {
4749 v->replaceable = 0;
4750 v->not_replaceable = 1;
4751 break;
4752 }
4753 }
4754 else
4755 {
4756 /* May still be replaceable, we don't have enough info here to
4757 decide. */
4758 v->replaceable = 0;
4759 v->not_replaceable = 0;
4760 }
4761 }
4762
4763 if (loop_dump_stream)
4764 {
4765 if (type == DEST_REG)
4766 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4767 INSN_UID (insn), REGNO (dest_reg));
4768 else
4769 fprintf (loop_dump_stream, "Insn %d: dest address",
4770 INSN_UID (insn));
4771
4772 fprintf (loop_dump_stream, " src reg %d benefit %d",
4773 REGNO (src_reg), v->benefit);
4774 fprintf (loop_dump_stream, " used %d lifetime %d",
4775 v->times_used, v->lifetime);
4776
4777 if (v->replaceable)
4778 fprintf (loop_dump_stream, " replaceable");
4779
4780 if (GET_CODE (mult_val) == CONST_INT)
4781 fprintf (loop_dump_stream, " mult %d",
4782 INTVAL (mult_val));
4783 else
4784 {
4785 fprintf (loop_dump_stream, " mult ");
4786 print_rtl (loop_dump_stream, mult_val);
4787 }
4788
4789 if (GET_CODE (add_val) == CONST_INT)
4790 fprintf (loop_dump_stream, " add %d",
4791 INTVAL (add_val));
4792 else
4793 {
4794 fprintf (loop_dump_stream, " add ");
4795 print_rtl (loop_dump_stream, add_val);
4796 }
4797 }
4798
4799 if (loop_dump_stream)
4800 fprintf (loop_dump_stream, "\n");
4801
4802 }
4803
4804
4805 /* All this does is determine whether a giv can be made replaceable because
4806 its final value can be calculated. This code can not be part of record_giv
4807 above, because final_giv_value requires that the number of loop iterations
4808 be known, and that can not be accurately calculated until after all givs
4809 have been identified. */
4810
4811 static void
4812 check_final_value (v, loop_start, loop_end)
4813 struct induction *v;
4814 rtx loop_start, loop_end;
4815 {
4816 struct iv_class *bl;
4817 rtx final_value = 0;
4818
4819 bl = reg_biv_class[REGNO (v->src_reg)];
4820
4821 /* DEST_ADDR givs will never reach here, because they are always marked
4822 replaceable above in record_giv. */
4823
4824 /* The giv can be replaced outright by the reduced register only if all
4825 of the following conditions are true:
4826 - the insn that sets the giv is always executed on any iteration
4827 on which the giv is used at all
4828 (there are two ways to deduce this:
4829 either the insn is executed on every iteration,
4830 or all uses follow that insn in the same basic block),
4831 - its final value can be calculated (this condition is different
4832 than the one above in record_giv)
4833 - no assignments to the biv occur during the giv's lifetime. */
4834
4835 #if 0
4836 /* This is only called now when replaceable is known to be false. */
4837 /* Clear replaceable, so that it won't confuse final_giv_value. */
4838 v->replaceable = 0;
4839 #endif
4840
4841 if ((final_value = final_giv_value (v, loop_start, loop_end))
4842 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4843 {
4844 int biv_increment_seen = 0;
4845 rtx p = v->insn;
4846 rtx last_giv_use;
4847
4848 v->replaceable = 1;
4849
4850 /* When trying to determine whether or not a biv increment occurs
4851 during the lifetime of the giv, we can ignore uses of the variable
4852 outside the loop because final_value is true. Hence we can not
4853 use regno_last_uid and regno_first_uid as above in record_giv. */
4854
4855 /* Search the loop to determine whether any assignments to the
4856 biv occur during the giv's lifetime. Start with the insn
4857 that sets the giv, and search around the loop until we come
4858 back to that insn again.
4859
4860 Also fail if there is a jump within the giv's lifetime that jumps
4861 to somewhere outside the lifetime but still within the loop. This
4862 catches spaghetti code where the execution order is not linear, and
4863 hence the above test fails. Here we assume that the giv lifetime
4864 does not extend from one iteration of the loop to the next, so as
4865 to make the test easier. Since the lifetime isn't known yet,
4866 this requires two loops. See also record_giv above. */
4867
4868 last_giv_use = v->insn;
4869
4870 while (1)
4871 {
4872 p = NEXT_INSN (p);
4873 if (p == loop_end)
4874 p = NEXT_INSN (loop_start);
4875 if (p == v->insn)
4876 break;
4877
4878 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4879 || GET_CODE (p) == CALL_INSN)
4880 {
4881 if (biv_increment_seen)
4882 {
4883 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4884 {
4885 v->replaceable = 0;
4886 v->not_replaceable = 1;
4887 break;
4888 }
4889 }
4890 else if (reg_set_p (v->src_reg, PATTERN (p)))
4891 biv_increment_seen = 1;
4892 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4893 last_giv_use = p;
4894 }
4895 }
4896
4897 /* Now that the lifetime of the giv is known, check for branches
4898 from within the lifetime to outside the lifetime if it is still
4899 replaceable. */
4900
4901 if (v->replaceable)
4902 {
4903 p = v->insn;
4904 while (1)
4905 {
4906 p = NEXT_INSN (p);
4907 if (p == loop_end)
4908 p = NEXT_INSN (loop_start);
4909 if (p == last_giv_use)
4910 break;
4911
4912 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4913 && LABEL_NAME (JUMP_LABEL (p))
4914 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
4915 || (INSN_UID (v->insn) >= max_uid_for_loop)
4916 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
4917 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4918 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4919 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4920 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4921 {
4922 v->replaceable = 0;
4923 v->not_replaceable = 1;
4924
4925 if (loop_dump_stream)
4926 fprintf (loop_dump_stream,
4927 "Found branch outside giv lifetime.\n");
4928
4929 break;
4930 }
4931 }
4932 }
4933
4934 /* If it is replaceable, then save the final value. */
4935 if (v->replaceable)
4936 v->final_value = final_value;
4937 }
4938
4939 if (loop_dump_stream && v->replaceable)
4940 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
4941 INSN_UID (v->insn), REGNO (v->dest_reg));
4942 }
4943 \f
4944 /* Update the status of whether a giv can derive other givs.
4945
4946 We need to do something special if there is or may be an update to the biv
4947 between the time the giv is defined and the time it is used to derive
4948 another giv.
4949
4950 In addition, a giv that is only conditionally set is not allowed to
4951 derive another giv once a label has been passed.
4952
4953 The cases we look at are when a label or an update to a biv is passed. */
4954
4955 static void
4956 update_giv_derive (p)
4957 rtx p;
4958 {
4959 struct iv_class *bl;
4960 struct induction *biv, *giv;
4961 rtx tem;
4962 int dummy;
4963
4964 /* Search all IV classes, then all bivs, and finally all givs.
4965
4966 There are three cases we are concerned with. First we have the situation
4967 of a giv that is only updated conditionally. In that case, it may not
4968 derive any givs after a label is passed.
4969
4970 The second case is when a biv update occurs, or may occur, after the
4971 definition of a giv. For certain biv updates (see below) that are
4972 known to occur between the giv definition and use, we can adjust the
4973 giv definition. For others, or when the biv update is conditional,
4974 we must prevent the giv from deriving any other givs. There are two
4975 sub-cases within this case.
4976
4977 If this is a label, we are concerned with any biv update that is done
4978 conditionally, since it may be done after the giv is defined followed by
4979 a branch here (actually, we need to pass both a jump and a label, but
4980 this extra tracking doesn't seem worth it).
4981
4982 If this is a jump, we are concerned about any biv update that may be
4983 executed multiple times. We are actually only concerned about
4984 backward jumps, but it is probably not worth performing the test
4985 on the jump again here.
4986
4987 If this is a biv update, we must adjust the giv status to show that a
4988 subsequent biv update was performed. If this adjustment cannot be done,
4989 the giv cannot derive further givs. */
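/* A sketch of the compensation (illustrative): if giv g == 4*i + c and
the increment `i = i + 1' falls between g's definition and a later
use of g to derive another giv, the derived giv must be adjusted by
biv->add_val * giv->mult_val == 4; that product is what accumulates
in derive_adjustment below.  */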
4990
4991 for (bl = loop_iv_list; bl; bl = bl->next)
4992 for (biv = bl->biv; biv; biv = biv->next_iv)
4993 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
4994 || biv->insn == p)
4995 {
4996 for (giv = bl->giv; giv; giv = giv->next_iv)
4997 {
4998 /* If cant_derive is already true, there is no point in
4999 checking all of these conditions again. */
5000 if (giv->cant_derive)
5001 continue;
5002
5003 /* If this giv is conditionally set and we have passed a label,
5004 it cannot derive anything. */
5005 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5006 giv->cant_derive = 1;
5007
5008 /* Skip givs that have mult_val == 0, since
5009 they are really invariants. Also skip those that are
5010 replaceable, since we know their lifetime doesn't contain
5011 any biv update. */
5012 else if (giv->mult_val == const0_rtx || giv->replaceable)
5013 continue;
5014
5015 /* The only way we can allow this giv to derive another
5016 is if this is a biv increment and we can form the product
5017 of biv->add_val and giv->mult_val. In this case, we will
5018 be able to compute a compensation. */
5019 else if (biv->insn == p)
5020 {
5021 tem = 0;
5022
5023 if (biv->mult_val == const1_rtx)
5024 tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
5025 biv->add_val,
5026 giv->mult_val),
5027 &dummy);
5028
5029 if (tem && giv->derive_adjustment)
5030 tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
5031 giv->derive_adjustment),
5032 &dummy);
5033 if (tem)
5034 giv->derive_adjustment = tem;
5035 else
5036 giv->cant_derive = 1;
5037 }
5038 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5039 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5040 giv->cant_derive = 1;
5041 }
5042 }
5043 }
5044 \f
5045 /* Check whether an insn is an increment legitimate for a basic induction var.
5046 X is the source of insn P, or a part of it.
5047 MODE is the mode in which X should be interpreted.
5048
5049 DEST_REG is the putative biv, also the destination of the insn.
5050 We accept patterns of these forms:
5051 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5052 REG = INVARIANT + REG
5053
5054 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5055 and store the additive term into *INC_VAL.
5056
5057 If X is an assignment of an invariant into DEST_REG, we set
5058 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5059
5060 We also want to detect a BIV when it corresponds to a variable
5061 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5062 of the variable may be a PLUS that adds a SUBREG of that variable to
5063 an invariant and then sign- or zero-extends the result of the PLUS
5064 into the variable.
5065
5066 Most GIVs in such cases will be in the promoted mode, since that is the
5067 probably the natural computation mode (and almost certainly the mode
5068 used for addresses) on the machine. So we view the pseudo-reg containing
5069 the variable as the BIV, as if it were simply incremented.
5070
5071 Note that treating the entire pseudo as a BIV will result in making
5072 simple increments to any GIVs based on it. However, if the variable
5073 overflows in its declared mode but not its promoted mode, the result will
5074 be incorrect. This is acceptable if the variable is signed, since
5075 overflows in such cases are undefined, but not if it is unsigned, since
5076 those overflows are defined. So we only check for SIGN_EXTEND and
5077 not ZERO_EXTEND.
5078
5079 If we cannot find a biv, we return 0. */
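/* For example (illustrative), the increment `i = i + 4' appears as

(set (reg i) (plus:SI (reg i) (const_int 4)))

and yields *MULT_VAL == const1_rtx, *INC_VAL == (const_int 4); the
invariant assignment `i = n' instead yields *MULT_VAL == const0_rtx
and *INC_VAL == (reg n).  */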
5080
5081 static int
5082 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5083 register rtx x;
5084 enum machine_mode mode;
5085 rtx dest_reg;
5086 rtx p;
5087 rtx *inc_val;
5088 rtx *mult_val;
5089 {
5090 register enum rtx_code code;
5091 rtx arg;
5092 rtx insn, set = 0;
5093
5094 code = GET_CODE (x);
5095 switch (code)
5096 {
5097 case PLUS:
5098 if (XEXP (x, 0) == dest_reg
5099 || (GET_CODE (XEXP (x, 0)) == SUBREG
5100 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5101 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5102 arg = XEXP (x, 1);
5103 else if (XEXP (x, 1) == dest_reg
5104 || (GET_CODE (XEXP (x, 1)) == SUBREG
5105 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5106 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5107 arg = XEXP (x, 0);
5108 else
5109 return 0;
5110
5111 if (invariant_p (arg) != 1)
5112 return 0;
5113
5114 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5115 *mult_val = const1_rtx;
5116 return 1;
5117
5118 case SUBREG:
5119 /* If this is a SUBREG for a promoted variable, check the inner
5120 value. */
5121 if (SUBREG_PROMOTED_VAR_P (x))
5122 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5123 dest_reg, p, inc_val, mult_val);
5124 return 0;
5125
5126 case REG:
5127 /* If this register is assigned in the previous insn, look at its
5128 source, but don't go outside the loop or past a label. */
5129
5130 for (insn = PREV_INSN (p);
5131 (insn && GET_CODE (insn) == NOTE
5132 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5133 insn = PREV_INSN (insn))
5134 ;
5135
5136 if (insn)
5137 set = single_set (insn);
5138
5139 if (set != 0
5140 && (SET_DEST (set) == x
5141 || (GET_CODE (SET_DEST (set)) == SUBREG
5142 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5143 <= UNITS_PER_WORD)
5144 && SUBREG_REG (SET_DEST (set)) == x)))
5145 return basic_induction_var (SET_SRC (set),
5146 (GET_MODE (SET_SRC (set)) == VOIDmode
5147 ? GET_MODE (x)
5148 : GET_MODE (SET_SRC (set))),
5149 dest_reg, insn,
5150 inc_val, mult_val);
5151 /* ... fall through ... */
5152
5153 /* Can accept constant setting of biv only when inside the innermost loop.
5154 Otherwise, a biv of an inner loop may be incorrectly recognized
5155 as a biv of the outer loop,
5156 causing code to be moved INTO the inner loop. */
5157 case MEM:
5158 if (invariant_p (x) != 1)
5159 return 0;
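/* ... fall through ... */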
5160 case CONST_INT:
5161 case SYMBOL_REF:
5162 case CONST:
5163 if (loops_enclosed == 1)
5164 {
5165 /* Possible bug here? Perhaps we don't know the mode of X. */
5166 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5167 *mult_val = const0_rtx;
5168 return 1;
5169 }
5170 else
5171 return 0;
5172
5173 case SIGN_EXTEND:
5174 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5175 dest_reg, p, inc_val, mult_val);
5176 case ASHIFTRT:
5177 /* Similar, since this can be a sign extension. */
5178 for (insn = PREV_INSN (p);
5179 (insn && GET_CODE (insn) == NOTE
5180 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5181 insn = PREV_INSN (insn))
5182 ;
5183
5184 if (insn)
5185 set = single_set (insn);
5186
5187 if (set && SET_DEST (set) == XEXP (x, 0)
5188 && GET_CODE (XEXP (x, 1)) == CONST_INT
5189 && INTVAL (XEXP (x, 1)) >= 0
5190 && GET_CODE (SET_SRC (set)) == ASHIFT
5191 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5192 return basic_induction_var (XEXP (SET_SRC (set), 0),
5193 GET_MODE (XEXP (x, 0)),
5194 dest_reg, insn, inc_val, mult_val);
5195 return 0;
5196
5197 default:
5198 return 0;
5199 }
5200 }
5201 \f
5202 /* A general induction variable (giv) is any quantity that is a linear
5203 function of a basic induction variable,
5204 i.e. giv = biv * mult_val + add_val.
5205 The coefficients can be any loop invariant quantity.
5206 A giv need not be computed directly from the biv;
5207 it can be computed by way of other givs. */
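/* A concrete illustration (hypothetical loop, assuming 4-byte ints): in
   `for (i = 0; i < n; i++) sum += a[i];', the counter `i' is a biv, and
   the address computation `a + i*4' is a giv with mult_val 4 and
   add_val `a'.  */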
5208
5209 /* Determine whether X computes a giv.
5210 If it does, return a nonzero value
5211 which is the benefit from eliminating the computation of X;
5212 set *SRC_REG to the register of the biv that it is computed from;
5213 set *ADD_VAL and *MULT_VAL to the coefficients,
5214 such that the value of X is biv * mult + add. */
5215
5216 static int
5217 general_induction_var (x, src_reg, add_val, mult_val)
5218 rtx x;
5219 rtx *src_reg;
5220 rtx *add_val;
5221 rtx *mult_val;
5222 {
5223 rtx orig_x = x;
5224 int benefit = 0;
5225 char *storage;
5226
5227 /* If this is an invariant, forget it, it isn't a giv. */
5228 if (invariant_p (x) == 1)
5229 return 0;
5230
5231 /* See if the expression could be a giv and get its form.
5232 Mark our place on the obstack in case we don't find a giv. */
5233 storage = (char *) oballoc (0);
5234 x = simplify_giv_expr (x, &benefit);
5235 if (x == 0)
5236 {
5237 obfree (storage);
5238 return 0;
5239 }
5240
5241 switch (GET_CODE (x))
5242 {
5243 case USE:
5244 case CONST_INT:
5245 /* Since this is now an invariant and wasn't before, it must be a giv
5246 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5247 with. */
5248 *src_reg = loop_iv_list->biv->dest_reg;
5249 *mult_val = const0_rtx;
5250 *add_val = x;
5251 break;
5252
5253 case REG:
5254 /* This is equivalent to a BIV. */
5255 *src_reg = x;
5256 *mult_val = const1_rtx;
5257 *add_val = const0_rtx;
5258 break;
5259
5260 case PLUS:
5261 /* Either (plus (biv) (invar)) or
5262 (plus (mult (biv) (invar_1)) (invar_2)). */
5263 if (GET_CODE (XEXP (x, 0)) == MULT)
5264 {
5265 *src_reg = XEXP (XEXP (x, 0), 0);
5266 *mult_val = XEXP (XEXP (x, 0), 1);
5267 }
5268 else
5269 {
5270 *src_reg = XEXP (x, 0);
5271 *mult_val = const1_rtx;
5272 }
5273 *add_val = XEXP (x, 1);
5274 break;
5275
5276 case MULT:
5277 /* ADD_VAL is zero. */
5278 *src_reg = XEXP (x, 0);
5279 *mult_val = XEXP (x, 1);
5280 *add_val = const0_rtx;
5281 break;
5282
5283 default:
5284 abort ();
5285 }
5286
5287 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5288 one unless they are CONST_INT). */
5289 if (GET_CODE (*add_val) == USE)
5290 *add_val = XEXP (*add_val, 0);
5291 if (GET_CODE (*mult_val) == USE)
5292 *mult_val = XEXP (*mult_val, 0);
5293
5294 benefit += rtx_cost (orig_x, SET);
5295
5296 /* Always return some benefit if this is a giv so it will be detected
5297 as such. This allows elimination of bivs that might otherwise
5298 not be eliminated. */
5299 return benefit == 0 ? 1 : benefit;
5300 }
5301 \f
5302 /* Given an expression, X, try to form it as a linear function of a biv.
5303 We will canonicalize it to be of the form
5304 (plus (mult (BIV) (invar_1))
5305 (invar_2))
5306 with possible degeneracies.
5307
5308 The invariant expressions must each be of a form that can be used as a
5309 machine operand. We surround them with a USE rtx (a hack, but localized
5310 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5311 routine; it is the caller's responsibility to strip them.
5312
5313 If no such canonicalization is possible (i.e., two biv's are used or an
5314 expression that is neither invariant nor a biv or giv), this routine
5315 returns 0.
5316
5317 For a non-zero return, the result will have a code of CONST_INT, USE,
5318 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5319
5320 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
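/* For example (illustrative): with biv I, the expression
   (mult (plus I (const_int 2)) (const_int 4)) is distributed and returned
   as (plus (mult I (const_int 4)) (const_int 8)), i.e. the canonical
   (plus (mult BIV invar_1) invar_2) form above.  */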
5321
5322 static rtx
5323 simplify_giv_expr (x, benefit)
5324 rtx x;
5325 int *benefit;
5326 {
5327 enum machine_mode mode = GET_MODE (x);
5328 rtx arg0, arg1;
5329 rtx tem;
5330
5331 /* If this is not an integer mode, or if we cannot do arithmetic in this
5332 mode, this can't be a giv. */
5333 if (mode != VOIDmode
5334 && (GET_MODE_CLASS (mode) != MODE_INT
5335 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5336 return 0;
5337
5338 switch (GET_CODE (x))
5339 {
5340 case PLUS:
5341 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5342 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5343 if (arg0 == 0 || arg1 == 0)
5344 return 0;
5345
5346 /* Put constant last, CONST_INT last if both constant. */
5347 if ((GET_CODE (arg0) == USE
5348 || GET_CODE (arg0) == CONST_INT)
5349 && GET_CODE (arg1) != CONST_INT)
5350 tem = arg0, arg0 = arg1, arg1 = tem;
5351
5352 /* Handle addition of zero, then addition of an invariant. */
5353 if (arg1 == const0_rtx)
5354 return arg0;
5355 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5356 switch (GET_CODE (arg0))
5357 {
5358 case CONST_INT:
5359 case USE:
5360 /* Both invariant. Only valid if sum is machine operand.
5361 First strip off possible USE on first operand. */
5362 if (GET_CODE (arg0) == USE)
5363 arg0 = XEXP (arg0, 0);
5364
5365 tem = 0;
5366 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5367 {
5368 tem = plus_constant (arg0, INTVAL (arg1));
5369 if (GET_CODE (tem) != CONST_INT)
5370 tem = gen_rtx (USE, mode, tem);
5371 }
5372
5373 return tem;
5374
5375 case REG:
5376 case MULT:
5377 /* biv + invar or mult + invar. Return sum. */
5378 return gen_rtx (PLUS, mode, arg0, arg1);
5379
5380 case PLUS:
5381 /* (a + invar_1) + invar_2. Associate. */
5382 return simplify_giv_expr (gen_rtx (PLUS, mode,
5383 XEXP (arg0, 0),
5384 gen_rtx (PLUS, mode,
5385 XEXP (arg0, 1), arg1)),
5386 benefit);
5387
5388 default:
5389 abort ();
5390 }
5391
5392 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5393 MULT to reduce cases. */
5394 if (GET_CODE (arg0) == REG)
5395 arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
5396 if (GET_CODE (arg1) == REG)
5397 arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);
5398
5399 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5400 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5401 Recurse to associate the second PLUS. */
5402 if (GET_CODE (arg1) == MULT)
5403 tem = arg0, arg0 = arg1, arg1 = tem;
5404
5405 if (GET_CODE (arg1) == PLUS)
5406 return simplify_giv_expr (gen_rtx (PLUS, mode,
5407 gen_rtx (PLUS, mode,
5408 arg0, XEXP (arg1, 0)),
5409 XEXP (arg1, 1)),
5410 benefit);
5411
5412 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5413 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5414 abort ();
5415
5416 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5417 return 0;
5418
5419 return simplify_giv_expr (gen_rtx (MULT, mode,
5420 XEXP (arg0, 0),
5421 gen_rtx (PLUS, mode,
5422 XEXP (arg0, 1),
5423 XEXP (arg1, 1))),
5424 benefit);
5425
5426 case MINUS:
5427 /* Handle "a - b" as "a + b * (-1)". */
5428 return simplify_giv_expr (gen_rtx (PLUS, mode,
5429 XEXP (x, 0),
5430 gen_rtx (MULT, mode,
5431 XEXP (x, 1), constm1_rtx)),
5432 benefit);
5433
5434 case MULT:
5435 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5436 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5437 if (arg0 == 0 || arg1 == 0)
5438 return 0;
5439
5440 /* Put constant last, CONST_INT last if both constant. */
5441 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5442 && GET_CODE (arg1) != CONST_INT)
5443 tem = arg0, arg0 = arg1, arg1 = tem;
5444
5445 /* If second argument is not now constant, not giv. */
5446 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5447 return 0;
5448
5449 /* Handle multiply by 0 or 1. */
5450 if (arg1 == const0_rtx)
5451 return const0_rtx;
5452
5453 else if (arg1 == const1_rtx)
5454 return arg0;
5455
5456 switch (GET_CODE (arg0))
5457 {
5458 case REG:
5459 /* biv * invar. Done. */
5460 return gen_rtx (MULT, mode, arg0, arg1);
5461
5462 case CONST_INT:
5463 /* Product of two constants. */
5464 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5465
5466 case USE:
5467 /* invar * invar. Not giv. */
5468 return 0;
5469
5470 case MULT:
5471 /* (a * invar_1) * invar_2. Associate. */
5472 return simplify_giv_expr (gen_rtx (MULT, mode,
5473 XEXP (arg0, 0),
5474 gen_rtx (MULT, mode,
5475 XEXP (arg0, 1), arg1)),
5476 benefit);
5477
5478 case PLUS:
5479 /* (a + invar_1) * invar_2. Distribute. */
5480 return simplify_giv_expr (gen_rtx (PLUS, mode,
5481 gen_rtx (MULT, mode,
5482 XEXP (arg0, 0), arg1),
5483 gen_rtx (MULT, mode,
5484 XEXP (arg0, 1), arg1)),
5485 benefit);
5486
5487 default:
5488 abort ();
5489 }
5490
5491 case ASHIFT:
5492 /* Shift by constant is multiply by power of two. */
5493 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5494 return 0;
5495
5496 return simplify_giv_expr (gen_rtx (MULT, mode,
5497 XEXP (x, 0),
5498 GEN_INT ((HOST_WIDE_INT) 1
5499 << INTVAL (XEXP (x, 1)))),
5500 benefit);
5501
5502 case NEG:
5503 /* "-a" is "a * (-1)" */
5504 return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
5505 benefit);
5506
5507 case NOT:
5508 /* "~a" is "-a - 1". Silly, but easy. */
5509 return simplify_giv_expr (gen_rtx (MINUS, mode,
5510 gen_rtx (NEG, mode, XEXP (x, 0)),
5511 const1_rtx),
5512 benefit);
5513
5514 case USE:
5515 /* Already in proper form for invariant. */
5516 return x;
5517
5518 case REG:
5519 /* If this is a new register, we can't deal with it. */
5520 if (REGNO (x) >= max_reg_before_loop)
5521 return 0;
5522
5523 /* Check for biv or giv. */
5524 switch (reg_iv_type[REGNO (x)])
5525 {
5526 case BASIC_INDUCT:
5527 return x;
5528 case GENERAL_INDUCT:
5529 {
5530 struct induction *v = reg_iv_info[REGNO (x)];
5531
5532 /* Form expression from giv and add benefit. Ensure this giv
5533 can derive another and subtract any needed adjustment if so. */
5534 *benefit += v->benefit;
5535 if (v->cant_derive)
5536 return 0;
5537
5538 tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
5539 v->src_reg, v->mult_val),
5540 v->add_val);
5541 if (v->derive_adjustment)
5542 tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
5543 return simplify_giv_expr (tem, benefit);
5544 }
5545 }
5546
5547 /* Fall through to general case. */
5548 default:
5549 /* If invariant, return as USE (unless CONST_INT).
5550 Otherwise, not giv. */
5551 if (GET_CODE (x) == USE)
5552 x = XEXP (x, 0);
5553
5554 if (invariant_p (x) == 1)
5555 {
5556 if (GET_CODE (x) == CONST_INT)
5557 return x;
5558 else
5559 return gen_rtx (USE, mode, x);
5560 }
5561 else
5562 return 0;
5563 }
5564 }
5565 \f
5566 /* Help detect a giv that is calculated by several consecutive insns;
5567 for example,
5568 giv = biv * M
5569 giv = giv + A
5570 The caller has already identified the first insn P as having a giv as dest;
5571 we check that all other insns that set the same register follow
5572 immediately after P, that they alter nothing else,
5573 and that the result of the last is still a giv.
5574
5575 The value is 0 if the reg set in P is not really a giv.
5576 Otherwise, the value is the amount gained by eliminating
5577 all the consecutive insns that compute the value.
5578
5579 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5580 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5581
5582 The coefficients of the ultimate giv value are stored in
5583 *MULT_VAL and *ADD_VAL. */
5584
5585 static int
5586 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5587 add_val, mult_val)
5588 int first_benefit;
5589 rtx p;
5590 rtx src_reg;
5591 rtx dest_reg;
5592 rtx *add_val;
5593 rtx *mult_val;
5594 {
5595 int count;
5596 enum rtx_code code;
5597 int benefit;
5598 rtx temp;
5599 rtx set;
5600
5601 /* Indicate that this is a giv so that we can update the value produced in
5602 each insn of the multi-insn sequence.
5603
5604 This induction structure will be used only by the call to
5605 general_induction_var below, so we can allocate it on our stack.
5606 If this is a giv, our caller will replace the induct var entry with
5607 a new induction structure. */
5608 struct induction *v
5609 = (struct induction *) alloca (sizeof (struct induction));
5610 v->src_reg = src_reg;
5611 v->mult_val = *mult_val;
5612 v->add_val = *add_val;
5613 v->benefit = first_benefit;
5614 v->cant_derive = 0;
5615 v->derive_adjustment = 0;
5616
5617 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5618 reg_iv_info[REGNO (dest_reg)] = v;
5619
5620 count = n_times_set[REGNO (dest_reg)] - 1;
5621
5622 while (count > 0)
5623 {
5624 p = NEXT_INSN (p);
5625 code = GET_CODE (p);
5626
5627 /* If libcall, skip to end of call sequence. */
5628 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5629 p = XEXP (temp, 0);
5630
5631 if (code == INSN
5632 && (set = single_set (p))
5633 && GET_CODE (SET_DEST (set)) == REG
5634 && SET_DEST (set) == dest_reg
5635 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5636 add_val, mult_val))
5637 /* Giv created by equivalent expression. */
5638 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5639 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5640 add_val, mult_val))))
5641 && src_reg == v->src_reg)
5642 {
5643 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5644 benefit += libcall_benefit (p);
5645
5646 count--;
5647 v->mult_val = *mult_val;
5648 v->add_val = *add_val;
5649 v->benefit = benefit;
5650 }
5651 else if (code != NOTE)
5652 {
5653 /* Allow insns that set something other than this giv to a
5654 constant. Such insns are needed on machines which cannot
5655 include large constants in an insn and should not disqualify a giv. */
5656 if (code == INSN
5657 && (set = single_set (p))
5658 && SET_DEST (set) != dest_reg
5659 && CONSTANT_P (SET_SRC (set)))
5660 continue;
5661
5662 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5663 return 0;
5664 }
5665 }
5666
5667 return v->benefit;
5668 }
5669 \f
5670 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5671 represented by G1. If no such expression can be found, or it is clear that
5672 it cannot possibly be a valid address, 0 is returned.
5673
5674 To perform the computation, we note that
5675 G1 = a * v + b and
5676 G2 = c * v + d
5677 where `v' is the biv.
5678
5679 So G2 = (c/a) * G1 + (d - b*c/a) */
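/* (Derivation: from G1 = a * v + b we have v = (G1 - b) / a; substituting
   into G2 = c * v + d gives the expression above. The code below requires
   constant a, b and c with c % a == 0 so that both coefficients stay
   integral.)  */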
5680
5681 #ifdef ADDRESS_COST
5682 static rtx
5683 express_from (g1, g2)
5684 struct induction *g1, *g2;
5685 {
5686 rtx mult, add;
5687
5688 /* The value that G1 will be multiplied by must be a constant integer. Also,
5689 the only chance we have of getting a valid address is if b*c/a (see above
5690 for notation) is also an integer. */
5691 if (GET_CODE (g1->mult_val) != CONST_INT
5692 || GET_CODE (g2->mult_val) != CONST_INT
5693 || GET_CODE (g1->add_val) != CONST_INT
5694 || g1->mult_val == const0_rtx
5695 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5696 return 0;
5697
5698 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5699 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5700
5701 /* Form simplified final result. */
5702 if (mult == const0_rtx)
5703 return add;
5704 else if (mult == const1_rtx)
5705 mult = g1->dest_reg;
5706 else
5707 mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);
5708
5709 if (add == const0_rtx)
5710 return mult;
5711 else
5712 return gen_rtx (PLUS, g2->mode, mult, add);
5713 }
5714 #endif
5715 \f
5716 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5717 (either directly or via an address expression) a register used to represent
5718 G1. Set g2->new_reg to a representation of G1 (normally just
5719 g1->dest_reg). */
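/* Hypothetical case: if G1 computes 4*i and G2 computes 4*i + 4 as a
   memory address, express_from yields (plus G1 4); if that is a valid
   address costing no more than the original, G2 is rewritten in terms of
   G1's register.  */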
5720
5721 static int
5722 combine_givs_p (g1, g2)
5723 struct induction *g1, *g2;
5724 {
5725 rtx tem;
5726
5727 /* If these givs are identical, they can be combined. */
5728 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5729 && rtx_equal_p (g1->add_val, g2->add_val))
5730 {
5731 g2->new_reg = g1->dest_reg;
5732 return 1;
5733 }
5734
5735 #ifdef ADDRESS_COST
5736 /* If G2 can be expressed as a function of G1 and that function is valid
5737 as an address and no more expensive than using a register for G2,
5738 the expression of G2 in terms of G1 can be used. */
5739 if (g2->giv_type == DEST_ADDR
5740 && (tem = express_from (g1, g2)) != 0
5741 && memory_address_p (g2->mem_mode, tem)
5742 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5743 {
5744 g2->new_reg = tem;
5745 return 1;
5746 }
5747 #endif
5748
5749 return 0;
5750 }
5751 \f
5752 #ifdef GIV_SORT_CRITERION
5753 /* Compare two givs and sort the most desirable one for combinations first.
5754 This is used only in one qsort call below. */
5755
5756 static int
5757 giv_sort (x, y)
5758 struct induction **x, **y;
5759 {
5760 GIV_SORT_CRITERION (*x, *y);
5761
5762 return 0;
5763 }
5764 #endif
5765
5766 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5767 any other. If so, point SAME to the giv combined with and set NEW_REG to
5768 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5769 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5770
5771 static void
5772 combine_givs (bl)
5773 struct iv_class *bl;
5774 {
5775 struct induction *g1, *g2, **giv_array, *temp_iv;
5776 int i, j, giv_count, pass;
5777
5778 /* Count givs, because bl->giv_count is incorrect here. */
5779 giv_count = 0;
5780 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5781 giv_count++;
5782
5783 giv_array
5784 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5785 i = 0;
5786 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5787 giv_array[i++] = g1;
5788
5789 #ifdef GIV_SORT_CRITERION
5790 /* Sort the givs if GIV_SORT_CRITERION is defined.
5791 This is usually defined for processors which lack
5792 negative register offsets so more givs may be combined. */
5793
5794 if (loop_dump_stream)
5795 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5796
5797 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5798 #endif
5799
5800 for (i = 0; i < giv_count; i++)
5801 {
5802 g1 = giv_array[i];
5803 for (pass = 0; pass <= 1; pass++)
5804 for (j = 0; j < giv_count; j++)
5805 {
5806 g2 = giv_array[j];
5807 if (g1 != g2
5808 /* First try to combine with replaceable givs, then all givs. */
5809 && (g1->replaceable || pass == 1)
5810 /* If either has already been combined or is to be ignored, can't
5811 combine. */
5812 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5813 /* If something has been based on G2, G2 cannot itself be based
5814 on something else. */
5815 && ! g2->combined_with
5816 && combine_givs_p (g1, g2))
5817 {
5818 /* g2->new_reg set by `combine_givs_p' */
5819 g2->same = g1;
5820 g1->combined_with = 1;
5821
5822 /* If one of these givs is a DEST_REG that was only used
5823 once, by the other giv, this is actually a single use.
5824 The DEST_REG has the correct cost, while the other giv
5825 counts the REG use too often. */
5826 if (g2->giv_type == DEST_REG
5827 && n_times_used[REGNO (g2->dest_reg)] == 1
5828 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5829 g1->benefit = g2->benefit;
5830 else if (g1->giv_type != DEST_REG
5831 || n_times_used[REGNO (g1->dest_reg)] != 1
5832 || ! reg_mentioned_p (g1->dest_reg,
5833 PATTERN (g2->insn)))
5834 {
5835 g1->benefit += g2->benefit;
5836 g1->times_used += g2->times_used;
5837 }
5838 /* ??? The new final_[bg]iv_value code does a much better job
5839 of finding replaceable giv's, and hence this code may no
5840 longer be necessary. */
5841 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5842 g1->benefit -= copy_cost;
5843 g1->lifetime += g2->lifetime;
5844
5845 if (loop_dump_stream)
5846 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5847 INSN_UID (g2->insn), INSN_UID (g1->insn));
5848 }
5849 }
5850 }
5851 }
5852 \f
5853 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5854
5855 void
5856 emit_iv_add_mult (b, m, a, reg, insert_before)
5857 rtx b; /* initial value of basic induction variable */
5858 rtx m; /* multiplicative constant */
5859 rtx a; /* additive constant */
5860 rtx reg; /* destination register */
5861 rtx insert_before;
5862 {
5863 rtx seq;
5864 rtx result;
5865
5866 /* Prevent unexpected sharing of these rtx. */
5867 a = copy_rtx (a);
5868 b = copy_rtx (b);
5869
5870 /* Increase the lifetime of any invariants moved further in code. */
5871 update_reg_last_use (a, insert_before);
5872 update_reg_last_use (b, insert_before);
5873 update_reg_last_use (m, insert_before);
5874
5875 start_sequence ();
5876 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5877 if (reg != result)
5878 emit_move_insn (reg, result);
5879 seq = gen_sequence ();
5880 end_sequence ();
5881
5882 emit_insn_before (seq, insert_before);
5883
5884 record_base_value (REGNO (reg), b);
5885 }
5886 \f
5887 /* Test whether A * B can be computed without
5888 an actual multiply insn. Value is 1 if so. */
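/* For instance (a target-dependent sketch): x * 5 usually expands to a
   shift plus an add, a short sequence, so it is cheap; a constant with no
   such shift-add decomposition expands to a real MULT and is judged
   expensive.  */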
5889
5890 static int
5891 product_cheap_p (a, b)
5892 rtx a;
5893 rtx b;
5894 {
5895 int i;
5896 rtx tmp;
5897 struct obstack *old_rtl_obstack = rtl_obstack;
5898 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5899 int win = 1;
5900
5901 /* If only one is constant, make it B. */
5902 if (GET_CODE (a) == CONST_INT)
5903 tmp = a, a = b, b = tmp;
5904
5905 /* If the first is constant, both are constant, so we don't need a multiply. */
5906 if (GET_CODE (a) == CONST_INT)
5907 return 1;
5908
5909 /* If the second is not constant, neither is, so we would need a multiply. */
5910 if (GET_CODE (b) != CONST_INT)
5911 return 0;
5912
5913 /* One operand is constant, so we might not need a multiply insn. Generate
5914 the code for the multiply and see whether a call, a multiply, or a long
5915 sequence of insns is generated. */
5916
5917 rtl_obstack = &temp_obstack;
5918 start_sequence ();
5919 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5920 tmp = gen_sequence ();
5921 end_sequence ();
5922
5923 if (GET_CODE (tmp) == SEQUENCE)
5924 {
5925 if (XVEC (tmp, 0) == 0)
5926 win = 1;
5927 else if (XVECLEN (tmp, 0) > 3)
5928 win = 0;
5929 else
5930 for (i = 0; i < XVECLEN (tmp, 0); i++)
5931 {
5932 rtx insn = XVECEXP (tmp, 0, i);
5933
5934 if (GET_CODE (insn) != INSN
5935 || (GET_CODE (PATTERN (insn)) == SET
5936 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
5937 || (GET_CODE (PATTERN (insn)) == PARALLEL
5938 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
5939 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
5940 {
5941 win = 0;
5942 break;
5943 }
5944 }
5945 }
5946 else if (GET_CODE (tmp) == SET
5947 && GET_CODE (SET_SRC (tmp)) == MULT)
5948 win = 0;
5949 else if (GET_CODE (tmp) == PARALLEL
5950 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
5951 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
5952 win = 0;
5953
5954 /* Free any storage we obtained in generating this multiply and restore rtl
5955 allocation to its normal obstack. */
5956 obstack_free (&temp_obstack, storage);
5957 rtl_obstack = old_rtl_obstack;
5958
5959 return win;
5960 }
5961 \f
5962 /* Check to see if loop can be terminated by a "decrement and branch until
5963 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
5964 Also try reversing an increment loop to a decrement loop
5965 to see if the optimization can be performed.
5966 Value is nonzero if optimization was performed. */
5967
5968 /* This is useful even if the architecture doesn't have such an insn,
5969 because it might change a loop which increments from 0 to n into a loop
5970 which decrements from n to 0. A loop that decrements to zero is usually
5971 faster than one that increments from zero. */
5972
5973 /* ??? This could be rewritten to use some of the loop unrolling procedures,
5974 such as approx_final_value, biv_total_increment, loop_iterations, and
5975 final_[bg]iv_value. */
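/* Sketch of the reversal (illustrative): a loop `for (i = 0; i < 100; i++)'
   whose counter is used only for counting becomes, in effect,
   `for (i = 99; i >= 0; i--)', so the exit test compares against zero and
   can fold into a decrement-and-branch insn.  */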
5976
5977 static int
5978 check_dbra_loop (loop_end, insn_count, loop_start)
5979 rtx loop_end;
5980 int insn_count;
5981 rtx loop_start;
5982 {
5983 struct iv_class *bl;
5984 rtx reg;
5985 rtx jump_label;
5986 rtx final_value;
5987 rtx start_value;
5988 rtx new_add_val;
5989 rtx comparison;
5990 rtx before_comparison;
5991 rtx p;
5992
5993 /* If last insn is a conditional branch, and the insn before tests a
5994 register value, try to optimize it. Otherwise, we can't do anything. */
5995
5996 comparison = get_condition_for_loop (PREV_INSN (loop_end));
5997 if (comparison == 0)
5998 return 0;
5999
6000 /* Check all of the bivs to see if the compare uses one of them.
6001 Skip biv's set more than once because we can't guarantee that
6002 it will be zero on the last iteration. Also skip if the biv is
6003 used between its update and the test insn. */
6004
6005 for (bl = loop_iv_list; bl; bl = bl->next)
6006 {
6007 if (bl->biv_count == 1
6008 && bl->biv->dest_reg == XEXP (comparison, 0)
6009 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6010 PREV_INSN (PREV_INSN (loop_end))))
6011 break;
6012 }
6013
6014 if (! bl)
6015 return 0;
6016
6017 /* Look for the case where the basic induction variable is always
6018 nonnegative, and equals zero on the last iteration.
6019 In this case, add a reg_note REG_NONNEG, which allows the
6020 m68k DBRA instruction to be used. */
6021
6022 if (((GET_CODE (comparison) == GT
6023 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6024 && INTVAL (XEXP (comparison, 1)) == -1)
6025 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6026 && GET_CODE (bl->biv->add_val) == CONST_INT
6027 && INTVAL (bl->biv->add_val) < 0)
6028 {
6029 /* Initial value must be greater than 0, and
6030 init_val % -dec_value == 0 to ensure that it equals zero on
6031 the last iteration. */
6032
6033 if (GET_CODE (bl->initial_value) == CONST_INT
6034 && INTVAL (bl->initial_value) > 0
6035 && (INTVAL (bl->initial_value)
6036 % (-INTVAL (bl->biv->add_val))) == 0)
6037 {
6038 /* The register is always nonnegative; add a REG_NONNEG note to the branch. */
6039 REG_NOTES (PREV_INSN (loop_end))
6040 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
6041 REG_NOTES (PREV_INSN (loop_end)));
6042 bl->nonneg = 1;
6043
6044 return 1;
6045 }
6046
6047 /* If the decrement is 1 and the value was tested as >= 0 before
6048 the loop, then we can safely optimize. */
6049 for (p = loop_start; p; p = PREV_INSN (p))
6050 {
6051 if (GET_CODE (p) == CODE_LABEL)
6052 break;
6053 if (GET_CODE (p) != JUMP_INSN)
6054 continue;
6055
6056 before_comparison = get_condition_for_loop (p);
6057 if (before_comparison
6058 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6059 && GET_CODE (before_comparison) == LT
6060 && XEXP (before_comparison, 1) == const0_rtx
6061 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6062 && INTVAL (bl->biv->add_val) == -1)
6063 {
6064 REG_NOTES (PREV_INSN (loop_end))
6065 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
6066 REG_NOTES (PREV_INSN (loop_end)));
6067 bl->nonneg = 1;
6068
6069 return 1;
6070 }
6071 }
6072 }
6073 else if (num_mem_sets <= 1)
6074 {
6075 /* Try to change inc to dec, so can apply above optimization. */
6076 /* Can do this if:
6077 all registers modified are induction variables or invariant,
6078 all memory references have non-overlapping addresses
6079 (obviously true if only one write)
6080 allow 2 insns for the compare/jump at the end of the loop. */
6081 /* Also, we must avoid any instructions which use both the reversed
6082 biv and another biv. Such instructions will fail if the loop is
6083 reversed. We meet this condition by requiring that either
6084 no_use_except_counting is true, or else that there is only
6085 one biv. */
6086 int num_nonfixed_reads = 0;
6087 /* 1 if the iteration var is used only to count iterations. */
6088 int no_use_except_counting = 0;
6089 /* 1 if the loop has no memory store, or it has a single memory store
6090 which is reversible. */
6091 int reversible_mem_store = 1;
6092
6093 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6094 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6095 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6096
6097 if (bl->giv_count == 0
6098 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6099 {
6100 rtx bivreg = regno_reg_rtx[bl->regno];
6101
6102 /* If there are no givs for this biv, and the only exit is the
6103 fall through at the end of the loop, then
6104 see if perhaps there are no uses except to count. */
6105 no_use_except_counting = 1;
6106 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6107 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6108 {
6109 rtx set = single_set (p);
6110
6111 if (set && GET_CODE (SET_DEST (set)) == REG
6112 && REGNO (SET_DEST (set)) == bl->regno)
6113 /* An insn that sets the biv is okay. */
6114 ;
6115 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6116 || p == prev_nonnote_insn (loop_end))
6117 /* Don't bother about the end test. */
6118 ;
6119 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6120 /* Any other use of the biv is no good. */
6121 {
6122 no_use_except_counting = 0;
6123 break;
6124 }
6125 }
6126 }
6127
6128 /* If the loop has a single store, and the destination address is
6129 invariant, then we can't reverse the loop, because this address
6130 might then have the wrong value at loop exit.
6131 This would work if the source were invariant also; however, in that
6132 case, the insn should have been moved out of the loop. */
6133
6134 if (num_mem_sets == 1)
6135 reversible_mem_store
6136 = (! unknown_address_altered
6137 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6138
6139 /* This code only acts for innermost loops. Also it simplifies
6140 the memory address check by only reversing loops with
6141 zero or one memory access.
6142 Two memory accesses could involve parts of the same array,
6143 and that can't be reversed. */
6144
6145 if (num_nonfixed_reads <= 1
6146 && !loop_has_call
6147 && !loop_has_volatile
6148 && reversible_mem_store
6149 && (no_use_except_counting
6150 || ((bl->giv_count + bl->biv_count + num_mem_sets
6151 + num_movables + 2 == insn_count)
6152 && (bl == loop_iv_list && bl->next == 0))))
6153 {
6154 rtx tem;
6155
6156 /* Loop can be reversed. */
6157 if (loop_dump_stream)
6158 fprintf (loop_dump_stream, "Can reverse loop\n");
6159
6160 /* Now check other conditions:
6161 initial_value must be zero,
6162 final_value % add_val == 0, so that when reversed, the
6163 biv will be zero on the last iteration.
6164
6165 This test can probably be improved since +/- 1 in the constant
6166 can be obtained by changing LT to LE and vice versa; this is
6167 confusing. */
6168
6169 if (comparison && bl->initial_value == const0_rtx
6170 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6171 /* LE gets turned into LT */
6172 && GET_CODE (comparison) == LT
6173 && (INTVAL (XEXP (comparison, 1))
6174 % INTVAL (bl->biv->add_val)) == 0)
6175 {
6176 /* Register will always be nonnegative, with value
6177 0 on last iteration if loop reversed */
6178
6179 /* Save some info needed to produce the new insns. */
6180 reg = bl->biv->dest_reg;
6181 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6182 if (jump_label == pc_rtx)
6183 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6184 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6185
6186 final_value = XEXP (comparison, 1);
6187 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6188 - INTVAL (bl->biv->add_val));
6189
6190 /* Initialize biv to start_value before loop start.
6191 The old initializing insn will be deleted as a
6192 dead store by flow.c. */
6193 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6194
6195 /* Add insn to decrement register, and delete insn
6196 that incremented the register. */
6197 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6198 bl->biv->insn);
6199 delete_insn (bl->biv->insn);
6200
6201 /* Update biv info to reflect its new status. */
6202 bl->biv->insn = p;
6203 bl->initial_value = start_value;
6204 bl->biv->add_val = new_add_val;
6205
6206 /* Inc LABEL_NUSES so that delete_insn will
6207 not delete the label. */
6208 LABEL_NUSES (XEXP (jump_label, 0))++;
6209
6210 /* Emit an insn after the end of the loop to set the biv's
6211 proper exit value if it is used anywhere outside the loop. */
6212 if ((REGNO_LAST_UID (bl->regno)
6213 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
6214 || ! bl->init_insn
6215 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6216 emit_insn_after (gen_move_insn (reg, final_value),
6217 loop_end);
6218
6219 /* Delete compare/branch at end of loop. */
6220 delete_insn (PREV_INSN (loop_end));
6221 delete_insn (PREV_INSN (loop_end));
6222
6223 /* Add new compare/branch insn at end of loop. */
6224 start_sequence ();
6225 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6226 GET_MODE (reg), 0, 0);
6227 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6228 tem = gen_sequence ();
6229 end_sequence ();
6230 emit_jump_insn_before (tem, loop_end);
6231
6232 for (tem = PREV_INSN (loop_end);
6233 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6234 ;
6235 if (tem)
6236 {
6237 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6238
6239 /* Increment of LABEL_NUSES done above. */
6240 /* Register is now always nonnegative,
6241 so add REG_NONNEG note to the branch. */
6242 REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
6243 REG_NOTES (tem));
6244 }
6245
6246 bl->nonneg = 1;
6247
6248 /* Mark that this biv has been reversed. Each giv which depends
6249 on this biv, and which is also live past the end of the loop
6250 will have to be fixed up. */
6251
6252 bl->reversed = 1;
6253
6254 if (loop_dump_stream)
6255 fprintf (loop_dump_stream,
6256 "Reversed loop and added reg_nonneg\n");
6257
6258 return 1;
6259 }
6260 }
6261 }
6262
6263 return 0;
6264 }
6265 \f
6266 /* Verify whether the biv BL appears to be eliminable,
6267 based on the insns in the loop that refer to it.
6268 LOOP_START is the first insn of the loop, and END is the end insn.
6269
6270 If ELIMINATE_P is non-zero, actually do the elimination.
6271
6272 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6273 determine whether invariant insns should be placed inside or at the
6274 start of the loop. */
6275
6276 static int
6277 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6278 struct iv_class *bl;
6279 rtx loop_start;
6280 rtx end;
6281 int eliminate_p;
6282 int threshold, insn_count;
6283 {
6284 rtx reg = bl->biv->dest_reg;
6285 rtx p;
6286
6287 /* Scan all insns in the loop, stopping if we find one that uses the
6288 biv in a way that we cannot eliminate. */
6289
6290 for (p = loop_start; p != end; p = NEXT_INSN (p))
6291 {
6292 enum rtx_code code = GET_CODE (p);
6293 rtx where = threshold >= insn_count ? loop_start : p;
6294
6295 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6296 && reg_mentioned_p (reg, PATTERN (p))
6297 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6298 {
6299 if (loop_dump_stream)
6300 fprintf (loop_dump_stream,
6301 "Cannot eliminate biv %d: biv used in insn %d.\n",
6302 bl->regno, INSN_UID (p));
6303 break;
6304 }
6305 }
6306
6307 if (p == end)
6308 {
6309 if (loop_dump_stream)
6310 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6311 bl->regno, eliminate_p ? "was" : "can be");
6312 return 1;
6313 }
6314
6315 return 0;
6316 }
6317 \f
6318 /* If the biv of class BL appears in X (part of the pattern of INSN),
6319 see if we can eliminate its use. If so, return 1. If not, return 0.
6320
6321 If the biv does not appear in X, return 1.
6322
6323 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6324 where extra insns should be added. Depending on how many items have been
6325 moved out of the loop, it will either be before INSN or at the start of
6326 the loop. */
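/* Example of the rewrite attempted below (illustrative): if the biv is
   tested by (LT biv (const_int 100)) and a giv G = 4*biv + 0 was reduced,
   the test can become (LT G (const_int 400)), i.e. the constant is mapped
   to arg * mult_val + add_val.  */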
6327
6328 static int
6329 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6330 rtx x, insn;
6331 struct iv_class *bl;
6332 int eliminate_p;
6333 rtx where;
6334 {
6335 enum rtx_code code = GET_CODE (x);
6336 rtx reg = bl->biv->dest_reg;
6337 enum machine_mode mode = GET_MODE (reg);
6338 struct induction *v;
6339 rtx arg, new, tem;
6340 int arg_operand;
6341 char *fmt;
6342 int i, j;
6343
6344 switch (code)
6345 {
6346 case REG:
6347 /* If we haven't already been able to do something with this BIV,
6348 we can't eliminate it. */
6349 if (x == reg)
6350 return 0;
6351 return 1;
6352
6353 case SET:
6354 /* If this sets the BIV, it is not a problem. */
6355 if (SET_DEST (x) == reg)
6356 return 1;
6357
6358 /* If this is an insn that defines a giv, it is also ok because
6359 it will go away when the giv is reduced. */
6360 for (v = bl->giv; v; v = v->next_iv)
6361 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6362 return 1;
6363
6364 #ifdef HAVE_cc0
6365 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6366 {
6367 /* Can replace with any giv that was reduced and
6368 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6369 Require a constant for MULT_VAL, so we know it's nonzero.
6370 ??? We disable this optimization to avoid potential
6371 overflows. */
6372
6373 for (v = bl->giv; v; v = v->next_iv)
6374 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6375 && v->add_val == const0_rtx
6376 && ! v->ignore && ! v->maybe_dead && v->always_computable
6377 && v->mode == mode
6378 && 0)
6379 {
6380 /* If the giv V had the auto-inc address optimization applied
6381 to it, and INSN occurs between the giv insn and the biv
6382 insn, then we must adjust the value used here.
6383 This is rare, so we don't bother to do so. */
6384 if (v->auto_inc_opt
6385 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6386 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6387 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6388 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6389 continue;
6390
6391 if (! eliminate_p)
6392 return 1;
6393
6394 /* If the giv has the opposite direction of change,
6395 then reverse the comparison. */
6396 if (INTVAL (v->mult_val) < 0)
6397 new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
6398 const0_rtx, v->new_reg);
6399 else
6400 new = v->new_reg;
6401
6402 /* We can probably test that giv's reduced reg. */
6403 if (validate_change (insn, &SET_SRC (x), new, 0))
6404 return 1;
6405 }
6406
6407 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6408 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6409 Require a constant for MULT_VAL, so we know it's nonzero.
6410 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6411 overflow problem. */
6412
6413 for (v = bl->giv; v; v = v->next_iv)
6414 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6415 && ! v->ignore && ! v->maybe_dead && v->always_computable
6416 && v->mode == mode
6417 && (GET_CODE (v->add_val) == SYMBOL_REF
6418 || GET_CODE (v->add_val) == LABEL_REF
6419 || GET_CODE (v->add_val) == CONST
6420 || (GET_CODE (v->add_val) == REG
6421 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6422 {
6423 /* If the giv V had the auto-inc address optimization applied
6424 to it, and INSN occurs between the giv insn and the biv
6425 insn, then we must adjust the value used here.
6426 This is rare, so we don't bother to do so. */
6427 if (v->auto_inc_opt
6428 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6429 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6430 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6431 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6432 continue;
6433
6434 if (! eliminate_p)
6435 return 1;
6436
6437 /* If the giv has the opposite direction of change,
6438 then reverse the comparison. */
6439 if (INTVAL (v->mult_val) < 0)
6440 new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
6441 v->new_reg);
6442 else
6443 new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
6444 copy_rtx (v->add_val));
6445
6446 /* Replace biv with the giv's reduced register. */
6447 update_reg_last_use (v->add_val, insn);
6448 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6449 return 1;
6450
6451 /* Insn doesn't support that constant or invariant. Copy it
6452 into a register (it will be a loop invariant.) */
6453 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6454
6455 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6456 where);
6457
6458 /* Substitute the new register for its invariant value in
6459 the compare expression. */
6460 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6461 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6462 return 1;
6463 }
6464 }
6465 #endif
6466 break;
6467
6468 case COMPARE:
6469 case EQ: case NE:
6470 case GT: case GE: case GTU: case GEU:
6471 case LT: case LE: case LTU: case LEU:
6472 /* See if either argument is the biv. */
6473 if (XEXP (x, 0) == reg)
6474 arg = XEXP (x, 1), arg_operand = 1;
6475 else if (XEXP (x, 1) == reg)
6476 arg = XEXP (x, 0), arg_operand = 0;
6477 else
6478 break;
6479
6480 if (CONSTANT_P (arg))
6481 {
6482 /* First try to replace with any giv that has constant positive
6483 mult_val and constant add_val. We might be able to support
6484 negative mult_val, but it seems complex to do it in general. */
6485
6486 for (v = bl->giv; v; v = v->next_iv)
6487 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6488 && (GET_CODE (v->add_val) == SYMBOL_REF
6489 || GET_CODE (v->add_val) == LABEL_REF
6490 || GET_CODE (v->add_val) == CONST
6491 || (GET_CODE (v->add_val) == REG
6492 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6493 && ! v->ignore && ! v->maybe_dead && v->always_computable
6494 && v->mode == mode)
6495 {
6496 /* If the giv V had the auto-inc address optimization applied
6497 to it, and INSN occurs between the giv insn and the biv
6498 insn, then we must adjust the value used here.
6499 This is rare, so we don't bother to do so. */
6500 if (v->auto_inc_opt
6501 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6502 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6503 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6504 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6505 continue;
6506
6507 if (! eliminate_p)
6508 return 1;
6509
6510 /* Replace biv with the giv's reduced reg. */
6511 XEXP (x, 1-arg_operand) = v->new_reg;
6512
6513 /* If all constants are actually constant integers and
6514 the derived constant can be directly placed in the COMPARE,
6515 do so. */
6516 if (GET_CODE (arg) == CONST_INT
6517 && GET_CODE (v->mult_val) == CONST_INT
6518 && GET_CODE (v->add_val) == CONST_INT
6519 && validate_change (insn, &XEXP (x, arg_operand),
6520 GEN_INT (INTVAL (arg)
6521 * INTVAL (v->mult_val)
6522 + INTVAL (v->add_val)), 0))
6523 return 1;
6524
6525 /* Otherwise, load it into a register. */
6526 tem = gen_reg_rtx (mode);
6527 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6528 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6529 return 1;
6530
6531 /* If that failed, put back the change we made above. */
6532 XEXP (x, 1-arg_operand) = reg;
6533 }
6534
6535 /* Look for giv with positive constant mult_val and nonconst add_val.
6536 Insert insns to calculate new compare value.
6537 ??? Turn this off due to possible overflow. */
6538
6539 for (v = bl->giv; v; v = v->next_iv)
6540 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6541 && ! v->ignore && ! v->maybe_dead && v->always_computable
6542 && v->mode == mode
6543 && 0)
6544 {
6545 rtx tem;
6546
6547 /* If the giv V had the auto-inc address optimization applied
6548 to it, and INSN occurs between the giv insn and the biv
6549 insn, then we must adjust the value used here.
6550 This is rare, so we don't bother to do so. */
6551 if (v->auto_inc_opt
6552 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6553 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6554 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6555 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6556 continue;
6557
6558 if (! eliminate_p)
6559 return 1;
6560
6561 tem = gen_reg_rtx (mode);
6562
6563 /* Replace biv with giv's reduced register. */
6564 validate_change (insn, &XEXP (x, 1 - arg_operand),
6565 v->new_reg, 1);
6566
6567 /* Compute value to compare against. */
6568 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6569 /* Use it in this insn. */
6570 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6571 if (apply_change_group ())
6572 return 1;
6573 }
6574 }
6575 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6576 {
6577 if (invariant_p (arg) == 1)
6578 {
6579 /* Look for giv with constant positive mult_val and nonconst
6580 add_val. Insert insns to compute new compare value.
6581 ??? Turn this off due to possible overflow. */
6582
6583 for (v = bl->giv; v; v = v->next_iv)
6584 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6585 && ! v->ignore && ! v->maybe_dead && v->always_computable
6586 && v->mode == mode
6587 && 0)
6588 {
6589 rtx tem;
6590
6591 /* If the giv V had the auto-inc address optimization applied
6592 to it, and INSN occurs between the giv insn and the biv
6593 insn, then we must adjust the value used here.
6594 This is rare, so we don't bother to do so. */
6595 if (v->auto_inc_opt
6596 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6597 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6598 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6599 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6600 continue;
6601
6602 if (! eliminate_p)
6603 return 1;
6604
6605 tem = gen_reg_rtx (mode);
6606
6607 /* Replace biv with giv's reduced register. */
6608 validate_change (insn, &XEXP (x, 1 - arg_operand),
6609 v->new_reg, 1);
6610
6611 /* Compute value to compare against. */
6612 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6613 tem, where);
6614 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6615 if (apply_change_group ())
6616 return 1;
6617 }
6618 }
6619
6620 /* This code has problems. Basically, when deciding whether we will
6621 eliminate BL, you can't know whether a particular giv
6622 of ARG will be reduced. If it isn't going to be reduced,
6623 we can't eliminate BL. We can try forcing it to be reduced,
6624 but that can generate poor code.
6625
6626 The problem is that the benefit of reducing TV, below, should
6627 be increased if BL can actually be eliminated, but this means
6628 we might have to do a topological sort of the order in which
6629 we try to process bivs. It doesn't seem worthwhile to do
6630 this sort of thing now. */
6631
6632 #if 0
6633 /* Otherwise the reg compared with had better be a biv. */
6634 if (GET_CODE (arg) != REG
6635 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6636 return 0;
6637
6638 /* Look for a pair of givs, one for each biv,
6639 with identical coefficients. */
6640 for (v = bl->giv; v; v = v->next_iv)
6641 {
6642 struct induction *tv;
6643
6644 if (v->ignore || v->maybe_dead || v->mode != mode)
6645 continue;
6646
6647 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6648 if (! tv->ignore && ! tv->maybe_dead
6649 && rtx_equal_p (tv->mult_val, v->mult_val)
6650 && rtx_equal_p (tv->add_val, v->add_val)
6651 && tv->mode == mode)
6652 {
6653 /* If the giv V had the auto-inc address optimization applied
6654 to it, and INSN occurs between the giv insn and the biv
6655 insn, then we must adjust the value used here.
6656 This is rare, so we don't bother to do so. */
6657 if (v->auto_inc_opt
6658 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6659 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6660 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6661 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6662 continue;
6663
6664 if (! eliminate_p)
6665 return 1;
6666
6667 /* Replace biv with its giv's reduced reg. */
6668 XEXP (x, 1-arg_operand) = v->new_reg;
6669 /* Replace other operand with the other giv's
6670 reduced reg. */
6671 XEXP (x, arg_operand) = tv->new_reg;
6672 return 1;
6673 }
6674 }
6675 #endif
6676 }
6677
6678 /* If we get here, the biv can't be eliminated. */
6679 return 0;
6680
6681 case MEM:
6682 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6683 biv is used in it, since it will be replaced. */
6684 for (v = bl->giv; v; v = v->next_iv)
6685 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6686 return 1;
6687 break;
6688 }
6689
6690 /* See if any subexpression fails elimination. */
6691 fmt = GET_RTX_FORMAT (code);
6692 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6693 {
6694 switch (fmt[i])
6695 {
6696 case 'e':
6697 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6698 eliminate_p, where))
6699 return 0;
6700 break;
6701
6702 case 'E':
6703 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6704 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6705 eliminate_p, where))
6706 return 0;
6707 break;
6708 }
6709 }
6710
6711 return 1;
6712 }
6713 \f
6714 /* Return nonzero if the last use of REG
6715 is in INSN or an insn following it in the same basic block. */
6716
6717 static int
6718 last_use_this_basic_block (reg, insn)
6719 rtx reg;
6720 rtx insn;
6721 {
6722 rtx n;
6723 for (n = insn;
6724 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6725 n = NEXT_INSN (n))
6726 {
6727 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6728 return 1;
6729 }
6730 return 0;
6731 }
6732 \f
6733 /* Called via `note_stores' to record the initial value of a biv. Here we
6734 just record the location of the set and process it later. */
6735
6736 static void
6737 record_initial (dest, set)
6738 rtx dest;
6739 rtx set;
6740 {
6741 struct iv_class *bl;
6742
6743 if (GET_CODE (dest) != REG
6744 || REGNO (dest) >= max_reg_before_loop
6745 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6746 return;
6747
6748 bl = reg_biv_class[REGNO (dest)];
6749
6750 /* If this is the first set found, record it. */
6751 if (bl->init_insn == 0)
6752 {
6753 bl->init_insn = note_insn;
6754 bl->init_set = set;
6755 }
6756 }
6757 \f
6758 /* If any of the registers in X are "old" and currently have a last use earlier
6759 than INSN, update them to have a last use of INSN. Their actual last use
6760 will be the previous insn but it will not have a valid uid_luid so we can't
6761 use it. */
6762
6763 static void
6764 update_reg_last_use (x, insn)
6765 rtx x;
6766 rtx insn;
6767 {
6768 /* Check for the case where INSN does not have a valid luid. In this case,
6769 there is no need to modify the regno_last_uid, as this can only happen
6770 when code is inserted after the loop_end to set a pseudo's final value,
6771 and hence this insn will never be the last use of x. */
6772 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6773 && INSN_UID (insn) < max_uid_for_loop
6774 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6775 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6776 else
6777 {
6778 register int i, j;
6779 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6780 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6781 {
6782 if (fmt[i] == 'e')
6783 update_reg_last_use (XEXP (x, i), insn);
6784 else if (fmt[i] == 'E')
6785 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6786 update_reg_last_use (XVECEXP (x, i, j), insn);
6787 }
6788 }
6789 }
6790 \f
6791 /* Given a jump insn JUMP, return the condition that will cause it to branch
6792 to its JUMP_LABEL. If the condition cannot be understood, or is an
6793 inequality floating-point comparison which needs to be reversed, 0 will
6794 be returned.
6795
6796 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6797 insn used in locating the condition was found. If a replacement test
6798 of the condition is desired, it should be placed in front of that
6799 insn and we will be sure that the inputs are still valid.
6800
6801 The condition will be returned in a canonical form to simplify testing by
6802 callers. Specifically:
6803
6804 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6805 (2) Both operands will be machine operands; (cc0) will have been replaced.
6806 (3) If an operand is a constant, it will be the second operand.
6807 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6808 for GE, GEU, and LEU. */
6809
6810 rtx
6811 get_condition (jump, earliest)
6812 rtx jump;
6813 rtx *earliest;
6814 {
6815 enum rtx_code code;
6816 rtx prev = jump;
6817 rtx set;
6818 rtx tem;
6819 rtx op0, op1;
6820 int reverse_code = 0;
6821 int did_reverse_condition = 0;
6822
6823 /* If this is not a standard conditional jump, we can't parse it. */
6824 if (GET_CODE (jump) != JUMP_INSN
6825 || ! condjump_p (jump) || simplejump_p (jump))
6826 return 0;
6827
6828 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6829 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6830 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6831
6832 if (earliest)
6833 *earliest = jump;
6834
6835 /* If this branches to JUMP_LABEL when the condition is false, reverse
6836 the condition. */
6837 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6838 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6839 code = reverse_condition (code), did_reverse_condition ^= 1;
6840
6841 /* If we are comparing a register with zero, see if the register is set
6842 in the previous insn to a COMPARE or a comparison operation. Perform
6843 the same tests as a function of STORE_FLAG_VALUE that find_comparison_args
6844 does in cse.c. */
6845
6846 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
6847 {
6848 /* Set non-zero when we find something of interest. */
6849 rtx x = 0;
6850
6851 #ifdef HAVE_cc0
6852 /* If comparison with cc0, import actual comparison from compare
6853 insn. */
6854 if (op0 == cc0_rtx)
6855 {
6856 if ((prev = prev_nonnote_insn (prev)) == 0
6857 || GET_CODE (prev) != INSN
6858 || (set = single_set (prev)) == 0
6859 || SET_DEST (set) != cc0_rtx)
6860 return 0;
6861
6862 op0 = SET_SRC (set);
6863 op1 = CONST0_RTX (GET_MODE (op0));
6864 if (earliest)
6865 *earliest = prev;
6866 }
6867 #endif
6868
6869 /* If this is a COMPARE, pick up the two things being compared. */
6870 if (GET_CODE (op0) == COMPARE)
6871 {
6872 op1 = XEXP (op0, 1);
6873 op0 = XEXP (op0, 0);
6874 continue;
6875 }
6876 else if (GET_CODE (op0) != REG)
6877 break;
6878
6879 /* Go back to the previous insn. Stop if it is not an INSN. We also
6880 stop if it isn't a single set or if it has a REG_INC note because
6881 we don't want to bother dealing with it. */
6882
6883 if ((prev = prev_nonnote_insn (prev)) == 0
6884 || GET_CODE (prev) != INSN
6885 || FIND_REG_INC_NOTE (prev, 0)
6886 || (set = single_set (prev)) == 0)
6887 break;
6888
6889 /* If this is setting OP0, get what it sets it to if it looks
6890 relevant. */
6891 if (rtx_equal_p (SET_DEST (set), op0))
6892 {
6893 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
6894
6895 if ((GET_CODE (SET_SRC (set)) == COMPARE
6896 || (((code == NE
6897 || (code == LT
6898 && GET_MODE_CLASS (inner_mode) == MODE_INT
6899 && (GET_MODE_BITSIZE (inner_mode)
6900 <= HOST_BITS_PER_WIDE_INT)
6901 && (STORE_FLAG_VALUE
6902 & ((HOST_WIDE_INT) 1
6903 << (GET_MODE_BITSIZE (inner_mode) - 1))))
6904 #ifdef FLOAT_STORE_FLAG_VALUE
6905 || (code == LT
6906 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
6907 && FLOAT_STORE_FLAG_VALUE < 0)
6908 #endif
6909 ))
6910 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
6911 x = SET_SRC (set);
6912 else if (((code == EQ
6913 || (code == GE
6914 && (GET_MODE_BITSIZE (inner_mode)
6915 <= HOST_BITS_PER_WIDE_INT)
6916 && GET_MODE_CLASS (inner_mode) == MODE_INT
6917 && (STORE_FLAG_VALUE
6918 & ((HOST_WIDE_INT) 1
6919 << (GET_MODE_BITSIZE (inner_mode) - 1))))
6920 #ifdef FLOAT_STORE_FLAG_VALUE
6921 || (code == GE
6922 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
6923 && FLOAT_STORE_FLAG_VALUE < 0)
6924 #endif
6925 ))
6926 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
6927 {
6928 /* We might have reversed a LT to get a GE here. But this wasn't
6929 actually the comparison of data, so we don't flag that we
6930 have had to reverse the condition. */
6931 did_reverse_condition ^= 1;
6932 reverse_code = 1;
6933 x = SET_SRC (set);
6934 }
6935 else
6936 break;
6937 }
6938
6939 else if (reg_set_p (op0, prev))
6940 /* If this sets OP0, but not directly, we have to give up. */
6941 break;
6942
6943 if (x)
6944 {
6945 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
6946 code = GET_CODE (x);
6947 if (reverse_code)
6948 {
6949 code = reverse_condition (code);
6950 did_reverse_condition ^= 1;
6951 reverse_code = 0;
6952 }
6953
6954 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
6955 if (earliest)
6956 *earliest = prev;
6957 }
6958 }
6959
6960 /* If constant is first, put it last. */
6961 if (CONSTANT_P (op0))
6962 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
6963
6964 /* If OP0 is the result of a comparison, we weren't able to find what
6965 was really being compared, so fail. */
6966 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6967 return 0;
6968
/* Canonicalize any ordered comparison with an integer constant that
   involves equality (LE, GE, LEU, GEU) into the corresponding strict
   comparison, provided we can do the computation in the relevant mode
   without overflowing.  */
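/* For example (an illustrative note, not in the original comment): on
   a 32-bit target the signed test (x <= 4) becomes (x < 5) and the
   unsigned test (x >= 1U) becomes (x > 0U), while boundary cases such
   as (x <= 0x7fffffff) are left alone because adjusting the constant
   by 1 would overflow.  */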
6972
6973 if (GET_CODE (op1) == CONST_INT
6974 && GET_MODE (op0) != VOIDmode
6975 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
6976 {
6977 HOST_WIDE_INT const_val = INTVAL (op1);
6978 unsigned HOST_WIDE_INT uconst_val = const_val;
6979 unsigned HOST_WIDE_INT max_val
6980 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
6981
6982 switch (code)
6983 {
6984 case LE:
6985 if (const_val != max_val >> 1)
6986 code = LT, op1 = GEN_INT (const_val + 1);
6987 break;
6988
6989 case GE:
6990 if (const_val
6991 != (((HOST_WIDE_INT) 1
6992 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
6993 code = GT, op1 = GEN_INT (const_val - 1);
6994 break;
6995
6996 case LEU:
6997 if (uconst_val != max_val)
6998 code = LTU, op1 = GEN_INT (uconst_val + 1);
6999 break;
7000
7001 case GEU:
7002 if (uconst_val != 0)
7003 code = GTU, op1 = GEN_INT (uconst_val - 1);
7004 break;
7005 }
7006 }
7007
7008 /* If this was floating-point and we reversed anything other than an
7009 EQ or NE, return zero. */
7010 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7011 && did_reverse_condition && code != NE && code != EQ
7012 && ! flag_fast_math
7013 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7014 return 0;
7015
7016 #ifdef HAVE_cc0
7017 /* Never return CC0; return zero instead. */
7018 if (op0 == cc0_rtx)
7019 return 0;
7020 #endif
7021
7022 return gen_rtx (code, VOIDmode, op0, op1);
7023 }
7024
7025 /* Similar to above routine, except that we also put an invariant last
7026 unless both operands are invariants. */
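/* For instance (a hypothetical illustration): if get_condition returns
   (lt n i) where n is loop-invariant but i is not, the result is
   rewritten as (gt i n), so the invariant operand ends up last.  */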
7027
7028 rtx
7029 get_condition_for_loop (x)
7030 rtx x;
7031 {
7032 rtx comparison = get_condition (x, NULL_PTR);
7033
7034 if (comparison == 0
7035 || ! invariant_p (XEXP (comparison, 0))
7036 || invariant_p (XEXP (comparison, 1)))
7037 return comparison;
7038
7039 return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
7040 XEXP (comparison, 1), XEXP (comparison, 0));
7041 }
7042
7043 #ifdef HAIFA
/* Analyze a loop in order to instrument it with the use of a count
   register.  loop_start and loop_end are the first and last insns of
   the loop.  This function works in cooperation with insert_bct ().
   loop_can_insert_bct[loop_num] is set according to whether the
   optimization is applicable to the loop.  When it is applicable, the
   following variables are also set:
   loop_start_value[loop_num]
   loop_comparison_value[loop_num]
   loop_increment[loop_num]
   loop_comparison_code[loop_num] */
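/* As an illustrative sketch (example values, not from the original
   sources): for a source loop such as

       for (i = 0; i < n; i += 4)
         body ();

   the analysis below finds iteration_var == i and records
   loop_start_value 0, loop_comparison_value n, loop_increment 4 and
   loop_comparison_code LT - the shape the checks in this function
   accept.  */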
7054
static void
analyze_loop_iterations (loop_start, loop_end)
7057 rtx loop_start, loop_end;
7058 {
7059 rtx comparison, comparison_value;
7060 rtx iteration_var, initial_value, increment;
7061 enum rtx_code comparison_code;
7062
7063 rtx last_loop_insn;
7064 rtx insn;
7065 int i;
7066
/* Mode of the loop variable.  */
enum machine_mode original_mode;

/* Find the number of the loop.  */
int loop_num = loop_number (loop_start, loop_end);

/* Assume the loop cannot be instrumented until we are sure it can;
   this is only changed once all the checks below have passed.  */
loop_can_insert_bct[loop_num] = 0;

/* Is the optimization suppressed?  */
if (! flag_branch_on_count_reg)
return;
7079
7080 /* make sure that count-reg is not in use */
7081 if (loop_used_count_register[loop_num]){
7082 if (loop_dump_stream)
7083 fprintf (loop_dump_stream,
7084 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7085 loop_num);
7086 return;
7087 }
7088
7089 /* make sure that the function has no indirect jumps. */
7090 if (indirect_jump_in_function){
7091 if (loop_dump_stream)
7092 fprintf (loop_dump_stream,
7093 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7094 loop_num);
7095 return;
7096 }
7097
7098 /* make sure that the last loop insn is a conditional jump */
7099 last_loop_insn = PREV_INSN (loop_end);
7100 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7101 if (loop_dump_stream)
7102 fprintf (loop_dump_stream,
7103 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7104 loop_num);
7105 return;
7106 }
7107
7108 /* First find the iteration variable. If the last insn is a conditional
7109 branch, and the insn preceding it tests a register value, make that
7110 register the iteration variable. */
7111
7112 /* We used to use prev_nonnote_insn here, but that fails because it might
7113 accidentally get the branch for a contained loop if the branch for this
7114 loop was deleted. We can only trust branches immediately before the
7115 loop_end. */
7116
7117 comparison = get_condition_for_loop (last_loop_insn);
7118 /* ??? Get_condition may switch position of induction variable and
7119 invariant register when it canonicalizes the comparison. */
7120
7121 if (comparison == 0) {
7122 if (loop_dump_stream)
7123 fprintf (loop_dump_stream,
7124 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7125 loop_num);
7126 return;
7127 }
7128
7129 comparison_code = GET_CODE (comparison);
7130 iteration_var = XEXP (comparison, 0);
7131 comparison_value = XEXP (comparison, 1);
7132
7133 original_mode = GET_MODE (iteration_var);
7134 if (GET_MODE_CLASS (original_mode) != MODE_INT
7135 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7136 if (loop_dump_stream)
7137 fprintf (loop_dump_stream,
7138 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7139 loop_num);
7140 return;
7141 }
7142
7143 /* get info about loop bounds and increment */
7144 iteration_info (iteration_var, &initial_value, &increment,
7145 loop_start, loop_end);
7146
7147 /* make sure that all required loop data were found */
7148 if (!(initial_value && increment && comparison_value
7149 && invariant_p (comparison_value) && invariant_p (increment)
7150 && ! indirect_jump_in_function))
7151 {
7152 if (loop_dump_stream) {
7153 fprintf (loop_dump_stream,
7154 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7155 if (!(initial_value && increment && comparison_value)) {
7156 fprintf (loop_dump_stream, "\tbounds not available: ");
7157 if ( ! initial_value )
7158 fprintf (loop_dump_stream, "initial ");
7159 if ( ! increment )
7160 fprintf (loop_dump_stream, "increment ");
7161 if ( ! comparison_value )
7162 fprintf (loop_dump_stream, "comparison ");
7163 fprintf (loop_dump_stream, "\n");
7164 }
7165 if (!invariant_p (comparison_value) || !invariant_p (increment))
7166 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7167 }
7168 return;
7169 }
7170
7171 /* make sure that the increment is constant */
7172 if (GET_CODE (increment) != CONST_INT) {
7173 if (loop_dump_stream)
7174 fprintf (loop_dump_stream,
7175 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7176 loop_num);
7177 return;
7178 }
7179
/* Make sure that the loop contains neither a function call nor a
   jump table (the count register might be altered by the called
   function, or might be used for a table jump).  */
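/* For instance (an illustrative note, not part of the original
   comment): on PowerPC the count register CTR is call-clobbered and is
   also the register used by indirect and table jumps, so either kind
   of insn could destroy the BCT counter.  */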
7183 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7184 if (GET_CODE (insn) == CALL_INSN){
7185 if (loop_dump_stream)
7186 fprintf (loop_dump_stream,
7187 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7188 loop_num);
7189 return;
7190 }
7191
7192 if (GET_CODE (insn) == JUMP_INSN
7193 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7194 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7195 if (loop_dump_stream)
7196 fprintf (loop_dump_stream,
7197 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7198 loop_num);
7199 return;
7200 }
7201 }
7202
/* At this point, we are sure that the loop can be instrumented with
   BCT.  Some of the loops, however, will not be instrumented - the
   final decision is made by insert_bct ().  */
if (loop_dump_stream)
fprintf (loop_dump_stream,
"analyze_loop_iterations: loop %d can be BCT instrumented.\n",
loop_num);
7210
/* Mark this loop and all the loops enclosing it as unable to use the
   count register, since it is now reserved for this loop.  */
/* ???: In fact, since insert_bct may decide not to instrument this loop,
   marking here may prevent instrumenting an enclosing loop that could
   actually be instrumented.  But since this is rare, it is safer to mark
   here in case the order of calling (analyze/insert)_bct would be changed. */
7216 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7217 loop_used_count_register[i] = 1;
7218
/* Record the loop parameters that the instrumentation phase will use.  */
7220 loop_start_value[loop_num] = initial_value;
7221 loop_comparison_value[loop_num] = comparison_value;
7222 loop_increment[loop_num] = increment;
7223 loop_comparison_code[loop_num] = comparison_code;
7224 loop_can_insert_bct[loop_num] = 1;
7225 }
7226
7227
/* Instrument loop for insertion of a bct instruction.  We distinguish
   between loops with compile-time bounds and loops with run-time
   bounds.  The loop behavior is analyzed according to the following
   characteristics/variables:
   ; Input variables:
   ;   comparison-value: the value to which the iteration counter is compared.
   ;   initial-value: iteration-counter initial value.
   ;   increment: iteration-counter increment.
   ; Computed variables:
   ;   increment-direction: the sign of the increment.
   ;   compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE
   ;     (matching the switch below).
   ;   range-direction: sign (comparison-value - initial-value)
   We give up on the following cases:
   ; loop variable overflow.
   ; run-time loop bounds with comparison code NE.
   */
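/* Worked example (hypothetical values, not from the sources): for

       for (i = 10; i >= 2; i -= 2)

   increment-direction = -1, compare-direction = -1 (GE) and
   range-direction = sign (2 - 10) = -1, so the loop is a candidate;
   with add_iteration = 1 the iteration formula used below gives
   (8 + 2 - 1 + 1) / 2 = 5 iterations (i = 10, 8, 6, 4, 2).  An
   increasing counter tested with GE would instead be rejected as
   terminating only by overflow.  */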
7243
7244 static void
7245 insert_bct (loop_start, loop_end)
7246 rtx loop_start, loop_end;
7247 {
7248 rtx initial_value, comparison_value, increment;
7249 enum rtx_code comparison_code;
7250
7251 int increment_direction, compare_direction;
7252 int unsigned_p = 0;
7253
/* If the loop condition is <= or >=, the number of iterations is one
   more than the difference between the loop bounds.  */
int add_iteration = 0;

/* The only machine mode we work with: an integer of the size the
   machine naturally handles.  */
enum machine_mode loop_var_mode = SImode;
7261
7262 int loop_num = loop_number (loop_start, loop_end);
7263
7264 /* get loop-variables. No need to check that these are valid - already
7265 checked in analyze_loop_iterations (). */
7266 comparison_code = loop_comparison_code[loop_num];
7267 initial_value = loop_start_value[loop_num];
7268 comparison_value = loop_comparison_value[loop_num];
7269 increment = loop_increment[loop_num];
7270
/* Check analyze_loop_iterations' decision for this loop.  */
if (! loop_can_insert_bct[loop_num]) {
if (loop_dump_stream)
fprintf (loop_dump_stream,
"insert_bct: loop %d: analyze_loop_iterations () decided not to instrument it\n",
loop_num);
return;
}
7279
7280 /* make sure that the loop was not fully unrolled. */
7281 if (loop_unroll_factor[loop_num] == -1){
7282 if (loop_dump_stream)
7283 fprintf (loop_dump_stream, "insert_bct %d: was completely unrolled\n", loop_num);
7284 return;
7285 }
7286
/* Make sure that the last loop insn is a conditional jump.
   This check is repeated from analyze_loop_iterations (),
   because unrolling might have changed that.  */
7290 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7291 || !condjump_p (PREV_INSN (loop_end))) {
7292 if (loop_dump_stream)
7293 fprintf (loop_dump_stream,
7294 "insert_bct: not instrumenting BCT because of invalid branch\n");
7295 return;
7296 }
7297
/* Fix the increment in case the loop was unrolled.  */
if (loop_unroll_factor[loop_num] > 1)
increment = GEN_INT (INTVAL (increment) * loop_unroll_factor[loop_num]);
7301
/* Determine the properties and directions of the loop.  */
increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
switch (comparison_code) {
7305 case LEU:
7306 unsigned_p = 1;
7307 /* fallthrough */
7308 case LE:
7309 compare_direction = 1;
7310 add_iteration = 1;
7311 break;
7312 case GEU:
7313 unsigned_p = 1;
7314 /* fallthrough */
7315 case GE:
7316 compare_direction = -1;
7317 add_iteration = 1;
7318 break;
7319 case EQ:
7320 /* in this case we cannot know the number of iterations */
7321 if (loop_dump_stream)
7322 fprintf (loop_dump_stream,
7323 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7324 loop_num);
7325 return;
7326 case LTU:
7327 unsigned_p = 1;
7328 /* fallthrough */
7329 case LT:
7330 compare_direction = 1;
7331 break;
7332 case GTU:
7333 unsigned_p = 1;
7334 /* fallthrough */
7335 case GT:
7336 compare_direction = -1;
7337 break;
7338 case NE:
7339 compare_direction = 0;
7340 break;
7341 default:
7342 abort ();
7343 }
7344
7345
/* Give up if the loop can exit only through counter overflow, i.e.
   the counter moves away from the bound it is tested against.  ???
   Note that this test also rejects NE loops (compare_direction == 0)
   before the NE-specific handling further below is ever reached.  */
if (compare_direction != increment_direction) {
7348 if (loop_dump_stream)
7349 fprintf (loop_dump_stream,
7350 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7351 loop_num);
7352 return;
7353 }
7354
7355 /* try to instrument the loop. */
7356
7357 /* Handle the simpler case, where the bounds are known at compile time. */
7358 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7359 {
7360 int n_iterations;
7361 int increment_value_abs = INTVAL (increment) * increment_direction;
7362
7363 /* check the relation between compare-val and initial-val */
7364 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7365 int range_direction = (difference > 0) ? 1 : -1;
7366
/* Make sure the loop executes enough iterations to gain from BCT;
   |difference| < 3 is used as a cheap heuristic threshold.  */
if (difference > -3 && difference < 3) {
7369 if (loop_dump_stream)
7370 fprintf (loop_dump_stream,
7371 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7372 loop_num);
7373 return;
7374 }
7375
7376 /* make sure that the loop executes at least once */
7377 if ((range_direction == 1 && compare_direction == -1)
7378 || (range_direction == -1 && compare_direction == 1))
7379 {
7380 if (loop_dump_stream)
7381 fprintf (loop_dump_stream,
7382 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7383 loop_num);
7384 return;
7385 }
7386
/* Make sure that the loop does not end via counter overflow.  With
   compile-time bounds we need this additional check because here we
   also support the comparison code NE.  */
7390 if (comparison_code == NE
7391 && increment_direction != range_direction) {
7392 if (loop_dump_stream)
7393 fprintf (loop_dump_stream,
7394 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7395 loop_num);
7396 return;
7397 }
7398
7399 /* Determine the number of iterations by:
7400 ;
7401 ; compare-val - initial-val + (increment -1) + additional-iteration
7402 ; num_iterations = -----------------------------------------------------------------
7403 ; increment
7404 */
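/* E.g. (hypothetical numbers): for (i = 0; i <= 10; i += 3) we have
   difference = 10, add_iteration = 1 and increment = 3, so
   n_iterations = (10 + 3 - 1 + 1) / 3 = 4, matching the executions at
   i = 0, 3, 6 and 9.  */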
7405 difference = (range_direction > 0) ? difference : -difference;
7413
/* A zero increment should have been rejected long before this point;
   abort rather than divide by zero.  */
if (increment_value_abs == 0)
abort ();
7418 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7419 / increment_value_abs;
7420
7424 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7425
7426 /* Done with this loop. */
7427 return;
7428 }
7429
/* Handle the more complex case, where the bounds are NOT known at
   compile time.  In this case we generate a run-time calculation of
   the number of iterations.  */
7432
7433 /* With runtime bounds, if the compare is of the form '!=' we give up */
7434 if (comparison_code == NE) {
7435 if (loop_dump_stream)
7436 fprintf (loop_dump_stream,
7437 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7438 loop_num);
7439 return;
7440 }
7441
7442 else {
/* We rely on the existence of a run-time guard to ensure that the
   loop executes at least once.  */
7445 rtx sequence;
7446 rtx iterations_num_reg;
7447
7448 int increment_value_abs = INTVAL (increment) * increment_direction;
7449
/* Make sure that the increment is a power of two; otherwise an
   (expensive) divide would be needed.  */
7452 if (exact_log2 (increment_value_abs) == -1)
7453 {
7454 if (loop_dump_stream)
7455 fprintf (loop_dump_stream,
7456 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7457 return;
7458 }
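/* For example: an increment of 8 is accepted (exact_log2 (8) == 3, so
   the division below could in principle be a right shift by 3), while
   an increment of 6 gives exact_log2 (6) == -1 and the loop is
   rejected above.  */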
7459
7460 /* compute the number of iterations */
7461 start_sequence ();
7462 {
7463 /* CYGNUS LOCAL: HAIFA bug fix */
7464 rtx temp_reg;
7465
7466 /* Again, the number of iterations is calculated by:
7467 ;
7468 ; compare-val - initial-val + (increment -1) + additional-iteration
7469 ; num_iterations = -----------------------------------------------------------------
7470 ; increment
7471 */
7472 /* ??? Do we have to call copy_rtx here before passing rtx to
7473 expand_binop? */
7474 if (compare_direction > 0) {
/* <, <=: the loop variable is increasing.  */
7476 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7477 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7478 }
7479 else {
7480 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7481 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7482 }
7483
7484 if (increment_value_abs - 1 + add_iteration != 0)
7485 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7486 GEN_INT (increment_value_abs - 1 + add_iteration),
7487 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7488
7489 if (increment_value_abs != 1)
7490 {
7491 /* ??? This will generate an expensive divide instruction for
7492 most targets. The original authors apparently expected this
7493 to be a shift, since they test for power-of-2 divisors above,
7494 but just naively generating a divide instruction will not give
7495 a shift. It happens to work for the PowerPC target because
7496 the rs6000.md file has a divide pattern that emits shifts.
7497 It will probably not work for any other target. */
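/* A hedged, untested sketch of how the shift could be emitted
   directly (this is not part of the original code; lshr_optab is the
   logical-shift-right optab, and temp_reg should be non-negative here
   given the run-time guard noted above):

       iterations_num_reg
         = expand_binop (loop_var_mode, lshr_optab, temp_reg,
                         GEN_INT (exact_log2 (increment_value_abs)),
                         NULL_RTX, 0, OPTAB_LIB_WIDEN);
*/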
7498 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7499 temp_reg,
7500 GEN_INT (increment_value_abs),
7501 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7502 }
7503 else
7504 iterations_num_reg = temp_reg;
7505 /* END CYGNUS LOCAL: HAIFA bug fix */
7506 }
7507 sequence = gen_sequence ();
7508 end_sequence ();
7509 emit_insn_before (sequence, loop_start);
7510 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7511 }
7512 }
7513
/* Instrument loop by inserting a bct in it.  This is done in the
   following way:
   1. A new register is created and assigned the hard register number
      of the count register.
   2. In the head of the loop the new variable is initialized to the
      value passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is
      generated.  The created comparison follows the pattern defined
      for the decrement_and_branch_on_count insn, so this insn will be
      recognized by the assembly generation phase.
   4. The compare&branch on the old variable is deleted.  So, if the
      loop variable was not used elsewhere, it will be eliminated by
      data-flow analysis.  */
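/* Schematically (a hedged sketch, not literal RTL):

       i = init;                            ctr = n_iterations;
       do { body; i += inc; }        ==>    do { body; }
       while (i < limit);                   while (--ctr != 0);

   where ctr stands for the hard count register
   (COUNT_REGISTER_REGNUM).  */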
7525
7526 static void
7527 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7528 rtx loop_start, loop_end;
7529 rtx loop_num_iterations;
7530 {
7531 rtx temp_reg1, temp_reg2;
7532 rtx start_label;
7533
7534 rtx sequence;
7535 enum machine_mode loop_var_mode = SImode;
7536
7537 #ifdef HAVE_decrement_and_branch_on_count
7538 if (HAVE_decrement_and_branch_on_count)
7539 {
7540 if (loop_dump_stream)
7541 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7542
/* Eliminate the check on the old variable: delete the branch at the
   end of the loop, then the insn before it that set the condition.  */
delete_insn (PREV_INSN (loop_end));
delete_insn (PREV_INSN (loop_end));
7546
7547 /* insert the label which will delimit the start of the loop */
7548 start_label = gen_label_rtx ();
7549 emit_label_after (start_label, loop_start);
7550
7551 /* insert initialization of the count register into the loop header */
7552 start_sequence ();
7553 temp_reg1 = gen_reg_rtx (loop_var_mode);
7554 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7555
/* This will be the count register.  */
temp_reg2 = gen_rtx (REG, loop_var_mode, COUNT_REGISTER_REGNUM);
/* We have to move the value to the count register from a GPR because
   the rtx pointed to by loop_num_iterations may contain an expression
   that cannot be moved into the count register directly.  */
emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7562
7563 sequence = gen_sequence ();
7564 end_sequence ();
7565 emit_insn_after (sequence, loop_start);
7566
/* Insert a new comparison on the count register instead of the old
   one, generating the needed BCT pattern (which will later be
   recognized by the assembly generation phase).  */
7570 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7571 loop_end);
7572 LABEL_NUSES (start_label)++;
7573 }
7574
7575 #endif /* HAVE_decrement_and_branch_on_count */
7576 }
7577
/* Return the number of the loop delimited by LOOP_START and LOOP_END
   (a loop-table index, not a uid).  */
7579 int
7580 loop_number (loop_start, loop_end)
7581 rtx loop_start, loop_end;
7582 {
7583 int loop_num = -1;
7584
/* Assume that loop_start contains the loop-begin note, so that its
   mapping will not be changed by loop unrolling.  */
7587 loop_num = uid_loop_num[INSN_UID (loop_start)];
7588 /* sanity check - should never happen */
7589 if (loop_num == -1)
7590 abort ();
7591
7592 return loop_num;
7593 }
7594 #endif /* HAIFA */
7595
/* Scan the function and determine whether it has indirect (computed)
   jumps; return nonzero if so.
7597
7598 This is taken mostly from flow.c; similar code exists elsewhere
7599 in the compiler. It may be useful to put this into rtlanal.c. */
7600 static int
7601 indirect_jump_in_function_p (start)
7602 rtx start;
7603 {
rtx insn;

for (insn = start; insn; insn = NEXT_INSN (insn))
if (computed_jump_p (insn))
return 1;

return 0;
}
7611 /* END CYGNUS LOCAL haifa */