loop.c (get_condition): Don't combine when either compare is MODE_CC.
/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
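
/* As an illustration (not from the original sources): the zero-extension
   rewrite described above turns a loop-body load such as

       (set (reg:SI 100) (zero_extend:SI (mem:QI addr)))

   into a single clearing of (reg:SI 100) placed before the loop plus

       (set (strict_low_part (subreg:QI (reg:SI 100) 0)) (mem:QI addr))

   inside the loop; see the STRICT_LOW_PART handling in scan_loop.  */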

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, determines whether any of its inner loops has used
   the count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */
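
/* For example (illustrative, not from the original sources): for a C loop
   "for (i = 0; i < n; i += 4)", loop_start_value would be const0_rtx,
   loop_increment (const_int 4), loop_comparison_value the rtx holding N,
   and loop_comparison_code might be LT.  */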

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
#endif  /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolling not attempted.
      1: not unrolled.
     -1: completely unrolled.
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                      /* A movable insn.  */
  rtx set_src;                   /* The expression this reg is set from.  */
  rtx set_dest;                  /* The destination of this SET.  */
  rtx dependencies;              /* When INSN is libcall, this is an EXPR_LIST
                                    of any registers used within the LIBCALL.  */
  int consec;                    /* Number of consecutive following insns
                                    that must be moved with this one.  */
  int regno;                     /* The register it sets.  */
  short lifetime;                /* lifetime of that register;
                                    may be adjusted when matching movables
                                    that load the same value are found.  */
  short savings;                 /* Number of insns we can move for this reg,
                                    including other movables that force this
                                    or match this one.  */
  unsigned int cond : 1;         /* 1 if only conditionally movable.  */
  unsigned int force : 1;        /* 1 means MUST move this insn.  */
  unsigned int global : 1;       /* 1 means reg is live outside this loop.  */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;         /* 1 inhibits further processing of this.  */

  unsigned int partial : 1;      /* 1 means this reg is used for zero-extending.
                                    In particular, moving it does not make it
                                    invariant.  */
  unsigned int move_insn : 1;    /* 1 means that we call emit_move_insn to
                                    load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;     /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;    /* Nonzero means it is a mode for a low part
                                    that we should avoid changing when clearing
                                    the rest of the reg.  */
  struct movable *match;         /* First entry for same value.  */
  struct movable *forces;        /* An insn that must be moved if this is.  */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO(());
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
#ifdef ADDRESS_COST
static rtx express_from PROTO((struct induction *, struct induction *));
#endif
static int combine_givs_p PROTO((struct induction *, struct induction *));
#ifdef GIV_SORT_CRITERION
static int giv_sort PROTO((struct induction **, struct induction **));
#endif
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));

#ifdef HAIFA
/* This is extern from unroll.c */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));

/* Two main functions for implementing bct:
   first - to be called before loop unrolling, and the second - after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
#endif  /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif  /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */
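
  /* For instance (illustrative, not from the original sources): insns
     with uids 40, 12 and 57 appearing in that order receive luids 1, 2
     and 3, and a line-number note between the last two would share luid
     2 with the insn before it.  */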

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
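
  /* Schematically (illustrative, not from the original sources), such a
     loop entered near the bottom looks like

             NOTE_INSN_LOOP_BEG
             (jump to TEST)
        TOP: loop body
       TEST: exit test; conditional jump back to TOP
             NOTE_INSN_LOOP_END

     so the first real insn found by the scan below is a JUMP_INSN.  */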

  for (p = NEXT_INSN (loop_start);
       p != end
       && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
       && (GET_CODE (p) != NOTE
           || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
               && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
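
  /* E.g. (illustrative, not from the original sources): with 30 non-fixed
     registers available and no calls in the loop, THRESHOLD is
     2 * (1 + 30) = 62; see the desirability test in move_movables.  */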

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is the case where two insns can be
                 combined as long as they are both in the loop, but we
                 move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the
                 address of a function being called.

                 Therefore, if this register is marked as being used
                 exactly once (which only happens when we are in a loop
                 with calls, a "large loop"), see if we can replace the
                 usage of this register with the source of this SET.  If
                 we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
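
              /* For instance (illustrative, not from the original
                 sources): if reg 200 is set here to (symbol_ref "f")
                 and its only other use is a following call through reg
                 200, the code below substitutes the symbol_ref into the
                 call and deletes this insn.  */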

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads leads, when it
     dies, right into another conditionally movable insn.  If so, record
     that the second insn "forces" the first one, since the second can
     be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
                 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually
     invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads leads, when it
   dies, right into another conditionally movable insn.  If so, record
   that the second insn "forces" the first one, since the second can be
   moved only if the first is.  */
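
/* For example (illustrative, not from the original sources): if insn A
   invariantly sets reg 100 and the conditionally movable insn B is reg
   100's last use, then B->forces is set to A and A inherits B's lifetime
   and savings, since moving B is only safe if A is moved too.  */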

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */
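
/* For example (illustrative, not from the original sources): if movable
   reg 100 has n_times_set[100] == -2 because it was set from
   (const_int 5), then (reg:SI 100) and (const_int 5) compare equal here
   even though rtx_equal_p would not consider them so.  */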

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);
  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
                                      movables) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
\f
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
         This is not necessary, since the tablejump references the same label.
         And if we did record them, flow.c would make worse code.  */
      if (next == 0
          || ! (GET_CODE (next) == JUMP_INSN
                && (GET_CODE (PATTERN (next)) == ADDR_VEC
                    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
        {
          for (insn = insns; insn; insn = NEXT_INSN (insn))
            if (reg_mentioned_p (XEXP (x, 0), insn))
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                    REG_NOTES (insn));
        }
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
1636 \f
1637 /* Scan MOVABLES, and move the insns that deserve to be moved.
1638 If two matching movables are combined, replace one reg with the
1639 other throughout. */
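/* For illustration (an informal sketch, not compiler code, with
   hypothetical register numbers): if two movables load the same
   invariant value, say

       (set (reg 101) (const_int 5))   and   (set (reg 102) (const_int 5))

   only the first is emitted before the loop; the second insn is deleted,
   reg_map[102] is set to (reg 101), and the substitution pass at the end
   of this function rewrites every use of reg 102 inside the loop to use
   reg 101.  */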
1640
1641 static void
1642 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1643 struct movable *movables;
1644 int threshold;
1645 int insn_count;
1646 rtx loop_start;
1647 rtx end;
1648 int nregs;
1649 {
1650 rtx new_start = 0;
1651 register struct movable *m;
1652 register rtx p;
1653 /* Map of pseudo-register replacements to handle combining
1654 when we move several insns that load the same value
1655 into different pseudo-registers. */
1656 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1657 char *already_moved = (char *) alloca (nregs);
1658
1659 bzero (already_moved, nregs);
1660 bzero ((char *) reg_map, nregs * sizeof (rtx));
1661
1662 num_movables = 0;
1663
1664 for (m = movables; m; m = m->next)
1665 {
1666 /* Describe this movable insn. */
1667
1668 if (loop_dump_stream)
1669 {
1670 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1671 INSN_UID (m->insn), m->regno, m->lifetime);
1672 if (m->consec > 0)
1673 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1674 if (m->cond)
1675 fprintf (loop_dump_stream, "cond ");
1676 if (m->force)
1677 fprintf (loop_dump_stream, "force ");
1678 if (m->global)
1679 fprintf (loop_dump_stream, "global ");
1680 if (m->done)
1681 fprintf (loop_dump_stream, "done ");
1682 if (m->move_insn)
1683 fprintf (loop_dump_stream, "move-insn ");
1684 if (m->match)
1685 fprintf (loop_dump_stream, "matches %d ",
1686 INSN_UID (m->match->insn));
1687 if (m->forces)
1688 fprintf (loop_dump_stream, "forces %d ",
1689 INSN_UID (m->forces->insn));
1690 }
1691
1692 /* Count movables. Value used in heuristics in strength_reduce. */
1693 num_movables++;
1694
1695 /* Ignore the insn if it's already done (it matched something else).
1696 Otherwise, see if it is now safe to move. */
1697
1698 if (!m->done
1699 && (! m->cond
1700 || (1 == invariant_p (m->set_src)
1701 && (m->dependencies == 0
1702 || 1 == invariant_p (m->dependencies))
1703 && (m->consec == 0
1704 || 1 == consec_sets_invariant_p (m->set_dest,
1705 m->consec + 1,
1706 m->insn))))
1707 && (! m->forces || m->forces->done))
1708 {
1709 register int regno;
1710 register rtx p;
1711 int savings = m->savings;
1712
1713 /* We have an insn that is safe to move.
1714 Compute its desirability. */
1715
1716 p = m->insn;
1717 regno = m->regno;
1718
1719 if (loop_dump_stream)
1720 fprintf (loop_dump_stream, "savings %d ", savings);
1721
1722 if (moved_once[regno])
1723 {
1724 insn_count *= 2;
1725
1726 if (loop_dump_stream)
1727 fprintf (loop_dump_stream, "halved since already moved ");
1728 }
1729
1730 /* An insn MUST be moved if we already moved something else
1731 which is safe only if this one is moved too: that is,
1732 if already_moved[REGNO] is nonzero. */
1733
1734 /* An insn is desirable to move if the new lifetime of the
1735 register is no more than THRESHOLD times the old lifetime.
1736 If it's not desirable, it means the loop is so big
1737 that moving won't speed things up much,
1738 and it is liable to make register usage worse. */
1739
1740 /* It is also desirable to move if it can be moved at no
1741 extra cost because something else was already moved. */
1742
1743 if (already_moved[regno]
1744 || flag_move_all_movables
1745 || (threshold * savings * m->lifetime) >= insn_count
1746 || (m->forces && m->forces->done
1747 && n_times_used[m->forces->regno] == 1))
1748 {
1749 int count;
1750 register struct movable *m1;
1751 rtx first;
1752
1753 /* Now move the insns that set the reg. */
1754
1755 if (m->partial && m->match)
1756 {
1757 rtx newpat, i1;
1758 rtx r1, r2;
1759 /* Find the end of this chain of matching regs.
1760 Thus, we load each reg in the chain from that one reg.
1761 And that reg is loaded with 0 directly,
1762 since it has ->match == 0. */
1763 for (m1 = m; m1->match; m1 = m1->match);
1764 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1765 SET_DEST (PATTERN (m1->insn)));
1766 i1 = emit_insn_before (newpat, loop_start);
1767
1768 /* Mark the moved, invariant reg as being allowed to
1769 share a hard reg with the other matching invariant. */
1770 REG_NOTES (i1) = REG_NOTES (m->insn);
1771 r1 = SET_DEST (PATTERN (m->insn));
1772 r2 = SET_DEST (PATTERN (m1->insn));
1773 regs_may_share
1774 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1775 gen_rtx_EXPR_LIST (VOIDmode, r2,
1776 regs_may_share));
1777 delete_insn (m->insn);
1778
1779 if (new_start == 0)
1780 new_start = i1;
1781
1782 if (loop_dump_stream)
1783 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1784 }
1785 /* If we are to re-generate the item being moved with a
1786 new move insn, first delete what we have and then emit
1787 the move insn before the loop. */
1788 else if (m->move_insn)
1789 {
1790 rtx i1, temp;
1791
1792 for (count = m->consec; count >= 0; count--)
1793 {
1794 /* If this is the first insn of a library call sequence,
1795 skip to the end. */
1796 if (GET_CODE (p) != NOTE
1797 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1798 p = XEXP (temp, 0);
1799
1800 /* If this is the last insn of a libcall sequence, then
1801 delete every insn in the sequence except the last.
1802 The last insn is handled in the normal manner. */
1803 if (GET_CODE (p) != NOTE
1804 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1805 {
1806 temp = XEXP (temp, 0);
1807 while (temp != p)
1808 temp = delete_insn (temp);
1809 }
1810
1811 p = delete_insn (p);
1812 while (p && GET_CODE (p) == NOTE)
1813 p = NEXT_INSN (p);
1814 }
1815
1816 start_sequence ();
1817 emit_move_insn (m->set_dest, m->set_src);
1818 temp = get_insns ();
1819 end_sequence ();
1820
1821 add_label_notes (m->set_src, temp);
1822
1823 i1 = emit_insns_before (temp, loop_start);
1824 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1825 REG_NOTES (i1)
1826 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1827 m->set_src, REG_NOTES (i1));
1828
1829 if (loop_dump_stream)
1830 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1831
1832 /* The more regs we move, the less we like moving them. */
1833 threshold -= 3;
1834 }
1835 else
1836 {
1837 for (count = m->consec; count >= 0; count--)
1838 {
1839 rtx i1, temp;
1840
1841 /* If first insn of libcall sequence, skip to end. */
1842 /* Do this at start of loop, since p is guaranteed to
1843 be an insn here. */
1844 if (GET_CODE (p) != NOTE
1845 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1846 p = XEXP (temp, 0);
1847
1848 /* If last insn of libcall sequence, move all
1849 insns except the last before the loop. The last
1850 insn is handled in the normal manner. */
1851 if (GET_CODE (p) != NOTE
1852 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1853 {
1854 rtx fn_address = 0;
1855 rtx fn_reg = 0;
1856 rtx fn_address_insn = 0;
1857
1858 first = 0;
1859 for (temp = XEXP (temp, 0); temp != p;
1860 temp = NEXT_INSN (temp))
1861 {
1862 rtx body;
1863 rtx n;
1864 rtx next;
1865
1866 if (GET_CODE (temp) == NOTE)
1867 continue;
1868
1869 body = PATTERN (temp);
1870
1871 /* Find the next insn after TEMP,
1872 not counting USE or NOTE insns. */
1873 for (next = NEXT_INSN (temp); next != p;
1874 next = NEXT_INSN (next))
1875 if (! (GET_CODE (next) == INSN
1876 && GET_CODE (PATTERN (next)) == USE)
1877 && GET_CODE (next) != NOTE)
1878 break;
1879
1880 /* If that is the call, this may be the insn
1881 that loads the function address.
1882
1883 Extract the function address from the insn
1884 that loads it into a register.
1885 If this insn was cse'd, we get incorrect code.
1886
1887 So emit a new move insn that copies the
1888 function address into the register that the
1889 call insn will use. flow.c will delete any
1890 redundant stores that we have created. */
1891 if (GET_CODE (next) == CALL_INSN
1892 && GET_CODE (body) == SET
1893 && GET_CODE (SET_DEST (body)) == REG
1894 && (n = find_reg_note (temp, REG_EQUAL,
1895 NULL_RTX)))
1896 {
1897 fn_reg = SET_SRC (body);
1898 if (GET_CODE (fn_reg) != REG)
1899 fn_reg = SET_DEST (body);
1900 fn_address = XEXP (n, 0);
1901 fn_address_insn = temp;
1902 }
1903 /* We have the call insn.
1904 If it uses the register we suspect it might,
1905 load it with the correct address directly. */
1906 if (GET_CODE (temp) == CALL_INSN
1907 && fn_address != 0
1908 && reg_referenced_p (fn_reg, body))
1909 emit_insn_after (gen_move_insn (fn_reg,
1910 fn_address),
1911 fn_address_insn);
1912
1913 if (GET_CODE (temp) == CALL_INSN)
1914 {
1915 i1 = emit_call_insn_before (body, loop_start);
1916 /* Because the USAGE information potentially
1917 contains objects other than hard registers
1918 we need to copy it. */
1919 if (CALL_INSN_FUNCTION_USAGE (temp))
1920 CALL_INSN_FUNCTION_USAGE (i1)
1921 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1922 }
1923 else
1924 i1 = emit_insn_before (body, loop_start);
1925 if (first == 0)
1926 first = i1;
1927 if (temp == fn_address_insn)
1928 fn_address_insn = i1;
1929 REG_NOTES (i1) = REG_NOTES (temp);
1930 delete_insn (temp);
1931 }
1932 }
1933 if (m->savemode != VOIDmode)
1934 {
1935 /* P sets REG to zero; but we should clear only
1936 the bits that are not covered by the mode
1937 m->savemode. */
1938 rtx reg = m->set_dest;
1939 rtx sequence;
1940 rtx tem;
1941
1942 start_sequence ();
1943 tem = expand_binop
1944 (GET_MODE (reg), and_optab, reg,
1945 GEN_INT ((((HOST_WIDE_INT) 1
1946 << GET_MODE_BITSIZE (m->savemode)))
1947 - 1),
1948 reg, 1, OPTAB_LIB_WIDEN);
1949 if (tem == 0)
1950 abort ();
1951 if (tem != reg)
1952 emit_move_insn (reg, tem);
1953 sequence = gen_sequence ();
1954 end_sequence ();
1955 i1 = emit_insn_before (sequence, loop_start);
1956 }
1957 else if (GET_CODE (p) == CALL_INSN)
1958 {
1959 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1960 /* Because the USAGE information potentially
1961 contains objects other than hard registers
1962 we need to copy it. */
1963 if (CALL_INSN_FUNCTION_USAGE (p))
1964 CALL_INSN_FUNCTION_USAGE (i1)
1965 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1966 }
1967 else if (count == m->consec && m->move_insn_first)
1968 {
1969 /* The SET_SRC might not be invariant, so we must
1970 use the REG_EQUAL note. */
1971 start_sequence ();
1972 emit_move_insn (m->set_dest, m->set_src);
1973 temp = get_insns ();
1974 end_sequence ();
1975
1976 add_label_notes (m->set_src, temp);
1977
1978 i1 = emit_insns_before (temp, loop_start);
1979 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1980 REG_NOTES (i1)
1981 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1982 : REG_EQUAL),
1983 m->set_src, REG_NOTES (i1));
1984 }
1985 else
1986 i1 = emit_insn_before (PATTERN (p), loop_start);
1987
1988 if (REG_NOTES (i1) == 0)
1989 {
1990 REG_NOTES (i1) = REG_NOTES (p);
1991
1992 /* If there is a REG_EQUAL note present whose value
1993 is not loop invariant, then delete it, since it
1994 may cause problems with later optimization passes.
1995 It is possible for cse to create such notes
1996 like this as a result of record_jump_cond. */
1997
1998 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1999 && ! invariant_p (XEXP (temp, 0)))
2000 remove_note (i1, temp);
2001 }
2002
2003 if (new_start == 0)
2004 new_start = i1;
2005
2006 if (loop_dump_stream)
2007 fprintf (loop_dump_stream, " moved to %d",
2008 INSN_UID (i1));
2009
2010 /* If library call, now fix the REG_NOTES that contain
2011 insn pointers, namely REG_LIBCALL on FIRST
2012 and REG_RETVAL on I1. */
2013 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2014 {
2015 XEXP (temp, 0) = first;
2016 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2017 XEXP (temp, 0) = i1;
2018 }
2019
2020 delete_insn (p);
2021 do p = NEXT_INSN (p);
2022 while (p && GET_CODE (p) == NOTE);
2023 }
2024
2025 /* The more regs we move, the less we like moving them. */
2026 threshold -= 3;
2027 }
2028
2029 /* Any other movable that loads the same register
2030 MUST be moved. */
2031 already_moved[regno] = 1;
2032
2033 /* This reg has been moved out of one loop. */
2034 moved_once[regno] = 1;
2035
2036 /* The reg set here is now invariant. */
2037 if (! m->partial)
2038 n_times_set[regno] = 0;
2039
2040 m->done = 1;
2041
2042 /* Change the length-of-life info for the register
2043 to say it lives at least the full length of this loop.
2044 This will help guide optimizations in outer loops. */
2045
2046 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2047 /* This is the old insn before all the moved insns.
2048 We can't use the moved insn because it is out of range
2049 in uid_luid. Only the old insns have luids. */
2050 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2051 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2052 REGNO_LAST_UID (regno) = INSN_UID (end);
2053
2054 /* Combine with this moved insn any other matching movables. */
2055
2056 if (! m->partial)
2057 for (m1 = movables; m1; m1 = m1->next)
2058 if (m1->match == m)
2059 {
2060 rtx temp;
2061
2062 /* Schedule the reg loaded by M1
2063 for replacement so that it shares the reg of M.
2064 If the modes differ (only possible in restricted
2065 circumstances), make a SUBREG. */
2066 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2067 reg_map[m1->regno] = m->set_dest;
2068 else
2069 reg_map[m1->regno]
2070 = gen_lowpart_common (GET_MODE (m1->set_dest),
2071 m->set_dest);
2072
2073 /* Get rid of the matching insn
2074 and prevent further processing of it. */
2075 m1->done = 1;
2076
2077 /* If library call, delete all insns except the last,
2078 which is deleted below. */
2079 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2080 NULL_RTX)))
2081 {
2082 for (temp = XEXP (temp, 0); temp != m1->insn;
2083 temp = NEXT_INSN (temp))
2084 delete_insn (temp);
2085 }
2086 delete_insn (m1->insn);
2087
2088 /* Any other movable that loads the same register
2089 MUST be moved. */
2090 already_moved[m1->regno] = 1;
2091
2092 /* The reg merged here is now invariant,
2093 if the reg it matches is invariant. */
2094 if (! m->partial)
2095 n_times_set[m1->regno] = 0;
2096 }
2097 }
2098 else if (loop_dump_stream)
2099 fprintf (loop_dump_stream, "not desirable");
2100 }
2101 else if (loop_dump_stream && !m->match)
2102 fprintf (loop_dump_stream, "not safe");
2103
2104 if (loop_dump_stream)
2105 fprintf (loop_dump_stream, "\n");
2106 }
2107
2108 if (new_start == 0)
2109 new_start = loop_start;
2110
2111 /* Go through all the instructions in the loop, making
2112 all the register substitutions scheduled in REG_MAP. */
2113 for (p = new_start; p != end; p = NEXT_INSN (p))
2114 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2115 || GET_CODE (p) == CALL_INSN)
2116 {
2117 replace_regs (PATTERN (p), reg_map, nregs, 0);
2118 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2119 INSN_CODE (p) = -1;
2120 }
2121 }
2122 \f
2123 #if 0
2124 /* Scan X and replace the address of any MEM in it with ADDR.
2125 REG is the address that MEM should have before the replacement. */
2126
2127 static void
2128 replace_call_address (x, reg, addr)
2129 rtx x, reg, addr;
2130 {
2131 register enum rtx_code code;
2132 register int i;
2133 register char *fmt;
2134
2135 if (x == 0)
2136 return;
2137 code = GET_CODE (x);
2138 switch (code)
2139 {
2140 case PC:
2141 case CC0:
2142 case CONST_INT:
2143 case CONST_DOUBLE:
2144 case CONST:
2145 case SYMBOL_REF:
2146 case LABEL_REF:
2147 case REG:
2148 return;
2149
2150 case SET:
2151 /* Short cut for very common case. */
2152 replace_call_address (XEXP (x, 1), reg, addr);
2153 return;
2154
2155 case CALL:
2156 /* Short cut for very common case. */
2157 replace_call_address (XEXP (x, 0), reg, addr);
2158 return;
2159
2160 case MEM:
2161 /* If this MEM uses a reg other than the one we expected,
2162 something is wrong. */
2163 if (XEXP (x, 0) != reg)
2164 abort ();
2165 XEXP (x, 0) = addr;
2166 return;
2167
2168 default:
2169 break;
2170 }
2171
2172 fmt = GET_RTX_FORMAT (code);
2173 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2174 {
2175 if (fmt[i] == 'e')
2176 replace_call_address (XEXP (x, i), reg, addr);
2177 if (fmt[i] == 'E')
2178 {
2179 register int j;
2180 for (j = 0; j < XVECLEN (x, i); j++)
2181 replace_call_address (XVECEXP (x, i, j), reg, addr);
2182 }
2183 }
2184 }
2185 #endif
2186 \f
2187 /* Return the number of memory refs to addresses that vary
2188 in the rtx X. */
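/* For illustration (informal, hypothetical register numbers): for

       (mem:SI (plus:SI (reg 100) (const_int 4)))

   the result is 0 when the address is loop-invariant (invariant_p of
   the PLUS returns 1) and 1 when reg 100 is set within the loop or is
   only conditionally invariant; a MEM whose address contains another
   varying MEM can count more than once.  */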
2189
2190 static int
2191 count_nonfixed_reads (x)
2192 rtx x;
2193 {
2194 register enum rtx_code code;
2195 register int i;
2196 register char *fmt;
2197 int value;
2198
2199 if (x == 0)
2200 return 0;
2201
2202 code = GET_CODE (x);
2203 switch (code)
2204 {
2205 case PC:
2206 case CC0:
2207 case CONST_INT:
2208 case CONST_DOUBLE:
2209 case CONST:
2210 case SYMBOL_REF:
2211 case LABEL_REF:
2212 case REG:
2213 return 0;
2214
2215 case MEM:
2216 return ((invariant_p (XEXP (x, 0)) != 1)
2217 + count_nonfixed_reads (XEXP (x, 0)));
2218
2219 default:
2220 break;
2221 }
2222
2223 value = 0;
2224 fmt = GET_RTX_FORMAT (code);
2225 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2226 {
2227 if (fmt[i] == 'e')
2228 value += count_nonfixed_reads (XEXP (x, i));
2229 if (fmt[i] == 'E')
2230 {
2231 register int j;
2232 for (j = 0; j < XVECLEN (x, i); j++)
2233 value += count_nonfixed_reads (XVECEXP (x, i, j));
2234 }
2235 }
2236 return value;
2237 }
2238
2239 \f
2240 #if 0
2241 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2242 Replace it with an instruction to load just the low bytes
2243 if the machine supports such an instruction,
2244 and insert above LOOP_START an instruction to clear the register. */
2245
2246 static void
2247 constant_high_bytes (p, loop_start)
2248 rtx p, loop_start;
2249 {
2250 register rtx new;
2251 register int insn_code_number;
2252
2253 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2254 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2255
2256 new = gen_rtx_SET (VOIDmode,
2257 gen_rtx_STRICT_LOW_PART (VOIDmode,
2258 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2259 SET_DEST (PATTERN (p)),
2260 0)),
2261 XEXP (SET_SRC (PATTERN (p)), 0));
2262 insn_code_number = recog (new, p);
2263
2264 if (insn_code_number)
2265 {
2266 register int i;
2267
2268 /* Clear destination register before the loop. */
2269 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2270 const0_rtx),
2271 loop_start);
2272
2273 /* Inside the loop, just load the low part. */
2274 PATTERN (p) = new;
2275 }
2276 }
2277 #endif
2278 \f
2279 /* Scan a loop setting the variables `unknown_address_altered',
2280 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2281 and `loop_has_volatile'.
2282 Also, fill in the array `loop_store_mems'. */
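/* For illustration (informal): a CALL_INSN that is not CONST_CALL_P
   sets both loop_has_call and unknown_address_altered; the latter
   makes invariant_p below reject every MEM in the loop that is not
   marked RTX_UNCHANGING_P (read-only).  */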
2283
2284 static void
2285 prescan_loop (start, end)
2286 rtx start, end;
2287 {
2288 register int level = 1;
2289 register rtx insn;
2290
2291 unknown_address_altered = 0;
2292 loop_has_call = 0;
2293 loop_has_volatile = 0;
2294 loop_store_mems_idx = 0;
2295
2296 num_mem_sets = 0;
2297 loops_enclosed = 1;
2298 loop_continue = 0;
2299
2300 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2301 insn = NEXT_INSN (insn))
2302 {
2303 if (GET_CODE (insn) == NOTE)
2304 {
2305 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2306 {
2307 ++level;
2308 /* Count number of loops contained in this one. */
2309 loops_enclosed++;
2310 }
2311 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2312 {
2313 --level;
2314 if (level == 0)
2315 {
2316 end = insn;
2317 break;
2318 }
2319 }
2320 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2321 {
2322 if (level == 1)
2323 loop_continue = insn;
2324 }
2325 }
2326 else if (GET_CODE (insn) == CALL_INSN)
2327 {
2328 if (! CONST_CALL_P (insn))
2329 unknown_address_altered = 1;
2330 loop_has_call = 1;
2331 }
2332 else
2333 {
2334 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2335 {
2336 if (volatile_refs_p (PATTERN (insn)))
2337 loop_has_volatile = 1;
2338
2339 note_stores (PATTERN (insn), note_addr_stored);
2340 }
2341 }
2342 }
2343 }
2344 \f
2345 /* Scan the function looking for loops. Record the start and end of each loop.
2346 Also mark as invalid loops any loops that contain a setjmp or are branched
2347 to from outside the loop. */
2348
2349 static void
2350 find_and_verify_loops (f)
2351 rtx f;
2352 {
2353 rtx insn, label;
2354 int current_loop = -1;
2355 int next_loop = -1;
2356 int loop;
2357
2358 /* If there are jumps to undefined labels,
2359 treat them as jumps out of any/all loops.
2360 This also avoids writing past end of tables when there are no loops. */
2361 uid_loop_num[0] = -1;
2362
2363 /* Find boundaries of loops, mark which loops are contained within
2364 loops, and invalidate loops that have setjmp. */
2365
2366 for (insn = f; insn; insn = NEXT_INSN (insn))
2367 {
2368 if (GET_CODE (insn) == NOTE)
2369 switch (NOTE_LINE_NUMBER (insn))
2370 {
2371 case NOTE_INSN_LOOP_BEG:
2372 loop_number_loop_starts[++next_loop] = insn;
2373 loop_number_loop_ends[next_loop] = 0;
2374 loop_outer_loop[next_loop] = current_loop;
2375 loop_invalid[next_loop] = 0;
2376 loop_number_exit_labels[next_loop] = 0;
2377 loop_number_exit_count[next_loop] = 0;
2378 current_loop = next_loop;
2379 break;
2380
2381 case NOTE_INSN_SETJMP:
2382 /* In this case, we must invalidate our current loop and any
2383 enclosing loop. */
2384 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2385 {
2386 loop_invalid[loop] = 1;
2387 if (loop_dump_stream)
2388 fprintf (loop_dump_stream,
2389 "\nLoop at %d ignored due to setjmp.\n",
2390 INSN_UID (loop_number_loop_starts[loop]));
2391 }
2392 break;
2393
2394 case NOTE_INSN_LOOP_END:
2395 if (current_loop == -1)
2396 abort ();
2397
2398 loop_number_loop_ends[current_loop] = insn;
2399 current_loop = loop_outer_loop[current_loop];
2400 break;
2401
2402 default:
2403 break;
2404 }
2405
2406 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2407 enclosing loop, but this doesn't matter. */
2408 uid_loop_num[INSN_UID (insn)] = current_loop;
2409 }
2410
2411 /* Any loop containing a label used in an initializer must be invalidated,
2412 because it can be jumped into from anywhere. */
2413
2414 for (label = forced_labels; label; label = XEXP (label, 1))
2415 {
2416 int loop_num;
2417
2418 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2419 loop_num != -1;
2420 loop_num = loop_outer_loop[loop_num])
2421 loop_invalid[loop_num] = 1;
2422 }
2423
2424 /* Any loop containing a label used for an exception handler must be
2425 invalidated, because it can be jumped into from anywhere. */
2426
2427 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2428 {
2429 int loop_num;
2430
2431 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2432 loop_num != -1;
2433 loop_num = loop_outer_loop[loop_num])
2434 loop_invalid[loop_num] = 1;
2435 }
2436
2437 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2438 loop that it is not contained within, that loop is marked invalid.
2439 If any INSN or CALL_INSN uses a label's address, then the loop containing
2440 that label is marked invalid, because it could be jumped into from
2441 anywhere.
2442
2443 Also look for blocks of code ending in an unconditional branch that
2444 exits the loop. If such a block is surrounded by a conditional
2445 branch around the block, move the block elsewhere (see below) and
2446 invert the jump to point to the code block. This may eliminate a
2447 label in our loop and will simplify processing by both us and a
2448 possible second cse pass. */
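/* For illustration, a schematic of that transformation (hypothetical
   labels):  before, inside the loop,

       p:      if (cond) goto L_cont;
       L_new:  goto L_exit;            (followed by a BARRIER)
       L_cont: ...

   and after, P is inverted and the exit block moved out of the loop:

       p:      if (! cond) goto L_new;
       L_cont: ...
       ...
       L_new:  goto L_exit;            (outside, after a BARRIER)

   The loop body is left with one fewer label and unconditional jump.  */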
2449
2450 for (insn = f; insn; insn = NEXT_INSN (insn))
2451 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2452 {
2453 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2454
2455 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2456 {
2457 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2458 if (note)
2459 {
2460 int loop_num;
2461
2462 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2463 loop_num != -1;
2464 loop_num = loop_outer_loop[loop_num])
2465 loop_invalid[loop_num] = 1;
2466 }
2467 }
2468
2469 if (GET_CODE (insn) != JUMP_INSN)
2470 continue;
2471
2472 mark_loop_jump (PATTERN (insn), this_loop_num);
2473
2474 /* See if this is an unconditional branch outside the loop. */
2475 if (this_loop_num != -1
2476 && (GET_CODE (PATTERN (insn)) == RETURN
2477 || (simplejump_p (insn)
2478 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2479 != this_loop_num)))
2480 && get_max_uid () < max_uid_for_loop)
2481 {
2482 rtx p;
2483 rtx our_next = next_real_insn (insn);
2484 int dest_loop;
2485 int outer_loop = -1;
2486
2487 /* Go backwards until we reach the start of the loop, a label,
2488 or a JUMP_INSN. */
2489 for (p = PREV_INSN (insn);
2490 GET_CODE (p) != CODE_LABEL
2491 && ! (GET_CODE (p) == NOTE
2492 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2493 && GET_CODE (p) != JUMP_INSN;
2494 p = PREV_INSN (p))
2495 ;
2496
2497 /* Check for the case where we have a jump to an inner nested
2498 loop, and do not perform the optimization in that case. */
2499
2500 if (JUMP_LABEL (insn))
2501 {
2502 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2503 if (dest_loop != -1)
2504 {
2505 for (outer_loop = dest_loop; outer_loop != -1;
2506 outer_loop = loop_outer_loop[outer_loop])
2507 if (outer_loop == this_loop_num)
2508 break;
2509 }
2510 }
2511
2512 /* Make sure that the target of P is within the current loop. */
2513
2514 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2515 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2516 outer_loop = this_loop_num;
2517
2518 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2519 we have a block of code to try to move.
2520
2521 We look backward and then forward from the target of INSN
2522 to find a BARRIER at the same loop depth as the target.
2523 If we find such a BARRIER, we make a new label for the start
2524 of the block, invert the jump in P and point it to that label,
2525 and move the block of code to the spot we found. */
2526
2527 if (outer_loop == -1
2528 && GET_CODE (p) == JUMP_INSN
2529 && JUMP_LABEL (p) != 0
2530 /* Just ignore jumps to labels that were never emitted.
2531 These always indicate compilation errors. */
2532 && INSN_UID (JUMP_LABEL (p)) != 0
2533 && condjump_p (p)
2534 && ! simplejump_p (p)
2535 && next_real_insn (JUMP_LABEL (p)) == our_next)
2536 {
2537 rtx target
2538 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2539 int target_loop_num = uid_loop_num[INSN_UID (target)];
2540 rtx loc;
2541
2542 for (loc = target; loc; loc = PREV_INSN (loc))
2543 if (GET_CODE (loc) == BARRIER
2544 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2545 break;
2546
2547 if (loc == 0)
2548 for (loc = target; loc; loc = NEXT_INSN (loc))
2549 if (GET_CODE (loc) == BARRIER
2550 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2551 break;
2552
2553 if (loc)
2554 {
2555 rtx cond_label = JUMP_LABEL (p);
2556 rtx new_label = get_label_after (p);
2557
2558 /* Ensure our label doesn't go away. */
2559 LABEL_NUSES (cond_label)++;
2560
2561 /* Verify that uid_loop_num is large enough and that
2562 we can invert P. */
2563 if (invert_jump (p, new_label))
2564 {
2565 rtx q, r;
2566
2567 /* If no suitable BARRIER was found, create a suitable
2568 one before TARGET. Since TARGET is a fall through
2569 path, we'll need to insert a jump around our block
2570 and add a BARRIER before TARGET.
2571
2572 This creates an extra unconditional jump outside
2573 the loop. However, the benefits of removing rarely
2574 executed instructions from inside the loop usually
2575 outweigh the cost of the extra unconditional jump
2576 outside the loop. */
2577 if (loc == 0)
2578 {
2579 rtx temp;
2580
2581 temp = gen_jump (JUMP_LABEL (insn));
2582 temp = emit_jump_insn_before (temp, target);
2583 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2584 LABEL_NUSES (JUMP_LABEL (insn))++;
2585 loc = emit_barrier_before (target);
2586 }
2587
2588 /* Include the BARRIER after INSN and copy the
2589 block after LOC. */
2590 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2591 reorder_insns (new_label, NEXT_INSN (insn), loc);
2592
2593 /* All those insns are now in TARGET_LOOP_NUM. */
2594 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2595 q = NEXT_INSN (q))
2596 uid_loop_num[INSN_UID (q)] = target_loop_num;
2597
2598 /* The label jumped to by INSN is no longer a loop exit.
2599 Unless INSN does not have a label (e.g., it is a
2600 RETURN insn), search loop_number_exit_labels to find
2601 its label_ref, and remove it. Also turn off
2602 LABEL_OUTSIDE_LOOP_P bit. */
2603 if (JUMP_LABEL (insn))
2604 {
2605 int loop_num;
2606
2607 for (q = 0,
2608 r = loop_number_exit_labels[this_loop_num];
2609 r; q = r, r = LABEL_NEXTREF (r))
2610 if (XEXP (r, 0) == JUMP_LABEL (insn))
2611 {
2612 LABEL_OUTSIDE_LOOP_P (r) = 0;
2613 if (q)
2614 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2615 else
2616 loop_number_exit_labels[this_loop_num]
2617 = LABEL_NEXTREF (r);
2618 break;
2619 }
2620
2621 for (loop_num = this_loop_num;
2622 loop_num != -1 && loop_num != target_loop_num;
2623 loop_num = loop_outer_loop[loop_num])
2624 loop_number_exit_count[loop_num]--;
2625
2626 /* If we didn't find it, then something is wrong. */
2627 if (! r)
2628 abort ();
2629 }
2630
2631 /* P is now a jump outside the loop, so it must be put
2632 in loop_number_exit_labels, and marked as such.
2633 The easiest way to do this is to just call
2634 mark_loop_jump again for P. */
2635 mark_loop_jump (PATTERN (p), this_loop_num);
2636
2637 /* If INSN now jumps to the insn after it,
2638 delete INSN. */
2639 if (JUMP_LABEL (insn) != 0
2640 && (next_real_insn (JUMP_LABEL (insn))
2641 == next_real_insn (insn)))
2642 delete_insn (insn);
2643 }
2644
2645 /* Continue the loop after where the conditional
2646 branch used to jump, since the only branch insn
2647 in the block (if it still remains) is an inter-loop
2648 branch and hence needs no processing. */
2649 insn = NEXT_INSN (cond_label);
2650
2651 if (--LABEL_NUSES (cond_label) == 0)
2652 delete_insn (cond_label);
2653
2654 /* This loop will be continued with NEXT_INSN (insn). */
2655 insn = PREV_INSN (insn);
2656 }
2657 }
2658 }
2659 }
2660 }
2661
2662 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2663 loops it is contained in, mark the target loop invalid.
2664
2665 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
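/* For illustration (informal, hypothetical label and register): a
   conditional branch such as

       (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                               (label_ref L) (pc)))

   is walked SET -> IF_THEN_ELSE -> LABEL_REF; if L exits LOOP_NUM,
   the LABEL_REF is chained onto loop_number_exit_labels and the exit
   counts of the loops being left are incremented.  */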
2666
2667 static void
2668 mark_loop_jump (x, loop_num)
2669 rtx x;
2670 int loop_num;
2671 {
2672 int dest_loop;
2673 int outer_loop;
2674 int i;
2675
2676 switch (GET_CODE (x))
2677 {
2678 case PC:
2679 case USE:
2680 case CLOBBER:
2681 case REG:
2682 case MEM:
2683 case CONST_INT:
2684 case CONST_DOUBLE:
2685 case RETURN:
2686 return;
2687
2688 case CONST:
2689 /* There could be a label reference in here. */
2690 mark_loop_jump (XEXP (x, 0), loop_num);
2691 return;
2692
2693 case PLUS:
2694 case MINUS:
2695 case MULT:
2696 mark_loop_jump (XEXP (x, 0), loop_num);
2697 mark_loop_jump (XEXP (x, 1), loop_num);
2698 return;
2699
2700 case SIGN_EXTEND:
2701 case ZERO_EXTEND:
2702 mark_loop_jump (XEXP (x, 0), loop_num);
2703 return;
2704
2705 case LABEL_REF:
2706 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2707
2708 /* Link together all labels that branch outside the loop. This
2709 is used by final_[bg]iv_value and the loop unrolling code. Also
2710 mark this LABEL_REF so we know that this branch should predict
2711 false. */
2712
2713 /* A check to make sure the label is not in an inner nested loop,
2714 since this does not count as a loop exit. */
2715 if (dest_loop != -1)
2716 {
2717 for (outer_loop = dest_loop; outer_loop != -1;
2718 outer_loop = loop_outer_loop[outer_loop])
2719 if (outer_loop == loop_num)
2720 break;
2721 }
2722 else
2723 outer_loop = -1;
2724
2725 if (loop_num != -1 && outer_loop == -1)
2726 {
2727 LABEL_OUTSIDE_LOOP_P (x) = 1;
2728 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2729 loop_number_exit_labels[loop_num] = x;
2730
2731 for (outer_loop = loop_num;
2732 outer_loop != -1 && outer_loop != dest_loop;
2733 outer_loop = loop_outer_loop[outer_loop])
2734 loop_number_exit_count[outer_loop]++;
2735 }
2736
2737 /* If this is inside a loop, but not in the current loop or one enclosed
2738 by it, it invalidates at least one loop. */
2739
2740 if (dest_loop == -1)
2741 return;
2742
2743 /* We must invalidate every nested loop containing the target of this
2744 label, except those that also contain the jump insn. */
2745
2746 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2747 {
2748 /* Stop when we reach a loop that also contains the jump insn. */
2749 for (outer_loop = loop_num; outer_loop != -1;
2750 outer_loop = loop_outer_loop[outer_loop])
2751 if (dest_loop == outer_loop)
2752 return;
2753
2754 /* If we get here, we know we need to invalidate a loop. */
2755 if (loop_dump_stream && ! loop_invalid[dest_loop])
2756 fprintf (loop_dump_stream,
2757 "\nLoop at %d ignored due to multiple entry points.\n",
2758 INSN_UID (loop_number_loop_starts[dest_loop]));
2759
2760 loop_invalid[dest_loop] = 1;
2761 }
2762 return;
2763
2764 case SET:
2765 /* If this is not setting pc, ignore. */
2766 if (SET_DEST (x) == pc_rtx)
2767 mark_loop_jump (SET_SRC (x), loop_num);
2768 return;
2769
2770 case IF_THEN_ELSE:
2771 mark_loop_jump (XEXP (x, 1), loop_num);
2772 mark_loop_jump (XEXP (x, 2), loop_num);
2773 return;
2774
2775 case PARALLEL:
2776 case ADDR_VEC:
2777 for (i = 0; i < XVECLEN (x, 0); i++)
2778 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2779 return;
2780
2781 case ADDR_DIFF_VEC:
2782 for (i = 0; i < XVECLEN (x, 1); i++)
2783 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2784 return;
2785
2786 default:
2787 /* Treat anything else (such as a symbol_ref)
2788 as a branch out of this loop, but not into any loop. */
2789
2790 if (loop_num != -1)
2791 {
2792 #ifdef HAIFA
2793 LABEL_OUTSIDE_LOOP_P (x) = 1;
2794 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2795 #endif /* HAIFA */
2796
2797 loop_number_exit_labels[loop_num] = x;
2798
2799 for (outer_loop = loop_num; outer_loop != -1;
2800 outer_loop = loop_outer_loop[outer_loop])
2801 loop_number_exit_count[outer_loop]++;
2802 }
2803 return;
2804 }
2805 }
2806 \f
2807 /* Return nonzero if there is a label in the range from
2808 insn INSN to and including the insn whose luid is END.
2809 INSN must have an assigned luid (i.e., it must not have
2810 been previously created by loop.c). */
2811
2812 static int
2813 labels_in_range_p (insn, end)
2814 rtx insn;
2815 int end;
2816 {
2817 while (insn && INSN_LUID (insn) <= end)
2818 {
2819 if (GET_CODE (insn) == CODE_LABEL)
2820 return 1;
2821 insn = NEXT_INSN (insn);
2822 }
2823
2824 return 0;
2825 }
2826
2827 /* Record that a memory reference X is being set. */
2828
2829 static void
2830 note_addr_stored (x, y)
2831 rtx x;
2832 rtx y ATTRIBUTE_UNUSED;
2833 {
2834 register int i;
2835
2836 if (x == 0 || GET_CODE (x) != MEM)
2837 return;
2838
2839 /* Count number of memory writes.
2840 This affects heuristics in strength_reduce. */
2841 num_mem_sets++;
2842
2843 /* BLKmode MEM means all memory is clobbered. */
2844 if (GET_MODE (x) == BLKmode)
2845 unknown_address_altered = 1;
2846
2847 if (unknown_address_altered)
2848 return;
2849
2850 for (i = 0; i < loop_store_mems_idx; i++)
2851 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2852 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2853 {
2854 /* We are storing at the same address as previously noted. Save the
2855 wider reference. */
2856 if (GET_MODE_SIZE (GET_MODE (x))
2857 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2858 loop_store_mems[i] = x;
2859 break;
2860 }
2861
2862 if (i == NUM_STORES)
2863 unknown_address_altered = 1;
2864
2865 else if (i == loop_store_mems_idx)
2866 loop_store_mems[loop_store_mems_idx++] = x;
2867 }
2868 \f
2869 /* Return nonzero if the rtx X is invariant over the current loop.
2870
2871 The value is 2 if we refer to something only conditionally invariant.
2872
2873 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2874 Otherwise, a memory ref is invariant if it does not conflict with
2875 anything stored in `loop_store_mems'. */
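/* For illustration (informal, hypothetical register number): for

       (plus:SI (reg 100) (const_int 4))

   the result is 1 if reg 100 is never set within the loop
   (n_times_set[100] == 0), 2 if reg 100 is itself a candidate for
   moving (n_times_set[100] < 0, hence only conditionally invariant),
   and 0 if reg 100 is set within the loop.  */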
2876
2877 int
2878 invariant_p (x)
2879 register rtx x;
2880 {
2881 register int i;
2882 register enum rtx_code code;
2883 register char *fmt;
2884 int conditional = 0;
2885
2886 if (x == 0)
2887 return 1;
2888 code = GET_CODE (x);
2889 switch (code)
2890 {
2891 case CONST_INT:
2892 case CONST_DOUBLE:
2893 case SYMBOL_REF:
2894 case CONST:
2895 return 1;
2896
2897 case LABEL_REF:
2898 /* A LABEL_REF is normally invariant, however, if we are unrolling
2899 loops, and this label is inside the loop, then it isn't invariant.
2900 This is because each unrolled copy of the loop body will have
2901 a copy of this label. If this was invariant, then an insn loading
2902 the address of this label into a register might get moved outside
2903 the loop, and then each loop body would end up using the same label.
2904
2905 We don't know the loop bounds here though, so just fail for all
2906 labels. */
2907 if (flag_unroll_loops)
2908 return 0;
2909 else
2910 return 1;
2911
2912 case PC:
2913 case CC0:
2914 case UNSPEC_VOLATILE:
2915 return 0;
2916
2917 case REG:
2918 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2919 since the reg might be set by initialization within the loop. */
2920
2921 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2922 || x == arg_pointer_rtx)
2923 && ! current_function_has_nonlocal_goto)
2924 return 1;
2925
2926 if (loop_has_call
2927 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2928 return 0;
2929
2930 if (n_times_set[REGNO (x)] < 0)
2931 return 2;
2932
2933 return n_times_set[REGNO (x)] == 0;
2934
2935 case MEM:
2936 /* Volatile memory references must be rejected. Do this before
2937 checking for read-only items, so that volatile read-only items
2938 will be rejected also. */
2939 if (MEM_VOLATILE_P (x))
2940 return 0;
2941
2942 /* Read-only items (such as constants in a constant pool) are
2943 invariant if their address is. */
2944 if (RTX_UNCHANGING_P (x))
2945 break;
2946
2947 /* If we filled the table (or had a subroutine call), any location
2948 in memory could have been clobbered. */
2949 if (unknown_address_altered)
2950 return 0;
2951
2952 /* See if there is any dependence between a store and this load. */
2953 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2954 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2955 return 0;
2956
2957 /* It's not invalidated by a store in memory
2958 but we must still verify the address is invariant. */
2959 break;
2960
2961 case ASM_OPERANDS:
2962 /* Don't mess with insns declared volatile. */
2963 if (MEM_VOLATILE_P (x))
2964 return 0;
2965 break;
2966
2967 default:
2968 break;
2969 }
2970
2971 fmt = GET_RTX_FORMAT (code);
2972 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2973 {
2974 if (fmt[i] == 'e')
2975 {
2976 int tem = invariant_p (XEXP (x, i));
2977 if (tem == 0)
2978 return 0;
2979 if (tem == 2)
2980 conditional = 1;
2981 }
2982 else if (fmt[i] == 'E')
2983 {
2984 register int j;
2985 for (j = 0; j < XVECLEN (x, i); j++)
2986 {
2987 int tem = invariant_p (XVECEXP (x, i, j));
2988 if (tem == 0)
2989 return 0;
2990 if (tem == 2)
2991 conditional = 1;
2992 }
2993
2994 }
2995 }
2996
2997 return 1 + conditional;
2998 }
2999
3000 \f
3001 /* Return nonzero if all the insns in the loop that set REG
3002 are INSN and the immediately following insns,
3003 and if each of those insns sets REG in an invariant way
3004 (not counting uses of REG in them).
3005
3006 The value is 2 if some of these insns are only conditionally invariant.
3007
3008 We assume that INSN itself is the first set of REG
3009 and that its source is invariant. */
3010
3011 static int
3012 consec_sets_invariant_p (reg, n_sets, insn)
3013 int n_sets;
3014 rtx reg, insn;
3015 {
3016 register rtx p = insn;
3017 register int regno = REGNO (reg);
3018 rtx temp;
3019 /* Number of sets we have to insist on finding after INSN. */
3020 int count = n_sets - 1;
3021 int old = n_times_set[regno];
3022 int value = 0;
3023 int this;
3024
3025 /* If N_SETS hit the limit, we can't rely on its value. */
3026 if (n_sets == 127)
3027 return 0;
3028
3029 n_times_set[regno] = 0;
3030
3031 while (count > 0)
3032 {
3033 register enum rtx_code code;
3034 rtx set;
3035
3036 p = NEXT_INSN (p);
3037 code = GET_CODE (p);
3038
3039 /* If library call, skip to end of it. */
3040 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3041 p = XEXP (temp, 0);
3042
3043 this = 0;
3044 if (code == INSN
3045 && (set = single_set (p))
3046 && GET_CODE (SET_DEST (set)) == REG
3047 && REGNO (SET_DEST (set)) == regno)
3048 {
3049 this = invariant_p (SET_SRC (set));
3050 if (this != 0)
3051 value |= this;
3052 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3053 {
3054 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3055 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3056 notes are OK. */
3057 this = (CONSTANT_P (XEXP (temp, 0))
3058 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3059 && invariant_p (XEXP (temp, 0))));
3060 if (this != 0)
3061 value |= this;
3062 }
3063 }
3064 if (this != 0)
3065 count--;
3066 else if (code != NOTE)
3067 {
3068 n_times_set[regno] = old;
3069 return 0;
3070 }
3071 }
3072
3073 n_times_set[regno] = old;
3074 /* If invariant_p ever returned 2, we return 2. */
3075 return 1 + (value & 2);
3076 }
3077
3078 #if 0
3079 /* I don't think this condition is sufficient to allow INSN
3080 to be moved, so we no longer test it. */
3081
3082 /* Return 1 if all insns in the basic block of INSN and following INSN
3083 that set REG are invariant according to TABLE. */
3084
3085 static int
3086 all_sets_invariant_p (reg, insn, table)
3087 rtx reg, insn;
3088 short *table;
3089 {
3090 register rtx p = insn;
3091 register int regno = REGNO (reg);
3092
3093 while (1)
3094 {
3095 register enum rtx_code code;
3096 p = NEXT_INSN (p);
3097 code = GET_CODE (p);
3098 if (code == CODE_LABEL || code == JUMP_INSN)
3099 return 1;
3100 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3101 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3102 && REGNO (SET_DEST (PATTERN (p))) == regno)
3103 {
3104 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3105 return 0;
3106 }
3107 }
3108 }
3109 #endif /* 0 */
3110 \f
3111 /* Look at all uses (not sets) of registers in X. For each, if it is
3112 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3113 a different insn, set USAGE[REGNO] to const0_rtx. */
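/* For illustration (informal): after the loop has been scanned,
   usage[R] is 0 if reg R is never used, the unique using insn if it
   is used in exactly one insn, and const0_rtx if two or more
   different insns use it.  */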
3114
3115 static void
3116 find_single_use_in_loop (insn, x, usage)
3117 rtx insn;
3118 rtx x;
3119 rtx *usage;
3120 {
3121 enum rtx_code code = GET_CODE (x);
3122 char *fmt = GET_RTX_FORMAT (code);
3123 int i, j;
3124
3125 if (code == REG)
3126 usage[REGNO (x)]
3127 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3128 ? const0_rtx : insn;
3129
3130 else if (code == SET)
3131 {
3132 /* Don't count SET_DEST if it is a REG; otherwise count things
3133 in SET_DEST because if a register is partially modified, it won't
3134 show up as a potential movable so we don't care how USAGE is set
3135 for it. */
3136 if (GET_CODE (SET_DEST (x)) != REG)
3137 find_single_use_in_loop (insn, SET_DEST (x), usage);
3138 find_single_use_in_loop (insn, SET_SRC (x), usage);
3139 }
3140 else
3141 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3142 {
3143 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3144 find_single_use_in_loop (insn, XEXP (x, i), usage);
3145 else if (fmt[i] == 'E')
3146 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3147 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3148 }
3149 }
3150 \f
3151 /* Increment N_TIMES_SET at the index of each register
3152 that is modified by an insn between FROM and TO.
3153 If the value of an element of N_TIMES_SET becomes 127 or more,
3154 stop incrementing it, to avoid overflow.
3155
3156 Store in SINGLE_USAGE[I] the single insn in which register I is
3157 used, if it is only used once. Otherwise, it is set to 0 (for no
3158 uses) or const0_rtx for more than one use. This parameter may be zero,
3159 in which case this processing is not done.
3160
3161 Store in *COUNT_PTR the number of actual instructions
3162 in the loop. We use this to decide what is worth moving out. */
3163
3164 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3165 In that case, it is the insn that last set reg n. */
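/* For illustration (informal, hypothetical register number): if reg
   100 is set once in each of two basic blocks of the loop, the second
   set is seen with n_times_set[100] > 0 but last_set[100] == 0, since
   last_set is cleared at the label between them; may_not_move[100] is
   then set and the register is never treated as a movable.  */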
3166
3167 static void
3168 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3169 register rtx from, to;
3170 char *may_not_move;
3171 rtx *single_usage;
3172 int *count_ptr;
3173 int nregs;
3174 {
3175 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3176 register rtx insn;
3177 register int count = 0;
3178 register rtx dest;
3179
3180 bzero ((char *) last_set, nregs * sizeof (rtx));
3181 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3182 {
3183 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3184 {
3185 ++count;
3186
3187 /* If requested, record registers that have exactly one use. */
3188 if (single_usage)
3189 {
3190 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3191
3192 /* Include uses in REG_EQUAL notes. */
3193 if (REG_NOTES (insn))
3194 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3195 }
3196
3197 if (GET_CODE (PATTERN (insn)) == CLOBBER
3198 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3199 /* Don't move a reg that has an explicit clobber.
3200 We might do so sometimes, but it's not worth the pain. */
3201 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3202
3203 if (GET_CODE (PATTERN (insn)) == SET
3204 || GET_CODE (PATTERN (insn)) == CLOBBER)
3205 {
3206 dest = SET_DEST (PATTERN (insn));
3207 while (GET_CODE (dest) == SUBREG
3208 || GET_CODE (dest) == ZERO_EXTRACT
3209 || GET_CODE (dest) == SIGN_EXTRACT
3210 || GET_CODE (dest) == STRICT_LOW_PART)
3211 dest = XEXP (dest, 0);
3212 if (GET_CODE (dest) == REG)
3213 {
3214 register int regno = REGNO (dest);
3215 /* If this is the first setting of this reg
3216 in current basic block, and it was set before,
3217 it must be set in two basic blocks, so it cannot
3218 be moved out of the loop. */
3219 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3220 may_not_move[regno] = 1;
3221 /* If this is not first setting in current basic block,
3222 see if reg was used in between previous one and this.
3223 If so, neither one can be moved. */
3224 if (last_set[regno] != 0
3225 && reg_used_between_p (dest, last_set[regno], insn))
3226 may_not_move[regno] = 1;
3227 if (n_times_set[regno] < 127)
3228 ++n_times_set[regno];
3229 last_set[regno] = insn;
3230 }
3231 }
3232 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3233 {
3234 register int i;
3235 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3236 {
3237 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3238 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3239 /* Don't move a reg that has an explicit clobber.
3240 It's not worth the pain to try to do it correctly. */
3241 may_not_move[REGNO (XEXP (x, 0))] = 1;
3242
3243 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3244 {
3245 dest = SET_DEST (x);
3246 while (GET_CODE (dest) == SUBREG
3247 || GET_CODE (dest) == ZERO_EXTRACT
3248 || GET_CODE (dest) == SIGN_EXTRACT
3249 || GET_CODE (dest) == STRICT_LOW_PART)
3250 dest = XEXP (dest, 0);
3251 if (GET_CODE (dest) == REG)
3252 {
3253 register int regno = REGNO (dest);
3254 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3255 may_not_move[regno] = 1;
3256 if (last_set[regno] != 0
3257 && reg_used_between_p (dest, last_set[regno], insn))
3258 may_not_move[regno] = 1;
3259 if (n_times_set[regno] < 127)
3260 ++n_times_set[regno];
3261 last_set[regno] = insn;
3262 }
3263 }
3264 }
3265 }
3266 }
3267
3268 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3269 bzero ((char *) last_set, nregs * sizeof (rtx));
3270 }
3271 *count_ptr = count;
3272 }
3273 \f
3274 /* Given a loop that is bounded by LOOP_START and LOOP_END
3275 and that is entered at SCAN_START,
3276 return 1 if the register set in SET contained in insn INSN is used by
3277 any insn that precedes INSN in cyclic order starting
3278 from the loop entry point.
3279
3280 We don't want to use INSN_LUID here because if we restrict INSN to those
3281 that have a valid INSN_LUID, it means we cannot move an invariant out
3282 from an inner loop past two loops. */
3283
3284 static int
3285 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3286 rtx set, insn, loop_start, scan_start, loop_end;
3287 {
3288 rtx reg = SET_DEST (set);
3289 rtx p;
3290
3291 /* Scan forward checking for register usage. If we hit INSN, we
3292 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3293 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3294 {
3295 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3296 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3297 return 1;
3298
3299 if (p == loop_end)
3300 p = loop_start;
3301 }
3302
3303 return 0;
3304 }
3305 \f
3306 /* A "basic induction variable" or biv is a pseudo reg that is set
3307 (within this loop) only by incrementing or decrementing it. */
3308 /* A "general induction variable" or giv is a pseudo reg whose
3309 value is a linear function of a biv. */
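/* For illustration (informal, at the source level): in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter `i' is a biv, since it is set only by the increment,
   while the address computation `a + 4*i' (assuming 4-byte elements)
   is a giv: a linear function mult * biv + add of the biv `i'.  */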
3310
3311 /* Bivs are recognized by `basic_induction_var';
3312 Givs by `general_induction_var'. */
3313
3314 /* Indexed by register number, indicates whether or not register is an
3315 induction variable, and if so what type. */
3316
3317 enum iv_mode *reg_iv_type;
3318
3319 /* Indexed by register number, contains pointer to `struct induction'
3320 if register is an induction variable. This holds general info for
3321 all induction variables. */
3322
3323 struct induction **reg_iv_info;
3324
3325 /* Indexed by register number, contains pointer to `struct iv_class'
3326 if register is a basic induction variable. This holds info describing
3327 the class (a related group) of induction variables that the biv belongs
3328 to. */
3329
3330 struct iv_class **reg_biv_class;
3331
3332 /* The head of a list which links together (via the next field)
3333 every iv class for the current loop. */
3334
3335 struct iv_class *loop_iv_list;
3336
3337 /* Communication with routines called via `note_stores'. */
3338
3339 static rtx note_insn;
3340
3341 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3342
3343 static rtx addr_placeholder;
3344
3345 /* ??? Unfinished optimizations, and possible future optimizations,
3346 for the strength reduction code. */
3347
3348 /* ??? There is one more optimization you might be interested in doing: to
3349 allocate pseudo registers for frequently-accessed memory locations.
3350 If the same memory location is referenced each time around, it might
3351 be possible to copy it into a register before and out after.
3352 This is especially useful when the memory location is a variable which
3353 lives in a stack slot because its address is taken somewhere. If the
3354 loop doesn't contain a function call and the variable isn't volatile,
3355 it is safe to keep the value in a register for the duration of the
3356 loop. One tricky thing is that the copying of the value back from the
3357 register has to be done on all exits from the loop. You need to check that
3358 all the exits from the loop go to the same place. */
3359
3360 /* ??? The interaction of biv elimination and recognition of 'constant'
3361 bivs may cause problems. */
3362
3363 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3364 performance problems.
3365
3366 Perhaps don't eliminate things that can be combined with an addressing
3367 mode. Find all givs that have the same biv, mult_val, and add_val;
3368 then for each giv, check to see if its only use dies in a following
3369 memory address. If so, generate a new memory address and check to see
3370 if it is valid. If it is valid, then store the modified memory address,
3371 otherwise, mark the giv as not done so that it will get its own iv. */
3372
3373 /* ??? Could try to optimize branches when it is known that a biv is always
3374 positive. */
3375
3376 /* ??? When replacing a biv in a compare insn, we should replace it with the
3377 closest giv so that an optimized branch can still be recognized by the combiner,
3378 e.g. the VAX acb insn. */
3379
3380 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3381 was rerun in loop_optimize whenever a register was added or moved.
3382 Also, some of the optimizations could be a little less conservative. */
3383 \f
3384 /* Perform strength reduction and induction variable elimination. */
3385
3386 /* Pseudo registers created during this function will be beyond the last
3387 valid index in several tables including n_times_set and regno_last_uid.
3388 This does not cause a problem here, because the added registers cannot be
3389 givs outside of their loop, and hence will never be reconsidered.
3390 But scan_loop must check regnos to make sure they are in bounds. */
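/* For illustration (informal, at the source level): strength
   reduction turns

       for (i = 0; i < n; i++) a[i] = 0;

   into the equivalent of

       for (i = 0, p = a; i < n; i++, p++) *p = 0;

   replacing the implicit multiply in `a + 4*i' with an addition;
   induction variable elimination can then remove `i' entirely by
   rewriting the loop test as `p < a + n'.  */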
3391
3392 static void
3393 strength_reduce (scan_start, end, loop_top, insn_count,
3394 loop_start, loop_end, unroll_p)
3395 rtx scan_start;
3396 rtx end;
3397 rtx loop_top;
3398 int insn_count;
3399 rtx loop_start;
3400 rtx loop_end;
3401 int unroll_p;
3402 {
3403 rtx p;
3404 rtx set;
3405 rtx inc_val;
3406 rtx mult_val;
3407 rtx dest_reg;
3408 /* This is 1 if current insn is not executed at least once for every loop
3409 iteration. */
3410 int not_every_iteration = 0;
3411 /* This is 1 if current insn may be executed more than once for every
3412 loop iteration. */
3413 int maybe_multiple = 0;
3414 /* Temporary list pointers for traversing loop_iv_list. */
3415 struct iv_class *bl, **backbl;
3416 /* Ratio of extra register life span we can justify
3417 for saving an instruction. More if loop doesn't call subroutines
3418 since in that case saving an insn makes more difference
3419 and more registers are available. */
3420 /* ??? could set this to last value of threshold in move_movables */
3421 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3422 /* Map of pseudo-register replacements. */
3423 rtx *reg_map;
3424 int call_seen;
3425 rtx test;
3426 rtx end_insert_before;
3427 int loop_depth = 0;
3428
3429 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3430 * sizeof (enum iv_mode));
3431 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3432 reg_iv_info = (struct induction **)
3433 alloca (max_reg_before_loop * sizeof (struct induction *));
3434 bzero ((char *) reg_iv_info, (max_reg_before_loop
3435 * sizeof (struct induction *)));
3436 reg_biv_class = (struct iv_class **)
3437 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3438 bzero ((char *) reg_biv_class, (max_reg_before_loop
3439 * sizeof (struct iv_class *)));
3440
3441 loop_iv_list = 0;
3442 addr_placeholder = gen_reg_rtx (Pmode);
3443
3444 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3445 must be put before this insn, so that they will appear in the right
3446 order (i.e. loop order).
3447
3448 If loop_end is the end of the current function, then emit a
3449 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3450 dummy note insn. */
3451 if (NEXT_INSN (loop_end) != 0)
3452 end_insert_before = NEXT_INSN (loop_end);
3453 else
3454 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3455
3456 /* Scan through loop to find all possible bivs. */
3457
3458 p = scan_start;
3459 while (1)
3460 {
3461 p = NEXT_INSN (p);
3462 /* At end of a straight-in loop, we are done.
3463 At end of a loop entered at the bottom, scan the top. */
3464 if (p == scan_start)
3465 break;
3466 if (p == end)
3467 {
3468 if (loop_top != 0)
3469 p = loop_top;
3470 else
3471 break;
3472 if (p == scan_start)
3473 break;
3474 }
3475
3476 if (GET_CODE (p) == INSN
3477 && (set = single_set (p))
3478 && GET_CODE (SET_DEST (set)) == REG)
3479 {
3480 dest_reg = SET_DEST (set);
3481 if (REGNO (dest_reg) < max_reg_before_loop
3482 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3483 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3484 {
3485 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3486 dest_reg, p, &inc_val, &mult_val))
3487 {
3488 /* It is a possible basic induction variable.
3489 Create and initialize an induction structure for it. */
3490
3491 struct induction *v
3492 = (struct induction *) alloca (sizeof (struct induction));
3493
3494 record_biv (v, p, dest_reg, inc_val, mult_val,
3495 not_every_iteration, maybe_multiple);
3496 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3497 }
3498 else if (REGNO (dest_reg) < max_reg_before_loop)
3499 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3500 }
3501 }
3502
3503 /* Past CODE_LABEL, we get to insns that may be executed multiple
3504 times. The only way we can be sure that they can't is if every
3505 jump insn between here and the end of the loop either
3506 returns, exits the loop, is a forward jump, or is a jump
3507 to the loop start. */
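/* (Editorial example.)  In a loop shaped like

	top:  ...
	lab:  ...
	      if (cond) goto lab;
	      goto top;

   the insns after LAB may run several times in a single trip around
   the loop because of the backward branch to LAB, so MAYBE_MULTIPLE
   must be set for them.  */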
3508
3509 if (GET_CODE (p) == CODE_LABEL)
3510 {
3511 rtx insn = p;
3512
3513 maybe_multiple = 0;
3514
3515 while (1)
3516 {
3517 insn = NEXT_INSN (insn);
3518 if (insn == scan_start)
3519 break;
3520 if (insn == end)
3521 {
3522 if (loop_top != 0)
3523 insn = loop_top;
3524 else
3525 break;
3526 if (insn == scan_start)
3527 break;
3528 }
3529
3530 if (GET_CODE (insn) == JUMP_INSN
3531 && GET_CODE (PATTERN (insn)) != RETURN
3532 && (! condjump_p (insn)
3533 || (JUMP_LABEL (insn) != 0
3534 && JUMP_LABEL (insn) != scan_start
3535 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3536 || INSN_UID (insn) >= max_uid_for_loop
3537 || (INSN_LUID (JUMP_LABEL (insn))
3538 < INSN_LUID (insn))))))
3539 {
3540 maybe_multiple = 1;
3541 break;
3542 }
3543 }
3544 }
3545
3546 /* Past a jump, we get to insns for which we can't count
3547 on whether they will be executed during each iteration. */
3548 /* This code appears twice in strength_reduce. There is also similar
3549 code in scan_loop. */
3550 if (GET_CODE (p) == JUMP_INSN
3551 /* If we enter the loop in the middle, and scan around to the
3552 beginning, don't set not_every_iteration for that.
3553 This can be any kind of jump, since we want to know if insns
3554 will be executed if the loop is executed. */
3555 && ! (JUMP_LABEL (p) == loop_top
3556 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3557 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3558 {
3559 rtx label = 0;
3560
3561 /* If this is a jump outside the loop, then it also doesn't
3562 matter. Check to see if the target of this branch is on the
3563 loop_number_exit_labels list. */
3564
3565 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3566 label;
3567 label = LABEL_NEXTREF (label))
3568 if (XEXP (label, 0) == JUMP_LABEL (p))
3569 break;
3570
3571 if (! label)
3572 not_every_iteration = 1;
3573 }
3574
3575 else if (GET_CODE (p) == NOTE)
3576 {
3577 /* At the virtual top of a converted loop, insns are again known to
3578 be executed each iteration: logically, the loop begins here
3579 even though the exit code has been duplicated. */
3580 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3581 not_every_iteration = 0;
3582 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3583 loop_depth++;
3584 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3585 loop_depth--;
3586 }
3587
3588 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3589 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3590 or not an insn is known to be executed each iteration of the
3591 loop, whether or not any iterations are known to occur.
3592
3593 Therefore, if we have just passed a label and have no more labels
3594 between here and the test insn of the loop, we know these insns
3595 will be executed each iteration. */
3596
3597 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3598 && no_labels_between_p (p, loop_end))
3599 not_every_iteration = 0;
3600 }
3601
3602 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3603 Make a sanity check against n_times_set. */
3604 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3605 {
3606 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3607 /* Above happens if register modified by subreg, etc. */
3608 /* Make sure it is not recognized as a basic induction var: */
3609 || n_times_set[bl->regno] != bl->biv_count
3610 /* If never incremented, it is an invariant that we decided not to
3611 move. So leave it alone. */
3612 || ! bl->incremented)
3613 {
3614 if (loop_dump_stream)
3615 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3616 bl->regno,
3617 (reg_iv_type[bl->regno] != BASIC_INDUCT
3618 ? "not induction variable"
3619 : (! bl->incremented ? "never incremented"
3620 : "count error")));
3621
3622 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3623 *backbl = bl->next;
3624 }
3625 else
3626 {
3627 backbl = &bl->next;
3628
3629 if (loop_dump_stream)
3630 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3631 }
3632 }
3633
3634 /* Exit if there are no bivs. */
3635 if (! loop_iv_list)
3636 {
3637 /* We can still unroll the loop anyway, but indicate that there is no
3638 strength reduction info available. */
3639 if (unroll_p)
3640 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3641
3642 return;
3643 }
3644
3645 /* Find initial value for each biv by searching backwards from loop_start,
3646 halting at first label. Also record any test condition. */
3647
3648 call_seen = 0;
3649 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3650 {
3651 note_insn = p;
3652
3653 if (GET_CODE (p) == CALL_INSN)
3654 call_seen = 1;
3655
3656 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3657 || GET_CODE (p) == CALL_INSN)
3658 note_stores (PATTERN (p), record_initial);
3659
3660 /* Record any test of a biv that branches around the loop if there is no
3661 store between it and the start of the loop. We only care about tests with
3662 constants and registers and only certain of those. */
3663 if (GET_CODE (p) == JUMP_INSN
3664 && JUMP_LABEL (p) != 0
3665 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3666 && (test = get_condition_for_loop (p)) != 0
3667 && GET_CODE (XEXP (test, 0)) == REG
3668 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3669 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3670 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3671 && bl->init_insn == 0)
3672 {
3673 /* If an NE test, we have an initial value! */
3674 if (GET_CODE (test) == NE)
3675 {
3676 bl->init_insn = p;
3677 bl->init_set = gen_rtx_SET (VOIDmode,
3678 XEXP (test, 0), XEXP (test, 1));
3679 }
3680 else
3681 bl->initial_test = test;
3682 }
3683 }
3684
3685 /* Look at each biv and see if we can say anything better about its
3686 initial value from any initializing insns set up above. (This is done
3687 in two passes to avoid missing SETs in a PARALLEL.) */
3688 for (bl = loop_iv_list; bl; bl = bl->next)
3689 {
3690 rtx src;
3691 rtx note;
3692
3693 if (! bl->init_insn)
3694 continue;
3695
3696 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3697 is a constant, use the value of that. */
3698 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3699 && CONSTANT_P (XEXP (note, 0)))
3700 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3701 && CONSTANT_P (XEXP (note, 0))))
3702 src = XEXP (note, 0);
3703 else
3704 src = SET_SRC (bl->init_set);
3705
3706 if (loop_dump_stream)
3707 fprintf (loop_dump_stream,
3708 "Biv %d initialized at insn %d: initial value ",
3709 bl->regno, INSN_UID (bl->init_insn));
3710
3711 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3712 || GET_MODE (src) == VOIDmode)
3713 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3714 {
3715 bl->initial_value = src;
3716
3717 if (loop_dump_stream)
3718 {
3719 if (GET_CODE (src) == CONST_INT)
3720 {
3721 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3722 fputc ('\n', loop_dump_stream);
3723 }
3724 else
3725 {
3726 print_rtl (loop_dump_stream, src);
3727 fprintf (loop_dump_stream, "\n");
3728 }
3729 }
3730 }
3731 else
3732 {
3733 /* The biv's initial value is not a simple move,
3734 so let it keep its own initial value. */
3735
3736 if (loop_dump_stream)
3737 fprintf (loop_dump_stream, "is complex\n");
3738 }
3739 }
3740
3741 /* Search the loop for general induction variables. */
3742
3743 /* A register is a giv if: it is only set once, it is a function of a
3744 biv and a constant (or invariant), and it is not a biv. */
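/* (Editorial example.)  In

	for (i = 0; i < n; i++)
	  s += p[i];

   the register holding i is a biv (each update has the form
   i = i + 1), while the address p + i*4 computed for p[i] is a giv:
   a function of the biv i and the invariant p.  */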
3745
3746 not_every_iteration = 0;
3747 loop_depth = 0;
3748 p = scan_start;
3749 while (1)
3750 {
3751 p = NEXT_INSN (p);
3752 /* At end of a straight-in loop, we are done.
3753 At end of a loop entered at the bottom, scan the top. */
3754 if (p == scan_start)
3755 break;
3756 if (p == end)
3757 {
3758 if (loop_top != 0)
3759 p = loop_top;
3760 else
3761 break;
3762 if (p == scan_start)
3763 break;
3764 }
3765
3766 /* Look for a general induction variable in a register. */
3767 if (GET_CODE (p) == INSN
3768 && (set = single_set (p))
3769 && GET_CODE (SET_DEST (set)) == REG
3770 && ! may_not_optimize[REGNO (SET_DEST (set))])
3771 {
3772 rtx src_reg;
3773 rtx add_val;
3774 rtx mult_val;
3775 int benefit;
3776 rtx regnote = 0;
3777
3778 dest_reg = SET_DEST (set);
3779 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3780 continue;
3781
3782 if (/* SET_SRC is a giv. */
3783 ((benefit = general_induction_var (SET_SRC (set),
3784 &src_reg, &add_val,
3785 &mult_val))
3786 /* Equivalent expression is a giv. */
3787 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3788 && (benefit = general_induction_var (XEXP (regnote, 0),
3789 &src_reg,
3790 &add_val, &mult_val))))
3791 /* Don't try to handle any regs made by loop optimization.
3792 We have nothing on them in regno_first_uid, etc. */
3793 && REGNO (dest_reg) < max_reg_before_loop
3794 /* Don't recognize a BASIC_INDUCT_VAR here. */
3795 && dest_reg != src_reg
3796 /* This must be the only place where the register is set. */
3797 && (n_times_set[REGNO (dest_reg)] == 1
3798 /* or all sets must be consecutive and make a giv. */
3799 || (benefit = consec_sets_giv (benefit, p,
3800 src_reg, dest_reg,
3801 &add_val, &mult_val))))
3802 {
3803 int count;
3804 struct induction *v
3805 = (struct induction *) alloca (sizeof (struct induction));
3806 rtx temp;
3807
3808 /* If this is a library call, increase benefit. */
3809 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3810 benefit += libcall_benefit (p);
3811
3812 /* Skip the consecutive insns, if there are any. */
3813 for (count = n_times_set[REGNO (dest_reg)] - 1;
3814 count > 0; count--)
3815 {
3816 /* If first insn of libcall sequence, skip to end.
3817 Do this at start of loop, since P is guaranteed to
3818 be an insn here. */
3819 if (GET_CODE (p) != NOTE
3820 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3821 p = XEXP (temp, 0);
3822
3823 do p = NEXT_INSN (p);
3824 while (GET_CODE (p) == NOTE);
3825 }
3826
3827 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3828 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3829 loop_end);
3830
3831 }
3832 }
3833
3834 #ifndef DONT_REDUCE_ADDR
3835 /* Look for givs which are memory addresses. */
3836 /* This resulted in worse code on a VAX 8600. I wonder if it
3837 still does. */
3838 if (GET_CODE (p) == INSN)
3839 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3840 loop_end);
3841 #endif
3842
3843 /* Update the status of whether giv can derive other givs. This can
3844 change when we pass a label or an insn that updates a biv. */
3845 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3846 || GET_CODE (p) == CODE_LABEL)
3847 update_giv_derive (p);
3848
3849 /* Past a jump, we get to insns for which we can't count
3850 on whether they will be executed during each iteration. */
3851 /* This code appears twice in strength_reduce. There is also similar
3852 code in scan_loop. */
3853 if (GET_CODE (p) == JUMP_INSN
3854 /* If we enter the loop in the middle, and scan around to the
3855 beginning, don't set not_every_iteration for that.
3856 This can be any kind of jump, since we want to know if insns
3857 will be executed if the loop is executed. */
3858 && ! (JUMP_LABEL (p) == loop_top
3859 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3860 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3861 {
3862 rtx label = 0;
3863
3864 /* If this is a jump outside the loop, then it also doesn't
3865 matter. Check to see if the target of this branch is on the
3866 loop_number_exit_labels list. */
3867
3868 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3869 label;
3870 label = LABEL_NEXTREF (label))
3871 if (XEXP (label, 0) == JUMP_LABEL (p))
3872 break;
3873
3874 if (! label)
3875 not_every_iteration = 1;
3876 }
3877
3878 else if (GET_CODE (p) == NOTE)
3879 {
3880 /* At the virtual top of a converted loop, insns are again known to
3881 be executed each iteration: logically, the loop begins here
3882 even though the exit code has been duplicated. */
3883 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3884 not_every_iteration = 0;
3885 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3886 loop_depth++;
3887 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3888 loop_depth--;
3889 }
3890
3891 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3892 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3893 or not an insn is known to be executed each iteration of the
3894 loop, whether or not any iterations are known to occur.
3895
3896 Therefore, if we have just passed a label and have no more labels
3897 between here and the test insn of the loop, we know these insns
3898 will be executed each iteration. */
3899
3900 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3901 && no_labels_between_p (p, loop_end))
3902 not_every_iteration = 0;
3903 }
3904
3905 /* Try to calculate and save the number of loop iterations. This is
3906 set to zero if the actual number can not be calculated. This must
3907 be called after all giv's have been identified, since otherwise it may
3908 fail if the iteration variable is a giv. */
3909
3910 loop_n_iterations = loop_iterations (loop_start, loop_end);
3911
3912 /* Now for each giv for which we still don't know whether or not it is
3913 replaceable, check to see if it is replaceable because its final value
3914 can be calculated. This must be done after loop_iterations is called,
3915 so that final_giv_value will work correctly. */
3916
3917 for (bl = loop_iv_list; bl; bl = bl->next)
3918 {
3919 struct induction *v;
3920
3921 for (v = bl->giv; v; v = v->next_iv)
3922 if (! v->replaceable && ! v->not_replaceable)
3923 check_final_value (v, loop_start, loop_end);
3924 }
3925
3926 /* Try to prove that the loop counter variable (if any) is always
3927 nonnegative; if so, record that fact with a REG_NONNEG note
3928 so that "decrement and branch until zero" insn can be used. */
3929 check_dbra_loop (loop_end, insn_count, loop_start);
3930
3931 #ifdef HAIFA
3932 /* Record loop variables relevant for BCT optimization before unrolling
3933 the loop. Unrolling may update part of this information, and the
3934 correct data will be used for generating the BCT. */
3935 #ifdef HAVE_decrement_and_branch_on_count
3936 if (HAVE_decrement_and_branch_on_count)
3937 analyze_loop_iterations (loop_start, loop_end);
3938 #endif
3939 #endif /* HAIFA */
3940
3941 /* Create reg_map to hold substitutions for replaceable giv regs. */
3942 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3943 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3944
3945 /* Examine each iv class for feasibility of strength reduction/induction
3946 variable elimination. */
3947
3948 for (bl = loop_iv_list; bl; bl = bl->next)
3949 {
3950 struct induction *v;
3951 int benefit;
3952 int all_reduced;
3953 rtx final_value = 0;
3954
3955 /* Test whether it will be possible to eliminate this biv
3956 provided all givs are reduced. This is possible if either
3957 the reg is not used outside the loop, or we can compute
3958 what its final value will be.
3959
3960 For architectures with a decrement_and_branch_until_zero insn,
3961 don't do this if we put a REG_NONNEG note on the endtest for
3962 this biv. */
3963
3964 /* Compare against bl->init_insn rather than loop_start.
3965 We aren't concerned with any uses of the biv between
3966 init_insn and loop_start since these won't be affected
3967 by the value of the biv elsewhere in the function, so
3968 long as init_insn doesn't use the biv itself.
3969 March 14, 1989 -- self@bayes.arc.nasa.gov */
3970
3971 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3972 && bl->init_insn
3973 && INSN_UID (bl->init_insn) < max_uid_for_loop
3974 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3975 #ifdef HAVE_decrement_and_branch_until_zero
3976 && ! bl->nonneg
3977 #endif
3978 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3979 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3980 #ifdef HAVE_decrement_and_branch_until_zero
3981 && ! bl->nonneg
3982 #endif
3983 ))
3984 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3985 threshold, insn_count);
3986 else
3987 {
3988 if (loop_dump_stream)
3989 {
3990 fprintf (loop_dump_stream,
3991 "Cannot eliminate biv %d.\n",
3992 bl->regno);
3993 fprintf (loop_dump_stream,
3994 "First use: insn %d, last use: insn %d.\n",
3995 REGNO_FIRST_UID (bl->regno),
3996 REGNO_LAST_UID (bl->regno));
3997 }
3998 }
3999
4000 /* Combine all giv's for this iv_class. */
4001 combine_givs (bl);
4002
4003 /* This will be true at the end, if all givs which depend on this
4004 biv have been strength reduced.
4005 We can't (currently) eliminate the biv unless this is so. */
4006 all_reduced = 1;
4007
4008 /* Check each giv in this class to see if we will benefit by reducing
4009 it. Skip giv's combined with others. */
4010 for (v = bl->giv; v; v = v->next_iv)
4011 {
4012 struct induction *tv;
4013
4014 if (v->ignore || v->same)
4015 continue;
4016
4017 benefit = v->benefit;
4018
4019 /* Reduce benefit if not replaceable, since we will insert
4020 a move-insn to replace the insn that calculates this giv.
4021 Don't do this unless the giv is a user variable, since it
4022 will often be marked non-replaceable because of the duplication
4023 of the exit code outside the loop. In such a case, the copies
4024 we insert are dead and will be deleted. So they don't have
4025 a cost. Similar situations exist. */
4026 /* ??? The new final_[bg]iv_value code does a much better job
4027 of finding replaceable giv's, and hence this code may no longer
4028 be necessary. */
4029 if (! v->replaceable && ! bl->eliminable
4030 && REG_USERVAR_P (v->dest_reg))
4031 benefit -= copy_cost;
4032
4033 /* Decrease the benefit to count the add-insns that we will
4034 insert to increment the reduced reg for the giv. */
4035 benefit -= add_cost * bl->biv_count;
4036
4037 /* Decide whether to strength-reduce this giv or to leave the code
4038 unchanged (recompute it from the biv each time it is used).
4039 This decision can be made independently for each giv. */
4040
4041 #ifdef AUTO_INC_DEC
4042 /* Attempt to guess whether autoincrement will handle some of the
4043 new add insns; if so, increase BENEFIT (undo the subtraction of
4044 add_cost that was done above). */
4045 if (v->giv_type == DEST_ADDR
4046 && GET_CODE (v->mult_val) == CONST_INT)
4047 {
4048 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4049 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4050 benefit += add_cost * bl->biv_count;
4051 #endif
4052 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4053 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4054 benefit += add_cost * bl->biv_count;
4055 #endif
4056 }
4057 #endif
4058
4059 /* If an insn is not to be strength reduced, then set its ignore
4060 flag, and clear all_reduced. */
4061
4062 /* A giv that depends on a reversed biv must be reduced if it is
4063 used after the loop exit; otherwise, it would have the wrong
4064 value after the loop exit. To keep it simple, just reduce all
4065 such givs whether or not we know they are used after the loop
4066 exit. */
4067
4068 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4069 && ! bl->reversed)
4070 {
4071 if (loop_dump_stream)
4072 fprintf (loop_dump_stream,
4073 "giv of insn %d not worth while, %d vs %d.\n",
4074 INSN_UID (v->insn),
4075 v->lifetime * threshold * benefit, insn_count);
4076 v->ignore = 1;
4077 all_reduced = 0;
4078 }
4079 else
4080 {
4081 /* Check that we can increment the reduced giv without a
4082 multiply insn. If not, reject it. */
4083
4084 for (tv = bl->biv; tv; tv = tv->next_iv)
4085 if (tv->mult_val == const1_rtx
4086 && ! product_cheap_p (tv->add_val, v->mult_val))
4087 {
4088 if (loop_dump_stream)
4089 fprintf (loop_dump_stream,
4090 "giv of insn %d: would need a multiply.\n",
4091 INSN_UID (v->insn));
4092 v->ignore = 1;
4093 all_reduced = 0;
4094 break;
4095 }
4096 }
4097 }
4098
4099 /* Reduce each giv that we decided to reduce. */
4100
4101 for (v = bl->giv; v; v = v->next_iv)
4102 {
4103 struct induction *tv;
4104 if (! v->ignore && v->same == 0)
4105 {
4106 int auto_inc_opt = 0;
4107
4108 v->new_reg = gen_reg_rtx (v->mode);
4109
4110 #ifdef AUTO_INC_DEC
4111 /* If the target has auto-increment addressing modes, and
4112 this is an address giv, then try to put the increment
4113 immediately after its use, so that flow can create an
4114 auto-increment addressing mode. */
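/* (Editorial example.)  If the giv addresses *p and the only biv
   update is p = p + 4 placed immediately after that use, flow can
   later fuse the pair into a single post-increment address on
   machines that have one.  */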
4115 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4116 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4117 /* We don't handle reversed biv's because bl->biv->insn
4118 does not have a valid INSN_LUID. */
4119 && ! bl->reversed
4120 && v->always_executed && ! v->maybe_multiple
4121 && INSN_UID (v->insn) < max_uid_for_loop)
4122 {
4123 /* If other giv's have been combined with this one, then
4124 this will work only if all uses of the other giv's occur
4125 before this giv's insn. This is difficult to check.
4126
4127 We simplify this by looking for the common case where
4128 there is one DEST_REG giv, and this giv's insn is the
4129 last use of the dest_reg of that DEST_REG giv. If the
4130 increment occurs after the address giv, then we can
4131 perform the optimization. (Otherwise, the increment
4132 would have to go before other_giv, and we would not be
4133 able to combine it with the address giv to get an
4134 auto-inc address.) */
4135 if (v->combined_with)
4136 {
4137 struct induction *other_giv = 0;
4138
4139 for (tv = bl->giv; tv; tv = tv->next_iv)
4140 if (tv->same == v)
4141 {
4142 if (other_giv)
4143 break;
4144 else
4145 other_giv = tv;
4146 }
4147 if (! tv && other_giv
4148 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4149 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4150 == INSN_UID (v->insn))
4151 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4152 auto_inc_opt = 1;
4153 }
4154 /* Check for case where increment is before the address
4155 giv. Do this test in "loop order". */
4156 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4157 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4158 || (INSN_LUID (bl->biv->insn)
4159 > INSN_LUID (scan_start))))
4160 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4161 && (INSN_LUID (scan_start)
4162 < INSN_LUID (bl->biv->insn))))
4163 auto_inc_opt = -1;
4164 else
4165 auto_inc_opt = 1;
4166
4167 #ifdef HAVE_cc0
4168 {
4169 rtx prev;
4170
4171 /* We can't put an insn immediately after one setting
4172 cc0, or immediately before one using cc0. */
4173 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4174 || (auto_inc_opt == -1
4175 && (prev = prev_nonnote_insn (v->insn)) != 0
4176 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4177 && sets_cc0_p (PATTERN (prev))))
4178 auto_inc_opt = 0;
4179 }
4180 #endif
4181
4182 if (auto_inc_opt)
4183 v->auto_inc_opt = 1;
4184 }
4185 #endif
4186
4187 /* For each place where the biv is incremented, add an insn
4188 to increment the new, reduced reg for the giv. */
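/* (Editorial note.)  If the giv is G = M*B + A and this biv update
   adds C to B, the reduced register must be bumped by M*C here to
   stay in step; that is what the emit_iv_add_mult call below emits
   in the usual const1_rtx case.  */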
4189 for (tv = bl->biv; tv; tv = tv->next_iv)
4190 {
4191 rtx insert_before;
4192
4193 if (! auto_inc_opt)
4194 insert_before = tv->insn;
4195 else if (auto_inc_opt == 1)
4196 insert_before = NEXT_INSN (v->insn);
4197 else
4198 insert_before = v->insn;
4199
4200 if (tv->mult_val == const1_rtx)
4201 emit_iv_add_mult (tv->add_val, v->mult_val,
4202 v->new_reg, v->new_reg, insert_before);
4203 else /* tv->mult_val == const0_rtx */
4204 /* A multiply is acceptable here
4205 since this is presumed to be seldom executed. */
4206 emit_iv_add_mult (tv->add_val, v->mult_val,
4207 v->add_val, v->new_reg, insert_before);
4208 }
4209
4210 /* Add code at loop start to initialize giv's reduced reg. */
4211
4212 emit_iv_add_mult (bl->initial_value, v->mult_val,
4213 v->add_val, v->new_reg, loop_start);
4214 }
4215 }
4216
4217 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4218 as not reduced.
4219
4220 For each giv register that can be reduced now: if replaceable,
4221 substitute reduced reg wherever the old giv occurs;
4222 else add new move insn "giv_reg = reduced_reg".
4223
4224 Also check for givs whose first use is their definition and whose
4225 last use is the definition of another giv. If so, it is likely
4226 dead and should not be used to eliminate a biv. */
4227 for (v = bl->giv; v; v = v->next_iv)
4228 {
4229 if (v->same && v->same->ignore)
4230 v->ignore = 1;
4231
4232 if (v->ignore)
4233 continue;
4234
4235 if (v->giv_type == DEST_REG
4236 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4237 {
4238 struct induction *v1;
4239
4240 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4241 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4242 v->maybe_dead = 1;
4243 }
4244
4245 /* Update expression if this was combined, in case other giv was
4246 replaced. */
4247 if (v->same)
4248 v->new_reg = replace_rtx (v->new_reg,
4249 v->same->dest_reg, v->same->new_reg);
4250
4251 if (v->giv_type == DEST_ADDR)
4252 /* Store reduced reg as the address in the memref where we found
4253 this giv. */
4254 validate_change (v->insn, v->location, v->new_reg, 0);
4255 else if (v->replaceable)
4256 {
4257 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4258
4259 #if 0
4260 /* I can no longer duplicate the original problem. Perhaps
4261 this is unnecessary now? */
4262
4263 /* Replaceable; it isn't strictly necessary to delete the old
4264 insn and emit a new one, because v->dest_reg is now dead.
4265
4266 However, especially when unrolling loops, the special
4267 handling for (set REG0 REG1) in the second cse pass may
4268 make v->dest_reg live again. To avoid this problem, emit
4269 an insn to set the original giv reg from the reduced giv.
4270 We can not delete the original insn, since it may be part
4271 of a LIBCALL, and the code in flow that eliminates dead
4272 libcalls will fail if it is deleted. */
4273 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4274 v->insn);
4275 #endif
4276 }
4277 else
4278 {
4279 /* Not replaceable; emit an insn to set the original giv reg from
4280 the reduced giv, same as above. */
4281 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4282 v->insn);
4283 }
4284
4285 /* When a loop is reversed, givs which depend on the reversed
4286 biv, and which are live outside the loop, must be set to their
4287 correct final value. This insn is only needed if the giv is
4288 not replaceable. The correct final value is the same as the
4289 value that the giv starts the reversed loop with. */
4290 if (bl->reversed && ! v->replaceable)
4291 emit_iv_add_mult (bl->initial_value, v->mult_val,
4292 v->add_val, v->dest_reg, end_insert_before);
4293 else if (v->final_value)
4294 {
4295 rtx insert_before;
4296
4297 /* If the loop has multiple exits, emit the insn before the
4298 loop to ensure that it will always be executed no matter
4299 how the loop exits. Otherwise, emit the insn after the loop,
4300 since this is slightly more efficient. */
4301 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4302 insert_before = loop_start;
4303 else
4304 insert_before = end_insert_before;
4305 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4306 insert_before);
4307
4308 #if 0
4309 /* If the insn to set the final value of the giv was emitted
4310 before the loop, then we must delete the insn inside the loop
4311 that sets it. If this is a LIBCALL, then we must delete
4312 every insn in the libcall. Note, however, that
4313 final_giv_value will only succeed when there are multiple
4314 exits if the giv is dead at each exit, hence it does not
4315 matter that the original insn remains because it is dead
4316 anyways. */
4317 /* Delete the insn inside the loop that sets the giv since
4318 the giv is now set before (or after) the loop. */
4319 delete_insn (v->insn);
4320 #endif
4321 }
4322
4323 if (loop_dump_stream)
4324 {
4325 fprintf (loop_dump_stream, "giv at %d reduced to ",
4326 INSN_UID (v->insn));
4327 print_rtl (loop_dump_stream, v->new_reg);
4328 fprintf (loop_dump_stream, "\n");
4329 }
4330 }
4331
4332 /* All the givs based on the biv bl have been reduced if they
4333 merit it. */
4334
4335 /* For each giv not marked as maybe dead that has been combined with a
4336 second giv, clear any "maybe dead" mark on that second giv.
4337 v->new_reg will either be or refer to the register of the giv it
4338 combined with.
4339
4340 Doing this clearing avoids problems in biv elimination where a
4341 giv's new_reg is a complex value that can't be put in the insn but
4342 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4343 Since the register will be used in either case, we'd prefer it be
4344 used from the simpler giv. */
4345
4346 for (v = bl->giv; v; v = v->next_iv)
4347 if (! v->maybe_dead && v->same)
4348 v->same->maybe_dead = 0;
4349
4350 /* Try to eliminate the biv, if it is a candidate.
4351 This won't work if ! all_reduced,
4352 since the givs we planned to use might not have been reduced.
4353
4354 We have to be careful that we didn't initially think we could eliminate
4355 this biv because of a giv that we now think may be dead and shouldn't
4356 be used as a biv replacement.
4357
4358 Also, there is the possibility that we may have a giv that looks
4359 like it can be used to eliminate a biv, but the resulting insn
4360 isn't valid. This can happen, for example, on the 88k, where a
4361 JUMP_INSN can compare a register only with zero. Attempts to
4362 replace it with a compare with a constant will fail.
4363
4364 Note that in cases where this call fails, we may have replaced some
4365 of the occurrences of the biv with a giv, but no harm was done in
4366 doing so in the rare cases where it can occur. */
4367
4368 if (all_reduced == 1 && bl->eliminable
4369 && maybe_eliminate_biv (bl, loop_start, end, 1,
4370 threshold, insn_count))
4371
4372 {
4373 /* ?? If we created a new test to bypass the loop entirely,
4374 or otherwise drop straight in, based on this test, then
4375 we might want to rewrite it also. This way some later
4376 pass has more hope of removing the initialization of this
4377 biv entirely. */
4378
4379 /* If final_value != 0, then the biv may be used after loop end
4380 and we must emit an insn to set it just in case.
4381
4382 Reversed bivs already have an insn after the loop setting their
4383 value, so we don't need another one. We can't calculate the
4384 proper final value for such a biv here anyways. */
4385 if (final_value != 0 && ! bl->reversed)
4386 {
4387 rtx insert_before;
4388
4389 /* If the loop has multiple exits, emit the insn before the
4390 loop to ensure that it will always be executed no matter
4391 how the loop exits. Otherwise, emit the insn after the
4392 loop, since this is slightly more efficient. */
4393 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4394 insert_before = loop_start;
4395 else
4396 insert_before = end_insert_before;
4397
4398 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4399 end_insert_before);
4400 }
4401
4402 #if 0
4403 /* Delete all of the instructions inside the loop which set
4404 the biv, as they are all dead. It is safe to delete them,
4405 because an insn setting a biv will never be part of a libcall. */
4406 /* However, deleting them will invalidate the regno_last_uid info,
4407 so keeping them around is more convenient. Final_biv_value
4408 will only succeed when there are multiple exits if the biv
4409 is dead at each exit, hence it does not matter that the original
4410 insn remains, because it is dead anyways. */
4411 for (v = bl->biv; v; v = v->next_iv)
4412 delete_insn (v->insn);
4413 #endif
4414
4415 if (loop_dump_stream)
4416 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4417 bl->regno);
4418 }
4419 }
4420
4421 /* Go through all the instructions in the loop, making all the
4422 register substitutions scheduled in REG_MAP. */
4423
4424 for (p = loop_start; p != end; p = NEXT_INSN (p))
4425 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4426 || GET_CODE (p) == CALL_INSN)
4427 {
4428 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4429 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4430 INSN_CODE (p) = -1;
4431 }
4432
4433 /* Unroll loops from within strength reduction so that we can use the
4434 induction variable information that strength_reduce has already
4435 collected. */
4436
4437 if (unroll_p)
4438 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4439
4440 #ifdef HAIFA
4441 /* Instrument the loop with a BCT insn. */
4442 #ifdef HAVE_decrement_and_branch_on_count
4443 if (HAVE_decrement_and_branch_on_count)
4444 insert_bct (loop_start, loop_end);
4445 #endif
4446 #endif /* HAIFA */
4447
4448 if (loop_dump_stream)
4449 fprintf (loop_dump_stream, "\n");
4450 }
4451 \f
4452 /* Return 1 if X is a valid source for an initial value (or as value being
4453 compared against in an initial test).
4454
4455 X must be either a register or constant and must not be clobbered between
4456 the current insn and the start of the loop.
4457
4458 INSN is the insn containing X. */
4459
4460 static int
4461 valid_initial_value_p (x, insn, call_seen, loop_start)
4462 rtx x;
4463 rtx insn;
4464 int call_seen;
4465 rtx loop_start;
4466 {
4467 if (CONSTANT_P (x))
4468 return 1;
4469
4470 /* Only consider pseudos we know about initialized in insns whose luids
4471 we know. */
4472 if (GET_CODE (x) != REG
4473 || REGNO (x) >= max_reg_before_loop)
4474 return 0;
4475
4476 /* Don't use call-clobbered registers across a call which clobbers it. On
4477 some machines, don't use any hard registers at all. */
4478 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4479 && (SMALL_REGISTER_CLASSES
4480 || (call_used_regs[REGNO (x)] && call_seen)))
4481 return 0;
4482
4483 /* Don't use registers that have been clobbered before the start of the
4484 loop. */
4485 if (reg_set_between_p (x, insn, loop_start))
4486 return 0;
4487
4488 return 1;
4489 }
4490 \f
4491 /* Scan X for memory refs and check each memory address
4492 as a possible giv. INSN is the insn whose pattern X comes from.
4493 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4494 every loop iteration. */
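/* (Editorial example.)  An address such as
   (mem:SI (plus:SI (mult:SI (reg i) (const_int 4)) (reg p)))
   yields a candidate DEST_ADDR giv with src_reg i, mult_val 4 and
   add_val p.  */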
4495
4496 static void
4497 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4498 rtx x;
4499 rtx insn;
4500 int not_every_iteration;
4501 rtx loop_start, loop_end;
4502 {
4503 register int i, j;
4504 register enum rtx_code code;
4505 register char *fmt;
4506
4507 if (x == 0)
4508 return;
4509
4510 code = GET_CODE (x);
4511 switch (code)
4512 {
4513 case REG:
4514 case CONST_INT:
4515 case CONST:
4516 case CONST_DOUBLE:
4517 case SYMBOL_REF:
4518 case LABEL_REF:
4519 case PC:
4520 case CC0:
4521 case ADDR_VEC:
4522 case ADDR_DIFF_VEC:
4523 case USE:
4524 case CLOBBER:
4525 return;
4526
4527 case MEM:
4528 {
4529 rtx src_reg;
4530 rtx add_val;
4531 rtx mult_val;
4532 int benefit;
4533
4534 benefit = general_induction_var (XEXP (x, 0),
4535 &src_reg, &add_val, &mult_val);
4536
4537 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4538 Such a giv isn't useful. */
4539 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4540 {
4541 /* Found one; record it. */
4542 struct induction *v
4543 = (struct induction *) oballoc (sizeof (struct induction));
4544
4545 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4546 add_val, benefit, DEST_ADDR, not_every_iteration,
4547 &XEXP (x, 0), loop_start, loop_end);
4548
4549 v->mem_mode = GET_MODE (x);
4550 }
4551 }
4552 return;
4553
4554 default:
4555 break;
4556 }
4557
4558 /* Recursively scan the subexpressions for other mem refs. */
4559
4560 fmt = GET_RTX_FORMAT (code);
4561 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4562 if (fmt[i] == 'e')
4563 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4564 loop_end);
4565 else if (fmt[i] == 'E')
4566 for (j = 0; j < XVECLEN (x, i); j++)
4567 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4568 loop_start, loop_end);
4569 }
4570 \f
4571 /* Fill in the data about one biv update.
4572 V is the `struct induction' in which we record the biv. (It is
4573 allocated by the caller, with alloca.)
4574 INSN is the insn that sets it.
4575 DEST_REG is the biv's reg.
4576
4577 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4578 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4579 being set to INC_VAL.
4580
4581 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4582 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4583 can be executed more than once per iteration. If MAYBE_MULTIPLE
4584 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4585 executed exactly once per iteration. */
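/* (Editorial note.)  For example, the update i = i + 4 is recorded
   with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas
   the plain assignment i = x is recorded with MULT_VAL == const0_rtx
   and INC_VAL == x.  */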
4586
4587 static void
4588 record_biv (v, insn, dest_reg, inc_val, mult_val,
4589 not_every_iteration, maybe_multiple)
4590 struct induction *v;
4591 rtx insn;
4592 rtx dest_reg;
4593 rtx inc_val;
4594 rtx mult_val;
4595 int not_every_iteration;
4596 int maybe_multiple;
4597 {
4598 struct iv_class *bl;
4599
4600 v->insn = insn;
4601 v->src_reg = dest_reg;
4602 v->dest_reg = dest_reg;
4603 v->mult_val = mult_val;
4604 v->add_val = inc_val;
4605 v->mode = GET_MODE (dest_reg);
4606 v->always_computable = ! not_every_iteration;
4607 v->always_executed = ! not_every_iteration;
4608 v->maybe_multiple = maybe_multiple;
4609
4610 /* Add this to the reg's iv_class, creating a class
4611 if this is the first incrementation of the reg. */
4612
4613 bl = reg_biv_class[REGNO (dest_reg)];
4614 if (bl == 0)
4615 {
4616 /* Create and initialize new iv_class. */
4617
4618 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4619
4620 bl->regno = REGNO (dest_reg);
4621 bl->biv = 0;
4622 bl->giv = 0;
4623 bl->biv_count = 0;
4624 bl->giv_count = 0;
4625
4626 /* Set initial value to the reg itself. */
4627 bl->initial_value = dest_reg;
4628 /* We haven't seen the initializing insn yet */
4629 bl->init_insn = 0;
4630 bl->init_set = 0;
4631 bl->initial_test = 0;
4632 bl->incremented = 0;
4633 bl->eliminable = 0;
4634 bl->nonneg = 0;
4635 bl->reversed = 0;
4636 bl->total_benefit = 0;
4637
4638 /* Add this class to loop_iv_list. */
4639 bl->next = loop_iv_list;
4640 loop_iv_list = bl;
4641
4642 /* Put it in the array of biv register classes. */
4643 reg_biv_class[REGNO (dest_reg)] = bl;
4644 }
4645
4646 /* Update IV_CLASS entry for this biv. */
4647 v->next_iv = bl->biv;
4648 bl->biv = v;
4649 bl->biv_count++;
4650 if (mult_val == const1_rtx)
4651 bl->incremented = 1;
4652
4653 if (loop_dump_stream)
4654 {
4655 fprintf (loop_dump_stream,
4656 "Insn %d: possible biv, reg %d,",
4657 INSN_UID (insn), REGNO (dest_reg));
4658 if (GET_CODE (inc_val) == CONST_INT)
4659 {
4660 fprintf (loop_dump_stream, " const =");
4661 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4662 fputc ('\n', loop_dump_stream);
4663 }
4664 else
4665 {
4666 fprintf (loop_dump_stream, " const = ");
4667 print_rtl (loop_dump_stream, inc_val);
4668 fprintf (loop_dump_stream, "\n");
4669 }
4670 }
4671 }
4672 \f
4673 /* Fill in the data about one giv.
4674 V is the `struct induction' in which we record the giv. (It is
4675 allocated by the caller, with alloca.)
4676 INSN is the insn that sets it.
4677 BENEFIT estimates the savings from deleting this insn.
4678 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4679 into a register or is used as a memory address.
4680
4681 SRC_REG is the biv reg which the giv is computed from.
4682 DEST_REG is the giv's reg (if the giv is stored in a reg).
4683 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4684 LOCATION points to the place where this giv's value appears in INSN. */
4685
4686 static void
4687 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4688 type, not_every_iteration, location, loop_start, loop_end)
4689 struct induction *v;
4690 rtx insn;
4691 rtx src_reg;
4692 rtx dest_reg;
4693 rtx mult_val, add_val;
4694 int benefit;
4695 enum g_types type;
4696 int not_every_iteration;
4697 rtx *location;
4698 rtx loop_start, loop_end;
4699 {
4700 struct induction *b;
4701 struct iv_class *bl;
4702 rtx set = single_set (insn);
4703
4704 v->insn = insn;
4705 v->src_reg = src_reg;
4706 v->giv_type = type;
4707 v->dest_reg = dest_reg;
4708 v->mult_val = mult_val;
4709 v->add_val = add_val;
4710 v->benefit = benefit;
4711 v->location = location;
4712 v->cant_derive = 0;
4713 v->combined_with = 0;
4714 v->maybe_multiple = 0;
4715 v->maybe_dead = 0;
4716 v->derive_adjustment = 0;
4717 v->same = 0;
4718 v->ignore = 0;
4719 v->new_reg = 0;
4720 v->final_value = 0;
4721 v->same_insn = 0;
4722 v->auto_inc_opt = 0;
4723 v->unrolled = 0;
4724 v->shared = 0;
4725
4726 /* The v->always_computable field is used in update_giv_derive, to
4727 determine whether a giv can be used to derive another giv. For a
4728 DEST_REG giv, INSN computes a new value for the giv, so its value
4729 isn't computable if INSN isn't executed every iteration.
4730 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4731 it does not compute a new value. Hence the value is always computable
4732 regardless of whether INSN is executed each iteration. */
4733
4734 if (type == DEST_ADDR)
4735 v->always_computable = 1;
4736 else
4737 v->always_computable = ! not_every_iteration;
4738
4739 v->always_executed = ! not_every_iteration;
4740
4741 if (type == DEST_ADDR)
4742 {
4743 v->mode = GET_MODE (*location);
4744 v->lifetime = 1;
4745 v->times_used = 1;
4746 }
4747 else /* type == DEST_REG */
4748 {
4749 v->mode = GET_MODE (SET_DEST (set));
4750
4751 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4752 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4753
4754 v->times_used = n_times_used[REGNO (dest_reg)];
4755
4756 /* If the lifetime is zero, it means that this register is
4757 really a dead store. So mark this as a giv that can be
4758 ignored. This will not prevent the biv from being eliminated. */
4759 if (v->lifetime == 0)
4760 v->ignore = 1;
4761
4762 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4763 reg_iv_info[REGNO (dest_reg)] = v;
4764 }
4765
4766 /* Add the giv to the class of givs computed from one biv. */
4767
4768 bl = reg_biv_class[REGNO (src_reg)];
4769 if (bl)
4770 {
4771 v->next_iv = bl->giv;
4772 bl->giv = v;
4773 /* Don't count DEST_ADDR. This is supposed to count the number of
4774 insns that calculate givs. */
4775 if (type == DEST_REG)
4776 bl->giv_count++;
4777 bl->total_benefit += benefit;
4778 }
4779 else
4780 /* Fatal error, biv missing for this giv? */
4781 abort ();
4782
4783 if (type == DEST_ADDR)
4784 v->replaceable = 1;
4785 else
4786 {
4787 /* The giv can be replaced outright by the reduced register only if all
4788 of the following conditions are true:
4789 - the insn that sets the giv is always executed on any iteration
4790 on which the giv is used at all
4791 (there are two ways to deduce this:
4792 either the insn is executed on every iteration,
4793 or all uses follow that insn in the same basic block),
4794 - the giv is not used outside the loop
4795 - no assignments to the biv occur during the giv's lifetime. */
4796
4797 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4798 /* Previous line always fails if INSN was moved by loop opt. */
4799 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4800 && (! not_every_iteration
4801 || last_use_this_basic_block (dest_reg, insn)))
4802 {
4803 /* Now check that there are no assignments to the biv within the
4804 giv's lifetime. This requires two separate checks. */
4805
4806 /* Check each biv update, and fail if any are between the first
4807 and last use of the giv.
4808
4809 If this loop contains an inner loop that was unrolled, then
4810 the insn modifying the biv may have been emitted by the loop
4811 unrolling code, and hence does not have a valid luid. Just
4812 mark the biv as not replaceable in this case. It is not very
4813 useful as a biv, because it is used in two different loops.
4814 It is very unlikely that we would be able to optimize the giv
4815 using this biv anyway. */
4816
4817 v->replaceable = 1;
4818 for (b = bl->biv; b; b = b->next_iv)
4819 {
4820 if (INSN_UID (b->insn) >= max_uid_for_loop
4821 || ((uid_luid[INSN_UID (b->insn)]
4822 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4823 && (uid_luid[INSN_UID (b->insn)]
4824 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4825 {
4826 v->replaceable = 0;
4827 v->not_replaceable = 1;
4828 break;
4829 }
4830 }
4831
4832 /* If there are any backwards branches that go from after the
4833 biv update to before it, then this giv is not replaceable. */
4834 if (v->replaceable)
4835 for (b = bl->biv; b; b = b->next_iv)
4836 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4837 {
4838 v->replaceable = 0;
4839 v->not_replaceable = 1;
4840 break;
4841 }
4842 }
4843 else
4844 {
4845 /* May still be replaceable, we don't have enough info here to
4846 decide. */
4847 v->replaceable = 0;
4848 v->not_replaceable = 0;
4849 }
4850 }
4851
4852 if (loop_dump_stream)
4853 {
4854 if (type == DEST_REG)
4855 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4856 INSN_UID (insn), REGNO (dest_reg));
4857 else
4858 fprintf (loop_dump_stream, "Insn %d: dest address",
4859 INSN_UID (insn));
4860
4861 fprintf (loop_dump_stream, " src reg %d benefit %d",
4862 REGNO (src_reg), v->benefit);
4863 fprintf (loop_dump_stream, " used %d lifetime %d",
4864 v->times_used, v->lifetime);
4865
4866 if (v->replaceable)
4867 fprintf (loop_dump_stream, " replaceable");
4868
4869 if (GET_CODE (mult_val) == CONST_INT)
4870 {
4871 fprintf (loop_dump_stream, " mult ");
4872 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4873 }
4874 else
4875 {
4876 fprintf (loop_dump_stream, " mult ");
4877 print_rtl (loop_dump_stream, mult_val);
4878 }
4879
4880 if (GET_CODE (add_val) == CONST_INT)
4881 {
4882 fprintf (loop_dump_stream, " add ");
4883 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4884 }
4885 else
4886 {
4887 fprintf (loop_dump_stream, " add ");
4888 print_rtl (loop_dump_stream, add_val);
4889 }
4890 }
4891
4892 if (loop_dump_stream)
4893 fprintf (loop_dump_stream, "\n");
4894
4895 }
4896
4897
4898 /* All this does is determine whether a giv can be made replaceable because
4899 its final value can be calculated. This code can not be part of record_giv
4900 above, because final_giv_value requires that the number of loop iterations
4901 be known, and that can not be accurately calculated until after all givs
4902 have been identified. */
4903
4904 static void
4905 check_final_value (v, loop_start, loop_end)
4906 struct induction *v;
4907 rtx loop_start, loop_end;
4908 {
4909 struct iv_class *bl;
4910 rtx final_value = 0;
4911
4912 bl = reg_biv_class[REGNO (v->src_reg)];
4913
4914 /* DEST_ADDR givs will never reach here, because they are always marked
4915 replaceable above in record_giv. */
4916
4917 /* The giv can be replaced outright by the reduced register only if all
4918 of the following conditions are true:
4919 - the insn that sets the giv is always executed on any iteration
4920 on which the giv is used at all
4921 (there are two ways to deduce this:
4922 either the insn is executed on every iteration,
4923 or all uses follow that insn in the same basic block),
4924 - its final value can be calculated (this condition is different
4925 than the one above in record_giv)
4926 - no assignments to the biv occur during the giv's lifetime. */
4927
4928 #if 0
4929 /* This is only called now when replaceable is known to be false. */
4930 /* Clear replaceable, so that it won't confuse final_giv_value. */
4931 v->replaceable = 0;
4932 #endif
4933
4934 if ((final_value = final_giv_value (v, loop_start, loop_end))
4935 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4936 {
4937 int biv_increment_seen = 0;
4938 rtx p = v->insn;
4939 rtx last_giv_use;
4940
4941 v->replaceable = 1;
4942
4943 /* When trying to determine whether or not a biv increment occurs
4944 during the lifetime of the giv, we can ignore uses of the variable
4945 outside the loop because final_value is true. Hence we can not
4946 use regno_last_uid and regno_first_uid as above in record_giv. */
4947
4948 /* Search the loop to determine whether any assignments to the
4949 biv occur during the giv's lifetime. Start with the insn
4950 that sets the giv, and search around the loop until we come
4951 back to that insn again.
4952
4953 Also fail if there is a jump within the giv's lifetime that jumps
4954 to somewhere outside the lifetime but still within the loop. This
4955 catches spaghetti code where the execution order is not linear, and
4956 hence the above test fails. Here we assume that the giv lifetime
4957 does not extend from one iteration of the loop to the next, so as
4958 to make the test easier. Since the lifetime isn't known yet,
4959 this requires two loops. See also record_giv above. */
4960
4961 last_giv_use = v->insn;
4962
4963 while (1)
4964 {
4965 p = NEXT_INSN (p);
4966 if (p == loop_end)
4967 p = NEXT_INSN (loop_start);
4968 if (p == v->insn)
4969 break;
4970
4971 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4972 || GET_CODE (p) == CALL_INSN)
4973 {
4974 if (biv_increment_seen)
4975 {
4976 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4977 {
4978 v->replaceable = 0;
4979 v->not_replaceable = 1;
4980 break;
4981 }
4982 }
4983 else if (reg_set_p (v->src_reg, PATTERN (p)))
4984 biv_increment_seen = 1;
4985 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4986 last_giv_use = p;
4987 }
4988 }
4989
4990 /* Now that the lifetime of the giv is known, check for branches
4991 from within the lifetime to outside the lifetime if it is still
4992 replaceable. */
4993
4994 if (v->replaceable)
4995 {
4996 p = v->insn;
4997 while (1)
4998 {
4999 p = NEXT_INSN (p);
5000 if (p == loop_end)
5001 p = NEXT_INSN (loop_start);
5002 if (p == last_giv_use)
5003 break;
5004
5005 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5006 && LABEL_NAME (JUMP_LABEL (p))
5007 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5008 || (INSN_UID (v->insn) >= max_uid_for_loop)
5009 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5010 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5011 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5012 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5013 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5014 {
5015 v->replaceable = 0;
5016 v->not_replaceable = 1;
5017
5018 if (loop_dump_stream)
5019 fprintf (loop_dump_stream,
5020 "Found branch outside giv lifetime.\n");
5021
5022 break;
5023 }
5024 }
5025 }
5026
5027 /* If it is replaceable, then save the final value. */
5028 if (v->replaceable)
5029 v->final_value = final_value;
5030 }
5031
5032 if (loop_dump_stream && v->replaceable)
5033 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5034 INSN_UID (v->insn), REGNO (v->dest_reg));
5035 }
5036 \f
5037 /* Update the status of whether a giv can derive other givs.
5038
5039 We need to do something special if there is or may be an update to the biv
5040 between the time the giv is defined and the time it is used to derive
5041 another giv.
5042
5043 In addition, a giv that is only conditionally set is not allowed to
5044 derive another giv once a label has been passed.
5045
5046 The cases we look at are when a label or an update to a biv is passed. */
5047
5048 static void
5049 update_giv_derive (p)
5050 rtx p;
5051 {
5052 struct iv_class *bl;
5053 struct induction *biv, *giv;
5054 rtx tem;
5055 int dummy;
5056
5057 /* Search all IV classes, then all bivs, and finally all givs.
5058
5059 There are three cases we are concerned with. First we have the situation
5060 of a giv that is only updated conditionally. In that case, it may not
5061 derive any givs after a label is passed.
5062
5063 The second case is when a biv update occurs, or may occur, after the
5064 definition of a giv. For certain biv updates (see below) that are
5065 known to occur between the giv definition and use, we can adjust the
5066 giv definition. For others, or when the biv update is conditional,
5067 we must prevent the giv from deriving any other givs. There are two
5068 sub-cases within this case.
5069
5070 If this is a label, we are concerned with any biv update that is done
5071 conditionally, since it may be done after the giv is defined followed by
5072 a branch here (actually, we need to pass both a jump and a label, but
5073 this extra tracking doesn't seem worth it).
5074
5075 If this is a jump, we are concerned about any biv update that may be
5076 executed multiple times. We are actually only concerned about
5077 backward jumps, but it is probably not worth performing the test
5078 on the jump again here.
5079
5080 If this is a biv update, we must adjust the giv status to show that a
5081 subsequent biv update was performed. If this adjustment cannot be done,
5082 the giv cannot derive further givs. */
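/* (Editorial example.)  If G = 3*B has been defined and the biv
   update B = B + 2 then occurs before a use of G, simplify_giv_expr
   forms the product 3*2 = 6 and stores it as G's derive_adjustment,
   so that a giv later derived from G can compensate for the
   intervening update.  */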
5083
5084 for (bl = loop_iv_list; bl; bl = bl->next)
5085 for (biv = bl->biv; biv; biv = biv->next_iv)
5086 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5087 || biv->insn == p)
5088 {
5089 for (giv = bl->giv; giv; giv = giv->next_iv)
5090 {
5091 /* If cant_derive is already true, there is no point in
5092 checking all of these conditions again. */
5093 if (giv->cant_derive)
5094 continue;
5095
5096 /* If this giv is conditionally set and we have passed a label,
5097 it cannot derive anything. */
5098 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5099 giv->cant_derive = 1;
5100
5101 /* Skip givs that have mult_val == 0, since
5102 they are really invariants. Also skip those that are
5103 replaceable, since we know their lifetime doesn't contain
5104 any biv update. */
5105 else if (giv->mult_val == const0_rtx || giv->replaceable)
5106 continue;
5107
5108 /* The only way we can allow this giv to derive another
5109 is if this is a biv increment and we can form the product
5110 of biv->add_val and giv->mult_val. In this case, we will
5111 be able to compute a compensation. */
5112 else if (biv->insn == p)
5113 {
5114 tem = 0;
5115
5116 if (biv->mult_val == const1_rtx)
5117 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5118 biv->add_val,
5119 giv->mult_val),
5120 &dummy);
5121
5122 if (tem && giv->derive_adjustment)
5123 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5124 giv->derive_adjustment),
5125 &dummy);
5126 if (tem)
5127 giv->derive_adjustment = tem;
5128 else
5129 giv->cant_derive = 1;
5130 }
5131 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5132 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5133 giv->cant_derive = 1;
5134 }
5135 }
5136 }
5137 \f
5138 /* Check whether an insn is an increment legitimate for a basic induction var.
5139 X is the source of insn P, or a part of it.
5140 MODE is the mode in which X should be interpreted.
5141
5142 DEST_REG is the putative biv, also the destination of the insn.
5143 We accept patterns of these forms:
5144 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5145 REG = INVARIANT + REG
5146
5147 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5148 and store the additive term into *INC_VAL.
5149
5150 If X is an assignment of an invariant into DEST_REG, we set
5151 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5152
5153 We also want to detect a BIV when it corresponds to a variable
5154 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5155 of the variable may be a PLUS that adds a SUBREG of that variable to
5156 an invariant and then sign- or zero-extends the result of the PLUS
5157 into the variable.
5158
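   For instance, such an increment might appear as

       (set (reg:SI i) (sign_extend:SI (plus:HI (subreg:HI (reg:SI i) 0)
                                                (const_int 1))))

   where (reg:SI i) holds the promoted variable.
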
5159 Most GIVs in such cases will be in the promoted mode, since that is
5160 probably the natural computation mode (and almost certainly the mode
5161 used for addresses) on the machine. So we view the pseudo-reg containing
5162 the variable as the BIV, as if it were simply incremented.
5163
5164 Note that treating the entire pseudo as a BIV will result in making
5165 simple increments to any GIVs based on it. However, if the variable
5166 overflows in its declared mode but not its promoted mode, the result will
5167 be incorrect. This is acceptable if the variable is signed, since
5168 overflows in such cases are undefined, but not if it is unsigned, since
5169 those overflows are defined. So we only check for SIGN_EXTEND and
5170 not ZERO_EXTEND.
5171
5172 If we cannot find a biv, we return 0. */
5173
5174 static int
5175 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5176 register rtx x;
5177 enum machine_mode mode;
5178 rtx dest_reg;
5179 rtx p;
5180 rtx *inc_val;
5181 rtx *mult_val;
5182 {
5183 register enum rtx_code code;
5184 rtx arg;
5185 rtx insn, set = 0;
5186
5187 code = GET_CODE (x);
5188 switch (code)
5189 {
5190 case PLUS:
5191 if (XEXP (x, 0) == dest_reg
5192 || (GET_CODE (XEXP (x, 0)) == SUBREG
5193 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5194 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5195 arg = XEXP (x, 1);
5196 else if (XEXP (x, 1) == dest_reg
5197 || (GET_CODE (XEXP (x, 1)) == SUBREG
5198 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5199 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5200 arg = XEXP (x, 0);
5201 else
5202 return 0;
5203
5204 if (invariant_p (arg) != 1)
5205 return 0;
5206
5207 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5208 *mult_val = const1_rtx;
5209 return 1;
5210
5211 case SUBREG:
5212 /* If this is a SUBREG for a promoted variable, check the inner
5213 value. */
5214 if (SUBREG_PROMOTED_VAR_P (x))
5215 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5216 dest_reg, p, inc_val, mult_val);
5217 return 0;
5218
5219 case REG:
5220 /* If this register is assigned in the previous insn, look at its
5221 source, but don't go outside the loop or past a label. */
5222
5223 for (insn = PREV_INSN (p);
5224 (insn && GET_CODE (insn) == NOTE
5225 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5226 insn = PREV_INSN (insn))
5227 ;
5228
5229 if (insn)
5230 set = single_set (insn);
5231
5232 if (set != 0
5233 && (SET_DEST (set) == x
5234 || (GET_CODE (SET_DEST (set)) == SUBREG
5235 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5236 <= UNITS_PER_WORD)
5237 && SUBREG_REG (SET_DEST (set)) == x)))
5238 return basic_induction_var (SET_SRC (set),
5239 (GET_MODE (SET_SRC (set)) == VOIDmode
5240 ? GET_MODE (x)
5241 : GET_MODE (SET_SRC (set))),
5242 dest_reg, insn,
5243 inc_val, mult_val);
5244 /* ... fall through ... */
5245
5246 /* Can accept constant setting of biv only when inside innermost loop.
5247 Otherwise, a biv of an inner loop may be incorrectly recognized
5248 as a biv of the outer loop,
5249 causing code to be moved INTO the inner loop. */
5250 case MEM:
5251 if (invariant_p (x) != 1)
5252 return 0;
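      /* ... fall through ... */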
5253 case CONST_INT:
5254 case SYMBOL_REF:
5255 case CONST:
5256 if (loops_enclosed == 1)
5257 {
5258 /* Possible bug here? Perhaps we don't know the mode of X. */
5259 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5260 *mult_val = const0_rtx;
5261 return 1;
5262 }
5263 else
5264 return 0;
5265
5266 case SIGN_EXTEND:
5267 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5268 dest_reg, p, inc_val, mult_val);
5269 case ASHIFTRT:
5270 /* Similar, since this can be a sign extension. */
5271 for (insn = PREV_INSN (p);
5272 (insn && GET_CODE (insn) == NOTE
5273 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5274 insn = PREV_INSN (insn))
5275 ;
5276
5277 if (insn)
5278 set = single_set (insn);
5279
5280 if (set && SET_DEST (set) == XEXP (x, 0)
5281 && GET_CODE (XEXP (x, 1)) == CONST_INT
5282 && INTVAL (XEXP (x, 1)) >= 0
5283 && GET_CODE (SET_SRC (set)) == ASHIFT
5284 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5285 return basic_induction_var (XEXP (SET_SRC (set), 0),
5286 GET_MODE (XEXP (x, 0)),
5287 dest_reg, insn, inc_val, mult_val);
5288 return 0;
5289
5290 default:
5291 return 0;
5292 }
5293 }
5294 \f
5295 /* A general induction variable (giv) is any quantity that is a linear
5296 function of a basic induction variable,
5297 i.e. giv = biv * mult_val + add_val.
5298 The coefficients can be any loop invariant quantity.
5299 A giv need not be computed directly from the biv;
5300 it can be computed by way of other givs. */
5301
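/* For example, if I is a biv stepping by 1, the address computation
   A + I * 4 used to access the 4-byte array element A[I] is a giv
   with MULT_VAL == 4 and ADD_VAL == A.  */
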
5302 /* Determine whether X computes a giv.
5303 If it does, return a nonzero value
5304 which is the benefit from eliminating the computation of X;
5305 set *SRC_REG to the register of the biv that it is computed from;
5306 set *ADD_VAL and *MULT_VAL to the coefficients,
5307 such that the value of X is biv * mult + add; */
5308
5309 static int
5310 general_induction_var (x, src_reg, add_val, mult_val)
5311 rtx x;
5312 rtx *src_reg;
5313 rtx *add_val;
5314 rtx *mult_val;
5315 {
5316 rtx orig_x = x;
5317 int benefit = 0;
5318 char *storage;
5319
5320 /* If this is an invariant, forget it, it isn't a giv. */
5321 if (invariant_p (x) == 1)
5322 return 0;
5323
5324 /* See if the expression could be a giv and get its form.
5325 Mark our place on the obstack in case we don't find a giv. */
5326 storage = (char *) oballoc (0);
5327 x = simplify_giv_expr (x, &benefit);
5328 if (x == 0)
5329 {
5330 obfree (storage);
5331 return 0;
5332 }
5333
5334 switch (GET_CODE (x))
5335 {
5336 case USE:
5337 case CONST_INT:
5338 /* Since this is now an invariant and wasn't before, it must be a giv
5339 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5340 with. */
5341 *src_reg = loop_iv_list->biv->dest_reg;
5342 *mult_val = const0_rtx;
5343 *add_val = x;
5344 break;
5345
5346 case REG:
5347 /* This is equivalent to a BIV. */
5348 *src_reg = x;
5349 *mult_val = const1_rtx;
5350 *add_val = const0_rtx;
5351 break;
5352
5353 case PLUS:
5354 /* Either (plus (biv) (invar)) or
5355 (plus (mult (biv) (invar_1)) (invar_2)). */
5356 if (GET_CODE (XEXP (x, 0)) == MULT)
5357 {
5358 *src_reg = XEXP (XEXP (x, 0), 0);
5359 *mult_val = XEXP (XEXP (x, 0), 1);
5360 }
5361 else
5362 {
5363 *src_reg = XEXP (x, 0);
5364 *mult_val = const1_rtx;
5365 }
5366 *add_val = XEXP (x, 1);
5367 break;
5368
5369 case MULT:
5370 /* ADD_VAL is zero. */
5371 *src_reg = XEXP (x, 0);
5372 *mult_val = XEXP (x, 1);
5373 *add_val = const0_rtx;
5374 break;
5375
5376 default:
5377 abort ();
5378 }
5379
5380 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5381 one unless they are CONST_INTs). */
5382 if (GET_CODE (*add_val) == USE)
5383 *add_val = XEXP (*add_val, 0);
5384 if (GET_CODE (*mult_val) == USE)
5385 *mult_val = XEXP (*mult_val, 0);
5386
5387 benefit += rtx_cost (orig_x, SET);
5388
5389 /* Always return some benefit if this is a giv so it will be detected
5390 as such. This allows elimination of bivs that might otherwise
5391 not be eliminated. */
5392 return benefit == 0 ? 1 : benefit;
5393 }
5394 \f
5395 /* Given an expression, X, try to form it as a linear function of a biv.
5396 We will canonicalize it to be of the form
5397 (plus (mult (BIV) (invar_1))
5398 (invar_2))
5399 with possible degeneracies.
5400
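   For example, if B is a biv, then
       (plus (plus (reg B) (const_int 4)) (mult (reg B) (const_int 2)))
   canonicalizes to
       (plus (mult (reg B) (const_int 3)) (const_int 4)).
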
5401 The invariant expressions must each be of a form that can be used as a
5402 machine operand. We surround them with a USE rtx (a hack, but localized
5403 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5404 routine; it is the caller's responsibility to strip them.
5405
5406 If no such canonicalization is possible (i.e., two bivs are used, or an
5407 expression appears that is neither invariant nor a biv nor a giv), this routine
5408 returns 0.
5409
5410 For a non-zero return, the result will have a code of CONST_INT, USE,
5411 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5412
5413 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5414
5415 static rtx
5416 simplify_giv_expr (x, benefit)
5417 rtx x;
5418 int *benefit;
5419 {
5420 enum machine_mode mode = GET_MODE (x);
5421 rtx arg0, arg1;
5422 rtx tem;
5423
5424 /* If this is not an integer mode, or if we cannot do arithmetic in this
5425 mode, this can't be a giv. */
5426 if (mode != VOIDmode
5427 && (GET_MODE_CLASS (mode) != MODE_INT
5428 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5429 return 0;
5430
5431 switch (GET_CODE (x))
5432 {
5433 case PLUS:
5434 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5435 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5436 if (arg0 == 0 || arg1 == 0)
5437 return 0;
5438
5439 /* Put constant last, CONST_INT last if both constant. */
5440 if ((GET_CODE (arg0) == USE
5441 || GET_CODE (arg0) == CONST_INT)
5442 && GET_CODE (arg1) != CONST_INT)
5443 tem = arg0, arg0 = arg1, arg1 = tem;
5444
5445 /* Handle addition of zero, then addition of an invariant. */
5446 if (arg1 == const0_rtx)
5447 return arg0;
5448 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5449 switch (GET_CODE (arg0))
5450 {
5451 case CONST_INT:
5452 case USE:
5453 /* Both invariant. Only valid if sum is machine operand.
5454 First strip off possible USE on the operands. */
5455 if (GET_CODE (arg0) == USE)
5456 arg0 = XEXP (arg0, 0);
5457
5458 if (GET_CODE (arg1) == USE)
5459 arg1 = XEXP (arg1, 0);
5460
5461 tem = 0;
5462 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5463 {
5464 tem = plus_constant (arg0, INTVAL (arg1));
5465 if (GET_CODE (tem) != CONST_INT)
5466 tem = gen_rtx_USE (mode, tem);
5467 }
5468 else
5469 {
5470 /* Adding two invariants must result in an invariant,
5471 so enclose addition operation inside a USE and
5472 return it. */
5473 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5474 }
5475
5476 return tem;
5477
5478 case REG:
5479 case MULT:
5480 /* biv + invar or mult + invar. Return sum. */
5481 return gen_rtx_PLUS (mode, arg0, arg1);
5482
5483 case PLUS:
5484 /* (a + invar_1) + invar_2. Associate. */
5485 return simplify_giv_expr (gen_rtx_PLUS (mode,
5486 XEXP (arg0, 0),
5487 gen_rtx_PLUS (mode,
5488 XEXP (arg0, 1), arg1)),
5489 benefit);
5490
5491 default:
5492 abort ();
5493 }
5494
5495 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5496 MULT to reduce cases. */
5497 if (GET_CODE (arg0) == REG)
5498 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5499 if (GET_CODE (arg1) == REG)
5500 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5501
5502 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5503 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5504 Recurse to associate the second PLUS. */
5505 if (GET_CODE (arg1) == MULT)
5506 tem = arg0, arg0 = arg1, arg1 = tem;
5507
5508 if (GET_CODE (arg1) == PLUS)
5509 return simplify_giv_expr (gen_rtx_PLUS (mode,
5510 gen_rtx_PLUS (mode, arg0,
5511 XEXP (arg1, 0)),
5512 XEXP (arg1, 1)),
5513 benefit);
5514
5515 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5516 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5517 abort ();
5518
5519 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5520 return 0;
5521
5522 return simplify_giv_expr (gen_rtx_MULT (mode,
5523 XEXP (arg0, 0),
5524 gen_rtx_PLUS (mode,
5525 XEXP (arg0, 1),
5526 XEXP (arg1, 1))),
5527 benefit);
5528
5529 case MINUS:
5530 /* Handle "a - b" as "a + b * (-1)". */
5531 return simplify_giv_expr (gen_rtx_PLUS (mode,
5532 XEXP (x, 0),
5533 gen_rtx_MULT (mode, XEXP (x, 1),
5534 constm1_rtx)),
5535 benefit);
5536
5537 case MULT:
5538 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5539 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5540 if (arg0 == 0 || arg1 == 0)
5541 return 0;
5542
5543 /* Put constant last, CONST_INT last if both constant. */
5544 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5545 && GET_CODE (arg1) != CONST_INT)
5546 tem = arg0, arg0 = arg1, arg1 = tem;
5547
5548 /* If second argument is not now constant, not giv. */
5549 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5550 return 0;
5551
5552 /* Handle multiply by 0 or 1. */
5553 if (arg1 == const0_rtx)
5554 return const0_rtx;
5555
5556 else if (arg1 == const1_rtx)
5557 return arg0;
5558
5559 switch (GET_CODE (arg0))
5560 {
5561 case REG:
5562 /* biv * invar. Done. */
5563 return gen_rtx_MULT (mode, arg0, arg1);
5564
5565 case CONST_INT:
5566 /* Product of two constants. */
5567 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5568
5569 case USE:
5570 /* invar * invar. Not giv. */
5571 return 0;
5572
5573 case MULT:
5574 /* (a * invar_1) * invar_2. Associate. */
5575 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5576 gen_rtx_MULT (mode,
5577 XEXP (arg0, 1),
5578 arg1)),
5579 benefit);
5580
5581 case PLUS:
5582 /* (a + invar_1) * invar_2. Distribute. */
5583 return simplify_giv_expr (gen_rtx_PLUS (mode,
5584 gen_rtx_MULT (mode,
5585 XEXP (arg0, 0),
5586 arg1),
5587 gen_rtx_MULT (mode,
5588 XEXP (arg0, 1),
5589 arg1)),
5590 benefit);
5591
5592 default:
5593 abort ();
5594 }
5595
5596 case ASHIFT:
5597 /* Shift by constant is multiply by power of two. */
5598 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5599 return 0;
5600
5601 return simplify_giv_expr (gen_rtx_MULT (mode,
5602 XEXP (x, 0),
5603 GEN_INT ((HOST_WIDE_INT) 1
5604 << INTVAL (XEXP (x, 1)))),
5605 benefit);
5606
5607 case NEG:
5608 /* "-a" is "a * (-1)" */
5609 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5610 benefit);
5611
5612 case NOT:
5613 /* "~a" is "-a - 1". Silly, but easy. */
5614 return simplify_giv_expr (gen_rtx_MINUS (mode,
5615 gen_rtx_NEG (mode, XEXP (x, 0)),
5616 const1_rtx),
5617 benefit);
5618
5619 case USE:
5620 /* Already in proper form for invariant. */
5621 return x;
5622
5623 case REG:
5624 /* If this is a new register, we can't deal with it. */
5625 if (REGNO (x) >= max_reg_before_loop)
5626 return 0;
5627
5628 /* Check for biv or giv. */
5629 switch (reg_iv_type[REGNO (x)])
5630 {
5631 case BASIC_INDUCT:
5632 return x;
5633 case GENERAL_INDUCT:
5634 {
5635 struct induction *v = reg_iv_info[REGNO (x)];
5636
5637 /* Form expression from giv and add benefit. Ensure this giv
5638 can derive another and subtract any needed adjustment if so. */
5639 *benefit += v->benefit;
5640 if (v->cant_derive)
5641 return 0;
5642
5643 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5644 v->mult_val),
5645 v->add_val);
5646 if (v->derive_adjustment)
5647 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5648 return simplify_giv_expr (tem, benefit);
5649 }
5650
5651 default:
5652 break;
5653 }
5654
5655 /* Fall through to general case. */
5656 default:
5657 /* If invariant, return as USE (unless CONST_INT).
5658 Otherwise, not giv. */
5659 if (GET_CODE (x) == USE)
5660 x = XEXP (x, 0);
5661
5662 if (invariant_p (x) == 1)
5663 {
5664 if (GET_CODE (x) == CONST_INT)
5665 return x;
5666 else
5667 return gen_rtx_USE (mode, x);
5668 }
5669 else
5670 return 0;
5671 }
5672 }
5673 \f
5674 /* Help detect a giv that is calculated by several consecutive insns;
5675 for example,
5676 giv = biv * M
5677 giv = giv + A
5678 The caller has already identified the first insn P as having a giv as dest;
5679 we check that all other insns that set the same register follow
5680 immediately after P, that they alter nothing else,
5681 and that the result of the last is still a giv.
5682
5683 The value is 0 if the reg set in P is not really a giv.
5684 Otherwise, the value is the amount gained by eliminating
5685 all the consecutive insns that compute the value.
5686
5687 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5688 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5689
5690 The coefficients of the ultimate giv value are stored in
5691 *MULT_VAL and *ADD_VAL. */
5692
5693 static int
5694 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5695 add_val, mult_val)
5696 int first_benefit;
5697 rtx p;
5698 rtx src_reg;
5699 rtx dest_reg;
5700 rtx *add_val;
5701 rtx *mult_val;
5702 {
5703 int count;
5704 enum rtx_code code;
5705 int benefit;
5706 rtx temp;
5707 rtx set;
5708
5709 /* Indicate that this is a giv so that we can update the value produced in
5710 each insn of the multi-insn sequence.
5711
5712 This induction structure will be used only by the call to
5713 general_induction_var below, so we can allocate it on our stack.
5714 If this is a giv, our caller will replace the induct var entry with
5715 a new induction structure. */
5716 struct induction *v
5717 = (struct induction *) alloca (sizeof (struct induction));
5718 v->src_reg = src_reg;
5719 v->mult_val = *mult_val;
5720 v->add_val = *add_val;
5721 v->benefit = first_benefit;
5722 v->cant_derive = 0;
5723 v->derive_adjustment = 0;
5724
5725 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5726 reg_iv_info[REGNO (dest_reg)] = v;
5727
5728 count = n_times_set[REGNO (dest_reg)] - 1;
5729
5730 while (count > 0)
5731 {
5732 p = NEXT_INSN (p);
5733 code = GET_CODE (p);
5734
5735 /* If libcall, skip to end of call sequence. */
5736 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5737 p = XEXP (temp, 0);
5738
5739 if (code == INSN
5740 && (set = single_set (p))
5741 && GET_CODE (SET_DEST (set)) == REG
5742 && SET_DEST (set) == dest_reg
5743 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5744 add_val, mult_val))
5745 /* Giv created by equivalent expression. */
5746 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5747 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5748 add_val, mult_val))))
5749 && src_reg == v->src_reg)
5750 {
5751 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5752 benefit += libcall_benefit (p);
5753
5754 count--;
5755 v->mult_val = *mult_val;
5756 v->add_val = *add_val;
5757 v->benefit = benefit;
5758 }
5759 else if (code != NOTE)
5760 {
5761 /* Allow insns that set something other than this giv to a
5762 constant. Such insns are needed on machines which cannot
5763 include long constants and should not disqualify a giv. */
5764 if (code == INSN
5765 && (set = single_set (p))
5766 && SET_DEST (set) != dest_reg
5767 && CONSTANT_P (SET_SRC (set)))
5768 continue;
5769
5770 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5771 return 0;
5772 }
5773 }
5774
5775 return v->benefit;
5776 }
5777 \f
5778 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5779 represented by G1. If no such expression can be found, or it is clear that
5780 it cannot possibly be a valid address, 0 is returned.
5781
5782 To perform the computation, we note that
5783 G1 = a * v + b and
5784 G2 = c * v + d
5785 where `v' is the biv.
5786
5787 So G2 = (c/a) * G1 + (d - b*c/a) */
5788
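/* For example, if G1 = 2*v + 1 and G2 = 4*v + 6, then c/a == 2 and
   d - b*c/a == 6 - 1*2 == 4, so G2 can be computed as 2*G1 + 4.  */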
5789 #ifdef ADDRESS_COST
5790 static rtx
5791 express_from (g1, g2)
5792 struct induction *g1, *g2;
5793 {
5794 rtx mult, add;
5795
5796 /* The value that G1 will be multiplied by must be a constant integer. Also,
5797 the only chance we have of getting a valid address is if b*c/a (see above
5798 for notation) is also an integer. */
5799 if (GET_CODE (g1->mult_val) != CONST_INT
5800 || GET_CODE (g2->mult_val) != CONST_INT
5801 || GET_CODE (g1->add_val) != CONST_INT
5802 || g1->mult_val == const0_rtx
5803 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5804 return 0;
5805
5806 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5807 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5808
5809 /* Form simplified final result. */
5810 if (mult == const0_rtx)
5811 return add;
5812 else if (mult == const1_rtx)
5813 mult = g1->dest_reg;
5814 else
5815 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5816
5817 if (add == const0_rtx)
5818 return mult;
5819 else
5820 return gen_rtx_PLUS (g2->mode, mult, add);
5821 }
5822 #endif
5823 \f
5824 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5825 (either directly or via an address expression) a register used to represent
5826 G1. Set g2->new_reg to a representation of G1 (normally just
5827 g1->dest_reg). */
5828
5829 static int
5830 combine_givs_p (g1, g2)
5831 struct induction *g1, *g2;
5832 {
5833 rtx tem;
5834
5835 /* If these givs are identical, they can be combined. */
5836 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5837 && rtx_equal_p (g1->add_val, g2->add_val))
5838 {
5839 g2->new_reg = g1->dest_reg;
5840 return 1;
5841 }
5842
5843 #ifdef ADDRESS_COST
5844 /* If G2 can be expressed as a function of G1 and that function is valid
5845 as an address and no more expensive than using a register for G2,
5846 the expression of G2 in terms of G1 can be used. */
5847 if (g2->giv_type == DEST_ADDR
5848 && (tem = express_from (g1, g2)) != 0
5849 && memory_address_p (g2->mem_mode, tem)
5850 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5851 {
5852 g2->new_reg = tem;
5853 return 1;
5854 }
5855 #endif
5856
5857 return 0;
5858 }
5859 \f
5860 #ifdef GIV_SORT_CRITERION
5861 /* Compare two givs and sort the most desirable one for combinations first.
5862 This is used only in one qsort call below. */
5863
5864 static int
5865 giv_sort (x, y)
5866 struct induction **x, **y;
5867 {
5868 GIV_SORT_CRITERION (*x, *y);
5869
5870 return 0;
5871 }
5872 #endif
5873
5874 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5875 any other. If so, point SAME to the giv combined with and set NEW_REG to
5876 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5877 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5878
5879 static void
5880 combine_givs (bl)
5881 struct iv_class *bl;
5882 {
5883 struct induction *g1, *g2, **giv_array;
5884 int i, j, giv_count, pass;
5885
5886 /* Count givs, because bl->giv_count is incorrect here. */
5887 giv_count = 0;
5888 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5889 giv_count++;
5890
5891 giv_array
5892 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5893 i = 0;
5894 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5895 giv_array[i++] = g1;
5896
5897 #ifdef GIV_SORT_CRITERION
5898 /* Sort the givs if GIV_SORT_CRITERION is defined.
5899 This is usually defined for processors which lack
5900 negative register offsets so more givs may be combined. */
5901
5902 if (loop_dump_stream)
5903 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5904
5905 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5906 #endif
5907
5908 for (i = 0; i < giv_count; i++)
5909 {
5910 g1 = giv_array[i];
5911 for (pass = 0; pass <= 1; pass++)
5912 for (j = 0; j < giv_count; j++)
5913 {
5914 g2 = giv_array[j];
5915 if (g1 != g2
5916 /* First try to combine with replaceable givs, then all givs. */
5917 && (g1->replaceable || pass == 1)
5918 /* If either has already been combined or is to be ignored, can't
5919 combine. */
5920 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5921 /* If something has been based on G2, G2 cannot itself be based
5922 on something else. */
5923 && ! g2->combined_with
5924 && combine_givs_p (g1, g2))
5925 {
5926 /* g2->new_reg set by `combine_givs_p' */
5927 g2->same = g1;
5928 g1->combined_with = 1;
5929
5930 /* If one of these givs is a DEST_REG that was only used
5931 once, by the other giv, this is actually a single use.
5932 The DEST_REG has the correct cost, while the other giv
5933 counts the REG use too often. */
5934 if (g2->giv_type == DEST_REG
5935 && n_times_used[REGNO (g2->dest_reg)] == 1
5936 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5937 g1->benefit = g2->benefit;
5938 else if (g1->giv_type != DEST_REG
5939 || n_times_used[REGNO (g1->dest_reg)] != 1
5940 || ! reg_mentioned_p (g1->dest_reg,
5941 PATTERN (g2->insn)))
5942 {
5943 g1->benefit += g2->benefit;
5944 g1->times_used += g2->times_used;
5945 }
5946 /* ??? The new final_[bg]iv_value code does a much better job
5947 of finding replaceable giv's, and hence this code may no
5948 longer be necessary. */
5949 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5950 g1->benefit -= copy_cost;
5951 g1->lifetime += g2->lifetime;
5952
5953 if (loop_dump_stream)
5954 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5955 INSN_UID (g2->insn), INSN_UID (g1->insn));
5956 }
5957 }
5958 }
5959 }
5960 \f
5961 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5962
5963 void
5964 emit_iv_add_mult (b, m, a, reg, insert_before)
5965 rtx b; /* initial value of basic induction variable */
5966 rtx m; /* multiplicative constant */
5967 rtx a; /* additive constant */
5968 rtx reg; /* destination register */
5969 rtx insert_before;
5970 {
5971 rtx seq;
5972 rtx result;
5973
5974 /* Prevent unexpected sharing of these rtx. */
5975 a = copy_rtx (a);
5976 b = copy_rtx (b);
5977
5978 /* Increase the lifetime of any invariants moved further in code. */
5979 update_reg_last_use (a, insert_before);
5980 update_reg_last_use (b, insert_before);
5981 update_reg_last_use (m, insert_before);
5982
5983 start_sequence ();
5984 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5985 if (reg != result)
5986 emit_move_insn (reg, result);
5987 seq = gen_sequence ();
5988 end_sequence ();
5989
5990 emit_insn_before (seq, insert_before);
5991
5992 record_base_value (REGNO (reg), b, 0);
5993 }
5994 \f
5995 /* Test whether A * B can be computed without
5996 an actual multiply insn. Value is 1 if so. */
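/* For example, on many machines a multiply by the constant 5 expands
   to a short shift/add sequence, (x << 2) + x, which this function
   considers cheap.  */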
5997
5998 static int
5999 product_cheap_p (a, b)
6000 rtx a;
6001 rtx b;
6002 {
6003 int i;
6004 rtx tmp;
6005 struct obstack *old_rtl_obstack = rtl_obstack;
6006 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6007 int win = 1;
6008
6009 /* If only one is constant, make it B. */
6010 if (GET_CODE (a) == CONST_INT)
6011 tmp = a, a = b, b = tmp;
6012
6013 /* If first constant, both constant, so don't need multiply. */
6014 if (GET_CODE (a) == CONST_INT)
6015 return 1;
6016
6017 /* If second not constant, neither is constant, so would need multiply. */
6018 if (GET_CODE (b) != CONST_INT)
6019 return 0;
6020
6021 /* One operand is constant, so we might not need a multiply insn. Generate
6022 the code for the multiply and see if a call, a multiply insn, or a long
6023 sequence of insns is generated. */
6024
6025 rtl_obstack = &temp_obstack;
6026 start_sequence ();
6027 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6028 tmp = gen_sequence ();
6029 end_sequence ();
6030
6031 if (GET_CODE (tmp) == SEQUENCE)
6032 {
6033 if (XVEC (tmp, 0) == 0)
6034 win = 1;
6035 else if (XVECLEN (tmp, 0) > 3)
6036 win = 0;
6037 else
6038 for (i = 0; i < XVECLEN (tmp, 0); i++)
6039 {
6040 rtx insn = XVECEXP (tmp, 0, i);
6041
6042 if (GET_CODE (insn) != INSN
6043 || (GET_CODE (PATTERN (insn)) == SET
6044 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6045 || (GET_CODE (PATTERN (insn)) == PARALLEL
6046 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6047 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6048 {
6049 win = 0;
6050 break;
6051 }
6052 }
6053 }
6054 else if (GET_CODE (tmp) == SET
6055 && GET_CODE (SET_SRC (tmp)) == MULT)
6056 win = 0;
6057 else if (GET_CODE (tmp) == PARALLEL
6058 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6059 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6060 win = 0;
6061
6062 /* Free any storage we obtained in generating this multiply and restore rtl
6063 allocation to its normal obstack. */
6064 obstack_free (&temp_obstack, storage);
6065 rtl_obstack = old_rtl_obstack;
6066
6067 return win;
6068 }
6069 \f
6070 /* Check to see if loop can be terminated by a "decrement and branch until
6071 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
6072 Also try reversing an increment loop to a decrement loop
6073 to see if the optimization can be performed.
6074 Value is nonzero if optimization was performed. */
6075
6076 /* This is useful even if the architecture doesn't have such an insn,
6077 because it might change a loop that increments from 0 to n into a loop
6078 which decrements from n to 0. A loop that decrements to zero is usually
6079 faster than one that increments from zero. */
6080
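/* For example, a loop counting i = 0, 1, ..., n-1 whose body makes no
   other use of i can be rewritten to count down from n, so that the
   exit test becomes a comparison against zero.  */
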
6081 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6082 such as approx_final_value, biv_total_increment, loop_iterations, and
6083 final_[bg]iv_value. */
6084
6085 static int
6086 check_dbra_loop (loop_end, insn_count, loop_start)
6087 rtx loop_end;
6088 int insn_count;
6089 rtx loop_start;
6090 {
6091 struct iv_class *bl;
6092 rtx reg;
6093 rtx jump_label;
6094 rtx final_value;
6095 rtx start_value;
6096 rtx new_add_val;
6097 rtx comparison;
6098 rtx before_comparison;
6099 rtx p;
6100 rtx jump;
6101 rtx first_compare;
6102 int compare_and_branch;
6103
6104 /* If last insn is a conditional branch, and the insn before tests a
6105 register value, try to optimize it. Otherwise, we can't do anything. */
6106
6107 jump = PREV_INSN (loop_end);
6108 comparison = get_condition_for_loop (jump);
6109 if (comparison == 0)
6110 return 0;
6111
6112 /* Try to compute whether the compare/branch at the loop end is one or
6113 two instructions. */
6114 get_condition (jump, &first_compare);
6115 if (first_compare == jump)
6116 compare_and_branch = 1;
6117 else if (first_compare == prev_nonnote_insn (jump))
6118 compare_and_branch = 2;
6119 else
6120 return 0;
6121
6122 /* Check all of the bivs to see if the compare uses one of them.
6123 Skip bivs set more than once, because we can't guarantee that such
6124 a biv will be zero on the last iteration. Also skip if the biv is
6125 used between its update and the test insn. */
6126
6127 for (bl = loop_iv_list; bl; bl = bl->next)
6128 {
6129 if (bl->biv_count == 1
6130 && bl->biv->dest_reg == XEXP (comparison, 0)
6131 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6132 first_compare))
6133 break;
6134 }
6135
6136 if (! bl)
6137 return 0;
6138
6139 /* Look for the case where the basic induction variable is always
6140 nonnegative, and equals zero on the last iteration.
6141 In this case, add a reg_note REG_NONNEG, which allows the
6142 m68k DBRA instruction to be used. */
6143
6144 if (((GET_CODE (comparison) == GT
6145 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6146 && INTVAL (XEXP (comparison, 1)) == -1)
6147 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6148 && GET_CODE (bl->biv->add_val) == CONST_INT
6149 && INTVAL (bl->biv->add_val) < 0)
6150 {
6151 /* The initial value must be greater than 0, and
6152 init_val % -dec_value must be 0, so that the biv reaches exactly
6153 zero on the last iteration. */
6154
6155 if (GET_CODE (bl->initial_value) == CONST_INT
6156 && INTVAL (bl->initial_value) > 0
6157 && (INTVAL (bl->initial_value)
6158 % (-INTVAL (bl->biv->add_val))) == 0)
6159 {
6160 /* Register always nonnegative; add a REG_NONNEG note to the branch. */
6161 REG_NOTES (PREV_INSN (loop_end))
6162 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6163 REG_NOTES (PREV_INSN (loop_end)));
6164 bl->nonneg = 1;
6165
6166 return 1;
6167 }
6168
6169 /* If the decrement is 1 and the value was tested as >= 0 before
6170 the loop, then we can safely optimize. */
6171 for (p = loop_start; p; p = PREV_INSN (p))
6172 {
6173 if (GET_CODE (p) == CODE_LABEL)
6174 break;
6175 if (GET_CODE (p) != JUMP_INSN)
6176 continue;
6177
6178 before_comparison = get_condition_for_loop (p);
6179 if (before_comparison
6180 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6181 && GET_CODE (before_comparison) == LT
6182 && XEXP (before_comparison, 1) == const0_rtx
6183 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6184 && INTVAL (bl->biv->add_val) == -1)
6185 {
6186 REG_NOTES (PREV_INSN (loop_end))
6187 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6188 REG_NOTES (PREV_INSN (loop_end)));
6189 bl->nonneg = 1;
6190
6191 return 1;
6192 }
6193 }
6194 }
6195 else if (num_mem_sets <= 1)
6196 {
6197 /* Try to change inc to dec, so we can apply the above optimization. */
6198 /* Can do this if:
6199 all registers modified are induction variables or invariant,
6200 all memory references have non-overlapping addresses
6201 (obviously true if only one write),
6202 and we allow 2 insns for the compare/jump at the end of the loop. */
6203 /* Also, we must avoid any instructions which use both the reversed
6204 biv and another biv. Such instructions will fail if the loop is
6205 reversed. We meet this condition by requiring that either
6206 no_use_except_counting is true, or else that there is only
6207 one biv. */
6208 int num_nonfixed_reads = 0;
6209 /* 1 if the iteration var is used only to count iterations. */
6210 int no_use_except_counting = 0;
6211 /* 1 if the loop has no memory store, or it has a single memory store
6212 which is reversible. */
6213 int reversible_mem_store = 1;
6214
6215 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6216 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6217 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6218
6219 if (bl->giv_count == 0
6220 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6221 {
6222 rtx bivreg = regno_reg_rtx[bl->regno];
6223
6224 /* If there are no givs for this biv, and the only exit is the
6225 fall through at the end of the loop, then
6226 see if perhaps there are no uses except to count. */
6227 no_use_except_counting = 1;
6228 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6229 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6230 {
6231 rtx set = single_set (p);
6232
6233 if (set && GET_CODE (SET_DEST (set)) == REG
6234 && REGNO (SET_DEST (set)) == bl->regno)
6235 /* An insn that sets the biv is okay. */
6236 ;
6237 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6238 || p == prev_nonnote_insn (loop_end))
6239 /* Don't bother about the end test. */
6240 ;
6241 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6242 /* Any other use of the biv is no good. */
6243 {
6244 no_use_except_counting = 0;
6245 break;
6246 }
6247 }
6248 }
6249
6250 /* If the loop has a single store, and the destination address is
6251 invariant, then we can't reverse the loop, because this address
6252 might then have the wrong value at loop exit.
6253 This would work if the source were invariant also; however, in that
6254 case, the insn should have been moved out of the loop. */
6255
6256 if (num_mem_sets == 1)
6257 reversible_mem_store
6258 = (! unknown_address_altered
6259 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6260
6261 /* This code only acts for innermost loops. Also it simplifies
6262 the memory address check by only reversing loops with
6263 zero or one memory access.
6264 Two memory accesses could involve parts of the same array,
6265 and that can't be reversed. */
6266
6267 if (num_nonfixed_reads <= 1
6268 && !loop_has_call
6269 && !loop_has_volatile
6270 && reversible_mem_store
6271 && (no_use_except_counting
6272 || ((bl->giv_count + bl->biv_count + num_mem_sets
6273 + num_movables + compare_and_branch == insn_count)
6274 && (bl == loop_iv_list && bl->next == 0))))
6275 {
6276 rtx tem;
6277
6278 /* Loop can be reversed. */
6279 if (loop_dump_stream)
6280 fprintf (loop_dump_stream, "Can reverse loop\n");
6281
6282 /* Now check other conditions:
6283
6284 The increment must be a constant, as must the initial value,
6285 and the comparison code must be LT.
6286
6287 This test can probably be improved since +/- 1 in the constant
6288 can be obtained by changing LT to LE and vice versa; this is
6289 confusing. */
6290
6291 if (comparison
6292 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6293 /* LE gets turned into LT */
6294 && GET_CODE (comparison) == LT
6295 && GET_CODE (bl->initial_value) == CONST_INT)
6296 {
6297 HOST_WIDE_INT add_val, comparison_val;
6298 rtx initial_value;
6299
6300 add_val = INTVAL (bl->biv->add_val);
6301 comparison_val = INTVAL (XEXP (comparison, 1));
6302 initial_value = bl->initial_value;
6303
6304 /* Normalize the initial value if it is an integer and
6305 has no other use except as a counter. This will allow
6306 a few more loops to be reversed. */
6307 if (no_use_except_counting
6308 && GET_CODE (initial_value) == CONST_INT)
6309 {
6310 comparison_val = comparison_val - INTVAL (bl->initial_value);
6311 /* Check for overflow. If comparison_val ends up as a
6312 negative value, then we can't reverse the loop. */
6313 if (comparison_val >= 0)
6314 initial_value = const0_rtx;
6315 }
6316
6317 /* If the initial value is not zero, or if the comparison
6318 value is not an exact multiple of the increment, then we
6319 cannot reverse this loop. */
6320 if (initial_value != const0_rtx
6321 || (comparison_val % add_val) != 0)
6322 return 0;
6323
6324 /* Reset these in case we normalized the initial value
6325 and comparison value above. */
6326 bl->initial_value = initial_value;
6327 XEXP (comparison, 1) = GEN_INT (comparison_val);
6328
6329 /* Register will always be nonnegative, with value
6330 0 on the last iteration if the loop is reversed. */
6331
6332 /* Save some info needed to produce the new insns. */
6333 reg = bl->biv->dest_reg;
6334 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6335 if (jump_label == pc_rtx)
6336 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6337 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6338
6339 final_value = XEXP (comparison, 1);
6340 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6341 - INTVAL (bl->biv->add_val));
6342
6343 /* Initialize biv to start_value before loop start.
6344 The old initializing insn will be deleted as a
6345 dead store by flow.c. */
6346 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6347
6348 /* Add insn to decrement register, and delete insn
6349 that incremented the register. */
6350 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6351 bl->biv->insn);
6352 delete_insn (bl->biv->insn);
6353
6354 /* Update biv info to reflect its new status. */
6355 bl->biv->insn = p;
6356 bl->initial_value = start_value;
6357 bl->biv->add_val = new_add_val;
6358
6359 /* Inc LABEL_NUSES so that delete_insn will
6360 not delete the label. */
6361 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6362
6363 /* Emit an insn after the end of the loop to set the biv's
6364 proper exit value if it is used anywhere outside the loop. */
6365 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
6366 || ! bl->init_insn
6367 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6368 emit_insn_after (gen_move_insn (reg, final_value),
6369 loop_end);
6370
6371 /* Delete compare/branch at end of loop. */
6372 delete_insn (PREV_INSN (loop_end));
6373 if (compare_and_branch == 2)
6374 delete_insn (first_compare);
6375
6376 /* Add new compare/branch insn at end of loop. */
6377 start_sequence ();
6378 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6379 GET_MODE (reg), 0, 0);
6380 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6381 tem = gen_sequence ();
6382 end_sequence ();
6383 emit_jump_insn_before (tem, loop_end);
6384
6385 for (tem = PREV_INSN (loop_end);
6386 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6387 ;
6388 if (tem)
6389 {
6390 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6391
6392 /* Increment of LABEL_NUSES done above. */
6393 /* Register is now always nonnegative,
6394 so add REG_NONNEG note to the branch. */
6395 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6396 REG_NOTES (tem));
6397 }
6398
6399 bl->nonneg = 1;
6400
6401 /* Mark that this biv has been reversed. Each giv which depends
6402 on this biv, and which is also live past the end of the loop
6403 will have to be fixed up. */
6404
6405 bl->reversed = 1;
6406
6407 if (loop_dump_stream)
6408 fprintf (loop_dump_stream,
6409 "Reversed loop and added reg_nonneg\n");
6410
6411 return 1;
6412 }
6413 }
6414 }
6415
6416 return 0;
6417 }
6418 \f
6419 /* Verify whether the biv BL appears to be eliminable,
6420 based on the insns in the loop that refer to it.
6421 LOOP_START is the first insn of the loop, and END is the end insn.
6422
6423 If ELIMINATE_P is non-zero, actually do the elimination.
6424
6425 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6426 determine whether invariant insns should be placed inside or at the
6427 start of the loop. */
6428
6429 static int
6430 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6431 struct iv_class *bl;
6432 rtx loop_start;
6433 rtx end;
6434 int eliminate_p;
6435 int threshold, insn_count;
6436 {
6437 rtx reg = bl->biv->dest_reg;
6438 rtx p;
6439
6440 /* Scan all insns in the loop, stopping if we find one that uses the
6441 biv in a way that we cannot eliminate. */
6442
6443 for (p = loop_start; p != end; p = NEXT_INSN (p))
6444 {
6445 enum rtx_code code = GET_CODE (p);
6446 rtx where = threshold >= insn_count ? loop_start : p;
6447
6448 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6449 && reg_mentioned_p (reg, PATTERN (p))
6450 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6451 {
6452 if (loop_dump_stream)
6453 fprintf (loop_dump_stream,
6454 "Cannot eliminate biv %d: biv used in insn %d.\n",
6455 bl->regno, INSN_UID (p));
6456 break;
6457 }
6458 }
6459
6460 if (p == end)
6461 {
6462 if (loop_dump_stream)
6463 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6464 bl->regno, eliminate_p ? "was" : "can be");
6465 return 1;
6466 }
6467
6468 return 0;
6469 }
6470 \f
6471 /* If BL appears in X (part of the pattern of INSN), see if we can
6472 eliminate its use. If so, return 1. If not, return 0.
6473
6474 If the biv of BL does not appear in X, return 1.
6475
6476 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6477 where extra insns should be added. Depending on how many items have been
6478 moved out of the loop, it will either be before INSN or at the start of
6479 the loop. */
6480
6481 static int
6482 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6483 rtx x, insn;
6484 struct iv_class *bl;
6485 int eliminate_p;
6486 rtx where;
6487 {
6488 enum rtx_code code = GET_CODE (x);
6489 rtx reg = bl->biv->dest_reg;
6490 enum machine_mode mode = GET_MODE (reg);
6491 struct induction *v;
6492 rtx arg, tem;
6493 #ifdef HAVE_cc0
6494 rtx new;
6495 #endif
6496 int arg_operand;
6497 char *fmt;
6498 int i, j;
6499
6500 switch (code)
6501 {
6502 case REG:
6503 /* If we haven't already been able to do something with this BIV,
6504 we can't eliminate it. */
6505 if (x == reg)
6506 return 0;
6507 return 1;
6508
6509 case SET:
6510 /* If this sets the BIV, it is not a problem. */
6511 if (SET_DEST (x) == reg)
6512 return 1;
6513
6514 /* If this is an insn that defines a giv, it is also ok because
6515 it will go away when the giv is reduced. */
6516 for (v = bl->giv; v; v = v->next_iv)
6517 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6518 return 1;
6519
6520 #ifdef HAVE_cc0
6521 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6522 {
6523 /* Can replace with any giv that was reduced and
6524 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6525 Require a constant for MULT_VAL, so we know it's nonzero.
6526 ??? We disable this optimization to avoid potential
6527 overflows. */
6528
6529 for (v = bl->giv; v; v = v->next_iv)
6530 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6531 && v->add_val == const0_rtx
6532 && ! v->ignore && ! v->maybe_dead && v->always_computable
6533 && v->mode == mode
6534 && 0)
6535 {
6536 /* If the giv V had the auto-inc address optimization applied
6537 to it, and INSN occurs between the giv insn and the biv
6538 insn, then we must adjust the value used here.
6539 This is rare, so we don't bother to do so. */
6540 if (v->auto_inc_opt
6541 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6542 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6543 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6544 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6545 continue;
6546
6547 if (! eliminate_p)
6548 return 1;
6549
6550 /* If the giv has the opposite direction of change,
6551 then reverse the comparison. */
6552 if (INTVAL (v->mult_val) < 0)
6553 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6554 const0_rtx, v->new_reg);
6555 else
6556 new = v->new_reg;
6557
6558 /* We can probably test that giv's reduced reg. */
6559 if (validate_change (insn, &SET_SRC (x), new, 0))
6560 return 1;
6561 }
6562
6563 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6564 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6565 Require a constant for MULT_VAL, so we know it's nonzero.
6566 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6567 overflow problem. */
6568
6569 for (v = bl->giv; v; v = v->next_iv)
6570 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6571 && ! v->ignore && ! v->maybe_dead && v->always_computable
6572 && v->mode == mode
6573 && (GET_CODE (v->add_val) == SYMBOL_REF
6574 || GET_CODE (v->add_val) == LABEL_REF
6575 || GET_CODE (v->add_val) == CONST
6576 || (GET_CODE (v->add_val) == REG
6577 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6578 {
6579 /* If the giv V had the auto-inc address optimization applied
6580 to it, and INSN occurs between the giv insn and the biv
6581 insn, then we must adjust the value used here.
6582 This is rare, so we don't bother to do so. */
6583 if (v->auto_inc_opt
6584 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6585 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6586 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6587 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6588 continue;
6589
6590 if (! eliminate_p)
6591 return 1;
6592
6593 /* If the giv has the opposite direction of change,
6594 then reverse the comparison. */
6595 if (INTVAL (v->mult_val) < 0)
6596 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6597 v->new_reg);
6598 else
6599 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6600 copy_rtx (v->add_val));
6601
6602 /* Replace biv with the giv's reduced register. */
6603 update_reg_last_use (v->add_val, insn);
6604 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6605 return 1;
6606
6607 /* Insn doesn't support that constant or invariant. Copy it
6608 into a register (it will be a loop invariant). */
6609 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6610
6611 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6612 where);
6613
6614 /* Substitute the new register for its invariant value in
6615 the compare expression. */
6616 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6617 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6618 return 1;
6619 }
6620 }
6621 #endif
6622 break;
6623
6624 case COMPARE:
6625 case EQ: case NE:
6626 case GT: case GE: case GTU: case GEU:
6627 case LT: case LE: case LTU: case LEU:
6628 /* See if either argument is the biv. */
6629 if (XEXP (x, 0) == reg)
6630 arg = XEXP (x, 1), arg_operand = 1;
6631 else if (XEXP (x, 1) == reg)
6632 arg = XEXP (x, 0), arg_operand = 0;
6633 else
6634 break;
6635
6636 if (CONSTANT_P (arg))
6637 {
6638 /* First try to replace with any giv that has constant positive
6639 mult_val and constant add_val. We might be able to support
6640 negative mult_val, but it seems complex to do it in general. */
6641
6642 for (v = bl->giv; v; v = v->next_iv)
6643 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6644 && (GET_CODE (v->add_val) == SYMBOL_REF
6645 || GET_CODE (v->add_val) == LABEL_REF
6646 || GET_CODE (v->add_val) == CONST
6647 || (GET_CODE (v->add_val) == REG
6648 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6649 && ! v->ignore && ! v->maybe_dead && v->always_computable
6650 && v->mode == mode)
6651 {
6652 /* If the giv V had the auto-inc address optimization applied
6653 to it, and INSN occurs between the giv insn and the biv
6654 insn, then we must adjust the value used here.
6655 This is rare, so we don't bother to do so. */
6656 if (v->auto_inc_opt
6657 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6658 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6659 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6660 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6661 continue;
6662
6663 if (! eliminate_p)
6664 return 1;
6665
6666 /* Replace biv with the giv's reduced reg. */
6667 XEXP (x, 1-arg_operand) = v->new_reg;
6668
6669 /* If all constants are actually constant integers and
6670 the derived constant can be directly placed in the COMPARE,
6671 do so. */
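		  /* E.g. if V is the giv 4*B + 8 and the original test was
		     (lt (reg B) (const_int 100)), the new test becomes
		     (lt V->new_reg (const_int 408)), since MULT_VAL > 0.  */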
6672 if (GET_CODE (arg) == CONST_INT
6673 && GET_CODE (v->mult_val) == CONST_INT
6674 && GET_CODE (v->add_val) == CONST_INT
6675 && validate_change (insn, &XEXP (x, arg_operand),
6676 GEN_INT (INTVAL (arg)
6677 * INTVAL (v->mult_val)
6678 + INTVAL (v->add_val)), 0))
6679 return 1;
6680
6681 /* Otherwise, load it into a register. */
6682 tem = gen_reg_rtx (mode);
6683 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6684 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6685 return 1;
6686
6687 /* If that failed, put back the change we made above. */
6688 XEXP (x, 1-arg_operand) = reg;
6689 }
6690
6691 /* Look for giv with positive constant mult_val and nonconst add_val.
6692 Insert insns to calculate new compare value.
6693 ??? Turn this off due to possible overflow. */
6694
6695 for (v = bl->giv; v; v = v->next_iv)
6696 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6697 && ! v->ignore && ! v->maybe_dead && v->always_computable
6698 && v->mode == mode
6699 && 0)
6700 {
6701 rtx tem;
6702
6703 /* If the giv V had the auto-inc address optimization applied
6704 to it, and INSN occurs between the giv insn and the biv
6705 insn, then we must adjust the value used here.
6706 This is rare, so we don't bother to do so. */
6707 if (v->auto_inc_opt
6708 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6709 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6710 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6711 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6712 continue;
6713
6714 if (! eliminate_p)
6715 return 1;
6716
6717 tem = gen_reg_rtx (mode);
6718
6719 /* Replace biv with giv's reduced register. */
6720 validate_change (insn, &XEXP (x, 1 - arg_operand),
6721 v->new_reg, 1);
6722
6723 /* Compute value to compare against. */
6724 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6725 /* Use it in this insn. */
6726 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6727 if (apply_change_group ())
6728 return 1;
6729 }
6730 }
6731 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6732 {
6733 if (invariant_p (arg) == 1)
6734 {
6735 /* Look for giv with constant positive mult_val and nonconst
6736 add_val. Insert insns to compute new compare value.
6737 ??? Turn this off due to possible overflow. */
6738
6739 for (v = bl->giv; v; v = v->next_iv)
6740 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6741 && ! v->ignore && ! v->maybe_dead && v->always_computable
6742 && v->mode == mode
6743 && 0)
6744 {
6745 rtx tem;
6746
6747 /* If the giv V had the auto-inc address optimization applied
6748 to it, and INSN occurs between the giv insn and the biv
6749 insn, then we must adjust the value used here.
6750 This is rare, so we don't bother to do so. */
6751 if (v->auto_inc_opt
6752 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6753 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6754 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6755 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6756 continue;
6757
6758 if (! eliminate_p)
6759 return 1;
6760
6761 tem = gen_reg_rtx (mode);
6762
6763 /* Replace biv with giv's reduced register. */
6764 validate_change (insn, &XEXP (x, 1 - arg_operand),
6765 v->new_reg, 1);
6766
6767 /* Compute value to compare against. */
6768 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6769 tem, where);
6770 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6771 if (apply_change_group ())
6772 return 1;
6773 }
6774 }
6775
6776 /* This code has problems. Basically, when deciding whether we
6777 will be able to eliminate BL, we can't know whether a particular
6778 giv of ARG will be reduced. If it isn't going to be reduced,
6779 we can't eliminate BL. We can try forcing it to be reduced,
6780 but that can generate poor code.
6781
6782 The problem is that the benefit of reducing TV, below, should
6783 be increased if BL can actually be eliminated, but this means
6784 we might have to do a topological sort of the order in which
6785 we try to process bivs. It doesn't seem worthwhile to do
6786 this sort of thing now. */
6787
6788 #if 0
6789 /* Otherwise the reg compared with had better be a biv. */
6790 if (GET_CODE (arg) != REG
6791 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6792 return 0;
6793
6794 /* Look for a pair of givs, one for each biv,
6795 with identical coefficients. */
6796 for (v = bl->giv; v; v = v->next_iv)
6797 {
6798 struct induction *tv;
6799
6800 if (v->ignore || v->maybe_dead || v->mode != mode)
6801 continue;
6802
6803 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6804 if (! tv->ignore && ! tv->maybe_dead
6805 && rtx_equal_p (tv->mult_val, v->mult_val)
6806 && rtx_equal_p (tv->add_val, v->add_val)
6807 && tv->mode == mode)
6808 {
6809 /* If the giv V had the auto-inc address optimization applied
6810 to it, and INSN occurs between the giv insn and the biv
6811 insn, then we must adjust the value used here.
6812 This is rare, so we don't bother to do so. */
6813 if (v->auto_inc_opt
6814 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6815 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6816 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6817 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6818 continue;
6819
6820 if (! eliminate_p)
6821 return 1;
6822
6823 /* Replace biv with its giv's reduced reg. */
6824 XEXP (x, 1-arg_operand) = v->new_reg;
6825 /* Replace other operand with the other giv's
6826 reduced reg. */
6827 XEXP (x, arg_operand) = tv->new_reg;
6828 return 1;
6829 }
6830 }
6831 #endif
6832 }
6833
6834 /* If we get here, the biv can't be eliminated. */
6835 return 0;
6836
6837 case MEM:
6838 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6839 biv is used in it, since it will be replaced. */
6840 for (v = bl->giv; v; v = v->next_iv)
6841 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6842 return 1;
6843 break;
6844
6845 default:
6846 break;
6847 }
6848
6849 /* See if any subexpression fails elimination. */
6850 fmt = GET_RTX_FORMAT (code);
6851 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6852 {
6853 switch (fmt[i])
6854 {
6855 case 'e':
6856 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6857 eliminate_p, where))
6858 return 0;
6859 break;
6860
6861 case 'E':
6862 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6863 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6864 eliminate_p, where))
6865 return 0;
6866 break;
6867 }
6868 }
6869
6870 return 1;
6871 }
6872 \f
6873 /* Return nonzero if the last use of REG is in INSN or an insn
6874 following INSN in the same basic block. */
6875
6876 static int
6877 last_use_this_basic_block (reg, insn)
6878 rtx reg;
6879 rtx insn;
6880 {
6881 rtx n;
6882 for (n = insn;
6883 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6884 n = NEXT_INSN (n))
6885 {
6886 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6887 return 1;
6888 }
6889 return 0;
6890 }
6891 \f
6892 /* Called via `note_stores' to record the initial value of a biv. Here we
6893 just record the location of the set and process it later. */
6894
6895 static void
6896 record_initial (dest, set)
6897 rtx dest;
6898 rtx set;
6899 {
6900 struct iv_class *bl;
6901
6902 if (GET_CODE (dest) != REG
6903 || REGNO (dest) >= max_reg_before_loop
6904 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6905 return;
6906
6907 bl = reg_biv_class[REGNO (dest)];
6908
6909 /* If this is the first set found, record it. */
6910 if (bl->init_insn == 0)
6911 {
6912 bl->init_insn = note_insn;
6913 bl->init_set = set;
6914 }
6915 }
6916 \f
6917 /* If any of the registers in X are "old" and currently have a last use earlier
6918 than INSN, update them to have a last use of INSN. Their actual last use
6919 will be the previous insn but it will not have a valid uid_luid so we can't
6920 use it. */
6921
6922 static void
6923 update_reg_last_use (x, insn)
6924 rtx x;
6925 rtx insn;
6926 {
6927 /* Check for the case where INSN does not have a valid luid. In this case,
6928 there is no need to modify the regno_last_uid, as this can only happen
6929 when code is inserted after the loop_end to set a pseudo's final value,
6930 and hence this insn will never be the last use of x. */
6931 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6932 && INSN_UID (insn) < max_uid_for_loop
6933 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6934 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6935 else
6936 {
6937 register int i, j;
6938 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6939 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6940 {
6941 if (fmt[i] == 'e')
6942 update_reg_last_use (XEXP (x, i), insn);
6943 else if (fmt[i] == 'E')
6944 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6945 update_reg_last_use (XVECEXP (x, i, j), insn);
6946 }
6947 }
6948 }
6949 \f
6950 /* Given a jump insn JUMP, return the condition that will cause it to branch
6951 to its JUMP_LABEL. If the condition cannot be understood, or is an
6952 inequality floating-point comparison which needs to be reversed, 0 will
6953 be returned.
6954
6955 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6956 insn used in locating the condition was found. If a replacement test
6957 of the condition is desired, it should be placed in front of that
6958 insn and we will be sure that the inputs are still valid.
6959
6960 The condition will be returned in a canonical form to simplify testing by
6961 callers. Specifically:
6962
6963 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6964 (2) Both operands will be machine operands; (cc0) will have been replaced.
6965 (3) If an operand is a constant, it will be the second operand.
6966 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6967 for GE, GEU, and LEU. */
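/* As an illustrative sketch (hypothetical operands, not taken from this
file): under rule (4), a jump testing (le (reg 60) (const_int 4)) is
returned as (lt (reg 60) (const_int 5)); under rule (3), a jump testing
(gt (const_int 0) (reg 60)) is returned as (lt (reg 60) (const_int 0)),
since swapping the operands also swaps the condition. */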
6968
6969 rtx
6970 get_condition (jump, earliest)
6971 rtx jump;
6972 rtx *earliest;
6973 {
6974 enum rtx_code code;
6975 rtx prev = jump;
6976 rtx set;
6977 rtx tem;
6978 rtx op0, op1;
6979 int reverse_code = 0;
6980 int did_reverse_condition = 0;
6981 enum machine_mode mode;
6982
6983 /* If this is not a standard conditional jump, we can't parse it. */
6984 if (GET_CODE (jump) != JUMP_INSN
6985 || ! condjump_p (jump) || simplejump_p (jump))
6986 return 0;
6987
6988 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6989 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6990 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6991 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6992
6993 if (earliest)
6994 *earliest = jump;
6995
6996 /* If this branches to JUMP_LABEL when the condition is false, reverse
6997 the condition. */
6998 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6999 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7000 code = reverse_condition (code), did_reverse_condition ^= 1;
7001
7002 /* If we are comparing a register with zero, see if the register is set
7003 in the previous insn to a COMPARE or a comparison operation. Perform
7004 the same tests that find_comparison_args in cse.c performs, as a
7005 function of STORE_FLAG_VALUE. */
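/* For instance (hypothetical insns, assuming a target without CCmode
comparisons), given

(set (reg 70) (compare (reg 60) (reg 61)))
(set (pc) (if_then_else (ne (reg 70) (const_int 0)) ...))

the loop below starts from the jump's (ne (reg 70) (const_int 0)),
walks back to the insn containing the COMPARE, and finally yields
(ne (reg 60) (reg 61)). */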
7006
7007 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7008 {
7009 /* Set non-zero when we find something of interest. */
7010 rtx x = 0;
7011
7012 #ifdef HAVE_cc0
7013 /* If comparison with cc0, import actual comparison from compare
7014 insn. */
7015 if (op0 == cc0_rtx)
7016 {
7017 if ((prev = prev_nonnote_insn (prev)) == 0
7018 || GET_CODE (prev) != INSN
7019 || (set = single_set (prev)) == 0
7020 || SET_DEST (set) != cc0_rtx)
7021 return 0;
7022
7023 op0 = SET_SRC (set);
7024 op1 = CONST0_RTX (GET_MODE (op0));
7025 if (earliest)
7026 *earliest = prev;
7027 }
7028 #endif
7029
7030 /* If this is a COMPARE, pick up the two things being compared. */
7031 if (GET_CODE (op0) == COMPARE)
7032 {
7033 op1 = XEXP (op0, 1);
7034 op0 = XEXP (op0, 0);
7035 continue;
7036 }
7037 else if (GET_CODE (op0) != REG)
7038 break;
7039
7040 /* Go back to the previous insn. Stop if it is not an INSN. We also
7041 stop if it isn't a single set or if it has a REG_INC note because
7042 we don't want to bother dealing with it. */
7043
7044 if ((prev = prev_nonnote_insn (prev)) == 0
7045 || GET_CODE (prev) != INSN
7046 || FIND_REG_INC_NOTE (prev, 0)
7047 || (set = single_set (prev)) == 0)
7048 break;
7049
7050 /* If this is setting OP0, get what it sets it to if it looks
7051 relevant. */
7052 if (rtx_equal_p (SET_DEST (set), op0))
7053 {
7054 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7055
7056 /* ??? We may not combine comparisons done in a CCmode with
7057 comparisons not done in a CCmode. This is to aid targets
7058 like Alpha that have an IEEE compliant EQ instruction, and
7059 a non-IEEE compliant BEQ instruction. The use of CCmode is
7060 actually artificial, simply to prevent the combination, but
7061 should not affect other platforms. */
7062
7063 if ((GET_CODE (SET_SRC (set)) == COMPARE
7064 || (((code == NE
7065 || (code == LT
7066 && GET_MODE_CLASS (inner_mode) == MODE_INT
7067 && (GET_MODE_BITSIZE (inner_mode)
7068 <= HOST_BITS_PER_WIDE_INT)
7069 && (STORE_FLAG_VALUE
7070 & ((HOST_WIDE_INT) 1
7071 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7072 #ifdef FLOAT_STORE_FLAG_VALUE
7073 || (code == LT
7074 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7075 && FLOAT_STORE_FLAG_VALUE < 0)
7076 #endif
7077 ))
7078 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7079 && ((GET_MODE_CLASS (mode) == MODE_CC)
7080 == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7081 x = SET_SRC (set);
7082 else if (((code == EQ
7083 || (code == GE
7084 && (GET_MODE_BITSIZE (inner_mode)
7085 <= HOST_BITS_PER_WIDE_INT)
7086 && GET_MODE_CLASS (inner_mode) == MODE_INT
7087 && (STORE_FLAG_VALUE
7088 & ((HOST_WIDE_INT) 1
7089 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7090 #ifdef FLOAT_STORE_FLAG_VALUE
7091 || (code == GE
7092 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7093 && FLOAT_STORE_FLAG_VALUE < 0)
7094 #endif
7095 ))
7096 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7097 && ((GET_MODE_CLASS (mode) == MODE_CC)
7098 == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7099 {
7100 /* We might have reversed a LT to get a GE here. But this wasn't
7101 actually the comparison of data, so we don't flag that we
7102 have had to reverse the condition. */
7103 did_reverse_condition ^= 1;
7104 reverse_code = 1;
7105 x = SET_SRC (set);
7106 }
7107 else
7108 break;
7109 }
7110
7111 else if (reg_set_p (op0, prev))
7112 /* If this sets OP0, but not directly, we have to give up. */
7113 break;
7114
7115 if (x)
7116 {
7117 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7118 code = GET_CODE (x);
7119 if (reverse_code)
7120 {
7121 code = reverse_condition (code);
7122 did_reverse_condition ^= 1;
7123 reverse_code = 0;
7124 }
7125
7126 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7127 if (earliest)
7128 *earliest = prev;
7129 }
7130 }
7131
7132 /* If constant is first, put it last. */
7133 if (CONSTANT_P (op0))
7134 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7135
7136 /* If OP0 is the result of a comparison, we weren't able to find what
7137 was really being compared, so fail. */
7138 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7139 return 0;
7140
7141 /* Canonicalize any ordered comparison with an integer constant that
7142 involves equality (LE, GE, LEU, GEU) into a strict comparison, provided
7143 we can adjust the constant in the relevant mode without overflow. */
7144
7145 if (GET_CODE (op1) == CONST_INT
7146 && GET_MODE (op0) != VOIDmode
7147 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7148 {
7149 HOST_WIDE_INT const_val = INTVAL (op1);
7150 unsigned HOST_WIDE_INT uconst_val = const_val;
7151 unsigned HOST_WIDE_INT max_val
7152 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7153
7154 switch (code)
7155 {
7156 case LE:
7157 if (const_val != max_val >> 1)
7158 code = LT, op1 = GEN_INT (const_val + 1);
7159 break;
7160
7161 /* When cross-compiling, const_val might be sign-extended from
7162 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
7163 case GE:
7164 if ((const_val & max_val)
7165 != (((HOST_WIDE_INT) 1
7166 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7167 code = GT, op1 = GEN_INT (const_val - 1);
7168 break;
7169
7170 case LEU:
7171 if (uconst_val < max_val)
7172 code = LTU, op1 = GEN_INT (uconst_val + 1);
7173 break;
7174
7175 case GEU:
7176 if (uconst_val != 0)
7177 code = GTU, op1 = GEN_INT (uconst_val - 1);
7178 break;
7179
7180 default:
7181 break;
7182 }
7183 }
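/* A worked example of the switch above (hypothetical SImode values):
(leu x 7) becomes (ltu x 8) since 7 < 0xffffffff, and (geu x 7)
becomes (gtu x 6) since 7 != 0; (leu x 0xffffffff) is left unchanged
because adding one to the constant would wrap around. */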
7184
7185 /* If this was floating-point and we reversed anything other than an
7186 EQ or NE, return zero. */
7187 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7188 && did_reverse_condition && code != NE && code != EQ
7189 && ! flag_fast_math
7190 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7191 return 0;
7192
7193 #ifdef HAVE_cc0
7194 /* Never return CC0; return zero instead. */
7195 if (op0 == cc0_rtx)
7196 return 0;
7197 #endif
7198
7199 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7200 }
7201
7202 /* Similar to above routine, except that we also put an invariant last
7203 unless both operands are invariants. */
7204
7205 rtx
7206 get_condition_for_loop (x)
7207 rtx x;
7208 {
7209 rtx comparison = get_condition (x, NULL_PTR);
7210
7211 if (comparison == 0
7212 || ! invariant_p (XEXP (comparison, 0))
7213 || invariant_p (XEXP (comparison, 1)))
7214 return comparison;
7215
7216 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7217 XEXP (comparison, 1), XEXP (comparison, 0));
7218 }
7219
7220 #ifdef HAIFA
7221 /* Analyze a loop in order to instrument it with the use of a count register.
7222 loop_start and loop_end are the first and last insns of the loop.
7223 This function works in cooperation with insert_bct ().
7224 loop_can_insert_bct[loop_num] is set according to whether the optimization
7225 is applicable to the loop. When it is applicable, the following variables
7226 are also set:
7227 loop_start_value[loop_num]
7228 loop_comparison_value[loop_num]
7229 loop_increment[loop_num]
7230 loop_comparison_code[loop_num] */
7231
7232 #ifdef HAVE_decrement_and_branch_on_count
7233 static void
7234 analyze_loop_iterations (loop_start, loop_end)
7235 rtx loop_start, loop_end;
7236 {
7237 rtx comparison, comparison_value;
7238 rtx iteration_var, initial_value, increment;
7239 enum rtx_code comparison_code;
7240
7241 rtx last_loop_insn;
7242 rtx insn;
7243 int i;
7244
7245 /* loop_variable mode */
7246 enum machine_mode original_mode;
7247
7248 /* find the number of the loop */
7249 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7250
7251 /* we change our mind only when we are sure that the loop will be instrumented */
7252 loop_can_insert_bct[loop_num] = 0;
7253
7254 /* is the optimization suppressed? */
7255 if ( !flag_branch_on_count_reg )
7256 return;
7257
7258 /* make sure that count-reg is not in use */
7259 if (loop_used_count_register[loop_num]){
7260 if (loop_dump_stream)
7261 fprintf (loop_dump_stream,
7262 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7263 loop_num);
7264 return;
7265 }
7266
7267 /* make sure that the function has no indirect jumps. */
7268 if (indirect_jump_in_function){
7269 if (loop_dump_stream)
7270 fprintf (loop_dump_stream,
7271 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7272 loop_num);
7273 return;
7274 }
7275
7276 /* make sure that the last loop insn is a conditional jump */
7277 last_loop_insn = PREV_INSN (loop_end);
7278 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7279 if (loop_dump_stream)
7280 fprintf (loop_dump_stream,
7281 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7282 loop_num);
7283 return;
7284 }
7285
7286 /* First find the iteration variable. If the last insn is a conditional
7287 branch, and the insn preceding it tests a register value, make that
7288 register the iteration variable. */
7289
7290 /* We used to use prev_nonnote_insn here, but that fails because it might
7291 accidentally get the branch for a contained loop if the branch for this
7292 loop was deleted. We can only trust branches immediately before the
7293 loop_end. */
7294
7295 comparison = get_condition_for_loop (last_loop_insn);
7296 /* ??? Get_condition may switch position of induction variable and
7297 invariant register when it canonicalizes the comparison. */
7298
7299 if (comparison == 0) {
7300 if (loop_dump_stream)
7301 fprintf (loop_dump_stream,
7302 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7303 loop_num);
7304 return;
7305 }
7306
7307 comparison_code = GET_CODE (comparison);
7308 iteration_var = XEXP (comparison, 0);
7309 comparison_value = XEXP (comparison, 1);
7310
7311 original_mode = GET_MODE (iteration_var);
7312 if (GET_MODE_CLASS (original_mode) != MODE_INT
7313 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7314 if (loop_dump_stream)
7315 fprintf (loop_dump_stream,
7316 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7317 loop_num);
7318 return;
7319 }
7320
7321 /* get info about loop bounds and increment */
7322 iteration_info (iteration_var, &initial_value, &increment,
7323 loop_start, loop_end);
7324
7325 /* make sure that all required loop data were found */
7326 if (!(initial_value && increment && comparison_value
7327 && invariant_p (comparison_value) && invariant_p (increment)
7328 && ! indirect_jump_in_function))
7329 {
7330 if (loop_dump_stream) {
7331 fprintf (loop_dump_stream,
7332 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7333 if (!(initial_value && increment && comparison_value)) {
7334 fprintf (loop_dump_stream, "\tbounds not available: ");
7335 if ( ! initial_value )
7336 fprintf (loop_dump_stream, "initial ");
7337 if ( ! increment )
7338 fprintf (loop_dump_stream, "increment ");
7339 if ( ! comparison_value )
7340 fprintf (loop_dump_stream, "comparison ");
7341 fprintf (loop_dump_stream, "\n");
7342 }
7343 if (!invariant_p (comparison_value) || !invariant_p (increment))
7344 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7345 }
7346 return;
7347 }
7348
7349 /* make sure that the increment is constant */
7350 if (GET_CODE (increment) != CONST_INT) {
7351 if (loop_dump_stream)
7352 fprintf (loop_dump_stream,
7353 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7354 loop_num);
7355 return;
7356 }
7357
7358 /* make sure that the loop contains neither a function call nor a jump
7359 on a table (the count register might be altered by the called function,
7360 and might be used for a branch on a table). */
7361 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7362 if (GET_CODE (insn) == CALL_INSN){
7363 if (loop_dump_stream)
7364 fprintf (loop_dump_stream,
7365 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7366 loop_num);
7367 return;
7368 }
7369
7370 if (GET_CODE (insn) == JUMP_INSN
7371 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7372 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7373 if (loop_dump_stream)
7374 fprintf (loop_dump_stream,
7375 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7376 loop_num);
7377 return;
7378 }
7379 }
7380
7381 /* At this point, we are sure that the loop can be instrumented with BCT.
7382 Some of the loops, however, will not be instrumented - the final decision
7383 is taken by insert_bct () */
7384 if (loop_dump_stream)
7385 fprintf (loop_dump_stream,
7386 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7387 loop_num);
7388
7389 /* mark all enclosing loops that they cannot use count register */
7390 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7391 marking here may prevent instrumenting an enclosing loop that could
7392 actually be instrumented. But since this is rare, it is safer to mark
7393 here in case the order of calling (analyze/insert)_bct would be changed. */
7394 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7395 loop_used_count_register[i] = 1;
7396
7397 /* Set data structures which will be used by the instrumentation phase */
7398 loop_start_value[loop_num] = initial_value;
7399 loop_comparison_value[loop_num] = comparison_value;
7400 loop_increment[loop_num] = increment;
7401 loop_comparison_code[loop_num] = comparison_code;
7402 loop_can_insert_bct[loop_num] = 1;
7403 }
7404
7405
7406 /* instrument a loop for insertion of the bct instruction. We distinguish between
7407 loops with compile-time bounds and those with run-time bounds. The loop
7408 behaviour is analyzed according to the following characteristics/variables:
7409 ; Input variables:
7410 ; comparison-value: the value to which the iteration counter is compared.
7411 ; initial-value: iteration-counter initial value.
7412 ; increment: iteration-counter increment.
7413 ; Computed variables:
7414 ; increment-direction: the sign of the increment.
7415 ; compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
7416 ; range-direction: sign (comparison-value - initial-value)
7417 We give up on the following cases:
7418 ; loop variable overflow.
7419 ; run-time loop bounds with comparison code NE.
7420 */
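/* A worked example (hypothetical loop, not taken from any source): for a
loop equivalent to "for (i = 0; i <= n; i += 4)" we get
increment-direction = 1 (the increment 4 is positive), compare-direction = 1
(LE) and, for n > 0, range-direction = 1. Since compare-direction matches
increment-direction, the loop terminates by reaching the bound rather than
by overflow, and the <= comparison contributes one extra iteration
(add_iteration = 1 below). */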
7421
7422 static void
7423 insert_bct (loop_start, loop_end)
7424 rtx loop_start, loop_end;
7425 {
7426 rtx initial_value, comparison_value, increment;
7427 enum rtx_code comparison_code;
7428
7429 int increment_direction, compare_direction;
7430 int unsigned_p = 0;
7431
7432 /* if the loop condition is <= or >=, the number of iterations
7433 is 1 more than the range of the bounds of the loop */
7434 int add_iteration = 0;
7435
7436 /* the only machine mode we work with is the machine's word-sized
7437 integer mode */
7438 enum machine_mode loop_var_mode = SImode;
7439
7440 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7441
7442 /* get loop-variables. No need to check that these are valid - already
7443 checked in analyze_loop_iterations (). */
7444 comparison_code = loop_comparison_code[loop_num];
7445 initial_value = loop_start_value[loop_num];
7446 comparison_value = loop_comparison_value[loop_num];
7447 increment = loop_increment[loop_num];
7448
7449 /* check analyze_loop_iterations decision for this loop. */
7450 if (! loop_can_insert_bct[loop_num]){
7451 if (loop_dump_stream)
7452 fprintf (loop_dump_stream,
7453 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7454 loop_num);
7455 return;
7456 }
7457
7458 /* It's impossible to instrument a completely unrolled loop. */
7459 if (loop_unroll_factor [loop_num] == -1)
7460 return;
7461
7462 /* make sure that the last loop insn is a conditional jump.
7463 This check is repeated from analyze_loop_iterations (),
7464 because unrolling might have changed that. */
7465 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7466 || !condjump_p (PREV_INSN (loop_end))) {
7467 if (loop_dump_stream)
7468 fprintf (loop_dump_stream,
7469 "insert_bct: not instrumenting BCT because of invalid branch\n");
7470 return;
7471 }
7472
7473 /* fix increment in case loop was unrolled. */
7474 if (loop_unroll_factor [loop_num] > 1)
7475 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
7476
7477 /* determine properties and directions of the loop */
7478 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
7479 switch ( comparison_code ) {
7480 case LEU:
7481 unsigned_p = 1;
7482 /* fallthrough */
7483 case LE:
7484 compare_direction = 1;
7485 add_iteration = 1;
7486 break;
7487 case GEU:
7488 unsigned_p = 1;
7489 /* fallthrough */
7490 case GE:
7491 compare_direction = -1;
7492 add_iteration = 1;
7493 break;
7494 case EQ:
7495 /* in this case we cannot know the number of iterations */
7496 if (loop_dump_stream)
7497 fprintf (loop_dump_stream,
7498 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7499 loop_num);
7500 return;
7501 case LTU:
7502 unsigned_p = 1;
7503 /* fallthrough */
7504 case LT:
7505 compare_direction = 1;
7506 break;
7507 case GTU:
7508 unsigned_p = 1;
7509 /* fallthrough */
7510 case GT:
7511 compare_direction = -1;
7512 break;
7513 case NE:
7514 compare_direction = 0;
7515 break;
7516 default:
7517 abort ();
7518 }
7519
7520
7521 /* make sure that the loop does not end by an overflow */
7522 if (compare_direction != increment_direction) {
7523 if (loop_dump_stream)
7524 fprintf (loop_dump_stream,
7525 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7526 loop_num);
7527 return;
7528 }
7529
7530 /* try to instrument the loop. */
7531
7532 /* Handle the simpler case, where the bounds are known at compile time. */
7533 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7534 {
7535 int n_iterations;
7536 int increment_value_abs = INTVAL (increment) * increment_direction;
7537
7538 /* check the relation between compare-val and initial-val */
7539 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7540 int range_direction = (difference > 0) ? 1 : -1;
7541
7542 /* make sure the loop executes enough iterations to gain from BCT */
7543 if (difference > -3 && difference < 3) {
7544 if (loop_dump_stream)
7545 fprintf (loop_dump_stream,
7546 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7547 loop_num);
7548 return;
7549 }
7550
7551 /* make sure that the loop executes at least once */
7552 if ((range_direction == 1 && compare_direction == -1)
7553 || (range_direction == -1 && compare_direction == 1))
7554 {
7555 if (loop_dump_stream)
7556 fprintf (loop_dump_stream,
7557 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7558 loop_num);
7559 return;
7560 }
7561
7562 /* make sure that the loop does not end by an overflow (with compile-time
7563 bounds we must make an additional check for overflow, because here
7564 we also support the comparison code NE). */
7565 if (comparison_code == NE
7566 && increment_direction != range_direction) {
7567 if (loop_dump_stream)
7568 fprintf (loop_dump_stream,
7569 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7570 loop_num);
7571 return;
7572 }
7573
7574 /* Determine the number of iterations by:
7575 ;
7576 ; compare-val - initial-val + (increment -1) + additional-iteration
7577 ; num_iterations = -----------------------------------------------------------------
7578 ; increment
7579 */
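/* Worked instance of the formula (hypothetical values): with
initial-val = 0, compare-val = 10, increment = 3 and comparison code LT,
we get difference = 10, add_iteration = 0, and
n_iterations = (10 + (3 - 1) + 0) / 3 = 4,
matching the iterations executed for i = 0, 3, 6 and 9. */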
7580 difference = (range_direction > 0) ? difference : -difference;
7581 #if 0
7582 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7583 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7584 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7585 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7586 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7587 #endif
7588
7589 if (increment_value_abs == 0) {
7590 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7591 abort ();
7592 }
7593 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7594 / increment_value_abs;
7595
7596 #if 0
7597 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7598 #endif
7599 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7600
7601 /* Done with this loop. */
7602 return;
7603 }
7604
7605 /* Handle the more complex case, where the bounds are NOT known at compile time. */
7606 /* In this case we generate a run-time calculation of the number of iterations. */
7607
7608 /* With runtime bounds, if the compare is of the form '!=' we give up */
7609 if (comparison_code == NE) {
7610 if (loop_dump_stream)
7611 fprintf (loop_dump_stream,
7612 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7613 loop_num);
7614 return;
7615 }
7616
7617 else {
7618 /* We rely on the existence of a run-time guard to ensure that the
7619 loop executes at least once. */
7620 rtx sequence;
7621 rtx iterations_num_reg;
7622
7623 int increment_value_abs = INTVAL (increment) * increment_direction;
7624
7625 /* make sure that the increment is a power of two; otherwise an
7626 (expensive) divide is needed. */
7627 if (exact_log2 (increment_value_abs) == -1)
7628 {
7629 if (loop_dump_stream)
7630 fprintf (loop_dump_stream,
7631 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7632 return;
7633 }
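/* E.g. (hypothetical values): an increment of 4 gives
exact_log2 (4) == 2, so the division below can be performed as a right
shift by 2; an increment of 6 gives exact_log2 (6) == -1 and the loop
is left uninstrumented. */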
7634
7635 /* compute the number of iterations */
7636 start_sequence ();
7637 {
7638 rtx temp_reg;
7639
7640 /* Again, the number of iterations is calculated by:
7641 ;
7642 ; compare-val - initial-val + (increment -1) + additional-iteration
7643 ; num_iterations = -----------------------------------------------------------------
7644 ; increment
7645 */
7646 /* ??? Do we have to call copy_rtx here before passing rtx to
7647 expand_binop? */
7648 if (compare_direction > 0) {
7649 /* <, <=: the loop variable is increasing */
7650 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7651 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7652 }
7653 else {
7654 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7655 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7656 }
7657
7658 if (increment_value_abs - 1 + add_iteration != 0)
7659 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7660 GEN_INT (increment_value_abs - 1 + add_iteration),
7661 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7662
7663 if (increment_value_abs != 1)
7664 {
7665 /* ??? This will generate an expensive divide instruction for
7666 most targets. The original authors apparently expected this
7667 to be a shift, since they test for power-of-2 divisors above,
7668 but just naively generating a divide instruction will not give
7669 a shift. It happens to work for the PowerPC target because
7670 the rs6000.md file has a divide pattern that emits shifts.
7671 It will probably not work for any other target. */
7672 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7673 temp_reg,
7674 GEN_INT (increment_value_abs),
7675 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7676 }
7677 else
7678 iterations_num_reg = temp_reg;
7679 }
7680 sequence = gen_sequence ();
7681 end_sequence ();
7682 emit_insn_before (sequence, loop_start);
7683 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7684 }
7685 }
7686
7687 /* instrument the loop by inserting a bct in it. This is done in the following way:
7688 1. A new register is created and assigned the hard register number of the count
7689 register.
7690 2. In the head of the loop the new variable is initialized by the value passed in the
7691 loop_num_iterations parameter.
7692 3. At the end of the loop, a comparison of the register with 0 is generated.
7693 The created comparison follows the pattern defined for the
7694 decrement_and_branch_on_count insn, so this insn will be generated in the assembly
7695 generation phase.
7696 4. The compare&branch on the old variable is deleted. So, if the loop variable was
7697 not used elsewhere, it will be eliminated by data-flow analysis. */
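/* Schematically (hypothetical insn sequence; the real emitted RTL is
target dependent), a loop of the form

loop_start: body; compare i, n; branch loop_start if i < n

becomes

loop_start: ctr = iterations; start_label: body;
decrement_and_branch_on_count ctr, start_label

where the count-register initialization is emitted just after loop_start
and the old compare/branch pair is deleted. */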
7698
7699 static void
7700 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7701 rtx loop_start, loop_end;
7702 rtx loop_num_iterations;
7703 {
7704 rtx temp_reg1, temp_reg2;
7705 rtx start_label;
7706
7707 rtx sequence;
7708 enum machine_mode loop_var_mode = SImode;
7709
7710 if (HAVE_decrement_and_branch_on_count)
7711 {
7712 if (loop_dump_stream)
7713 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7714
7715 /* eliminate the check on the old variable: delete the conditional jump at the loop end and the compare that feeds it */
7716 delete_insn (PREV_INSN (loop_end));
7717 delete_insn (PREV_INSN (loop_end));
7718
7719 /* insert the label which will delimit the start of the loop */
7720 start_label = gen_label_rtx ();
7721 emit_label_after (start_label, loop_start);
7722
7723 /* insert initialization of the count register into the loop header */
7724 start_sequence ();
7725 temp_reg1 = gen_reg_rtx (loop_var_mode);
7726 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7727
7728 /* this will be the count register */
7729 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7730 /* we have to move the value to the count register from a GPR
7731 because the rtx pointed to by loop_num_iterations could contain
7732 an expression which cannot be moved into the count register */
7733 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7734
7735 sequence = gen_sequence ();
7736 end_sequence ();
7737 emit_insn_after (sequence, loop_start);
7738
7739 /* insert new comparison on the count register instead of the
7740 old one, generating the needed BCT pattern (that will be
7741 later recognized by assembly generation phase). */
7742 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7743 loop_end);
7744 LABEL_NUSES (start_label)++;
7745 }
7746
7747 }
7748 #endif /* HAVE_decrement_and_branch_on_count */
7749
7750 #endif /* HAIFA */
7751
7752 /* Scan the function and determine whether it has indirect (computed) jumps.
7753
7754 This is taken mostly from flow.c; similar code exists elsewhere
7755 in the compiler. It may be useful to put this into rtlanal.c. */
7756 static int
7757 indirect_jump_in_function_p (start)
7758 rtx start;
7759 {
7760 rtx insn;
7761
7762 for (insn = start; insn; insn = NEXT_INSN (insn))
7763 if (computed_jump_p (insn))
7764 return 1;
7765
7766 return 0;
7767 }