/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
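
/* As a purely illustrative, source-level sketch of the invariant
   motion done here (the pass itself works on RTL, not C):

        while (--n)                             t = x + y;
          a[n] = x + y;                 ==>     while (--n)
                                                  a[n] = t;

   Strength reduction similarly replaces the multiply hidden in the
   address computation a + n * sizeof (*a) with an induction variable
   that is simply decremented by sizeof (*a) on each iteration.  */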

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, records whether any of its inner loops has used
   the count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
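
/* Purely illustrative, source-level example of what gets recorded
   (the analysis itself inspects the RTL): for a loop such as

        for (i = 0; i < n; i += 4)
          ...

   loop_start_value would be 0, loop_increment 4, loop_comparison_value
   n, and loop_comparison_code LT.  */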
#endif /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled.
     -1: completely unrolled
     >0: holds the unroll exact factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include return
   instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not be
   calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
   A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number; 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is a libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
#ifdef ADDRESS_COST
static rtx express_from PROTO((struct induction *, struct induction *));
#endif
static int combine_givs_p PROTO((struct induction *, struct induction *));
#ifdef GIV_SORT_CRITERION
static int giv_sort PROTO((struct induction **, struct induction **));
#endif
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));

#ifdef HAIFA
/* This is extern from unroll.c.  */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));

/* Two main functions for implementing bct:
   the first to be called before loop unrolling, the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
#endif /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty if a giv is not replaceable, i.e. if we must emit an insn
   to copy the value of the strength-reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we clear it here
     just in case we are not performing loop unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate tables for the BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num-1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original
     one-to-one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
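
  /* Schematically, such a rotated loop looks like this (illustrative
     only):

        NOTE_INSN_LOOP_BEG
        jump to TEST
     TOP:
        body of the loop
     TEST:
        conditional jump back to TOP
        NOTE_INSN_LOOP_END

     so the first real insn after the loop-begin note is a jump.  */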

  for (p = NEXT_INSN (loop_start);
       p != end
       && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
       && (GET_CODE (p) != NOTE
           || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
               && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be an unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it is not really a loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
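
  /* As a purely illustrative, source-level sketch, in

        while (--n)
          if (p != 0)
            x = 100 / *p;

     the division is invariant but may trap, and it is executed only
     under the `if'; hoisting it in front of the loop could introduce
     a trap the original program never executed.  MAYBE_NEVER is set
     once we pass the conditional jump, which blocks such a move.  */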

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage case is where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;

                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
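          /* The insn pair we look for has roughly this RTL shape
             (illustrative only):

                (set (reg:SI r) (const_int 0))
                (set (strict_low_part (subreg:HI (reg:SI r) 0)) (mem:HI ...))

             so after the transformation R is cleared once before the
             loop and only the narrow load remains inside it.  */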
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads, when it dies,
     leads right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */
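
/* A libcall block has roughly this shape (purely illustrative):

        first:  insn ...        carries a REG_LIBCALL note pointing at LAST
                ...
        last:   (set (reg R) ...)
                                carries a REG_RETVAL note pointing back at
                                FIRST, plus a REG_EQUAL note giving the
                                equivalent value of R

   Any register used by the insns of the block but absent from the
   REG_EQUAL value is what this function collects.  */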

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a library
                                   routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads, when it dies,
   leads right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
\f
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  */
1604
1605 static void
1606 add_label_notes (x, insns)
1607 rtx x;
1608 rtx insns;
1609 {
1610 enum rtx_code code = GET_CODE (x);
1611 int i, j;
1612 char *fmt;
1613 rtx insn;
1614
1615 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1616 {
1617 rtx next = next_real_insn (XEXP (x, 0));
1618
1619 /* Don't record labels that refer to dispatch tables.
1620 This is not necessary, since the tablejump references the same label.
1621 And if we did record them, flow.c would make worse code. */
1622 if (next == 0
1623 || ! (GET_CODE (next) == JUMP_INSN
1624 && (GET_CODE (PATTERN (next)) == ADDR_VEC
1625 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
1626 {
1627 for (insn = insns; insn; insn = NEXT_INSN (insn))
1628 if (reg_mentioned_p (XEXP (x, 0), insn))
1629 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1630 REG_NOTES (insn));
1631 }
1632 return;
1633 }
1634
1635 fmt = GET_RTX_FORMAT (code);
1636 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1637 {
1638 if (fmt[i] == 'e')
1639 add_label_notes (XEXP (x, i), insns);
1640 else if (fmt[i] == 'E')
1641 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1642 add_label_notes (XVECEXP (x, i, j), insns);
1643 }
1644 }
1645 \f
1646 /* Scan MOVABLES, and move the insns that deserve to be moved.
1647 If two matching movables are combined, replace one reg with the
1648 other throughout. */
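/* In outline, a movable M is moved below when its register was
   already scheduled for moving, when -fmove-all-movables is in
   effect, when THRESHOLD * savings * lifetime >= insn_count, or when
   the movable recorded in m->forces has been moved and its register
   has exactly one use; see the desirability test in the body.  */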
1649
1650 static void
1651 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1652 struct movable *movables;
1653 int threshold;
1654 int insn_count;
1655 rtx loop_start;
1656 rtx end;
1657 int nregs;
1658 {
1659 rtx new_start = 0;
1660 register struct movable *m;
1661 register rtx p;
1662 /* Map of pseudo-register replacements to handle combining
1663 when we move several insns that load the same value
1664 into different pseudo-registers. */
1665 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1666 char *already_moved = (char *) alloca (nregs);
1667
1668 bzero (already_moved, nregs);
1669 bzero ((char *) reg_map, nregs * sizeof (rtx));
1670
1671 num_movables = 0;
1672
1673 for (m = movables; m; m = m->next)
1674 {
1675 /* Describe this movable insn. */
1676
1677 if (loop_dump_stream)
1678 {
1679 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1680 INSN_UID (m->insn), m->regno, m->lifetime);
1681 if (m->consec > 0)
1682 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1683 if (m->cond)
1684 fprintf (loop_dump_stream, "cond ");
1685 if (m->force)
1686 fprintf (loop_dump_stream, "force ");
1687 if (m->global)
1688 fprintf (loop_dump_stream, "global ");
1689 if (m->done)
1690 fprintf (loop_dump_stream, "done ");
1691 if (m->move_insn)
1692 fprintf (loop_dump_stream, "move-insn ");
1693 if (m->match)
1694 fprintf (loop_dump_stream, "matches %d ",
1695 INSN_UID (m->match->insn));
1696 if (m->forces)
1697 fprintf (loop_dump_stream, "forces %d ",
1698 INSN_UID (m->forces->insn));
1699 }
1700
1701 /* Count movables. Value used in heuristics in strength_reduce. */
1702 num_movables++;
1703
1704 /* Ignore the insn if it's already done (it matched something else).
1705 Otherwise, see if it is now safe to move. */
1706
1707 if (!m->done
1708 && (! m->cond
1709 || (1 == invariant_p (m->set_src)
1710 && (m->dependencies == 0
1711 || 1 == invariant_p (m->dependencies))
1712 && (m->consec == 0
1713 || 1 == consec_sets_invariant_p (m->set_dest,
1714 m->consec + 1,
1715 m->insn))))
1716 && (! m->forces || m->forces->done))
1717 {
1718 register int regno;
1719 register rtx p;
1720 int savings = m->savings;
1721
1722 /* We have an insn that is safe to move.
1723 Compute its desirability. */
1724
1725 p = m->insn;
1726 regno = m->regno;
1727
1728 if (loop_dump_stream)
1729 fprintf (loop_dump_stream, "savings %d ", savings);
1730
1731 if (moved_once[regno])
1732 {
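		  /* Doubling insn_count makes the desirability test below,
		     THRESHOLD * savings * lifetime >= insn_count, half as
		     likely to pass; that is what the "halved" message means.  */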
1733 insn_count *= 2;
1734
1735 if (loop_dump_stream)
1736 fprintf (loop_dump_stream, "halved since already moved ");
1737 }
1738
1739 /* An insn MUST be moved if we already moved something else
1740 which is safe only if this one is moved too: that is,
1741 if already_moved[REGNO] is nonzero. */
1742
1743 /* An insn is desirable to move if the new lifetime of the
1744 register is no more than THRESHOLD times the old lifetime.
1745 If it's not desirable, it means the loop is so big
1746 that moving won't speed things up much,
1747 and it is liable to make register usage worse. */
1748
1749 /* It is also desirable to move if it can be moved at no
1750 extra cost because something else was already moved. */
1751
1752 if (already_moved[regno]
1753 || flag_move_all_movables
1754 || (threshold * savings * m->lifetime) >= insn_count
1755 || (m->forces && m->forces->done
1756 && n_times_used[m->forces->regno] == 1))
1757 {
1758 int count;
1759 register struct movable *m1;
1760 rtx first;
1761
1762 /* Now move the insns that set the reg. */
1763
1764 if (m->partial && m->match)
1765 {
1766 rtx newpat, i1;
1767 rtx r1, r2;
1768 /* Find the end of this chain of matching regs.
1769 Thus, we load each reg in the chain from that one reg.
1770 And that reg is loaded with 0 directly,
1771 since it has ->match == 0. */
1772 for (m1 = m; m1->match; m1 = m1->match);
1773 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1774 SET_DEST (PATTERN (m1->insn)));
1775 i1 = emit_insn_before (newpat, loop_start);
1776
1777 /* Mark the moved, invariant reg as being allowed to
1778 share a hard reg with the other matching invariant. */
1779 REG_NOTES (i1) = REG_NOTES (m->insn);
1780 r1 = SET_DEST (PATTERN (m->insn));
1781 r2 = SET_DEST (PATTERN (m1->insn));
1782 regs_may_share
1783 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1784 gen_rtx_EXPR_LIST (VOIDmode, r2,
1785 regs_may_share));
1786 delete_insn (m->insn);
1787
1788 if (new_start == 0)
1789 new_start = i1;
1790
1791 if (loop_dump_stream)
1792 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1793 }
1794 /* If we are to re-generate the item being moved with a
1795 new move insn, first delete what we have and then emit
1796 the move insn before the loop. */
1797 else if (m->move_insn)
1798 {
1799 rtx i1, temp;
1800
1801 for (count = m->consec; count >= 0; count--)
1802 {
1803 /* If this is the first insn of a library call sequence,
1804 skip to the end. */
1805 if (GET_CODE (p) != NOTE
1806 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1807 p = XEXP (temp, 0);
1808
1809 /* If this is the last insn of a libcall sequence, then
1810 delete every insn in the sequence except the last.
1811 The last insn is handled in the normal manner. */
1812 if (GET_CODE (p) != NOTE
1813 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1814 {
1815 temp = XEXP (temp, 0);
1816 while (temp != p)
1817 temp = delete_insn (temp);
1818 }
1819
1820 p = delete_insn (p);
1821 while (p && GET_CODE (p) == NOTE)
1822 p = NEXT_INSN (p);
1823 }
1824
1825 start_sequence ();
1826 emit_move_insn (m->set_dest, m->set_src);
1827 temp = get_insns ();
1828 end_sequence ();
1829
1830 add_label_notes (m->set_src, temp);
1831
1832 i1 = emit_insns_before (temp, loop_start);
1833 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1834 REG_NOTES (i1)
1835 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1836 m->set_src, REG_NOTES (i1));
1837
1838 if (loop_dump_stream)
1839 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1840
1841 /* The more regs we move, the less we like moving them. */
1842 threshold -= 3;
1843 }
1844 else
1845 {
1846 for (count = m->consec; count >= 0; count--)
1847 {
1848 rtx i1, temp;
1849
1850 /* If first insn of libcall sequence, skip to end. */
1851 /* Do this at start of loop, since p is guaranteed to
1852 be an insn here. */
1853 if (GET_CODE (p) != NOTE
1854 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1855 p = XEXP (temp, 0);
1856
1857 /* If last insn of libcall sequence, move all
1858 insns except the last before the loop. The last
1859 insn is handled in the normal manner. */
1860 if (GET_CODE (p) != NOTE
1861 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1862 {
1863 rtx fn_address = 0;
1864 rtx fn_reg = 0;
1865 rtx fn_address_insn = 0;
1866
1867 first = 0;
1868 for (temp = XEXP (temp, 0); temp != p;
1869 temp = NEXT_INSN (temp))
1870 {
1871 rtx body;
1872 rtx n;
1873 rtx next;
1874
1875 if (GET_CODE (temp) == NOTE)
1876 continue;
1877
1878 body = PATTERN (temp);
1879
1880 /* Find the next insn after TEMP,
1881 not counting USE or NOTE insns. */
1882 for (next = NEXT_INSN (temp); next != p;
1883 next = NEXT_INSN (next))
1884 if (! (GET_CODE (next) == INSN
1885 && GET_CODE (PATTERN (next)) == USE)
1886 && GET_CODE (next) != NOTE)
1887 break;
1888
1889 /* If that is the call, this may be the insn
1890 that loads the function address.
1891
1892 Extract the function address from the insn
1893 that loads it into a register.
1894 If this insn was cse'd, we get incorrect code.
1895
1896 So emit a new move insn that copies the
1897 function address into the register that the
1898 call insn will use. flow.c will delete any
1899 redundant stores that we have created. */
1900 if (GET_CODE (next) == CALL_INSN
1901 && GET_CODE (body) == SET
1902 && GET_CODE (SET_DEST (body)) == REG
1903 && (n = find_reg_note (temp, REG_EQUAL,
1904 NULL_RTX)))
1905 {
1906 fn_reg = SET_SRC (body);
1907 if (GET_CODE (fn_reg) != REG)
1908 fn_reg = SET_DEST (body);
1909 fn_address = XEXP (n, 0);
1910 fn_address_insn = temp;
1911 }
1912 /* We have the call insn.
1913 If it uses the register we suspect it might,
1914 load it with the correct address directly. */
1915 if (GET_CODE (temp) == CALL_INSN
1916 && fn_address != 0
1917 && reg_referenced_p (fn_reg, body))
1918 emit_insn_after (gen_move_insn (fn_reg,
1919 fn_address),
1920 fn_address_insn);
1921
1922 if (GET_CODE (temp) == CALL_INSN)
1923 {
1924 i1 = emit_call_insn_before (body, loop_start);
1925 /* Because the USAGE information potentially
1926 contains objects other than hard registers,
1927 we need to copy it. */
1928 if (CALL_INSN_FUNCTION_USAGE (temp))
1929 CALL_INSN_FUNCTION_USAGE (i1)
1930 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1931 }
1932 else
1933 i1 = emit_insn_before (body, loop_start);
1934 if (first == 0)
1935 first = i1;
1936 if (temp == fn_address_insn)
1937 fn_address_insn = i1;
1938 REG_NOTES (i1) = REG_NOTES (temp);
1939 delete_insn (temp);
1940 }
1941 }
1942 if (m->savemode != VOIDmode)
1943 {
1944 /* P sets REG to zero; but we should clear only
1945 the bits that are not covered by the mode
1946 m->savemode. */
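			  /* For instance, if m->savemode were QImode on a
			     target with 8-bit QImode, the mask computed below
			     would be (1 << 8) - 1 = 0xff, so only the low
			     byte is preserved inside the loop.  */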
1947 rtx reg = m->set_dest;
1948 rtx sequence;
1949 rtx tem;
1950
1951 start_sequence ();
1952 tem = expand_binop
1953 (GET_MODE (reg), and_optab, reg,
1954 GEN_INT ((((HOST_WIDE_INT) 1
1955 << GET_MODE_BITSIZE (m->savemode)))
1956 - 1),
1957 reg, 1, OPTAB_LIB_WIDEN);
1958 if (tem == 0)
1959 abort ();
1960 if (tem != reg)
1961 emit_move_insn (reg, tem);
1962 sequence = gen_sequence ();
1963 end_sequence ();
1964 i1 = emit_insn_before (sequence, loop_start);
1965 }
1966 else if (GET_CODE (p) == CALL_INSN)
1967 {
1968 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1969 /* Because the USAGE information potentially
1970 contains objects other than hard registers,
1971 we need to copy it. */
1972 if (CALL_INSN_FUNCTION_USAGE (p))
1973 CALL_INSN_FUNCTION_USAGE (i1)
1974 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1975 }
1976 else if (count == m->consec && m->move_insn_first)
1977 {
1978 /* The SET_SRC might not be invariant, so we must
1979 use the REG_EQUAL note. */
1980 start_sequence ();
1981 emit_move_insn (m->set_dest, m->set_src);
1982 temp = get_insns ();
1983 end_sequence ();
1984
1985 add_label_notes (m->set_src, temp);
1986
1987 i1 = emit_insns_before (temp, loop_start);
1988 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1989 REG_NOTES (i1)
1990 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1991 : REG_EQUAL),
1992 m->set_src, REG_NOTES (i1));
1993 }
1994 else
1995 i1 = emit_insn_before (PATTERN (p), loop_start);
1996
1997 if (REG_NOTES (i1) == 0)
1998 {
1999 REG_NOTES (i1) = REG_NOTES (p);
2000
2001 /* If there is a REG_EQUAL note present whose value
2002 is not loop invariant, then delete it, since it
2003 may cause problems with later optimization passes.
2004 It is possible for cse to create notes
2005 like this as a result of record_jump_cond. */
2006
2007 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2008 && ! invariant_p (XEXP (temp, 0)))
2009 remove_note (i1, temp);
2010 }
2011
2012 if (new_start == 0)
2013 new_start = i1;
2014
2015 if (loop_dump_stream)
2016 fprintf (loop_dump_stream, " moved to %d",
2017 INSN_UID (i1));
2018
2019 /* If library call, now fix the REG_NOTES that contain
2020 insn pointers, namely REG_LIBCALL on FIRST
2021 and REG_RETVAL on I1. */
2022 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2023 {
2024 XEXP (temp, 0) = first;
2025 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2026 XEXP (temp, 0) = i1;
2027 }
2028
2029 delete_insn (p);
2030 do p = NEXT_INSN (p);
2031 while (p && GET_CODE (p) == NOTE);
2032 }
2033
2034 /* The more regs we move, the less we like moving them. */
2035 threshold -= 3;
2036 }
2037
2038 /* Any other movable that loads the same register
2039 MUST be moved. */
2040 already_moved[regno] = 1;
2041
2042 /* This reg has been moved out of one loop. */
2043 moved_once[regno] = 1;
2044
2045 /* The reg set here is now invariant. */
2046 if (! m->partial)
2047 n_times_set[regno] = 0;
2048
2049 m->done = 1;
2050
2051 /* Change the length-of-life info for the register
2052 to say it lives at least the full length of this loop.
2053 This will help guide optimizations in outer loops. */
2054
2055 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2056 /* This is the old insn before all the moved insns.
2057 We can't use the moved insn because it is out of range
2058 in uid_luid. Only the old insns have luids. */
2059 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2060 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2061 REGNO_LAST_UID (regno) = INSN_UID (end);
2062
2063 /* Combine with this moved insn any other matching movables. */
2064
2065 if (! m->partial)
2066 for (m1 = movables; m1; m1 = m1->next)
2067 if (m1->match == m)
2068 {
2069 rtx temp;
2070
2071 /* Schedule the reg loaded by M1
2072 for replacement so that it shares the reg of M.
2073 If the modes differ (only possible in restricted
2074 circumstances), make a SUBREG. */
2075 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2076 reg_map[m1->regno] = m->set_dest;
2077 else
2078 reg_map[m1->regno]
2079 = gen_lowpart_common (GET_MODE (m1->set_dest),
2080 m->set_dest);
2081
2082 /* Get rid of the matching insn
2083 and prevent further processing of it. */
2084 m1->done = 1;
2085
2086 /* If library call, delete all insns except the last, which
2087 is deleted below. */
2088 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2089 NULL_RTX)))
2090 {
2091 for (temp = XEXP (temp, 0); temp != m1->insn;
2092 temp = NEXT_INSN (temp))
2093 delete_insn (temp);
2094 }
2095 delete_insn (m1->insn);
2096
2097 /* Any other movable that loads the same register
2098 MUST be moved. */
2099 already_moved[m1->regno] = 1;
2100
2101 /* The reg merged here is now invariant,
2102 if the reg it matches is invariant. */
2103 if (! m->partial)
2104 n_times_set[m1->regno] = 0;
2105 }
2106 }
2107 else if (loop_dump_stream)
2108 fprintf (loop_dump_stream, "not desirable");
2109 }
2110 else if (loop_dump_stream && !m->match)
2111 fprintf (loop_dump_stream, "not safe");
2112
2113 if (loop_dump_stream)
2114 fprintf (loop_dump_stream, "\n");
2115 }
2116
2117 if (new_start == 0)
2118 new_start = loop_start;
2119
2120 /* Go through all the instructions in the loop, making
2121 all the register substitutions scheduled in REG_MAP. */
2122 for (p = new_start; p != end; p = NEXT_INSN (p))
2123 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2124 || GET_CODE (p) == CALL_INSN)
2125 {
2126 replace_regs (PATTERN (p), reg_map, nregs, 0);
2127 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2128 INSN_CODE (p) = -1;
2129 }
2130 }
2131 \f
2132 #if 0
2133 /* Scan X and replace the address of any MEM in it with ADDR.
2134 REG is the address that MEM should have before the replacement. */
2135
2136 static void
2137 replace_call_address (x, reg, addr)
2138 rtx x, reg, addr;
2139 {
2140 register enum rtx_code code;
2141 register int i;
2142 register char *fmt;
2143
2144 if (x == 0)
2145 return;
2146 code = GET_CODE (x);
2147 switch (code)
2148 {
2149 case PC:
2150 case CC0:
2151 case CONST_INT:
2152 case CONST_DOUBLE:
2153 case CONST:
2154 case SYMBOL_REF:
2155 case LABEL_REF:
2156 case REG:
2157 return;
2158
2159 case SET:
2160 /* Short cut for very common case. */
2161 replace_call_address (XEXP (x, 1), reg, addr);
2162 return;
2163
2164 case CALL:
2165 /* Short cut for very common case. */
2166 replace_call_address (XEXP (x, 0), reg, addr);
2167 return;
2168
2169 case MEM:
2170 /* If this MEM uses a reg other than the one we expected,
2171 something is wrong. */
2172 if (XEXP (x, 0) != reg)
2173 abort ();
2174 XEXP (x, 0) = addr;
2175 return;
2176
2177 default:
2178 break;
2179 }
2180
2181 fmt = GET_RTX_FORMAT (code);
2182 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2183 {
2184 if (fmt[i] == 'e')
2185 replace_call_address (XEXP (x, i), reg, addr);
2186 if (fmt[i] == 'E')
2187 {
2188 register int j;
2189 for (j = 0; j < XVECLEN (x, i); j++)
2190 replace_call_address (XVECEXP (x, i, j), reg, addr);
2191 }
2192 }
2193 }
2194 #endif
2195 \f
2196 /* Return the number of memory refs to addresses that vary
2197 in the rtx X. */
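/* For example, (mem:SI (plus:SI (reg:SI 100) (const_int 4))) counts
   as one such read if (reg 100) is not loop-invariant, while a MEM
   whose address is invariant contributes nothing.  (Illustrative
   register numbers only.)  */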
2198
2199 static int
2200 count_nonfixed_reads (x)
2201 rtx x;
2202 {
2203 register enum rtx_code code;
2204 register int i;
2205 register char *fmt;
2206 int value;
2207
2208 if (x == 0)
2209 return 0;
2210
2211 code = GET_CODE (x);
2212 switch (code)
2213 {
2214 case PC:
2215 case CC0:
2216 case CONST_INT:
2217 case CONST_DOUBLE:
2218 case CONST:
2219 case SYMBOL_REF:
2220 case LABEL_REF:
2221 case REG:
2222 return 0;
2223
2224 case MEM:
2225 return ((invariant_p (XEXP (x, 0)) != 1)
2226 + count_nonfixed_reads (XEXP (x, 0)));
2227
2228 default:
2229 break;
2230 }
2231
2232 value = 0;
2233 fmt = GET_RTX_FORMAT (code);
2234 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2235 {
2236 if (fmt[i] == 'e')
2237 value += count_nonfixed_reads (XEXP (x, i));
2238 if (fmt[i] == 'E')
2239 {
2240 register int j;
2241 for (j = 0; j < XVECLEN (x, i); j++)
2242 value += count_nonfixed_reads (XVECEXP (x, i, j));
2243 }
2244 }
2245 return value;
2246 }
2247
2248 \f
2249 #if 0
2250 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2251 Replace it with an instruction to load just the low bytes
2252 if the machine supports such an instruction,
2253 and insert above LOOP_START an instruction to clear the register. */
2254
2255 static void
2256 constant_high_bytes (p, loop_start)
2257 rtx p, loop_start;
2258 {
2259 register rtx new;
2260 register int insn_code_number;
2261
2262 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2263 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2264
2265 new = gen_rtx_SET (VOIDmode,
2266 gen_rtx_STRICT_LOW_PART (VOIDmode,
2267 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2268 SET_DEST (PATTERN (p)),
2269 0)),
2270 XEXP (SET_SRC (PATTERN (p)), 0));
2271 insn_code_number = recog (new, p);
2272
2273 if (insn_code_number)
2274 {
2275 register int i;
2276
2277 /* Clear destination register before the loop. */
2278 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2279 const0_rtx),
2280 loop_start);
2281
2282 /* Inside the loop, just load the low part. */
2283 PATTERN (p) = new;
2284 }
2285 }
2286 #endif
2287 \f
2288 /* Scan a loop setting the variables `unknown_address_altered',
2289 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2290 and `loop_has_volatile'.
2291 Also, fill in the array `loop_store_mems'. */
2292
2293 static void
2294 prescan_loop (start, end)
2295 rtx start, end;
2296 {
2297 register int level = 1;
2298 register rtx insn;
2299
2300 unknown_address_altered = 0;
2301 loop_has_call = 0;
2302 loop_has_volatile = 0;
2303 loop_store_mems_idx = 0;
2304
2305 num_mem_sets = 0;
2306 loops_enclosed = 1;
2307 loop_continue = 0;
2308
2309 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2310 insn = NEXT_INSN (insn))
2311 {
2312 if (GET_CODE (insn) == NOTE)
2313 {
2314 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2315 {
2316 ++level;
2317 /* Count number of loops contained in this one. */
2318 loops_enclosed++;
2319 }
2320 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2321 {
2322 --level;
2323 if (level == 0)
2324 {
2325 end = insn;
2326 break;
2327 }
2328 }
2329 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2330 {
2331 if (level == 1)
2332 loop_continue = insn;
2333 }
2334 }
2335 else if (GET_CODE (insn) == CALL_INSN)
2336 {
2337 if (! CONST_CALL_P (insn))
2338 unknown_address_altered = 1;
2339 loop_has_call = 1;
2340 }
2341 else
2342 {
2343 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2344 {
2345 if (volatile_refs_p (PATTERN (insn)))
2346 loop_has_volatile = 1;
2347
2348 note_stores (PATTERN (insn), note_addr_stored);
2349 }
2350 }
2351 }
2352 }
2353 \f
2354 /* Scan the function looking for loops. Record the start and end of each loop.
2355 Also mark as invalid loops any loops that contain a setjmp or are branched
2356 to from outside the loop. */
2357
2358 static void
2359 find_and_verify_loops (f)
2360 rtx f;
2361 {
2362 rtx insn, label;
2363 int current_loop = -1;
2364 int next_loop = -1;
2365 int loop;
2366
2367 /* If there are jumps to undefined labels,
2368 treat them as jumps out of any/all loops.
2369 This also avoids writing past end of tables when there are no loops. */
2370 uid_loop_num[0] = -1;
2371
2372 /* Find boundaries of loops, mark which loops are contained within
2373 loops, and invalidate loops that have setjmp. */
2374
2375 for (insn = f; insn; insn = NEXT_INSN (insn))
2376 {
2377 if (GET_CODE (insn) == NOTE)
2378 switch (NOTE_LINE_NUMBER (insn))
2379 {
2380 case NOTE_INSN_LOOP_BEG:
2381 loop_number_loop_starts[++next_loop] = insn;
2382 loop_number_loop_ends[next_loop] = 0;
2383 loop_outer_loop[next_loop] = current_loop;
2384 loop_invalid[next_loop] = 0;
2385 loop_number_exit_labels[next_loop] = 0;
2386 loop_number_exit_count[next_loop] = 0;
2387 current_loop = next_loop;
2388 break;
2389
2390 case NOTE_INSN_SETJMP:
2391 /* In this case, we must invalidate our current loop and any
2392 enclosing loop. */
2393 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2394 {
2395 loop_invalid[loop] = 1;
2396 if (loop_dump_stream)
2397 fprintf (loop_dump_stream,
2398 "\nLoop at %d ignored due to setjmp.\n",
2399 INSN_UID (loop_number_loop_starts[loop]));
2400 }
2401 break;
2402
2403 case NOTE_INSN_LOOP_END:
2404 if (current_loop == -1)
2405 abort ();
2406
2407 loop_number_loop_ends[current_loop] = insn;
2408 current_loop = loop_outer_loop[current_loop];
2409 break;
2410
2411 default:
2412 break;
2413 }
2414
2415 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2416 enclosing loop, but this doesn't matter. */
2417 uid_loop_num[INSN_UID (insn)] = current_loop;
2418 }
2419
2420 /* Any loop containing a label used in an initializer must be invalidated,
2421 because it can be jumped into from anywhere. */
2422
2423 for (label = forced_labels; label; label = XEXP (label, 1))
2424 {
2425 int loop_num;
2426
2427 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2428 loop_num != -1;
2429 loop_num = loop_outer_loop[loop_num])
2430 loop_invalid[loop_num] = 1;
2431 }
2432
2433 /* Any loop containing a label used for an exception handler must be
2434 invalidated, because it can be jumped into from anywhere. */
2435
2436 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2437 {
2438 int loop_num;
2439
2440 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2441 loop_num != -1;
2442 loop_num = loop_outer_loop[loop_num])
2443 loop_invalid[loop_num] = 1;
2444 }
2445
2446 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2447 loop that it is not contained within, that loop is marked invalid.
2448 If any INSN or CALL_INSN uses a label's address, then the loop containing
2449 that label is marked invalid, because it could be jumped into from
2450 anywhere.
2451
2452 Also look for blocks of code ending in an unconditional branch that
2453 exits the loop. If such a block is surrounded by a conditional
2454 branch around the block, move the block elsewhere (see below) and
2455 invert the jump to point to the code block. This may eliminate a
2456 label in our loop and will simplify processing by both us and a
2457 possible second cse pass. */
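/* A sketch of that transformation (illustrative only):

     before:                        after:
       if (cond) goto L1              if (! cond) goto Lnew
       <block>                      L1:
       goto <outside loop>            ...loop body continues...
     L1:
       ...loop body continues...    Lnew:
                                      <block>
                                      goto <outside loop>

   where Lnew is placed next to a BARRIER at the same loop depth as
   the target of the exit jump, so <block> ends up outside the loop.  */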
2458
2459 for (insn = f; insn; insn = NEXT_INSN (insn))
2460 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2461 {
2462 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2463
2464 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2465 {
2466 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2467 if (note)
2468 {
2469 int loop_num;
2470
2471 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2472 loop_num != -1;
2473 loop_num = loop_outer_loop[loop_num])
2474 loop_invalid[loop_num] = 1;
2475 }
2476 }
2477
2478 if (GET_CODE (insn) != JUMP_INSN)
2479 continue;
2480
2481 mark_loop_jump (PATTERN (insn), this_loop_num);
2482
2483 /* See if this is an unconditional branch outside the loop. */
2484 if (this_loop_num != -1
2485 && (GET_CODE (PATTERN (insn)) == RETURN
2486 || (simplejump_p (insn)
2487 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2488 != this_loop_num)))
2489 && get_max_uid () < max_uid_for_loop)
2490 {
2491 rtx p;
2492 rtx our_next = next_real_insn (insn);
2493 int dest_loop;
2494 int outer_loop = -1;
2495
2496 /* Go backwards until we reach the start of the loop, a label,
2497 or a JUMP_INSN. */
2498 for (p = PREV_INSN (insn);
2499 GET_CODE (p) != CODE_LABEL
2500 && ! (GET_CODE (p) == NOTE
2501 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2502 && GET_CODE (p) != JUMP_INSN;
2503 p = PREV_INSN (p))
2504 ;
2505
2506 /* Check for the case where we have a jump to an inner nested
2507 loop, and do not perform the optimization in that case. */
2508
2509 if (JUMP_LABEL (insn))
2510 {
2511 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2512 if (dest_loop != -1)
2513 {
2514 for (outer_loop = dest_loop; outer_loop != -1;
2515 outer_loop = loop_outer_loop[outer_loop])
2516 if (outer_loop == this_loop_num)
2517 break;
2518 }
2519 }
2520
2521 /* Make sure that the target of P is within the current loop. */
2522
2523 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2524 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2525 outer_loop = this_loop_num;
2526
2527 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2528 we have a block of code to try to move.
2529
2530 We look backward and then forward from the target of INSN
2531 to find a BARRIER at the same loop depth as the target.
2532 If we find such a BARRIER, we make a new label for the start
2533 of the block, invert the jump in P and point it to that label,
2534 and move the block of code to the spot we found. */
2535
2536 if (outer_loop == -1
2537 && GET_CODE (p) == JUMP_INSN
2538 && JUMP_LABEL (p) != 0
2539 /* Just ignore jumps to labels that were never emitted.
2540 These always indicate compilation errors. */
2541 && INSN_UID (JUMP_LABEL (p)) != 0
2542 && condjump_p (p)
2543 && ! simplejump_p (p)
2544 && next_real_insn (JUMP_LABEL (p)) == our_next)
2545 {
2546 rtx target
2547 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2548 int target_loop_num = uid_loop_num[INSN_UID (target)];
2549 rtx loc;
2550
2551 for (loc = target; loc; loc = PREV_INSN (loc))
2552 if (GET_CODE (loc) == BARRIER
2553 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2554 break;
2555
2556 if (loc == 0)
2557 for (loc = target; loc; loc = NEXT_INSN (loc))
2558 if (GET_CODE (loc) == BARRIER
2559 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2560 break;
2561
2562 if (loc)
2563 {
2564 rtx cond_label = JUMP_LABEL (p);
2565 rtx new_label = get_label_after (p);
2566
2567 /* Ensure our label doesn't go away. */
2568 LABEL_NUSES (cond_label)++;
2569
2570 /* Verify that uid_loop_num is large enough and that
2571 we can invert P. */
2572 if (invert_jump (p, new_label))
2573 {
2574 rtx q, r;
2575
2576 /* If no suitable BARRIER was found, create a suitable
2577 one before TARGET. Since TARGET is a fall-through
2578 path, we'll need to insert a jump around our block
2579 and add a BARRIER before TARGET.
2580
2581 This creates an extra unconditional jump outside
2582 the loop. However, the benefits of removing rarely
2583 executed instructions from inside the loop usually
2584 outweigh the cost of the extra unconditional jump
2585 outside the loop. */
2586 if (loc == 0)
2587 {
2588 rtx temp;
2589
2590 temp = gen_jump (JUMP_LABEL (insn));
2591 temp = emit_jump_insn_before (temp, target);
2592 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2593 LABEL_NUSES (JUMP_LABEL (insn))++;
2594 loc = emit_barrier_before (target);
2595 }
2596
2597 /* Include the BARRIER after INSN and copy the
2598 block after LOC. */
2599 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2600 reorder_insns (new_label, NEXT_INSN (insn), loc);
2601
2602 /* All those insns are now in TARGET_LOOP_NUM. */
2603 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2604 q = NEXT_INSN (q))
2605 uid_loop_num[INSN_UID (q)] = target_loop_num;
2606
2607 /* The label jumped to by INSN is no longer a loop exit.
2608 If INSN does have a label (i.e., it is not a
2609 RETURN insn), search loop_number_exit_labels to find
2610 its label_ref, and remove it. Also turn off the
2611 LABEL_OUTSIDE_LOOP_P bit. */
2612 if (JUMP_LABEL (insn))
2613 {
2614 int loop_num;
2615
2616 for (q = 0,
2617 r = loop_number_exit_labels[this_loop_num];
2618 r; q = r, r = LABEL_NEXTREF (r))
2619 if (XEXP (r, 0) == JUMP_LABEL (insn))
2620 {
2621 LABEL_OUTSIDE_LOOP_P (r) = 0;
2622 if (q)
2623 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2624 else
2625 loop_number_exit_labels[this_loop_num]
2626 = LABEL_NEXTREF (r);
2627 break;
2628 }
2629
2630 for (loop_num = this_loop_num;
2631 loop_num != -1 && loop_num != target_loop_num;
2632 loop_num = loop_outer_loop[loop_num])
2633 loop_number_exit_count[loop_num]--;
2634
2635 /* If we didn't find it, then something is wrong. */
2636 if (! r)
2637 abort ();
2638 }
2639
2640 /* P is now a jump outside the loop, so it must be put
2641 in loop_number_exit_labels, and marked as such.
2642 The easiest way to do this is to just call
2643 mark_loop_jump again for P. */
2644 mark_loop_jump (PATTERN (p), this_loop_num);
2645
2646 /* If INSN now jumps to the insn after it,
2647 delete INSN. */
2648 if (JUMP_LABEL (insn) != 0
2649 && (next_real_insn (JUMP_LABEL (insn))
2650 == next_real_insn (insn)))
2651 delete_insn (insn);
2652 }
2653
2654 /* Continue the loop after where the conditional
2655 branch used to jump, since the only branch insn
2656 in the block (if it still remains) is an inter-loop
2657 branch and hence needs no processing. */
2658 insn = NEXT_INSN (cond_label);
2659
2660 if (--LABEL_NUSES (cond_label) == 0)
2661 delete_insn (cond_label);
2662
2663 /* This loop will be continued with NEXT_INSN (insn). */
2664 insn = PREV_INSN (insn);
2665 }
2666 }
2667 }
2668 }
2669 }
2670
2671 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2672 loops it is contained in, mark the target loop invalid.
2673
2674 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2675
2676 static void
2677 mark_loop_jump (x, loop_num)
2678 rtx x;
2679 int loop_num;
2680 {
2681 int dest_loop;
2682 int outer_loop;
2683 int i;
2684
2685 switch (GET_CODE (x))
2686 {
2687 case PC:
2688 case USE:
2689 case CLOBBER:
2690 case REG:
2691 case MEM:
2692 case CONST_INT:
2693 case CONST_DOUBLE:
2694 case RETURN:
2695 return;
2696
2697 case CONST:
2698 /* There could be a label reference in here. */
2699 mark_loop_jump (XEXP (x, 0), loop_num);
2700 return;
2701
2702 case PLUS:
2703 case MINUS:
2704 case MULT:
2705 mark_loop_jump (XEXP (x, 0), loop_num);
2706 mark_loop_jump (XEXP (x, 1), loop_num);
2707 return;
2708
2709 case SIGN_EXTEND:
2710 case ZERO_EXTEND:
2711 mark_loop_jump (XEXP (x, 0), loop_num);
2712 return;
2713
2714 case LABEL_REF:
2715 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2716
2717 /* Link together all labels that branch outside the loop. This
2718 is used by final_[bg]iv_value and the loop unrolling code. Also
2719 mark this LABEL_REF so we know that this branch should predict
2720 false. */
2721
2722 /* A check to make sure the label is not in an inner nested loop,
2723 since this does not count as a loop exit. */
2724 if (dest_loop != -1)
2725 {
2726 for (outer_loop = dest_loop; outer_loop != -1;
2727 outer_loop = loop_outer_loop[outer_loop])
2728 if (outer_loop == loop_num)
2729 break;
2730 }
2731 else
2732 outer_loop = -1;
2733
2734 if (loop_num != -1 && outer_loop == -1)
2735 {
2736 LABEL_OUTSIDE_LOOP_P (x) = 1;
2737 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2738 loop_number_exit_labels[loop_num] = x;
2739
2740 for (outer_loop = loop_num;
2741 outer_loop != -1 && outer_loop != dest_loop;
2742 outer_loop = loop_outer_loop[outer_loop])
2743 loop_number_exit_count[outer_loop]++;
2744 }
2745
2746 /* If this is inside a loop, but not in the current loop or one enclosed
2747 by it, it invalidates at least one loop. */
2748
2749 if (dest_loop == -1)
2750 return;
2751
2752 /* We must invalidate every nested loop containing the target of this
2753 label, except those that also contain the jump insn. */
2754
2755 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2756 {
2757 /* Stop when we reach a loop that also contains the jump insn. */
2758 for (outer_loop = loop_num; outer_loop != -1;
2759 outer_loop = loop_outer_loop[outer_loop])
2760 if (dest_loop == outer_loop)
2761 return;
2762
2763 /* If we get here, we know we need to invalidate a loop. */
2764 if (loop_dump_stream && ! loop_invalid[dest_loop])
2765 fprintf (loop_dump_stream,
2766 "\nLoop at %d ignored due to multiple entry points.\n",
2767 INSN_UID (loop_number_loop_starts[dest_loop]));
2768
2769 loop_invalid[dest_loop] = 1;
2770 }
2771 return;
2772
2773 case SET:
2774 /* If this is not setting pc, ignore. */
2775 if (SET_DEST (x) == pc_rtx)
2776 mark_loop_jump (SET_SRC (x), loop_num);
2777 return;
2778
2779 case IF_THEN_ELSE:
2780 mark_loop_jump (XEXP (x, 1), loop_num);
2781 mark_loop_jump (XEXP (x, 2), loop_num);
2782 return;
2783
2784 case PARALLEL:
2785 case ADDR_VEC:
2786 for (i = 0; i < XVECLEN (x, 0); i++)
2787 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2788 return;
2789
2790 case ADDR_DIFF_VEC:
2791 for (i = 0; i < XVECLEN (x, 1); i++)
2792 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2793 return;
2794
2795 default:
2796 /* Treat anything else (such as a symbol_ref)
2797 as a branch out of this loop, but not into any loop. */
2798
2799 if (loop_num != -1)
2800 {
2801 #ifdef HAIFA
2802 LABEL_OUTSIDE_LOOP_P (x) = 1;
2803 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2804 #endif /* HAIFA */
2805
2806 loop_number_exit_labels[loop_num] = x;
2807
2808 for (outer_loop = loop_num; outer_loop != -1;
2809 outer_loop = loop_outer_loop[outer_loop])
2810 loop_number_exit_count[outer_loop]++;
2811 }
2812 return;
2813 }
2814 }
2815 \f
2816 /* Return nonzero if there is a label in the range from
2817 insn INSN to and including the insn whose luid is END.
2818 INSN must have an assigned luid (i.e., it must not have
2819 been previously created by loop.c). */
2820
2821 static int
2822 labels_in_range_p (insn, end)
2823 rtx insn;
2824 int end;
2825 {
2826 while (insn && INSN_LUID (insn) <= end)
2827 {
2828 if (GET_CODE (insn) == CODE_LABEL)
2829 return 1;
2830 insn = NEXT_INSN (insn);
2831 }
2832
2833 return 0;
2834 }
2835
2836 /* Record that a memory reference X is being set. */
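/* This is the callback handed to note_stores, whose second argument
   is the SET or CLOBBER rtx performing the store; it is not needed
   here.  */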
2837
2838 static void
2839 note_addr_stored (x, y)
2840 rtx x;
2841 rtx y ATTRIBUTE_UNUSED;
2842 {
2843 register int i;
2844
2845 if (x == 0 || GET_CODE (x) != MEM)
2846 return;
2847
2848 /* Count number of memory writes.
2849 This affects heuristics in strength_reduce. */
2850 num_mem_sets++;
2851
2852 /* BLKmode MEM means all memory is clobbered. */
2853 if (GET_MODE (x) == BLKmode)
2854 unknown_address_altered = 1;
2855
2856 if (unknown_address_altered)
2857 return;
2858
2859 for (i = 0; i < loop_store_mems_idx; i++)
2860 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2861 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2862 {
2863 /* We are storing at the same address as previously noted. Save the
2864 wider reference. */
2865 if (GET_MODE_SIZE (GET_MODE (x))
2866 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2867 loop_store_mems[i] = x;
2868 break;
2869 }
2870
2871 if (i == NUM_STORES)
2872 unknown_address_altered = 1;
2873
2874 else if (i == loop_store_mems_idx)
2875 loop_store_mems[loop_store_mems_idx++] = x;
2876 }
2877 \f
2878 /* Return nonzero if the rtx X is invariant over the current loop.
2879
2880 The value is 2 if we refer to something only conditionally invariant.
2881
2882 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2883 Otherwise, a memory ref is invariant if it does not conflict with
2884 anything stored in `loop_store_mems'. */
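/* For example, a pseudo whose n_times_set entry scan_loop has made
   negative (a movable candidate) makes the REG case below return 2:
   the reg is invariant only on the condition that the insn setting
   it is in fact moved out of the loop.  */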
2885
2886 int
2887 invariant_p (x)
2888 register rtx x;
2889 {
2890 register int i;
2891 register enum rtx_code code;
2892 register char *fmt;
2893 int conditional = 0;
2894
2895 if (x == 0)
2896 return 1;
2897 code = GET_CODE (x);
2898 switch (code)
2899 {
2900 case CONST_INT:
2901 case CONST_DOUBLE:
2902 case SYMBOL_REF:
2903 case CONST:
2904 return 1;
2905
2906 case LABEL_REF:
2907 /* A LABEL_REF is normally invariant, however, if we are unrolling
2908 loops, and this label is inside the loop, then it isn't invariant.
2909 This is because each unrolled copy of the loop body will have
2910 a copy of this label. If this was invariant, then an insn loading
2911 the address of this label into a register might get moved outside
2912 the loop, and then each loop body would end up using the same label.
2913
2914 We don't know the loop bounds here though, so just fail for all
2915 labels. */
2916 if (flag_unroll_loops)
2917 return 0;
2918 else
2919 return 1;
2920
2921 case PC:
2922 case CC0:
2923 case UNSPEC_VOLATILE:
2924 return 0;
2925
2926 case REG:
2927 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2928 since the reg might be set by initialization within the loop. */
2929
2930 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2931 || x == arg_pointer_rtx)
2932 && ! current_function_has_nonlocal_goto)
2933 return 1;
2934
2935 if (loop_has_call
2936 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2937 return 0;
2938
2939 if (n_times_set[REGNO (x)] < 0)
2940 return 2;
2941
2942 return n_times_set[REGNO (x)] == 0;
2943
2944 case MEM:
2945 /* Volatile memory references must be rejected. Do this before
2946 checking for read-only items, so that volatile read-only items
2947 will be rejected also. */
2948 if (MEM_VOLATILE_P (x))
2949 return 0;
2950
2951 /* Read-only items (such as constants in a constant pool) are
2952 invariant if their address is. */
2953 if (RTX_UNCHANGING_P (x))
2954 break;
2955
2956 /* If we filled the table (or had a subroutine call), any location
2957 in memory could have been clobbered. */
2958 if (unknown_address_altered)
2959 return 0;
2960
2961 /* See if there is any dependence between a store and this load. */
2962 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2963 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2964 return 0;
2965
2966 /* It's not invalidated by a store in memory
2967 but we must still verify the address is invariant. */
2968 break;
2969
2970 case ASM_OPERANDS:
2971 /* Don't mess with insns declared volatile. */
2972 if (MEM_VOLATILE_P (x))
2973 return 0;
2974 break;
2975
2976 default:
2977 break;
2978 }
2979
2980 fmt = GET_RTX_FORMAT (code);
2981 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2982 {
2983 if (fmt[i] == 'e')
2984 {
2985 int tem = invariant_p (XEXP (x, i));
2986 if (tem == 0)
2987 return 0;
2988 if (tem == 2)
2989 conditional = 1;
2990 }
2991 else if (fmt[i] == 'E')
2992 {
2993 register int j;
2994 for (j = 0; j < XVECLEN (x, i); j++)
2995 {
2996 int tem = invariant_p (XVECEXP (x, i, j));
2997 if (tem == 0)
2998 return 0;
2999 if (tem == 2)
3000 conditional = 1;
3001 }
3002
3003 }
3004 }
3005
3006 return 1 + conditional;
3007 }
3008
3009 \f
3010 /* Return nonzero if all the insns in the loop that set REG
3011 are INSN and the immediately following insns,
3012 and if each of those insns sets REG in an invariant way
3013 (not counting uses of REG in them).
3014
3015 The value is 2 if some of these insns are only conditionally invariant.
3016
3017 We assume that INSN itself is the first set of REG
3018 and that its source is invariant. */
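/* Consecutive sets of this kind arise, for example, when a value is
   built up in REG by a few adjacent insns; all of them must be
   movable together for the register to count as invariant.  */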
3019
3020 static int
3021 consec_sets_invariant_p (reg, n_sets, insn)
3022 int n_sets;
3023 rtx reg, insn;
3024 {
3025 register rtx p = insn;
3026 register int regno = REGNO (reg);
3027 rtx temp;
3028 /* Number of sets we have to insist on finding after INSN. */
3029 int count = n_sets - 1;
3030 int old = n_times_set[regno];
3031 int value = 0;
3032 int this;
3033
3034 /* If N_SETS hit the limit, we can't rely on its value. */
3035 if (n_sets == 127)
3036 return 0;
3037
3038 n_times_set[regno] = 0;
3039
3040 while (count > 0)
3041 {
3042 register enum rtx_code code;
3043 rtx set;
3044
3045 p = NEXT_INSN (p);
3046 code = GET_CODE (p);
3047
3048 /* If library call, skip to end of it. */
3049 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3050 p = XEXP (temp, 0);
3051
3052 this = 0;
3053 if (code == INSN
3054 && (set = single_set (p))
3055 && GET_CODE (SET_DEST (set)) == REG
3056 && REGNO (SET_DEST (set)) == regno)
3057 {
3058 this = invariant_p (SET_SRC (set));
3059 if (this != 0)
3060 value |= this;
3061 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3062 {
3063 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3064 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3065 notes are OK. */
3066 this = (CONSTANT_P (XEXP (temp, 0))
3067 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3068 && invariant_p (XEXP (temp, 0))));
3069 if (this != 0)
3070 value |= this;
3071 }
3072 }
3073 if (this != 0)
3074 count--;
3075 else if (code != NOTE)
3076 {
3077 n_times_set[regno] = old;
3078 return 0;
3079 }
3080 }
3081
3082 n_times_set[regno] = old;
3083 /* If invariant_p ever returned 2, we return 2. */
3084 return 1 + (value & 2);
3085 }
3086
3087 #if 0
3088 /* I don't think this condition is sufficient to allow INSN
3089 to be moved, so we no longer test it. */
3090
3091 /* Return 1 if all insns in the basic block of INSN and following INSN
3092 that set REG are invariant according to TABLE. */
3093
3094 static int
3095 all_sets_invariant_p (reg, insn, table)
3096 rtx reg, insn;
3097 short *table;
3098 {
3099 register rtx p = insn;
3100 register int regno = REGNO (reg);
3101
3102 while (1)
3103 {
3104 register enum rtx_code code;
3105 p = NEXT_INSN (p);
3106 code = GET_CODE (p);
3107 if (code == CODE_LABEL || code == JUMP_INSN)
3108 return 1;
3109 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3110 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3111 && REGNO (SET_DEST (PATTERN (p))) == regno)
3112 {
3113 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3114 return 0;
3115 }
3116 }
3117 }
3118 #endif /* 0 */
3119 \f
3120 /* Look at all uses (not sets) of registers in X. For each, if it is
3121 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3122 a different insn, set USAGE[REGNO] to const0_rtx. */
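/* const0_rtx serves as a sentinel here: it is a unique rtx that can
   never be an insn, so it unambiguously marks registers used in more
   than one insn.  */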
3123
3124 static void
3125 find_single_use_in_loop (insn, x, usage)
3126 rtx insn;
3127 rtx x;
3128 rtx *usage;
3129 {
3130 enum rtx_code code = GET_CODE (x);
3131 char *fmt = GET_RTX_FORMAT (code);
3132 int i, j;
3133
3134 if (code == REG)
3135 usage[REGNO (x)]
3136 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3137 ? const0_rtx : insn;
3138
3139 else if (code == SET)
3140 {
3141 /* Don't count SET_DEST if it is a REG; otherwise count things
3142 in SET_DEST because if a register is partially modified, it won't
3143 show up as a potential movable so we don't care how USAGE is set
3144 for it. */
3145 if (GET_CODE (SET_DEST (x)) != REG)
3146 find_single_use_in_loop (insn, SET_DEST (x), usage);
3147 find_single_use_in_loop (insn, SET_SRC (x), usage);
3148 }
3149 else
3150 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3151 {
3152 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3153 find_single_use_in_loop (insn, XEXP (x, i), usage);
3154 else if (fmt[i] == 'E')
3155 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3156 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3157 }
3158 }
3159 \f
3160 /* Increment N_TIMES_SET at the index of each register
3161 that is modified by an insn between FROM and TO.
3162 If the value of an element of N_TIMES_SET becomes 127 or more,
3163 stop incrementing it, to avoid overflow.
3164
3165 Store in SINGLE_USAGE[I] the single insn in which register I is
3166 used, if it is only used once. Otherwise, it is set to 0 (for no
3167 uses) or const0_rtx for more than one use. This parameter may be zero,
3168 in which case this processing is not done.
3169
3170 Store in *COUNT_PTR the number of actual instructions
3171 in the loop. We use this to decide what is worth moving out. */
3172
3173 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3174 In that case, it is the insn that last set reg n. */
3175
3176 static void
3177 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3178 register rtx from, to;
3179 char *may_not_move;
3180 rtx *single_usage;
3181 int *count_ptr;
3182 int nregs;
3183 {
3184 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3185 register rtx insn;
3186 register int count = 0;
3187 register rtx dest;
3188
3189 bzero ((char *) last_set, nregs * sizeof (rtx));
3190 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3191 {
3192 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3193 {
3194 ++count;
3195
3196 /* If requested, record registers that have exactly one use. */
3197 if (single_usage)
3198 {
3199 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3200
3201 /* Include uses in REG_EQUAL notes. */
3202 if (REG_NOTES (insn))
3203 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3204 }
3205
3206 if (GET_CODE (PATTERN (insn)) == CLOBBER
3207 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3208 /* Don't move a reg that has an explicit clobber.
3209 We might do so sometimes, but it's not worth the pain. */
3210 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3211
3212 if (GET_CODE (PATTERN (insn)) == SET
3213 || GET_CODE (PATTERN (insn)) == CLOBBER)
3214 {
3215 dest = SET_DEST (PATTERN (insn));
3216 while (GET_CODE (dest) == SUBREG
3217 || GET_CODE (dest) == ZERO_EXTRACT
3218 || GET_CODE (dest) == SIGN_EXTRACT
3219 || GET_CODE (dest) == STRICT_LOW_PART)
3220 dest = XEXP (dest, 0);
3221 if (GET_CODE (dest) == REG)
3222 {
3223 register int regno = REGNO (dest);
3224 /* If this is the first setting of this reg
3225 in current basic block, and it was set before,
3226 it must be set in two basic blocks, so it cannot
3227 be moved out of the loop. */
3228 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3229 may_not_move[regno] = 1;
3230 /* If this is not first setting in current basic block,
3231 see if reg was used in between previous one and this.
3232 If so, neither one can be moved. */
3233 if (last_set[regno] != 0
3234 && reg_used_between_p (dest, last_set[regno], insn))
3235 may_not_move[regno] = 1;
3236 if (n_times_set[regno] < 127)
3237 ++n_times_set[regno];
3238 last_set[regno] = insn;
3239 }
3240 }
3241 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3242 {
3243 register int i;
3244 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3245 {
3246 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3247 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3248 /* Don't move a reg that has an explicit clobber.
3249 It's not worth the pain to try to do it correctly. */
3250 may_not_move[REGNO (XEXP (x, 0))] = 1;
3251
3252 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3253 {
3254 dest = SET_DEST (x);
3255 while (GET_CODE (dest) == SUBREG
3256 || GET_CODE (dest) == ZERO_EXTRACT
3257 || GET_CODE (dest) == SIGN_EXTRACT
3258 || GET_CODE (dest) == STRICT_LOW_PART)
3259 dest = XEXP (dest, 0);
3260 if (GET_CODE (dest) == REG)
3261 {
3262 register int regno = REGNO (dest);
3263 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3264 may_not_move[regno] = 1;
3265 if (last_set[regno] != 0
3266 && reg_used_between_p (dest, last_set[regno], insn))
3267 may_not_move[regno] = 1;
3268 if (n_times_set[regno] < 127)
3269 ++n_times_set[regno];
3270 last_set[regno] = insn;
3271 }
3272 }
3273 }
3274 }
3275 }
3276
3277 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3278 bzero ((char *) last_set, nregs * sizeof (rtx));
3279 }
3280 *count_ptr = count;
3281 }
3282 \f
3283 /* Given a loop that is bounded by LOOP_START and LOOP_END
3284 and that is entered at SCAN_START,
3285 return 1 if the register set in SET contained in insn INSN is used by
3286 any insn that precedes INSN in cyclic order starting
3287 from the loop entry point.
3288
3289 We don't want to use INSN_LUID here because if we restrict INSN to those
3290 that have a valid INSN_LUID, it means we cannot move an invariant out
3291 from an inner loop past two loops. */
3292
3293 static int
3294 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3295 rtx set, insn, loop_start, scan_start, loop_end;
3296 {
3297 rtx reg = SET_DEST (set);
3298 rtx p;
3299
3300 /* Scan forward checking for register usage. If we hit INSN, we
3301 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3302 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3303 {
3304 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3305 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3306 return 1;
3307
3308 if (p == loop_end)
3309 p = loop_start;
3310 }
3311
3312 return 0;
3313 }
3314 \f
3315 /* A "basic induction variable" or biv is a pseudo reg that is set
3316 (within this loop) only by incrementing or decrementing it. */
3317 /* A "general induction variable" or giv is a pseudo reg whose
3318 value is a linear function of a biv. */
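/* For example, in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter `i' is a biv, and the address of `a[i]', namely
   a + i * sizeof (*a), is a giv, being a linear function of `i'.  */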
3319
3320 /* Bivs are recognized by `basic_induction_var';
3321 Givs by `general_induction_var'. */
3322
3323 /* Indexed by register number, indicates whether or not register is an
3324 induction variable, and if so what type. */
3325
3326 enum iv_mode *reg_iv_type;
3327
3328 /* Indexed by register number, contains pointer to `struct induction'
3329 if register is an induction variable. This holds general info for
3330 all induction variables. */
3331
3332 struct induction **reg_iv_info;
3333
3334 /* Indexed by register number, contains pointer to `struct iv_class'
3335 if register is a basic induction variable. This holds info describing
3336 the class (a related group) of induction variables that the biv belongs
3337 to. */
3338
3339 struct iv_class **reg_biv_class;
3340
3341 /* The head of a list which links together (via the next field)
3342 every iv class for the current loop. */
3343
3344 struct iv_class *loop_iv_list;
3345
3346 /* Communication with routines called via `note_stores'. */
3347
3348 static rtx note_insn;
3349
3350 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3351
3352 static rtx addr_placeholder;
3353
3354 /* ??? Unfinished optimizations, and possible future optimizations,
3355 for the strength reduction code. */
3356
3357 /* ??? There is one more optimization you might be interested in doing: to
3358 allocate pseudo registers for frequently-accessed memory locations.
3359 If the same memory location is referenced each time around, it might
3360 be possible to copy it into a register before and out after.
3361 This is especially useful when the memory location is a variable which
3362 is in a stack slot because somewhere its address is taken. If the
3363 loop doesn't contain a function call and the variable isn't volatile,
3364 it is safe to keep the value in a register for the duration of the
3365 loop. One tricky thing is that the copying of the value back from the
3366 register has to be done on all exits from the loop. You need to check that
3367 all the exits from the loop go to the same place. */
3368
3369 /* ??? The interaction of biv elimination, and recognition of 'constant'
3370 bivs, may cause problems. */
3371
3372 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3373 performance problems.
3374
3375 Perhaps don't eliminate things that can be combined with an addressing
3376 mode. Find all givs that have the same biv, mult_val, and add_val;
3377 then for each giv, check to see if its only use dies in a following
3378 memory address. If so, generate a new memory address and check to see
3379 if it is valid. If it is valid, then store the modified memory address,
3380 otherwise, mark the giv as not done so that it will get its own iv. */
3381
3382 /* ??? Could try to optimize branches when it is known that a biv is always
3383 positive. */
3384
3385 /* ??? When replacing a biv in a compare insn, we should replace it with the
3386 closest giv so that an optimized branch can still be recognized by the
3387 combiner, e.g. the VAX acb insn. */
3388
3389 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3390 was rerun in loop_optimize whenever a register was added or moved.
3391 Also, some of the optimizations could be a little less conservative. */
3392 \f
3393 /* Perform strength reduction and induction variable elimination. */
3394
3395 /* Pseudo registers created during this function will be beyond the last
3396 valid index in several tables including n_times_set and regno_last_uid.
3397 This does not cause a problem here, because the added registers cannot be
3398 givs outside of their loop, and hence will never be reconsidered.
3399 But scan_loop must check regnos to make sure they are in bounds. */
3400
3401 static void
3402 strength_reduce (scan_start, end, loop_top, insn_count,
3403 loop_start, loop_end, unroll_p)
3404 rtx scan_start;
3405 rtx end;
3406 rtx loop_top;
3407 int insn_count;
3408 rtx loop_start;
3409 rtx loop_end;
3410 int unroll_p;
3411 {
3412 rtx p;
3413 rtx set;
3414 rtx inc_val;
3415 rtx mult_val;
3416 rtx dest_reg;
3417 /* This is 1 if current insn is not executed at least once for every loop
3418 iteration. */
3419 int not_every_iteration = 0;
3420 /* This is 1 if current insn may be executed more than once for every
3421 loop iteration. */
3422 int maybe_multiple = 0;
3423 /* Temporary list pointers for traversing loop_iv_list. */
3424 struct iv_class *bl, **backbl;
3425 /* Ratio of extra register life span we can justify
3426 for saving an instruction. More if loop doesn't call subroutines
3427 since in that case saving an insn makes more difference
3428 and more registers are available. */
3429 /* ??? could set this to last value of threshold in move_movables */
3430 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3431 /* Map of pseudo-register replacements. */
3432 rtx *reg_map;
3433 int call_seen;
3434 rtx test;
3435 rtx end_insert_before;
3436 int loop_depth = 0;
3437
3438 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3439 * sizeof (enum iv_mode));
3440 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3441 reg_iv_info = (struct induction **)
3442 alloca (max_reg_before_loop * sizeof (struct induction *));
3443 bzero ((char *) reg_iv_info, (max_reg_before_loop
3444 * sizeof (struct induction *)));
3445 reg_biv_class = (struct iv_class **)
3446 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3447 bzero ((char *) reg_biv_class, (max_reg_before_loop
3448 * sizeof (struct iv_class *)));
3449
3450 loop_iv_list = 0;
3451 addr_placeholder = gen_reg_rtx (Pmode);
3452
3453 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3454 must be put before this insn, so that they will appear in the right
3455 order (i.e. loop order).
3456
3457 If loop_end is the end of the current function, then emit a
3458 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3459 dummy note insn. */
3460 if (NEXT_INSN (loop_end) != 0)
3461 end_insert_before = NEXT_INSN (loop_end);
3462 else
3463 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3464
3465 /* Scan through loop to find all possible bivs. */
3466
3467 p = scan_start;
3468 while (1)
3469 {
3470 p = NEXT_INSN (p);
3471 /* At end of a straight-in loop, we are done.
3472 At end of a loop entered at the bottom, scan the top. */
3473 if (p == scan_start)
3474 break;
3475 if (p == end)
3476 {
3477 if (loop_top != 0)
3478 p = loop_top;
3479 else
3480 break;
3481 if (p == scan_start)
3482 break;
3483 }
3484
3485 if (GET_CODE (p) == INSN
3486 && (set = single_set (p))
3487 && GET_CODE (SET_DEST (set)) == REG)
3488 {
3489 dest_reg = SET_DEST (set);
3490 if (REGNO (dest_reg) < max_reg_before_loop
3491 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3492 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3493 {
3494 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3495 dest_reg, p, &inc_val, &mult_val))
3496 {
3497 /* It is a possible basic induction variable.
3498 Create and initialize an induction structure for it. */
3499
3500 struct induction *v
3501 = (struct induction *) alloca (sizeof (struct induction));
3502
3503 record_biv (v, p, dest_reg, inc_val, mult_val,
3504 not_every_iteration, maybe_multiple);
3505 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3506 }
3507 else if (REGNO (dest_reg) < max_reg_before_loop)
3508 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3509 }
3510 }
3511
3512 /* Past CODE_LABEL, we get to insns that may be executed multiple
3513 times. The only way we can be sure that they can't is if every
3514 jump insn between here and the end of the loop either
3515 returns, exits the loop, is a forward jump, or is a jump
3516 to the loop start. */
3517
3518 if (GET_CODE (p) == CODE_LABEL)
3519 {
3520 rtx insn = p;
3521
3522 maybe_multiple = 0;
3523
3524 while (1)
3525 {
3526 insn = NEXT_INSN (insn);
3527 if (insn == scan_start)
3528 break;
3529 if (insn == end)
3530 {
3531 if (loop_top != 0)
3532 insn = loop_top;
3533 else
3534 break;
3535 if (insn == scan_start)
3536 break;
3537 }
3538
3539 if (GET_CODE (insn) == JUMP_INSN
3540 && GET_CODE (PATTERN (insn)) != RETURN
3541 && (! condjump_p (insn)
3542 || (JUMP_LABEL (insn) != 0
3543 && JUMP_LABEL (insn) != scan_start
3544 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3545 || INSN_UID (insn) >= max_uid_for_loop
3546 || (INSN_LUID (JUMP_LABEL (insn))
3547 < INSN_LUID (insn))))))
3548 {
3549 maybe_multiple = 1;
3550 break;
3551 }
3552 }
3553 }
3554
3555 /* Past a jump, we get to insns for which we can't count
3556 on whether they will be executed during each iteration. */
3557 /* This code appears twice in strength_reduce. There is also similar
3558 code in scan_loop. */
3559 if (GET_CODE (p) == JUMP_INSN
3560 /* If we enter the loop in the middle, and scan around to the
3561 beginning, don't set not_every_iteration for that.
3562 This can be any kind of jump, since we want to know if insns
3563 will be executed if the loop is executed. */
3564 && ! (JUMP_LABEL (p) == loop_top
3565 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3566 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3567 {
3568 rtx label = 0;
3569
3570 /* If this is a jump outside the loop, then it also doesn't
3571 matter. Check to see if the target of this branch is on the
3572 loop_number_exit_labels list. */
3573
3574 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3575 label;
3576 label = LABEL_NEXTREF (label))
3577 if (XEXP (label, 0) == JUMP_LABEL (p))
3578 break;
3579
3580 if (! label)
3581 not_every_iteration = 1;
3582 }
3583
3584 else if (GET_CODE (p) == NOTE)
3585 {
3586 /* At the virtual top of a converted loop, insns are again known to
3587 be executed each iteration: logically, the loop begins here
3588 even though the exit code has been duplicated. */
3589 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3590 not_every_iteration = 0;
3591 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3592 loop_depth++;
3593 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3594 loop_depth--;
3595 }
3596
3597 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3598 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3599 or not an insn is known to be executed each iteration of the
3600 loop, whether or not any iterations are known to occur.
3601
3602 Therefore, if we have just passed a label and have no more labels
3603 between here and the test insn of the loop, we know these insns
3604 will be executed each iteration. */
3605
3606 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3607 && no_labels_between_p (p, loop_end))
3608 not_every_iteration = 0;
3609 }
3610
3611 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3612 Make a sanity check against n_times_set. */
3613 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3614 {
3615 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3616 /* The above happens if the register was modified via a subreg, etc. */
3617 /* Every set of the reg must have been counted as a biv increment: */
3618 || n_times_set[bl->regno] != bl->biv_count
3619 /* If it was never incremented, it is an invariant that we decided not
3620 to move. So leave it alone. */
3621 || ! bl->incremented)
3622 {
3623 if (loop_dump_stream)
3624 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3625 bl->regno,
3626 (reg_iv_type[bl->regno] != BASIC_INDUCT
3627 ? "not induction variable"
3628 : (! bl->incremented ? "never incremented"
3629 : "count error")));
3630
3631 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3632 *backbl = bl->next;
3633 }
3634 else
3635 {
3636 backbl = &bl->next;
3637
3638 if (loop_dump_stream)
3639 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3640 }
3641 }
3642
3643 /* Exit if there are no bivs. */
3644 if (! loop_iv_list)
3645 {
3646 /* We can still unroll the loop anyway, but indicate that there is no
3647 strength reduction info available. */
3648 if (unroll_p)
3649 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3650
3651 return;
3652 }
3653
3654 /* Find initial value for each biv by searching backwards from loop_start,
3655 halting at first label. Also record any test condition. */
3656
3657 call_seen = 0;
3658 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3659 {
3660 note_insn = p;
3661
3662 if (GET_CODE (p) == CALL_INSN)
3663 call_seen = 1;
3664
3665 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3666 || GET_CODE (p) == CALL_INSN)
3667 note_stores (PATTERN (p), record_initial);
3668
3669 /* Record any test of a biv that branches around the loop, if there is
3670 no store between it and the start of the loop. We only care about
3671 tests with constants and registers, and only certain of those. */
3672 if (GET_CODE (p) == JUMP_INSN
3673 && JUMP_LABEL (p) != 0
3674 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3675 && (test = get_condition_for_loop (p)) != 0
3676 && GET_CODE (XEXP (test, 0)) == REG
3677 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3678 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3679 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3680 && bl->init_insn == 0)
3681 {
3682 /* If an NE test, we have an initial value! */
3683 if (GET_CODE (test) == NE)
3684 {
3685 bl->init_insn = p;
3686 bl->init_set = gen_rtx_SET (VOIDmode,
3687 XEXP (test, 0), XEXP (test, 1));
3688 }
3689 else
3690 bl->initial_test = test;
3691 }
3692 }
3693
3694 /* Look at each biv and see if we can say anything better about its
3695 initial value from any initializing insns set up above. (This is done
3696 in two passes to avoid missing SETs in a PARALLEL.) */
3697 for (bl = loop_iv_list; bl; bl = bl->next)
3698 {
3699 rtx src;
3700 rtx note;
3701
3702 if (! bl->init_insn)
3703 continue;
3704
3705 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3706 is a constant, use the value of that. */
3707 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3708 && CONSTANT_P (XEXP (note, 0)))
3709 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3710 && CONSTANT_P (XEXP (note, 0))))
3711 src = XEXP (note, 0);
3712 else
3713 src = SET_SRC (bl->init_set);
3714
3715 if (loop_dump_stream)
3716 fprintf (loop_dump_stream,
3717 "Biv %d initialized at insn %d: initial value ",
3718 bl->regno, INSN_UID (bl->init_insn));
3719
3720 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3721 || GET_MODE (src) == VOIDmode)
3722 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3723 {
3724 bl->initial_value = src;
3725
3726 if (loop_dump_stream)
3727 {
3728 if (GET_CODE (src) == CONST_INT)
3729 {
3730 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3731 fputc ('\n', loop_dump_stream);
3732 }
3733 else
3734 {
3735 print_rtl (loop_dump_stream, src);
3736 fprintf (loop_dump_stream, "\n");
3737 }
3738 }
3739 }
3740 else
3741 {
3742 /* The biv's initial value is not a simple move,
3743 so leave its initial value as the register itself. */
3744
3745 if (loop_dump_stream)
3746 fprintf (loop_dump_stream, "is complex\n");
3747 }
3748 }
3749
3750 /* Search the loop for general induction variables. */
3751
3752 /* A register is a giv if: it is only set once, it is a function of a
3753 biv and a constant (or invariant), and it is not a biv. */
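/* As a purely hypothetical illustration, in

for (i = 0; i < n; i++)
x[i] = 3 * i;

`i' is a biv, the register holding `3 * i' is a DEST_REG giv, and the
address of x[i], which advances by the element size each iteration,
is a DEST_ADDR giv found by find_mem_givs below. */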
3754
3755 not_every_iteration = 0;
3756 loop_depth = 0;
3757 p = scan_start;
3758 while (1)
3759 {
3760 p = NEXT_INSN (p);
3761 /* At end of a straight-in loop, we are done.
3762 At end of a loop entered at the bottom, scan the top. */
3763 if (p == scan_start)
3764 break;
3765 if (p == end)
3766 {
3767 if (loop_top != 0)
3768 p = loop_top;
3769 else
3770 break;
3771 if (p == scan_start)
3772 break;
3773 }
3774
3775 /* Look for a general induction variable in a register. */
3776 if (GET_CODE (p) == INSN
3777 && (set = single_set (p))
3778 && GET_CODE (SET_DEST (set)) == REG
3779 && ! may_not_optimize[REGNO (SET_DEST (set))])
3780 {
3781 rtx src_reg;
3782 rtx add_val;
3783 rtx mult_val;
3784 int benefit;
3785 rtx regnote = 0;
3786
3787 dest_reg = SET_DEST (set);
3788 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3789 continue;
3790
3791 if (/* SET_SRC is a giv. */
3792 ((benefit = general_induction_var (SET_SRC (set),
3793 &src_reg, &add_val,
3794 &mult_val))
3795 /* Equivalent expression is a giv. */
3796 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3797 && (benefit = general_induction_var (XEXP (regnote, 0),
3798 &src_reg,
3799 &add_val, &mult_val))))
3800 /* Don't try to handle any regs made by loop optimization.
3801 We have nothing on them in regno_first_uid, etc. */
3802 && REGNO (dest_reg) < max_reg_before_loop
3803 /* Don't recognize a BASIC_INDUCT_VAR here. */
3804 && dest_reg != src_reg
3805 /* This must be the only place where the register is set. */
3806 && (n_times_set[REGNO (dest_reg)] == 1
3807 /* or all sets must be consecutive and make a giv. */
3808 || (benefit = consec_sets_giv (benefit, p,
3809 src_reg, dest_reg,
3810 &add_val, &mult_val))))
3811 {
3812 int count;
3813 struct induction *v
3814 = (struct induction *) alloca (sizeof (struct induction));
3815 rtx temp;
3816
3817 /* If this is a library call, increase benefit. */
3818 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3819 benefit += libcall_benefit (p);
3820
3821 /* Skip the consecutive insns, if there are any. */
3822 for (count = n_times_set[REGNO (dest_reg)] - 1;
3823 count > 0; count--)
3824 {
3825 /* If first insn of libcall sequence, skip to end.
3826 Do this at start of loop, since P is guaranteed to
3827 be an insn here. */
3828 if (GET_CODE (p) != NOTE
3829 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3830 p = XEXP (temp, 0);
3831
3832 do p = NEXT_INSN (p);
3833 while (GET_CODE (p) == NOTE);
3834 }
3835
3836 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3837 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3838 loop_end);
3839
3840 }
3841 }
3842
3843 #ifndef DONT_REDUCE_ADDR
3844 /* Look for givs which are memory addresses. */
3845 /* This resulted in worse code on a VAX 8600. I wonder if it
3846 still does. */
3847 if (GET_CODE (p) == INSN)
3848 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3849 loop_end);
3850 #endif
3851
3852 /* Update the status of whether giv can derive other givs. This can
3853 change when we pass a label or an insn that updates a biv. */
3854 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3855 || GET_CODE (p) == CODE_LABEL)
3856 update_giv_derive (p);
3857
3858 /* Past a jump, we get to insns for which we can't count
3859 on whether they will be executed during each iteration. */
3860 /* This code appears twice in strength_reduce. There is also similar
3861 code in scan_loop. */
3862 if (GET_CODE (p) == JUMP_INSN
3863 /* If we enter the loop in the middle, and scan around to the
3864 beginning, don't set not_every_iteration for that.
3865 This can be any kind of jump, since we want to know if insns
3866 will be executed if the loop is executed. */
3867 && ! (JUMP_LABEL (p) == loop_top
3868 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3869 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3870 {
3871 rtx label = 0;
3872
3873 /* If this is a jump outside the loop, then it also doesn't
3874 matter. Check to see if the target of this branch is on the
3875 loop_number_exit_labels list. */
3876
3877 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3878 label;
3879 label = LABEL_NEXTREF (label))
3880 if (XEXP (label, 0) == JUMP_LABEL (p))
3881 break;
3882
3883 if (! label)
3884 not_every_iteration = 1;
3885 }
3886
3887 else if (GET_CODE (p) == NOTE)
3888 {
3889 /* At the virtual top of a converted loop, insns are again known to
3890 be executed each iteration: logically, the loop begins here
3891 even though the exit code has been duplicated. */
3892 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3893 not_every_iteration = 0;
3894 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3895 loop_depth++;
3896 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3897 loop_depth--;
3898 }
3899
3900 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3901 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3902 or not an insn is known to be executed each iteration of the
3903 loop, whether or not any iterations are known to occur.
3904
3905 Therefore, if we have just passed a label and have no more labels
3906 between here and the test insn of the loop, we know these insns
3907 will be executed each iteration. */
3908
3909 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3910 && no_labels_between_p (p, loop_end))
3911 not_every_iteration = 0;
3912 }
3913
3914 /* Try to calculate and save the number of loop iterations. This is
3915 set to zero if the actual number cannot be calculated. This must
3916 be called after all giv's have been identified, since otherwise it may
3917 fail if the iteration variable is a giv. */
3918
3919 loop_n_iterations = loop_iterations (loop_start, loop_end);
3920
3921 /* Now for each giv for which we still don't know whether or not it is
3922 replaceable, check to see if it is replaceable because its final value
3923 can be calculated. This must be done after loop_iterations is called,
3924 so that final_giv_value will work correctly. */
3925
3926 for (bl = loop_iv_list; bl; bl = bl->next)
3927 {
3928 struct induction *v;
3929
3930 for (v = bl->giv; v; v = v->next_iv)
3931 if (! v->replaceable && ! v->not_replaceable)
3932 check_final_value (v, loop_start, loop_end);
3933 }
3934
3935 /* Try to prove that the loop counter variable (if any) is always
3936 nonnegative; if so, record that fact with a REG_NONNEG note
3937 so that "decrement and branch until zero" insn can be used. */
3938 check_dbra_loop (loop_end, insn_count, loop_start);
3939
3940 #ifdef HAIFA
3941 /* Record loop variables relevant for BCT optimization before unrolling
3942 the loop. Unrolling may update part of this information, and the
3943 correct data will be used for generating the BCT. */
3944 #ifdef HAVE_decrement_and_branch_on_count
3945 if (HAVE_decrement_and_branch_on_count)
3946 analyze_loop_iterations (loop_start, loop_end);
3947 #endif
3948 #endif /* HAIFA */
3949
3950 /* Create reg_map to hold substitutions for replaceable giv regs. */
3951 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3952 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3953
3954 /* Examine each iv class for feasibility of strength reduction/induction
3955 variable elimination. */
3956
3957 for (bl = loop_iv_list; bl; bl = bl->next)
3958 {
3959 struct induction *v;
3960 int benefit;
3961 int all_reduced;
3962 rtx final_value = 0;
3963
3964 /* Test whether it will be possible to eliminate this biv
3965 provided all givs are reduced. This is possible if either
3966 the reg is not used outside the loop, or we can compute
3967 what its final value will be.
3968
3969 For architectures with a decrement_and_branch_until_zero insn,
3970 don't do this if we put a REG_NONNEG note on the endtest for
3971 this biv. */
3972
3973 /* Compare against bl->init_insn rather than loop_start.
3974 We aren't concerned with any uses of the biv between
3975 init_insn and loop_start since these won't be affected
3976 by the value of the biv elsewhere in the function, so
3977 long as init_insn doesn't use the biv itself.
3978 March 14, 1989 -- self@bayes.arc.nasa.gov */
3979
3980 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3981 && bl->init_insn
3982 && INSN_UID (bl->init_insn) < max_uid_for_loop
3983 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3984 #ifdef HAVE_decrement_and_branch_until_zero
3985 && ! bl->nonneg
3986 #endif
3987 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3988 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3989 #ifdef HAVE_decrement_and_branch_until_zero
3990 && ! bl->nonneg
3991 #endif
3992 ))
3993 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3994 threshold, insn_count);
3995 else
3996 {
3997 if (loop_dump_stream)
3998 {
3999 fprintf (loop_dump_stream,
4000 "Cannot eliminate biv %d.\n",
4001 bl->regno);
4002 fprintf (loop_dump_stream,
4003 "First use: insn %d, last use: insn %d.\n",
4004 REGNO_FIRST_UID (bl->regno),
4005 REGNO_LAST_UID (bl->regno));
4006 }
4007 }
4008
4009 /* Combine all giv's for this iv_class. */
4010 combine_givs (bl);
4011
4012 /* This will be true at the end, if all givs which depend on this
4013 biv have been strength reduced.
4014 We can't (currently) eliminate the biv unless this is so. */
4015 all_reduced = 1;
4016
4017 /* Check each giv in this class to see if we will benefit by reducing
4018 it. Skip giv's combined with others. */
4019 for (v = bl->giv; v; v = v->next_iv)
4020 {
4021 struct induction *tv;
4022
4023 if (v->ignore || v->same)
4024 continue;
4025
4026 benefit = v->benefit;
4027
4028 /* Reduce benefit if not replaceable, since we will insert
4029 a move-insn to replace the insn that calculates this giv.
4030 Don't do this unless the giv is a user variable, since it
4031 will often be marked non-replaceable because of the duplication
4032 of the exit code outside the loop. In such a case, the copies
4033 we insert are dead and will be deleted. So they don't have
4034 a cost. Similar situations exist. */
4035 /* ??? The new final_[bg]iv_value code does a much better job
4036 of finding replaceable giv's, and hence this code may no longer
4037 be necessary. */
4038 if (! v->replaceable && ! bl->eliminable
4039 && REG_USERVAR_P (v->dest_reg))
4040 benefit -= copy_cost;
4041
4042 /* Decrease the benefit to count the add-insns that we will
4043 insert to increment the reduced reg for the giv. */
4044 benefit -= add_cost * bl->biv_count;
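/* Illustrative numbers only: with add_cost == 1 and two biv
increments in the loop, a giv whose benefit was 5 drops to 3 here. */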
4045
4046 /* Decide whether to strength-reduce this giv or to leave the code
4047 unchanged (recompute it from the biv each time it is used).
4048 This decision can be made independently for each giv. */
4049
4050 #ifdef AUTO_INC_DEC
4051 /* Attempt to guess whether autoincrement will handle some of the
4052 new add insns; if so, increase BENEFIT (undo the subtraction of
4053 add_cost that was done above). */
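/* For instance (hypothetical target), a DEST_ADDR giv stepping by
the mode size of the memory it addresses can often fold into a
(post_inc ...) address, so the explicit add insn disappears. */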
4054 if (v->giv_type == DEST_ADDR
4055 && GET_CODE (v->mult_val) == CONST_INT)
4056 {
4057 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4058 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4059 benefit += add_cost * bl->biv_count;
4060 #endif
4061 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4062 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4063 benefit += add_cost * bl->biv_count;
4064 #endif
4065 }
4066 #endif
4067
4068 /* If an insn is not to be strength reduced, then set its ignore
4069 flag, and clear all_reduced. */
4070
4071 /* A giv that depends on a reversed biv must be reduced if it is
4072 used after the loop exit; otherwise, it would have the wrong
4073 value after the loop exit. To keep things simple, just reduce all
4074 such givs whether or not we know they are used after the loop
4075 exit. */
4076
4077 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4078 && ! bl->reversed)
4079 {
4080 if (loop_dump_stream)
4081 fprintf (loop_dump_stream,
4082 "giv of insn %d not worth while, %d vs %d.\n",
4083 INSN_UID (v->insn),
4084 v->lifetime * threshold * benefit, insn_count);
4085 v->ignore = 1;
4086 all_reduced = 0;
4087 }
4088 else
4089 {
4090 /* Check that we can increment the reduced giv without a
4091 multiply insn. If not, reject it. */
4092
4093 for (tv = bl->biv; tv; tv = tv->next_iv)
4094 if (tv->mult_val == const1_rtx
4095 && ! product_cheap_p (tv->add_val, v->mult_val))
4096 {
4097 if (loop_dump_stream)
4098 fprintf (loop_dump_stream,
4099 "giv of insn %d: would need a multiply.\n",
4100 INSN_UID (v->insn));
4101 v->ignore = 1;
4102 all_reduced = 0;
4103 break;
4104 }
4105 }
4106 }
4107
4108 /* Reduce each giv that we decided to reduce. */
4109
4110 for (v = bl->giv; v; v = v->next_iv)
4111 {
4112 struct induction *tv;
4113 if (! v->ignore && v->same == 0)
4114 {
4115 int auto_inc_opt = 0;
4116
4117 v->new_reg = gen_reg_rtx (v->mode);
4118
4119 #ifdef AUTO_INC_DEC
4120 /* If the target has auto-increment addressing modes, and
4121 this is an address giv, then try to put the increment
4122 immediately after its use, so that flow can create an
4123 auto-increment addressing mode. */
4124 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4125 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4126 /* We don't handle reversed biv's because bl->biv->insn
4127 does not have a valid INSN_LUID. */
4128 && ! bl->reversed
4129 && v->always_executed && ! v->maybe_multiple
4130 && INSN_UID (v->insn) < max_uid_for_loop)
4131 {
4132 /* If other giv's have been combined with this one, then
4133 this will work only if all uses of the other giv's occur
4134 before this giv's insn. This is difficult to check.
4135
4136 We simplify this by looking for the common case where
4137 there is one DEST_REG giv, and this giv's insn is the
4138 last use of the dest_reg of that DEST_REG giv. If the
4139 increment occurs after the address giv, then we can
4140 perform the optimization. (Otherwise, the increment
4141 would have to go before other_giv, and we would not be
4142 able to combine it with the address giv to get an
4143 auto-inc address.) */
4144 if (v->combined_with)
4145 {
4146 struct induction *other_giv = 0;
4147
4148 for (tv = bl->giv; tv; tv = tv->next_iv)
4149 if (tv->same == v)
4150 {
4151 if (other_giv)
4152 break;
4153 else
4154 other_giv = tv;
4155 }
4156 if (! tv && other_giv
4157 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4158 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4159 == INSN_UID (v->insn))
4160 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4161 auto_inc_opt = 1;
4162 }
4163 /* Check for case where increment is before the address
4164 giv. Do this test in "loop order". */
4165 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4166 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4167 || (INSN_LUID (bl->biv->insn)
4168 > INSN_LUID (scan_start))))
4169 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4170 && (INSN_LUID (scan_start)
4171 < INSN_LUID (bl->biv->insn))))
4172 auto_inc_opt = -1;
4173 else
4174 auto_inc_opt = 1;
4175
4176 #ifdef HAVE_cc0
4177 {
4178 rtx prev;
4179
4180 /* We can't put an insn immediately after one setting
4181 cc0, or immediately before one using cc0. */
4182 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4183 || (auto_inc_opt == -1
4184 && (prev = prev_nonnote_insn (v->insn)) != 0
4185 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4186 && sets_cc0_p (PATTERN (prev))))
4187 auto_inc_opt = 0;
4188 }
4189 #endif
4190
4191 if (auto_inc_opt)
4192 v->auto_inc_opt = 1;
4193 }
4194 #endif
4195
4196 /* For each place where the biv is incremented, add an insn
4197 to increment the new, reduced reg for the giv. */
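/* For example (hypothetical values), if the biv step tv->add_val is 4
and this giv has mult_val == 2, the reduced reg gets an insn adding
4 * 2 == 8 at each such place. */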
4198 for (tv = bl->biv; tv; tv = tv->next_iv)
4199 {
4200 rtx insert_before;
4201
4202 if (! auto_inc_opt)
4203 insert_before = tv->insn;
4204 else if (auto_inc_opt == 1)
4205 insert_before = NEXT_INSN (v->insn);
4206 else
4207 insert_before = v->insn;
4208
4209 if (tv->mult_val == const1_rtx)
4210 emit_iv_add_mult (tv->add_val, v->mult_val,
4211 v->new_reg, v->new_reg, insert_before);
4212 else /* tv->mult_val == const0_rtx */
4213 /* A multiply is acceptable here
4214 since this is presumed to be seldom executed. */
4215 emit_iv_add_mult (tv->add_val, v->mult_val,
4216 v->add_val, v->new_reg, insert_before);
4217 }
4218
4219 /* Add code at loop start to initialize giv's reduced reg. */
4220
4221 emit_iv_add_mult (bl->initial_value, v->mult_val,
4222 v->add_val, v->new_reg, loop_start);
4223 }
4224 }
4225
4226 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4227 as not reduced.
4228
4229 For each giv register that can be reduced now: if replaceable,
4230 substitute reduced reg wherever the old giv occurs;
4231 else add new move insn "giv_reg = reduced_reg".
4232
4233 Also check for givs whose first use is their definition and whose
4234 last use is the definition of another giv. If so, it is likely
4235 dead and should not be used to eliminate a biv. */
4236 for (v = bl->giv; v; v = v->next_iv)
4237 {
4238 if (v->same && v->same->ignore)
4239 v->ignore = 1;
4240
4241 if (v->ignore)
4242 continue;
4243
4244 if (v->giv_type == DEST_REG
4245 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4246 {
4247 struct induction *v1;
4248
4249 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4250 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4251 v->maybe_dead = 1;
4252 }
4253
4254 /* Update expression if this was combined, in case other giv was
4255 replaced. */
4256 if (v->same)
4257 v->new_reg = replace_rtx (v->new_reg,
4258 v->same->dest_reg, v->same->new_reg);
4259
4260 if (v->giv_type == DEST_ADDR)
4261 /* Store reduced reg as the address in the memref where we found
4262 this giv. */
4263 validate_change (v->insn, v->location, v->new_reg, 0);
4264 else if (v->replaceable)
4265 {
4266 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4267
4268 #if 0
4269 /* I can no longer duplicate the original problem. Perhaps
4270 this is unnecessary now? */
4271
4272 /* Replaceable; it isn't strictly necessary to delete the old
4273 insn and emit a new one, because v->dest_reg is now dead.
4274
4275 However, especially when unrolling loops, the special
4276 handling for (set REG0 REG1) in the second cse pass may
4277 make v->dest_reg live again. To avoid this problem, emit
4278 an insn to set the original giv reg from the reduced giv.
4279 We cannot delete the original insn, since it may be part
4280 of a LIBCALL, and the code in flow that eliminates dead
4281 libcalls will fail if it is deleted. */
4282 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4283 v->insn);
4284 #endif
4285 }
4286 else
4287 {
4288 /* Not replaceable; emit an insn to set the original giv reg from
4289 the reduced giv, same as above. */
4290 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4291 v->insn);
4292 }
4293
4294 /* When a loop is reversed, givs which depend on the reversed
4295 biv, and which are live outside the loop, must be set to their
4296 correct final value. This insn is only needed if the giv is
4297 not replaceable. The correct final value is the same as the
4298 value that the giv starts the reversed loop with. */
4299 if (bl->reversed && ! v->replaceable)
4300 emit_iv_add_mult (bl->initial_value, v->mult_val,
4301 v->add_val, v->dest_reg, end_insert_before);
4302 else if (v->final_value)
4303 {
4304 rtx insert_before;
4305
4306 /* If the loop has multiple exits, emit the insn before the
4307 loop to ensure that it will always be executed no matter
4308 how the loop exits. Otherwise, emit the insn after the loop,
4309 since this is slightly more efficient. */
4310 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4311 insert_before = loop_start;
4312 else
4313 insert_before = end_insert_before;
4314 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4315 insert_before);
4316
4317 #if 0
4318 /* If the insn to set the final value of the giv was emitted
4319 before the loop, then we must delete the insn inside the loop
4320 that sets it. If this is a LIBCALL, then we must delete
4321 every insn in the libcall. Note, however, that
4322 final_giv_value will only succeed when there are multiple
4323 exits if the giv is dead at each exit, hence it does not
4324 matter that the original insn remains because it is dead
4325 anyway. */
4326 /* Delete the insn inside the loop that sets the giv since
4327 the giv is now set before (or after) the loop. */
4328 delete_insn (v->insn);
4329 #endif
4330 }
4331
4332 if (loop_dump_stream)
4333 {
4334 fprintf (loop_dump_stream, "giv at %d reduced to ",
4335 INSN_UID (v->insn));
4336 print_rtl (loop_dump_stream, v->new_reg);
4337 fprintf (loop_dump_stream, "\n");
4338 }
4339 }
4340
4341 /* All the givs based on the biv bl have been reduced if they
4342 merit it. */
4343
4344 /* For each giv not marked as maybe dead that has been combined with a
4345 second giv, clear any "maybe dead" mark on that second giv.
4346 v->new_reg will either be or refer to the register of the giv it
4347 combined with.
4348
4349 Doing this clearing avoids problems in biv elimination where a
4350 giv's new_reg is a complex value that can't be put in the insn but
4351 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4352 Since the register will be used in either case, we'd prefer it be
4353 used from the simpler giv. */
4354
4355 for (v = bl->giv; v; v = v->next_iv)
4356 if (! v->maybe_dead && v->same)
4357 v->same->maybe_dead = 0;
4358
4359 /* Try to eliminate the biv, if it is a candidate.
4360 This won't work if ! all_reduced,
4361 since the givs we planned to use might not have been reduced.
4362
4363 We have to be careful that we didn't initially think we could eliminate
4364 this biv because of a giv that we now think may be dead and shouldn't
4365 be used as a biv replacement.
4366
4367 Also, there is the possibility that we may have a giv that looks
4368 like it can be used to eliminate a biv, but the resulting insn
4369 isn't valid. This can happen, for example, on the 88k, where a
4370 JUMP_INSN can compare a register only with zero. Attempts to
4371 replace it with a compare with a constant will fail.
4372
4373 Note that in cases where this call fails, we may have replaced some
4374 of the occurrences of the biv with a giv, but no harm was done in
4375 doing so in the rare cases where it can occur. */
4376
4377 if (all_reduced == 1 && bl->eliminable
4378 && maybe_eliminate_biv (bl, loop_start, end, 1,
4379 threshold, insn_count))
4381 {
4382 /* ??? If we created a new test to bypass the loop entirely,
4383 or otherwise drop straight in, based on this test, then
4384 we might want to rewrite it also. This way some later
4385 pass has more hope of removing the initialization of this
4386 biv entirely. */
4387
4388 /* If final_value != 0, then the biv may be used after loop end
4389 and we must emit an insn to set it just in case.
4390
4391 Reversed bivs already have an insn after the loop setting their
4392 value, so we don't need another one. We can't calculate the
4393 proper final value for such a biv here anyway. */
4394 if (final_value != 0 && ! bl->reversed)
4395 {
4396 rtx insert_before;
4397
4398 /* If the loop has multiple exits, emit the insn before the
4399 loop to ensure that it will always be executed no matter
4400 how the loop exits. Otherwise, emit the insn after the
4401 loop, since this is slightly more efficient. */
4402 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4403 insert_before = loop_start;
4404 else
4405 insert_before = end_insert_before;
4406
4407 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4408 insert_before);
4409 }
4410
4411 #if 0
4412 /* Delete all of the instructions inside the loop which set
4413 the biv, as they are all dead. It is safe to delete them,
4414 because an insn setting a biv will never be part of a libcall. */
4415 /* However, deleting them will invalidate the regno_last_uid info,
4416 so keeping them around is more convenient. Final_biv_value
4417 will only succeed when there are multiple exits if the biv
4418 is dead at each exit, hence it does not matter that the original
4419 insn remains, because it is dead anyway. */
4420 for (v = bl->biv; v; v = v->next_iv)
4421 delete_insn (v->insn);
4422 #endif
4423
4424 if (loop_dump_stream)
4425 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4426 bl->regno);
4427 }
4428 }
4429
4430 /* Go through all the instructions in the loop, making all the
4431 register substitutions scheduled in REG_MAP. */
4432
4433 for (p = loop_start; p != end; p = NEXT_INSN (p))
4434 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4435 || GET_CODE (p) == CALL_INSN)
4436 {
4437 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4438 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4439 INSN_CODE (p) = -1;
4440 }
4441
4442 /* Unroll loops from within strength reduction so that we can use the
4443 induction variable information that strength_reduce has already
4444 collected. */
4445
4446 if (unroll_p)
4447 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4448
4449 #ifdef HAIFA
4450 /* Instrument the loop with a BCT insn. */
4451 #ifdef HAVE_decrement_and_branch_on_count
4452 if (HAVE_decrement_and_branch_on_count)
4453 insert_bct (loop_start, loop_end);
4454 #endif
4455 #endif /* HAIFA */
4456
4457 if (loop_dump_stream)
4458 fprintf (loop_dump_stream, "\n");
4459 }
4460 \f
4461 /* Return 1 if X is a valid source for an initial value (or as value being
4462 compared against in an initial test).
4463
4464 X must be either a register or constant and must not be clobbered between
4465 the current insn and the start of the loop.
4466
4467 INSN is the insn containing X. */
4468
4469 static int
4470 valid_initial_value_p (x, insn, call_seen, loop_start)
4471 rtx x;
4472 rtx insn;
4473 int call_seen;
4474 rtx loop_start;
4475 {
4476 if (CONSTANT_P (x))
4477 return 1;
4478
4479 /* Only consider pseudos we know about, initialized in insns whose
4480 luids we know. */
4481 if (GET_CODE (x) != REG
4482 || REGNO (x) >= max_reg_before_loop)
4483 return 0;
4484
4485 /* Don't use a call-clobbered register across a call which clobbers it. On
4486 some machines, don't use any hard registers at all. */
4487 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4488 && (SMALL_REGISTER_CLASSES
4489 || (call_used_regs[REGNO (x)] && call_seen)))
4490 return 0;
4491
4492 /* Don't use registers that have been clobbered before the start of the
4493 loop. */
4494 if (reg_set_between_p (x, insn, loop_start))
4495 return 0;
4496
4497 return 1;
4498 }
4499 \f
4500 /* Scan X for memory refs and check each memory address
4501 as a possible giv. INSN is the insn whose pattern X comes from.
4502 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4503 every loop iteration. */
4504
4505 static void
4506 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4507 rtx x;
4508 rtx insn;
4509 int not_every_iteration;
4510 rtx loop_start, loop_end;
4511 {
4512 register int i, j;
4513 register enum rtx_code code;
4514 register char *fmt;
4515
4516 if (x == 0)
4517 return;
4518
4519 code = GET_CODE (x);
4520 switch (code)
4521 {
4522 case REG:
4523 case CONST_INT:
4524 case CONST:
4525 case CONST_DOUBLE:
4526 case SYMBOL_REF:
4527 case LABEL_REF:
4528 case PC:
4529 case CC0:
4530 case ADDR_VEC:
4531 case ADDR_DIFF_VEC:
4532 case USE:
4533 case CLOBBER:
4534 return;
4535
4536 case MEM:
4537 {
4538 rtx src_reg;
4539 rtx add_val;
4540 rtx mult_val;
4541 int benefit;
4542
4543 benefit = general_induction_var (XEXP (x, 0),
4544 &src_reg, &add_val, &mult_val);
4545
4546 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4547 Such a giv isn't useful; the address would then just be the biv itself. */
4548 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4549 {
4550 /* Found one; record it. */
4551 struct induction *v
4552 = (struct induction *) oballoc (sizeof (struct induction));
4553
4554 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4555 add_val, benefit, DEST_ADDR, not_every_iteration,
4556 &XEXP (x, 0), loop_start, loop_end);
4557
4558 v->mem_mode = GET_MODE (x);
4559 }
4560 }
4561 return;
4562
4563 default:
4564 break;
4565 }
4566
4567 /* Recursively scan the subexpressions for other mem refs. */
4568
4569 fmt = GET_RTX_FORMAT (code);
4570 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4571 if (fmt[i] == 'e')
4572 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4573 loop_end);
4574 else if (fmt[i] == 'E')
4575 for (j = 0; j < XVECLEN (x, i); j++)
4576 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4577 loop_start, loop_end);
4578 }
4579 \f
4580 /* Fill in the data about one biv update.
4581 V is the `struct induction' in which we record the biv. (It is
4582 allocated by the caller, with alloca.)
4583 INSN is the insn that sets it.
4584 DEST_REG is the biv's reg.
4585
4586 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4587 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4588 being set to INC_VAL.
4589
4590 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4591 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4592 can be executed more than once per iteration. If MAYBE_MULTIPLE
4593 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4594 executed exactly once per iteration. */
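/* A hypothetical example: the increment `i += 4' is recorded with
MULT_VAL == const1_rtx and INC_VAL == (const_int 4); a plain
assignment `i = 10' is recorded with MULT_VAL == const0_rtx and
INC_VAL == (const_int 10). */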
4595
4596 static void
4597 record_biv (v, insn, dest_reg, inc_val, mult_val,
4598 not_every_iteration, maybe_multiple)
4599 struct induction *v;
4600 rtx insn;
4601 rtx dest_reg;
4602 rtx inc_val;
4603 rtx mult_val;
4604 int not_every_iteration;
4605 int maybe_multiple;
4606 {
4607 struct iv_class *bl;
4608
4609 v->insn = insn;
4610 v->src_reg = dest_reg;
4611 v->dest_reg = dest_reg;
4612 v->mult_val = mult_val;
4613 v->add_val = inc_val;
4614 v->mode = GET_MODE (dest_reg);
4615 v->always_computable = ! not_every_iteration;
4616 v->always_executed = ! not_every_iteration;
4617 v->maybe_multiple = maybe_multiple;
4618
4619 /* Add this to the reg's iv_class, creating a class
4620 if this is the first incrementation of the reg. */
4621
4622 bl = reg_biv_class[REGNO (dest_reg)];
4623 if (bl == 0)
4624 {
4625 /* Create and initialize new iv_class. */
4626
4627 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4628
4629 bl->regno = REGNO (dest_reg);
4630 bl->biv = 0;
4631 bl->giv = 0;
4632 bl->biv_count = 0;
4633 bl->giv_count = 0;
4634
4635 /* Set initial value to the reg itself. */
4636 bl->initial_value = dest_reg;
4637 /* We haven't seen the initializing insn yet. */
4638 bl->init_insn = 0;
4639 bl->init_set = 0;
4640 bl->initial_test = 0;
4641 bl->incremented = 0;
4642 bl->eliminable = 0;
4643 bl->nonneg = 0;
4644 bl->reversed = 0;
4645 bl->total_benefit = 0;
4646
4647 /* Add this class to loop_iv_list. */
4648 bl->next = loop_iv_list;
4649 loop_iv_list = bl;
4650
4651 /* Put it in the array of biv register classes. */
4652 reg_biv_class[REGNO (dest_reg)] = bl;
4653 }
4654
4655 /* Update IV_CLASS entry for this biv. */
4656 v->next_iv = bl->biv;
4657 bl->biv = v;
4658 bl->biv_count++;
4659 if (mult_val == const1_rtx)
4660 bl->incremented = 1;
4661
4662 if (loop_dump_stream)
4663 {
4664 fprintf (loop_dump_stream,
4665 "Insn %d: possible biv, reg %d,",
4666 INSN_UID (insn), REGNO (dest_reg));
4667 if (GET_CODE (inc_val) == CONST_INT)
4668 {
4669 fprintf (loop_dump_stream, " const =");
4670 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4671 fputc ('\n', loop_dump_stream);
4672 }
4673 else
4674 {
4675 fprintf (loop_dump_stream, " const = ");
4676 print_rtl (loop_dump_stream, inc_val);
4677 fprintf (loop_dump_stream, "\n");
4678 }
4679 }
4680 }
4681 \f
4682 /* Fill in the data about one giv.
4683 V is the `struct induction' in which we record the giv. (It is
4684 allocated by the caller, with alloca.)
4685 INSN is the insn that sets it.
4686 BENEFIT estimates the savings from deleting this insn.
4687 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4688 into a register or is used as a memory address.
4689
4690 SRC_REG is the biv reg which the giv is computed from.
4691 DEST_REG is the giv's reg (if the giv is stored in a reg).
4692 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4693 LOCATION points to the place where this giv's value appears in INSN. */
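/* For instance (hypothetical), for a DEST_ADDR giv found in
(mem:SI (plus:SI (reg biv) (const_int 4))), LOCATION points at the
address expression inside the MEM. */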
4694
4695 static void
4696 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4697 type, not_every_iteration, location, loop_start, loop_end)
4698 struct induction *v;
4699 rtx insn;
4700 rtx src_reg;
4701 rtx dest_reg;
4702 rtx mult_val, add_val;
4703 int benefit;
4704 enum g_types type;
4705 int not_every_iteration;
4706 rtx *location;
4707 rtx loop_start, loop_end;
4708 {
4709 struct induction *b;
4710 struct iv_class *bl;
4711 rtx set = single_set (insn);
4712
4713 v->insn = insn;
4714 v->src_reg = src_reg;
4715 v->giv_type = type;
4716 v->dest_reg = dest_reg;
4717 v->mult_val = mult_val;
4718 v->add_val = add_val;
4719 v->benefit = benefit;
4720 v->location = location;
4721 v->cant_derive = 0;
4722 v->combined_with = 0;
4723 v->maybe_multiple = 0;
4724 v->maybe_dead = 0;
4725 v->derive_adjustment = 0;
4726 v->same = 0;
4727 v->ignore = 0;
4728 v->new_reg = 0;
4729 v->final_value = 0;
4730 v->same_insn = 0;
4731 v->auto_inc_opt = 0;
4732 v->unrolled = 0;
4733 v->shared = 0;
4734
4735 /* The v->always_computable field is used in update_giv_derive, to
4736 determine whether a giv can be used to derive another giv. For a
4737 DEST_REG giv, INSN computes a new value for the giv, so its value
4738 isn't computable if INSN isn't executed every iteration.
4739 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4740 it does not compute a new value. Hence the value is always computable
4741 regardless of whether INSN is executed each iteration. */
4742
4743 if (type == DEST_ADDR)
4744 v->always_computable = 1;
4745 else
4746 v->always_computable = ! not_every_iteration;
4747
4748 v->always_executed = ! not_every_iteration;
4749
4750 if (type == DEST_ADDR)
4751 {
4752 v->mode = GET_MODE (*location);
4753 v->lifetime = 1;
4754 v->times_used = 1;
4755 }
4756 else /* type == DEST_REG */
4757 {
4758 v->mode = GET_MODE (SET_DEST (set));
4759
4760 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4761 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4762
4763 v->times_used = n_times_used[REGNO (dest_reg)];
4764
4765 /* If the lifetime is zero, it means that this register is
4766 really a dead store. So mark this as a giv that can be
4767 ignored. This will not prevent the biv from being eliminated. */
4768 if (v->lifetime == 0)
4769 v->ignore = 1;
4770
4771 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4772 reg_iv_info[REGNO (dest_reg)] = v;
4773 }
4774
4775 /* Add the giv to the class of givs computed from one biv. */
4776
4777 bl = reg_biv_class[REGNO (src_reg)];
4778 if (bl)
4779 {
4780 v->next_iv = bl->giv;
4781 bl->giv = v;
4782 /* Don't count DEST_ADDR. This is supposed to count the number of
4783 insns that calculate givs. */
4784 if (type == DEST_REG)
4785 bl->giv_count++;
4786 bl->total_benefit += benefit;
4787 }
4788 else
4789 /* Fatal error, biv missing for this giv? */
4790 abort ();
4791
4792 if (type == DEST_ADDR)
4793 v->replaceable = 1;
4794 else
4795 {
4796 /* The giv can be replaced outright by the reduced register only if all
4797 of the following conditions are true:
4798 - the insn that sets the giv is always executed on any iteration
4799 on which the giv is used at all
4800 (there are two ways to deduce this:
4801 either the insn is executed on every iteration,
4802 or all uses follow that insn in the same basic block),
4803 - the giv is not used outside the loop
4804 - no assignments to the biv occur during the giv's lifetime. */
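/* A hypothetical failing case: a giv set inside a conditional but used
unconditionally later in the loop is not set on every iteration on
which it is used, so it cannot simply be replaced by the reduced
register. */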
4805
4806 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4807 /* Previous line always fails if INSN was moved by loop opt. */
4808 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4809 && (! not_every_iteration
4810 || last_use_this_basic_block (dest_reg, insn)))
4811 {
4812 /* Now check that there are no assignments to the biv within the
4813 giv's lifetime. This requires two separate checks. */
4814
4815 /* Check each biv update, and fail if any are between the first
4816 and last use of the giv.
4817
4818 If this loop contains an inner loop that was unrolled, then
4819 the insn modifying the biv may have been emitted by the loop
4820 unrolling code, and hence does not have a valid luid. Just
4821 mark the biv as not replaceable in this case. It is not very
4822 useful as a biv, because it is used in two different loops.
4823 It is very unlikely that we would be able to optimize the giv
4824 using this biv anyway. */
4825
4826 v->replaceable = 1;
4827 for (b = bl->biv; b; b = b->next_iv)
4828 {
4829 if (INSN_UID (b->insn) >= max_uid_for_loop
4830 || ((uid_luid[INSN_UID (b->insn)]
4831 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4832 && (uid_luid[INSN_UID (b->insn)]
4833 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4834 {
4835 v->replaceable = 0;
4836 v->not_replaceable = 1;
4837 break;
4838 }
4839 }
4840
4841 /* If there are any backwards branches that go from after the
4842 biv update to before it, then this giv is not replaceable. */
4843 if (v->replaceable)
4844 for (b = bl->biv; b; b = b->next_iv)
4845 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4846 {
4847 v->replaceable = 0;
4848 v->not_replaceable = 1;
4849 break;
4850 }
4851 }
4852 else
4853 {
4854 /* May still be replaceable, we don't have enough info here to
4855 decide. */
4856 v->replaceable = 0;
4857 v->not_replaceable = 0;
4858 }
4859 }
4860
4861 if (loop_dump_stream)
4862 {
4863 if (type == DEST_REG)
4864 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4865 INSN_UID (insn), REGNO (dest_reg));
4866 else
4867 fprintf (loop_dump_stream, "Insn %d: dest address",
4868 INSN_UID (insn));
4869
4870 fprintf (loop_dump_stream, " src reg %d benefit %d",
4871 REGNO (src_reg), v->benefit);
4872 fprintf (loop_dump_stream, " used %d lifetime %d",
4873 v->times_used, v->lifetime);
4874
4875 if (v->replaceable)
4876 fprintf (loop_dump_stream, " replaceable");
4877
4878 if (GET_CODE (mult_val) == CONST_INT)
4879 {
4880 fprintf (loop_dump_stream, " mult ");
4881 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4882 }
4883 else
4884 {
4885 fprintf (loop_dump_stream, " mult ");
4886 print_rtl (loop_dump_stream, mult_val);
4887 }
4888
4889 if (GET_CODE (add_val) == CONST_INT)
4890 {
4891 fprintf (loop_dump_stream, " add ");
4892 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4893 }
4894 else
4895 {
4896 fprintf (loop_dump_stream, " add ");
4897 print_rtl (loop_dump_stream, add_val);
4898 }
4899 }
4900
4901 if (loop_dump_stream)
4902 fprintf (loop_dump_stream, "\n");
4903
4904 }
4905
4906
4907 /* All this does is determine whether a giv can be made replaceable because
4908 its final value can be calculated. This code can not be part of record_giv
4909 above, because final_giv_value requires that the number of loop iterations
4910 be known, and that can not be accurately calculated until after all givs
4911 have been identified. */
4912
4913 static void
4914 check_final_value (v, loop_start, loop_end)
4915 struct induction *v;
4916 rtx loop_start, loop_end;
4917 {
4918 struct iv_class *bl;
4919 rtx final_value = 0;
4920
4921 bl = reg_biv_class[REGNO (v->src_reg)];
4922
4923 /* DEST_ADDR givs will never reach here, because they are always marked
4924 replaceable above in record_giv. */
4925
4926 /* The giv can be replaced outright by the reduced register only if all
4927 of the following conditions are true:
4928 - the insn that sets the giv is always executed on any iteration
4929 on which the giv is used at all
4930 (there are two ways to deduce this:
4931 either the insn is executed on every iteration,
4932 or all uses follow that insn in the same basic block),
4933 - its final value can be calculated (this condition is different
4934 than the one above in record_giv)
4935 - no assignments to the biv occur during the giv's lifetime. */
4936
4937 #if 0
4938 /* This is only called now when replaceable is known to be false. */
4939 /* Clear replaceable, so that it won't confuse final_giv_value. */
4940 v->replaceable = 0;
4941 #endif
4942
4943 if ((final_value = final_giv_value (v, loop_start, loop_end))
4944 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4945 {
4946 int biv_increment_seen = 0;
4947 rtx p = v->insn;
4948 rtx last_giv_use;
4949
4950 v->replaceable = 1;
4951
4952 /* When trying to determine whether or not a biv increment occurs
4953 during the lifetime of the giv, we can ignore uses of the variable
4954 outside the loop, because the final value is known. Hence we cannot
4955 use regno_last_uid and regno_first_uid as above in record_giv. */
4956
4957 /* Search the loop to determine whether any assignments to the
4958 biv occur during the giv's lifetime. Start with the insn
4959 that sets the giv, and search around the loop until we come
4960 back to that insn again.
4961
4962 Also fail if there is a jump within the giv's lifetime that jumps
4963 to somewhere outside the lifetime but still within the loop. This
4964 catches spaghetti code where the execution order is not linear, and
4965 hence the above test fails. Here we assume that the giv lifetime
4966 does not extend from one iteration of the loop to the next, so as
4967 to make the test easier. Since the lifetime isn't known yet,
4968 this requires two loops. See also record_giv above. */
4969
4970 last_giv_use = v->insn;
4971
4972 while (1)
4973 {
4974 p = NEXT_INSN (p);
4975 if (p == loop_end)
4976 p = NEXT_INSN (loop_start);
4977 if (p == v->insn)
4978 break;
4979
4980 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4981 || GET_CODE (p) == CALL_INSN)
4982 {
4983 if (biv_increment_seen)
4984 {
4985 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4986 {
4987 v->replaceable = 0;
4988 v->not_replaceable = 1;
4989 break;
4990 }
4991 }
4992 else if (reg_set_p (v->src_reg, PATTERN (p)))
4993 biv_increment_seen = 1;
4994 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4995 last_giv_use = p;
4996 }
4997 }
4998
4999 /* Now that the lifetime of the giv is known, check for branches
5000 from within the lifetime to outside the lifetime if it is still
5001 replaceable. */
5002
5003 if (v->replaceable)
5004 {
5005 p = v->insn;
5006 while (1)
5007 {
5008 p = NEXT_INSN (p);
5009 if (p == loop_end)
5010 p = NEXT_INSN (loop_start);
5011 if (p == last_giv_use)
5012 break;
5013
5014 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5015 && LABEL_NAME (JUMP_LABEL (p))
5016 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5017 || (INSN_UID (v->insn) >= max_uid_for_loop)
5018 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5019 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5020 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5021 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5022 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5023 {
5024 v->replaceable = 0;
5025 v->not_replaceable = 1;
5026
5027 if (loop_dump_stream)
5028 fprintf (loop_dump_stream,
5029 "Found branch outside giv lifetime.\n");
5030
5031 break;
5032 }
5033 }
5034 }
5035
5036 /* If it is replaceable, then save the final value. */
5037 if (v->replaceable)
5038 v->final_value = final_value;
5039 }
5040
5041 if (loop_dump_stream && v->replaceable)
5042 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5043 INSN_UID (v->insn), REGNO (v->dest_reg));
5044 }
5045 \f
5046 /* Update the status of whether a giv can derive other givs.
5047
5048 We need to do something special if there is or may be an update to the biv
5049 between the time the giv is defined and the time it is used to derive
5050 another giv.
5051
5052 In addition, a giv that is only conditionally set is not allowed to
5053 derive another giv once a label has been passed.
5054
5055 The cases we look at are when a label or an update to a biv is passed. */
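/* A hypothetical example: if G was computed as 3*B + 1 and we then pass
the biv update B = B + 2, a giv derived from G afterwards must be
compensated by 3*2 == 6; that product is what derive_adjustment
accumulates below. */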
5056
5057 static void
5058 update_giv_derive (p)
5059 rtx p;
5060 {
5061 struct iv_class *bl;
5062 struct induction *biv, *giv;
5063 rtx tem;
5064 int dummy;
5065
5066 /* Search all IV classes, then all bivs, and finally all givs.
5067
5068 There are three cases we are concerned with. First we have the situation
5069 of a giv that is only updated conditionally. In that case, it may not
5070 derive any givs after a label is passed.
5071
5072 The second case is when a biv update occurs, or may occur, after the
5073 definition of a giv. For certain biv updates (see below) that are
5074 known to occur between the giv definition and use, we can adjust the
5075 giv definition. For others, or when the biv update is conditional,
5076 we must prevent the giv from deriving any other givs. There are two
5077 sub-cases within this case.
5078
5079 If this is a label, we are concerned with any biv update that is done
5080 conditionally, since it may be done after the giv is defined followed by
5081 a branch here (actually, we need to pass both a jump and a label, but
5082 this extra tracking doesn't seem worth it).
5083
5084 If this is a jump, we are concerned about any biv update that may be
5085 executed multiple times. We are actually only concerned about
5086 backward jumps, but it is probably not worth performing the test
5087 on the jump again here.
5088
5089 If this is a biv update, we must adjust the giv status to show that a
5090 subsequent biv update was performed. If this adjustment cannot be done,
5091 the giv cannot derive further givs. */
5092
5093 for (bl = loop_iv_list; bl; bl = bl->next)
5094 for (biv = bl->biv; biv; biv = biv->next_iv)
5095 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5096 || biv->insn == p)
5097 {
5098 for (giv = bl->giv; giv; giv = giv->next_iv)
5099 {
5100 /* If cant_derive is already true, there is no point in
5101 checking all of these conditions again. */
5102 if (giv->cant_derive)
5103 continue;
5104
5105 /* If this giv is conditionally set and we have passed a label,
5106 it cannot derive anything. */
5107 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5108 giv->cant_derive = 1;
5109
5110 /* Skip givs that have mult_val == 0, since
5111 they are really invariants. Also skip those that are
5112 replaceable, since we know their lifetime doesn't contain
5113 any biv update. */
5114 else if (giv->mult_val == const0_rtx || giv->replaceable)
5115 continue;
5116
5117 /* The only way we can allow this giv to derive another
5118 is if this is a biv increment and we can form the product
5119 of biv->add_val and giv->mult_val. In this case, we will
5120 be able to compute a compensation. */
5121 else if (biv->insn == p)
5122 {
5123 tem = 0;
5124
5125 if (biv->mult_val == const1_rtx)
5126 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5127 biv->add_val,
5128 giv->mult_val),
5129 &dummy);
5130
5131 if (tem && giv->derive_adjustment)
5132 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5133 giv->derive_adjustment),
5134 &dummy);
5135 if (tem)
5136 giv->derive_adjustment = tem;
5137 else
5138 giv->cant_derive = 1;
5139 }
5140 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5141 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5142 giv->cant_derive = 1;
5143 }
5144 }
5145 }
5146 \f
5147 /* Check whether an insn is an increment legitimate for a basic induction var.
5148 X is the source of insn P, or a part of it.
5149 MODE is the mode in which X should be interpreted.
5150
5151 DEST_REG is the putative biv, also the destination of the insn.
5152 We accept patterns of these forms:
5153 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5154 REG = INVARIANT + REG
5155
5156 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5157 and store the additive term into *INC_VAL.
5158
5159 If X is an assignment of an invariant into DEST_REG, we set
5160 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5161
5162 We also want to detect a BIV when it corresponds to a variable
5163 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5164 of the variable may be a PLUS that adds a SUBREG of that variable to
5165 an invariant and then sign- or zero-extends the result of the PLUS
5166 into the variable.
5167
5168 Most GIVs in such cases will be in the promoted mode, since that is
5169 probably the natural computation mode (and almost certainly the mode
5170 used for addresses) on the machine. So we view the pseudo-reg containing
5171 the variable as the BIV, as if it were simply incremented.
5172
5173 Note that treating the entire pseudo as a BIV will result in making
5174 simple increments to any GIVs based on it. However, if the variable
5175 overflows in its declared mode but not its promoted mode, the result will
5176 be incorrect. This is acceptable if the variable is signed, since
5177 overflows in such cases are undefined, but not if it is unsigned, since
5178 those overflows are defined. So we only check for SIGN_EXTEND and
5179 not ZERO_EXTEND.
5180
5181 If we cannot find a biv, we return 0. */
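/* For instance (a hypothetical sketch of the promoted-variable case,
   assuming a short counter promoted to SImode), the increment may
   look like

     (set (reg:SI i)
	  (sign_extend:SI (plus:HI (subreg:HI (reg:SI i) 0)
				   (const_int 1))))

   and we still want to treat (reg:SI i) as the biv.  */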
5182
5183 static int
5184 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5185 register rtx x;
5186 enum machine_mode mode;
5187 rtx dest_reg;
5188 rtx p;
5189 rtx *inc_val;
5190 rtx *mult_val;
5191 {
5192 register enum rtx_code code;
5193 rtx arg;
5194 rtx insn, set = 0;
5195
5196 code = GET_CODE (x);
5197 switch (code)
5198 {
5199 case PLUS:
5200 if (XEXP (x, 0) == dest_reg
5201 || (GET_CODE (XEXP (x, 0)) == SUBREG
5202 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5203 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5204 arg = XEXP (x, 1);
5205 else if (XEXP (x, 1) == dest_reg
5206 || (GET_CODE (XEXP (x, 1)) == SUBREG
5207 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5208 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5209 arg = XEXP (x, 0);
5210 else
5211 return 0;
5212
5213 if (invariant_p (arg) != 1)
5214 return 0;
5215
5216 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5217 *mult_val = const1_rtx;
5218 return 1;
5219
5220 case SUBREG:
5221 /* If this is a SUBREG for a promoted variable, check the inner
5222 value. */
5223 if (SUBREG_PROMOTED_VAR_P (x))
5224 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5225 dest_reg, p, inc_val, mult_val);
5226 return 0;
5227
5228 case REG:
5229 /* If this register is assigned in the previous insn, look at its
5230 source, but don't go outside the loop or past a label. */
5231
5232 for (insn = PREV_INSN (p);
5233 (insn && GET_CODE (insn) == NOTE
5234 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5235 insn = PREV_INSN (insn))
5236 ;
5237
5238 if (insn)
5239 set = single_set (insn);
5240
5241 if (set != 0
5242 && (SET_DEST (set) == x
5243 || (GET_CODE (SET_DEST (set)) == SUBREG
5244 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5245 <= UNITS_PER_WORD)
5246 && SUBREG_REG (SET_DEST (set)) == x)))
5247 return basic_induction_var (SET_SRC (set),
5248 (GET_MODE (SET_SRC (set)) == VOIDmode
5249 ? GET_MODE (x)
5250 : GET_MODE (SET_SRC (set))),
5251 dest_reg, insn,
5252 inc_val, mult_val);
5253 /* ... fall through ... */
5254
5255 /* Can accept constant setting of biv only when inside the innermost loop.
5256 Otherwise, a biv of an inner loop may be incorrectly recognized
5257 as a biv of the outer loop,
5258 causing code to be moved INTO the inner loop. */
5259 case MEM:
5260 if (invariant_p (x) != 1)
5261 return 0;
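      /* ... fall through ... */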
5262 case CONST_INT:
5263 case SYMBOL_REF:
5264 case CONST:
5265 /* convert_modes aborts if we try to convert to or from CCmode, so just
5266 exclude that case. It is very unlikely that a condition code value
5267 would be a useful iterator anyway. */
5268 if (loops_enclosed == 1
5269 && GET_MODE_CLASS (mode) != MODE_CC
5270 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5271 {
5272 /* Possible bug here? Perhaps we don't know the mode of X. */
5273 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5274 *mult_val = const0_rtx;
5275 return 1;
5276 }
5277 else
5278 return 0;
5279
5280 case SIGN_EXTEND:
5281 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5282 dest_reg, p, inc_val, mult_val);
5283 case ASHIFTRT:
5284 /* Similar, since this can be a sign extension. */
5285 for (insn = PREV_INSN (p);
5286 (insn && GET_CODE (insn) == NOTE
5287 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5288 insn = PREV_INSN (insn))
5289 ;
5290
5291 if (insn)
5292 set = single_set (insn);
5293
5294 if (set && SET_DEST (set) == XEXP (x, 0)
5295 && GET_CODE (XEXP (x, 1)) == CONST_INT
5296 && INTVAL (XEXP (x, 1)) >= 0
5297 && GET_CODE (SET_SRC (set)) == ASHIFT
5298 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5299 return basic_induction_var (XEXP (SET_SRC (set), 0),
5300 GET_MODE (XEXP (x, 0)),
5301 dest_reg, insn, inc_val, mult_val);
5302 return 0;
5303
5304 default:
5305 return 0;
5306 }
5307 }
5308 \f
5309 /* A general induction variable (giv) is any quantity that is a linear
5310 function of a basic induction variable,
5311 i.e. giv = biv * mult_val + add_val.
5312 The coefficients can be any loop invariant quantity.
5313 A giv need not be computed directly from the biv;
5314 it can be computed by way of other givs. */
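/* For example (a sketch, assuming 4-byte array elements), in

     for (i = 0; i < n; i++)
       a[i] = 0;

   `i' is a biv, and the address a + 4*i computed for a[i] is a giv
   with mult_val == 4 and add_val == a.  */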
5315
5316 /* Determine whether X computes a giv.
5317 If it does, return a nonzero value
5318 which is the benefit from eliminating the computation of X;
5319 set *SRC_REG to the register of the biv that it is computed from;
5320 set *ADD_VAL and *MULT_VAL to the coefficients,
5321 such that the value of X is biv * mult + add. */
5322
5323 static int
5324 general_induction_var (x, src_reg, add_val, mult_val)
5325 rtx x;
5326 rtx *src_reg;
5327 rtx *add_val;
5328 rtx *mult_val;
5329 {
5330 rtx orig_x = x;
5331 int benefit = 0;
5332 char *storage;
5333
5334 /* If this is an invariant, forget it, it isn't a giv. */
5335 if (invariant_p (x) == 1)
5336 return 0;
5337
5338 /* See if the expression could be a giv and get its form.
5339 Mark our place on the obstack in case we don't find a giv. */
5340 storage = (char *) oballoc (0);
5341 x = simplify_giv_expr (x, &benefit);
5342 if (x == 0)
5343 {
5344 obfree (storage);
5345 return 0;
5346 }
5347
5348 switch (GET_CODE (x))
5349 {
5350 case USE:
5351 case CONST_INT:
5352 /* Since this is now an invariant and wasn't before, it must be a giv
5353 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5354 with. */
5355 *src_reg = loop_iv_list->biv->dest_reg;
5356 *mult_val = const0_rtx;
5357 *add_val = x;
5358 break;
5359
5360 case REG:
5361 /* This is equivalent to a BIV. */
5362 *src_reg = x;
5363 *mult_val = const1_rtx;
5364 *add_val = const0_rtx;
5365 break;
5366
5367 case PLUS:
5368 /* Either (plus (biv) (invar)) or
5369 (plus (mult (biv) (invar_1)) (invar_2)). */
5370 if (GET_CODE (XEXP (x, 0)) == MULT)
5371 {
5372 *src_reg = XEXP (XEXP (x, 0), 0);
5373 *mult_val = XEXP (XEXP (x, 0), 1);
5374 }
5375 else
5376 {
5377 *src_reg = XEXP (x, 0);
5378 *mult_val = const1_rtx;
5379 }
5380 *add_val = XEXP (x, 1);
5381 break;
5382
5383 case MULT:
5384 /* ADD_VAL is zero. */
5385 *src_reg = XEXP (x, 0);
5386 *mult_val = XEXP (x, 1);
5387 *add_val = const0_rtx;
5388 break;
5389
5390 default:
5391 abort ();
5392 }
5393
5394 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5395 one unless they are CONST_INT). */
5396 if (GET_CODE (*add_val) == USE)
5397 *add_val = XEXP (*add_val, 0);
5398 if (GET_CODE (*mult_val) == USE)
5399 *mult_val = XEXP (*mult_val, 0);
5400
5401 benefit += rtx_cost (orig_x, SET);
5402
5403 /* Always return some benefit if this is a giv so it will be detected
5404 as such. This allows elimination of bivs that might otherwise
5405 not be eliminated. */
5406 return benefit == 0 ? 1 : benefit;
5407 }
5408 \f
5409 /* Given an expression, X, try to form it as a linear function of a biv.
5410 We will canonicalize it to be of the form
5411 (plus (mult (BIV) (invar_1))
5412 (invar_2))
5413 with possible degeneracies.
5414
5415 The invariant expressions must each be of a form that can be used as a
5416 machine operand. We surround them with a USE rtx (a hack, but localized
5417 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5418 routine; it is the caller's responsibility to strip them.
5419
5420 If no such canonicalization is possible (i.e., two bivs are used, or an
5421 expression appears that is neither invariant nor a biv or giv), this routine
5422 returns 0.
5423
5424 For a non-zero return, the result will have a code of CONST_INT, USE,
5425 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5426
5427 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
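/* For example (a hypothetical sketch), given a biv `i' and a
   loop-invariant register `x', the expression i*4 + (x + 8) comes
   back as

     (plus (mult (reg i) (const_int 4))
	   (use (plus (reg x) (const_int 8))))

   with the non-constant invariant wrapped in a USE as described
   above.  */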
5428
5429 static rtx
5430 simplify_giv_expr (x, benefit)
5431 rtx x;
5432 int *benefit;
5433 {
5434 enum machine_mode mode = GET_MODE (x);
5435 rtx arg0, arg1;
5436 rtx tem;
5437
5438 /* If this is not an integer mode, or if we cannot do arithmetic in this
5439 mode, this can't be a giv. */
5440 if (mode != VOIDmode
5441 && (GET_MODE_CLASS (mode) != MODE_INT
5442 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5443 return 0;
5444
5445 switch (GET_CODE (x))
5446 {
5447 case PLUS:
5448 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5449 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5450 if (arg0 == 0 || arg1 == 0)
5451 return 0;
5452
5453 /* Put constant last, CONST_INT last if both constant. */
5454 if ((GET_CODE (arg0) == USE
5455 || GET_CODE (arg0) == CONST_INT)
5456 && GET_CODE (arg1) != CONST_INT)
5457 tem = arg0, arg0 = arg1, arg1 = tem;
5458
5459 /* Handle addition of zero, then addition of an invariant. */
5460 if (arg1 == const0_rtx)
5461 return arg0;
5462 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5463 switch (GET_CODE (arg0))
5464 {
5465 case CONST_INT:
5466 case USE:
5467 /* Both invariant. Only valid if sum is machine operand.
5468 First strip off possible USE on the operands. */
5469 if (GET_CODE (arg0) == USE)
5470 arg0 = XEXP (arg0, 0);
5471
5472 if (GET_CODE (arg1) == USE)
5473 arg1 = XEXP (arg1, 0);
5474
5475 tem = 0;
5476 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5477 {
5478 tem = plus_constant (arg0, INTVAL (arg1));
5479 if (GET_CODE (tem) != CONST_INT)
5480 tem = gen_rtx_USE (mode, tem);
5481 }
5482 else
5483 {
5484 /* Adding two invariants must result in an invariant,
5485 so enclose addition operation inside a USE and
5486 return it. */
5487 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5488 }
5489
5490 return tem;
5491
5492 case REG:
5493 case MULT:
5494 /* biv + invar or mult + invar. Return sum. */
5495 return gen_rtx_PLUS (mode, arg0, arg1);
5496
5497 case PLUS:
5498 /* (a + invar_1) + invar_2. Associate. */
5499 return simplify_giv_expr (gen_rtx_PLUS (mode,
5500 XEXP (arg0, 0),
5501 gen_rtx_PLUS (mode,
5502 XEXP (arg0, 1), arg1)),
5503 benefit);
5504
5505 default:
5506 abort ();
5507 }
5508
5509 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5510 MULT to reduce cases. */
5511 if (GET_CODE (arg0) == REG)
5512 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5513 if (GET_CODE (arg1) == REG)
5514 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5515
5516 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5517 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5518 Recurse to associate the second PLUS. */
5519 if (GET_CODE (arg1) == MULT)
5520 tem = arg0, arg0 = arg1, arg1 = tem;
5521
5522 if (GET_CODE (arg1) == PLUS)
5523 return simplify_giv_expr (gen_rtx_PLUS (mode,
5524 gen_rtx_PLUS (mode, arg0,
5525 XEXP (arg1, 0)),
5526 XEXP (arg1, 1)),
5527 benefit);
5528
5529 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5530 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5531 abort ();
5532
5533 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5534 return 0;
5535
5536 return simplify_giv_expr (gen_rtx_MULT (mode,
5537 XEXP (arg0, 0),
5538 gen_rtx_PLUS (mode,
5539 XEXP (arg0, 1),
5540 XEXP (arg1, 1))),
5541 benefit);
5542
5543 case MINUS:
5544 /* Handle "a - b" as "a + b * (-1)". */
5545 return simplify_giv_expr (gen_rtx_PLUS (mode,
5546 XEXP (x, 0),
5547 gen_rtx_MULT (mode, XEXP (x, 1),
5548 constm1_rtx)),
5549 benefit);
5550
5551 case MULT:
5552 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5553 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5554 if (arg0 == 0 || arg1 == 0)
5555 return 0;
5556
5557 /* Put constant last, CONST_INT last if both constant. */
5558 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5559 && GET_CODE (arg1) != CONST_INT)
5560 tem = arg0, arg0 = arg1, arg1 = tem;
5561
5562 /* If second argument is not now constant, not giv. */
5563 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5564 return 0;
5565
5566 /* Handle multiply by 0 or 1. */
5567 if (arg1 == const0_rtx)
5568 return const0_rtx;
5569
5570 else if (arg1 == const1_rtx)
5571 return arg0;
5572
5573 switch (GET_CODE (arg0))
5574 {
5575 case REG:
5576 /* biv * invar. Done. */
5577 return gen_rtx_MULT (mode, arg0, arg1);
5578
5579 case CONST_INT:
5580 /* Product of two constants. */
5581 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5582
5583 case USE:
5584 /* invar * invar. Not giv. */
5585 return 0;
5586
5587 case MULT:
5588 /* (a * invar_1) * invar_2. Associate. */
5589 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5590 gen_rtx_MULT (mode,
5591 XEXP (arg0, 1),
5592 arg1)),
5593 benefit);
5594
5595 case PLUS:
5596 /* (a + invar_1) * invar_2. Distribute. */
5597 return simplify_giv_expr (gen_rtx_PLUS (mode,
5598 gen_rtx_MULT (mode,
5599 XEXP (arg0, 0),
5600 arg1),
5601 gen_rtx_MULT (mode,
5602 XEXP (arg0, 1),
5603 arg1)),
5604 benefit);
5605
5606 default:
5607 abort ();
5608 }
5609
5610 case ASHIFT:
5611 /* Shift by constant is multiply by power of two. */
5612 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5613 return 0;
5614
5615 return simplify_giv_expr (gen_rtx_MULT (mode,
5616 XEXP (x, 0),
5617 GEN_INT ((HOST_WIDE_INT) 1
5618 << INTVAL (XEXP (x, 1)))),
5619 benefit);
5620
5621 case NEG:
5622 /* "-a" is "a * (-1)" */
5623 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5624 benefit);
5625
5626 case NOT:
5627 /* "~a" is "-a - 1". Silly, but easy. */
5628 return simplify_giv_expr (gen_rtx_MINUS (mode,
5629 gen_rtx_NEG (mode, XEXP (x, 0)),
5630 const1_rtx),
5631 benefit);
5632
5633 case USE:
5634 /* Already in proper form for invariant. */
5635 return x;
5636
5637 case REG:
5638 /* If this is a new register, we can't deal with it. */
5639 if (REGNO (x) >= max_reg_before_loop)
5640 return 0;
5641
5642 /* Check for biv or giv. */
5643 switch (reg_iv_type[REGNO (x)])
5644 {
5645 case BASIC_INDUCT:
5646 return x;
5647 case GENERAL_INDUCT:
5648 {
5649 struct induction *v = reg_iv_info[REGNO (x)];
5650
5651 /* Form expression from giv and add benefit. Ensure this giv
5652 can derive another and subtract any needed adjustment if so. */
5653 *benefit += v->benefit;
5654 if (v->cant_derive)
5655 return 0;
5656
5657 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5658 v->mult_val),
5659 v->add_val);
5660 if (v->derive_adjustment)
5661 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5662 return simplify_giv_expr (tem, benefit);
5663 }
5664
5665 default:
5666 break;
5667 }
5668
5669 /* Fall through to general case. */
5670 default:
5671 /* If invariant, return as USE (unless CONST_INT).
5672 Otherwise, not giv. */
5673 if (GET_CODE (x) == USE)
5674 x = XEXP (x, 0);
5675
5676 if (invariant_p (x) == 1)
5677 {
5678 if (GET_CODE (x) == CONST_INT)
5679 return x;
5680 else
5681 return gen_rtx_USE (mode, x);
5682 }
5683 else
5684 return 0;
5685 }
5686 }
5687 \f
5688 /* Help detect a giv that is calculated by several consecutive insns;
5689 for example,
5690 giv = biv * M
5691 giv = giv + A
5692 The caller has already identified the first insn P as having a giv as dest;
5693 we check that all other insns that set the same register follow
5694 immediately after P, that they alter nothing else,
5695 and that the result of the last is still a giv.
5696
5697 The value is 0 if the reg set in P is not really a giv.
5698 Otherwise, the value is the amount gained by eliminating
5699 all the consecutive insns that compute the value.
5700
5701 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5702 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5703
5704 The coefficients of the ultimate giv value are stored in
5705 *MULT_VAL and *ADD_VAL. */
5706
5707 static int
5708 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5709 add_val, mult_val)
5710 int first_benefit;
5711 rtx p;
5712 rtx src_reg;
5713 rtx dest_reg;
5714 rtx *add_val;
5715 rtx *mult_val;
5716 {
5717 int count;
5718 enum rtx_code code;
5719 int benefit;
5720 rtx temp;
5721 rtx set;
5722
5723 /* Indicate that this is a giv so that we can update the value produced in
5724 each insn of the multi-insn sequence.
5725
5726 This induction structure will be used only by the call to
5727 general_induction_var below, so we can allocate it on our stack.
5728 If this is a giv, our caller will replace the induct var entry with
5729 a new induction structure. */
5730 struct induction *v
5731 = (struct induction *) alloca (sizeof (struct induction));
5732 v->src_reg = src_reg;
5733 v->mult_val = *mult_val;
5734 v->add_val = *add_val;
5735 v->benefit = first_benefit;
5736 v->cant_derive = 0;
5737 v->derive_adjustment = 0;
5738
5739 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5740 reg_iv_info[REGNO (dest_reg)] = v;
5741
5742 count = n_times_set[REGNO (dest_reg)] - 1;
5743
5744 while (count > 0)
5745 {
5746 p = NEXT_INSN (p);
5747 code = GET_CODE (p);
5748
5749 /* If libcall, skip to end of call sequence. */
5750 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5751 p = XEXP (temp, 0);
5752
5753 if (code == INSN
5754 && (set = single_set (p))
5755 && GET_CODE (SET_DEST (set)) == REG
5756 && SET_DEST (set) == dest_reg
5757 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5758 add_val, mult_val))
5759 /* Giv created by equivalent expression. */
5760 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5761 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5762 add_val, mult_val))))
5763 && src_reg == v->src_reg)
5764 {
5765 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5766 benefit += libcall_benefit (p);
5767
5768 count--;
5769 v->mult_val = *mult_val;
5770 v->add_val = *add_val;
5771 v->benefit = benefit;
5772 }
5773 else if (code != NOTE)
5774 {
5775 /* Allow insns that set something other than this giv to a
5776 constant. Such insns are needed on machines which cannot
5777 include long constants and should not disqualify a giv. */
5778 if (code == INSN
5779 && (set = single_set (p))
5780 && SET_DEST (set) != dest_reg
5781 && CONSTANT_P (SET_SRC (set)))
5782 continue;
5783
5784 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5785 return 0;
5786 }
5787 }
5788
5789 return v->benefit;
5790 }
5791 \f
5792 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5793 represented by G1. If no such expression can be found, or it is clear that
5794 it cannot possibly be a valid address, 0 is returned.
5795
5796 To perform the computation, we note that
5797 G1 = a * v + b and
5798 G2 = c * v + d
5799 where `v' is the biv.
5800
5801 So G2 = (c/a) * G1 + (d - b*c/a) */
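/* For example, with G1 = 4*v + 8 and G2 = 8*v + 20, c/a is 2 and
   d - b*c/a is 20 - 8*2 = 4, so G2 = 2*G1 + 4.  */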
5802
5803 #ifdef ADDRESS_COST
5804 static rtx
5805 express_from (g1, g2)
5806 struct induction *g1, *g2;
5807 {
5808 rtx mult, add;
5809
5810 /* The value that G1 will be multiplied by must be a constant integer. Also,
5811 the only chance we have of getting a valid address is if b*c/a (see above
5812 for notation) is also an integer. */
5813 if (GET_CODE (g1->mult_val) != CONST_INT
5814 || GET_CODE (g2->mult_val) != CONST_INT
5815 || GET_CODE (g1->add_val) != CONST_INT
5816 || g1->mult_val == const0_rtx
5817 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5818 return 0;
5819
5820 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5821 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5822
5823 /* Form simplified final result. */
5824 if (mult == const0_rtx)
5825 return add;
5826 else if (mult == const1_rtx)
5827 mult = g1->dest_reg;
5828 else
5829 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5830
5831 if (add == const0_rtx)
5832 return mult;
5833 else
5834 return gen_rtx_PLUS (g2->mode, mult, add);
5835 }
5836 #endif
5837 \f
5838 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5839 (either directly or via an address expression) a register used to represent
5840 G1. Set g2->new_reg to a represtation of G1 (normally just
5841 g1->dest_reg). */
5842
5843 static int
5844 combine_givs_p (g1, g2)
5845 struct induction *g1, *g2;
5846 {
5847 #ifdef ADDRESS_COST
5848 rtx tem;
5849 #endif
5850
5851 /* If these givs are identical, they can be combined. */
5852 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5853 && rtx_equal_p (g1->add_val, g2->add_val))
5854 {
5855 g2->new_reg = g1->dest_reg;
5856 return 1;
5857 }
5858
5859 #ifdef ADDRESS_COST
5860 /* If G2 can be expressed as a function of G1 and that function is valid
5861 as an address and no more expensive than using a register for G2,
5862 the expression of G2 in terms of G1 can be used. */
5863 if (g2->giv_type == DEST_ADDR
5864 && (tem = express_from (g1, g2)) != 0
5865 && memory_address_p (g2->mem_mode, tem)
5866 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5867 {
5868 g2->new_reg = tem;
5869 return 1;
5870 }
5871 #endif
5872
5873 return 0;
5874 }
5875 \f
5876 #ifdef GIV_SORT_CRITERION
5877 /* Compare two givs and sort the most desirable one for combinations first.
5878 This is used only in one qsort call below. */
5879
5880 static int
5881 giv_sort (x, y)
5882 struct induction **x, **y;
5883 {
5884 GIV_SORT_CRITERION (*x, *y);
5885
5886 return 0;
5887 }
5888 #endif
5889
5890 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5891 any other. If so, point SAME to the giv combined with and set NEW_REG to
5892 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5893 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5894
5895 static void
5896 combine_givs (bl)
5897 struct iv_class *bl;
5898 {
5899 struct induction *g1, *g2, **giv_array;
5900 int i, j, giv_count, pass;
5901
5902 /* Count givs, because bl->giv_count is incorrect here. */
5903 giv_count = 0;
5904 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5905 giv_count++;
5906
5907 giv_array
5908 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5909 i = 0;
5910 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5911 giv_array[i++] = g1;
5912
5913 #ifdef GIV_SORT_CRITERION
5914 /* Sort the givs if GIV_SORT_CRITERION is defined.
5915 This is usually defined for processors which lack
5916 negative register offsets, so that more givs may be combined.
5917
5918 if (loop_dump_stream)
5919 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5920
5921 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5922 #endif
5923
5924 for (i = 0; i < giv_count; i++)
5925 {
5926 g1 = giv_array[i];
5927 for (pass = 0; pass <= 1; pass++)
5928 for (j = 0; j < giv_count; j++)
5929 {
5930 g2 = giv_array[j];
5931 if (g1 != g2
5932 /* First try to combine with replaceable givs, then all givs. */
5933 && (g1->replaceable || pass == 1)
5934 /* If either has already been combined or is to be ignored, can't
5935 combine. */
5936 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5937 /* If something has been based on G2, G2 cannot itself be based
5938 on something else. */
5939 && ! g2->combined_with
5940 && combine_givs_p (g1, g2))
5941 {
5942 /* g2->new_reg set by `combine_givs_p' */
5943 g2->same = g1;
5944 g1->combined_with = 1;
5945
5946 /* If one of these givs is a DEST_REG that was only used
5947 once, by the other giv, this is actually a single use.
5948 The DEST_REG has the correct cost, while the other giv
5949 counts the REG use too often. */
5950 if (g2->giv_type == DEST_REG
5951 && n_times_used[REGNO (g2->dest_reg)] == 1
5952 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5953 g1->benefit = g2->benefit;
5954 else if (g1->giv_type != DEST_REG
5955 || n_times_used[REGNO (g1->dest_reg)] != 1
5956 || ! reg_mentioned_p (g1->dest_reg,
5957 PATTERN (g2->insn)))
5958 {
5959 g1->benefit += g2->benefit;
5960 g1->times_used += g2->times_used;
5961 }
5962 /* ??? The new final_[bg]iv_value code does a much better job
5963 of finding replaceable giv's, and hence this code may no
5964 longer be necessary. */
5965 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5966 g1->benefit -= copy_cost;
5967 g1->lifetime += g2->lifetime;
5968
5969 if (loop_dump_stream)
5970 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5971 INSN_UID (g2->insn), INSN_UID (g1->insn));
5972 }
5973 }
5974 }
5975 }
5976 \f
5977 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
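/* For example, maybe_eliminate_biv_1 below calls
   emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where)
   to leave arg * mult_val + add_val in TEM ahead of a compare.  */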
5978
5979 void
5980 emit_iv_add_mult (b, m, a, reg, insert_before)
5981 rtx b; /* initial value of basic induction variable */
5982 rtx m; /* multiplicative constant */
5983 rtx a; /* additive constant */
5984 rtx reg; /* destination register */
5985 rtx insert_before;
5986 {
5987 rtx seq;
5988 rtx result;
5989
5990 /* Prevent unexpected sharing of these rtx. */
5991 a = copy_rtx (a);
5992 b = copy_rtx (b);
5993
5994 /* Increase the lifetime of any invariants moved further in code. */
5995 update_reg_last_use (a, insert_before);
5996 update_reg_last_use (b, insert_before);
5997 update_reg_last_use (m, insert_before);
5998
5999 start_sequence ();
6000 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6001 if (reg != result)
6002 emit_move_insn (reg, result);
6003 seq = gen_sequence ();
6004 end_sequence ();
6005
6006 emit_insn_before (seq, insert_before);
6007
6008 /* It is entirely possible that the expansion created lots of new
6009 registers. Iterate over the sequence we just created and
6010 record them all. */
6011
6012 if (GET_CODE (seq) == SEQUENCE)
6013 {
6014 int i;
6015 for (i = 0; i < XVECLEN (seq, 0); ++i)
6016 {
6017 rtx set = single_set (XVECEXP (seq, 0, i));
6018 if (set && GET_CODE (SET_DEST (set)) == REG)
6019 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6020 }
6021 }
6022 else if (GET_CODE (seq) == SET
6023 && GET_CODE (SET_DEST (seq)) == REG)
6024 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
6025 }
6026 \f
6027 /* Test whether A * B can be computed without
6028 an actual multiply insn. Value is 1 if so. */
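/* For example, x * 5 usually expands to a short shift/add sequence
   (roughly t = x << 2; t + x), which we count as cheap; a constant
   whose expansion needs a real mult insn, a libcall, or a long
   sequence of insns is not.  */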
6029
6030 static int
6031 product_cheap_p (a, b)
6032 rtx a;
6033 rtx b;
6034 {
6035 int i;
6036 rtx tmp;
6037 struct obstack *old_rtl_obstack = rtl_obstack;
6038 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6039 int win = 1;
6040
6041 /* If only one is constant, make it B. */
6042 if (GET_CODE (a) == CONST_INT)
6043 tmp = a, a = b, b = tmp;
6044
6045 /* If the first is constant, both are constant, so we don't need a multiply. */
6046 if (GET_CODE (a) == CONST_INT)
6047 return 1;
6048
6049 /* If the second is not constant, neither is constant, so we would need a multiply. */
6050 if (GET_CODE (b) != CONST_INT)
6051 return 0;
6052
6053 /* One operand is constant, so we might not need a multiply insn. Generate
6054 the code for the multiply and see whether a call, a multiply, or a long
6055 sequence of insns is generated. */
6056
6057 rtl_obstack = &temp_obstack;
6058 start_sequence ();
6059 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6060 tmp = gen_sequence ();
6061 end_sequence ();
6062
6063 if (GET_CODE (tmp) == SEQUENCE)
6064 {
6065 if (XVEC (tmp, 0) == 0)
6066 win = 1;
6067 else if (XVECLEN (tmp, 0) > 3)
6068 win = 0;
6069 else
6070 for (i = 0; i < XVECLEN (tmp, 0); i++)
6071 {
6072 rtx insn = XVECEXP (tmp, 0, i);
6073
6074 if (GET_CODE (insn) != INSN
6075 || (GET_CODE (PATTERN (insn)) == SET
6076 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6077 || (GET_CODE (PATTERN (insn)) == PARALLEL
6078 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6079 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6080 {
6081 win = 0;
6082 break;
6083 }
6084 }
6085 }
6086 else if (GET_CODE (tmp) == SET
6087 && GET_CODE (SET_SRC (tmp)) == MULT)
6088 win = 0;
6089 else if (GET_CODE (tmp) == PARALLEL
6090 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6091 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6092 win = 0;
6093
6094 /* Free any storage we obtained in generating this multiply and restore rtl
6095 allocation to its normal obstack. */
6096 obstack_free (&temp_obstack, storage);
6097 rtl_obstack = old_rtl_obstack;
6098
6099 return win;
6100 }
6101 \f
6102 /* Check to see if the loop can be terminated by a "decrement and branch
6103 until zero" instruction. If so, add a REG_NONNEG note to the branch insn.
6104 Also try reversing an increment loop into a decrement loop
6105 to see if the optimization can be performed.
6106 Value is nonzero if the optimization was performed. */
6107
6108 /* This is useful even if the architecture doesn't have such an insn,
6109 because it might change a loop which increments from 0 to n into a loop
6110 which decrements from n to 0. A loop that decrements to zero is usually
6111 faster than one that increments from zero. */
6112
6113 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6114 such as approx_final_value, biv_total_increment, loop_iterations, and
6115 final_[bg]iv_value. */
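/* For example (a sketch), when `i' is used only for counting,

     for (i = 0; i < n; i++)
       ...

   may be rewritten as

     for (i = n; --i >= 0; )
       ...

   so that a decrement-and-branch-until-zero insn can close the loop.  */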
6116
6117 static int
6118 check_dbra_loop (loop_end, insn_count, loop_start)
6119 rtx loop_end;
6120 int insn_count;
6121 rtx loop_start;
6122 {
6123 struct iv_class *bl;
6124 rtx reg;
6125 rtx jump_label;
6126 rtx final_value;
6127 rtx start_value;
6128 rtx new_add_val;
6129 rtx comparison;
6130 rtx before_comparison;
6131 rtx p;
6132 rtx jump;
6133 rtx first_compare;
6134 int compare_and_branch;
6135
6136 /* If last insn is a conditional branch, and the insn before tests a
6137 register value, try to optimize it. Otherwise, we can't do anything. */
6138
6139 jump = PREV_INSN (loop_end);
6140 comparison = get_condition_for_loop (jump);
6141 if (comparison == 0)
6142 return 0;
6143
6144 /* Try to compute whether the compare/branch at the loop end is one or
6145 two instructions. */
6146 get_condition (jump, &first_compare);
6147 if (first_compare == jump)
6148 compare_and_branch = 1;
6149 else if (first_compare == prev_nonnote_insn (jump))
6150 compare_and_branch = 2;
6151 else
6152 return 0;
6153
6154 /* Check all of the bivs to see if the compare uses one of them.
6155 Skip biv's set more than once because we can't guarantee that
6156 it will be zero on the last iteration. Also skip if the biv is
6157 used between its update and the test insn. */
6158
6159 for (bl = loop_iv_list; bl; bl = bl->next)
6160 {
6161 if (bl->biv_count == 1
6162 && bl->biv->dest_reg == XEXP (comparison, 0)
6163 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6164 first_compare))
6165 break;
6166 }
6167
6168 if (! bl)
6169 return 0;
6170
6171 /* Look for the case where the basic induction variable is always
6172 nonnegative, and equals zero on the last iteration.
6173 In this case, add a reg_note REG_NONNEG, which allows the
6174 m68k DBRA instruction to be used. */
6175
6176 if (((GET_CODE (comparison) == GT
6177 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6178 && INTVAL (XEXP (comparison, 1)) == -1)
6179 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6180 && GET_CODE (bl->biv->add_val) == CONST_INT
6181 && INTVAL (bl->biv->add_val) < 0)
6182 {
6183 /* The initial value must be greater than 0, and
6184 init_val % -dec_value == 0 must hold, to ensure that the biv equals
6185 zero on the last iteration. */
6186
6187 if (GET_CODE (bl->initial_value) == CONST_INT
6188 && INTVAL (bl->initial_value) > 0
6189 && (INTVAL (bl->initial_value)
6190 % (-INTVAL (bl->biv->add_val))) == 0)
6191 {
6192 /* register always nonnegative, add REG_NOTE to branch */
6193 REG_NOTES (PREV_INSN (loop_end))
6194 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6195 REG_NOTES (PREV_INSN (loop_end)));
6196 bl->nonneg = 1;
6197
6198 return 1;
6199 }
6200
6201 /* If the decrement is 1 and the value was tested as >= 0 before
6202 the loop, then we can safely optimize. */
6203 for (p = loop_start; p; p = PREV_INSN (p))
6204 {
6205 if (GET_CODE (p) == CODE_LABEL)
6206 break;
6207 if (GET_CODE (p) != JUMP_INSN)
6208 continue;
6209
6210 before_comparison = get_condition_for_loop (p);
6211 if (before_comparison
6212 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6213 && GET_CODE (before_comparison) == LT
6214 && XEXP (before_comparison, 1) == const0_rtx
6215 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6216 && INTVAL (bl->biv->add_val) == -1)
6217 {
6218 REG_NOTES (PREV_INSN (loop_end))
6219 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6220 REG_NOTES (PREV_INSN (loop_end)));
6221 bl->nonneg = 1;
6222
6223 return 1;
6224 }
6225 }
6226 }
6227 else if (num_mem_sets <= 1)
6228 {
6229 /* Try to change inc to dec, so we can apply the above optimization. */
6230 /* Can do this if:
6231 all registers modified are induction variables or invariant,
6232 all memory references have non-overlapping addresses
6233 (obviously true if only one write)
6234 and we allow 2 insns for the compare/jump at the end of the loop. */
6235 /* Also, we must avoid any instructions which use both the reversed
6236 biv and another biv. Such instructions will fail if the loop is
6237 reversed. We meet this condition by requiring that either
6238 no_use_except_counting is true, or else that there is only
6239 one biv. */
6240 int num_nonfixed_reads = 0;
6241 /* 1 if the iteration var is used only to count iterations. */
6242 int no_use_except_counting = 0;
6243 /* 1 if the loop has no memory store, or it has a single memory store
6244 which is reversible. */
6245 int reversible_mem_store = 1;
6246
6247 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6248 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6249 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6250
6251 if (bl->giv_count == 0
6252 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6253 {
6254 rtx bivreg = regno_reg_rtx[bl->regno];
6255
6256 /* If there are no givs for this biv, and the only exit is the
6257 fall through at the end of the loop, then
6258 see if perhaps there are no uses except to count. */
6259 no_use_except_counting = 1;
6260 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6261 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6262 {
6263 rtx set = single_set (p);
6264
6265 if (set && GET_CODE (SET_DEST (set)) == REG
6266 && REGNO (SET_DEST (set)) == bl->regno)
6267 /* An insn that sets the biv is okay. */
6268 ;
6269 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6270 || p == prev_nonnote_insn (loop_end))
6271 /* Don't bother about the end test. */
6272 ;
6273 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6274 /* Any other use of the biv is no good. */
6275 {
6276 no_use_except_counting = 0;
6277 break;
6278 }
6279 }
6280 }
6281
6282 /* If the loop has a single store, and the destination address is
6283 invariant, then we can't reverse the loop, because this address
6284 might then have the wrong value at loop exit.
6285 This would work if the source was invariant also, however, in that
6286 case, the insn should have been moved out of the loop. */
6287
6288 if (num_mem_sets == 1)
6289 reversible_mem_store
6290 = (! unknown_address_altered
6291 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6292
6293 /* This code only acts for innermost loops. Also it simplifies
6294 the memory address check by only reversing loops with
6295 zero or one memory access.
6296 Two memory accesses could involve parts of the same array,
6297 and that can't be reversed. */
6298
6299 if (num_nonfixed_reads <= 1
6300 && !loop_has_call
6301 && !loop_has_volatile
6302 && reversible_mem_store
6303 && (no_use_except_counting
6304 || ((bl->giv_count + bl->biv_count + num_mem_sets
6305 + num_movables + compare_and_branch == insn_count)
6306 && (bl == loop_iv_list && bl->next == 0))))
6307 {
6308 rtx tem;
6309
6310 /* Loop can be reversed. */
6311 if (loop_dump_stream)
6312 fprintf (loop_dump_stream, "Can reverse loop\n");
6313
6314 /* Now check other conditions:
6315
6316 The increment must be a constant, as must the initial value,
6317 and the comparison code must be LT.
6318
6319 This test can probably be improved since +/- 1 in the constant
6320 can be obtained by changing LT to LE and vice versa; this is
6321 confusing. */
6322
6323 if (comparison
6324 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6325 /* LE gets turned into LT */
6326 && GET_CODE (comparison) == LT
6327 && GET_CODE (bl->initial_value) == CONST_INT)
6328 {
6329 HOST_WIDE_INT add_val, comparison_val;
6330 rtx initial_value;
6331
6332 add_val = INTVAL (bl->biv->add_val);
6333 comparison_val = INTVAL (XEXP (comparison, 1));
6334 final_value = XEXP (comparison, 1);
6335 initial_value = bl->initial_value;
6336
6337 /* Normalize the initial value if it is an integer and
6338 has no other use except as a counter. This will allow
6339 a few more loops to be reversed. */
6340 if (no_use_except_counting
6341 && GET_CODE (initial_value) == CONST_INT)
6342 {
6343 comparison_val = comparison_val - INTVAL (bl->initial_value);
6344 /* Check for overflow. If comparison_val ends up as a
6345 negative value, then we can't reverse the loop. */
6346 if (comparison_val >= 0)
6347 initial_value = const0_rtx;
6348 }
6349
6350 /* If the initial value is not zero, or if the comparison
6351 value is not an exact multiple of the increment, then we
6352 cannot reverse this loop. */
6353 if (initial_value != const0_rtx
6354 || (comparison_val % add_val) != 0)
6355 return 0;
6356
6357 /* Reset these in case we normalized the initial value
6358 and comparison value above. */
6359 bl->initial_value = initial_value;
6360 XEXP (comparison, 1) = GEN_INT (comparison_val);
6361
6362 /* Register will always be nonnegative, with value
6363 0 on last iteration if loop reversed */
6364
6365 /* Save some info needed to produce the new insns. */
6366 reg = bl->biv->dest_reg;
6367 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6368 if (jump_label == pc_rtx)
6369 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6370 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6371
6372 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6373 - INTVAL (bl->biv->add_val));
6374
6375 /* Initialize biv to start_value before loop start.
6376 The old initializing insn will be deleted as a
6377 dead store by flow.c. */
6378 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6379
6380 /* Add insn to decrement register, and delete insn
6381 that incremented the register. */
6382 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6383 bl->biv->insn);
6384 delete_insn (bl->biv->insn);
6385
6386 /* Update biv info to reflect its new status. */
6387 bl->biv->insn = p;
6388 bl->initial_value = start_value;
6389 bl->biv->add_val = new_add_val;
6390
6391 /* Inc LABEL_NUSES so that delete_insn will
6392 not delete the label. */
6393 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6394
6395 /* Emit an insn after the end of the loop to set the biv's
6396 proper exit value if it is used anywhere outside the loop. */
6397 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
6398 || ! bl->init_insn
6399 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6400 emit_insn_after (gen_move_insn (reg, final_value),
6401 loop_end);
6402
6403 /* Delete compare/branch at end of loop. */
6404 delete_insn (PREV_INSN (loop_end));
6405 if (compare_and_branch == 2)
6406 delete_insn (first_compare);
6407
6408 /* Add new compare/branch insn at end of loop. */
6409 start_sequence ();
6410 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6411 GET_MODE (reg), 0, 0);
6412 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6413 tem = gen_sequence ();
6414 end_sequence ();
6415 emit_jump_insn_before (tem, loop_end);
6416
6417 for (tem = PREV_INSN (loop_end);
6418 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6419 ;
6420 if (tem)
6421 {
6422 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6423
6424 /* Increment of LABEL_NUSES done above. */
6425 /* Register is now always nonnegative,
6426 so add REG_NONNEG note to the branch. */
6427 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6428 REG_NOTES (tem));
6429 }
6430
6431 bl->nonneg = 1;
6432
6433 /* Mark that this biv has been reversed. Each giv which depends
6434 on this biv, and which is also live past the end of the loop
6435 will have to be fixed up. */
6436
6437 bl->reversed = 1;
6438
6439 if (loop_dump_stream)
6440 fprintf (loop_dump_stream,
6441 "Reversed loop and added reg_nonneg\n");
6442
6443 return 1;
6444 }
6445 }
6446 }
6447
6448 return 0;
6449 }
6450 \f
6451 /* Verify whether the biv BL appears to be eliminable,
6452 based on the insns in the loop that refer to it.
6453 LOOP_START is the first insn of the loop, and END is the end insn.
6454
6455 If ELIMINATE_P is non-zero, actually do the elimination.
6456
6457 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6458 determine whether invariant insns should be placed inside or at the
6459 start of the loop. */
6460
6461 static int
6462 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6463 struct iv_class *bl;
6464 rtx loop_start;
6465 rtx end;
6466 int eliminate_p;
6467 int threshold, insn_count;
6468 {
6469 rtx reg = bl->biv->dest_reg;
6470 rtx p;
6471
6472 /* Scan all insns in the loop, stopping if we find one that uses the
6473 biv in a way that we cannot eliminate. */
6474
6475 for (p = loop_start; p != end; p = NEXT_INSN (p))
6476 {
6477 enum rtx_code code = GET_CODE (p);
6478 rtx where = threshold >= insn_count ? loop_start : p;
6479
6480 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6481 && reg_mentioned_p (reg, PATTERN (p))
6482 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6483 {
6484 if (loop_dump_stream)
6485 fprintf (loop_dump_stream,
6486 "Cannot eliminate biv %d: biv used in insn %d.\n",
6487 bl->regno, INSN_UID (p));
6488 break;
6489 }
6490 }
6491
6492 if (p == end)
6493 {
6494 if (loop_dump_stream)
6495 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6496 bl->regno, eliminate_p ? "was" : "can be");
6497 return 1;
6498 }
6499
6500 return 0;
6501 }
6502 \f
6503 /* If the biv of class BL appears in X (part of the pattern of INSN), see
6504 if we can eliminate its use. If so, return 1. If not, return 0.
6505
6506 If the biv does not appear in X, return 1.
6507
6508 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6509 where extra insns should be added. Depending on how many items have been
6510 moved out of the loop, it will either be before INSN or at the start of
6511 the loop. */
6512
6513 static int
6514 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6515 rtx x, insn;
6516 struct iv_class *bl;
6517 int eliminate_p;
6518 rtx where;
6519 {
6520 enum rtx_code code = GET_CODE (x);
6521 rtx reg = bl->biv->dest_reg;
6522 enum machine_mode mode = GET_MODE (reg);
6523 struct induction *v;
6524 rtx arg, tem;
6525 #ifdef HAVE_cc0
6526 rtx new;
6527 #endif
6528 int arg_operand;
6529 char *fmt;
6530 int i, j;
6531
6532 switch (code)
6533 {
6534 case REG:
6535 /* If we haven't already been able to do something with this BIV,
6536 we can't eliminate it. */
6537 if (x == reg)
6538 return 0;
6539 return 1;
6540
6541 case SET:
6542 /* If this sets the BIV, it is not a problem. */
6543 if (SET_DEST (x) == reg)
6544 return 1;
6545
6546 /* If this is an insn that defines a giv, it is also ok because
6547 it will go away when the giv is reduced. */
6548 for (v = bl->giv; v; v = v->next_iv)
6549 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6550 return 1;
6551
6552 #ifdef HAVE_cc0
6553 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6554 {
6555 /* Can replace with any giv that was reduced and
6556 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6557 Require a constant for MULT_VAL, so we know it's nonzero.
6558 ??? We disable this optimization to avoid potential
6559 overflows. */
6560
6561 for (v = bl->giv; v; v = v->next_iv)
6562 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6563 && v->add_val == const0_rtx
6564 && ! v->ignore && ! v->maybe_dead && v->always_computable
6565 && v->mode == mode
6566 && 0)
6567 {
6568 /* If the giv V had the auto-inc address optimization applied
6569 to it, and INSN occurs between the giv insn and the biv
6570 insn, then we must adjust the value used here.
6571 This is rare, so we don't bother to do so. */
6572 if (v->auto_inc_opt
6573 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6574 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6575 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6576 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6577 continue;
6578
6579 if (! eliminate_p)
6580 return 1;
6581
6582 /* If the giv has the opposite direction of change,
6583 then reverse the comparison. */
6584 if (INTVAL (v->mult_val) < 0)
6585 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6586 const0_rtx, v->new_reg);
6587 else
6588 new = v->new_reg;
6589
6590 /* We can probably test that giv's reduced reg. */
6591 if (validate_change (insn, &SET_SRC (x), new, 0))
6592 return 1;
6593 }
6594
6595 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6596 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6597 Require a constant for MULT_VAL, so we know it's nonzero.
6598 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6599 overflow problem. */
6600
6601 for (v = bl->giv; v; v = v->next_iv)
6602 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6603 && ! v->ignore && ! v->maybe_dead && v->always_computable
6604 && v->mode == mode
6605 && (GET_CODE (v->add_val) == SYMBOL_REF
6606 || GET_CODE (v->add_val) == LABEL_REF
6607 || GET_CODE (v->add_val) == CONST
6608 || (GET_CODE (v->add_val) == REG
6609 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6610 {
6611 /* If the giv V had the auto-inc address optimization applied
6612 to it, and INSN occurs between the giv insn and the biv
6613 insn, then we must adjust the value used here.
6614 This is rare, so we don't bother to do so. */
6615 if (v->auto_inc_opt
6616 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6617 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6618 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6619 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6620 continue;
6621
6622 if (! eliminate_p)
6623 return 1;
6624
6625 /* If the giv has the opposite direction of change,
6626 then reverse the comparison. */
6627 if (INTVAL (v->mult_val) < 0)
6628 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6629 v->new_reg);
6630 else
6631 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6632 copy_rtx (v->add_val));
6633
6634 /* Replace biv with the giv's reduced register. */
6635 update_reg_last_use (v->add_val, insn);
6636 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6637 return 1;
6638
6639 /* Insn doesn't support that constant or invariant. Copy it
6640 into a register (it will be a loop invariant).  */
6641 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6642
6643 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6644 where);
6645
6646 /* Substitute the new register for its invariant value in
6647 the compare expression. */
6648 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6649 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6650 return 1;
6651 }
6652 }
6653 #endif
6654 break;
6655
6656 case COMPARE:
6657 case EQ: case NE:
6658 case GT: case GE: case GTU: case GEU:
6659 case LT: case LE: case LTU: case LEU:
6660 /* See if either argument is the biv. */
6661 if (XEXP (x, 0) == reg)
6662 arg = XEXP (x, 1), arg_operand = 1;
6663 else if (XEXP (x, 1) == reg)
6664 arg = XEXP (x, 0), arg_operand = 0;
6665 else
6666 break;
6667
6668 if (CONSTANT_P (arg))
6669 {
6670 /* First try to replace with any giv that has constant positive
6671 mult_val and constant add_val. We might be able to support
6672 negative mult_val, but it seems complex to do it in general. */
6673
6674 for (v = bl->giv; v; v = v->next_iv)
6675 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6676 && (GET_CODE (v->add_val) == SYMBOL_REF
6677 || GET_CODE (v->add_val) == LABEL_REF
6678 || GET_CODE (v->add_val) == CONST
6679 || (GET_CODE (v->add_val) == REG
6680 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6681 && ! v->ignore && ! v->maybe_dead && v->always_computable
6682 && v->mode == mode)
6683 {
6684 /* If the giv V had the auto-inc address optimization applied
6685 to it, and INSN occurs between the giv insn and the biv
6686 insn, then we must adjust the value used here.
6687 This is rare, so we don't bother to do so. */
6688 if (v->auto_inc_opt
6689 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6690 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6691 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6692 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6693 continue;
6694
6695 if (! eliminate_p)
6696 return 1;
6697
6698 /* Replace biv with the giv's reduced reg. */
6699 XEXP (x, 1-arg_operand) = v->new_reg;
6700
6701 /* If all constants are actually constant integers and
6702 the derived constant can be directly placed in the COMPARE,
6703 do so. */
6704 if (GET_CODE (arg) == CONST_INT
6705 && GET_CODE (v->mult_val) == CONST_INT
6706 && GET_CODE (v->add_val) == CONST_INT
6707 && validate_change (insn, &XEXP (x, arg_operand),
6708 GEN_INT (INTVAL (arg)
6709 * INTVAL (v->mult_val)
6710 + INTVAL (v->add_val)), 0))
6711 return 1;
6712
6713 /* Otherwise, load it into a register. */
6714 tem = gen_reg_rtx (mode);
6715 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6716 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6717 return 1;
6718
6719 /* If that failed, put back the change we made above. */
6720 XEXP (x, 1-arg_operand) = reg;
6721 }
6722
6723 /* Look for giv with positive constant mult_val and nonconst add_val.
6724 Insert insns to calculate new compare value.
6725 ??? Turn this off due to possible overflow. */
6726
6727 for (v = bl->giv; v; v = v->next_iv)
6728 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6729 && ! v->ignore && ! v->maybe_dead && v->always_computable
6730 && v->mode == mode
6731 && 0)
6732 {
6733 rtx tem;
6734
6735 /* If the giv V had the auto-inc address optimization applied
6736 to it, and INSN occurs between the giv insn and the biv
6737 insn, then we must adjust the value used here.
6738 This is rare, so we don't bother to do so. */
6739 if (v->auto_inc_opt
6740 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6741 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6742 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6743 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6744 continue;
6745
6746 if (! eliminate_p)
6747 return 1;
6748
6749 tem = gen_reg_rtx (mode);
6750
6751 /* Replace biv with giv's reduced register. */
6752 validate_change (insn, &XEXP (x, 1 - arg_operand),
6753 v->new_reg, 1);
6754
6755 /* Compute value to compare against. */
6756 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6757 /* Use it in this insn. */
6758 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6759 if (apply_change_group ())
6760 return 1;
6761 }
6762 }
6763 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6764 {
6765 if (invariant_p (arg) == 1)
6766 {
6767 /* Look for giv with constant positive mult_val and nonconst
6768 add_val. Insert insns to compute new compare value.
6769 ??? Turn this off due to possible overflow. */
6770
6771 for (v = bl->giv; v; v = v->next_iv)
6772 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6773 && ! v->ignore && ! v->maybe_dead && v->always_computable
6774 && v->mode == mode
6775 && 0)
6776 {
6777 rtx tem;
6778
6779 /* If the giv V had the auto-inc address optimization applied
6780 to it, and INSN occurs between the giv insn and the biv
6781 insn, then we must adjust the value used here.
6782 This is rare, so we don't bother to do so. */
6783 if (v->auto_inc_opt
6784 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6785 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6786 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6787 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6788 continue;
6789
6790 if (! eliminate_p)
6791 return 1;
6792
6793 tem = gen_reg_rtx (mode);
6794
6795 /* Replace biv with giv's reduced register. */
6796 validate_change (insn, &XEXP (x, 1 - arg_operand),
6797 v->new_reg, 1);
6798
6799 /* Compute value to compare against. */
6800 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6801 tem, where);
6802 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6803 if (apply_change_group ())
6804 return 1;
6805 }
6806 }
6807
6808 /* This code has problems. Basically, while deciding
6809 whether we will eliminate BL, you can't know whether a particular giv
6810 of ARG will be reduced. If it isn't going to be reduced,
6811 we can't eliminate BL. We can try forcing it to be reduced,
6812 but that can generate poor code.
6813
6814 The problem is that the benefit of reducing TV, below, should
6815 be increased if BL can actually be eliminated, but this means
6816 we might have to do a topological sort of the order in which
6817 we try to process bivs. It doesn't seem worthwhile to do
6818 this sort of thing now. */
6819
6820 #if 0
6821 /* Otherwise the reg compared with had better be a biv. */
6822 if (GET_CODE (arg) != REG
6823 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6824 return 0;
6825
6826 /* Look for a pair of givs, one for each biv,
6827 with identical coefficients. */
6828 for (v = bl->giv; v; v = v->next_iv)
6829 {
6830 struct induction *tv;
6831
6832 if (v->ignore || v->maybe_dead || v->mode != mode)
6833 continue;
6834
6835 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6836 if (! tv->ignore && ! tv->maybe_dead
6837 && rtx_equal_p (tv->mult_val, v->mult_val)
6838 && rtx_equal_p (tv->add_val, v->add_val)
6839 && tv->mode == mode)
6840 {
6841 /* If the giv V had the auto-inc address optimization applied
6842 to it, and INSN occurs between the giv insn and the biv
6843 insn, then we must adjust the value used here.
6844 This is rare, so we don't bother to do so. */
6845 if (v->auto_inc_opt
6846 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6847 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6848 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6849 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6850 continue;
6851
6852 if (! eliminate_p)
6853 return 1;
6854
6855 /* Replace biv with its giv's reduced reg. */
6856 XEXP (x, 1-arg_operand) = v->new_reg;
6857 /* Replace other operand with the other giv's
6858 reduced reg. */
6859 XEXP (x, arg_operand) = tv->new_reg;
6860 return 1;
6861 }
6862 }
6863 #endif
6864 }
6865
6866 /* If we get here, the biv can't be eliminated. */
6867 return 0;
6868
6869 case MEM:
6870 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6871 biv is used in it, since it will be replaced. */
6872 for (v = bl->giv; v; v = v->next_iv)
6873 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6874 return 1;
6875 break;
6876
6877 default:
6878 break;
6879 }
6880
6881 /* See if any subexpression fails elimination. */
6882 fmt = GET_RTX_FORMAT (code);
6883 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6884 {
6885 switch (fmt[i])
6886 {
6887 case 'e':
6888 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6889 eliminate_p, where))
6890 return 0;
6891 break;
6892
6893 case 'E':
6894 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6895 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6896 eliminate_p, where))
6897 return 0;
6898 break;
6899 }
6900 }
6901
6902 return 1;
6903 }
6904 \f
6905 /* Return nonzero if the last use of REG
6906 is in an insn following INSN in the same basic block. */
6907
6908 static int
6909 last_use_this_basic_block (reg, insn)
6910 rtx reg;
6911 rtx insn;
6912 {
6913 rtx n;
6914 for (n = insn;
6915 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6916 n = NEXT_INSN (n))
6917 {
6918 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6919 return 1;
6920 }
6921 return 0;
6922 }
6923 \f
6924 /* Called via `note_stores' to record the initial value of a biv. Here we
6925 just record the location of the set and process it later. */
6926
6927 static void
6928 record_initial (dest, set)
6929 rtx dest;
6930 rtx set;
6931 {
6932 struct iv_class *bl;
6933
6934 if (GET_CODE (dest) != REG
6935 || REGNO (dest) >= max_reg_before_loop
6936 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6937 return;
6938
6939 bl = reg_biv_class[REGNO (dest)];
6940
6941 /* If this is the first set found, record it. */
6942 if (bl->init_insn == 0)
6943 {
6944 bl->init_insn = note_insn;
6945 bl->init_set = set;
6946 }
6947 }
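
/* Illustration only (a sketch, not the actual call site): the biv scan
   elsewhere in this pass records the insn being examined and then lets
   note_stores walk its pattern, roughly as in:

	note_insn = p;
	note_stores (PATTERN (p), record_initial);

   so record_initial is offered every destination stored by insn P.  */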
6948 \f
6949 /* If any of the registers in X are "old" and currently have a last use earlier
6950 than INSN, update them to have a last use of INSN. Their actual last use
6951 will be the previous insn but it will not have a valid uid_luid so we can't
6952 use it. */
6953
6954 static void
6955 update_reg_last_use (x, insn)
6956 rtx x;
6957 rtx insn;
6958 {
6959 /* Check for the case where INSN does not have a valid luid. In this case,
6960 there is no need to modify the regno_last_uid, as this can only happen
6961 when code is inserted after the loop_end to set a pseudo's final value,
6962 and hence this insn will never be the last use of x. */
6963 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6964 && INSN_UID (insn) < max_uid_for_loop
6965 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6966 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6967 else
6968 {
6969 register int i, j;
6970 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6971 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6972 {
6973 if (fmt[i] == 'e')
6974 update_reg_last_use (XEXP (x, i), insn);
6975 else if (fmt[i] == 'E')
6976 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6977 update_reg_last_use (XVECEXP (x, i, j), insn);
6978 }
6979 }
6980 }
6981 \f
6982 /* Given a jump insn JUMP, return the condition that will cause it to branch
6983 to its JUMP_LABEL. If the condition cannot be understood, or is an
6984 inequality floating-point comparison which needs to be reversed, 0 will
6985 be returned.
6986
6987 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6988 insn used in locating the condition was found. If a replacement test
6989 of the condition is desired, it should be placed in front of that
6990 insn and we will be sure that the inputs are still valid.
6991
6992 The condition will be returned in a canonical form to simplify testing by
6993 callers. Specifically:
6994
6995 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6996 (2) Both operands will be machine operands; (cc0) will have been replaced.
6997 (3) If an operand is a constant, it will be the second operand.
6998 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6999 for GE, GEU, and LEU. */
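
/* For example (illustrative values): a branch testing "x <= 4" comes back
   as (LT x 5) by rule (4), while a branch testing "4 > x" comes back as
   (LT x 4) by rule (3) and swap_condition.  */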
7000
7001 rtx
7002 get_condition (jump, earliest)
7003 rtx jump;
7004 rtx *earliest;
7005 {
7006 enum rtx_code code;
7007 rtx prev = jump;
7008 rtx set;
7009 rtx tem;
7010 rtx op0, op1;
7011 int reverse_code = 0;
7012 int did_reverse_condition = 0;
7013 enum machine_mode mode;
7014
7015 /* If this is not a standard conditional jump, we can't parse it. */
7016 if (GET_CODE (jump) != JUMP_INSN
7017 || ! condjump_p (jump) || simplejump_p (jump))
7018 return 0;
7019
7020 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7021 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7022 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7023 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7024
7025 if (earliest)
7026 *earliest = jump;
7027
7028 /* If this branches to JUMP_LABEL when the condition is false, reverse
7029 the condition. */
7030 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7031 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7032 code = reverse_condition (code), did_reverse_condition ^= 1;
7033
7034   /* If we are comparing a register with zero, see if the register is set
7035      in the previous insn to a COMPARE or a comparison operation.  Perform
7036      the same tests as a function of STORE_FLAG_VALUE that
7037      find_comparison_args in cse.c performs.  */
7038
7039 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7040 {
7041 /* Set non-zero when we find something of interest. */
7042 rtx x = 0;
7043
7044 #ifdef HAVE_cc0
7045 /* If comparison with cc0, import actual comparison from compare
7046 insn. */
7047 if (op0 == cc0_rtx)
7048 {
7049 if ((prev = prev_nonnote_insn (prev)) == 0
7050 || GET_CODE (prev) != INSN
7051 || (set = single_set (prev)) == 0
7052 || SET_DEST (set) != cc0_rtx)
7053 return 0;
7054
7055 op0 = SET_SRC (set);
7056 op1 = CONST0_RTX (GET_MODE (op0));
7057 if (earliest)
7058 *earliest = prev;
7059 }
7060 #endif
7061
7062 /* If this is a COMPARE, pick up the two things being compared. */
7063 if (GET_CODE (op0) == COMPARE)
7064 {
7065 op1 = XEXP (op0, 1);
7066 op0 = XEXP (op0, 0);
7067 continue;
7068 }
7069 else if (GET_CODE (op0) != REG)
7070 break;
7071
7072 /* Go back to the previous insn. Stop if it is not an INSN. We also
7073 stop if it isn't a single set or if it has a REG_INC note because
7074 we don't want to bother dealing with it. */
7075
7076 if ((prev = prev_nonnote_insn (prev)) == 0
7077 || GET_CODE (prev) != INSN
7078 || FIND_REG_INC_NOTE (prev, 0)
7079 || (set = single_set (prev)) == 0)
7080 break;
7081
7082 /* If this is setting OP0, get what it sets it to if it looks
7083 relevant. */
7084 if (rtx_equal_p (SET_DEST (set), op0))
7085 {
7086 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7087
7088 /* ??? We may not combine comparisons done in a CCmode with
7089 comparisons not done in a CCmode. This is to aid targets
7090 like Alpha that have an IEEE compliant EQ instruction, and
7091 a non-IEEE compliant BEQ instruction. The use of CCmode is
7092 actually artificial, simply to prevent the combination, but
7093 should not affect other platforms. */
7094
7095 if ((GET_CODE (SET_SRC (set)) == COMPARE
7096 || (((code == NE
7097 || (code == LT
7098 && GET_MODE_CLASS (inner_mode) == MODE_INT
7099 && (GET_MODE_BITSIZE (inner_mode)
7100 <= HOST_BITS_PER_WIDE_INT)
7101 && (STORE_FLAG_VALUE
7102 & ((HOST_WIDE_INT) 1
7103 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7104 #ifdef FLOAT_STORE_FLAG_VALUE
7105 || (code == LT
7106 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7107 && FLOAT_STORE_FLAG_VALUE < 0)
7108 #endif
7109 ))
7110 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7111 && ((GET_MODE_CLASS (mode) == MODE_CC)
7112 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7113 x = SET_SRC (set);
7114 else if (((code == EQ
7115 || (code == GE
7116 && (GET_MODE_BITSIZE (inner_mode)
7117 <= HOST_BITS_PER_WIDE_INT)
7118 && GET_MODE_CLASS (inner_mode) == MODE_INT
7119 && (STORE_FLAG_VALUE
7120 & ((HOST_WIDE_INT) 1
7121 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7122 #ifdef FLOAT_STORE_FLAG_VALUE
7123 || (code == GE
7124 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7125 && FLOAT_STORE_FLAG_VALUE < 0)
7126 #endif
7127 ))
7128 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7129 && ((GET_MODE_CLASS (mode) == MODE_CC)
7130 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7131 {
7132 /* We might have reversed a LT to get a GE here. But this wasn't
7133 actually the comparison of data, so we don't flag that we
7134 have had to reverse the condition. */
7135 did_reverse_condition ^= 1;
7136 reverse_code = 1;
7137 x = SET_SRC (set);
7138 }
7139 else
7140 break;
7141 }
7142
7143 else if (reg_set_p (op0, prev))
7144 /* If this sets OP0, but not directly, we have to give up. */
7145 break;
7146
7147 if (x)
7148 {
7149 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7150 code = GET_CODE (x);
7151 if (reverse_code)
7152 {
7153 code = reverse_condition (code);
7154 did_reverse_condition ^= 1;
7155 reverse_code = 0;
7156 }
7157
7158 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7159 if (earliest)
7160 *earliest = prev;
7161 }
7162 }
7163
7164 /* If constant is first, put it last. */
7165 if (CONSTANT_P (op0))
7166 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7167
7168 /* If OP0 is the result of a comparison, we weren't able to find what
7169 was really being compared, so fail. */
7170 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7171 return 0;
7172
7173 /* Canonicalize any ordered comparison with integers involving equality
7174 if we can do computations in the relevant mode and we do not
7175 overflow. */
7176
7177 if (GET_CODE (op1) == CONST_INT
7178 && GET_MODE (op0) != VOIDmode
7179 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7180 {
7181 HOST_WIDE_INT const_val = INTVAL (op1);
7182 unsigned HOST_WIDE_INT uconst_val = const_val;
7183 unsigned HOST_WIDE_INT max_val
7184 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7185
7186 switch (code)
7187 {
7188 case LE:
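	  /* (LE x C) becomes (LT x C+1) unless C is already the largest
	     signed value representable in this mode, where C+1 would
	     overflow; MAX_VAL >> 1 is that largest signed value.  */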
7189 if (const_val != max_val >> 1)
7190 code = LT, op1 = GEN_INT (const_val + 1);
7191 break;
7192
7193 	/* When cross-compiling, const_val might be sign-extended from
7194 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
7195 case GE:
7196 if ((const_val & max_val)
7197 != (((HOST_WIDE_INT) 1
7198 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7199 code = GT, op1 = GEN_INT (const_val - 1);
7200 break;
7201
7202 case LEU:
7203 if (uconst_val < max_val)
7204 code = LTU, op1 = GEN_INT (uconst_val + 1);
7205 break;
7206
7207 case GEU:
7208 if (uconst_val != 0)
7209 code = GTU, op1 = GEN_INT (uconst_val - 1);
7210 break;
7211
7212 default:
7213 break;
7214 }
7215 }
7216
7217 /* If this was floating-point and we reversed anything other than an
7218 EQ or NE, return zero. */
7219 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7220 && did_reverse_condition && code != NE && code != EQ
7221 && ! flag_fast_math
7222 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7223 return 0;
7224
7225 #ifdef HAVE_cc0
7226 /* Never return CC0; return zero instead. */
7227 if (op0 == cc0_rtx)
7228 return 0;
7229 #endif
7230
7231 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7232 }
7233
7234 /* Similar to above routine, except that we also put an invariant last
7235 unless both operands are invariants. */
7236
7237 rtx
7238 get_condition_for_loop (x)
7239 rtx x;
7240 {
7241 rtx comparison = get_condition (x, NULL_PTR);
7242
7243 if (comparison == 0
7244 || ! invariant_p (XEXP (comparison, 0))
7245 || invariant_p (XEXP (comparison, 1)))
7246 return comparison;
7247
7248 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7249 XEXP (comparison, 1), XEXP (comparison, 0));
7250 }
7251
7252 #ifdef HAIFA
7253 /* Analyze a loop in order to instrument it with the use of a count register.
7254 loop_start and loop_end are the first and last insns of the loop.
7255 This function works in cooperation with insert_bct ().
7256 loop_can_insert_bct[loop_num] is set according to whether the optimization
7257 is applicable to the loop. When it is applicable, the following variables
7258 are also set:
7259 loop_start_value[loop_num]
7260 loop_comparison_value[loop_num]
7261 loop_increment[loop_num]
7262 loop_comparison_code[loop_num] */
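
/* For example (hypothetical source loop): "for (i = 0; i < n; i++)" with
   loop-invariant n would yield loop_start_value = 0, loop_increment = 1,
   loop_comparison_value = n and loop_comparison_code = LT.  */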
7263
7264 #ifdef HAVE_decrement_and_branch_on_count
7265 static void
7266 analyze_loop_iterations (loop_start, loop_end)
7267 rtx loop_start, loop_end;
7268 {
7269 rtx comparison, comparison_value;
7270 rtx iteration_var, initial_value, increment;
7271 enum rtx_code comparison_code;
7272
7273 rtx last_loop_insn;
7274 rtx insn;
7275 int i;
7276
7277 /* loop_variable mode */
7278 enum machine_mode original_mode;
7279
7280 /* find the number of the loop */
7281 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7282
7283   /* assume the loop cannot be instrumented; we change our mind only when we are sure it can */
7284 loop_can_insert_bct[loop_num] = 0;
7285
7286   /* is the optimization suppressed? */
7287   if (! flag_branch_on_count_reg)
7288 return;
7289
7290 /* make sure that count-reg is not in use */
7291 if (loop_used_count_register[loop_num]){
7292 if (loop_dump_stream)
7293 fprintf (loop_dump_stream,
7294 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7295 loop_num);
7296 return;
7297 }
7298
7299 /* make sure that the function has no indirect jumps. */
7300 if (indirect_jump_in_function){
7301 if (loop_dump_stream)
7302 fprintf (loop_dump_stream,
7303 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7304 loop_num);
7305 return;
7306 }
7307
7308 /* make sure that the last loop insn is a conditional jump */
7309 last_loop_insn = PREV_INSN (loop_end);
7310 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7311 if (loop_dump_stream)
7312 fprintf (loop_dump_stream,
7313 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7314 loop_num);
7315 return;
7316 }
7317
7318 /* First find the iteration variable. If the last insn is a conditional
7319 branch, and the insn preceding it tests a register value, make that
7320 register the iteration variable. */
7321
7322 /* We used to use prev_nonnote_insn here, but that fails because it might
7323 accidentally get the branch for a contained loop if the branch for this
7324 loop was deleted. We can only trust branches immediately before the
7325 loop_end. */
7326
7327 comparison = get_condition_for_loop (last_loop_insn);
7328 /* ??? Get_condition may switch position of induction variable and
7329 invariant register when it canonicalizes the comparison. */
7330
7331 if (comparison == 0) {
7332 if (loop_dump_stream)
7333 fprintf (loop_dump_stream,
7334 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7335 loop_num);
7336 return;
7337 }
7338
7339 comparison_code = GET_CODE (comparison);
7340 iteration_var = XEXP (comparison, 0);
7341 comparison_value = XEXP (comparison, 1);
7342
7343 original_mode = GET_MODE (iteration_var);
7344 if (GET_MODE_CLASS (original_mode) != MODE_INT
7345 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7346 if (loop_dump_stream)
7347 fprintf (loop_dump_stream,
7348 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7349 loop_num);
7350 return;
7351 }
7352
7353 /* get info about loop bounds and increment */
7354 iteration_info (iteration_var, &initial_value, &increment,
7355 loop_start, loop_end);
7356
7357 /* make sure that all required loop data were found */
7358 if (!(initial_value && increment && comparison_value
7359 && invariant_p (comparison_value) && invariant_p (increment)
7360 && ! indirect_jump_in_function))
7361 {
7362 if (loop_dump_stream) {
7363 fprintf (loop_dump_stream,
7364 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7365 if (!(initial_value && increment && comparison_value)) {
7366 fprintf (loop_dump_stream, "\tbounds not available: ");
7367 if ( ! initial_value )
7368 fprintf (loop_dump_stream, "initial ");
7369 if ( ! increment )
7370 fprintf (loop_dump_stream, "increment ");
7371 if ( ! comparison_value )
7372 fprintf (loop_dump_stream, "comparison ");
7373 fprintf (loop_dump_stream, "\n");
7374 }
7375 if (!invariant_p (comparison_value) || !invariant_p (increment))
7376 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7377 }
7378 return;
7379 }
7380
7381 /* make sure that the increment is constant */
7382 if (GET_CODE (increment) != CONST_INT) {
7383 if (loop_dump_stream)
7384 fprintf (loop_dump_stream,
7385 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7386 loop_num);
7387 return;
7388 }
7389
7390   /* make sure that the loop contains neither a function call nor a jump
7391      table (the count register might be altered by the called function, and
7392      might be used for a jump-table branch). */
7393 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7394 if (GET_CODE (insn) == CALL_INSN){
7395 if (loop_dump_stream)
7396 fprintf (loop_dump_stream,
7397 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7398 loop_num);
7399 return;
7400 }
7401
7402 if (GET_CODE (insn) == JUMP_INSN
7403 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7404 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7405 if (loop_dump_stream)
7406 fprintf (loop_dump_stream,
7407 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7408 loop_num);
7409 return;
7410 }
7411 }
7412
7413   /* At this point, we are sure that the loop can be instrumented with BCT.
7414      Some of the loops, however, will not be instrumented; the final decision
7415      is taken by insert_bct () */
7416 if (loop_dump_stream)
7417 fprintf (loop_dump_stream,
7418 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7419 loop_num);
7420
7421   /* mark this loop and all enclosing loops as unable to use the count register */
7422 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7423 marking here may prevent instrumenting an enclosing loop that could
7424 actually be instrumented. But since this is rare, it is safer to mark
7425 here in case the order of calling (analyze/insert)_bct would be changed. */
7426 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7427 loop_used_count_register[i] = 1;
7428
7429 /* Set data structures which will be used by the instrumentation phase */
7430 loop_start_value[loop_num] = initial_value;
7431 loop_comparison_value[loop_num] = comparison_value;
7432 loop_increment[loop_num] = increment;
7433 loop_comparison_code[loop_num] = comparison_code;
7434 loop_can_insert_bct[loop_num] = 1;
7435 }
7436
7437
7438 /* instrument loop for insertion of the bct instruction.  We distinguish
7439    between loops with compile-time bounds and loops with run-time bounds.
7440    The loop behavior is analyzed according to the following
7441    characteristics/variables:
7442    ; Input variables:
7443    ;   comparison-value: the value to which the iteration counter is compared.
7444    ;   initial-value: iteration-counter initial value.
7445    ;   increment: iteration-counter increment.
7446    ; Computed variables:
7447    ;   increment-direction: the sign of the increment.
7448    ;   compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
7449    ;   range-direction: sign (comparison-value - initial-value)
7450    We give up on the following cases: loop variable overflow, and
7451    run-time loop bounds with comparison code NE.
7452 */
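
/* For example (hypothetical): "for (i = n; i > 0; i--)" gives
   increment-direction = -1 and compare-direction = -1 (GT); the directions
   agree, so the loop cannot terminate by overflow and remains a candidate
   for instrumentation.  */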
7453
7454 static void
7455 insert_bct (loop_start, loop_end)
7456 rtx loop_start, loop_end;
7457 {
7458 rtx initial_value, comparison_value, increment;
7459 enum rtx_code comparison_code;
7460
7461 int increment_direction, compare_direction;
7462 int unsigned_p = 0;
7463
7464   /* if the loop condition is <= or >=, the number of iterations
7465      is 1 more than the range of the bounds of the loop */
7466 int add_iteration = 0;
7467
7468   /* the only machine mode we work with is the word-sized integer
7469      mode of the machine (here SImode) */
7470 enum machine_mode loop_var_mode = SImode;
7471
7472 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7473
7474 /* get loop-variables. No need to check that these are valid - already
7475 checked in analyze_loop_iterations (). */
7476 comparison_code = loop_comparison_code[loop_num];
7477 initial_value = loop_start_value[loop_num];
7478 comparison_value = loop_comparison_value[loop_num];
7479 increment = loop_increment[loop_num];
7480
7481 /* check analyze_loop_iterations decision for this loop. */
7482 if (! loop_can_insert_bct[loop_num]){
7483 if (loop_dump_stream)
7484 fprintf (loop_dump_stream,
7485 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7486 loop_num);
7487 return;
7488 }
7489
7490   /* It's impossible to instrument a completely unrolled loop. */
7491 if (loop_unroll_factor [loop_num] == -1)
7492 return;
7493
7494   /* make sure that the last loop insn is a conditional jump.
7495      This check is repeated from analyze_loop_iterations (),
7496      because unrolling might have changed that. */
7497 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7498 || !condjump_p (PREV_INSN (loop_end))) {
7499 if (loop_dump_stream)
7500 fprintf (loop_dump_stream,
7501 "insert_bct: not instrumenting BCT because of invalid branch\n");
7502 return;
7503 }
7504
7505 /* fix increment in case loop was unrolled. */
7506 if (loop_unroll_factor [loop_num] > 1)
7507 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
7508
7509 /* determine properties and directions of the loop */
7510 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
7511 switch ( comparison_code ) {
7512 case LEU:
7513 unsigned_p = 1;
7514 /* fallthrough */
7515 case LE:
7516 compare_direction = 1;
7517 add_iteration = 1;
7518 break;
7519 case GEU:
7520 unsigned_p = 1;
7521 /* fallthrough */
7522 case GE:
7523 compare_direction = -1;
7524 add_iteration = 1;
7525 break;
7526 case EQ:
7527 /* in this case we cannot know the number of iterations */
7528 if (loop_dump_stream)
7529 fprintf (loop_dump_stream,
7530 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7531 loop_num);
7532 return;
7533 case LTU:
7534 unsigned_p = 1;
7535 /* fallthrough */
7536 case LT:
7537 compare_direction = 1;
7538 break;
7539 case GTU:
7540 unsigned_p = 1;
7541 /* fallthrough */
7542 case GT:
7543 compare_direction = -1;
7544 break;
7545 case NE:
7546 compare_direction = 0;
7547 break;
7548 default:
7549 abort ();
7550 }
7551
7552
7553 /* make sure that the loop does not end by an overflow */
7554 if (compare_direction != increment_direction) {
7555 if (loop_dump_stream)
7556 fprintf (loop_dump_stream,
7557 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7558 loop_num);
7559 return;
7560 }
7561
7562 /* try to instrument the loop. */
7563
7564 /* Handle the simpler case, where the bounds are known at compile time. */
7565 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7566 {
7567 int n_iterations;
7568 int increment_value_abs = INTVAL (increment) * increment_direction;
7569
7570 /* check the relation between compare-val and initial-val */
7571 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7572 int range_direction = (difference > 0) ? 1 : -1;
7573
7574 /* make sure the loop executes enough iterations to gain from BCT */
7575 if (difference > -3 && difference < 3) {
7576 if (loop_dump_stream)
7577 fprintf (loop_dump_stream,
7578 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7579 loop_num);
7580 return;
7581 }
7582
7583 /* make sure that the loop executes at least once */
7584 if ((range_direction == 1 && compare_direction == -1)
7585 || (range_direction == -1 && compare_direction == 1))
7586 {
7587 if (loop_dump_stream)
7588 fprintf (loop_dump_stream,
7589 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7590 loop_num);
7591 return;
7592 }
7593
7594       /* make sure that the loop does not terminate by overflow (with
7595 	 compile-time bounds we need an additional overflow check, because
7596 	 here we also support the compare code 'NE'). */
7597 if (comparison_code == NE
7598 && increment_direction != range_direction) {
7599 if (loop_dump_stream)
7600 fprintf (loop_dump_stream,
7601 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7602 loop_num);
7603 return;
7604 }
7605
7606 /* Determine the number of iterations by:
7607 ;
7608 ; compare-val - initial-val + (increment -1) + additional-iteration
7609 ; num_iterations = -----------------------------------------------------------------
7610 ; increment
7611 */
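      /* Worked example (hypothetical values): for a source loop
	 "for (i = 0; i <= 9; i += 2)" we have difference = 9,
	 increment_value_abs = 2 and add_iteration = 1, giving
	 n_iterations = (9 + (2-1) + 1) / 2 = 11 / 2 = 5, which matches
	 the iterations i = 0, 2, 4, 6, 8.  */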
7612 difference = (range_direction > 0) ? difference : -difference;
7613 #if 0
7614 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7615 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7616 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7617 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7618 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7619 #endif
7620
7621 if (increment_value_abs == 0) {
7622 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7623 abort ();
7624 }
7625 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7626 / increment_value_abs;
7627
7628 #if 0
7629 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7630 #endif
7631 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7632
7633 /* Done with this loop. */
7634 return;
7635 }
7636
7637   /* Handle the more complex case, where the bounds are NOT known at compile time.
7638      In this case we generate a run-time calculation of the number of iterations. */
7639
7640 /* With runtime bounds, if the compare is of the form '!=' we give up */
7641 if (comparison_code == NE) {
7642 if (loop_dump_stream)
7643 fprintf (loop_dump_stream,
7644 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7645 loop_num);
7646 return;
7647 }
7648
7649 else {
7650     /* We rely on the existence of the run-time guard to ensure that the
7651        loop executes at least once. */
7652 rtx sequence;
7653 rtx iterations_num_reg;
7654
7655 int increment_value_abs = INTVAL (increment) * increment_direction;
7656
7657     /* make sure that the increment is a power of two; otherwise an
7658        (expensive) divide would be needed. */
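    /* (exact_log2 returns the base-2 logarithm of its argument when the
       argument is a power of two and -1 otherwise, e.g. exact_log2 (8) == 3
       but exact_log2 (6) == -1.)  */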
7659 if (exact_log2 (increment_value_abs) == -1)
7660 {
7661 if (loop_dump_stream)
7662 fprintf (loop_dump_stream,
7663 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7664 return;
7665 }
7666
7667 /* compute the number of iterations */
7668 start_sequence ();
7669 {
7670 rtx temp_reg;
7671
7672 /* Again, the number of iterations is calculated by:
7673 ;
7674 ; compare-val - initial-val + (increment -1) + additional-iteration
7675 ; num_iterations = -----------------------------------------------------------------
7676 ; increment
7677 */
7678 /* ??? Do we have to call copy_rtx here before passing rtx to
7679 expand_binop? */
7680 if (compare_direction > 0) {
7681       /* <, <=: the loop variable is increasing */
7682 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7683 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7684 }
7685 else {
7686 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7687 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7688 }
7689
7690 if (increment_value_abs - 1 + add_iteration != 0)
7691 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7692 GEN_INT (increment_value_abs - 1 + add_iteration),
7693 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7694
7695 if (increment_value_abs != 1)
7696 {
7697 /* ??? This will generate an expensive divide instruction for
7698 most targets. The original authors apparently expected this
7699 to be a shift, since they test for power-of-2 divisors above,
7700 but just naively generating a divide instruction will not give
7701 a shift. It happens to work for the PowerPC target because
7702 the rs6000.md file has a divide pattern that emits shifts.
7703 It will probably not work for any other target. */
7704 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7705 temp_reg,
7706 GEN_INT (increment_value_abs),
7707 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7708 }
7709 else
7710 iterations_num_reg = temp_reg;
7711 }
7712 sequence = gen_sequence ();
7713 end_sequence ();
7714 emit_insn_before (sequence, loop_start);
7715 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7716 }
7717 }
7718
7719 /* instrument loop by inserting a bct in it.  This is done as follows:
7720    1. A new register is created and assigned the hard register number of
7721       the count register.
7722    2. In the head of the loop the new register is initialized with the
7723       value passed in the loop_num_iterations parameter.
7724    3. At the end of the loop, a comparison of the register with 0 is
7725       generated, following the pattern defined for the
7726       decrement_and_branch_on_count insn, so that insn will be generated
7727       in the assembly generation phase.
7728    4. The compare&branch on the old variable is deleted, so if the loop
7729       variable was not used elsewhere, it will be eliminated by data-flow analysis. */
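
/* Illustration only (hypothetical source-level view of the transformation):

     before:                           after:
       i = init;                         ctr = n_iterations;
     top:                              top:
       body;                             body;
       i = i + inc;                      i = i + inc;
       if (i < limit) goto top;          if (--ctr != 0) goto top;

   where the decrement-compare-branch in the "after" column is emitted as a
   single decrement_and_branch_on_count insn, and the now-dead update of i
   can later be removed by data-flow analysis if i has no other uses.  */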
7730
7731 static void
7732 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7733 rtx loop_start, loop_end;
7734 rtx loop_num_iterations;
7735 {
7736 rtx temp_reg1, temp_reg2;
7737 rtx start_label;
7738
7739 rtx sequence;
7740 enum machine_mode loop_var_mode = SImode;
7741
7742 if (HAVE_decrement_and_branch_on_count)
7743 {
7744 if (loop_dump_stream)
7745 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7746
7747       /* eliminate the check on the old variable: the conditional jump and the compare insn that feeds it */
7748 delete_insn (PREV_INSN (loop_end));
7749 delete_insn (PREV_INSN (loop_end));
7750
7751 /* insert the label which will delimit the start of the loop */
7752 start_label = gen_label_rtx ();
7753 emit_label_after (start_label, loop_start);
7754
7755 /* insert initialization of the count register into the loop header */
7756 start_sequence ();
7757 temp_reg1 = gen_reg_rtx (loop_var_mode);
7758 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7759
7760       /* this will be the count register */
7761 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7762       /* we have to move the value to the count register from a GPR,
7763 	 because the rtx pointed to by loop_num_iterations could contain
7764 	 an expression which cannot be moved into the count register */
7765 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7766
7767 sequence = gen_sequence ();
7768 end_sequence ();
7769 emit_insn_after (sequence, loop_start);
7770
7771       /* insert a new comparison on the count register instead of the
7772 	 old one, generating the needed BCT pattern (that will later be
7773 	 recognized by the assembly generation phase). */
7774 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7775 loop_end);
7776 LABEL_NUSES (start_label)++;
7777 }
7778
7779 }
7780 #endif /* HAVE_decrement_and_branch_on_count */
7781
7782 #endif /* HAIFA */
7783
7784 /* Scan the function and determine whether it has indirect (computed) jumps.
7785
7786 This is taken mostly from flow.c; similar code exists elsewhere
7787 in the compiler. It may be useful to put this into rtlanal.c. */
7788 static int
7789 indirect_jump_in_function_p (start)
7790 rtx start;
7791 {
7792 rtx insn;
7793
7794 for (insn = start; insn; insn = NEXT_INSN (insn))
7795 if (computed_jump_p (insn))
7796 return 1;
7797
7798 return 0;
7799 }