loop.c (add_label_notes): Do not ignore references to labels before dispatch tables.
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
36
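/* Illustrative sketch, not part of the pass itself: for a source loop
   such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   invariant motion hoists the computation of x * y in front of the
   loop, and strength reduction turns the multiplication hidden in the
   address a + i * sizeof (*a) into a pointer that is bumped by
   sizeof (*a) on each iteration, roughly:

	t = x * y;
	for (p = a; p < a + n; p++)
	  *p = t;

   All names here (a, i, n, x, y, t, p) are hypothetical.  */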
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "obstack.h"
41 #include "expr.h"
42 #include "insn-config.h"
43 #include "insn-flags.h"
44 #include "regs.h"
45 #include "hard-reg-set.h"
46 #include "recog.h"
47 #include "flags.h"
48 #include "real.h"
49 #include "loop.h"
50 #include "except.h"
51 #include "toplev.h"
52
53 /* Vector mapping INSN_UIDs to luids.
54 The luids are like uids but always increase monotonically.
55 We use them to see whether a jump comes from outside a given loop. */
56
57 int *uid_luid;
58
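#if 0
/* A hedged sketch (disabled, illustration only) of the kind of test
   the luids make possible; LOOP_START and LOOP_END stand for a loop's
   boundary insns, and INSN_LUID is the uid_luid lookup from loop.h.  */
static int
jump_leaves_loop_p (jump, loop_start, loop_end)
     rtx jump, loop_start, loop_end;
{
  rtx label = JUMP_LABEL (jump);
  return (label == 0
	  || INSN_LUID (label) < INSN_LUID (loop_start)
	  || INSN_LUID (label) >= INSN_LUID (loop_end));
}
#endif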
59 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
60 number the insn is contained in. */
61
62 int *uid_loop_num;
63
64 /* 1 + largest uid of any insn. */
65
66 int max_uid_for_loop;
67
68 /* 1 + luid of last insn. */
69
70 static int max_luid;
71
72 /* Number of loops detected in current function. Used as index to the
73 next few tables. */
74
75 static int max_loop_num;
76
77 /* Indexed by loop number, contains the first and last insn of each loop. */
78
79 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
80
81 /* For each loop, gives the containing loop number, -1 if none. */
82
83 int *loop_outer_loop;
84
85 #ifdef HAIFA
86 /* The main output of analyze_loop_iterations is placed here */
87
88 int *loop_can_insert_bct;
89
90 /* For each loop, records whether any of its inner loops has used the
91 count register. */
92
93 int *loop_used_count_register;
94
95 /* Loop parameters for arithmetic loops. These loops have a loop variable
96 which is initialized to loop_start_value, incremented in each iteration
97 by "loop_increment". At the end of the iteration the loop variable is
98 compared to the loop_comparison_value (using loop_comparison_code). */
99
100 rtx *loop_increment;
101 rtx *loop_comparison_value;
102 rtx *loop_start_value;
103 enum rtx_code *loop_comparison_code;
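/* For instance, a source loop of the form

	for (i = init; i < final; i += step)

   would be summarized as loop_start_value == init, loop_increment ==
   step, loop_comparison_value == final, and loop_comparison_code == LT.
   The source-level names are illustrative only.  */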
104 #endif /* HAIFA */
105
106 /* For each loop, keep track of its unrolling factor.
107 Potential values:
108 0: unrolled,
109 1: not unrolled,
110 -1: completely unrolled,
111 >0: holds the exact unroll factor. */
112 int *loop_unroll_factor;
113
114 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
115 really a loop (an insn outside the loop branches into it). */
116
117 static char *loop_invalid;
118
119 /* Indexed by loop number, links together all LABEL_REFs which refer to
120 code labels outside the loop. Used by routines that need to know all
121 loop exits, such as final_biv_value and final_giv_value.
122
123 This does not include loop exits due to return instructions. This is
124 because all bivs and givs are pseudos, and hence must be dead after a
125 return, so the presence of a return does not affect any of the
126 optimizations that use this info. It is simpler to just not include return
127 instructions on this list. */
128
129 rtx *loop_number_exit_labels;
130
131 /* Indexed by loop number, counts the number of LABEL_REFs on
132 loop_number_exit_labels for this loop and all loops nested inside it. */
133
134 int *loop_number_exit_count;
135
136 /* Holds the number of loop iterations. It is zero if the number could not be
137 calculated. Must be unsigned since the number of iterations can
138 be as high as 2^wordsize-1. For loops with a wider iterator, this number
139 will be zero if the number of loop iterations is too large for an
140 unsigned integer to hold. */
141
142 unsigned HOST_WIDE_INT loop_n_iterations;
143
144 /* Nonzero if there is a subroutine call in the current loop. */
145
146 static int loop_has_call;
147
148 /* Nonzero if there is a volatile memory reference in the current
149 loop. */
150
151 static int loop_has_volatile;
152
153 /* loop_continue is the NOTE_INSN_LOOP_CONT of the
154 current loop. A continue statement will generate a branch to
155 NEXT_INSN (loop_continue). */
156
157 static rtx loop_continue;
158
159 /* Indexed by register number, contains the number of times the reg
160 is set during the loop being scanned.
161 During code motion, a negative value indicates a reg that has been
162 made a candidate; in particular -2 means that it is a candidate that
163 we know is equal to a constant and -1 means that it is a candidate
164 not known equal to a constant.
165 After code motion, regs moved have 0 (which is accurate now)
166 while the failed candidates have the original number of times set.
167
168 Therefore, at all times, == 0 indicates an invariant register;
169 < 0 a conditionally invariant one. */
170
171 static int *n_times_set;
172
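#if 0
/* Hedged restatement of the encoding above as predicates (disabled,
   for illustration only).  */
static int
reg_invariant_p (regno)
     int regno;
{
  return n_times_set[regno] == 0;
}

static int
reg_conditionally_invariant_p (regno)
     int regno;
{
  /* -2: candidate known equal to a constant; -1: other candidate.  */
  return n_times_set[regno] < 0;
}
#endif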
173 /* Original value of n_times_set; same except that this value
174 is not set negative for a reg whose sets have been made candidates
175 and not set to 0 for a reg that is moved. */
176
177 static int *n_times_used;
178
179 /* Indexed by register number; 1 indicates that the register
180 cannot be moved or strength reduced. */
181
182 static char *may_not_optimize;
183
184 /* Nonzero means reg N has already been moved out of one loop.
185 This reduces the desire to move it out of another. */
186
187 static char *moved_once;
188
189 /* Array of MEMs that are stored in this loop. If there are too many to fit
190 here, we just turn on unknown_address_altered. */
191
192 #define NUM_STORES 30
193 static rtx loop_store_mems[NUM_STORES];
194
195 /* Index of first available slot in above array. */
196 static int loop_store_mems_idx;
197
198 /* Nonzero if we don't know what MEMs were changed in the current loop.
199 This happens if the loop contains a call (in which case `loop_has_call'
200 will also be set) or if we store into more than NUM_STORES MEMs. */
201
202 static int unknown_address_altered;
203
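#if 0
/* Hedged sketch of the recording discipline described above; the real
   work is done by note_addr_stored, later in this file.  */
static void
record_store_mem (mem)
     rtx mem;
{
  if (loop_store_mems_idx < NUM_STORES)
    loop_store_mems[loop_store_mems_idx++] = mem;
  else
    /* Too many distinct stores to track individually.  */
    unknown_address_altered = 1;
}
#endif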
204 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
205 static int num_movables;
206
207 /* Count of memory write instructions discovered in the loop. */
208 static int num_mem_sets;
209
210 /* Number of loops contained within the current one, including itself. */
211 static int loops_enclosed;
212
213 /* Bound on pseudo register number before loop optimization.
214 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
215 int max_reg_before_loop;
216
217 /* This obstack is used in product_cheap_p to allocate its rtl. It
218 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
219 If we used the same obstack that it did, we would be deallocating
220 that array. */
221
222 static struct obstack temp_obstack;
223
224 /* This is where the pointer to the obstack being used for RTL is stored. */
225
226 extern struct obstack *rtl_obstack;
227
228 #define obstack_chunk_alloc xmalloc
229 #define obstack_chunk_free free
230 \f
231 /* During the analysis of a loop, a chain of `struct movable's
232 is made to record all the movable insns found.
233 Then the entire chain can be scanned to decide which to move. */
234
235 struct movable
236 {
237 rtx insn; /* A movable insn */
238 rtx set_src; /* The expression this reg is set from. */
239 rtx set_dest; /* The destination of this SET. */
240 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
241 of any registers used within the LIBCALL. */
242 int consec; /* Number of consecutive following insns
243 that must be moved with this one. */
244 int regno; /* The register it sets */
245 short lifetime; /* lifetime of that register;
246 may be adjusted when matching movables
247 that load the same value are found. */
248 short savings; /* Number of insns we can move for this reg,
249 including other movables that force this
250 or match this one. */
251 unsigned int cond : 1; /* 1 if only conditionally movable */
252 unsigned int force : 1; /* 1 means MUST move this insn */
253 unsigned int global : 1; /* 1 means reg is live outside this loop */
254 /* If PARTIAL is 1, GLOBAL means something different:
255 that the reg is live outside the range from where it is set
256 to the following label. */
257 unsigned int done : 1; /* 1 inhibits further processing of this */
258
259 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
260 In particular, moving it does not make it
261 invariant. */
262 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
263 load SRC, rather than copying INSN. */
264 unsigned int move_insn_first:1;/* Same as above, but for the first insn of
265 a group of consecutive sets. */
266 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
267 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
268 that we should avoid changing when clearing
269 the rest of the reg. */
270 struct movable *match; /* First entry for same value */
271 struct movable *forces; /* An insn that must be moved if this is */
272 struct movable *next;
273 };
274
275 static struct movable *the_movables;
276
277 FILE *loop_dump_stream;
278
279 /* Forward declarations. */
280
281 static void find_and_verify_loops PROTO((rtx));
282 static void mark_loop_jump PROTO((rtx, int));
283 static void prescan_loop PROTO((rtx, rtx));
284 static int reg_in_basic_block_p PROTO((rtx, rtx));
285 static int consec_sets_invariant_p PROTO((rtx, int, rtx));
286 static rtx libcall_other_reg PROTO((rtx, rtx));
287 static int labels_in_range_p PROTO((rtx, int));
288 static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
289 static void note_addr_stored PROTO((rtx, rtx));
290 static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
291 static void scan_loop PROTO((rtx, rtx, int, int));
292 #if 0
293 static void replace_call_address PROTO((rtx, rtx, rtx));
294 #endif
295 static rtx skip_consec_insns PROTO((rtx, int));
296 static int libcall_benefit PROTO((rtx));
297 static void ignore_some_movables PROTO((struct movable *));
298 static void force_movables PROTO((struct movable *));
299 static void combine_movables PROTO((struct movable *, int));
300 static int regs_match_p PROTO((rtx, rtx, struct movable *));
301 static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
302 static void add_label_notes PROTO((rtx, rtx));
303 static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
304 static int count_nonfixed_reads PROTO((rtx));
305 static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
306 static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
307 static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
308 static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
309 static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
310 static void check_final_value PROTO((struct induction *, rtx, rtx));
311 static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
312 static void update_giv_derive PROTO((rtx));
313 static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
314 static rtx simplify_giv_expr PROTO((rtx, int *));
315 static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
316 static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
317 static int check_dbra_loop PROTO((rtx, int, rtx));
318 static rtx express_from_1 PROTO((rtx, rtx, rtx));
319 static rtx express_from PROTO((struct induction *, struct induction *));
320 static rtx combine_givs_p PROTO((struct induction *, struct induction *));
321 static void combine_givs PROTO((struct iv_class *));
322 static int product_cheap_p PROTO((rtx, rtx));
323 static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
324 static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
325 static int last_use_this_basic_block PROTO((rtx, rtx));
326 static void record_initial PROTO((rtx, rtx));
327 static void update_reg_last_use PROTO((rtx, rtx));
328
329 #ifdef HAIFA
330 /* This is extern from unroll.c */
331 extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
332
333 /* Two main functions for implementing bct: the first is to be called
334 before loop unrolling, the second after. */
335 #ifdef HAVE_decrement_and_branch_on_count
336 static void analyze_loop_iterations PROTO((rtx, rtx));
337 static void insert_bct PROTO((rtx, rtx));
338
339 /* Auxiliary function that inserts the bct pattern into the loop */
340 static void instrument_loop_bct PROTO((rtx, rtx, rtx));
341 #endif /* HAVE_decrement_and_branch_on_count */
342 #endif /* HAIFA */
343
344 /* Indirect_jump_in_function is computed once per function. */
345 int indirect_jump_in_function = 0;
346 static int indirect_jump_in_function_p PROTO((rtx));
347
348 \f
349 /* Relative gain of eliminating various kinds of operations. */
350 static int add_cost;
351 #if 0
352 static int shift_cost;
353 static int mult_cost;
354 #endif
355
356 /* Benefit penalty if a giv is not replaceable; i.e. we must emit an insn
357 to copy the value of the strength reduced giv to its original register. */
358 static int copy_cost;
359
360 /* Cost of using a register, to normalize the benefits of a giv. */
361 static int reg_address_cost;
362
363
364 void
365 init_loop ()
366 {
367 char *free_point = (char *) oballoc (1);
368 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
369
370 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
371
372 #ifdef ADDRESS_COST
373 reg_address_cost = ADDRESS_COST (reg);
374 #else
375 reg_address_cost = rtx_cost (reg, MEM);
376 #endif
377
378 /* We multiply by 2 to reconcile the difference in scale between
379 these two ways of computing costs. Otherwise the cost of a copy
380 will be far less than the cost of an add. */
381
382 copy_cost = 2 * 2;
383
384 /* Free the objects we just allocated. */
385 obfree (free_point);
386
387 /* Initialize the obstack used for rtl in product_cheap_p. */
388 gcc_obstack_init (&temp_obstack);
389 }
390 \f
391 /* Entry point of this file. Perform loop optimization
392 on the current function. F is the first insn of the function
393 and DUMPFILE is a stream for output of a trace of actions taken
394 (or 0 if none should be output). */
395
396 void
397 loop_optimize (f, dumpfile, unroll_p)
398 /* f is the first instruction of a chain of insns for one function */
399 rtx f;
400 FILE *dumpfile;
401 int unroll_p;
402 {
403 register rtx insn;
404 register int i;
405 rtx last_insn;
406
407 loop_dump_stream = dumpfile;
408
409 init_recog_no_volatile ();
410
411 max_reg_before_loop = max_reg_num ();
412
413 moved_once = (char *) alloca (max_reg_before_loop);
414 bzero (moved_once, max_reg_before_loop);
415
416 regs_may_share = 0;
417
418 /* Count the number of loops. */
419
420 max_loop_num = 0;
421 for (insn = f; insn; insn = NEXT_INSN (insn))
422 {
423 if (GET_CODE (insn) == NOTE
424 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
425 max_loop_num++;
426 }
427
428 /* Don't waste time if no loops. */
429 if (max_loop_num == 0)
430 return;
431
432 /* Get size to use for tables indexed by uids.
433 Leave some space for labels allocated by find_and_verify_loops. */
434 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
435
436 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
437 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
438
439 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
440 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
441
442 /* Allocate tables for recording each loop. We set each entry, so they need
443 not be zeroed. */
444 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
445 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
446 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
447 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
448 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
449 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
450
451 /* This is initialized by the unrolling code, so we go ahead
452 and clear it just in case we are not performing loop
453 unrolling. */
454 loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
455 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
456
457 #ifdef HAIFA
458 /* Allocate for BCT optimization */
459 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
460 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
461
462 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
463 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
464
465 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
466 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
467 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
468 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
469 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
470 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
471
472 loop_comparison_code
473 = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
474 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
475 #endif /* HAIFA */
476
477 /* Find and process each loop.
478 First, find them, and record them in order of their beginnings. */
479 find_and_verify_loops (f);
480
481 /* Now find all register lifetimes. This must be done after
482 find_and_verify_loops, because it might reorder the insns in the
483 function. */
484 reg_scan (f, max_reg_num (), 1);
485
486 /* This must occur after reg_scan so that registers created by gcse
487 will have entries in the register tables.
488
489 We could have added a call to reg_scan after gcse_main in toplev.c,
490 but moving this call to init_alias_analysis is more efficient. */
491 init_alias_analysis ();
492
493 /* See if we went too far. */
494 if (get_max_uid () > max_uid_for_loop)
495 abort ();
496 /* Now reset it to the actual size we need. See above. */
497 max_uid_for_loop = get_max_uid () + 1;
498
499 /* Compute the mapping from uids to luids.
500 LUIDs are numbers assigned to insns, like uids,
501 except that luids increase monotonically through the code.
502 Don't assign luids to line-number NOTEs, so that the distance in luids
503 between two insns is not affected by -g. */
504
505 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
506 {
507 last_insn = insn;
508 if (GET_CODE (insn) != NOTE
509 || NOTE_LINE_NUMBER (insn) <= 0)
510 uid_luid[INSN_UID (insn)] = ++i;
511 else
512 /* Give a line number note the same luid as preceding insn. */
513 uid_luid[INSN_UID (insn)] = i;
514 }
515
516 max_luid = i + 1;
517
518 /* Don't leave gaps in uid_luid for insns that have been
519 deleted. It is possible that the first or last insn
520 using some register has been deleted by cross-jumping.
521 Make sure that uid_luid for that former insn's uid
522 points to the general area where that insn used to be. */
523 for (i = 0; i < max_uid_for_loop; i++)
524 {
525 uid_luid[0] = uid_luid[i];
526 if (uid_luid[0] != 0)
527 break;
528 }
529 for (i = 0; i < max_uid_for_loop; i++)
530 if (uid_luid[i] == 0)
531 uid_luid[i] = uid_luid[i - 1];
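  /* Illustrative effect: if the insn with uid 7 had been deleted,
     uid_luid[7] inherits uid_luid[6], so luid ranges computed from
     surviving insns still bracket the area where uid 7's insn used
     to live.  The uid values here are hypothetical.  */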
532
533 /* Create a mapping from loops to BLOCK tree nodes. */
534 if (unroll_p && write_symbols != NO_DEBUG)
535 find_loop_tree_blocks ();
536
537 /* Determine if the function has an indirect jump. On some systems
538 this prevents low overhead loop instructions from being used. */
539 indirect_jump_in_function = indirect_jump_in_function_p (f);
540
541 /* Now scan the loops, last ones first, since this means inner ones are done
542 before outer ones. */
543 for (i = max_loop_num-1; i >= 0; i--)
544 if (! loop_invalid[i] && loop_number_loop_ends[i])
545 scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
546 max_reg_num (), unroll_p);
547
548 /* If debugging and unrolling loops, we must replicate the tree nodes
549 corresponding to the blocks inside the loop, so that the original
550 one-to-one mapping will remain. */
551 if (unroll_p && write_symbols != NO_DEBUG)
552 unroll_block_trees ();
553
554 end_alias_analysis ();
555 }
556 \f
557 /* Optimize one loop whose start is LOOP_START and end is END.
558 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
559 NOTE_INSN_LOOP_END. */
560
561 /* ??? Could also move memory writes out of loops if the destination address
562 is invariant, the source is invariant, the memory write is not volatile,
563 and if we can prove that no read inside the loop can read this address
564 before the write occurs. If there is a read of this address after the
565 write, then we can also mark the memory read as invariant. */
566
567 static void
568 scan_loop (loop_start, end, nregs, unroll_p)
569 rtx loop_start, end;
570 int nregs;
571 int unroll_p;
572 {
573 register int i;
574 register rtx p;
575 /* 1 if we are scanning insns that could be executed zero times. */
576 int maybe_never = 0;
577 /* 1 if we are scanning insns that might never be executed
578 due to a subroutine call which might exit before they are reached. */
579 int call_passed = 0;
580 /* For a rotated loop that is entered near the bottom,
581 this is the label at the top. Otherwise it is zero. */
582 rtx loop_top = 0;
583 /* Jump insn that enters the loop, or 0 if control drops in. */
584 rtx loop_entry_jump = 0;
585 /* Place in the loop where control enters. */
586 rtx scan_start;
587 /* Number of insns in the loop. */
588 int insn_count;
589 int in_libcall = 0;
590 int tem;
591 rtx temp;
592 /* The SET from an insn, if it is the only SET in the insn. */
593 rtx set, set1;
594 /* Chain describing insns movable in current loop. */
595 struct movable *movables = 0;
596 /* Last element in `movables' -- so we can add elements at the end. */
597 struct movable *last_movable = 0;
598 /* Ratio of extra register life span we can justify
599 for saving an instruction. More if loop doesn't call subroutines
600 since in that case saving an insn makes more difference
601 and more registers are available. */
602 int threshold;
603 /* If we have calls, contains the insn in which a register was used
604 if it was used exactly once; contains const0_rtx if it was used more
605 than once. */
606 rtx *reg_single_usage = 0;
607 /* Nonzero if we are scanning instructions in a sub-loop. */
608 int loop_depth = 0;
609
610 n_times_set = (int *) alloca (nregs * sizeof (int));
611 n_times_used = (int *) alloca (nregs * sizeof (int));
612 may_not_optimize = (char *) alloca (nregs);
613
614 /* Determine whether this loop starts with a jump down to a test at
615 the end. This will occur for a small number of loops with a test
616 that is too complex to duplicate in front of the loop.
617
618 We search for the first insn or label in the loop, skipping NOTEs.
619 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
620 (because we might have a loop executed only once that contains a
621 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
622 (in case we have a degenerate loop).
623
624 Note that if we mistakenly think that a loop is entered at the top
625 when, in fact, it is entered at the exit test, the only effect will be
626 slightly poorer optimization. Making the opposite error can generate
627 incorrect code. Since very few loops now start with a jump to the
628 exit test, the code here to detect that case is very conservative. */
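  /* For example, a source loop

	while (test) body;

     is often emitted in rotated form:

	NOTE_INSN_LOOP_BEG
	(jump to L2)
	L1: body
	L2: exit test; conditional jump back to L1
	NOTE_INSN_LOOP_END

     so the first real insn after the loop-begin note is an
     unconditional JUMP_INSN whose target lies inside the loop.  The
     layout shown is only illustrative.  */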
629
630 for (p = NEXT_INSN (loop_start);
631 p != end
632 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
633 && (GET_CODE (p) != NOTE
634 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
635 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
636 p = NEXT_INSN (p))
637 ;
638
639 scan_start = p;
640
641 /* Set up variables describing this loop. */
642 prescan_loop (loop_start, end);
643 threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
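  /* Illustrative arithmetic: with, say, 29 non-fixed registers and no
     call in the loop, THRESHOLD is 2 * (1 + 29) = 60 luids of extra
     register life span per insn saved; a loop containing calls gets
     half that.  The register count is hypothetical and
     target-dependent.  */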
644
645 /* If loop has a jump before the first label,
646 the true entry is the target of that jump.
647 Start scan from there.
648 But record in LOOP_TOP the place where the end-test jumps
649 back to so we can scan that after the end of the loop. */
650 if (GET_CODE (p) == JUMP_INSN)
651 {
652 loop_entry_jump = p;
653
654 /* Loop entry must be an unconditional jump (and not a RETURN). */
655 if (simplejump_p (p)
656 && JUMP_LABEL (p) != 0
657 /* Check to see whether the jump actually
658 jumps out of the loop (meaning it's not really a loop).
659 This case can happen for things like
660 do {..} while (0). If this label was generated previously
661 by loop, we can't tell anything about it and have to reject
662 the loop. */
663 && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
664 && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
665 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
666 {
667 loop_top = next_label (scan_start);
668 scan_start = JUMP_LABEL (p);
669 }
670 }
671
672 /* If SCAN_START was an insn created by loop, we don't know its luid
673 as required by loop_reg_used_before_p. So skip such loops. (This
674 test may never be true, but it's best to play it safe.)
675
676 Also, skip loops where we do not start scanning at a label. This
677 test also rejects loops starting with a JUMP_INSN that failed the
678 test above. */
679
680 if (INSN_UID (scan_start) >= max_uid_for_loop
681 || GET_CODE (scan_start) != CODE_LABEL)
682 {
683 if (loop_dump_stream)
684 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
685 INSN_UID (loop_start), INSN_UID (end));
686 return;
687 }
688
689 /* Count number of times each reg is set during this loop.
690 Set may_not_optimize[I] if it is not safe to move out
691 the setting of register I. If this loop has calls, set
692 reg_single_usage[I]. */
693
694 bzero ((char *) n_times_set, nregs * sizeof (int));
695 bzero (may_not_optimize, nregs);
696
697 if (loop_has_call)
698 {
699 reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
700 bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
701 }
702
703 count_loop_regs_set (loop_top ? loop_top : loop_start, end,
704 may_not_optimize, reg_single_usage, &insn_count, nregs);
705
706 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
707 may_not_optimize[i] = 1, n_times_set[i] = 1;
708 bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
709
710 if (loop_dump_stream)
711 {
712 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
713 INSN_UID (loop_start), INSN_UID (end), insn_count);
714 if (loop_continue)
715 fprintf (loop_dump_stream, "Continue at insn %d.\n",
716 INSN_UID (loop_continue));
717 }
718
719 /* Scan through the loop finding insns that are safe to move.
720 Set n_times_set negative for the reg being set, so that
721 this reg will be considered invariant for subsequent insns.
722 We consider whether subsequent insns use the reg
723 in deciding whether it is worth actually moving.
724
725 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
726 and therefore it is possible that the insns we are scanning
727 would never be executed. At such times, we must make sure
728 that it is safe to execute the insn once instead of zero times.
729 When MAYBE_NEVER is 0, all insns will be executed at least once
730 so that is not a problem. */
731
732 p = scan_start;
733 while (1)
734 {
735 p = NEXT_INSN (p);
736 /* At end of a straight-in loop, we are done.
737 At end of a loop entered at the bottom, scan the top. */
738 if (p == scan_start)
739 break;
740 if (p == end)
741 {
742 if (loop_top != 0)
743 p = loop_top;
744 else
745 break;
746 if (p == scan_start)
747 break;
748 }
749
750 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
751 && find_reg_note (p, REG_LIBCALL, NULL_RTX))
752 in_libcall = 1;
753 else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
754 && find_reg_note (p, REG_RETVAL, NULL_RTX))
755 in_libcall = 0;
756
757 if (GET_CODE (p) == INSN
758 && (set = single_set (p))
759 && GET_CODE (SET_DEST (set)) == REG
760 && ! may_not_optimize[REGNO (SET_DEST (set))])
761 {
762 int tem1 = 0;
763 int tem2 = 0;
764 int move_insn = 0;
765 rtx src = SET_SRC (set);
766 rtx dependencies = 0;
767
768 /* Figure out what to use as a source of this insn. If a REG_EQUIV
769 note is given or if a REG_EQUAL note with a constant operand is
770 specified, use it as the source and mark that we should move
771 this insn by calling emit_move_insn rather than duplicating the
772 insn.
773
774 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
775 is present. */
776 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
777 if (temp)
778 src = XEXP (temp, 0), move_insn = 1;
779 else
780 {
781 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
782 if (temp && CONSTANT_P (XEXP (temp, 0)))
783 src = XEXP (temp, 0), move_insn = 1;
784 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
785 {
786 src = XEXP (temp, 0);
787 /* A libcall block can use regs that don't appear in
788 the equivalent expression. To move the libcall,
789 we must move those regs too. */
790 dependencies = libcall_other_reg (p, src);
791 }
792 }
793
794 /* Don't try to optimize a register that was made
795 by loop-optimization for an inner loop.
796 We don't know its life-span, so we can't compute the benefit. */
797 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
798 ;
799 /* In order to move a register, we need to have one of three cases:
800 (1) it is used only in the same basic block as the set
801 (2) it is not a user variable and it is not used in the
802 exit test (this can cause the variable to be used
803 before it is set just like a user-variable).
804 (3) the set is guaranteed to be executed once the loop starts,
805 and the reg is not used until after that. */
806 else if (! ((! maybe_never
807 && ! loop_reg_used_before_p (set, p, loop_start,
808 scan_start, end))
809 || (! REG_USERVAR_P (SET_DEST (set))
810 && ! REG_LOOP_TEST_P (SET_DEST (set)))
811 || reg_in_basic_block_p (p, SET_DEST (set))))
812 ;
813 else if ((tem = invariant_p (src))
814 && (dependencies == 0
815 || (tem2 = invariant_p (dependencies)) != 0)
816 && (n_times_set[REGNO (SET_DEST (set))] == 1
817 || (tem1
818 = consec_sets_invariant_p (SET_DEST (set),
819 n_times_set[REGNO (SET_DEST (set))],
820 p)))
821 /* If the insn can cause a trap (such as divide by zero),
822 can't move it unless it's guaranteed to be executed
823 once loop is entered. Even a function call might
824 prevent the trap insn from being reached
825 (since it might exit!) */
826 && ! ((maybe_never || call_passed)
827 && may_trap_p (src)))
828 {
829 register struct movable *m;
830 register int regno = REGNO (SET_DEST (set));
831
832 /* A potential lossage is the case where two insns
833 can be combined as long as they are both in the loop, but
834 we move one of them outside the loop. For large loops,
835 this can lose. The most common case of this is the address
836 of a function being called.
837
838 Therefore, if this register is marked as being used exactly
839 once if we are in a loop with calls (a "large loop"), see if
840 we can replace the usage of this register with the source
841 of this SET. If we can, delete this insn.
842
843 Don't do this if P has a REG_RETVAL note or if we have
844 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
845
846 if (reg_single_usage && reg_single_usage[regno] != 0
847 && reg_single_usage[regno] != const0_rtx
848 && REGNO_FIRST_UID (regno) == INSN_UID (p)
849 && (REGNO_LAST_UID (regno)
850 == INSN_UID (reg_single_usage[regno]))
851 && n_times_set[REGNO (SET_DEST (set))] == 1
852 && ! side_effects_p (SET_SRC (set))
853 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
854 && (! SMALL_REGISTER_CLASSES
855 || (! (GET_CODE (SET_SRC (set)) == REG
856 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
857 /* This test is not redundant; SET_SRC (set) might be
858 a call-clobbered register and the life of REGNO
859 might span a call. */
860 && ! modified_between_p (SET_SRC (set), p,
861 reg_single_usage[regno])
862 && no_labels_between_p (p, reg_single_usage[regno])
863 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
864 reg_single_usage[regno]))
865 {
866 /* Replace any usage in a REG_EQUAL note. Must copy the
867 new source, so that we don't get rtx sharing between the
868 SET_SRC and REG_NOTES of insn p. */
869 REG_NOTES (reg_single_usage[regno])
870 = replace_rtx (REG_NOTES (reg_single_usage[regno]),
871 SET_DEST (set), copy_rtx (SET_SRC (set)));
872
873 PUT_CODE (p, NOTE);
874 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
875 NOTE_SOURCE_FILE (p) = 0;
876 n_times_set[regno] = 0;
877 continue;
878 }
879
880 m = (struct movable *) alloca (sizeof (struct movable));
881 m->next = 0;
882 m->insn = p;
883 m->set_src = src;
884 m->dependencies = dependencies;
885 m->set_dest = SET_DEST (set);
886 m->force = 0;
887 m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
888 m->done = 0;
889 m->forces = 0;
890 m->partial = 0;
891 m->move_insn = move_insn;
892 m->move_insn_first = 0;
893 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
894 m->savemode = VOIDmode;
895 m->regno = regno;
896 /* Set M->cond if either invariant_p or consec_sets_invariant_p
897 returned 2 (only conditionally invariant). */
898 m->cond = ((tem | tem1 | tem2) > 1);
899 m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
900 || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
901 m->match = 0;
902 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
903 - uid_luid[REGNO_FIRST_UID (regno)]);
904 m->savings = n_times_used[regno];
905 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
906 m->savings += libcall_benefit (p);
907 n_times_set[regno] = move_insn ? -2 : -1;
908 /* Add M to the end of the chain MOVABLES. */
909 if (movables == 0)
910 movables = m;
911 else
912 last_movable->next = m;
913 last_movable = m;
914
915 if (m->consec > 0)
916 {
917 /* It is possible for the first instruction to have a
918 REG_EQUAL note but a non-invariant SET_SRC, so we must
919 remember the status of the first instruction in case
920 the last instruction doesn't have a REG_EQUAL note. */
921 m->move_insn_first = m->move_insn;
922
923 /* Skip this insn, not checking REG_LIBCALL notes. */
924 p = next_nonnote_insn (p);
925 /* Skip the consecutive insns, if there are any. */
926 p = skip_consec_insns (p, m->consec);
927 /* Back up to the last insn of the consecutive group. */
928 p = prev_nonnote_insn (p);
929
930 /* We must now reset m->move_insn, m->is_equiv, and possibly
931 m->set_src to correspond to the effects of all the
932 insns. */
933 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
934 if (temp)
935 m->set_src = XEXP (temp, 0), m->move_insn = 1;
936 else
937 {
938 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
939 if (temp && CONSTANT_P (XEXP (temp, 0)))
940 m->set_src = XEXP (temp, 0), m->move_insn = 1;
941 else
942 m->move_insn = 0;
943
944 }
945 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
946 }
947 }
948 /* If this register is always set within a STRICT_LOW_PART
949 or set to zero, then its high bytes are constant.
950 So clear them outside the loop and within the loop
951 just load the low bytes.
952 We must check that the machine has an instruction to do so.
953 Also, if the value loaded into the register
954 depends on the same register, this cannot be done. */
955 else if (SET_SRC (set) == const0_rtx
956 && GET_CODE (NEXT_INSN (p)) == INSN
957 && (set1 = single_set (NEXT_INSN (p)))
958 && GET_CODE (set1) == SET
959 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
960 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
961 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
962 == SET_DEST (set))
963 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
964 {
965 register int regno = REGNO (SET_DEST (set));
966 if (n_times_set[regno] == 2)
967 {
968 register struct movable *m;
969 m = (struct movable *) alloca (sizeof (struct movable));
970 m->next = 0;
971 m->insn = p;
972 m->set_dest = SET_DEST (set);
973 m->dependencies = 0;
974 m->force = 0;
975 m->consec = 0;
976 m->done = 0;
977 m->forces = 0;
978 m->move_insn = 0;
979 m->move_insn_first = 0;
980 m->partial = 1;
981 /* If the insn may not be executed on some cycles,
982 we can't clear the whole reg; clear just high part.
983 Not even if the reg is used only within this loop.
984 Consider this:
985 while (1)
986 while (s != t) {
987 if (foo ()) x = *s;
988 use (x);
989 }
990 Clearing x before the inner loop could clobber a value
991 being saved from the last time around the outer loop.
992 However, if the reg is not used outside this loop
993 and all uses of the register are in the same
994 basic block as the store, there is no problem.
995
996 If this insn was made by loop, we don't know its
997 INSN_LUID and hence must make a conservative
998 assumption. */
999 m->global = (INSN_UID (p) >= max_uid_for_loop
1000 || (uid_luid[REGNO_LAST_UID (regno)]
1001 > INSN_LUID (end))
1002 || (uid_luid[REGNO_FIRST_UID (regno)]
1003 < INSN_LUID (p))
1004 || (labels_in_range_p
1005 (p, uid_luid[REGNO_FIRST_UID (regno)])));
1006 if (maybe_never && m->global)
1007 m->savemode = GET_MODE (SET_SRC (set1));
1008 else
1009 m->savemode = VOIDmode;
1010 m->regno = regno;
1011 m->cond = 0;
1012 m->match = 0;
1013 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
1014 - uid_luid[REGNO_FIRST_UID (regno)]);
1015 m->savings = 1;
1016 n_times_set[regno] = -1;
1017 /* Add M to the end of the chain MOVABLES. */
1018 if (movables == 0)
1019 movables = m;
1020 else
1021 last_movable->next = m;
1022 last_movable = m;
1023 }
1024 }
1025 }
1026 /* Past a call insn, we get to insns which might not be executed
1027 because the call might exit. This matters for insns that trap.
1028 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
1029 so they don't count. */
1030 else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
1031 call_passed = 1;
1032 /* Past a label or a jump, we get to insns for which we
1033 can't count on whether or how many times they will be
1034 executed during each iteration. Therefore, we can
1035 only move out sets of trivial variables
1036 (those not used after the loop). */
1037 /* Similar code appears twice in strength_reduce. */
1038 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1039 /* If we enter the loop in the middle, and scan around to the
1040 beginning, don't set maybe_never for that. This must be an
1041 unconditional jump, otherwise the code at the top of the
1042 loop might never be executed. Unconditional jumps are
1043 followed by a barrier and then the loop end. */
1044 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
1045 && NEXT_INSN (NEXT_INSN (p)) == end
1046 && simplejump_p (p)))
1047 maybe_never = 1;
1048 else if (GET_CODE (p) == NOTE)
1049 {
1050 /* At the virtual top of a converted loop, insns are again known to
1051 be executed: logically, the loop begins here even though the exit
1052 code has been duplicated. */
1053 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1054 maybe_never = call_passed = 0;
1055 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1056 loop_depth++;
1057 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1058 loop_depth--;
1059 }
1060 }
1061
1062 /* If one movable subsumes another, ignore that other. */
1063
1064 ignore_some_movables (movables);
1065
1066 /* For each movable insn, see if the reg that it loads
1067 dies in another conditionally movable insn.
1068 If so, record that the second insn "forces" the first one,
1069 since the second can be moved only if the first is. */
1070
1071 force_movables (movables);
1072
1073 /* See if there are multiple movable insns that load the same value.
1074 If there are, make all but the first point at the first one
1075 through the `match' field, and add the priorities of them
1076 all together as the priority of the first. */
1077
1078 combine_movables (movables, nregs);
1079
1080 /* Now consider each movable insn to decide whether it is worth moving.
1081 Store 0 in n_times_set for each reg that is moved.
1082
1083 Generally this increases code size, so do not move movables when
1084 optimizing for code size. */
1085
1086 if (! optimize_size)
1087 move_movables (movables, threshold,
1088 insn_count, loop_start, end, nregs);
1089
1090 /* Now candidates that still are negative are those not moved.
1091 Change n_times_set to indicate that those are not actually invariant. */
1092 for (i = 0; i < nregs; i++)
1093 if (n_times_set[i] < 0)
1094 n_times_set[i] = n_times_used[i];
1095
1096 if (flag_strength_reduce)
1097 {
1098 the_movables = movables;
1099 strength_reduce (scan_start, end, loop_top,
1100 insn_count, loop_start, end, unroll_p);
1101 }
1102 }
1103 \f
1104 /* Add elements to *OUTPUT to record all the pseudo-regs
1105 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1106
1107 void
1108 record_excess_regs (in_this, not_in_this, output)
1109 rtx in_this, not_in_this;
1110 rtx *output;
1111 {
1112 enum rtx_code code;
1113 char *fmt;
1114 int i;
1115
1116 code = GET_CODE (in_this);
1117
1118 switch (code)
1119 {
1120 case PC:
1121 case CC0:
1122 case CONST_INT:
1123 case CONST_DOUBLE:
1124 case CONST:
1125 case SYMBOL_REF:
1126 case LABEL_REF:
1127 return;
1128
1129 case REG:
1130 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1131 && ! reg_mentioned_p (in_this, not_in_this))
1132 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1133 return;
1134
1135 default:
1136 break;
1137 }
1138
1139 fmt = GET_RTX_FORMAT (code);
1140 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1141 {
1142 int j;
1143
1144 switch (fmt[i])
1145 {
1146 case 'E':
1147 for (j = 0; j < XVECLEN (in_this, i); j++)
1148 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1149 break;
1150
1151 case 'e':
1152 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1153 break;
1154 }
1155 }
1156 }
1157 \f
1158 /* Check what regs are referred to in the libcall block ending with INSN,
1159 aside from those mentioned in the equivalent value.
1160 If there are none, return 0.
1161 If there are one or more, return an EXPR_LIST containing all of them. */
1162
1163 static rtx
1164 libcall_other_reg (insn, equiv)
1165 rtx insn, equiv;
1166 {
1167 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1168 rtx p = XEXP (note, 0);
1169 rtx output = 0;
1170
1171 /* First, find all the regs used in the libcall block
1172 that are not mentioned as inputs to the result. */
1173
1174 while (p != insn)
1175 {
1176 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1177 || GET_CODE (p) == CALL_INSN)
1178 record_excess_regs (PATTERN (p), equiv, &output);
1179 p = NEXT_INSN (p);
1180 }
1181
1182 return output;
1183 }
1184 \f
1185 /* Return 1 if all uses of REG
1186 are between INSN and the end of the basic block. */
1187
1188 static int
1189 reg_in_basic_block_p (insn, reg)
1190 rtx insn, reg;
1191 {
1192 int regno = REGNO (reg);
1193 rtx p;
1194
1195 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1196 return 0;
1197
1198 /* Search this basic block for the already recorded last use of the reg. */
1199 for (p = insn; p; p = NEXT_INSN (p))
1200 {
1201 switch (GET_CODE (p))
1202 {
1203 case NOTE:
1204 break;
1205
1206 case INSN:
1207 case CALL_INSN:
1208 /* Ordinary insn: if this is the last use, we win. */
1209 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1210 return 1;
1211 break;
1212
1213 case JUMP_INSN:
1214 /* Jump insn: if this is the last use, we win. */
1215 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1216 return 1;
1217 /* Otherwise, it's the end of the basic block, so we lose. */
1218 return 0;
1219
1220 case CODE_LABEL:
1221 case BARRIER:
1222 /* It's the end of the basic block, so we lose. */
1223 return 0;
1224
1225 default:
1226 break;
1227 }
1228 }
1229
1230 /* The "last use" doesn't follow the "first use"?? */
1231 abort ();
1232 }
1233 \f
1234 /* Compute the benefit of eliminating the insns in the block whose
1235 last insn is LAST. This may be a group of insns used to compute a
1236 value directly or can contain a library call. */
1237
1238 static int
1239 libcall_benefit (last)
1240 rtx last;
1241 {
1242 rtx insn;
1243 int benefit = 0;
1244
1245 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1246 insn != last; insn = NEXT_INSN (insn))
1247 {
1248 if (GET_CODE (insn) == CALL_INSN)
1249 benefit += 10; /* Assume at least this many insns in a library
1250 routine. */
1251 else if (GET_CODE (insn) == INSN
1252 && GET_CODE (PATTERN (insn)) != USE
1253 && GET_CODE (PATTERN (insn)) != CLOBBER)
1254 benefit++;
1255 }
1256
1257 return benefit;
1258 }
1259 \f
1260 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1261
1262 static rtx
1263 skip_consec_insns (insn, count)
1264 rtx insn;
1265 int count;
1266 {
1267 for (; count > 0; count--)
1268 {
1269 rtx temp;
1270
1271 /* If first insn of libcall sequence, skip to end. */
1272 /* Do this at start of loop, since INSN is guaranteed to
1273 be an insn here. */
1274 if (GET_CODE (insn) != NOTE
1275 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1276 insn = XEXP (temp, 0);
1277
1278 do insn = NEXT_INSN (insn);
1279 while (GET_CODE (insn) == NOTE);
1280 }
1281
1282 return insn;
1283 }
1284
1285 /* Ignore any movable whose insn falls within a libcall
1286 which is part of another movable.
1287 We make use of the fact that the movable for the libcall value
1288 was made later and so appears later on the chain. */
1289
1290 static void
1291 ignore_some_movables (movables)
1292 struct movable *movables;
1293 {
1294 register struct movable *m, *m1;
1295
1296 for (m = movables; m; m = m->next)
1297 {
1298 /* Is this a movable for the value of a libcall? */
1299 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1300 if (note)
1301 {
1302 rtx insn;
1303 /* Check for earlier movables inside that range,
1304 and mark them invalid. We cannot use LUIDs here because
1305 insns created by loop.c for prior loops don't have LUIDs.
1306 Rather than reject all such insns from movables, we just
1307 explicitly check each insn in the libcall (since invariant
1308 libcalls aren't that common). */
1309 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1310 for (m1 = movables; m1 != m; m1 = m1->next)
1311 if (m1->insn == insn)
1312 m1->done = 1;
1313 }
1314 }
1315 }
1316
1317 /* For each movable insn, see if the reg that it loads
1318 dies in another conditionally movable insn.
1319 If so, record that the second insn "forces" the first one,
1320 since the second can be moved only if the first is. */
1321
1322 static void
1323 force_movables (movables)
1324 struct movable *movables;
1325 {
1326 register struct movable *m, *m1;
1327 for (m1 = movables; m1; m1 = m1->next)
1328 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1329 if (!m1->partial && !m1->done)
1330 {
1331 int regno = m1->regno;
1332 for (m = m1->next; m; m = m->next)
1333 /* ??? Could this be a bug? What if CSE caused the
1334 register of M1 to be used after this insn?
1335 Since CSE does not update regno_last_uid,
1336 this insn M->insn might not be where it dies.
1337 But very likely this doesn't matter; what matters is
1338 that M's reg is computed from M1's reg. */
1339 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1340 && !m->done)
1341 break;
1342 if (m != 0 && m->set_src == m1->set_dest
1343 /* If m->consec, m->set_src isn't valid. */
1344 && m->consec == 0)
1345 m = 0;
1346
1347 /* Increase the priority of moving the first insn
1348 since it permits the second to be moved as well. */
1349 if (m != 0)
1350 {
1351 m->forces = m1;
1352 m1->lifetime += m->lifetime;
1353 m1->savings += m->savings;
1354 }
1355 }
1356 }
1357 \f
1358 /* Find invariant expressions that are equal and can be combined into
1359 one register. */
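/* E.g., two loop insns (set (reg 110) (const_int 64)) and
   (set (reg 133) (const_int 64)) can share one hoisted register; the
   second movable gets its `match' field pointed at the first.  The
   register numbers are made up for illustration.  */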
1360
1361 static void
1362 combine_movables (movables, nregs)
1363 struct movable *movables;
1364 int nregs;
1365 {
1366 register struct movable *m;
1367 char *matched_regs = (char *) alloca (nregs);
1368 enum machine_mode mode;
1369
1370 /* Regs that are set more than once are not allowed to match
1371 or be matched. I'm no longer sure why not. */
1372 /* Perhaps testing m->consec_sets would be more appropriate here? */
1373
1374 for (m = movables; m; m = m->next)
1375 if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
1376 {
1377 register struct movable *m1;
1378 int regno = m->regno;
1379
1380 bzero (matched_regs, nregs);
1381 matched_regs[regno] = 1;
1382
1383 /* We want later insns to match the first one. Don't make the first
1384 one match any later ones. So start this loop at m->next. */
1385 for (m1 = m->next; m1; m1 = m1->next)
1386 if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
1387 /* A reg used outside the loop mustn't be eliminated. */
1388 && !m1->global
1389 /* A reg used for zero-extending mustn't be eliminated. */
1390 && !m1->partial
1391 && (matched_regs[m1->regno]
1392 ||
1393 (
1394 /* Can combine regs with different modes loaded from the
1395 same constant only if the modes are the same or
1396 if both are integer modes with M wider or the same
1397 width as M1. The check for integer is redundant, but
1398 safe, since the only case of differing destination
1399 modes with equal sources is when both sources are
1400 VOIDmode, i.e., CONST_INT. */
1401 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1402 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1403 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1404 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1405 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1406 /* See if the source of M1 says it matches M. */
1407 && ((GET_CODE (m1->set_src) == REG
1408 && matched_regs[REGNO (m1->set_src)])
1409 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1410 movables))))
1411 && ((m->dependencies == m1->dependencies)
1412 || rtx_equal_p (m->dependencies, m1->dependencies)))
1413 {
1414 m->lifetime += m1->lifetime;
1415 m->savings += m1->savings;
1416 m1->done = 1;
1417 m1->match = m;
1418 matched_regs[m1->regno] = 1;
1419 }
1420 }
1421
1422 /* Now combine the regs used for zero-extension.
1423 This can be done for those not marked `global'
1424 provided their lives don't overlap. */
1425
1426 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1427 mode = GET_MODE_WIDER_MODE (mode))
1428 {
1429 register struct movable *m0 = 0;
1430
1431 /* Combine all the registers for extension from mode MODE.
1432 Don't combine any that are used outside this loop. */
1433 for (m = movables; m; m = m->next)
1434 if (m->partial && ! m->global
1435 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1436 {
1437 register struct movable *m1;
1438 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1439 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1440
1441 if (m0 == 0)
1442 {
1443 /* First one: don't check for overlap, just record it. */
1444 m0 = m;
1445 continue;
1446 }
1447
1448 /* Make sure they extend to the same mode.
1449 (Almost always true.) */
1450 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1451 continue;
1452
1453 /* We already have one: check for overlap with those
1454 already combined together. */
1455 for (m1 = movables; m1 != m; m1 = m1->next)
1456 if (m1 == m0 || (m1->partial && m1->match == m0))
1457 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1458 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1459 goto overlap;
1460
1461 /* No overlap: we can combine this with the others. */
1462 m0->lifetime += m->lifetime;
1463 m0->savings += m->savings;
1464 m->done = 1;
1465 m->match = m0;
1466
1467 overlap: ;
1468 }
1469 }
1470 }
1471 \f
1472 /* Return 1 if regs X and Y will become the same if moved. */
1473
1474 static int
1475 regs_match_p (x, y, movables)
1476 rtx x, y;
1477 struct movable *movables;
1478 {
1479 int xn = REGNO (x);
1480 int yn = REGNO (y);
1481 struct movable *mx, *my;
1482
1483 for (mx = movables; mx; mx = mx->next)
1484 if (mx->regno == xn)
1485 break;
1486
1487 for (my = movables; my; my = my->next)
1488 if (my->regno == yn)
1489 break;
1490
1491 return (mx && my
1492 && ((mx->match == my->match && mx->match != 0)
1493 || mx->match == my
1494 || mx == my->match));
1495 }
1496
1497 /* Return 1 if X and Y are identical-looking rtx's.
1498 This is the Lisp function EQUAL for rtx arguments.
1499
1500 If two registers are matching movables or a movable register and an
1501 equivalent constant, consider them equal. */
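/* For example, if the movable chain records that (reg 101) and
   (reg 205) load the same invariant value, then
   (plus:SI (reg:SI 101) (const_int 4)) and
   (plus:SI (reg:SI 205) (const_int 4)) compare equal here, although
   plain rtx_equal_p would reject them.  The register numbers are made
   up for illustration.  */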
1502
1503 static int
1504 rtx_equal_for_loop_p (x, y, movables)
1505 rtx x, y;
1506 struct movable *movables;
1507 {
1508 register int i;
1509 register int j;
1510 register struct movable *m;
1511 register enum rtx_code code;
1512 register char *fmt;
1513
1514 if (x == y)
1515 return 1;
1516 if (x == 0 || y == 0)
1517 return 0;
1518
1519 code = GET_CODE (x);
1520
1521 /* If we have a register and a constant, they may sometimes be
1522 equal. */
1523 if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
1524 && CONSTANT_P (y))
1525 {
1526 for (m = movables; m; m = m->next)
1527 if (m->move_insn && m->regno == REGNO (x)
1528 && rtx_equal_p (m->set_src, y))
1529 return 1;
1530 }
1531 else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
1532 && CONSTANT_P (x))
1533 {
1534 for (m = movables; m; m = m->next)
1535 if (m->move_insn && m->regno == REGNO (y)
1536 && rtx_equal_p (m->set_src, x))
1537 return 1;
1538 }
1539
1540 /* Otherwise, rtx's of different codes cannot be equal. */
1541 if (code != GET_CODE (y))
1542 return 0;
1543
1544 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1545 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1546
1547 if (GET_MODE (x) != GET_MODE (y))
1548 return 0;
1549
1550 /* These three types of rtx's can be compared nonrecursively. */
1551 if (code == REG)
1552 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1553
1554 if (code == LABEL_REF)
1555 return XEXP (x, 0) == XEXP (y, 0);
1556 if (code == SYMBOL_REF)
1557 return XSTR (x, 0) == XSTR (y, 0);
1558
1559 /* Compare the elements. If any pair of corresponding elements
1560 fail to match, return 0 for the whole thing. */
1561
1562 fmt = GET_RTX_FORMAT (code);
1563 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1564 {
1565 switch (fmt[i])
1566 {
1567 case 'w':
1568 if (XWINT (x, i) != XWINT (y, i))
1569 return 0;
1570 break;
1571
1572 case 'i':
1573 if (XINT (x, i) != XINT (y, i))
1574 return 0;
1575 break;
1576
1577 case 'E':
1578 /* Two vectors must have the same length. */
1579 if (XVECLEN (x, i) != XVECLEN (y, i))
1580 return 0;
1581
1582 /* And the corresponding elements must match. */
1583 for (j = 0; j < XVECLEN (x, i); j++)
1584 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1585 return 0;
1586 break;
1587
1588 case 'e':
1589 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1590 return 0;
1591 break;
1592
1593 case 's':
1594 if (strcmp (XSTR (x, i), XSTR (y, i)))
1595 return 0;
1596 break;
1597
1598 case 'u':
1599 /* These are just backpointers, so they don't matter. */
1600 break;
1601
1602 case '0':
1603 break;
1604
1605 /* It is believed that rtx's at this level will never
1606 contain anything but integers and other rtx's,
1607 except for within LABEL_REFs and SYMBOL_REFs. */
1608 default:
1609 abort ();
1610 }
1611 }
1612 return 1;
1613 }
1614 \f
1615 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1616 insns in INSNS which use that reference. */
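/* A hypothetical illustration: if X contains (label_ref L7), every
   insn in INSNS that mentions L7 gets a (REG_LABEL L7) note,
   recording that the insn uses the address of label L7.  */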
1617
1618 static void
1619 add_label_notes (x, insns)
1620 rtx x;
1621 rtx insns;
1622 {
1623 enum rtx_code code = GET_CODE (x);
1624 int i, j;
1625 char *fmt;
1626 rtx insn;
1627
1628 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1629 {
1630 /* This code used to ignore labels that referred to dispatch tables to
1631 avoid flow generating (slightly) worse code.
1632
1633 We no longer ignore such label references (see LABEL_REF handling in
1634 mark_jump_label for additional information). */
1635 for (insn = insns; insn; insn = NEXT_INSN (insn))
1636 if (reg_mentioned_p (XEXP (x, 0), insn))
1637 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1638 REG_NOTES (insn));
1639 }
1640
1641 fmt = GET_RTX_FORMAT (code);
1642 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1643 {
1644 if (fmt[i] == 'e')
1645 add_label_notes (XEXP (x, i), insns);
1646 else if (fmt[i] == 'E')
1647 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1648 add_label_notes (XVECEXP (x, i, j), insns);
1649 }
1650 }
1651 \f
1652 /* Scan MOVABLES, and move the insns that deserve to be moved.
1653 If two matching movables are combined, replace one reg with the
1654 other throughout. */
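/* A sketch of the combining step, with hypothetical pseudo numbers:
   if movables M1 and M2 load the same invariant into regs 101 and 102
   and M2->match == M1, then M2's insn is deleted, reg_map[102] is set
   to (reg 101), and the final pass below rewrites every use of
   reg 102 in the loop into reg 101.  */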
1655
1656 static void
1657 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1658 struct movable *movables;
1659 int threshold;
1660 int insn_count;
1661 rtx loop_start;
1662 rtx end;
1663 int nregs;
1664 {
1665 rtx new_start = 0;
1666 register struct movable *m;
1667 register rtx p;
1668 /* Map of pseudo-register replacements to handle combining
1669 when we move several insns that load the same value
1670 into different pseudo-registers. */
1671 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1672 char *already_moved = (char *) alloca (nregs);
1673
1674 bzero (already_moved, nregs);
1675 bzero ((char *) reg_map, nregs * sizeof (rtx));
1676
1677 num_movables = 0;
1678
1679 for (m = movables; m; m = m->next)
1680 {
1681 /* Describe this movable insn. */
1682
1683 if (loop_dump_stream)
1684 {
1685 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1686 INSN_UID (m->insn), m->regno, m->lifetime);
1687 if (m->consec > 0)
1688 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1689 if (m->cond)
1690 fprintf (loop_dump_stream, "cond ");
1691 if (m->force)
1692 fprintf (loop_dump_stream, "force ");
1693 if (m->global)
1694 fprintf (loop_dump_stream, "global ");
1695 if (m->done)
1696 fprintf (loop_dump_stream, "done ");
1697 if (m->move_insn)
1698 fprintf (loop_dump_stream, "move-insn ");
1699 if (m->match)
1700 fprintf (loop_dump_stream, "matches %d ",
1701 INSN_UID (m->match->insn));
1702 if (m->forces)
1703 fprintf (loop_dump_stream, "forces %d ",
1704 INSN_UID (m->forces->insn));
1705 }
1706
1707 /* Count movables. Value used in heuristics in strength_reduce. */
1708 num_movables++;
1709
1710 /* Ignore the insn if it's already done (it matched something else).
1711 Otherwise, see if it is now safe to move. */
1712
1713 if (!m->done
1714 && (! m->cond
1715 || (1 == invariant_p (m->set_src)
1716 && (m->dependencies == 0
1717 || 1 == invariant_p (m->dependencies))
1718 && (m->consec == 0
1719 || 1 == consec_sets_invariant_p (m->set_dest,
1720 m->consec + 1,
1721 m->insn))))
1722 && (! m->forces || m->forces->done))
1723 {
1724 register int regno;
1725 register rtx p;
1726 int savings = m->savings;
1727
1728 /* We have an insn that is safe to move.
1729 Compute its desirability. */
1730
1731 p = m->insn;
1732 regno = m->regno;
1733
1734 if (loop_dump_stream)
1735 fprintf (loop_dump_stream, "savings %d ", savings);
1736
1737 if (moved_once[regno])
1738 {
1739 insn_count *= 2;
1740
1741 if (loop_dump_stream)
1742 fprintf (loop_dump_stream, "halved since already moved ");
1743 }
1744
1745 /* An insn MUST be moved if we already moved something else
1746 which is safe only if this one is moved too: that is,
1747 if already_moved[REGNO] is nonzero. */
1748
1749 /* An insn is desirable to move if the new lifetime of the
1750 register is no more than THRESHOLD times the old lifetime.
1751 If it's not desirable, it means the loop is so big
1752 that moving won't speed things up much,
1753 and it is liable to make register usage worse. */
1754
1755 /* It is also desirable to move if it can be moved at no
1756 extra cost because something else was already moved. */
1757
1758 if (already_moved[regno]
1759 || flag_move_all_movables
1760 || (threshold * savings * m->lifetime) >= insn_count
1761 || (m->forces && m->forces->done
1762 && n_times_used[m->forces->regno] == 1))
1763 {
1764 int count;
1765 register struct movable *m1;
1766 rtx first;
1767
1768 /* Now move the insns that set the reg. */
1769
1770 if (m->partial && m->match)
1771 {
1772 rtx newpat, i1;
1773 rtx r1, r2;
1774 /* Find the end of this chain of matching regs.
1775 Thus, we load each reg in the chain from that one reg.
1776 And that reg is loaded with 0 directly,
1777 since it has ->match == 0. */
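/* E.g., for a hypothetical chain m->match == m1, m1->match == m2,
   m2->match == 0: m2's reg is the one actually loaded with 0, and
   the insn emitted here loads m's reg by copying from m2's reg.  */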
1778 for (m1 = m; m1->match; m1 = m1->match);
1779 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1780 SET_DEST (PATTERN (m1->insn)));
1781 i1 = emit_insn_before (newpat, loop_start);
1782
1783 /* Mark the moved, invariant reg as being allowed to
1784 share a hard reg with the other matching invariant. */
1785 REG_NOTES (i1) = REG_NOTES (m->insn);
1786 r1 = SET_DEST (PATTERN (m->insn));
1787 r2 = SET_DEST (PATTERN (m1->insn));
1788 regs_may_share
1789 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1790 gen_rtx_EXPR_LIST (VOIDmode, r2,
1791 regs_may_share));
1792 delete_insn (m->insn);
1793
1794 if (new_start == 0)
1795 new_start = i1;
1796
1797 if (loop_dump_stream)
1798 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1799 }
1800 /* If we are to re-generate the item being moved with a
1801 new move insn, first delete what we have and then emit
1802 the move insn before the loop. */
1803 else if (m->move_insn)
1804 {
1805 rtx i1, temp;
1806
1807 for (count = m->consec; count >= 0; count--)
1808 {
1809 /* If this is the first insn of a library call sequence,
1810 skip to the end. */
1811 if (GET_CODE (p) != NOTE
1812 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1813 p = XEXP (temp, 0);
1814
1815 /* If this is the last insn of a libcall sequence, then
1816 delete every insn in the sequence except the last.
1817 The last insn is handled in the normal manner. */
1818 if (GET_CODE (p) != NOTE
1819 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1820 {
1821 temp = XEXP (temp, 0);
1822 while (temp != p)
1823 temp = delete_insn (temp);
1824 }
1825
1826 p = delete_insn (p);
1827 while (p && GET_CODE (p) == NOTE)
1828 p = NEXT_INSN (p);
1829 }
1830
1831 start_sequence ();
1832 emit_move_insn (m->set_dest, m->set_src);
1833 temp = get_insns ();
1834 end_sequence ();
1835
1836 add_label_notes (m->set_src, temp);
1837
1838 i1 = emit_insns_before (temp, loop_start);
1839 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1840 REG_NOTES (i1)
1841 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1842 m->set_src, REG_NOTES (i1));
1843
1844 if (loop_dump_stream)
1845 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1846
1847 /* The more regs we move, the less we like moving them. */
1848 threshold -= 3;
1849 }
1850 else
1851 {
1852 for (count = m->consec; count >= 0; count--)
1853 {
1854 rtx i1, temp;
1855
1856 /* If first insn of libcall sequence, skip to end. */
1857 /* Do this at start of loop, since p is guaranteed to
1858 be an insn here. */
1859 if (GET_CODE (p) != NOTE
1860 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1861 p = XEXP (temp, 0);
1862
1863 /* If last insn of libcall sequence, move all
1864 insns except the last before the loop. The last
1865 insn is handled in the normal manner. */
1866 if (GET_CODE (p) != NOTE
1867 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1868 {
1869 rtx fn_address = 0;
1870 rtx fn_reg = 0;
1871 rtx fn_address_insn = 0;
1872
1873 first = 0;
1874 for (temp = XEXP (temp, 0); temp != p;
1875 temp = NEXT_INSN (temp))
1876 {
1877 rtx body;
1878 rtx n;
1879 rtx next;
1880
1881 if (GET_CODE (temp) == NOTE)
1882 continue;
1883
1884 body = PATTERN (temp);
1885
1886 /* Find the next insn after TEMP,
1887 not counting USE or NOTE insns. */
1888 for (next = NEXT_INSN (temp); next != p;
1889 next = NEXT_INSN (next))
1890 if (! (GET_CODE (next) == INSN
1891 && GET_CODE (PATTERN (next)) == USE)
1892 && GET_CODE (next) != NOTE)
1893 break;
1894
1895 /* If that is the call, this may be the insn
1896 that loads the function address.
1897
1898 Extract the function address from the insn
1899 that loads it into a register.
1900 If this insn was cse'd, we get incorrect code.
1901
1902 So emit a new move insn that copies the
1903 function address into the register that the
1904 call insn will use. flow.c will delete any
1905 redundant stores that we have created. */
1906 if (GET_CODE (next) == CALL_INSN
1907 && GET_CODE (body) == SET
1908 && GET_CODE (SET_DEST (body)) == REG
1909 && (n = find_reg_note (temp, REG_EQUAL,
1910 NULL_RTX)))
1911 {
1912 fn_reg = SET_SRC (body);
1913 if (GET_CODE (fn_reg) != REG)
1914 fn_reg = SET_DEST (body);
1915 fn_address = XEXP (n, 0);
1916 fn_address_insn = temp;
1917 }
1918 /* We have the call insn.
1919 If it uses the register we suspect it might,
1920 load it with the correct address directly. */
1921 if (GET_CODE (temp) == CALL_INSN
1922 && fn_address != 0
1923 && reg_referenced_p (fn_reg, body))
1924 emit_insn_after (gen_move_insn (fn_reg,
1925 fn_address),
1926 fn_address_insn);
1927
1928 if (GET_CODE (temp) == CALL_INSN)
1929 {
1930 i1 = emit_call_insn_before (body, loop_start);
1931 /* Because the USAGE information potentially
1932 contains objects other than hard registers
1933 we need to copy it. */
1934 if (CALL_INSN_FUNCTION_USAGE (temp))
1935 CALL_INSN_FUNCTION_USAGE (i1)
1936 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1937 }
1938 else
1939 i1 = emit_insn_before (body, loop_start);
1940 if (first == 0)
1941 first = i1;
1942 if (temp == fn_address_insn)
1943 fn_address_insn = i1;
1944 REG_NOTES (i1) = REG_NOTES (temp);
1945 delete_insn (temp);
1946 }
1947 }
1948 if (m->savemode != VOIDmode)
1949 {
1950 /* P sets REG to zero; but we should clear only
1951 the bits that are not covered by the mode
1952 m->savemode. */
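/* For example, assuming a 32-bit SImode REG and m->savemode == QImode,
   the expand_binop call below emits the equivalent of
   reg &= 0xff;
   before the loop, clearing the upper 24 bits but leaving the low
   byte alone.  */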
1953 rtx reg = m->set_dest;
1954 rtx sequence;
1955 rtx tem;
1956
1957 start_sequence ();
1958 tem = expand_binop
1959 (GET_MODE (reg), and_optab, reg,
1960 GEN_INT ((((HOST_WIDE_INT) 1
1961 << GET_MODE_BITSIZE (m->savemode)))
1962 - 1),
1963 reg, 1, OPTAB_LIB_WIDEN);
1964 if (tem == 0)
1965 abort ();
1966 if (tem != reg)
1967 emit_move_insn (reg, tem);
1968 sequence = gen_sequence ();
1969 end_sequence ();
1970 i1 = emit_insn_before (sequence, loop_start);
1971 }
1972 else if (GET_CODE (p) == CALL_INSN)
1973 {
1974 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1975 /* Because the USAGE information potentially
1976 contains objects other than hard registers
1977 we need to copy it. */
1978 if (CALL_INSN_FUNCTION_USAGE (p))
1979 CALL_INSN_FUNCTION_USAGE (i1)
1980 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1981 }
1982 else if (count == m->consec && m->move_insn_first)
1983 {
1984 /* The SET_SRC might not be invariant, so we must
1985 use the REG_EQUAL note. */
1986 start_sequence ();
1987 emit_move_insn (m->set_dest, m->set_src);
1988 temp = get_insns ();
1989 end_sequence ();
1990
1991 add_label_notes (m->set_src, temp);
1992
1993 i1 = emit_insns_before (temp, loop_start);
1994 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1995 REG_NOTES (i1)
1996 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1997 : REG_EQUAL),
1998 m->set_src, REG_NOTES (i1));
1999 }
2000 else
2001 i1 = emit_insn_before (PATTERN (p), loop_start);
2002
2003 if (REG_NOTES (i1) == 0)
2004 {
2005 REG_NOTES (i1) = REG_NOTES (p);
2006
2007 /* If there is a REG_EQUAL note present whose value
2008 is not loop invariant, then delete it, since it
2009 may cause problems with later optimization passes.
2010 It is possible for cse to create such notes
2011 like this as a result of record_jump_cond. */
2012
2013 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2014 && ! invariant_p (XEXP (temp, 0)))
2015 remove_note (i1, temp);
2016 }
2017
2018 if (new_start == 0)
2019 new_start = i1;
2020
2021 if (loop_dump_stream)
2022 fprintf (loop_dump_stream, " moved to %d",
2023 INSN_UID (i1));
2024
2025 /* If library call, now fix the REG_NOTES that contain
2026 insn pointers, namely REG_LIBCALL on FIRST
2027 and REG_RETVAL on I1. */
2028 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2029 {
2030 XEXP (temp, 0) = first;
2031 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2032 XEXP (temp, 0) = i1;
2033 }
2034
2035 delete_insn (p);
2036 do p = NEXT_INSN (p);
2037 while (p && GET_CODE (p) == NOTE);
2038 }
2039
2040 /* The more regs we move, the less we like moving them. */
2041 threshold -= 3;
2042 }
2043
2044 /* Any other movable that loads the same register
2045 MUST be moved. */
2046 already_moved[regno] = 1;
2047
2048 /* This reg has been moved out of one loop. */
2049 moved_once[regno] = 1;
2050
2051 /* The reg set here is now invariant. */
2052 if (! m->partial)
2053 n_times_set[regno] = 0;
2054
2055 m->done = 1;
2056
2057 /* Change the length-of-life info for the register
2058 to say it lives at least the full length of this loop.
2059 This will help guide optimizations in outer loops. */
2060
2061 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2062 /* This is the old insn before all the moved insns.
2063 We can't use the moved insn because it is out of range
2064 in uid_luid. Only the old insns have luids. */
2065 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2066 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2067 REGNO_LAST_UID (regno) = INSN_UID (end);
2068
2069 /* Combine with this moved insn any other matching movables. */
2070
2071 if (! m->partial)
2072 for (m1 = movables; m1; m1 = m1->next)
2073 if (m1->match == m)
2074 {
2075 rtx temp;
2076
2077 /* Schedule the reg loaded by M1
2078 for replacement so that it shares the reg of M.
2079 If the modes differ (only possible in restricted
2080 circumstances), make a SUBREG. */
2081 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2082 reg_map[m1->regno] = m->set_dest;
2083 else
2084 reg_map[m1->regno]
2085 = gen_lowpart_common (GET_MODE (m1->set_dest),
2086 m->set_dest);
2087
2088 /* Get rid of the matching insn
2089 and prevent further processing of it. */
2090 m1->done = 1;
2091
2092 /* If library call, delete all insns except the last,
2093 which is deleted below. */
2094 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2095 NULL_RTX)))
2096 {
2097 for (temp = XEXP (temp, 0); temp != m1->insn;
2098 temp = NEXT_INSN (temp))
2099 delete_insn (temp);
2100 }
2101 delete_insn (m1->insn);
2102
2103 /* Any other movable that loads the same register
2104 MUST be moved. */
2105 already_moved[m1->regno] = 1;
2106
2107 /* The reg merged here is now invariant,
2108 if the reg it matches is invariant. */
2109 if (! m->partial)
2110 n_times_set[m1->regno] = 0;
2111 }
2112 }
2113 else if (loop_dump_stream)
2114 fprintf (loop_dump_stream, "not desirable");
2115 }
2116 else if (loop_dump_stream && !m->match)
2117 fprintf (loop_dump_stream, "not safe");
2118
2119 if (loop_dump_stream)
2120 fprintf (loop_dump_stream, "\n");
2121 }
2122
2123 if (new_start == 0)
2124 new_start = loop_start;
2125
2126 /* Go through all the instructions in the loop, making
2127 all the register substitutions scheduled in REG_MAP. */
2128 for (p = new_start; p != end; p = NEXT_INSN (p))
2129 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2130 || GET_CODE (p) == CALL_INSN)
2131 {
2132 replace_regs (PATTERN (p), reg_map, nregs, 0);
2133 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2134 INSN_CODE (p) = -1;
2135 }
2136 }
2137 \f
2138 #if 0
2139 /* Scan X and replace the address of any MEM in it with ADDR.
2140 REG is the address that MEM should have before the replacement. */
2141
2142 static void
2143 replace_call_address (x, reg, addr)
2144 rtx x, reg, addr;
2145 {
2146 register enum rtx_code code;
2147 register int i;
2148 register char *fmt;
2149
2150 if (x == 0)
2151 return;
2152 code = GET_CODE (x);
2153 switch (code)
2154 {
2155 case PC:
2156 case CC0:
2157 case CONST_INT:
2158 case CONST_DOUBLE:
2159 case CONST:
2160 case SYMBOL_REF:
2161 case LABEL_REF:
2162 case REG:
2163 return;
2164
2165 case SET:
2166 /* Short cut for very common case. */
2167 replace_call_address (XEXP (x, 1), reg, addr);
2168 return;
2169
2170 case CALL:
2171 /* Short cut for very common case. */
2172 replace_call_address (XEXP (x, 0), reg, addr);
2173 return;
2174
2175 case MEM:
2176 /* If this MEM uses a reg other than the one we expected,
2177 something is wrong. */
2178 if (XEXP (x, 0) != reg)
2179 abort ();
2180 XEXP (x, 0) = addr;
2181 return;
2182
2183 default:
2184 break;
2185 }
2186
2187 fmt = GET_RTX_FORMAT (code);
2188 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2189 {
2190 if (fmt[i] == 'e')
2191 replace_call_address (XEXP (x, i), reg, addr);
2192 if (fmt[i] == 'E')
2193 {
2194 register int j;
2195 for (j = 0; j < XVECLEN (x, i); j++)
2196 replace_call_address (XVECEXP (x, i, j), reg, addr);
2197 }
2198 }
2199 }
2200 #endif
2201 \f
2202 /* Return the number of memory refs to addresses that vary
2203 in the rtx X. */
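/* For example (hypothetical pseudo 105): if reg 105 is not invariant
   in the loop, (mem (reg 105)) counts as one nonfixed read, while a
   MEM whose address is loop invariant contributes nothing.  */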
2204
2205 static int
2206 count_nonfixed_reads (x)
2207 rtx x;
2208 {
2209 register enum rtx_code code;
2210 register int i;
2211 register char *fmt;
2212 int value;
2213
2214 if (x == 0)
2215 return 0;
2216
2217 code = GET_CODE (x);
2218 switch (code)
2219 {
2220 case PC:
2221 case CC0:
2222 case CONST_INT:
2223 case CONST_DOUBLE:
2224 case CONST:
2225 case SYMBOL_REF:
2226 case LABEL_REF:
2227 case REG:
2228 return 0;
2229
2230 case MEM:
2231 return ((invariant_p (XEXP (x, 0)) != 1)
2232 + count_nonfixed_reads (XEXP (x, 0)));
2233
2234 default:
2235 break;
2236 }
2237
2238 value = 0;
2239 fmt = GET_RTX_FORMAT (code);
2240 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2241 {
2242 if (fmt[i] == 'e')
2243 value += count_nonfixed_reads (XEXP (x, i));
2244 if (fmt[i] == 'E')
2245 {
2246 register int j;
2247 for (j = 0; j < XVECLEN (x, i); j++)
2248 value += count_nonfixed_reads (XVECEXP (x, i, j));
2249 }
2250 }
2251 return value;
2252 }
2253
2254 \f
2255 #if 0
2256 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2257 Replace it with an instruction to load just the low bytes
2258 if the machine supports such an instruction,
2259 and insert above LOOP_START an instruction to clear the register. */
2260
2261 static void
2262 constant_high_bytes (p, loop_start)
2263 rtx p, loop_start;
2264 {
2265 register rtx new;
2266 register int insn_code_number;
2267
2268 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2269 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2270
2271 new = gen_rtx_SET (VOIDmode,
2272 gen_rtx_STRICT_LOW_PART (VOIDmode,
2273 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2274 SET_DEST (PATTERN (p)),
2275 0)),
2276 XEXP (SET_SRC (PATTERN (p)), 0));
2277 insn_code_number = recog (new, p);
2278
2279 if (insn_code_number)
2280 {
2281 register int i;
2282
2283 /* Clear destination register before the loop. */
2284 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2285 const0_rtx),
2286 loop_start);
2287
2288 /* Inside the loop, just load the low part. */
2289 PATTERN (p) = new;
2290 }
2291 }
2292 #endif
2293 \f
2294 /* Scan a loop setting the variables `unknown_address_altered',
2295 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2296 and `loop_has_volatile'.
2297 Also, fill in the array `loop_store_mems'. */
2298
2299 static void
2300 prescan_loop (start, end)
2301 rtx start, end;
2302 {
2303 register int level = 1;
2304 register rtx insn;
2305
2306 unknown_address_altered = 0;
2307 loop_has_call = 0;
2308 loop_has_volatile = 0;
2309 loop_store_mems_idx = 0;
2310
2311 num_mem_sets = 0;
2312 loops_enclosed = 1;
2313 loop_continue = 0;
2314
2315 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2316 insn = NEXT_INSN (insn))
2317 {
2318 if (GET_CODE (insn) == NOTE)
2319 {
2320 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2321 {
2322 ++level;
2323 /* Count number of loops contained in this one. */
2324 loops_enclosed++;
2325 }
2326 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2327 {
2328 --level;
2329 if (level == 0)
2330 {
2331 end = insn;
2332 break;
2333 }
2334 }
2335 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2336 {
2337 if (level == 1)
2338 loop_continue = insn;
2339 }
2340 }
2341 else if (GET_CODE (insn) == CALL_INSN)
2342 {
2343 if (! CONST_CALL_P (insn))
2344 unknown_address_altered = 1;
2345 loop_has_call = 1;
2346 }
2347 else
2348 {
2349 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2350 {
2351 if (volatile_refs_p (PATTERN (insn)))
2352 loop_has_volatile = 1;
2353
2354 note_stores (PATTERN (insn), note_addr_stored);
2355 }
2356 }
2357 }
2358 }
2359 \f
2360 /* Scan the function looking for loops. Record the start and end of each loop.
2361 Also mark as invalid loops any loops that contain a setjmp or are branched
2362 to from outside the loop. */
2363
2364 static void
2365 find_and_verify_loops (f)
2366 rtx f;
2367 {
2368 rtx insn, label;
2369 int current_loop = -1;
2370 int next_loop = -1;
2371 int loop;
2372
2373 /* If there are jumps to undefined labels,
2374 treat them as jumps out of any/all loops.
2375 This also avoids writing past end of tables when there are no loops. */
2376 uid_loop_num[0] = -1;
2377
2378 /* Find boundaries of loops, mark which loops are contained within
2379 loops, and invalidate loops that have setjmp. */
2380
2381 for (insn = f; insn; insn = NEXT_INSN (insn))
2382 {
2383 if (GET_CODE (insn) == NOTE)
2384 switch (NOTE_LINE_NUMBER (insn))
2385 {
2386 case NOTE_INSN_LOOP_BEG:
2387 loop_number_loop_starts[++next_loop] = insn;
2388 loop_number_loop_ends[next_loop] = 0;
2389 loop_outer_loop[next_loop] = current_loop;
2390 loop_invalid[next_loop] = 0;
2391 loop_number_exit_labels[next_loop] = 0;
2392 loop_number_exit_count[next_loop] = 0;
2393 current_loop = next_loop;
2394 break;
2395
2396 case NOTE_INSN_SETJMP:
2397 /* In this case, we must invalidate our current loop and any
2398 enclosing loop. */
2399 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2400 {
2401 loop_invalid[loop] = 1;
2402 if (loop_dump_stream)
2403 fprintf (loop_dump_stream,
2404 "\nLoop at %d ignored due to setjmp.\n",
2405 INSN_UID (loop_number_loop_starts[loop]));
2406 }
2407 break;
2408
2409 case NOTE_INSN_LOOP_END:
2410 if (current_loop == -1)
2411 abort ();
2412
2413 loop_number_loop_ends[current_loop] = insn;
2414 current_loop = loop_outer_loop[current_loop];
2415 break;
2416
2417 default:
2418 break;
2419 }
2420
2421 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2422 enclosing loop, but this doesn't matter. */
2423 uid_loop_num[INSN_UID (insn)] = current_loop;
2424 }
2425
2426 /* Any loop containing a label used in an initializer must be invalidated,
2427 because it can be jumped into from anywhere. */
2428
2429 for (label = forced_labels; label; label = XEXP (label, 1))
2430 {
2431 int loop_num;
2432
2433 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2434 loop_num != -1;
2435 loop_num = loop_outer_loop[loop_num])
2436 loop_invalid[loop_num] = 1;
2437 }
2438
2439 /* Any loop containing a label used for an exception handler must be
2440 invalidated, because it can be jumped into from anywhere. */
2441
2442 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2443 {
2444 int loop_num;
2445
2446 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2447 loop_num != -1;
2448 loop_num = loop_outer_loop[loop_num])
2449 loop_invalid[loop_num] = 1;
2450 }
2451
2452 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2453 loop that it is not contained within, that loop is marked invalid.
2454 If any INSN or CALL_INSN uses a label's address, then the loop containing
2455 that label is marked invalid, because it could be jumped into from
2456 anywhere.
2457
2458 Also look for blocks of code ending in an unconditional branch that
2459 exits the loop. If such a block is surrounded by a conditional
2460 branch around the block, move the block elsewhere (see below) and
2461 invert the jump to point to the code block. This may eliminate a
2462 label in our loop and will simplify processing by both us and a
2463 possible second cse pass. */
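/* A sketch of that transformation, using hypothetical labels.  Before:
     if (cond) goto L1;
     <block>
     goto exit;    <- unconditional branch out of the loop
   L1: <rest of loop>
   After:
     if (! cond) goto L2;
   L1: <rest of loop>
   ...
   L2: <block>
     goto exit;
   where L2 marks the block's new home after a BARRIER at the same loop
   depth as the branch target.  */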
2464
2465 for (insn = f; insn; insn = NEXT_INSN (insn))
2466 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2467 {
2468 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2469
2470 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2471 {
2472 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2473 if (note)
2474 {
2475 int loop_num;
2476
2477 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2478 loop_num != -1;
2479 loop_num = loop_outer_loop[loop_num])
2480 loop_invalid[loop_num] = 1;
2481 }
2482 }
2483
2484 if (GET_CODE (insn) != JUMP_INSN)
2485 continue;
2486
2487 mark_loop_jump (PATTERN (insn), this_loop_num);
2488
2489 /* See if this is an unconditional branch outside the loop. */
2490 if (this_loop_num != -1
2491 && (GET_CODE (PATTERN (insn)) == RETURN
2492 || (simplejump_p (insn)
2493 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2494 != this_loop_num)))
2495 && get_max_uid () < max_uid_for_loop)
2496 {
2497 rtx p;
2498 rtx our_next = next_real_insn (insn);
2499 int dest_loop;
2500 int outer_loop = -1;
2501
2502 /* Go backwards until we reach the start of the loop, a label,
2503 or a JUMP_INSN. */
2504 for (p = PREV_INSN (insn);
2505 GET_CODE (p) != CODE_LABEL
2506 && ! (GET_CODE (p) == NOTE
2507 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2508 && GET_CODE (p) != JUMP_INSN;
2509 p = PREV_INSN (p))
2510 ;
2511
2512 /* Check for the case where we have a jump to an inner nested
2513 loop, and do not perform the optimization in that case. */
2514
2515 if (JUMP_LABEL (insn))
2516 {
2517 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2518 if (dest_loop != -1)
2519 {
2520 for (outer_loop = dest_loop; outer_loop != -1;
2521 outer_loop = loop_outer_loop[outer_loop])
2522 if (outer_loop == this_loop_num)
2523 break;
2524 }
2525 }
2526
2527 /* Make sure that the target of P is within the current loop. */
2528
2529 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2530 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2531 outer_loop = this_loop_num;
2532
2533 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2534 we have a block of code to try to move.
2535
2536 We look backward and then forward from the target of INSN
2537 to find a BARRIER at the same loop depth as the target.
2538 If we find such a BARRIER, we make a new label for the start
2539 of the block, invert the jump in P and point it to that label,
2540 and move the block of code to the spot we found. */
2541
2542 if (outer_loop == -1
2543 && GET_CODE (p) == JUMP_INSN
2544 && JUMP_LABEL (p) != 0
2545 /* Just ignore jumps to labels that were never emitted.
2546 These always indicate compilation errors. */
2547 && INSN_UID (JUMP_LABEL (p)) != 0
2548 && condjump_p (p)
2549 && ! simplejump_p (p)
2550 && next_real_insn (JUMP_LABEL (p)) == our_next)
2551 {
2552 rtx target
2553 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2554 int target_loop_num = uid_loop_num[INSN_UID (target)];
2555 rtx loc;
2556
2557 for (loc = target; loc; loc = PREV_INSN (loc))
2558 if (GET_CODE (loc) == BARRIER
2559 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2560 break;
2561
2562 if (loc == 0)
2563 for (loc = target; loc; loc = NEXT_INSN (loc))
2564 if (GET_CODE (loc) == BARRIER
2565 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2566 break;
2567
2568 if (loc)
2569 {
2570 rtx cond_label = JUMP_LABEL (p);
2571 rtx new_label = get_label_after (p);
2572
2573 /* Ensure our label doesn't go away. */
2574 LABEL_NUSES (cond_label)++;
2575
2576 /* Verify that uid_loop_num is large enough and that
2577 we can invert P. */
2578 if (invert_jump (p, new_label))
2579 {
2580 rtx q, r;
2581
2582 /* If no suitable BARRIER was found, create a suitable
2583 one before TARGET. Since TARGET is a fall through
2584 path, we'll need to insert a jump around our block
2585 and add a BARRIER before TARGET.
2586
2587 This creates an extra unconditional jump outside
2588 the loop. However, the benefits of removing rarely
2589 executed instructions from inside the loop usually
2590 outweighs the cost of the extra unconditional jump
2591 outside the loop. */
2592 if (loc == 0)
2593 {
2594 rtx temp;
2595
2596 temp = gen_jump (JUMP_LABEL (insn));
2597 temp = emit_jump_insn_before (temp, target);
2598 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2599 LABEL_NUSES (JUMP_LABEL (insn))++;
2600 loc = emit_barrier_before (target);
2601 }
2602
2603 /* Include the BARRIER after INSN and copy the
2604 block after LOC. */
2605 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2606 reorder_insns (new_label, NEXT_INSN (insn), loc);
2607
2608 /* All those insns are now in TARGET_LOOP_NUM. */
2609 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2610 q = NEXT_INSN (q))
2611 uid_loop_num[INSN_UID (q)] = target_loop_num;
2612
2613 /* The label jumped to by INSN is no longer a loop exit.
2614 Unless INSN does not have a label (e.g., it is a
2615 RETURN insn), search loop_number_exit_labels to find
2616 its label_ref, and remove it. Also turn off
2617 LABEL_OUTSIDE_LOOP_P bit. */
2618 if (JUMP_LABEL (insn))
2619 {
2620 int loop_num;
2621
2622 for (q = 0,
2623 r = loop_number_exit_labels[this_loop_num];
2624 r; q = r, r = LABEL_NEXTREF (r))
2625 if (XEXP (r, 0) == JUMP_LABEL (insn))
2626 {
2627 LABEL_OUTSIDE_LOOP_P (r) = 0;
2628 if (q)
2629 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2630 else
2631 loop_number_exit_labels[this_loop_num]
2632 = LABEL_NEXTREF (r);
2633 break;
2634 }
2635
2636 for (loop_num = this_loop_num;
2637 loop_num != -1 && loop_num != target_loop_num;
2638 loop_num = loop_outer_loop[loop_num])
2639 loop_number_exit_count[loop_num]--;
2640
2641 /* If we didn't find it, then something is wrong. */
2642 if (! r)
2643 abort ();
2644 }
2645
2646 /* P is now a jump outside the loop, so it must be put
2647 in loop_number_exit_labels, and marked as such.
2648 The easiest way to do this is to just call
2649 mark_loop_jump again for P. */
2650 mark_loop_jump (PATTERN (p), this_loop_num);
2651
2652 /* If INSN now jumps to the insn after it,
2653 delete INSN. */
2654 if (JUMP_LABEL (insn) != 0
2655 && (next_real_insn (JUMP_LABEL (insn))
2656 == next_real_insn (insn)))
2657 delete_insn (insn);
2658 }
2659
2660 /* Continue the loop after where the conditional
2661 branch used to jump, since the only branch insn
2662 in the block (if it still remains) is an inter-loop
2663 branch and hence needs no processing. */
2664 insn = NEXT_INSN (cond_label);
2665
2666 if (--LABEL_NUSES (cond_label) == 0)
2667 delete_insn (cond_label);
2668
2669 /* This loop will be continued with NEXT_INSN (insn). */
2670 insn = PREV_INSN (insn);
2671 }
2672 }
2673 }
2674 }
2675 }
2676
2677 /* If any label in X targets a loop different from LOOP_NUM and from every
2678 loop containing LOOP_NUM, mark the target loop invalid.
2679
2680 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2681
2682 static void
2683 mark_loop_jump (x, loop_num)
2684 rtx x;
2685 int loop_num;
2686 {
2687 int dest_loop;
2688 int outer_loop;
2689 int i;
2690
2691 switch (GET_CODE (x))
2692 {
2693 case PC:
2694 case USE:
2695 case CLOBBER:
2696 case REG:
2697 case MEM:
2698 case CONST_INT:
2699 case CONST_DOUBLE:
2700 case RETURN:
2701 return;
2702
2703 case CONST:
2704 /* There could be a label reference in here. */
2705 mark_loop_jump (XEXP (x, 0), loop_num);
2706 return;
2707
2708 case PLUS:
2709 case MINUS:
2710 case MULT:
2711 mark_loop_jump (XEXP (x, 0), loop_num);
2712 mark_loop_jump (XEXP (x, 1), loop_num);
2713 return;
2714
2715 case SIGN_EXTEND:
2716 case ZERO_EXTEND:
2717 mark_loop_jump (XEXP (x, 0), loop_num);
2718 return;
2719
2720 case LABEL_REF:
2721 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2722
2723 /* Link together all labels that branch outside the loop. This
2724 is used by final_[bg]iv_value and the loop unrolling code. Also
2725 mark this LABEL_REF so we know that this branch should predict
2726 false. */
2727
2728 /* A check to make sure the label is not in an inner nested loop,
2729 since this does not count as a loop exit. */
2730 if (dest_loop != -1)
2731 {
2732 for (outer_loop = dest_loop; outer_loop != -1;
2733 outer_loop = loop_outer_loop[outer_loop])
2734 if (outer_loop == loop_num)
2735 break;
2736 }
2737 else
2738 outer_loop = -1;
2739
2740 if (loop_num != -1 && outer_loop == -1)
2741 {
2742 LABEL_OUTSIDE_LOOP_P (x) = 1;
2743 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2744 loop_number_exit_labels[loop_num] = x;
2745
2746 for (outer_loop = loop_num;
2747 outer_loop != -1 && outer_loop != dest_loop;
2748 outer_loop = loop_outer_loop[outer_loop])
2749 loop_number_exit_count[outer_loop]++;
2750 }
2751
2752 /* If this is inside a loop, but not in the current loop or one enclosed
2753 by it, it invalidates at least one loop. */
2754
2755 if (dest_loop == -1)
2756 return;
2757
2758 /* We must invalidate every nested loop containing the target of this
2759 label, except those that also contain the jump insn. */
2760
2761 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2762 {
2763 /* Stop when we reach a loop that also contains the jump insn. */
2764 for (outer_loop = loop_num; outer_loop != -1;
2765 outer_loop = loop_outer_loop[outer_loop])
2766 if (dest_loop == outer_loop)
2767 return;
2768
2769 /* If we get here, we know we need to invalidate a loop. */
2770 if (loop_dump_stream && ! loop_invalid[dest_loop])
2771 fprintf (loop_dump_stream,
2772 "\nLoop at %d ignored due to multiple entry points.\n",
2773 INSN_UID (loop_number_loop_starts[dest_loop]));
2774
2775 loop_invalid[dest_loop] = 1;
2776 }
2777 return;
2778
2779 case SET:
2780 /* If this is not setting pc, ignore. */
2781 if (SET_DEST (x) == pc_rtx)
2782 mark_loop_jump (SET_SRC (x), loop_num);
2783 return;
2784
2785 case IF_THEN_ELSE:
2786 mark_loop_jump (XEXP (x, 1), loop_num);
2787 mark_loop_jump (XEXP (x, 2), loop_num);
2788 return;
2789
2790 case PARALLEL:
2791 case ADDR_VEC:
2792 for (i = 0; i < XVECLEN (x, 0); i++)
2793 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2794 return;
2795
2796 case ADDR_DIFF_VEC:
2797 for (i = 0; i < XVECLEN (x, 1); i++)
2798 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2799 return;
2800
2801 default:
2802 /* Treat anything else (such as a symbol_ref)
2803 as a branch out of this loop, but not into any loop. */
2804
2805 if (loop_num != -1)
2806 {
2807 #ifdef HAIFA
2808 LABEL_OUTSIDE_LOOP_P (x) = 1;
2809 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2810 #endif /* HAIFA */
2811
2812 loop_number_exit_labels[loop_num] = x;
2813
2814 for (outer_loop = loop_num; outer_loop != -1;
2815 outer_loop = loop_outer_loop[outer_loop])
2816 loop_number_exit_count[outer_loop]++;
2817 }
2818 return;
2819 }
2820 }
2821 \f
2822 /* Return nonzero if there is a label in the range from
2823 insn INSN to and including the insn whose luid is END.
2824 INSN must have an assigned luid (i.e., it must not have
2825 been previously created by loop.c). */
2826
2827 static int
2828 labels_in_range_p (insn, end)
2829 rtx insn;
2830 int end;
2831 {
2832 while (insn && INSN_LUID (insn) <= end)
2833 {
2834 if (GET_CODE (insn) == CODE_LABEL)
2835 return 1;
2836 insn = NEXT_INSN (insn);
2837 }
2838
2839 return 0;
2840 }
2841
2842 /* Record that a memory reference X is being set. */
2843
2844 static void
2845 note_addr_stored (x, y)
2846 rtx x;
2847 rtx y ATTRIBUTE_UNUSED;
2848 {
2849 register int i;
2850
2851 if (x == 0 || GET_CODE (x) != MEM)
2852 return;
2853
2854 /* Count number of memory writes.
2855 This affects heuristics in strength_reduce. */
2856 num_mem_sets++;
2857
2858 /* BLKmode MEM means all memory is clobbered. */
2859 if (GET_MODE (x) == BLKmode)
2860 unknown_address_altered = 1;
2861
2862 if (unknown_address_altered)
2863 return;
2864
2865 for (i = 0; i < loop_store_mems_idx; i++)
2866 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2867 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2868 {
2869 /* We are storing at the same address as previously noted. Save the
2870 wider reference. */
2871 if (GET_MODE_SIZE (GET_MODE (x))
2872 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2873 loop_store_mems[i] = x;
2874 break;
2875 }
2876
2877 if (i == NUM_STORES)
2878 unknown_address_altered = 1;
2879
2880 else if (i == loop_store_mems_idx)
2881 loop_store_mems[loop_store_mems_idx++] = x;
2882 }
2883 \f
2884 /* Return nonzero if the rtx X is invariant over the current loop.
2885
2886 The value is 2 if we refer to something only conditionally invariant.
2887
2888 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2889 Otherwise, a memory ref is invariant if it does not conflict with
2890 anything stored in `loop_store_mems'. */
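/* For example (hypothetical pseudo 106), (plus:SI (reg 106)
   (const_int 4)) is invariant exactly when reg 106 is never set
   within the loop; the value 2 arises roughly when reg 106 is set
   only by insns that are themselves candidates for being moved out
   of the loop.  */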
2891
2892 int
2893 invariant_p (x)
2894 register rtx x;
2895 {
2896 register int i;
2897 register enum rtx_code code;
2898 register char *fmt;
2899 int conditional = 0;
2900
2901 if (x == 0)
2902 return 1;
2903 code = GET_CODE (x);
2904 switch (code)
2905 {
2906 case CONST_INT:
2907 case CONST_DOUBLE:
2908 case SYMBOL_REF:
2909 case CONST:
2910 return 1;
2911
2912 case LABEL_REF:
2913 /* A LABEL_REF is normally invariant, however, if we are unrolling
2914 loops, and this label is inside the loop, then it isn't invariant.
2915 This is because each unrolled copy of the loop body will have
2916 a copy of this label. If this was invariant, then an insn loading
2917 the address of this label into a register might get moved outside
2918 the loop, and then each loop body would end up using the same label.
2919
2920 We don't know the loop bounds here though, so just fail for all
2921 labels. */
2922 if (flag_unroll_loops)
2923 return 0;
2924 else
2925 return 1;
2926
2927 case PC:
2928 case CC0:
2929 case UNSPEC_VOLATILE:
2930 return 0;
2931
2932 case REG:
2933 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2934 since the reg might be set by initialization within the loop. */
2935
2936 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2937 || x == arg_pointer_rtx)
2938 && ! current_function_has_nonlocal_goto)
2939 return 1;
2940
2941 if (loop_has_call
2942 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2943 return 0;
2944
2945 if (n_times_set[REGNO (x)] < 0)
2946 return 2;
2947
2948 return n_times_set[REGNO (x)] == 0;
2949
2950 case MEM:
2951 /* Volatile memory references must be rejected. Do this before
2952 checking for read-only items, so that volatile read-only items
2953 will be rejected also. */
2954 if (MEM_VOLATILE_P (x))
2955 return 0;
2956
2957 /* Read-only items (such as constants in a constant pool) are
2958 invariant if their address is. */
2959 if (RTX_UNCHANGING_P (x))
2960 break;
2961
2962 /* If we filled the table (or had a subroutine call), any location
2963 in memory could have been clobbered. */
2964 if (unknown_address_altered)
2965 return 0;
2966
2967 /* See if there is any dependence between a store and this load. */
2968 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2969 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2970 return 0;
2971
2972 /* It's not invalidated by a store in memory
2973 but we must still verify the address is invariant. */
2974 break;
2975
2976 case ASM_OPERANDS:
2977 /* Don't mess with insns declared volatile. */
2978 if (MEM_VOLATILE_P (x))
2979 return 0;
2980 break;
2981
2982 default:
2983 break;
2984 }
2985
2986 fmt = GET_RTX_FORMAT (code);
2987 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2988 {
2989 if (fmt[i] == 'e')
2990 {
2991 int tem = invariant_p (XEXP (x, i));
2992 if (tem == 0)
2993 return 0;
2994 if (tem == 2)
2995 conditional = 1;
2996 }
2997 else if (fmt[i] == 'E')
2998 {
2999 register int j;
3000 for (j = 0; j < XVECLEN (x, i); j++)
3001 {
3002 int tem = invariant_p (XVECEXP (x, i, j));
3003 if (tem == 0)
3004 return 0;
3005 if (tem == 2)
3006 conditional = 1;
3007 }
3008
3009 }
3010 }
3011
3012 return 1 + conditional;
3013 }
3014
3015 \f
3016 /* Return nonzero if all the insns in the loop that set REG
3017 are INSN and the immediately following insns,
3018 and if each of those insns sets REG in an invariant way
3019 (not counting uses of REG in them).
3020
3021 The value is 2 if some of these insns are only conditionally invariant.
3022
3023 We assume that INSN itself is the first set of REG
3024 and that its source is invariant. */
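/* E.g., a hypothetical sequence such as
   (set (reg 108) (const_int 0))
   (set (reg 108) (plus (reg 108) (symbol_ref X)))
   counts as two consecutive invariant sets: n_times_set for the
   register is temporarily zeroed during the check, so the use of
   reg 108 in the second set does not disqualify it.  */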
3025
3026 static int
3027 consec_sets_invariant_p (reg, n_sets, insn)
3028 int n_sets;
3029 rtx reg, insn;
3030 {
3031 register rtx p = insn;
3032 register int regno = REGNO (reg);
3033 rtx temp;
3034 /* Number of sets we have to insist on finding after INSN. */
3035 int count = n_sets - 1;
3036 int old = n_times_set[regno];
3037 int value = 0;
3038 int this;
3039
3040 /* If N_SETS hit the limit, we can't rely on its value. */
3041 if (n_sets == 127)
3042 return 0;
3043
3044 n_times_set[regno] = 0;
3045
3046 while (count > 0)
3047 {
3048 register enum rtx_code code;
3049 rtx set;
3050
3051 p = NEXT_INSN (p);
3052 code = GET_CODE (p);
3053
3054 /* If library call, skip to end of it. */
3055 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3056 p = XEXP (temp, 0);
3057
3058 this = 0;
3059 if (code == INSN
3060 && (set = single_set (p))
3061 && GET_CODE (SET_DEST (set)) == REG
3062 && REGNO (SET_DEST (set)) == regno)
3063 {
3064 this = invariant_p (SET_SRC (set));
3065 if (this != 0)
3066 value |= this;
3067 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3068 {
3069 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3070 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3071 notes are OK. */
3072 this = (CONSTANT_P (XEXP (temp, 0))
3073 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3074 && invariant_p (XEXP (temp, 0))));
3075 if (this != 0)
3076 value |= this;
3077 }
3078 }
3079 if (this != 0)
3080 count--;
3081 else if (code != NOTE)
3082 {
3083 n_times_set[regno] = old;
3084 return 0;
3085 }
3086 }
3087
3088 n_times_set[regno] = old;
3089 /* If invariant_p ever returned 2, we return 2. */
3090 return 1 + (value & 2);
3091 }
3092
3093 #if 0
3094 /* I don't think this condition is sufficient to allow INSN
3095 to be moved, so we no longer test it. */
3096
3097 /* Return 1 if all insns in the basic block of INSN and following INSN
3098 that set REG are invariant according to TABLE. */
3099
3100 static int
3101 all_sets_invariant_p (reg, insn, table)
3102 rtx reg, insn;
3103 short *table;
3104 {
3105 register rtx p = insn;
3106 register int regno = REGNO (reg);
3107
3108 while (1)
3109 {
3110 register enum rtx_code code;
3111 p = NEXT_INSN (p);
3112 code = GET_CODE (p);
3113 if (code == CODE_LABEL || code == JUMP_INSN)
3114 return 1;
3115 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3116 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3117 && REGNO (SET_DEST (PATTERN (p))) == regno)
3118 {
3119 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3120 return 0;
3121 }
3122 }
3123 }
3124 #endif /* 0 */
3125 \f
3126 /* Look at all uses (not sets) of registers in X. For each, if it is
3127 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3128 a different insn, set USAGE[REGNO] to const0_rtx. */
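/* The recorded state for each register thus moves monotonically from
   0 (no use seen) to INSN (exactly one using insn) to const0_rtx
   (used by more than one insn).  Note that several uses within a
   single insn still leave USAGE[REGNO] equal to that one insn.  */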
3129
3130 static void
3131 find_single_use_in_loop (insn, x, usage)
3132 rtx insn;
3133 rtx x;
3134 rtx *usage;
3135 {
3136 enum rtx_code code = GET_CODE (x);
3137 char *fmt = GET_RTX_FORMAT (code);
3138 int i, j;
3139
3140 if (code == REG)
3141 usage[REGNO (x)]
3142 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3143 ? const0_rtx : insn;
3144
3145 else if (code == SET)
3146 {
3147 /* Don't count SET_DEST if it is a REG; otherwise count things
3148 in SET_DEST: if a register is partially modified, it won't
3149 show up as a potential movable, so we don't care how USAGE is set
3150 for it. */
3151 if (GET_CODE (SET_DEST (x)) != REG)
3152 find_single_use_in_loop (insn, SET_DEST (x), usage);
3153 find_single_use_in_loop (insn, SET_SRC (x), usage);
3154 }
3155 else
3156 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3157 {
3158 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3159 find_single_use_in_loop (insn, XEXP (x, i), usage);
3160 else if (fmt[i] == 'E')
3161 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3162 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3163 }
3164 }
3165 \f
3166 /* Increment N_TIMES_SET at the index of each register
3167 that is modified by an insn between FROM and TO.
3168 If the value of an element of N_TIMES_SET becomes 127 or more,
3169 stop incrementing it, to avoid overflow.
3170
3171 Store in SINGLE_USAGE[I] the single insn in which register I is
3172 used, if it is only used once. Otherwise, it is set to 0 (for no
3173 uses) or const0_rtx for more than one use. This parameter may be zero,
3174 in which case this processing is not done.
3175
3176 Store in *COUNT_PTR the number of actual instructions
3177 in the loop. We use this to decide what is worth moving out. */
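/* For instance (hypothetical pseudo 109): if reg 109 is set once in
   each of two different basic blocks of the loop, the second set is
   seen with n_times_set[109] > 0 but last_set[109] == 0 (last_set is
   cleared at each block boundary), so may_not_move[109] is set.  */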
3178
3179 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3180 In that case, it is the insn that last set reg n. */
3181
3182 static void
3183 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3184 register rtx from, to;
3185 char *may_not_move;
3186 rtx *single_usage;
3187 int *count_ptr;
3188 int nregs;
3189 {
3190 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3191 register rtx insn;
3192 register int count = 0;
3193 register rtx dest;
3194
3195 bzero ((char *) last_set, nregs * sizeof (rtx));
3196 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3197 {
3198 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3199 {
3200 ++count;
3201
3202 /* If requested, record registers that have exactly one use. */
3203 if (single_usage)
3204 {
3205 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3206
3207 /* Include uses in REG_EQUAL notes. */
3208 if (REG_NOTES (insn))
3209 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3210 }
3211
3212 if (GET_CODE (PATTERN (insn)) == CLOBBER
3213 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3214 /* Don't move a reg that has an explicit clobber.
3215 We might do so sometimes, but it's not worth the pain. */
3216 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3217
3218 if (GET_CODE (PATTERN (insn)) == SET
3219 || GET_CODE (PATTERN (insn)) == CLOBBER)
3220 {
3221 dest = SET_DEST (PATTERN (insn));
3222 while (GET_CODE (dest) == SUBREG
3223 || GET_CODE (dest) == ZERO_EXTRACT
3224 || GET_CODE (dest) == SIGN_EXTRACT
3225 || GET_CODE (dest) == STRICT_LOW_PART)
3226 dest = XEXP (dest, 0);
3227 if (GET_CODE (dest) == REG)
3228 {
3229 register int regno = REGNO (dest);
3230 /* If this is the first setting of this reg
3231 in current basic block, and it was set before,
3232 it must be set in two basic blocks, so it cannot
3233 be moved out of the loop. */
3234 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3235 may_not_move[regno] = 1;
3236 /* If this is not first setting in current basic block,
3237 see if reg was used in between previous one and this.
3238 If so, neither one can be moved. */
3239 if (last_set[regno] != 0
3240 && reg_used_between_p (dest, last_set[regno], insn))
3241 may_not_move[regno] = 1;
3242 if (n_times_set[regno] < 127)
3243 ++n_times_set[regno];
3244 last_set[regno] = insn;
3245 }
3246 }
3247 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3248 {
3249 register int i;
3250 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3251 {
3252 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3253 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3254 /* Don't move a reg that has an explicit clobber.
3255 It's not worth the pain to try to do it correctly. */
3256 may_not_move[REGNO (XEXP (x, 0))] = 1;
3257
3258 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3259 {
3260 dest = SET_DEST (x);
3261 while (GET_CODE (dest) == SUBREG
3262 || GET_CODE (dest) == ZERO_EXTRACT
3263 || GET_CODE (dest) == SIGN_EXTRACT
3264 || GET_CODE (dest) == STRICT_LOW_PART)
3265 dest = XEXP (dest, 0);
3266 if (GET_CODE (dest) == REG)
3267 {
3268 register int regno = REGNO (dest);
3269 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3270 may_not_move[regno] = 1;
3271 if (last_set[regno] != 0
3272 && reg_used_between_p (dest, last_set[regno], insn))
3273 may_not_move[regno] = 1;
3274 if (n_times_set[regno] < 127)
3275 ++n_times_set[regno];
3276 last_set[regno] = insn;
3277 }
3278 }
3279 }
3280 }
3281 }
3282
3283 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3284 bzero ((char *) last_set, nregs * sizeof (rtx));
3285 }
3286 *count_ptr = count;
3287 }
3288 \f
3289 /* Given a loop that is bounded by LOOP_START and LOOP_END
3290 and that is entered at SCAN_START,
3291 return 1 if the register set in SET contained in insn INSN is used by
3292 any insn that precedes INSN in cyclic order starting
3293 from the loop entry point.
3294
3295 We don't want to use INSN_LUID here because if we restrict INSN to those
3296 that have a valid INSN_LUID, it means we cannot move an invariant out
3297 from an inner loop past two loops. */
3298
3299 static int
3300 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3301 rtx set, insn, loop_start, scan_start, loop_end;
3302 {
3303 rtx reg = SET_DEST (set);
3304 rtx p;
3305
3306 /* Scan forward checking for register usage. If we hit INSN, we
3307 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3308 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3309 {
3310 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3311 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3312 return 1;
3313
3314 if (p == loop_end)
3315 p = loop_start;
3316 }
3317
3318 return 0;
3319 }
3320 \f
3321 /* A "basic induction variable" or biv is a pseudo reg that is set
3322 (within this loop) only by incrementing or decrementing it. */
3323 /* A "general induction variable" or giv is a pseudo reg whose
3324 value is a linear function of a biv. */
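/* A C-level sketch, assuming 4-byte array elements: in
   for (i = 0; i < n; i++) a[i] = 0;
   the counter i is a biv (it is only incremented), while the address
   base + 4*i computed for a[i] is a giv, a linear function of i.  */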
3325
3326 /* Bivs are recognized by `basic_induction_var';
3327 Givs by `general_induction_var'. */
3328
3329 /* Indexed by register number, indicates whether or not register is an
3330 induction variable, and if so what type. */
3331
3332 enum iv_mode *reg_iv_type;
3333
3334 /* Indexed by register number, contains pointer to `struct induction'
3335 if register is an induction variable. This holds general info for
3336 all induction variables. */
3337
3338 struct induction **reg_iv_info;
3339
3340 /* Indexed by register number, contains pointer to `struct iv_class'
3341 if register is a basic induction variable. This holds info describing
3342 the class (a related group) of induction variables that the biv belongs
3343 to. */
3344
3345 struct iv_class **reg_biv_class;
3346
3347 /* The head of a list which links together (via the next field)
3348 every iv class for the current loop. */
3349
3350 struct iv_class *loop_iv_list;
3351
3352 /* Communication with routines called via `note_stores'. */
3353
3354 static rtx note_insn;
3355
3356 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3357
3358 static rtx addr_placeholder;
3359
3360 /* ??? Unfinished optimizations, and possible future optimizations,
3361 for the strength reduction code. */
3362
3363 /* ??? There is one more optimization you might be interested in doing: to
3364 allocate pseudo registers for frequently-accessed memory locations.
3365 If the same memory location is referenced each time around, it might
3366 be possible to copy it into a register before and out after.
3367 This is especially useful when the memory location is a variable which
3368 is in a stack slot because somewhere its address is taken. If the
3369 loop doesn't contain a function call and the variable isn't volatile,
3370 it is safe to keep the value in a register for the duration of the
3371 loop. One tricky thing is that the copying of the value back from the
3372 register has to be done on all exits from the loop. You need to check that
3373 all the exits from the loop go to the same place. */
3374
3375 /* ??? The interaction of biv elimination, and recognition of 'constant'
3376 bivs, may cause problems. */
3377
3378 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3379 performance problems.
3380
3381 Perhaps don't eliminate things that can be combined with an addressing
3382 mode. Find all givs that have the same biv, mult_val, and add_val;
3383 then for each giv, check to see if its only use dies in a following
3384 memory address. If so, generate a new memory address and check to see
3385 if it is valid. If it is valid, then store the modified memory address,
3386 otherwise, mark the giv as not done so that it will get its own iv. */
3387
3388 /* ??? Could try to optimize branches when it is known that a biv is always
3389 positive. */
3390
3391 /* ??? When replacing a biv in a compare insn, we should replace it with the
3392 closest giv so that an optimized branch can still be recognized by the combiner,
3393 e.g. the VAX acb insn. */
3394
3395 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3396 was rerun in loop_optimize whenever a register was added or moved.
3397 Also, some of the optimizations could be a little less conservative. */
3398 \f
3399 /* Perform strength reduction and induction variable elimination. */
3400
3401 /* Pseudo registers created during this function will be beyond the last
3402 valid index in several tables including n_times_set and regno_last_uid.
3403 This does not cause a problem here, because the added registers cannot be
3404 givs outside of their loop, and hence will never be reconsidered.
3405 But scan_loop must check regnos to make sure they are in bounds. */
3406
3407 static void
3408 strength_reduce (scan_start, end, loop_top, insn_count,
3409 loop_start, loop_end, unroll_p)
3410 rtx scan_start;
3411 rtx end;
3412 rtx loop_top;
3413 int insn_count;
3414 rtx loop_start;
3415 rtx loop_end;
3416 int unroll_p;
3417 {
3418 rtx p;
3419 rtx set;
3420 rtx inc_val;
3421 rtx mult_val;
3422 rtx dest_reg;
3423 /* This is 1 if current insn is not executed at least once for every loop
3424 iteration. */
3425 int not_every_iteration = 0;
3426 /* This is 1 if current insn may be executed more than once for every
3427 loop iteration. */
3428 int maybe_multiple = 0;
3429 /* Temporary list pointers for traversing loop_iv_list. */
3430 struct iv_class *bl, **backbl;
3431 /* Ratio of extra register life span we can justify
3432 for saving an instruction. More if loop doesn't call subroutines
3433 since in that case saving an insn makes more difference
3434 and more registers are available. */
3435 /* ??? could set this to last value of threshold in move_movables */
3436 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3437 /* Map of pseudo-register replacements. */
3438 rtx *reg_map;
3439 int call_seen;
3440 rtx test;
3441 rtx end_insert_before;
3442 int loop_depth = 0;
3443
3444 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3445 * sizeof (enum iv_mode));
3446 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3447 reg_iv_info = (struct induction **)
3448 alloca (max_reg_before_loop * sizeof (struct induction *));
3449 bzero ((char *) reg_iv_info, (max_reg_before_loop
3450 * sizeof (struct induction *)));
3451 reg_biv_class = (struct iv_class **)
3452 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3453 bzero ((char *) reg_biv_class, (max_reg_before_loop
3454 * sizeof (struct iv_class *)));
3455
3456 loop_iv_list = 0;
3457 addr_placeholder = gen_reg_rtx (Pmode);
3458
3459 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3460 must be put before this insn, so that they will appear in the right
3461 order (i.e. loop order).
3462
3463 If loop_end is the end of the current function, then emit a
3464 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3465 dummy note insn. */
3466 if (NEXT_INSN (loop_end) != 0)
3467 end_insert_before = NEXT_INSN (loop_end);
3468 else
3469 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3470
3471 /* Scan through loop to find all possible bivs. */
3472
3473 p = scan_start;
3474 while (1)
3475 {
3476 p = NEXT_INSN (p);
3477 /* At end of a straight-in loop, we are done.
3478 At end of a loop entered at the bottom, scan the top. */
3479 if (p == scan_start)
3480 break;
3481 if (p == end)
3482 {
3483 if (loop_top != 0)
3484 p = loop_top;
3485 else
3486 break;
3487 if (p == scan_start)
3488 break;
3489 }
3490
3491 if (GET_CODE (p) == INSN
3492 && (set = single_set (p))
3493 && GET_CODE (SET_DEST (set)) == REG)
3494 {
3495 dest_reg = SET_DEST (set);
3496 if (REGNO (dest_reg) < max_reg_before_loop
3497 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3498 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3499 {
3500 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3501 dest_reg, p, &inc_val, &mult_val))
3502 {
3503 /* It is a possible basic induction variable.
3504 Create and initialize an induction structure for it. */
3505
3506 struct induction *v
3507 = (struct induction *) alloca (sizeof (struct induction));
3508
3509 record_biv (v, p, dest_reg, inc_val, mult_val,
3510 not_every_iteration, maybe_multiple);
3511 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3512 }
3513 else if (REGNO (dest_reg) < max_reg_before_loop)
3514 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3515 }
3516 }
3517
3518 /* Past a CODE_LABEL, we get to insns that may be executed multiple
3519 times. The only way we can be sure that they can't is if every
3520 jump insn between here and the end of the loop either
3521 returns, exits the loop, is a forward jump, or is a jump
3522 to the loop start. */
3523
3524 if (GET_CODE (p) == CODE_LABEL)
3525 {
3526 rtx insn = p;
3527
3528 maybe_multiple = 0;
3529
3530 while (1)
3531 {
3532 insn = NEXT_INSN (insn);
3533 if (insn == scan_start)
3534 break;
3535 if (insn == end)
3536 {
3537 if (loop_top != 0)
3538 insn = loop_top;
3539 else
3540 break;
3541 if (insn == scan_start)
3542 break;
3543 }
3544
3545 if (GET_CODE (insn) == JUMP_INSN
3546 && GET_CODE (PATTERN (insn)) != RETURN
3547 && (! condjump_p (insn)
3548 || (JUMP_LABEL (insn) != 0
3549 && JUMP_LABEL (insn) != scan_start
3550 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3551 || INSN_UID (insn) >= max_uid_for_loop
3552 || (INSN_LUID (JUMP_LABEL (insn))
3553 < INSN_LUID (insn))))))
3554 {
3555 maybe_multiple = 1;
3556 break;
3557 }
3558 }
3559 }
3560
3561 /* Past a jump, we get to insns for which we can't count
3562 on whether they will be executed during each iteration. */
3563 /* This code appears twice in strength_reduce. There is also similar
3564 code in scan_loop. */
3565 if (GET_CODE (p) == JUMP_INSN
3566 /* If we enter the loop in the middle, and scan around to the
3567 beginning, don't set not_every_iteration for that.
3568 This can be any kind of jump, since we want to know if insns
3569 will be executed if the loop is executed. */
3570 && ! (JUMP_LABEL (p) == loop_top
3571 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3572 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3573 {
3574 rtx label = 0;
3575
3576 /* If this is a jump outside the loop, then it also doesn't
3577 matter. Check to see if the target of this branch is on the
3578 loop_number_exit_labels list. */
3579
3580 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3581 label;
3582 label = LABEL_NEXTREF (label))
3583 if (XEXP (label, 0) == JUMP_LABEL (p))
3584 break;
3585
3586 if (! label)
3587 not_every_iteration = 1;
3588 }
3589
3590 else if (GET_CODE (p) == NOTE)
3591 {
3592 /* At the virtual top of a converted loop, insns are again known to
3593 be executed each iteration: logically, the loop begins here
3594 even though the exit code has been duplicated. */
3595 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3596 not_every_iteration = 0;
3597 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3598 loop_depth++;
3599 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3600 loop_depth--;
3601 }
3602
3603 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3604 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3605 or not an insn is known to be executed each iteration of the
3606 loop, whether or not any iterations are known to occur.
3607
3608 Therefore, if we have just passed a label and have no more labels
3609 between here and the test insn of the loop, we know these insns
3610 will be executed each iteration. */
3611
3612 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3613 && no_labels_between_p (p, loop_end))
3614 not_every_iteration = 0;
3615 }
3616
3617 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3618 Make a sanity check against n_times_set. */
3619 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3620 {
3621 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3622 /* Above happens if register modified by subreg, etc. */
3623 /* Make sure it is not recognized as a basic induction var: */
3624 || n_times_set[bl->regno] != bl->biv_count
3625 /* If never incremented, it is an invariant that we decided not to
3626 move. So leave it alone. */
3627 || ! bl->incremented)
3628 {
3629 if (loop_dump_stream)
3630 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3631 bl->regno,
3632 (reg_iv_type[bl->regno] != BASIC_INDUCT
3633 ? "not induction variable"
3634 : (! bl->incremented ? "never incremented"
3635 : "count error")));
3636
3637 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3638 *backbl = bl->next;
3639 }
3640 else
3641 {
3642 backbl = &bl->next;
3643
3644 if (loop_dump_stream)
3645 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3646 }
3647 }
3648
3649 /* Exit if there are no bivs. */
3650 if (! loop_iv_list)
3651 {
3652 /* We can still unroll the loop anyway, but indicate that there is no
3653 strength reduction info available. */
3654 if (unroll_p)
3655 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3656
3657 return;
3658 }
3659
3660 /* Find initial value for each biv by searching backwards from loop_start,
3661 halting at first label. Also record any test condition. */
3662
3663 call_seen = 0;
3664 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3665 {
3666 note_insn = p;
3667
3668 if (GET_CODE (p) == CALL_INSN)
3669 call_seen = 1;
3670
3671 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3672 || GET_CODE (p) == CALL_INSN)
3673 note_stores (PATTERN (p), record_initial);
3674
3675 /* Record any test of a biv that branches around the loop if there is
3676 no store between it and the start of the loop. We only care about
3677 tests with constants and registers, and only certain of those. */
3678 if (GET_CODE (p) == JUMP_INSN
3679 && JUMP_LABEL (p) != 0
3680 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3681 && (test = get_condition_for_loop (p)) != 0
3682 && GET_CODE (XEXP (test, 0)) == REG
3683 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3684 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3685 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3686 && bl->init_insn == 0)
3687 {
3688 /* If an NE test, we have an initial value! */
3689 if (GET_CODE (test) == NE)
3690 {
3691 bl->init_insn = p;
3692 bl->init_set = gen_rtx_SET (VOIDmode,
3693 XEXP (test, 0), XEXP (test, 1));
3694 }
3695 else
3696 bl->initial_test = test;
3697 }
3698 }
3699
3700 /* Look at each biv and see if we can say anything better about its
3701 initial value from any initializing insns set up above. (This is done
3702 in two passes to avoid missing SETs in a PARALLEL.) */
3703 for (bl = loop_iv_list; bl; bl = bl->next)
3704 {
3705 rtx src;
3706 rtx note;
3707
3708 if (! bl->init_insn)
3709 continue;
3710
3711 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3712 is a constant, use the value of that. */
3713 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3714 && CONSTANT_P (XEXP (note, 0)))
3715 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3716 && CONSTANT_P (XEXP (note, 0))))
3717 src = XEXP (note, 0);
3718 else
3719 src = SET_SRC (bl->init_set);
3720
3721 if (loop_dump_stream)
3722 fprintf (loop_dump_stream,
3723 "Biv %d initialized at insn %d: initial value ",
3724 bl->regno, INSN_UID (bl->init_insn));
3725
3726 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3727 || GET_MODE (src) == VOIDmode)
3728 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3729 {
3730 bl->initial_value = src;
3731
3732 if (loop_dump_stream)
3733 {
3734 if (GET_CODE (src) == CONST_INT)
3735 {
3736 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3737 fputc ('\n', loop_dump_stream);
3738 }
3739 else
3740 {
3741 print_rtl (loop_dump_stream, src);
3742 fprintf (loop_dump_stream, "\n");
3743 }
3744 }
3745 }
3746 else
3747 {
3748 /* The biv's initial value is not a simple move,
3749 so let it keep the initial value of "itself". */
3750
3751 if (loop_dump_stream)
3752 fprintf (loop_dump_stream, "is complex\n");
3753 }
3754 }
3755
3756 /* Search the loop for general induction variables. */
3757
3758 /* A register is a giv if: it is only set once, it is a function of a
3759 biv and a constant (or invariant), and it is not a biv. */
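
/* For example, if `i' is a biv stepping by 1, then in

q = 4 * i + 10;

`q' is a giv with src_reg `i', mult_val 4 and add_val 10; the address
of `a[i]' is likewise a giv (of the DEST_ADDR kind). */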
3760
3761 not_every_iteration = 0;
3762 loop_depth = 0;
3763 p = scan_start;
3764 while (1)
3765 {
3766 p = NEXT_INSN (p);
3767 /* At end of a straight-in loop, we are done.
3768 At end of a loop entered at the bottom, scan the top. */
3769 if (p == scan_start)
3770 break;
3771 if (p == end)
3772 {
3773 if (loop_top != 0)
3774 p = loop_top;
3775 else
3776 break;
3777 if (p == scan_start)
3778 break;
3779 }
3780
3781 /* Look for a general induction variable in a register. */
3782 if (GET_CODE (p) == INSN
3783 && (set = single_set (p))
3784 && GET_CODE (SET_DEST (set)) == REG
3785 && ! may_not_optimize[REGNO (SET_DEST (set))])
3786 {
3787 rtx src_reg;
3788 rtx add_val;
3789 rtx mult_val;
3790 int benefit;
3791 rtx regnote = 0;
3792
3793 dest_reg = SET_DEST (set);
3794 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3795 continue;
3796
3797 if (/* SET_SRC is a giv. */
3798 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
3799 &mult_val, 0, &benefit)
3800 /* Equivalent expression is a giv. */
3801 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3802 && general_induction_var (XEXP (regnote, 0), &src_reg,
3803 &add_val, &mult_val, 0,
3804 &benefit)))
3805 /* Don't try to handle any regs made by loop optimization.
3806 We have nothing on them in regno_first_uid, etc. */
3807 && REGNO (dest_reg) < max_reg_before_loop
3808 /* Don't recognize a BASIC_INDUCT_VAR here. */
3809 && dest_reg != src_reg
3810 /* This must be the only place where the register is set. */
3811 && (n_times_set[REGNO (dest_reg)] == 1
3812 /* or all sets must be consecutive and make a giv. */
3813 || (benefit = consec_sets_giv (benefit, p,
3814 src_reg, dest_reg,
3815 &add_val, &mult_val))))
3816 {
3817 int count;
3818 struct induction *v
3819 = (struct induction *) alloca (sizeof (struct induction));
3820 rtx temp;
3821
3822 /* If this is a library call, increase benefit. */
3823 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3824 benefit += libcall_benefit (p);
3825
3826 /* Skip the consecutive insns, if there are any. */
3827 for (count = n_times_set[REGNO (dest_reg)] - 1;
3828 count > 0; count--)
3829 {
3830 /* If first insn of libcall sequence, skip to end.
3831 Do this at start of loop, since P is guaranteed to
3832 be an insn here. */
3833 if (GET_CODE (p) != NOTE
3834 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3835 p = XEXP (temp, 0);
3836
3837 do p = NEXT_INSN (p);
3838 while (GET_CODE (p) == NOTE);
3839 }
3840
3841 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3842 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3843 loop_end);
3844
3845 }
3846 }
3847
3848 #ifndef DONT_REDUCE_ADDR
3849 /* Look for givs which are memory addresses. */
3850 /* This resulted in worse code on a VAX 8600. I wonder if it
3851 still does. */
3852 if (GET_CODE (p) == INSN)
3853 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3854 loop_end);
3855 #endif
3856
3857 /* Update the status of whether giv can derive other givs. This can
3858 change when we pass a label or an insn that updates a biv. */
3859 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3860 || GET_CODE (p) == CODE_LABEL)
3861 update_giv_derive (p);
3862
3863 /* Past a jump, we get to insns for which we can't count
3864 on whether they will be executed during each iteration. */
3865 /* This code appears twice in strength_reduce. There is also similar
3866 code in scan_loop. */
3867 if (GET_CODE (p) == JUMP_INSN
3868 /* If we enter the loop in the middle, and scan around to the
3869 beginning, don't set not_every_iteration for that.
3870 This can be any kind of jump, since we want to know if insns
3871 will be executed if the loop is executed. */
3872 && ! (JUMP_LABEL (p) == loop_top
3873 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3874 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3875 {
3876 rtx label = 0;
3877
3878 /* If this is a jump outside the loop, then it also doesn't
3879 matter. Check to see if the target of this branch is on the
3880 loop_number_exit_labels list. */
3881
3882 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3883 label;
3884 label = LABEL_NEXTREF (label))
3885 if (XEXP (label, 0) == JUMP_LABEL (p))
3886 break;
3887
3888 if (! label)
3889 not_every_iteration = 1;
3890 }
3891
3892 else if (GET_CODE (p) == NOTE)
3893 {
3894 /* At the virtual top of a converted loop, insns are again known to
3895 be executed each iteration: logically, the loop begins here
3896 even though the exit code has been duplicated. */
3897 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3898 not_every_iteration = 0;
3899 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3900 loop_depth++;
3901 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3902 loop_depth--;
3903 }
3904
3905 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3906 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3907 or not an insn is known to be executed each iteration of the
3908 loop, whether or not any iterations are known to occur.
3909
3910 Therefore, if we have just passed a label and have no more labels
3911 between here and the test insn of the loop, we know these insns
3912 will be executed each iteration. */
3913
3914 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3915 && no_labels_between_p (p, loop_end))
3916 not_every_iteration = 0;
3917 }
3918
3919 /* Try to calculate and save the number of loop iterations. This is
3920 set to zero if the actual number can not be calculated. This must
3921 be called after all giv's have been identified, since otherwise it may
3922 fail if the iteration variable is a giv. */
3923
3924 loop_n_iterations = loop_iterations (loop_start, loop_end);
3925
3926 /* Now for each giv for which we still don't know whether or not it is
3927 replaceable, check to see if it is replaceable because its final value
3928 can be calculated. This must be done after loop_iterations is called,
3929 so that final_giv_value will work correctly. */
3930
3931 for (bl = loop_iv_list; bl; bl = bl->next)
3932 {
3933 struct induction *v;
3934
3935 for (v = bl->giv; v; v = v->next_iv)
3936 if (! v->replaceable && ! v->not_replaceable)
3937 check_final_value (v, loop_start, loop_end);
3938 }
3939
3940 /* Try to prove that the loop counter variable (if any) is always
3941 nonnegative; if so, record that fact with a REG_NONNEG note
3942 so that "decrement and branch until zero" insn can be used. */
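/* For example, a loop `for (i = 0; i < n; i++)' whose counter is not
otherwise needed may be reversed to count down from n, making the
endtest a single decrement-and-branch-until-zero insn. */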
3943 check_dbra_loop (loop_end, insn_count, loop_start);
3944
3945 #ifdef HAIFA
3946 /* Record loop variables relevant for BCT optimization before unrolling
3947 the loop. Unrolling may update part of this information, and the
3948 correct data will be used for generating the BCT. */
3949 #ifdef HAVE_decrement_and_branch_on_count
3950 if (HAVE_decrement_and_branch_on_count)
3951 analyze_loop_iterations (loop_start, loop_end);
3952 #endif
3953 #endif /* HAIFA */
3954
3955 /* Create reg_map to hold substitutions for replaceable giv regs. */
3956 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3957 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3958
3959 /* Examine each iv class for feasibility of strength reduction/induction
3960 variable elimination. */
3961
3962 for (bl = loop_iv_list; bl; bl = bl->next)
3963 {
3964 struct induction *v;
3965 int benefit;
3966 int all_reduced;
3967 rtx final_value = 0;
3968
3969 /* Test whether it will be possible to eliminate this biv
3970 provided all givs are reduced. This is possible if either
3971 the reg is not used outside the loop, or we can compute
3972 what its final value will be.
3973
3974 For architectures with a decrement_and_branch_until_zero insn,
3975 don't do this if we put a REG_NONNEG note on the endtest for
3976 this biv. */
3977
3978 /* Compare against bl->init_insn rather than loop_start.
3979 We aren't concerned with any uses of the biv between
3980 init_insn and loop_start since these won't be affected
3981 by the value of the biv elsewhere in the function, so
3982 long as init_insn doesn't use the biv itself.
3983 March 14, 1989 -- self@bayes.arc.nasa.gov */
3984
3985 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3986 && bl->init_insn
3987 && INSN_UID (bl->init_insn) < max_uid_for_loop
3988 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3989 #ifdef HAVE_decrement_and_branch_until_zero
3990 && ! bl->nonneg
3991 #endif
3992 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3993 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3994 #ifdef HAVE_decrement_and_branch_until_zero
3995 && ! bl->nonneg
3996 #endif
3997 ))
3998 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3999 threshold, insn_count);
4000 else
4001 {
4002 if (loop_dump_stream)
4003 {
4004 fprintf (loop_dump_stream,
4005 "Cannot eliminate biv %d.\n",
4006 bl->regno);
4007 fprintf (loop_dump_stream,
4008 "First use: insn %d, last use: insn %d.\n",
4009 REGNO_FIRST_UID (bl->regno),
4010 REGNO_LAST_UID (bl->regno));
4011 }
4012 }
4013
4014 /* Combine all giv's for this iv_class. */
4015 combine_givs (bl);
4016
4017 /* This will be true at the end, if all givs which depend on this
4018 biv have been strength reduced.
4019 We can't (currently) eliminate the biv unless this is so. */
4020 all_reduced = 1;
4021
4022 /* Check each giv in this class to see if we will benefit by reducing
4023 it. Skip giv's combined with others. */
4024 for (v = bl->giv; v; v = v->next_iv)
4025 {
4026 struct induction *tv;
4027
4028 if (v->ignore || v->same)
4029 continue;
4030
4031 benefit = v->benefit;
4032
4033 /* Reduce benefit if not replaceable, since we will insert
4034 a move-insn to replace the insn that calculates this giv.
4035 Don't do this unless the giv is a user variable, since it
4036 will often be marked non-replaceable because of the duplication
4037 of the exit code outside the loop. In such a case, the copies
4038 we insert are dead and will be deleted. So they don't have
4039 a cost. Similar situations exist. */
4040 /* ??? The new final_[bg]iv_value code does a much better job
4041 of finding replaceable giv's, and hence this code may no longer
4042 be necessary. */
4043 if (! v->replaceable && ! bl->eliminable
4044 && REG_USERVAR_P (v->dest_reg))
4045 benefit -= copy_cost;
4046
4047 /* Decrease the benefit to count the add-insns that we will
4048 insert to increment the reduced reg for the giv. */
4049 benefit -= add_cost * bl->biv_count;
4050
4051 /* Decide whether to strength-reduce this giv or to leave the code
4052 unchanged (recompute it from the biv each time it is used).
4053 This decision can be made independently for each giv. */
4054
4055 #ifdef AUTO_INC_DEC
4056 /* Attempt to guess whether autoincrement will handle some of the
4057 new add insns; if so, increase BENEFIT (undo the subtraction of
4058 add_cost that was done above). */
4059 if (v->giv_type == DEST_ADDR
4060 && GET_CODE (v->mult_val) == CONST_INT)
4061 {
4062 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4063 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4064 benefit += add_cost * bl->biv_count;
4065 #endif
4066 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4067 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4068 benefit += add_cost * bl->biv_count;
4069 #endif
4070 }
4071 #endif
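
/* For instance, a DEST_ADDR giv stepping through 4-byte elements has
mult_val 4; if the machine supports post-increment addressing for such
accesses, the add folds into the address for free, so the add_cost
subtracted above is added back. */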
4072
4073 /* If an insn is not to be strength reduced, then set its ignore
4074 flag, and clear all_reduced. */
4075
4076 /* A giv that depends on a reversed biv must be reduced if it is
4077 used after the loop exit, otherwise, it would have the wrong
4078 value after the loop exit. To make it simple, just reduce all
4079 of such giv's whether or not we know they are used after the loop
4080 exit. */
4081
4082 if (! flag_reduce_all_givs
4083 && v->lifetime * threshold * benefit < insn_count && ! bl->reversed)
4084 {
4085 if (loop_dump_stream)
4086 fprintf (loop_dump_stream,
4087 "giv of insn %d not worth while, %d vs %d.\n",
4088 INSN_UID (v->insn),
4089 v->lifetime * threshold * benefit, insn_count);
4090 v->ignore = 1;
4091 all_reduced = 0;
4092 }
4093 else
4094 {
4095 /* Check that we can increment the reduced giv without a
4096 multiply insn. If not, reject it. */
4097
4098 for (tv = bl->biv; tv; tv = tv->next_iv)
4099 if (tv->mult_val == const1_rtx
4100 && ! product_cheap_p (tv->add_val, v->mult_val))
4101 {
4102 if (loop_dump_stream)
4103 fprintf (loop_dump_stream,
4104 "giv of insn %d: would need a multiply.\n",
4105 INSN_UID (v->insn));
4106 v->ignore = 1;
4107 all_reduced = 0;
4108 break;
4109 }
4110 }
4111 }
4112
4113 /* Reduce each giv that we decided to reduce. */
4114
4115 for (v = bl->giv; v; v = v->next_iv)
4116 {
4117 struct induction *tv;
4118 if (! v->ignore && v->same == 0)
4119 {
4120 int auto_inc_opt = 0;
4121
4122 v->new_reg = gen_reg_rtx (v->mode);
4123
4124 #ifdef AUTO_INC_DEC
4125 /* If the target has auto-increment addressing modes, and
4126 this is an address giv, then try to put the increment
4127 immediately after its use, so that flow can create an
4128 auto-increment addressing mode. */
4129 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4130 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4131 /* We don't handle reversed biv's because bl->biv->insn
4132 does not have a valid INSN_LUID. */
4133 && ! bl->reversed
4134 && v->always_executed && ! v->maybe_multiple
4135 && INSN_UID (v->insn) < max_uid_for_loop)
4136 {
4137 /* If other giv's have been combined with this one, then
4138 this will work only if all uses of the other giv's occur
4139 before this giv's insn. This is difficult to check.
4140
4141 We simplify this by looking for the common case where
4142 there is one DEST_REG giv, and this giv's insn is the
4143 last use of the dest_reg of that DEST_REG giv. If the
4144 increment occurs after the address giv, then we can
4145 perform the optimization. (Otherwise, the increment
4146 would have to go before other_giv, and we would not be
4147 able to combine it with the address giv to get an
4148 auto-inc address.) */
4149 if (v->combined_with)
4150 {
4151 struct induction *other_giv = 0;
4152
4153 for (tv = bl->giv; tv; tv = tv->next_iv)
4154 if (tv->same == v)
4155 {
4156 if (other_giv)
4157 break;
4158 else
4159 other_giv = tv;
4160 }
4161 if (! tv && other_giv
4162 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4163 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4164 == INSN_UID (v->insn))
4165 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4166 auto_inc_opt = 1;
4167 }
4168 /* Check for case where increment is before the address
4169 giv. Do this test in "loop order". */
4170 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4171 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4172 || (INSN_LUID (bl->biv->insn)
4173 > INSN_LUID (scan_start))))
4174 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4175 && (INSN_LUID (scan_start)
4176 < INSN_LUID (bl->biv->insn))))
4177 auto_inc_opt = -1;
4178 else
4179 auto_inc_opt = 1;
4180
4181 #ifdef HAVE_cc0
4182 {
4183 rtx prev;
4184
4185 /* We can't put an insn immediately after one setting
4186 cc0, or immediately before one using cc0. */
4187 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4188 || (auto_inc_opt == -1
4189 && (prev = prev_nonnote_insn (v->insn)) != 0
4190 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4191 && sets_cc0_p (PATTERN (prev))))
4192 auto_inc_opt = 0;
4193 }
4194 #endif
4195
4196 if (auto_inc_opt)
4197 v->auto_inc_opt = 1;
4198 }
4199 #endif
4200
4201 /* For each place where the biv is incremented, add an insn
4202 to increment the new, reduced reg for the giv. */
4203 for (tv = bl->biv; tv; tv = tv->next_iv)
4204 {
4205 rtx insert_before;
4206
4207 if (! auto_inc_opt)
4208 insert_before = tv->insn;
4209 else if (auto_inc_opt == 1)
4210 insert_before = NEXT_INSN (v->insn);
4211 else
4212 insert_before = v->insn;
4213
4214 if (tv->mult_val == const1_rtx)
4215 emit_iv_add_mult (tv->add_val, v->mult_val,
4216 v->new_reg, v->new_reg, insert_before);
4217 else /* tv->mult_val == const0_rtx */
4218 /* A multiply is acceptable here
4219 since this is presumed to be seldom executed. */
4220 emit_iv_add_mult (tv->add_val, v->mult_val,
4221 v->add_val, v->new_reg, insert_before);
4222 }
4223
4224 /* Add code at loop start to initialize giv's reduced reg. */
4225
4226 emit_iv_add_mult (bl->initial_value, v->mult_val,
4227 v->add_val, v->new_reg, loop_start);
4228 }
4229 }
4230
4231 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4232 as not reduced.
4233
4234 For each giv register that can be reduced now: if replaceable,
4235 substitute reduced reg wherever the old giv occurs;
4236 else add new move insn "giv_reg = reduced_reg".
4237
4238 Also check for givs whose first use is their definition and whose
4239 last use is the definition of another giv. If so, it is likely
4240 dead and should not be used to eliminate a biv. */
4241 for (v = bl->giv; v; v = v->next_iv)
4242 {
4243 if (v->same && v->same->ignore)
4244 v->ignore = 1;
4245
4246 if (v->ignore)
4247 continue;
4248
4249 if (v->giv_type == DEST_REG
4250 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4251 {
4252 struct induction *v1;
4253
4254 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4255 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4256 v->maybe_dead = 1;
4257 }
4258
4259 /* Update expression if this was combined, in case other giv was
4260 replaced. */
4261 if (v->same)
4262 v->new_reg = replace_rtx (v->new_reg,
4263 v->same->dest_reg, v->same->new_reg);
4264
4265 if (v->giv_type == DEST_ADDR)
4266 /* Store reduced reg as the address in the memref where we found
4267 this giv. */
4268 validate_change (v->insn, v->location, v->new_reg, 0);
4269 else if (v->replaceable)
4270 {
4271 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4272
4273 #if 0
4274 /* I can no longer duplicate the original problem. Perhaps
4275 this is unnecessary now? */
4276
4277 /* Replaceable; it isn't strictly necessary to delete the old
4278 insn and emit a new one, because v->dest_reg is now dead.
4279
4280 However, especially when unrolling loops, the special
4281 handling for (set REG0 REG1) in the second cse pass may
4282 make v->dest_reg live again. To avoid this problem, emit
4283 an insn to set the original giv reg from the reduced giv.
4284 We can not delete the original insn, since it may be part
4285 of a LIBCALL, and the code in flow that eliminates dead
4286 libcalls will fail if it is deleted. */
4287 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4288 v->insn);
4289 #endif
4290 }
4291 else
4292 {
4293 /* Not replaceable; emit an insn to set the original giv reg from
4294 the reduced giv, same as above. */
4295 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4296 v->insn);
4297 }
4298
4299 /* When a loop is reversed, givs which depend on the reversed
4300 biv, and which are live outside the loop, must be set to their
4301 correct final value. This insn is only needed if the giv is
4302 not replaceable. The correct final value is the same as the
4303 value that the giv starts the reversed loop with. */
4304 if (bl->reversed && ! v->replaceable)
4305 emit_iv_add_mult (bl->initial_value, v->mult_val,
4306 v->add_val, v->dest_reg, end_insert_before);
4307 else if (v->final_value)
4308 {
4309 rtx insert_before;
4310
4311 /* If the loop has multiple exits, emit the insn before the
4312 loop to ensure that it will always be executed no matter
4313 how the loop exits. Otherwise, emit the insn after the loop,
4314 since this is slightly more efficient. */
4315 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4316 insert_before = loop_start;
4317 else
4318 insert_before = end_insert_before;
4319 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4320 insert_before);
4321
4322 #if 0
4323 /* If the insn to set the final value of the giv was emitted
4324 before the loop, then we must delete the insn inside the loop
4325 that sets it. If this is a LIBCALL, then we must delete
4326 every insn in the libcall. Note, however, that
4327 final_giv_value will only succeed when there are multiple
4328 exits if the giv is dead at each exit, hence it does not
4329 matter that the original insn remains because it is dead
4330 anyway. */
4331 /* Delete the insn inside the loop that sets the giv since
4332 the giv is now set before (or after) the loop. */
4333 delete_insn (v->insn);
4334 #endif
4335 }
4336
4337 if (loop_dump_stream)
4338 {
4339 fprintf (loop_dump_stream, "giv at %d reduced to ",
4340 INSN_UID (v->insn));
4341 print_rtl (loop_dump_stream, v->new_reg);
4342 fprintf (loop_dump_stream, "\n");
4343 }
4344 }
4345
4346 /* All the givs based on the biv bl have been reduced if they
4347 merit it. */
4348
4349 /* For each giv not marked as maybe dead that has been combined with a
4350 second giv, clear any "maybe dead" mark on that second giv.
4351 v->new_reg will either be or refer to the register of the giv it
4352 combined with.
4353
4354 Doing this clearing avoids problems in biv elimination where a
4355 giv's new_reg is a complex value that can't be put in the insn but
4356 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4357 Since the register will be used in either case, we'd prefer it be
4358 used from the simpler giv. */
4359
4360 for (v = bl->giv; v; v = v->next_iv)
4361 if (! v->maybe_dead && v->same)
4362 v->same->maybe_dead = 0;
4363
4364 /* Try to eliminate the biv, if it is a candidate.
4365 This won't work if ! all_reduced,
4366 since the givs we planned to use might not have been reduced.
4367
4368 We have to be careful that we didn't initially think we could eliminate
4369 this biv because of a giv that we now think may be dead and shouldn't
4370 be used as a biv replacement.
4371
4372 Also, there is the possibility that we may have a giv that looks
4373 like it can be used to eliminate a biv, but the resulting insn
4374 isn't valid. This can happen, for example, on the 88k, where a
4375 JUMP_INSN can compare a register only with zero. Attempts to
4376 replace it with a compare with a constant will fail.
4377
4378 Note that in cases where this call fails, we may have replaced some
4379 of the occurrences of the biv with a giv, but no harm was done in
4380 doing so in the rare cases where it can occur. */
4381
4382 if (all_reduced == 1 && bl->eliminable
4383 && maybe_eliminate_biv (bl, loop_start, end, 1,
4384 threshold, insn_count))
4386 {
4387 /* ??? If we created a new test to bypass the loop entirely,
4388 or otherwise drop straight in, based on this test, then
4389 we might want to rewrite it also. This way some later
4390 pass has more hope of removing the initialization of this
4391 biv entirely. */
4392
4393 /* If final_value != 0, then the biv may be used after loop end
4394 and we must emit an insn to set it just in case.
4395
4396 Reversed bivs already have an insn after the loop setting their
4397 value, so we don't need another one. We can't calculate the
4398 proper final value for such a biv here anyways. */
4399 if (final_value != 0 && ! bl->reversed)
4400 {
4401 rtx insert_before;
4402
4403 /* If the loop has multiple exits, emit the insn before the
4404 loop to ensure that it will always be executed no matter
4405 how the loop exits. Otherwise, emit the insn after the
4406 loop, since this is slightly more efficient. */
4407 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4408 insert_before = loop_start;
4409 else
4410 insert_before = end_insert_before;
4411
4412 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4413 insert_before);
4414 }
4415
4416 #if 0
4417 /* Delete all of the instructions inside the loop which set
4418 the biv, as they are all dead. It is safe to delete them,
4419 because an insn setting a biv will never be part of a libcall. */
4420 /* However, deleting them will invalidate the regno_last_uid info,
4421 so keeping them around is more convenient. Final_biv_value
4422 will only succeed when there are multiple exits if the biv
4423 is dead at each exit, hence it does not matter that the original
4424 insn remains, because it is dead anyway. */
4425 for (v = bl->biv; v; v = v->next_iv)
4426 delete_insn (v->insn);
4427 #endif
4428
4429 if (loop_dump_stream)
4430 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4431 bl->regno);
4432 }
4433 }
4434
4435 /* Go through all the instructions in the loop, making all the
4436 register substitutions scheduled in REG_MAP. */
4437
4438 for (p = loop_start; p != end; p = NEXT_INSN (p))
4439 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4440 || GET_CODE (p) == CALL_INSN)
4441 {
4442 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4443 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4444 INSN_CODE (p) = -1;
4445 }
4446
4447 /* Unroll loops from within strength reduction so that we can use the
4448 induction variable information that strength_reduce has already
4449 collected. */
4450
4451 if (unroll_p)
4452 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4453
4454 #ifdef HAIFA
4455 /* Instrument the loop with a BCT insn. */
4456 #ifdef HAVE_decrement_and_branch_on_count
4457 if (HAVE_decrement_and_branch_on_count)
4458 insert_bct (loop_start, loop_end);
4459 #endif
4460 #endif /* HAIFA */
4461
4462 if (loop_dump_stream)
4463 fprintf (loop_dump_stream, "\n");
4464 }
4465 \f
4466 /* Return 1 if X is a valid source for an initial value (or as value being
4467 compared against in an initial test).
4468
4469 X must be either a register or constant and must not be clobbered between
4470 the current insn and the start of the loop.
4471
4472 INSN is the insn containing X. */
4473
4474 static int
4475 valid_initial_value_p (x, insn, call_seen, loop_start)
4476 rtx x;
4477 rtx insn;
4478 int call_seen;
4479 rtx loop_start;
4480 {
4481 if (CONSTANT_P (x))
4482 return 1;
4483
4484 /* Only consider pseudos we know about initialized in insns whose luids
4485 we know. */
4486 if (GET_CODE (x) != REG
4487 || REGNO (x) >= max_reg_before_loop)
4488 return 0;
4489
4490 /* Don't use call-clobbered registers across a call which clobbers them.
4491 On some machines, don't use any hard registers at all. */
4492 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4493 && (SMALL_REGISTER_CLASSES
4494 || (call_used_regs[REGNO (x)] && call_seen)))
4495 return 0;
4496
4497 /* Don't use registers that have been clobbered before the start of the
4498 loop. */
4499 if (reg_set_between_p (x, insn, loop_start))
4500 return 0;
4501
4502 return 1;
4503 }
4504 \f
4505 /* Scan X for memory refs and check each memory address
4506 as a possible giv. INSN is the insn whose pattern X comes from.
4507 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4508 every loop iteration. */
4509
4510 static void
4511 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4512 rtx x;
4513 rtx insn;
4514 int not_every_iteration;
4515 rtx loop_start, loop_end;
4516 {
4517 register int i, j;
4518 register enum rtx_code code;
4519 register char *fmt;
4520
4521 if (x == 0)
4522 return;
4523
4524 code = GET_CODE (x);
4525 switch (code)
4526 {
4527 case REG:
4528 case CONST_INT:
4529 case CONST:
4530 case CONST_DOUBLE:
4531 case SYMBOL_REF:
4532 case LABEL_REF:
4533 case PC:
4534 case CC0:
4535 case ADDR_VEC:
4536 case ADDR_DIFF_VEC:
4537 case USE:
4538 case CLOBBER:
4539 return;
4540
4541 case MEM:
4542 {
4543 rtx src_reg;
4544 rtx add_val;
4545 rtx mult_val;
4546 int benefit;
4547
4548 /* This code used to disable creating GIVs with mult_val == 1 and
4549 add_val == 0. However, this leads to lost optimizations when
4550 it comes time to combine a set of related DEST_ADDR GIVs, since
4551 this one would not be seen. */
4552
4553 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
4554 &mult_val, 1, &benefit))
4555 {
4556 /* Found one; record it. */
4557 struct induction *v
4558 = (struct induction *) oballoc (sizeof (struct induction));
4559
4560 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4561 add_val, benefit, DEST_ADDR, not_every_iteration,
4562 &XEXP (x, 0), loop_start, loop_end);
4563
4564 v->mem_mode = GET_MODE (x);
4565 }
4566 }
4567 return;
4568
4569 default:
4570 break;
4571 }
4572
4573 /* Recursively scan the subexpressions for other mem refs. */
4574
4575 fmt = GET_RTX_FORMAT (code);
4576 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4577 if (fmt[i] == 'e')
4578 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4579 loop_end);
4580 else if (fmt[i] == 'E')
4581 for (j = 0; j < XVECLEN (x, i); j++)
4582 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4583 loop_start, loop_end);
4584 }
4585 \f
4586 /* Fill in the data about one biv update.
4587 V is the `struct induction' in which we record the biv. (It is
4588 allocated by the caller, with alloca.)
4589 INSN is the insn that sets it.
4590 DEST_REG is the biv's reg.
4591
4592 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4593 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4594 being set to INC_VAL.
4595
4596 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4597 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4598 can be executed more than once per iteration. If MAYBE_MULTIPLE
4599 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4600 executed exactly once per iteration. */
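
/* E.g. for the increment `i = i + 4' we record mult_val == const1_rtx
and inc_val == (const_int 4); for a plain store `i = n' of a loop
invariant we record mult_val == const0_rtx and inc_val == n. */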
4601
4602 static void
4603 record_biv (v, insn, dest_reg, inc_val, mult_val,
4604 not_every_iteration, maybe_multiple)
4605 struct induction *v;
4606 rtx insn;
4607 rtx dest_reg;
4608 rtx inc_val;
4609 rtx mult_val;
4610 int not_every_iteration;
4611 int maybe_multiple;
4612 {
4613 struct iv_class *bl;
4614
4615 v->insn = insn;
4616 v->src_reg = dest_reg;
4617 v->dest_reg = dest_reg;
4618 v->mult_val = mult_val;
4619 v->add_val = inc_val;
4620 v->mode = GET_MODE (dest_reg);
4621 v->always_computable = ! not_every_iteration;
4622 v->always_executed = ! not_every_iteration;
4623 v->maybe_multiple = maybe_multiple;
4624
4625 /* Add this to the reg's iv_class, creating a class
4626 if this is the first incrementation of the reg. */
4627
4628 bl = reg_biv_class[REGNO (dest_reg)];
4629 if (bl == 0)
4630 {
4631 /* Create and initialize new iv_class. */
4632
4633 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4634
4635 bl->regno = REGNO (dest_reg);
4636 bl->biv = 0;
4637 bl->giv = 0;
4638 bl->biv_count = 0;
4639 bl->giv_count = 0;
4640
4641 /* Set initial value to the reg itself. */
4642 bl->initial_value = dest_reg;
4643 /* We haven't seen the initializing insn yet. */
4644 bl->init_insn = 0;
4645 bl->init_set = 0;
4646 bl->initial_test = 0;
4647 bl->incremented = 0;
4648 bl->eliminable = 0;
4649 bl->nonneg = 0;
4650 bl->reversed = 0;
4651 bl->total_benefit = 0;
4652
4653 /* Add this class to loop_iv_list. */
4654 bl->next = loop_iv_list;
4655 loop_iv_list = bl;
4656
4657 /* Put it in the array of biv register classes. */
4658 reg_biv_class[REGNO (dest_reg)] = bl;
4659 }
4660
4661 /* Update IV_CLASS entry for this biv. */
4662 v->next_iv = bl->biv;
4663 bl->biv = v;
4664 bl->biv_count++;
4665 if (mult_val == const1_rtx)
4666 bl->incremented = 1;
4667
4668 if (loop_dump_stream)
4669 {
4670 fprintf (loop_dump_stream,
4671 "Insn %d: possible biv, reg %d,",
4672 INSN_UID (insn), REGNO (dest_reg));
4673 if (GET_CODE (inc_val) == CONST_INT)
4674 {
4675 fprintf (loop_dump_stream, " const =");
4676 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4677 fputc ('\n', loop_dump_stream);
4678 }
4679 else
4680 {
4681 fprintf (loop_dump_stream, " const = ");
4682 print_rtl (loop_dump_stream, inc_val);
4683 fprintf (loop_dump_stream, "\n");
4684 }
4685 }
4686 }
4687 \f
4688 /* Fill in the data about one giv.
4689 V is the `struct induction' in which we record the giv. (It is
4690 allocated by the caller, with alloca or oballoc.)
4691 INSN is the insn that sets it.
4692 BENEFIT estimates the savings from deleting this insn.
4693 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4694 into a register or is used as a memory address.
4695
4696 SRC_REG is the biv reg which the giv is computed from.
4697 DEST_REG is the giv's reg (if the giv is stored in a reg).
4698 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4699 LOCATION points to the place where this giv's value appears in INSN. */
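
/* That is, the giv's value is always MULT_VAL * SRC_REG + ADD_VAL;
e.g. the address `a + 4*i' would be recorded with src_reg `i',
mult_val 4 and add_val `a'. */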
4700
4701 static void
4702 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4703 type, not_every_iteration, location, loop_start, loop_end)
4704 struct induction *v;
4705 rtx insn;
4706 rtx src_reg;
4707 rtx dest_reg;
4708 rtx mult_val, add_val;
4709 int benefit;
4710 enum g_types type;
4711 int not_every_iteration;
4712 rtx *location;
4713 rtx loop_start, loop_end;
4714 {
4715 struct induction *b;
4716 struct iv_class *bl;
4717 rtx set = single_set (insn);
4718
4719 v->insn = insn;
4720 v->src_reg = src_reg;
4721 v->giv_type = type;
4722 v->dest_reg = dest_reg;
4723 v->mult_val = mult_val;
4724 v->add_val = add_val;
4725 v->benefit = benefit;
4726 v->location = location;
4727 v->cant_derive = 0;
4728 v->combined_with = 0;
4729 v->maybe_multiple = 0;
4730 v->maybe_dead = 0;
4731 v->derive_adjustment = 0;
4732 v->same = 0;
4733 v->ignore = 0;
4734 v->new_reg = 0;
4735 v->final_value = 0;
4736 v->same_insn = 0;
4737 v->auto_inc_opt = 0;
4738 v->unrolled = 0;
4739 v->shared = 0;
4740
4741 /* The v->always_computable field is used in update_giv_derive, to
4742 determine whether a giv can be used to derive another giv. For a
4743 DEST_REG giv, INSN computes a new value for the giv, so its value
4744 isn't computable if INSN isn't executed every iteration.
4745 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4746 it does not compute a new value. Hence the value is always computable
4747 regardless of whether INSN is executed each iteration. */
4748
4749 if (type == DEST_ADDR)
4750 v->always_computable = 1;
4751 else
4752 v->always_computable = ! not_every_iteration;
4753
4754 v->always_executed = ! not_every_iteration;
4755
4756 if (type == DEST_ADDR)
4757 {
4758 v->mode = GET_MODE (*location);
4759 v->lifetime = 1;
4760 v->times_used = 1;
4761 }
4762 else /* type == DEST_REG */
4763 {
4764 v->mode = GET_MODE (SET_DEST (set));
4765
4766 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4767 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4768
4769 v->times_used = n_times_used[REGNO (dest_reg)];
4770
4771 /* If the lifetime is zero, it means that this register is
4772 really a dead store. So mark this as a giv that can be
4773 ignored. This will not prevent the biv from being eliminated. */
4774 if (v->lifetime == 0)
4775 v->ignore = 1;
4776
4777 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4778 reg_iv_info[REGNO (dest_reg)] = v;
4779 }
4780
4781 /* Add the giv to the class of givs computed from one biv. */
4782
4783 bl = reg_biv_class[REGNO (src_reg)];
4784 if (bl)
4785 {
4786 v->next_iv = bl->giv;
4787 bl->giv = v;
4788 /* Don't count DEST_ADDR. This is supposed to count the number of
4789 insns that calculate givs. */
4790 if (type == DEST_REG)
4791 bl->giv_count++;
4792 bl->total_benefit += benefit;
4793 }
4794 else
4795 /* Fatal error, biv missing for this giv? */
4796 abort ();
4797
4798 if (type == DEST_ADDR)
4799 v->replaceable = 1;
4800 else
4801 {
4802 /* The giv can be replaced outright by the reduced register only if all
4803 of the following conditions are true:
4804 - the insn that sets the giv is always executed on any iteration
4805 on which the giv is used at all
4806 (there are two ways to deduce this:
4807 either the insn is executed on every iteration,
4808 or all uses follow that insn in the same basic block),
4809 - the giv is not used outside the loop
4810 - no assignments to the biv occur during the giv's lifetime. */
4811
4812 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4813 /* Previous line always fails if INSN was moved by loop opt. */
4814 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4815 && (! not_every_iteration
4816 || last_use_this_basic_block (dest_reg, insn)))
4817 {
4818 /* Now check that there are no assignments to the biv within the
4819 giv's lifetime. This requires two separate checks. */
4820
4821 /* Check each biv update, and fail if any are between the first
4822 and last use of the giv.
4823
4824 If this loop contains an inner loop that was unrolled, then
4825 the insn modifying the biv may have been emitted by the loop
4826 unrolling code, and hence does not have a valid luid. Just
4827 mark the biv as not replaceable in this case. It is not very
4828 useful as a biv, because it is used in two different loops.
4829 It is very unlikely that we would be able to optimize the giv
4830 using this biv anyway. */
4831
4832 v->replaceable = 1;
4833 for (b = bl->biv; b; b = b->next_iv)
4834 {
4835 if (INSN_UID (b->insn) >= max_uid_for_loop
4836 || ((uid_luid[INSN_UID (b->insn)]
4837 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4838 && (uid_luid[INSN_UID (b->insn)]
4839 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4840 {
4841 v->replaceable = 0;
4842 v->not_replaceable = 1;
4843 break;
4844 }
4845 }
4846
4847 /* If there are any backwards branches that go from after the
4848 biv update to before it, then this giv is not replaceable. */
4849 if (v->replaceable)
4850 for (b = bl->biv; b; b = b->next_iv)
4851 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4852 {
4853 v->replaceable = 0;
4854 v->not_replaceable = 1;
4855 break;
4856 }
4857 }
4858 else
4859 {
4860 /* May still be replaceable, we don't have enough info here to
4861 decide. */
4862 v->replaceable = 0;
4863 v->not_replaceable = 0;
4864 }
4865 }
4866
4867 /* Record whether the add_val contains a const_int, for later use by
4868 combine_givs. */
4869 {
4870 rtx tem = add_val;
4871
4872 v->no_const_addval = 1;
4873 if (tem == const0_rtx)
4874 ;
4875 else if (GET_CODE (tem) == CONST_INT)
4876 v->no_const_addval = 0;
4877 else if (GET_CODE (tem) == PLUS)
4878 {
4879 while (1)
4880 {
4881 if (GET_CODE (XEXP (tem, 0)) == PLUS)
4882 tem = XEXP (tem, 0);
4883 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
4884 tem = XEXP (tem, 1);
4885 else
4886 break;
4887 }
4888 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
4889 v->no_const_addval = 0;
4890 }
4891 }
4892
4893 if (loop_dump_stream)
4894 {
4895 if (type == DEST_REG)
4896 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4897 INSN_UID (insn), REGNO (dest_reg));
4898 else
4899 fprintf (loop_dump_stream, "Insn %d: dest address",
4900 INSN_UID (insn));
4901
4902 fprintf (loop_dump_stream, " src reg %d benefit %d",
4903 REGNO (src_reg), v->benefit);
4904 fprintf (loop_dump_stream, " used %d lifetime %d",
4905 v->times_used, v->lifetime);
4906
4907 if (v->replaceable)
4908 fprintf (loop_dump_stream, " replaceable");
4909
4910 if (v->no_const_addval)
4911 fprintf (loop_dump_stream, " ncav");
4912
4913 if (GET_CODE (mult_val) == CONST_INT)
4914 {
4915 fprintf (loop_dump_stream, " mult ");
4916 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4917 }
4918 else
4919 {
4920 fprintf (loop_dump_stream, " mult ");
4921 print_rtl (loop_dump_stream, mult_val);
4922 }
4923
4924 if (GET_CODE (add_val) == CONST_INT)
4925 {
4926 fprintf (loop_dump_stream, " add ");
4927 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4928 }
4929 else
4930 {
4931 fprintf (loop_dump_stream, " add ");
4932 print_rtl (loop_dump_stream, add_val);
4933 }
4934 }
4935
4936 if (loop_dump_stream)
4937 fprintf (loop_dump_stream, "\n");
4938
4939 }
4940
4941
4942 /* All this does is determine whether a giv can be made replaceable because
4943 its final value can be calculated. This code can not be part of record_giv
4944 above, because final_giv_value requires that the number of loop iterations
4945 be known, and that can not be accurately calculated until after all givs
4946 have been identified. */
4947
4948 static void
4949 check_final_value (v, loop_start, loop_end)
4950 struct induction *v;
4951 rtx loop_start, loop_end;
4952 {
4953 struct iv_class *bl;
4954 rtx final_value = 0;
4955
4956 bl = reg_biv_class[REGNO (v->src_reg)];
4957
4958 /* DEST_ADDR givs will never reach here, because they are always marked
4959 replaceable above in record_giv. */
4960
4961 /* The giv can be replaced outright by the reduced register only if all
4962 of the following conditions are true:
4963 - the insn that sets the giv is always executed on any iteration
4964 on which the giv is used at all
4965 (there are two ways to deduce this:
4966 either the insn is executed on every iteration,
4967 or all uses follow that insn in the same basic block),
4968 - its final value can be calculated (this condition is different
4969 from the one above in record_giv)
4970 - no assignments to the biv occur during the giv's lifetime. */
4971
4972 #if 0
4973 /* This is only called now when replaceable is known to be false. */
4974 /* Clear replaceable, so that it won't confuse final_giv_value. */
4975 v->replaceable = 0;
4976 #endif
4977
4978 if ((final_value = final_giv_value (v, loop_start, loop_end))
4979 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4980 {
4981 int biv_increment_seen = 0;
4982 rtx p = v->insn;
4983 rtx last_giv_use;
4984
4985 v->replaceable = 1;
4986
4987 /* When trying to determine whether or not a biv increment occurs
4988 during the lifetime of the giv, we can ignore uses of the variable
4989 outside the loop because final_value is true. Hence we can not
4990 use regno_last_uid and regno_first_uid as above in record_giv. */
4991
4992 /* Search the loop to determine whether any assignments to the
4993 biv occur during the giv's lifetime. Start with the insn
4994 that sets the giv, and search around the loop until we come
4995 back to that insn again.
4996
4997 Also fail if there is a jump within the giv's lifetime that jumps
4998 to somewhere outside the lifetime but still within the loop. This
4999 catches spaghetti code where the execution order is not linear, and
5000 hence the above test fails. Here we assume that the giv lifetime
5001 does not extend from one iteration of the loop to the next, so as
5002 to make the test easier. Since the lifetime isn't known yet,
5003 this requires two loops. See also record_giv above. */
5004
5005 last_giv_use = v->insn;
5006
5007 while (1)
5008 {
5009 p = NEXT_INSN (p);
5010 if (p == loop_end)
5011 p = NEXT_INSN (loop_start);
5012 if (p == v->insn)
5013 break;
5014
5015 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5016 || GET_CODE (p) == CALL_INSN)
5017 {
5018 if (biv_increment_seen)
5019 {
5020 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5021 {
5022 v->replaceable = 0;
5023 v->not_replaceable = 1;
5024 break;
5025 }
5026 }
5027 else if (reg_set_p (v->src_reg, PATTERN (p)))
5028 biv_increment_seen = 1;
5029 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5030 last_giv_use = p;
5031 }
5032 }
5033
5034 /* Now that the lifetime of the giv is known, check for branches
5035 from within the lifetime to outside the lifetime if it is still
5036 replaceable. */
5037
5038 if (v->replaceable)
5039 {
5040 p = v->insn;
5041 while (1)
5042 {
5043 p = NEXT_INSN (p);
5044 if (p == loop_end)
5045 p = NEXT_INSN (loop_start);
5046 if (p == last_giv_use)
5047 break;
5048
5049 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5050 && LABEL_NAME (JUMP_LABEL (p))
5051 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5052 || (INSN_UID (v->insn) >= max_uid_for_loop)
5053 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5054 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5055 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5056 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5057 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5058 {
5059 v->replaceable = 0;
5060 v->not_replaceable = 1;
5061
5062 if (loop_dump_stream)
5063 fprintf (loop_dump_stream,
5064 "Found branch outside giv lifetime.\n");
5065
5066 break;
5067 }
5068 }
5069 }
5070
5071 /* If it is replaceable, then save the final value. */
5072 if (v->replaceable)
5073 v->final_value = final_value;
5074 }
5075
5076 if (loop_dump_stream && v->replaceable)
5077 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5078 INSN_UID (v->insn), REGNO (v->dest_reg));
5079 }
5080 \f
5081 /* Update the status of whether a giv can derive other givs.
5082
5083 We need to do something special if there is or may be an update to the biv
5084 between the time the giv is defined and the time it is used to derive
5085 another giv.
5086
5087 In addition, a giv that is only conditionally set is not allowed to
5088 derive another giv once a label has been passed.
5089
5090 The cases we look at are when a label or an update to a biv is passed. */
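
/* For example, if giv G was recorded as 4*I + A and the biv I is then
incremented by 1 before G is used to derive another giv, the derived
value is off by 4*1; that product is accumulated in derive_adjustment
so the derivation can be compensated. */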
5091
5092 static void
5093 update_giv_derive (p)
5094 rtx p;
5095 {
5096 struct iv_class *bl;
5097 struct induction *biv, *giv;
5098 rtx tem;
5099 int dummy;
5100
5101 /* Search all IV classes, then all bivs, and finally all givs.
5102
5103 There are three cases we are concerned with. First we have the situation
5104 of a giv that is only updated conditionally. In that case, it may not
5105 derive any givs after a label is passed.
5106
5107 The second case is when a biv update occurs, or may occur, after the
5108 definition of a giv. For certain biv updates (see below) that are
5109 known to occur between the giv definition and use, we can adjust the
5110 giv definition. For others, or when the biv update is conditional,
5111 we must prevent the giv from deriving any other givs. There are two
5112 sub-cases within this case.
5113
5114 If this is a label, we are concerned with any biv update that is done
5115 conditionally, since it may be done after the giv is defined followed by
5116 a branch here (actually, we need to pass both a jump and a label, but
5117 this extra tracking doesn't seem worth it).
5118
5119 If this is a jump, we are concerned about any biv update that may be
5120 executed multiple times. We are actually only concerned about
5121 backward jumps, but it is probably not worth performing the test
5122 on the jump again here.
5123
5124 If this is a biv update, we must adjust the giv status to show that a
5125 subsequent biv update was performed. If this adjustment cannot be done,
5126 the giv cannot derive further givs. */
5127
5128 for (bl = loop_iv_list; bl; bl = bl->next)
5129 for (biv = bl->biv; biv; biv = biv->next_iv)
5130 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5131 || biv->insn == p)
5132 {
5133 for (giv = bl->giv; giv; giv = giv->next_iv)
5134 {
5135 /* If cant_derive is already true, there is no point in
5136 checking all of these conditions again. */
5137 if (giv->cant_derive)
5138 continue;
5139
5140 /* If this giv is conditionally set and we have passed a label,
5141 it cannot derive anything. */
5142 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5143 giv->cant_derive = 1;
5144
5145 /* Skip givs that have mult_val == 0, since
5146 they are really invariants. Also skip those that are
5147 replaceable, since we know their lifetime doesn't contain
5148 any biv update. */
5149 else if (giv->mult_val == const0_rtx || giv->replaceable)
5150 continue;
5151
5152 /* The only way we can allow this giv to derive another
5153 is if this is a biv increment and we can form the product
5154 of biv->add_val and giv->mult_val. In this case, we will
5155 be able to compute a compensation. */
5156 else if (biv->insn == p)
5157 {
5158 tem = 0;
5159
5160 if (biv->mult_val == const1_rtx)
5161 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5162 biv->add_val,
5163 giv->mult_val),
5164 &dummy);
5165
5166 if (tem && giv->derive_adjustment)
5167 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5168 giv->derive_adjustment),
5169 &dummy);
5170 if (tem)
5171 giv->derive_adjustment = tem;
5172 else
5173 giv->cant_derive = 1;
5174 }
5175 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5176 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5177 giv->cant_derive = 1;
5178 }
5179 }
5180 }
5181 \f
5182 /* Check whether an insn is an increment legitimate for a basic induction var.
5183 X is the source of insn P, or a part of it.
5184 MODE is the mode in which X should be interpreted.
5185
5186 DEST_REG is the putative biv, also the destination of the insn.
5187 We accept patterns of these forms:
5188 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5189 REG = INVARIANT + REG
5190
5191 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5192 and store the additive term into *INC_VAL.
5193
5194 If X is an assignment of an invariant into DEST_REG, we set
5195 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5196
5197 We also want to detect a BIV when it corresponds to a variable
5198 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5199 of the variable may be a PLUS that adds a SUBREG of that variable to
5200 an invariant and then sign- or zero-extends the result of the PLUS
5201 into the variable.
5202
5203 Most GIVs in such cases will be in the promoted mode, since that is
5204 probably the natural computation mode (and almost certainly the mode
5205 used for addresses) on the machine. So we view the pseudo-reg containing
5206 the variable as the BIV, as if it were simply incremented.
5207
5208 Note that treating the entire pseudo as a BIV will result in making
5209 simple increments to any GIVs based on it. However, if the variable
5210 overflows in its declared mode but not its promoted mode, the result will
5211 be incorrect. This is acceptable if the variable is signed, since
5212 overflows in such cases are undefined, but not if it is unsigned, since
5213 those overflows are defined. So we only check for SIGN_EXTEND and
5214 not ZERO_EXTEND.
5215
5216 If we cannot find a biv, we return 0. */
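
/* For example (illustrative only), each of these insn patterns is
   accepted as a biv increment for register i:

       (set (reg i) (plus (reg i) (const_int 4)))          i = i + 4
       (set (reg i) (plus (symbol_ref X) (reg i)))         i = X + i

   whereas i = i * 2 is rejected, since the step is not additive.  */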
5217
5218 static int
5219 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5220 register rtx x;
5221 enum machine_mode mode;
5222 rtx dest_reg;
5223 rtx p;
5224 rtx *inc_val;
5225 rtx *mult_val;
5226 {
5227 register enum rtx_code code;
5228 rtx arg;
5229 rtx insn, set = 0;
5230
5231 code = GET_CODE (x);
5232 switch (code)
5233 {
5234 case PLUS:
5235 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5236 || (GET_CODE (XEXP (x, 0)) == SUBREG
5237 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5238 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5239 arg = XEXP (x, 1);
5240 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5241 || (GET_CODE (XEXP (x, 1)) == SUBREG
5242 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5243 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5244 arg = XEXP (x, 0);
5245 else
5246 return 0;
5247
5248 if (invariant_p (arg) != 1)
5249 return 0;
5250
5251 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5252 *mult_val = const1_rtx;
5253 return 1;
5254
5255 case SUBREG:
5256 /* If this is a SUBREG for a promoted variable, check the inner
5257 value. */
5258 if (SUBREG_PROMOTED_VAR_P (x))
5259 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5260 dest_reg, p, inc_val, mult_val);
5261 return 0;
5262
5263 case REG:
5264 /* If this register is assigned in a previous insn, look at its
5265 source, but don't go outside the loop or past a label. */
5266
5267 insn = p;
5268 while (1)
5269 {
5270 do {
5271 insn = PREV_INSN (insn);
5272 } while (insn && GET_CODE (insn) == NOTE
5273 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5274
5275 if (!insn)
5276 break;
5277 set = single_set (insn);
5278 if (set == 0)
5279 break;
5280
5281 if ((SET_DEST (set) == x
5282 || (GET_CODE (SET_DEST (set)) == SUBREG
5283 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5284 <= UNITS_PER_WORD)
5285 && SUBREG_REG (SET_DEST (set)) == x))
5286 && basic_induction_var (SET_SRC (set),
5287 (GET_MODE (SET_SRC (set)) == VOIDmode
5288 ? GET_MODE (x)
5289 : GET_MODE (SET_SRC (set))),
5290 dest_reg, insn,
5291 inc_val, mult_val))
5292 return 1;
5293 }
5294 /* ... fall through ... */
5295
5296 /* Can accept constant setting of biv only when inside the innermost loop.
5297 Otherwise, a biv of an inner loop may be incorrectly recognized
5298 as a biv of the outer loop,
5299 causing code to be moved INTO the inner loop. */
5300 case MEM:
5301 if (invariant_p (x) != 1)
5302 return 0;
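/* ... fall through ... */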
5303 case CONST_INT:
5304 case SYMBOL_REF:
5305 case CONST:
5306 /* convert_modes aborts if we try to convert to or from CCmode, so just
5307 exclude that case. It is very unlikely that a condition code value
5308 would be a useful iterator anyway. */
5309 if (loops_enclosed == 1
5310 && GET_MODE_CLASS (mode) != MODE_CC
5311 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5312 {
5313 /* Possible bug here? Perhaps we don't know the mode of X. */
5314 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5315 *mult_val = const0_rtx;
5316 return 1;
5317 }
5318 else
5319 return 0;
5320
5321 case SIGN_EXTEND:
5322 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5323 dest_reg, p, inc_val, mult_val);
5324
5325 case ASHIFTRT:
5326 /* Similar, since this can be a sign extension. */
5327 for (insn = PREV_INSN (p);
5328 (insn && GET_CODE (insn) == NOTE
5329 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5330 insn = PREV_INSN (insn))
5331 ;
5332
5333 if (insn)
5334 set = single_set (insn);
5335
5336 if (set && SET_DEST (set) == XEXP (x, 0)
5337 && GET_CODE (XEXP (x, 1)) == CONST_INT
5338 && INTVAL (XEXP (x, 1)) >= 0
5339 && GET_CODE (SET_SRC (set)) == ASHIFT
5340 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5341 return basic_induction_var (XEXP (SET_SRC (set), 0),
5342 GET_MODE (XEXP (x, 0)),
5343 dest_reg, insn, inc_val, mult_val);
5344 return 0;
5345
5346 default:
5347 return 0;
5348 }
5349 }
5350 \f
5351 /* A general induction variable (giv) is any quantity that is a linear
5352 function of a basic induction variable,
5353 i.e. giv = biv * mult_val + add_val.
5354 The coefficients can be any loop invariant quantity.
5355 A giv need not be computed directly from the biv;
5356 it can be computed by way of other givs. */
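
/* For example, in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the address computation &a + 4*i (assuming 4-byte array elements) is
   a giv with src_reg == i, mult_val == 4 and add_val == &a.  */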
5357
5358 /* Determine whether X computes a giv.
5359 If it does, return a nonzero value
5360 which is the benefit from eliminating the computation of X;
5361 set *SRC_REG to the register of the biv that it is computed from;
5362 set *ADD_VAL and *MULT_VAL to the coefficients,
5363 such that the value of X is biv * mult + add. */
5364
5365 static int
5366 general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
5367 rtx x;
5368 rtx *src_reg;
5369 rtx *add_val;
5370 rtx *mult_val;
5371 int is_addr;
5372 int *pbenefit;
5373 {
5374 rtx orig_x = x;
5375 char *storage;
5376
5377 /* If this is an invariant, forget it, it isn't a giv. */
5378 if (invariant_p (x) == 1)
5379 return 0;
5380
5381 /* See if the expression could be a giv and get its form.
5382 Mark our place on the obstack in case we don't find a giv. */
5383 storage = (char *) oballoc (0);
5384 *pbenefit = 0;
5385 x = simplify_giv_expr (x, pbenefit);
5386 if (x == 0)
5387 {
5388 obfree (storage);
5389 return 0;
5390 }
5391
5392 switch (GET_CODE (x))
5393 {
5394 case USE:
5395 case CONST_INT:
5396 /* Since this is now an invariant and wasn't before, it must be a giv
5397 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5398 with. */
5399 *src_reg = loop_iv_list->biv->dest_reg;
5400 *mult_val = const0_rtx;
5401 *add_val = x;
5402 break;
5403
5404 case REG:
5405 /* This is equivalent to a BIV. */
5406 *src_reg = x;
5407 *mult_val = const1_rtx;
5408 *add_val = const0_rtx;
5409 break;
5410
5411 case PLUS:
5412 /* Either (plus (biv) (invar)) or
5413 (plus (mult (biv) (invar_1)) (invar_2)). */
5414 if (GET_CODE (XEXP (x, 0)) == MULT)
5415 {
5416 *src_reg = XEXP (XEXP (x, 0), 0);
5417 *mult_val = XEXP (XEXP (x, 0), 1);
5418 }
5419 else
5420 {
5421 *src_reg = XEXP (x, 0);
5422 *mult_val = const1_rtx;
5423 }
5424 *add_val = XEXP (x, 1);
5425 break;
5426
5427 case MULT:
5428 /* ADD_VAL is zero. */
5429 *src_reg = XEXP (x, 0);
5430 *mult_val = XEXP (x, 1);
5431 *add_val = const0_rtx;
5432 break;
5433
5434 default:
5435 abort ();
5436 }
5437
5438 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
5439 unless they are CONST_INTs). */
5440 if (GET_CODE (*add_val) == USE)
5441 *add_val = XEXP (*add_val, 0);
5442 if (GET_CODE (*mult_val) == USE)
5443 *mult_val = XEXP (*mult_val, 0);
5444
5445 if (is_addr)
5446 {
5447 #ifdef ADDRESS_COST
5448 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
5449 #else
5450 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
5451 #endif
5452 }
5453 else
5454 *pbenefit += rtx_cost (orig_x, SET);
5455
5456 /* Always return true if this is a giv so it will be detected as such,
5457 even if the benefit is zero or negative. This allows elimination
5458 of bivs that might otherwise not be eliminated. */
5459 return 1;
5460 }
5461 \f
5462 /* Given an expression, X, try to form it as a linear function of a biv.
5463 We will canonicalize it to be of the form
5464 (plus (mult (BIV) (invar_1))
5465 (invar_2))
5466 with possible degeneracies.
5467
5468 The invariant expressions must each be of a form that can be used as a
5469 machine operand. We surround them with a USE rtx (a hack, but localized
5470 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5471 routine; it is the caller's responsibility to strip them.
5472
5473 If no such canonicalization is possible (i.e., two biv's are used or an
5474 expression that is neither invariant nor a biv or giv), this routine
5475 returns 0.
5476
5477 For a non-zero return, the result will have a code of CONST_INT, USE,
5478 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5479
5480 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
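
/* Illustrative example: with biv I and invariant X, an input such as

       (plus (mult (plus I (const_int 2)) (const_int 4)) X)

   is distributed and re-associated by the rules below into

       (plus (mult I (const_int 4)) (use (plus X (const_int 8))))

   i.e. (BIV * invar_1) + invar_2, with the non-constant invariant sum
   wrapped in a USE as described above.  */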
5481
5482 static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
5483 static rtx sge_plus_constant PROTO ((rtx, rtx));
5484
5485 static rtx
5486 simplify_giv_expr (x, benefit)
5487 rtx x;
5488 int *benefit;
5489 {
5490 enum machine_mode mode = GET_MODE (x);
5491 rtx arg0, arg1;
5492 rtx tem;
5493
5494 /* If this is not an integer mode, or if we cannot do arithmetic in this
5495 mode, this can't be a giv. */
5496 if (mode != VOIDmode
5497 && (GET_MODE_CLASS (mode) != MODE_INT
5498 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5499 return NULL_RTX;
5500
5501 switch (GET_CODE (x))
5502 {
5503 case PLUS:
5504 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5505 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5506 if (arg0 == 0 || arg1 == 0)
5507 return NULL_RTX;
5508
5509 /* Put constant last, CONST_INT last if both constant. */
5510 if ((GET_CODE (arg0) == USE
5511 || GET_CODE (arg0) == CONST_INT)
5512 && ! ((GET_CODE (arg0) == USE
5513 && GET_CODE (arg1) == USE)
5514 || GET_CODE (arg1) == CONST_INT))
5515 tem = arg0, arg0 = arg1, arg1 = tem;
5516
5517 /* Handle addition of zero, then addition of an invariant. */
5518 if (arg1 == const0_rtx)
5519 return arg0;
5520 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5521 switch (GET_CODE (arg0))
5522 {
5523 case CONST_INT:
5524 case USE:
5525 /* Adding two invariants must result in an invariant, so enclose
5526 addition operation inside a USE and return it. */
5527 if (GET_CODE (arg0) == USE)
5528 arg0 = XEXP (arg0, 0);
5529 if (GET_CODE (arg1) == USE)
5530 arg1 = XEXP (arg1, 0);
5531
5532 if (GET_CODE (arg0) == CONST_INT)
5533 tem = arg0, arg0 = arg1, arg1 = tem;
5534 if (GET_CODE (arg1) == CONST_INT)
5535 tem = sge_plus_constant (arg0, arg1);
5536 else
5537 tem = sge_plus (mode, arg0, arg1);
5538
5539 if (GET_CODE (tem) != CONST_INT)
5540 tem = gen_rtx_USE (mode, tem);
5541 return tem;
5542
5543 case REG:
5544 case MULT:
5545 /* biv + invar or mult + invar. Return sum. */
5546 return gen_rtx_PLUS (mode, arg0, arg1);
5547
5548 case PLUS:
5549 /* (a + invar_1) + invar_2. Associate. */
5550 return simplify_giv_expr (
5551 gen_rtx_PLUS (mode, XEXP (arg0, 0),
5552 gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
5553 benefit);
5554
5555 default:
5556 abort ();
5557 }
5558
5559 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5560 MULT to reduce cases. */
5561 if (GET_CODE (arg0) == REG)
5562 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5563 if (GET_CODE (arg1) == REG)
5564 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5565
5566 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5567 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5568 Recurse to associate the second PLUS. */
5569 if (GET_CODE (arg1) == MULT)
5570 tem = arg0, arg0 = arg1, arg1 = tem;
5571
5572 if (GET_CODE (arg1) == PLUS)
5573 return simplify_giv_expr (gen_rtx_PLUS (mode,
5574 gen_rtx_PLUS (mode, arg0,
5575 XEXP (arg1, 0)),
5576 XEXP (arg1, 1)),
5577 benefit);
5578
5579 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5580 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5581 return NULL_RTX;
5582
5583 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5584 return NULL_RTX;
5585
5586 return simplify_giv_expr (gen_rtx_MULT (mode,
5587 XEXP (arg0, 0),
5588 gen_rtx_PLUS (mode,
5589 XEXP (arg0, 1),
5590 XEXP (arg1, 1))),
5591 benefit);
5592
5593 case MINUS:
5594 /* Handle "a - b" as "a + b * (-1)". */
5595 return simplify_giv_expr (gen_rtx_PLUS (mode,
5596 XEXP (x, 0),
5597 gen_rtx_MULT (mode, XEXP (x, 1),
5598 constm1_rtx)),
5599 benefit);
5600
5601 case MULT:
5602 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5603 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5604 if (arg0 == 0 || arg1 == 0)
5605 return NULL_RTX;
5606
5607 /* Put constant last, CONST_INT last if both constant. */
5608 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5609 && GET_CODE (arg1) != CONST_INT)
5610 tem = arg0, arg0 = arg1, arg1 = tem;
5611
5612 /* If second argument is not now constant, not giv. */
5613 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5614 return NULL_RTX;
5615
5616 /* Handle multiply by 0 or 1. */
5617 if (arg1 == const0_rtx)
5618 return const0_rtx;
5619
5620 else if (arg1 == const1_rtx)
5621 return arg0;
5622
5623 switch (GET_CODE (arg0))
5624 {
5625 case REG:
5626 /* biv * invar. Done. */
5627 return gen_rtx_MULT (mode, arg0, arg1);
5628
5629 case CONST_INT:
5630 /* Product of two constants. */
5631 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5632
5633 case USE:
5634 /* invar * invar. It is a giv, but very few of these will
5635 actually pay off, so limit to simple registers. */
5636 if (GET_CODE (arg1) != CONST_INT)
5637 return NULL_RTX;
5638
5639 arg0 = XEXP (arg0, 0);
5640 if (GET_CODE (arg0) == REG)
5641 tem = gen_rtx_MULT (mode, arg0, arg1);
5642 else if (GET_CODE (arg0) == MULT
5643 && GET_CODE (XEXP (arg0, 0)) == REG
5644 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
5645 {
5646 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
5647 GEN_INT (INTVAL (XEXP (arg0, 1))
5648 * INTVAL (arg1)));
5649 }
5650 else
5651 return NULL_RTX;
5652 return gen_rtx_USE (mode, tem);
5653
5654 case MULT:
5655 /* (a * invar_1) * invar_2. Associate. */
5656 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5657 gen_rtx_MULT (mode,
5658 XEXP (arg0, 1),
5659 arg1)),
5660 benefit);
5661
5662 case PLUS:
5663 /* (a + invar_1) * invar_2. Distribute. */
5664 return simplify_giv_expr (gen_rtx_PLUS (mode,
5665 gen_rtx_MULT (mode,
5666 XEXP (arg0, 0),
5667 arg1),
5668 gen_rtx_MULT (mode,
5669 XEXP (arg0, 1),
5670 arg1)),
5671 benefit);
5672
5673 default:
5674 abort ();
5675 }
5676
5677 case ASHIFT:
5678 /* Shift by constant is multiply by power of two. */
5679 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5680 return 0;
5681
5682 return simplify_giv_expr (gen_rtx_MULT (mode,
5683 XEXP (x, 0),
5684 GEN_INT ((HOST_WIDE_INT) 1
5685 << INTVAL (XEXP (x, 1)))),
5686 benefit);
5687
5688 case NEG:
5689 /* "-a" is "a * (-1)" */
5690 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5691 benefit);
5692
5693 case NOT:
5694 /* "~a" is "-a - 1". Silly, but easy. */
5695 return simplify_giv_expr (gen_rtx_MINUS (mode,
5696 gen_rtx_NEG (mode, XEXP (x, 0)),
5697 const1_rtx),
5698 benefit);
5699
5700 case USE:
5701 /* Already in proper form for invariant. */
5702 return x;
5703
5704 case REG:
5705 /* If this is a new register, we can't deal with it. */
5706 if (REGNO (x) >= max_reg_before_loop)
5707 return 0;
5708
5709 /* Check for biv or giv. */
5710 switch (reg_iv_type[REGNO (x)])
5711 {
5712 case BASIC_INDUCT:
5713 return x;
5714 case GENERAL_INDUCT:
5715 {
5716 struct induction *v = reg_iv_info[REGNO (x)];
5717
5718 /* Form expression from giv and add benefit. Ensure this giv
5719 can derive another and subtract any needed adjustment if so. */
5720 *benefit += v->benefit;
5721 if (v->cant_derive)
5722 return 0;
5723
5724 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5725 v->mult_val),
5726 v->add_val);
5727 if (v->derive_adjustment)
5728 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5729 return simplify_giv_expr (tem, benefit);
5730 }
5731
5732 default:
5733 /* If it isn't an induction variable, and it is invariant, we
5734 may be able to simplify things further by looking through
5735 the bits we just moved outside the loop. */
5736 if (invariant_p (x) == 1)
5737 {
5738 struct movable *m;
5739
5740 for (m = the_movables; m ; m = m->next)
5741 if (rtx_equal_p (x, m->set_dest))
5742 {
5743 /* Ok, we found a match. Substitute and simplify. */
5744
5745 /* If we match another movable, we must use that, as
5746 this one is going away. */
5747 if (m->match)
5748 return simplify_giv_expr (m->match->set_dest, benefit);
5749
5750 /* If consec is non-zero, this is a member of a group of
5751 instructions that were moved together. We handle this
5752 case only to the point of seeking to the last insn and
5753 looking for a REG_EQUAL. Fail if we don't find one. */
5754 if (m->consec != 0)
5755 {
5756 int i = m->consec;
5757 tem = m->insn;
5758 do { tem = NEXT_INSN (tem); } while (--i > 0);
5759
5760 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
5761 if (tem)
5762 tem = XEXP (tem, 0);
5763 }
5764 else
5765 {
5766 tem = single_set (m->insn);
5767 if (tem)
5768 tem = SET_SRC (tem);
5769 }
5770
5771 if (tem)
5772 {
5773 /* What we are most interested in is pointer
5774 arithmetic on invariants -- only take
5775 patterns we may be able to do something with. */
5776 if (GET_CODE (tem) == PLUS
5777 || GET_CODE (tem) == MULT
5778 || GET_CODE (tem) == ASHIFT
5779 || GET_CODE (tem) == CONST_INT
5780 || GET_CODE (tem) == SYMBOL_REF)
5781 {
5782 tem = simplify_giv_expr (tem, benefit);
5783 if (tem)
5784 return tem;
5785 }
5786 else if (GET_CODE (tem) == CONST
5787 && GET_CODE (XEXP (tem, 0)) == PLUS
5788 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
5789 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
5790 {
5791 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
5792 if (tem)
5793 return tem;
5794 }
5795 }
5796 break;
5797 }
5798 }
5799 break;
5800 }
5801
5802 /* Fall through to general case. */
5803 default:
5804 /* If invariant, return as USE (unless CONST_INT).
5805 Otherwise, not giv. */
5806 if (GET_CODE (x) == USE)
5807 x = XEXP (x, 0);
5808
5809 if (invariant_p (x) == 1)
5810 {
5811 if (GET_CODE (x) == CONST_INT)
5812 return x;
5813 if (GET_CODE (x) == CONST
5814 && GET_CODE (XEXP (x, 0)) == PLUS
5815 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5816 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5817 x = XEXP (x, 0);
5818 return gen_rtx_USE (mode, x);
5819 }
5820 else
5821 return 0;
5822 }
5823 }
5824
5825 /* This routine folds invariants such that there is only ever one
5826 CONST_INT in the summation. It is only used by simplify_giv_expr. */
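
/* E.g. (illustrative) folding C == 3 into X == (plus r (const_int 4))
   yields (plus r (const_int 7)) rather than nesting another PLUS, so
   at most one CONST_INT ever appears in the summation.  */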
5827
5828 static rtx
5829 sge_plus_constant (x, c)
5830 rtx x, c;
5831 {
5832 if (GET_CODE (x) == CONST_INT)
5833 return GEN_INT (INTVAL (x) + INTVAL (c));
5834 else if (GET_CODE (x) != PLUS)
5835 return gen_rtx_PLUS (GET_MODE (x), x, c);
5836 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5837 {
5838 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
5839 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
5840 }
5841 else if (GET_CODE (XEXP (x, 0)) == PLUS
5842 || GET_CODE (XEXP (x, 1)) != PLUS)
5843 {
5844 return gen_rtx_PLUS (GET_MODE (x),
5845 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
5846 }
5847 else
5848 {
5849 return gen_rtx_PLUS (GET_MODE (x),
5850 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
5851 }
5852 }
5853
5854 static rtx
5855 sge_plus (mode, x, y)
5856 enum machine_mode mode;
5857 rtx x, y;
5858 {
5859 while (GET_CODE (y) == PLUS)
5860 {
5861 rtx a = XEXP (y, 0);
5862 if (GET_CODE (a) == CONST_INT)
5863 x = sge_plus_constant (x, a);
5864 else
5865 x = gen_rtx_PLUS (mode, x, a);
5866 y = XEXP (y, 1);
5867 }
5868 if (GET_CODE (y) == CONST_INT)
5869 x = sge_plus_constant (x, y);
5870 else
5871 x = gen_rtx_PLUS (mode, x, y);
5872 return x;
5873 }
5874 \f
5875 /* Help detect a giv that is calculated by several consecutive insns;
5876 for example,
5877 giv = biv * M
5878 giv = giv + A
5879 The caller has already identified the first insn P as having a giv as dest;
5880 we check that all other insns that set the same register follow
5881 immediately after P, that they alter nothing else,
5882 and that the result of the last is still a giv.
5883
5884 The value is 0 if the reg set in P is not really a giv.
5885 Otherwise, the value is the amount gained by eliminating
5886 all the consecutive insns that compute the value.
5887
5888 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5889 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5890
5891 The coefficients of the ultimate giv value are stored in
5892 *MULT_VAL and *ADD_VAL. */
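
/* A typical two-insn sequence handled here (illustrative only):

       giv = biv * 4        -- insn P, benefit FIRST_BENEFIT
       giv = giv + base     -- immediately follows, sets the same reg

   The ultimate coefficients are *MULT_VAL == 4 and *ADD_VAL == base,
   and the returned benefit accounts for both insns.  */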
5893
5894 static int
5895 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5896 add_val, mult_val)
5897 int first_benefit;
5898 rtx p;
5899 rtx src_reg;
5900 rtx dest_reg;
5901 rtx *add_val;
5902 rtx *mult_val;
5903 {
5904 int count;
5905 enum rtx_code code;
5906 int benefit;
5907 rtx temp;
5908 rtx set;
5909
5910 /* Indicate that this is a giv so that we can update the value produced in
5911 each insn of the multi-insn sequence.
5912
5913 This induction structure will be used only by the call to
5914 general_induction_var below, so we can allocate it on our stack.
5915 If this is a giv, our caller will replace the induct var entry with
5916 a new induction structure. */
5917 struct induction *v
5918 = (struct induction *) alloca (sizeof (struct induction));
5919 v->src_reg = src_reg;
5920 v->mult_val = *mult_val;
5921 v->add_val = *add_val;
5922 v->benefit = first_benefit;
5923 v->cant_derive = 0;
5924 v->derive_adjustment = 0;
5925
5926 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5927 reg_iv_info[REGNO (dest_reg)] = v;
5928
5929 count = n_times_set[REGNO (dest_reg)] - 1;
5930
5931 while (count > 0)
5932 {
5933 p = NEXT_INSN (p);
5934 code = GET_CODE (p);
5935
5936 /* If libcall, skip to end of call sequence. */
5937 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5938 p = XEXP (temp, 0);
5939
5940 if (code == INSN
5941 && (set = single_set (p))
5942 && GET_CODE (SET_DEST (set)) == REG
5943 && SET_DEST (set) == dest_reg
5944 && (general_induction_var (SET_SRC (set), &src_reg,
5945 add_val, mult_val, 0, &benefit)
5946 /* Giv created by equivalent expression. */
5947 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5948 && general_induction_var (XEXP (temp, 0), &src_reg,
5949 add_val, mult_val, 0, &benefit)))
5950 && src_reg == v->src_reg)
5951 {
5952 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5953 benefit += libcall_benefit (p);
5954
5955 count--;
5956 v->mult_val = *mult_val;
5957 v->add_val = *add_val;
5958 v->benefit = benefit;
5959 }
5960 else if (code != NOTE)
5961 {
5962 /* Allow insns that set something other than this giv to a
5963 constant. Such insns are needed on machines which cannot
5964 include long constants and should not disqualify a giv. */
5965 if (code == INSN
5966 && (set = single_set (p))
5967 && SET_DEST (set) != dest_reg
5968 && CONSTANT_P (SET_SRC (set)))
5969 continue;
5970
5971 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5972 return 0;
5973 }
5974 }
5975
5976 return v->benefit;
5977 }
5978 \f
5979 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5980 represented by G1. If no such expression can be found, or it is clear that
5981 it cannot possibly be a valid address, 0 is returned.
5982
5983 To perform the computation, we note that
5984 G1 = x * v + a and
5985 G2 = y * v + b
5986 where `v' is the biv.
5987
5988 So G2 = (y/x) * G1 + (b - a*y/x).
5989
5990 Note that MULT = y/x.
5991
5992 Update: A and B are now allowed to be additive expressions such that
5993 B contains all variables in A. That is, computing B-A will not require
5994 subtracting variables. */
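
/* Worked example (illustrative): with G1 = 4*v + 1 and G2 = 8*v + 10,
   MULT = y/x = 2 and the additive term is b - a*y/x = 10 - 1*2 = 8,
   so G2 = 2*G1 + 8; substituting G1 = 4*v + 1 gives back 8*v + 10.  */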
5995
5996 static rtx
5997 express_from_1 (a, b, mult)
5998 rtx a, b, mult;
5999 {
6000 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6001
6002 if (mult == const0_rtx)
6003 return b;
6004
6005 /* If MULT is not 1, we cannot handle A with non-constants, since we
6006 would then be required to subtract multiples of the registers in A.
6007 This is theoretically possible, and may even apply to some Fortran
6008 constructs, but it is a lot of work and we do not attempt it here. */
6009
6010 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6011 return NULL_RTX;
6012
6013 /* In general these structures are sorted top to bottom (down the PLUS
6014 chain), but not left to right across the PLUS. If B is a higher
6015 order giv than A, we can strip one level and recurse. If A is higher
6016 order, we'll eventually bail out, but won't know that until the end.
6017 If they are the same, we'll strip one level around this loop. */
6018
6019 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6020 {
6021 rtx ra, rb, oa, ob, tmp;
6022
6023 ra = XEXP (a, 0), oa = XEXP (a, 1);
6024 if (GET_CODE (ra) == PLUS)
6025 tmp = ra, ra = oa, oa = tmp;
6026
6027 rb = XEXP (b, 0), ob = XEXP (b, 1);
6028 if (GET_CODE (rb) == PLUS)
6029 tmp = rb, rb = ob, ob = tmp;
6030
6031 if (rtx_equal_p (ra, rb))
6032 /* We matched: remove one reg completely. */
6033 a = oa, b = ob;
6034 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6035 /* An alternate match. */
6036 a = oa, b = rb;
6037 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6038 /* An alternate match. */
6039 a = ra, b = ob;
6040 else
6041 {
6042 /* Indicates an extra register in B. Strip one level from B and
6043 recurse, hoping B was the higher order expression. */
6044 ob = express_from_1 (a, ob, mult);
6045 if (ob == NULL_RTX)
6046 return NULL_RTX;
6047 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6048 }
6049 }
6050
6051 /* Here we are at the last level of A, go through the cases hoping to
6052 get rid of everything but a constant. */
6053
6054 if (GET_CODE (a) == PLUS)
6055 {
6056 rtx ra, oa, tmp;
6057
6058 ra = XEXP (a, 0), oa = XEXP (a, 1);
6059 if (rtx_equal_p (oa, b))
6060 oa = ra;
6061 else if (!rtx_equal_p (ra, b))
6062 return NULL_RTX;
6063
6064 if (GET_CODE (oa) != CONST_INT)
6065 return NULL_RTX;
6066
6067 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6068 }
6069 else if (GET_CODE (a) == CONST_INT)
6070 {
6071 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6072 }
6073 else if (GET_CODE (b) == PLUS)
6074 {
6075 if (rtx_equal_p (a, XEXP (b, 0)))
6076 return XEXP (b, 1);
6077 else if (rtx_equal_p (a, XEXP (b, 1)))
6078 return XEXP (b, 0);
6079 else
6080 return NULL_RTX;
6081 }
6082 else if (rtx_equal_p (a, b))
6083 return const0_rtx;
6084
6085 return NULL_RTX;
6086 }
6087
6088 static rtx
6089 express_from (g1, g2)
6090 struct induction *g1, *g2;
6091 {
6092 rtx mult, add;
6093
6094 /* The value that G1 will be multiplied by must be a constant integer. Also,
6095 the only chance we have of getting a valid address is if y/x (see above
6096 for notation) is also an integer. */
6097 if (GET_CODE (g1->mult_val) == CONST_INT
6098 && GET_CODE (g2->mult_val) == CONST_INT)
6099 {
6100 if (g1->mult_val == const0_rtx
6101 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6102 return NULL_RTX;
6103 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6104 }
6105 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6106 mult = const1_rtx;
6107 else
6108 {
6109 /* ??? Find out if one is a multiple of the other? */
6110 return NULL_RTX;
6111 }
6112
6113 add = express_from_1 (g1->add_val, g2->add_val, mult);
6114 if (add == NULL_RTX)
6115 return NULL_RTX;
6116
6117 /* Form simplified final result. */
6118 if (mult == const0_rtx)
6119 return add;
6120 else if (mult == const1_rtx)
6121 mult = g1->dest_reg;
6122 else
6123 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6124
6125 if (add == const0_rtx)
6126 return mult;
6127 else
6128 return gen_rtx_PLUS (g2->mode, mult, add);
6129 }
6130 \f
6131 /* See if giv G2 can be combined with G1, i.e. whether G2 can use (either
6132 directly or via an address expression) a register used to represent G1.
6133 If so, return the rtx that G2 should use, a representation of G1 (normally
6134 just g1->dest_reg); otherwise return NULL_RTX. */
6135
6136 static rtx
6137 combine_givs_p (g1, g2)
6138 struct induction *g1, *g2;
6139 {
6140 rtx tem = express_from (g1, g2);
6141
6142 /* If these givs are identical, they can be combined. We use the results
6143 of express_from because the addends are not in a canonical form, so
6144 rtx_equal_p is a weaker test. */
6145 if (tem == const0_rtx)
6146 {
6147 return g1->dest_reg;
6148 }
6149
6150 /* If G2 can be expressed as a function of G1 and that function is valid
6151 as an address and no more expensive than using a register for G2,
6152 the expression of G2 in terms of G1 can be used. */
6153 if (tem != NULL_RTX
6154 && g2->giv_type == DEST_ADDR
6155 && memory_address_p (g2->mem_mode, tem)
6156 /* ??? Loses, especially with -fforce-addr, where *g2->location
6157 will always be a register, and so anything more complicated
6158 gets discarded. */
6159 #if 0
6160 #ifdef ADDRESS_COST
6161 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6162 #else
6163 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6164 #endif
6165 #endif
6166 )
6167 {
6168 return tem;
6169 }
6170
6171 return NULL_RTX;
6172 }
6173 \f
6174 struct combine_givs_stats
6175 {
6176 int giv_number;
6177 int total_benefit;
6178 };
6179
6180 static int
6181 cmp_combine_givs_stats (x, y)
6182 struct combine_givs_stats *x, *y;
6183 {
6184 int d;
6185 d = y->total_benefit - x->total_benefit;
6186 /* Stabilize the sort. */
6187 if (!d)
6188 d = x->giv_number - y->giv_number;
6189 return d;
6190 }
6191
6192 /* If one of these givs is a DEST_REG that was only used once, by the
6193 other giv, this is actually a single use. Return 0 if this is not
6194 the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
6195
6196 static int
6197 combine_givs_used_once (g1, g2)
6198 struct induction *g1, *g2;
6199 {
6200 if (g1->giv_type == DEST_REG
6201 && n_times_used[REGNO (g1->dest_reg)] == 1
6202 && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
6203 return -1;
6204
6205 if (g2->giv_type == DEST_REG
6206 && n_times_used[REGNO (g2->dest_reg)] == 1
6207 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
6208 return 1;
6209
6210 return 0;
6211 }
6212
6213 static int
6214 combine_givs_benefit_from (g1, g2)
6215 struct induction *g1, *g2;
6216 {
6217 int tmp = combine_givs_used_once (g1, g2);
6218 if (tmp < 0)
6219 return 0;
6220 else if (tmp > 0)
6221 return g2->benefit - g1->benefit;
6222 else
6223 return g2->benefit;
6224 }
6225
6226 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6227 any other. If so, point SAME to the giv combined with and set NEW_REG to
6228 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6229 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
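
/* For instance (illustrative), if G1 computes 4*i + base and G2's
   address is 4*i + base + 8, express_from yields
   (plus g1->dest_reg (const_int 8)); when that is a valid address,
   G2 is rewritten in terms of G1 and needs no reduced register of
   its own.  */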
6230
6231 static void
6232 combine_givs (bl)
6233 struct iv_class *bl;
6234 {
6235 struct induction *g1, *g2, **giv_array;
6236 int i, j, k, giv_count;
6237 struct combine_givs_stats *stats;
6238 rtx *can_combine;
6239
6240 /* Count givs, because bl->giv_count is incorrect here. */
6241 giv_count = 0;
6242 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6243 if (!g1->ignore)
6244 giv_count++;
6245
6246 giv_array
6247 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6248 i = 0;
6249 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6250 if (!g1->ignore)
6251 giv_array[i++] = g1;
6252
6253 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
6254 bzero (stats, giv_count * sizeof (*stats));
6255
6256 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
6257 bzero (can_combine, giv_count * giv_count * sizeof(rtx));
6258
6259 for (i = 0; i < giv_count; i++)
6260 {
6261 int this_benefit;
6262
6263 g1 = giv_array[i];
6264
6265 this_benefit = g1->benefit;
6266 /* Add an additional weight for zero addends. */
6267 if (g1->no_const_addval)
6268 this_benefit += 1;
6269 for (j = 0; j < giv_count; j++)
6270 {
6271 rtx this_combine;
6272
6273 g2 = giv_array[j];
6274 if (g1 != g2
6275 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6276 {
6277 can_combine[i*giv_count + j] = this_combine;
6278 this_benefit += combine_givs_benefit_from (g1, g2);
6279 /* Add an additional weight for being reused more times. */
6280 this_benefit += 3;
6281 }
6282 }
6283 stats[i].giv_number = i;
6284 stats[i].total_benefit = this_benefit;
6285 }
6286
6287 /* Iterate, combining until we can't. */
6288 restart:
6289 qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
6290
6291 if (loop_dump_stream)
6292 {
6293 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6294 for (k = 0; k < giv_count; k++)
6295 {
6296 g1 = giv_array[stats[k].giv_number];
6297 if (!g1->combined_with && !g1->same)
6298 fprintf (loop_dump_stream, " {%d, %d}",
6299 INSN_UID (giv_array[stats[k].giv_number]->insn),
6300 stats[k].total_benefit);
6301 }
6302 putc ('\n', loop_dump_stream);
6303 }
6304
6305 for (k = 0; k < giv_count; k++)
6306 {
6307 int g1_add_benefit = 0;
6308
6309 i = stats[k].giv_number;
6310 g1 = giv_array[i];
6311
6312 /* If it has already been combined, skip. */
6313 if (g1->combined_with || g1->same)
6314 continue;
6315
6316 for (j = 0; j < giv_count; j++)
6317 {
6318 g2 = giv_array[j];
6319 if (g1 != g2 && can_combine[i*giv_count + j]
6320 /* If it has already been combined, skip. */
6321 && ! g2->same && ! g2->combined_with)
6322 {
6323 int l;
6324
6325 g2->new_reg = can_combine[i*giv_count + j];
6326 g2->same = g1;
6327 g1->combined_with = 1;
6328 if (!combine_givs_used_once (g1, g2))
6329 g1->times_used += 1;
6330 g1->lifetime += g2->lifetime;
6331
6332 g1_add_benefit += combine_givs_benefit_from (g1, g2);
6333
6334 /* ??? The new final_[bg]iv_value code does a much better job
6335 of finding replaceable giv's, and hence this code may no
6336 longer be necessary. */
6337 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6338 g1_add_benefit -= copy_cost;
6339
6340 /* To help optimize the next set of combinations, remove
6341 this giv from the benefits of other potential mates. */
6342 for (l = 0; l < giv_count; ++l)
6343 {
6344 int m = stats[l].giv_number;
6345 if (can_combine[m*giv_count + j])
6346 {
6347 /* Remove additional weight for being reused. */
6348 stats[l].total_benefit -= 3 +
6349 combine_givs_benefit_from (giv_array[m], g2);
6350 }
6351 }
6352
6353 if (loop_dump_stream)
6354 fprintf (loop_dump_stream,
6355 "giv at %d combined with giv at %d\n",
6356 INSN_UID (g2->insn), INSN_UID (g1->insn));
6357 }
6358 }
6359
6360 /* To help optimize the next set of combinations, remove
6361 this giv from the benefits of other potential mates. */
6362 if (g1->combined_with)
6363 {
6364 for (j = 0; j < giv_count; ++j)
6365 {
6366 int m = stats[j].giv_number;
6367 if (can_combine[m*giv_count + i])
6368 {
6369 /* Remove additional weight for being reused. */
6370 stats[j].total_benefit -= 3 +
6371 combine_givs_benefit_from (giv_array[m], g1);
6372 }
6373 }
6374
6375 g1->benefit += g1_add_benefit;
6376
6377 /* We've finished with this giv, and everything it touched.
6378 Restart the combination so that proper weights for the
6379 rest of the givs are properly taken into account. */
6380 /* ??? Ideally we would compact the arrays at this point, so
6381 as to not cover old ground. But sanely compacting
6382 can_combine is tricky. */
6383 goto restart;
6384 }
6385 }
6386 }
6387 \f
6388 /* EMIT code before INSERT_BEFORE to set REG = B * M + A. */
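
/* E.g. (illustrative) a caller reducing a giv with coefficients M and A
   over a biv can use this to initialize the giv's register before the
   loop:

       emit_iv_add_mult (bl->initial_value, M, A, new_reg, loop_start);  */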
6389
6390 void
6391 emit_iv_add_mult (b, m, a, reg, insert_before)
6392 rtx b; /* initial value of basic induction variable */
6393 rtx m; /* multiplicative constant */
6394 rtx a; /* additive constant */
6395 rtx reg; /* destination register */
6396 rtx insert_before;
6397 {
6398 rtx seq;
6399 rtx result;
6400
6401 /* Prevent unexpected sharing of these rtx. */
6402 a = copy_rtx (a);
6403 b = copy_rtx (b);
6404
6405 /* Increase the lifetime of any invariants moved further in code. */
6406 update_reg_last_use (a, insert_before);
6407 update_reg_last_use (b, insert_before);
6408 update_reg_last_use (m, insert_before);
6409
6410 start_sequence ();
6411 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6412 if (reg != result)
6413 emit_move_insn (reg, result);
6414 seq = gen_sequence ();
6415 end_sequence ();
6416
6417 emit_insn_before (seq, insert_before);
6418
6419 /* It is entirely possible that the expansion created lots of new
6420 registers. Iterate over the sequence we just created and
6421 record them all. */
6422
6423 if (GET_CODE (seq) == SEQUENCE)
6424 {
6425 int i;
6426 for (i = 0; i < XVECLEN (seq, 0); ++i)
6427 {
6428 rtx set = single_set (XVECEXP (seq, 0, i));
6429 if (set && GET_CODE (SET_DEST (set)) == REG)
6430 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6431 }
6432 }
6433 else if (GET_CODE (seq) == SET
6434 && GET_CODE (SET_DEST (seq)) == REG)
6435 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
6436 }
6437 \f
6438 /* Test whether A * B can be computed without
6439 an actual multiply insn. Value is 1 if so. */
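
/* E.g. on most targets x * 5 expands to shift-and-add ((x << 2) + x),
   so it is considered cheap below, while multiplying by a large prime
   constant typically needs a real mult insn or a long sequence and is
   not.  (Illustrative; the actual outcome is whatever expand_mult
   generates for the target.)  */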
6440
6441 static int
6442 product_cheap_p (a, b)
6443 rtx a;
6444 rtx b;
6445 {
6446 int i;
6447 rtx tmp;
6448 struct obstack *old_rtl_obstack = rtl_obstack;
6449 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6450 int win = 1;
6451
6452 /* If only one is constant, make it B. */
6453 if (GET_CODE (a) == CONST_INT)
6454 tmp = a, a = b, b = tmp;
6455
6456 /* If first constant, both constant, so don't need multiply. */
6457 if (GET_CODE (a) == CONST_INT)
6458 return 1;
6459
6460 /* If second not constant, neither is constant, so would need multiply. */
6461 if (GET_CODE (b) != CONST_INT)
6462 return 0;
6463
6464 /* One operand is constant, so might not need multiply insn. Generate the
6465 code for the multiply and see if a call or multiply, or long sequence
6466 of insns is generated. */
6467
6468 rtl_obstack = &temp_obstack;
6469 start_sequence ();
6470 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6471 tmp = gen_sequence ();
6472 end_sequence ();
6473
6474 if (GET_CODE (tmp) == SEQUENCE)
6475 {
6476 if (XVEC (tmp, 0) == 0)
6477 win = 1;
6478 else if (XVECLEN (tmp, 0) > 3)
6479 win = 0;
6480 else
6481 for (i = 0; i < XVECLEN (tmp, 0); i++)
6482 {
6483 rtx insn = XVECEXP (tmp, 0, i);
6484
6485 if (GET_CODE (insn) != INSN
6486 || (GET_CODE (PATTERN (insn)) == SET
6487 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6488 || (GET_CODE (PATTERN (insn)) == PARALLEL
6489 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6490 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6491 {
6492 win = 0;
6493 break;
6494 }
6495 }
6496 }
6497 else if (GET_CODE (tmp) == SET
6498 && GET_CODE (SET_SRC (tmp)) == MULT)
6499 win = 0;
6500 else if (GET_CODE (tmp) == PARALLEL
6501 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6502 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6503 win = 0;
6504
6505 /* Free any storage we obtained in generating this multiply and restore rtl
6506 allocation to its normal obstack. */
6507 obstack_free (&temp_obstack, storage);
6508 rtl_obstack = old_rtl_obstack;
6509
6510 return win;
6511 }
6512 \f
6513 /* Check to see if loop can be terminated by a "decrement and branch until
6514 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
6515 Also try reversing an increment loop to a decrement loop
6516 to see if the optimization can be performed.
6517 Value is nonzero if optimization was performed. */
6518
6519 /* This is useful even if the architecture doesn't have such an insn,
6520 because it might change a loop that increments from 0 to n into a loop
6521 that decrements from n to 0. A loop that decrements to zero is usually
6522 faster than one that increments from zero. */
6523
6524 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6525 such as approx_final_value, biv_total_increment, loop_iterations, and
6526 final_[bg]iv_value. */
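
/* Sketch of the reversal attempted below (illustrative only):

       for (i = 0; i < 100; i++)   ==>   for (i = 99; i >= 0; i--)

   provided i is used only to count iterations, so that the exit test
   can become a compare/branch against zero (or, with REG_NONNEG, a
   decrement-and-branch instruction).  */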
6527
6528 static int
6529 check_dbra_loop (loop_end, insn_count, loop_start)
6530 rtx loop_end;
6531 int insn_count;
6532 rtx loop_start;
6533 {
6534 struct iv_class *bl;
6535 rtx reg;
6536 rtx jump_label;
6537 rtx final_value;
6538 rtx start_value;
6539 rtx new_add_val;
6540 rtx comparison;
6541 rtx before_comparison;
6542 rtx p;
6543 rtx jump;
6544 rtx first_compare;
6545 int compare_and_branch;
6546
6547 /* If last insn is a conditional branch, and the insn before tests a
6548 register value, try to optimize it. Otherwise, we can't do anything. */
6549
6550 jump = PREV_INSN (loop_end);
6551 comparison = get_condition_for_loop (jump);
6552 if (comparison == 0)
6553 return 0;
6554
6555 /* Try to compute whether the compare/branch at the loop end is one or
6556 two instructions. */
6557 get_condition (jump, &first_compare);
6558 if (first_compare == jump)
6559 compare_and_branch = 1;
6560 else if (first_compare == prev_nonnote_insn (jump))
6561 compare_and_branch = 2;
6562 else
6563 return 0;
6564
6565 /* Check all of the bivs to see if the compare uses one of them.
6566 Skip biv's set more than once because we can't guarantee that
6567 it will be zero on the last iteration. Also skip if the biv is
6568 used between its update and the test insn. */
6569
6570 for (bl = loop_iv_list; bl; bl = bl->next)
6571 {
6572 if (bl->biv_count == 1
6573 && bl->biv->dest_reg == XEXP (comparison, 0)
6574 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6575 first_compare))
6576 break;
6577 }
6578
6579 if (! bl)
6580 return 0;
6581
6582 /* Look for the case where the basic induction variable is always
6583 nonnegative, and equals zero on the last iteration.
6584 In this case, add a reg_note REG_NONNEG, which allows the
6585 m68k DBRA instruction to be used. */
6586
6587 if (((GET_CODE (comparison) == GT
6588 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6589 && INTVAL (XEXP (comparison, 1)) == -1)
6590 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6591 && GET_CODE (bl->biv->add_val) == CONST_INT
6592 && INTVAL (bl->biv->add_val) < 0)
6593 {
6594 /* Initial value must be greater than 0,
6595 init_val % -dec_value == 0 to ensure that it equals zero on
6596 the last iteration */
6597
6598 if (GET_CODE (bl->initial_value) == CONST_INT
6599 && INTVAL (bl->initial_value) > 0
6600 && (INTVAL (bl->initial_value)
6601 % (-INTVAL (bl->biv->add_val))) == 0)
6602 {
6603 /* register always nonnegative, add REG_NOTE to branch */
6604 REG_NOTES (PREV_INSN (loop_end))
6605 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6606 REG_NOTES (PREV_INSN (loop_end)));
6607 bl->nonneg = 1;
6608
6609 return 1;
6610 }
6611
6612 /* If the decrement is 1 and the value was tested as >= 0 before
6613 the loop, then we can safely optimize. */
6614 for (p = loop_start; p; p = PREV_INSN (p))
6615 {
6616 if (GET_CODE (p) == CODE_LABEL)
6617 break;
6618 if (GET_CODE (p) != JUMP_INSN)
6619 continue;
6620
6621 before_comparison = get_condition_for_loop (p);
6622 if (before_comparison
6623 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6624 && GET_CODE (before_comparison) == LT
6625 && XEXP (before_comparison, 1) == const0_rtx
6626 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6627 && INTVAL (bl->biv->add_val) == -1)
6628 {
6629 REG_NOTES (PREV_INSN (loop_end))
6630 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6631 REG_NOTES (PREV_INSN (loop_end)));
6632 bl->nonneg = 1;
6633
6634 return 1;
6635 }
6636 }
6637 }
6638 else if (num_mem_sets <= 1)
6639 {
6640 /* Try to change inc to dec, so can apply above optimization. */
6641 /* Can do this if:
6642 all registers modified are induction variables or invariant,
6643 all memory references have non-overlapping addresses
6644 (obviously true if only one write)
6645 allow 2 insns for the compare/jump at the end of the loop. */
6646 /* Also, we must avoid any instructions which use both the reversed
6647 biv and another biv. Such instructions will fail if the loop is
6648 reversed. We meet this condition by requiring that either
6649 no_use_except_counting is true, or else that there is only
6650 one biv. */
6651 int num_nonfixed_reads = 0;
6652 /* 1 if the iteration var is used only to count iterations. */
6653 int no_use_except_counting = 0;
6654 /* 1 if the loop has no memory store, or it has a single memory store
6655 which is reversible. */
6656 int reversible_mem_store = 1;
6657
6658 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6659 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6660 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6661
6662 if (bl->giv_count == 0
6663 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6664 {
6665 rtx bivreg = regno_reg_rtx[bl->regno];
6666
6667 /* If there are no givs for this biv, and the only exit is the
6668 fall through at the end of the loop, then
6669 see if perhaps there are no uses except to count. */
6670 no_use_except_counting = 1;
6671 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6672 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6673 {
6674 rtx set = single_set (p);
6675
6676 if (set && GET_CODE (SET_DEST (set)) == REG
6677 && REGNO (SET_DEST (set)) == bl->regno)
6678 /* An insn that sets the biv is okay. */
6679 ;
6680 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6681 || p == prev_nonnote_insn (loop_end))
6682 /* Don't bother about the end test. */
6683 ;
6684 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6685 /* Any other use of the biv is no good. */
6686 {
6687 no_use_except_counting = 0;
6688 break;
6689 }
6690 }
6691 }
6692
6693 /* If the loop has a single store, and the destination address is
6694 invariant, then we can't reverse the loop, because this address
6695 might then have the wrong value at loop exit.
6696 This would work if the source was invariant also, however, in that
6697 case, the insn should have been moved out of the loop. */
6698
6699 if (num_mem_sets == 1)
6700 reversible_mem_store
6701 = (! unknown_address_altered
6702 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6703
6704 /* This code only acts for innermost loops. Also it simplifies
6705 the memory address check by only reversing loops with
6706 zero or one memory access.
6707 Two memory accesses could involve parts of the same array,
6708 and that can't be reversed. */
6709
6710 if (num_nonfixed_reads <= 1
6711 && !loop_has_call
6712 && !loop_has_volatile
6713 && reversible_mem_store
6714 && (no_use_except_counting
6715 || ((bl->giv_count + bl->biv_count + num_mem_sets
6716 + num_movables + compare_and_branch == insn_count)
6717 && (bl == loop_iv_list && bl->next == 0))))
6718 {
6719 rtx tem;
6720
6721 /* Loop can be reversed. */
6722 if (loop_dump_stream)
6723 fprintf (loop_dump_stream, "Can reverse loop\n");
6724
6725 /* Now check other conditions:
6726
6727 The increment must be a constant, as must the initial value,
6728 and the comparison code must be LT.
6729
6730 This test can probably be improved since +/- 1 in the constant
6731 can be obtained by changing LT to LE and vice versa; this is
6732 confusing. */
6733
6734 if (comparison
6735 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6736 /* LE gets turned into LT */
6737 && GET_CODE (comparison) == LT
6738 && GET_CODE (bl->initial_value) == CONST_INT)
6739 {
6740 HOST_WIDE_INT add_val, comparison_val;
6741 rtx initial_value;
6742
6743 add_val = INTVAL (bl->biv->add_val);
6744 comparison_val = INTVAL (XEXP (comparison, 1));
6745 final_value = XEXP (comparison, 1);
6746 initial_value = bl->initial_value;
6747
6748 /* Normalize the initial value if it is an integer and
6749 has no other use except as a counter. This will allow
6750 a few more loops to be reversed. */
6751 if (no_use_except_counting
6752 && GET_CODE (initial_value) == CONST_INT)
6753 {
6754 comparison_val = comparison_val - INTVAL (bl->initial_value);
6755 /* Check for overflow. If comparison_val ends up as a
6756 negative value, then we can't reverse the loop. */
6757 if (comparison_val >= 0)
6758 initial_value = const0_rtx;
6759 }
6760
6761 /* If the initial value is not zero, or if the comparison
6762 value is not an exact multiple of the increment, then we
6763 can not reverse this loop. */
6764 if (initial_value != const0_rtx
6765 || (comparison_val % add_val) != 0)
6766 return 0;
6767
6768 /* Reset these in case we normalized the initial value
6769 and comparison value above. */
6770 bl->initial_value = initial_value;
6771 XEXP (comparison, 1) = GEN_INT (comparison_val);
6772
6773 /* Register will always be nonnegative, with value
6774 0 on last iteration if loop reversed */
6775
6776 /* Save some info needed to produce the new insns. */
6777 reg = bl->biv->dest_reg;
6778 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6779 if (jump_label == pc_rtx)
6780 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6781 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6782
6783 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6784 - INTVAL (bl->biv->add_val));
6785
6786 /* Initialize biv to start_value before loop start.
6787 The old initializing insn will be deleted as a
6788 dead store by flow.c. */
6789 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6790
6791 /* Add insn to decrement register, and delete insn
6792 that incremented the register. */
6793 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6794 bl->biv->insn);
6795 delete_insn (bl->biv->insn);
6796
6797 /* Update biv info to reflect its new status. */
6798 bl->biv->insn = p;
6799 bl->initial_value = start_value;
6800 bl->biv->add_val = new_add_val;
6801
6802 /* Inc LABEL_NUSES so that delete_insn will
6803 not delete the label. */
6804 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6805
6806 /* Emit an insn after the end of the loop to set the biv's
6807 proper exit value if it is used anywhere outside the loop. */
6808 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
6809 || ! bl->init_insn
6810 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6811 emit_insn_after (gen_move_insn (reg, final_value),
6812 loop_end);
6813
6814 /* Delete compare/branch at end of loop. */
6815 delete_insn (PREV_INSN (loop_end));
6816 if (compare_and_branch == 2)
6817 delete_insn (first_compare);
6818
6819 /* Add new compare/branch insn at end of loop. */
6820 start_sequence ();
6821 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6822 GET_MODE (reg), 0, 0);
6823 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6824 tem = gen_sequence ();
6825 end_sequence ();
6826 emit_jump_insn_before (tem, loop_end);
6827
6828 for (tem = PREV_INSN (loop_end);
6829 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6830 ;
6831 if (tem)
6832 {
6833 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6834
6835 /* Increment of LABEL_NUSES done above. */
6836 /* Register is now always nonnegative,
6837 so add REG_NONNEG note to the branch. */
6838 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6839 REG_NOTES (tem));
6840 }
6841
6842 bl->nonneg = 1;
6843
6844 /* Mark that this biv has been reversed. Each giv which depends
6845 on this biv, and which is also live past the end of the loop
6846 will have to be fixed up. */
6847
6848 bl->reversed = 1;
6849
6850 if (loop_dump_stream)
6851 fprintf (loop_dump_stream,
6852 "Reversed loop and added reg_nonneg\n");
6853
6854 return 1;
6855 }
6856 }
6857 }
6858
6859 return 0;
6860 }
6861 \f
6862 /* Verify whether the biv BL appears to be eliminable,
6863 based on the insns in the loop that refer to it.
6864 LOOP_START is the first insn of the loop, and END is the end insn.
6865
6866 If ELIMINATE_P is non-zero, actually do the elimination.
6867
6868 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6869 determine whether invariant insns should be placed inside or at the
6870 start of the loop. */
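
/* For example (illustrative), if the only remaining use of biv I is the
   exit test I < 100 and a reduced giv computes 4*I + BASE, the test can
   be rewritten as GIV < 4*100 + BASE and the biv's increment deleted as
   dead.  */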
6871
6872 static int
6873 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6874 struct iv_class *bl;
6875 rtx loop_start;
6876 rtx end;
6877 int eliminate_p;
6878 int threshold, insn_count;
6879 {
6880 rtx reg = bl->biv->dest_reg;
6881 rtx p;
6882
6883 /* Scan all insns in the loop, stopping if we find one that uses the
6884 biv in a way that we cannot eliminate. */
6885
6886 for (p = loop_start; p != end; p = NEXT_INSN (p))
6887 {
6888 enum rtx_code code = GET_CODE (p);
6889 rtx where = threshold >= insn_count ? loop_start : p;
6890
6891 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6892 && reg_mentioned_p (reg, PATTERN (p))
6893 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6894 {
6895 if (loop_dump_stream)
6896 fprintf (loop_dump_stream,
6897 "Cannot eliminate biv %d: biv used in insn %d.\n",
6898 bl->regno, INSN_UID (p));
6899 break;
6900 }
6901 }
6902
6903 if (p == end)
6904 {
6905 if (loop_dump_stream)
6906 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6907 bl->regno, eliminate_p ? "was" : "can be");
6908 return 1;
6909 }
6910
6911 return 0;
6912 }
6913 \f
6914 /* If the biv of class BL appears in X (part of the pattern of INSN),
6915 see if we can eliminate its use. If so, return 1. If not, return 0.
6916
6917 If the biv does not appear in X, return 1.
6918
6919 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6920 where extra insns should be added. Depending on how many items have been
6921 moved out of the loop, it will either be before INSN or at the start of
6922 the loop. */
6923
6924 static int
6925 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6926 rtx x, insn;
6927 struct iv_class *bl;
6928 int eliminate_p;
6929 rtx where;
6930 {
6931 enum rtx_code code = GET_CODE (x);
6932 rtx reg = bl->biv->dest_reg;
6933 enum machine_mode mode = GET_MODE (reg);
6934 struct induction *v;
6935 rtx arg, tem;
6936 #ifdef HAVE_cc0
6937 rtx new;
6938 #endif
6939 int arg_operand;
6940 char *fmt;
6941 int i, j;
6942
6943 switch (code)
6944 {
6945 case REG:
6946 /* If we haven't already been able to do something with this BIV,
6947 we can't eliminate it. */
6948 if (x == reg)
6949 return 0;
6950 return 1;
6951
6952 case SET:
6953 /* If this sets the BIV, it is not a problem. */
6954 if (SET_DEST (x) == reg)
6955 return 1;
6956
6957 /* If this is an insn that defines a giv, it is also ok because
6958 it will go away when the giv is reduced. */
6959 for (v = bl->giv; v; v = v->next_iv)
6960 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6961 return 1;
6962
6963 #ifdef HAVE_cc0
6964 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6965 {
6966 /* Can replace with any giv that was reduced and
6967 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6968 Require a constant for MULT_VAL, so we know it's nonzero.
6969 ??? We disable this optimization to avoid potential
6970 overflows. */
6971
6972 for (v = bl->giv; v; v = v->next_iv)
6973 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6974 && v->add_val == const0_rtx
6975 && ! v->ignore && ! v->maybe_dead && v->always_computable
6976 && v->mode == mode
6977 && 0)
6978 {
6979 /* If the giv V had the auto-inc address optimization applied
6980 to it, and INSN occurs between the giv insn and the biv
6981 insn, then we must adjust the value used here.
6982 This is rare, so we don't bother to do so. */
6983 if (v->auto_inc_opt
6984 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6985 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6986 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6987 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6988 continue;
6989
6990 if (! eliminate_p)
6991 return 1;
6992
6993 /* If the giv has the opposite direction of change,
6994 then reverse the comparison. */
6995 if (INTVAL (v->mult_val) < 0)
6996 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6997 const0_rtx, v->new_reg);
6998 else
6999 new = v->new_reg;
7000
7001 /* We can probably test that giv's reduced reg. */
7002 if (validate_change (insn, &SET_SRC (x), new, 0))
7003 return 1;
7004 }
7005
7006 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7007 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7008 Require a constant for MULT_VAL, so we know it's nonzero.
7009 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7010 overflow problem. */
7011
7012 for (v = bl->giv; v; v = v->next_iv)
7013 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7014 && ! v->ignore && ! v->maybe_dead && v->always_computable
7015 && v->mode == mode
7016 && (GET_CODE (v->add_val) == SYMBOL_REF
7017 || GET_CODE (v->add_val) == LABEL_REF
7018 || GET_CODE (v->add_val) == CONST
7019 || (GET_CODE (v->add_val) == REG
7020 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
7021 {
7022 /* If the giv V had the auto-inc address optimization applied
7023 to it, and INSN occurs between the giv insn and the biv
7024 insn, then we must adjust the value used here.
7025 This is rare, so we don't bother to do so. */
7026 if (v->auto_inc_opt
7027 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7028 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7029 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7030 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7031 continue;
7032
7033 if (! eliminate_p)
7034 return 1;
7035
7036 /* If the giv has the opposite direction of change,
7037 then reverse the comparison. */
7038 if (INTVAL (v->mult_val) < 0)
7039 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7040 v->new_reg);
7041 else
7042 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7043 copy_rtx (v->add_val));
7044
7045 /* Replace biv with the giv's reduced register. */
7046 update_reg_last_use (v->add_val, insn);
7047 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7048 return 1;
7049
7050 /* Insn doesn't support that constant or invariant. Copy it
7051 into a register (it will be a loop invariant). */
7052 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7053
7054 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
7055 where);
7056
7057 /* Substitute the new register for its invariant value in
7058 the compare expression. */
7059 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7060 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7061 return 1;
7062 }
7063 }
7064 #endif
7065 break;
7066
7067 case COMPARE:
7068 case EQ: case NE:
7069 case GT: case GE: case GTU: case GEU:
7070 case LT: case LE: case LTU: case LEU:
7071 /* See if either argument is the biv. */
7072 if (XEXP (x, 0) == reg)
7073 arg = XEXP (x, 1), arg_operand = 1;
7074 else if (XEXP (x, 1) == reg)
7075 arg = XEXP (x, 0), arg_operand = 0;
7076 else
7077 break;
7078
7079 if (CONSTANT_P (arg))
7080 {
7081 /* First try to replace with any giv that has constant positive
7082 mult_val and constant add_val. We might be able to support
7083 negative mult_val, but it seems complex to do it in general. */
7084
7085 for (v = bl->giv; v; v = v->next_iv)
7086 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7087 && (GET_CODE (v->add_val) == SYMBOL_REF
7088 || GET_CODE (v->add_val) == LABEL_REF
7089 || GET_CODE (v->add_val) == CONST
7090 || (GET_CODE (v->add_val) == REG
7091 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
7092 && ! v->ignore && ! v->maybe_dead && v->always_computable
7093 && v->mode == mode)
7094 {
7095 /* If the giv V had the auto-inc address optimization applied
7096 to it, and INSN occurs between the giv insn and the biv
7097 insn, then we must adjust the value used here.
7098 This is rare, so we don't bother to do so. */
7099 if (v->auto_inc_opt
7100 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7101 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7102 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7103 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7104 continue;
7105
7106 if (! eliminate_p)
7107 return 1;
7108
7109 /* Replace biv with the giv's reduced reg. */
7110 XEXP (x, 1-arg_operand) = v->new_reg;
7111
7112 /* If all constants are actually constant integers and
7113 the derived constant can be directly placed in the COMPARE,
7114 do so. */
7115 if (GET_CODE (arg) == CONST_INT
7116 && GET_CODE (v->mult_val) == CONST_INT
7117 && GET_CODE (v->add_val) == CONST_INT
7118 && validate_change (insn, &XEXP (x, arg_operand),
7119 GEN_INT (INTVAL (arg)
7120 * INTVAL (v->mult_val)
7121 + INTVAL (v->add_val)), 0))
7122 return 1;
7123
7124 /* Otherwise, load it into a register. */
7125 tem = gen_reg_rtx (mode);
7126 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7127 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
7128 return 1;
7129
7130 /* If that failed, put back the change we made above. */
7131 XEXP (x, 1-arg_operand) = reg;
7132 }
7133
7134 /* Look for giv with positive constant mult_val and nonconst add_val.
7135 Insert insns to calculate new compare value.
7136 ??? Turn this off due to possible overflow. */
7137
7138 for (v = bl->giv; v; v = v->next_iv)
7139 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7140 && ! v->ignore && ! v->maybe_dead && v->always_computable
7141 && v->mode == mode
7142 && 0)
7143 {
7144 rtx tem;
7145
7146 /* If the giv V had the auto-inc address optimization applied
7147 to it, and INSN occurs between the giv insn and the biv
7148 insn, then we must adjust the value used here.
7149 This is rare, so we don't bother to do so. */
7150 if (v->auto_inc_opt
7151 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7152 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7153 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7154 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7155 continue;
7156
7157 if (! eliminate_p)
7158 return 1;
7159
7160 tem = gen_reg_rtx (mode);
7161
7162 /* Replace biv with giv's reduced register. */
7163 validate_change (insn, &XEXP (x, 1 - arg_operand),
7164 v->new_reg, 1);
7165
7166 /* Compute value to compare against. */
7167 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7168 /* Use it in this insn. */
7169 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7170 if (apply_change_group ())
7171 return 1;
7172 }
7173 }
7174 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
7175 {
7176 if (invariant_p (arg) == 1)
7177 {
7178 /* Look for giv with constant positive mult_val and nonconst
7179 add_val. Insert insns to compute new compare value.
7180 ??? Turn this off due to possible overflow. */
7181
7182 for (v = bl->giv; v; v = v->next_iv)
7183 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7184 && ! v->ignore && ! v->maybe_dead && v->always_computable
7185 && v->mode == mode
7186 && 0)
7187 {
7188 rtx tem;
7189
7190 /* If the giv V had the auto-inc address optimization applied
7191 to it, and INSN occurs between the giv insn and the biv
7192 insn, then we must adjust the value used here.
7193 This is rare, so we don't bother to do so. */
7194 if (v->auto_inc_opt
7195 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7196 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7197 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7198 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7199 continue;
7200
7201 if (! eliminate_p)
7202 return 1;
7203
7204 tem = gen_reg_rtx (mode);
7205
7206 /* Replace biv with giv's reduced register. */
7207 validate_change (insn, &XEXP (x, 1 - arg_operand),
7208 v->new_reg, 1);
7209
7210 /* Compute value to compare against. */
7211 emit_iv_add_mult (arg, v->mult_val, v->add_val,
7212 tem, where);
7213 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7214 if (apply_change_group ())
7215 return 1;
7216 }
7217 }
7218
7219 /* This code has problems. Basically, when deciding whether we can
7220 eliminate BL, we can't know whether a particular giv of ARG
7221 will be reduced. If it isn't going to be reduced,
7222 we can't eliminate BL. We can try forcing it to be reduced,
7223 but that can generate poor code.
7224
7225 The problem is that the benefit of reducing TV, below, should
7226 be increased if BL can actually be eliminated, but this means
7227 we might have to do a topological sort of the order in which
7228 we try to process bivs. It doesn't seem worthwhile to do
7229 this sort of thing now. */
7230
7231 #if 0
7232 /* Otherwise the reg compared with had better be a biv. */
7233 if (GET_CODE (arg) != REG
7234 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
7235 return 0;
7236
7237 /* Look for a pair of givs, one for each biv,
7238 with identical coefficients. */
7239 for (v = bl->giv; v; v = v->next_iv)
7240 {
7241 struct induction *tv;
7242
7243 if (v->ignore || v->maybe_dead || v->mode != mode)
7244 continue;
7245
7246 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
7247 if (! tv->ignore && ! tv->maybe_dead
7248 && rtx_equal_p (tv->mult_val, v->mult_val)
7249 && rtx_equal_p (tv->add_val, v->add_val)
7250 && tv->mode == mode)
7251 {
7252 /* If the giv V had the auto-inc address optimization applied
7253 to it, and INSN occurs between the giv insn and the biv
7254 insn, then we must adjust the value used here.
7255 This is rare, so we don't bother to do so. */
7256 if (v->auto_inc_opt
7257 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7258 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7259 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7260 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7261 continue;
7262
7263 if (! eliminate_p)
7264 return 1;
7265
7266 /* Replace biv with its giv's reduced reg. */
7267 XEXP (x, 1-arg_operand) = v->new_reg;
7268 /* Replace other operand with the other giv's
7269 reduced reg. */
7270 XEXP (x, arg_operand) = tv->new_reg;
7271 return 1;
7272 }
7273 }
7274 #endif
7275 }
7276
7277 /* If we get here, the biv can't be eliminated. */
7278 return 0;
7279
7280 case MEM:
7281 /* If this address is a DEST_ADDR giv, it doesn't matter if the
7282 biv is used in it, since it will be replaced. */
7283 for (v = bl->giv; v; v = v->next_iv)
7284 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
7285 return 1;
7286 break;
7287
7288 default:
7289 break;
7290 }
7291
7292 /* See if any subexpression fails elimination. */
7293 fmt = GET_RTX_FORMAT (code);
7294 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7295 {
7296 switch (fmt[i])
7297 {
7298 case 'e':
7299 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
7300 eliminate_p, where))
7301 return 0;
7302 break;
7303
7304 case 'E':
7305 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7306 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
7307 eliminate_p, where))
7308 return 0;
7309 break;
7310 }
7311 }
7312
7313 return 1;
7314 }
7315 \f
7316 /* Return nonzero if the last use of REG
7317 is in an insn following INSN in the same basic block. */
7318
7319 static int
7320 last_use_this_basic_block (reg, insn)
7321 rtx reg;
7322 rtx insn;
7323 {
7324 rtx n;
7325 for (n = insn;
7326 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
7327 n = NEXT_INSN (n))
7328 {
7329 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
7330 return 1;
7331 }
7332 return 0;
7333 }
7334 \f
7335 /* Called via `note_stores' to record the initial value of a biv. Here we
7336 just record the location of the set and process it later. */
7337
7338 static void
7339 record_initial (dest, set)
7340 rtx dest;
7341 rtx set;
7342 {
7343 struct iv_class *bl;
7344
7345 if (GET_CODE (dest) != REG
7346 || REGNO (dest) >= max_reg_before_loop
7347 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
7348 return;
7349
7350 bl = reg_biv_class[REGNO (dest)];
7351
7352 /* If this is the first set found, record it. */
7353 if (bl->init_insn == 0)
7354 {
7355 bl->init_insn = note_insn;
7356 bl->init_set = set;
7357 }
7358 }
7359 \f
7360 /* If any of the registers in X are "old" and currently have a last use earlier
7361 than INSN, update them to have a last use of INSN. Their actual last use
7362 will be the previous insn but it will not have a valid uid_luid so we can't
7363 use it. */
7364
7365 static void
7366 update_reg_last_use (x, insn)
7367 rtx x;
7368 rtx insn;
7369 {
7370 /* Check for the case where INSN does not have a valid luid. In this case,
7371 there is no need to modify the regno_last_uid, as this can only happen
7372 when code is inserted after the loop_end to set a pseudo's final value,
7373 and hence this insn will never be the last use of x. */
7374 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
7375 && INSN_UID (insn) < max_uid_for_loop
7376 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
7377 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
7378 else
7379 {
7380 register int i, j;
7381 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
7382 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7383 {
7384 if (fmt[i] == 'e')
7385 update_reg_last_use (XEXP (x, i), insn);
7386 else if (fmt[i] == 'E')
7387 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7388 update_reg_last_use (XVECEXP (x, i, j), insn);
7389 }
7390 }
7391 }
7392 \f
7393 /* Given a jump insn JUMP, return the condition that will cause it to branch
7394 to its JUMP_LABEL. If the condition cannot be understood, or is an
7395 inequality floating-point comparison which needs to be reversed, 0 will
7396 be returned.
7397
7398 If EARLIEST is non-zero, it is a pointer to a place where the earliest
7399 insn used in locating the condition was found. If a replacement test
7400 of the condition is desired, it should be placed in front of that
7401 insn and we will be sure that the inputs are still valid.
7402
7403 The condition will be returned in a canonical form to simplify testing by
7404 callers. Specifically:
7405
7406 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
7407 (2) Both operands will be machine operands; (cc0) will have been replaced.
7408 (3) If an operand is a constant, it will be the second operand.
7409 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
7410 for GE, GEU, and LEU. */
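/* For example (an illustration; the register numbers are arbitrary):
   a condition (le (reg 65) (const_int 4)) is returned as
   (lt (reg 65) (const_int 5)), and (geu (reg 66) (const_int 1)) is
   returned as (gtu (reg 66) (const_int 0)).  */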
7411
7412 rtx
7413 get_condition (jump, earliest)
7414 rtx jump;
7415 rtx *earliest;
7416 {
7417 enum rtx_code code;
7418 rtx prev = jump;
7419 rtx set;
7420 rtx tem;
7421 rtx op0, op1;
7422 int reverse_code = 0;
7423 int did_reverse_condition = 0;
7424 enum machine_mode mode;
7425
7426 /* If this is not a standard conditional jump, we can't parse it. */
7427 if (GET_CODE (jump) != JUMP_INSN
7428 || ! condjump_p (jump) || simplejump_p (jump))
7429 return 0;
7430
7431 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7432 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7433 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7434 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7435
7436 if (earliest)
7437 *earliest = jump;
7438
7439 /* If this branches to JUMP_LABEL when the condition is false, reverse
7440 the condition. */
7441 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7442 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7443 code = reverse_condition (code), did_reverse_condition ^= 1;
7444
7445 /* If we are comparing a register with zero, see if the register is set
7446 in the previous insn to a COMPARE or a comparison operation. Perform
7447 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7448 in cse.c */
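/* For example, on a cc0 machine the pair

       (set (cc0) (compare (reg 65) (reg 66)))
       (set (pc) (if_then_else (ne (cc0) (const_int 0)) ...))

   is combined here into the single condition (ne (reg 65) (reg 66));
   the register numbers are illustrative.  */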
7449
7450 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7451 {
7452 /* Set non-zero when we find something of interest. */
7453 rtx x = 0;
7454
7455 #ifdef HAVE_cc0
7456 /* If comparison with cc0, import actual comparison from compare
7457 insn. */
7458 if (op0 == cc0_rtx)
7459 {
7460 if ((prev = prev_nonnote_insn (prev)) == 0
7461 || GET_CODE (prev) != INSN
7462 || (set = single_set (prev)) == 0
7463 || SET_DEST (set) != cc0_rtx)
7464 return 0;
7465
7466 op0 = SET_SRC (set);
7467 op1 = CONST0_RTX (GET_MODE (op0));
7468 if (earliest)
7469 *earliest = prev;
7470 }
7471 #endif
7472
7473 /* If this is a COMPARE, pick up the two things being compared. */
7474 if (GET_CODE (op0) == COMPARE)
7475 {
7476 op1 = XEXP (op0, 1);
7477 op0 = XEXP (op0, 0);
7478 continue;
7479 }
7480 else if (GET_CODE (op0) != REG)
7481 break;
7482
7483 /* Go back to the previous insn. Stop if it is not an INSN. We also
7484 stop if it isn't a single set or if it has a REG_INC note because
7485 we don't want to bother dealing with it. */
7486
7487 if ((prev = prev_nonnote_insn (prev)) == 0
7488 || GET_CODE (prev) != INSN
7489 || FIND_REG_INC_NOTE (prev, 0)
7490 || (set = single_set (prev)) == 0)
7491 break;
7492
7493 /* If this is setting OP0, get what it sets it to if it looks
7494 relevant. */
7495 if (rtx_equal_p (SET_DEST (set), op0))
7496 {
7497 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7498
7499 /* ??? We may not combine comparisons done in a CCmode with
7500 comparisons not done in a CCmode. This is to aid targets
7501 like Alpha that have an IEEE compliant EQ instruction, and
7502 a non-IEEE compliant BEQ instruction. The use of CCmode is
7503 actually artificial, simply to prevent the combination, but
7504 should not affect other platforms. */
7505
7506 if ((GET_CODE (SET_SRC (set)) == COMPARE
7507 || (((code == NE
7508 || (code == LT
7509 && GET_MODE_CLASS (inner_mode) == MODE_INT
7510 && (GET_MODE_BITSIZE (inner_mode)
7511 <= HOST_BITS_PER_WIDE_INT)
7512 && (STORE_FLAG_VALUE
7513 & ((HOST_WIDE_INT) 1
7514 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7515 #ifdef FLOAT_STORE_FLAG_VALUE
7516 || (code == LT
7517 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7518 && FLOAT_STORE_FLAG_VALUE < 0)
7519 #endif
7520 ))
7521 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7522 && ((GET_MODE_CLASS (mode) == MODE_CC)
7523 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7524 x = SET_SRC (set);
7525 else if (((code == EQ
7526 || (code == GE
7527 && (GET_MODE_BITSIZE (inner_mode)
7528 <= HOST_BITS_PER_WIDE_INT)
7529 && GET_MODE_CLASS (inner_mode) == MODE_INT
7530 && (STORE_FLAG_VALUE
7531 & ((HOST_WIDE_INT) 1
7532 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7533 #ifdef FLOAT_STORE_FLAG_VALUE
7534 || (code == GE
7535 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7536 && FLOAT_STORE_FLAG_VALUE < 0)
7537 #endif
7538 ))
7539 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7540 && ((GET_MODE_CLASS (mode) == MODE_CC)
7541 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7542 {
7543 /* We might have reversed a LT to get a GE here. But this wasn't
7544 actually the comparison of data, so we don't flag that we
7545 have had to reverse the condition. */
7546 did_reverse_condition ^= 1;
7547 reverse_code = 1;
7548 x = SET_SRC (set);
7549 }
7550 else
7551 break;
7552 }
7553
7554 else if (reg_set_p (op0, prev))
7555 /* If this sets OP0, but not directly, we have to give up. */
7556 break;
7557
7558 if (x)
7559 {
7560 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7561 code = GET_CODE (x);
7562 if (reverse_code)
7563 {
7564 code = reverse_condition (code);
7565 did_reverse_condition ^= 1;
7566 reverse_code = 0;
7567 }
7568
7569 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7570 if (earliest)
7571 *earliest = prev;
7572 }
7573 }
7574
7575 /* If constant is first, put it last. */
7576 if (CONSTANT_P (op0))
7577 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7578
7579 /* If OP0 is the result of a comparison, we weren't able to find what
7580 was really being compared, so fail. */
7581 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7582 return 0;
7583
7584 /* Canonicalize any ordered comparison with integers involving equality
7585 if we can do computations in the relevant mode and we do not
7586 overflow. */
7587
7588 if (GET_CODE (op1) == CONST_INT
7589 && GET_MODE (op0) != VOIDmode
7590 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7591 {
7592 HOST_WIDE_INT const_val = INTVAL (op1);
7593 unsigned HOST_WIDE_INT uconst_val = const_val;
7594 unsigned HOST_WIDE_INT max_val
7595 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7596
7597 switch (code)
7598 {
7599 case LE:
7600 if (const_val != max_val >> 1)
7601 code = LT, op1 = GEN_INT (const_val + 1);
7602 break;
7603
7604 /* When cross-compiling, const_val might be sign-extended from
7605 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7606 case GE:
7607 if ((const_val & max_val)
7608 != (((HOST_WIDE_INT) 1
7609 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7610 code = GT, op1 = GEN_INT (const_val - 1);
7611 break;
7612
7613 case LEU:
7614 if (uconst_val < max_val)
7615 code = LTU, op1 = GEN_INT (uconst_val + 1);
7616 break;
7617
7618 case GEU:
7619 if (uconst_val != 0)
7620 code = GTU, op1 = GEN_INT (uconst_val - 1);
7621 break;
7622
7623 default:
7624 break;
7625 }
7626 }
7627
7628 /* If this was floating-point and we reversed anything other than an
7629 EQ or NE, return zero. */
7630 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7631 && did_reverse_condition && code != NE && code != EQ
7632 && ! flag_fast_math
7633 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7634 return 0;
7635
7636 #ifdef HAVE_cc0
7637 /* Never return CC0; return zero instead. */
7638 if (op0 == cc0_rtx)
7639 return 0;
7640 #endif
7641
7642 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7643 }
7644
7645 /* Similar to above routine, except that we also put an invariant last
7646 unless both operands are invariants. */
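/* For example (an illustration): if get_condition returns
   (gt (reg 70) (reg 71)) and (reg 70) is loop-invariant while (reg 71)
   is not, the result here is (lt (reg 71) (reg 70)); callers may thus
   assume that an invariant operand, if there is one, is the second
   operand.  */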
7647
7648 rtx
7649 get_condition_for_loop (x)
7650 rtx x;
7651 {
7652 rtx comparison = get_condition (x, NULL_PTR);
7653
7654 if (comparison == 0
7655 || ! invariant_p (XEXP (comparison, 0))
7656 || invariant_p (XEXP (comparison, 1)))
7657 return comparison;
7658
7659 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7660 XEXP (comparison, 1), XEXP (comparison, 0));
7661 }
7662
7663 #ifdef HAIFA
7664 /* Analyze a loop in order to instrument it with the use of the count register.
7665 loop_start and loop_end are the first and last insns of the loop.
7666 This function works in cooperation with insert_bct ().
7667 loop_can_insert_bct[loop_num] is set according to whether the optimization
7668 is applicable to the loop. When it is applicable, the following variables
7669 are also set:
7670 loop_start_value[loop_num]
7671 loop_comparison_value[loop_num]
7672 loop_increment[loop_num]
7673 loop_comparison_code[loop_num] */
7674
7675 #ifdef HAVE_decrement_and_branch_on_count
7676 static void
7677 analyze_loop_iterations (loop_start, loop_end)
7678 rtx loop_start, loop_end;
7679 {
7680 rtx comparison, comparison_value;
7681 rtx iteration_var, initial_value, increment;
7682 enum rtx_code comparison_code;
7683
7684 rtx last_loop_insn;
7685 rtx insn;
7686 int i;
7687
7688 /* loop_variable mode */
7689 enum machine_mode original_mode;
7690
7691 /* find the number of the loop */
7692 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7693
7694 /* we change our mind only when we are sure that the loop will be instrumented */
7695 loop_can_insert_bct[loop_num] = 0;
7696
7697 /* is the optimization suppressed? */
7698 if ( !flag_branch_on_count_reg )
7699 return;
7700
7701 /* make sure that count-reg is not in use */
7702 if (loop_used_count_register[loop_num]){
7703 if (loop_dump_stream)
7704 fprintf (loop_dump_stream,
7705 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7706 loop_num);
7707 return;
7708 }
7709
7710 /* make sure that the function has no indirect jumps. */
7711 if (indirect_jump_in_function){
7712 if (loop_dump_stream)
7713 fprintf (loop_dump_stream,
7714 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7715 loop_num);
7716 return;
7717 }
7718
7719 /* make sure that the last loop insn is a conditional jump */
7720 last_loop_insn = PREV_INSN (loop_end);
7721 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7722 if (loop_dump_stream)
7723 fprintf (loop_dump_stream,
7724 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7725 loop_num);
7726 return;
7727 }
7728
7729 /* First find the iteration variable. If the last insn is a conditional
7730 branch, and the insn preceding it tests a register value, make that
7731 register the iteration variable. */
7732
7733 /* We used to use prev_nonnote_insn here, but that fails because it might
7734 accidentally get the branch for a contained loop if the branch for this
7735 loop was deleted. We can only trust branches immediately before the
7736 loop_end. */
7737
7738 comparison = get_condition_for_loop (last_loop_insn);
7739 /* ??? Get_condition may switch position of induction variable and
7740 invariant register when it canonicalizes the comparison. */
7741
7742 if (comparison == 0) {
7743 if (loop_dump_stream)
7744 fprintf (loop_dump_stream,
7745 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7746 loop_num);
7747 return;
7748 }
7749
7750 comparison_code = GET_CODE (comparison);
7751 iteration_var = XEXP (comparison, 0);
7752 comparison_value = XEXP (comparison, 1);
7753
7754 original_mode = GET_MODE (iteration_var);
7755 if (GET_MODE_CLASS (original_mode) != MODE_INT
7756 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7757 if (loop_dump_stream)
7758 fprintf (loop_dump_stream,
7759 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7760 loop_num);
7761 return;
7762 }
7763
7764 /* get info about loop bounds and increment */
7765 iteration_info (iteration_var, &initial_value, &increment,
7766 loop_start, loop_end);
7767
7768 /* make sure that all required loop data were found */
7769 if (!(initial_value && increment && comparison_value
7770 && invariant_p (comparison_value) && invariant_p (increment)
7771 && ! indirect_jump_in_function))
7772 {
7773 if (loop_dump_stream) {
7774 fprintf (loop_dump_stream,
7775 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7776 if (!(initial_value && increment && comparison_value)) {
7777 fprintf (loop_dump_stream, "\tbounds not available: ");
7778 if ( ! initial_value )
7779 fprintf (loop_dump_stream, "initial ");
7780 if ( ! increment )
7781 fprintf (loop_dump_stream, "increment ");
7782 if ( ! comparison_value )
7783 fprintf (loop_dump_stream, "comparison ");
7784 fprintf (loop_dump_stream, "\n");
7785 }
7786 if (!invariant_p (comparison_value) || !invariant_p (increment))
7787 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7788 }
7789 return;
7790 }
7791
7792 /* make sure that the increment is constant */
7793 if (GET_CODE (increment) != CONST_INT) {
7794 if (loop_dump_stream)
7795 fprintf (loop_dump_stream,
7796 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7797 loop_num);
7798 return;
7799 }
7800
7801 /* make sure that the loop contains neither a function call nor a jump
7802 through a jump table (the count register might be altered by the called
7803 function, and might be used for a tablejump). */
7804 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7805 if (GET_CODE (insn) == CALL_INSN){
7806 if (loop_dump_stream)
7807 fprintf (loop_dump_stream,
7808 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7809 loop_num);
7810 return;
7811 }
7812
7813 if (GET_CODE (insn) == JUMP_INSN
7814 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7815 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7816 if (loop_dump_stream)
7817 fprintf (loop_dump_stream,
7818 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7819 loop_num);
7820 return;
7821 }
7822 }
7823
7824 /* At this point, we are sure that the loop can be instrumented with BCT.
7825 Some of the loops, however, will not be instrumented - the final decision
7826 is taken by insert_bct () */
7827 if (loop_dump_stream)
7828 fprintf (loop_dump_stream,
7829 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7830 loop_num);
7831
7832 /* mark all enclosing loops to show they cannot use the count register */
7833 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7834 marking here may prevent instrumenting an enclosing loop that could
7835 actually be instrumented. But since this is rare, it is safer to mark
7836 here in case the order of calling (analyze/insert)_bct would be changed. */
7837 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7838 loop_used_count_register[i] = 1;
7839
7840 /* Set data structures which will be used by the instrumentation phase */
7841 loop_start_value[loop_num] = initial_value;
7842 loop_comparison_value[loop_num] = comparison_value;
7843 loop_increment[loop_num] = increment;
7844 loop_comparison_code[loop_num] = comparison_code;
7845 loop_can_insert_bct[loop_num] = 1;
7846 }
7847
7848
7849 /* instrument loop for insertion of bct instruction. We distinguish between
7850 loops with compile-time bounds and those with run-time bounds. The loop
7851 behaviour is analyzed according to the following characteristics/variables:
7852 ; Input variables:
7853 ; comparison-value: the value to which the iteration counter is compared.
7854 ; initial-value: iteration-counter initial value.
7855 ; increment: iteration-counter increment.
7856 ; Computed variables:
7857 ; increment-direction: the sign of the increment.
7858 ; compare-direction: '1' for LT, LE, LTU, LEU; '-1' for GT, GE, GTU, GEU; '0' for NE.
7859 ; range-direction: sign (comparison-value - initial-value)
7860 We give up on the following cases:
7861 ; loop variable overflow.
7862 ; run-time loop bounds with comparison code NE.
7863 */
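/* For instance (hypothetical loops): one equivalent to
   `for (i = 0; i < n; i++)' has increment-direction 1 and
   compare-direction 1 (LT), while a countdown loop equivalent to
   `for (i = n; i >= 0; i--)' has increment-direction -1,
   compare-direction -1 (GE) and, because of the `>=' test,
   one additional iteration.  */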
7864
7865 static void
7866 insert_bct (loop_start, loop_end)
7867 rtx loop_start, loop_end;
7868 {
7869 rtx initial_value, comparison_value, increment;
7870 enum rtx_code comparison_code;
7871
7872 int increment_direction, compare_direction;
7873 int unsigned_p = 0;
7874
7875 /* if the loop condition is <= or >=, the number of iterations
7876 is 1 more than the range of the bounds of the loop */
7877 int add_iteration = 0;
7878
7879 /* the only machine mode we work with is the word-sized integer mode
7880 of the machine */
7881 enum machine_mode loop_var_mode = SImode;
7882
7883 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7884
7885 /* get loop-variables. No need to check that these are valid - already
7886 checked in analyze_loop_iterations (). */
7887 comparison_code = loop_comparison_code[loop_num];
7888 initial_value = loop_start_value[loop_num];
7889 comparison_value = loop_comparison_value[loop_num];
7890 increment = loop_increment[loop_num];
7891
7892 /* check analyze_loop_iterations decision for this loop. */
7893 if (! loop_can_insert_bct[loop_num]){
7894 if (loop_dump_stream)
7895 fprintf (loop_dump_stream,
7896 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7897 loop_num);
7898 return;
7899 }
7900
7901 /* It's impossible to instrument a completely unrolled loop. */
7902 if (loop_unroll_factor [loop_num] == -1)
7903 return;
7904
7905 /* make sure that the last loop insn is a conditional jump.
7906 This check is repeated from analyze_loop_iterations (),
7907 because unrolling might have changed that. */
7908 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7909 || !condjump_p (PREV_INSN (loop_end))) {
7910 if (loop_dump_stream)
7911 fprintf (loop_dump_stream,
7912 "insert_bct: not instrumenting BCT because of invalid branch\n");
7913 return;
7914 }
7915
7916 /* fix increment in case loop was unrolled. */
7917 if (loop_unroll_factor [loop_num] > 1)
7918 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
7919
7920 /* determine properties and directions of the loop */
7921 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
7922 switch ( comparison_code ) {
7923 case LEU:
7924 unsigned_p = 1;
7925 /* fallthrough */
7926 case LE:
7927 compare_direction = 1;
7928 add_iteration = 1;
7929 break;
7930 case GEU:
7931 unsigned_p = 1;
7932 /* fallthrough */
7933 case GE:
7934 compare_direction = -1;
7935 add_iteration = 1;
7936 break;
7937 case EQ:
7938 /* in this case we cannot know the number of iterations */
7939 if (loop_dump_stream)
7940 fprintf (loop_dump_stream,
7941 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7942 loop_num);
7943 return;
7944 case LTU:
7945 unsigned_p = 1;
7946 /* fallthrough */
7947 case LT:
7948 compare_direction = 1;
7949 break;
7950 case GTU:
7951 unsigned_p = 1;
7952 /* fallthrough */
7953 case GT:
7954 compare_direction = -1;
7955 break;
7956 case NE:
7957 compare_direction = 0;
7958 break;
7959 default:
7960 abort ();
7961 }
7962
7963
7964 /* make sure that the loop does not end by an overflow */
7965 if (compare_direction != increment_direction) {
7966 if (loop_dump_stream)
7967 fprintf (loop_dump_stream,
7968 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7969 loop_num);
7970 return;
7971 }
7972
7973 /* try to instrument the loop. */
7974
7975 /* Handle the simpler case, where the bounds are known at compile time. */
7976 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7977 {
7978 int n_iterations;
7979 int increment_value_abs = INTVAL (increment) * increment_direction;
7980
7981 /* check the relation between compare-val and initial-val */
7982 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7983 int range_direction = (difference > 0) ? 1 : -1;
7984
7985 /* make sure the loop executes enough iterations to gain from BCT */
7986 if (difference > -3 && difference < 3) {
7987 if (loop_dump_stream)
7988 fprintf (loop_dump_stream,
7989 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7990 loop_num);
7991 return;
7992 }
7993
7994 /* make sure that the loop executes at least once */
7995 if ((range_direction == 1 && compare_direction == -1)
7996 || (range_direction == -1 && compare_direction == 1))
7997 {
7998 if (loop_dump_stream)
7999 fprintf (loop_dump_stream,
8000 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
8001 loop_num);
8002 return;
8003 }
8004
8005 /* make sure that the loop does not end by an overflow. (With compile-time
8006 bounds we must have an additional check for overflow, because here
8007 we also support the compare code NE.) */
8008 if (comparison_code == NE
8009 && increment_direction != range_direction) {
8010 if (loop_dump_stream)
8011 fprintf (loop_dump_stream,
8012 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
8013 loop_num);
8014 return;
8015 }
8016
8017 /* Determine the number of iterations by:
8018 ;
8019 ; compare-val - initial-val + (increment -1) + additional-iteration
8020 ; num_iterations = -----------------------------------------------------------------
8021 ; increment
8022 */
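/* A worked example with hypothetical values: for a loop equivalent
   to `for (i = 0; i <= 10; i += 2)' we have compare-val 10,
   initial-val 0, increment 2 and additional-iteration 1 (from the
   `<=' test), so num_iterations = (10 - 0 + (2 - 1) + 1) / 2 = 6,
   matching the six executions i = 0, 2, 4, 6, 8, 10.  */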
8023 difference = (range_direction > 0) ? difference : -difference;
8024 #if 0
8025 fprintf (stderr, "difference is: %d\n", difference); /* @*/
8026 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
8027 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
8028 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
8029 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
8030 #endif
8031
8032 if (increment_value_abs == 0) {
8033 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
8034 abort ();
8035 }
8036 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
8037 / increment_value_abs;
8038
8039 #if 0
8040 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
8041 #endif
8042 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
8043
8044 /* Done with this loop. */
8045 return;
8046 }
8047
8048 /* Handle the more complex case, where the bounds are NOT known at compile
8049 time. In this case we generate a run-time calculation of the number of iterations. */
8050
8051 /* With runtime bounds, if the compare is of the form '!=' we give up */
8052 if (comparison_code == NE) {
8053 if (loop_dump_stream)
8054 fprintf (loop_dump_stream,
8055 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
8056 loop_num);
8057 return;
8058 }
8059
8060 else {
8061 /* We rely on the existence of run-time guard to ensure that the
8062 loop executes at least once. */
8063 rtx sequence;
8064 rtx iterations_num_reg;
8065
8066 int increment_value_abs = INTVAL (increment) * increment_direction;
8067
8068 /* make sure that the increment is a power of two, otherwise an
8069 (expensive) divide is needed. */
8070 if (exact_log2 (increment_value_abs) == -1)
8071 {
8072 if (loop_dump_stream)
8073 fprintf (loop_dump_stream,
8074 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
8075 return;
8076 }
8077
8078 /* compute the number of iterations */
8079 start_sequence ();
8080 {
8081 rtx temp_reg;
8082
8083 /* Again, the number of iterations is calculated by:
8084 ;
8085 ; compare-val - initial-val + (increment -1) + additional-iteration
8086 ; num_iterations = -----------------------------------------------------------------
8087 ; increment
8088 */
8089 /* ??? Do we have to call copy_rtx here before passing rtx to
8090 expand_binop? */
8091 if (compare_direction > 0) {
8092 /* <, <=: the loop variable is increasing */
8093 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
8094 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8095 }
8096 else {
8097 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
8098 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8099 }
8100
8101 if (increment_value_abs - 1 + add_iteration != 0)
8102 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
8103 GEN_INT (increment_value_abs - 1 + add_iteration),
8104 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8105
8106 if (increment_value_abs != 1)
8107 {
8108 /* ??? This will generate an expensive divide instruction for
8109 most targets. The original authors apparently expected this
8110 to be a shift, since they test for power-of-2 divisors above,
8111 but just naively generating a divide instruction will not give
8112 a shift. It happens to work for the PowerPC target because
8113 the rs6000.md file has a divide pattern that emits shifts.
8114 It will probably not work for any other target. */
8115 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
8116 temp_reg,
8117 GEN_INT (increment_value_abs),
8118 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8119 }
8120 else
8121 iterations_num_reg = temp_reg;
8122 }
8123 sequence = gen_sequence ();
8124 end_sequence ();
8125 emit_insn_before (sequence, loop_start);
8126 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
8127 }
8128 }
8129
8130 /* instrument loop by inserting a bct in it. This is done in the following way:
8131 1. A new register is created and assigned the hard register number of the count
8132 register.
8133 2. In the head of the loop the new variable is initialized by the value passed in the
8134 loop_num_iterations parameter.
8135 3. At the end of the loop, comparison of the register with 0 is generated.
8136 The created comparison follows the pattern defined for the
8137 decrement_and_branch_on_count insn, so this insn will be generated in assembly
8138 generation phase.
8139 4. The compare&branch on the old variable is deleted. So, if the loop-variable was
8140 not used elsewhere, it will be eliminated by data-flow analysis. */
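/* Schematically (an illustration; PowerPC-like mnemonics are assumed):

       before:                     after:
         L:                          ctr = iterations   ; loop header
           ...                     L:
           cmp  i, limit             ...
           blt  L                    bdnz L

   The old compare and branch are the two insns deleted at the start of
   instrument_loop_bct.  */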
8141
8142 static void
8143 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
8144 rtx loop_start, loop_end;
8145 rtx loop_num_iterations;
8146 {
8147 rtx temp_reg1, temp_reg2;
8148 rtx start_label;
8149
8150 rtx sequence;
8151 enum machine_mode loop_var_mode = SImode;
8152
8153 if (HAVE_decrement_and_branch_on_count)
8154 {
8155 if (loop_dump_stream)
8156 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
8157
8158 /* eliminate the check on the old variable: delete the conditional branch at the loop end and the compare insn feeding it */
8159 delete_insn (PREV_INSN (loop_end));
8160 delete_insn (PREV_INSN (loop_end));
8161
8162 /* insert the label which will delimit the start of the loop */
8163 start_label = gen_label_rtx ();
8164 emit_label_after (start_label, loop_start);
8165
8166 /* insert initialization of the count register into the loop header */
8167 start_sequence ();
8168 temp_reg1 = gen_reg_rtx (loop_var_mode);
8169 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
8170
8171 /* this will be the count register */
8172 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
8173 /* we have to move the value to the count register from a GPR
8174 because the rtx pointed to by loop_num_iterations could contain
8175 an expression which cannot be moved into the count register */
8176 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
8177
8178 sequence = gen_sequence ();
8179 end_sequence ();
8180 emit_insn_after (sequence, loop_start);
8181
8182 /* insert new comparison on the count register instead of the
8183 old one, generating the needed BCT pattern (that will be
8184 later recognized by assembly generation phase). */
8185 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
8186 loop_end);
8187 LABEL_NUSES (start_label)++;
8188 }
8189
8190 }
8191 #endif /* HAVE_decrement_and_branch_on_count */
8192
8193 #endif /* HAIFA */
8194
8195 /* Scan the function and determine whether it has indirect (computed) jumps.
8196
8197 This is taken mostly from flow.c; similar code exists elsewhere
8198 in the compiler. It may be useful to put this into rtlanal.c. */
8199 static int
8200 indirect_jump_in_function_p (start)
8201 rtx start;
8202 {
8203 rtx insn;
8204
8205 for (insn = start; insn; insn = NEXT_INSN (insn))
8206 if (computed_jump_p (insn))
8207 return 1;
8208
8209 return 0;
8210 }