1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
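
/* As an illustrative sketch (not taken from any particular testcase),
   a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   is transformed roughly as follows: the invariant product x * y is
   hoisted to a temporary computed once before the loop, i is recognized
   as a basic induction variable, and the address giv &a[i] is
   strength-reduced to a pointer bumped by the element size on each
   iteration.  */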
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "obstack.h"
41 #include "expr.h"
42 #include "insn-config.h"
43 #include "insn-flags.h"
44 #include "regs.h"
45 #include "hard-reg-set.h"
46 #include "recog.h"
47 #include "flags.h"
48 #include "real.h"
49 #include "loop.h"
50 #include "except.h"
51 #include "toplev.h"
52
53 /* Vector mapping INSN_UIDs to luids.
54 The luids are like uids but always increase monotonically.
55 We use them to see whether a jump comes from outside a given loop. */
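
/* A hedged illustration: insns emitted out of order may carry uids
   5, 9, 7 through a loop body while their luids run 1, 2, 3, so
   comparing luids against those of the loop's NOTE_INSN_LOOP_BEG and
   NOTE_INSN_LOOP_END gives a reliable inside/outside test where the
   raw uids would not.  */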
56
57 int *uid_luid;
58
59 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
60 number the insn is contained in. */
61
62 int *uid_loop_num;
63
64 /* 1 + largest uid of any insn. */
65
66 int max_uid_for_loop;
67
68 /* 1 + luid of last insn. */
69
70 static int max_luid;
71
72 /* Number of loops detected in current function. Used as index to the
73 next few tables. */
74
75 static int max_loop_num;
76
77 /* Indexed by loop number, contains the first and last insn of each loop. */
78
79 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
80
81 /* For each loop, gives the containing loop number, -1 if none. */
82
83 int *loop_outer_loop;
84
85 #ifdef HAIFA
86 /* The main output of analyze_loop_iterations is placed here */
87
88 int *loop_can_insert_bct;
89
90 /* For each loop, records whether any of its inner loops uses the
91 count register. */
92
93 int *loop_used_count_register;
94
95 /* loop parameters for arithmetic loops. These loops have a loop variable
96 which is initialized to loop_start_value, incremented in each iteration
97 by "loop_increment". At the end of the iteration the loop variable is
98 compared to the loop_comparison_value (using loop_comparison_code). */
99
100 rtx *loop_increment;
101 rtx *loop_comparison_value;
102 rtx *loop_start_value;
103 enum rtx_code *loop_comparison_code;
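
/* For example (an illustrative mapping only, with made-up names):
   a counted loop of the form

	for (i = init; i < final; i += incr)
	  body;

   would be recorded with loop_start_value == init, loop_increment ==
   incr, loop_comparison_value == final and loop_comparison_code == LT.  */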
104 #endif /* HAIFA */
105
106 /* For each loop, keep track of its unrolling factor.
107 Potential values:
108 0: not computed yet (cleared before unrolling runs).
109 1: not unrolled.
110 -1: completely unrolled
111 >0: holds the exact unroll factor. */
112 int *loop_unroll_factor;
113
114 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
115 really a loop (an insn outside the loop branches into it). */
116
117 static char *loop_invalid;
118
119 /* Indexed by loop number, links together all LABEL_REFs which refer to
120 code labels outside the loop. Used by routines that need to know all
121 loop exits, such as final_biv_value and final_giv_value.
122
123 This does not include loop exits due to return instructions. This is
124 because all bivs and givs are pseudos, and hence must be dead after a
125 return, so the presence of a return does not affect any of the
126 optimizations that use this info. It is simpler to just not include return
127 instructions on this list. */
128
129 rtx *loop_number_exit_labels;
130
131 /* Indexed by loop number, counts the number of LABEL_REFs on
132 loop_number_exit_labels for this loop and all loops nested inside it. */
133
134 int *loop_number_exit_count;
135
136 /* Holds the number of loop iterations. It is zero if the number could not be
137 calculated. Must be unsigned since the number of iterations can
138 be as high as 2^wordsize-1. For loops with a wider iterator, this number
139 will be zero if the number of loop iterations is too large for an
140 unsigned integer to hold. */
141
142 unsigned HOST_WIDE_INT loop_n_iterations;
143
144 /* Nonzero if there is a subroutine call in the current loop. */
145
146 static int loop_has_call;
147
148 /* Nonzero if there is a volatile memory reference in the current
149 loop. */
150
151 static int loop_has_volatile;
152
153 /* loop_continue is the NOTE_INSN_LOOP_CONT of the
154 current loop. A continue statement will generate a branch to
155 NEXT_INSN (loop_continue). */
156
157 static rtx loop_continue;
158
159 /* Indexed by register number, contains the number of times the reg
160 is set during the loop being scanned.
161 During code motion, a negative value indicates a reg that has been
162 made a candidate; in particular -2 means that it is a candidate that
163 we know is equal to a constant and -1 means that it is a candidate
164 not known equal to a constant.
165 After code motion, regs moved have 0 (which is accurate now)
166 while the failed candidates have the original number of times set.
167
168 Therefore, at all times, == 0 indicates an invariant register;
169 < 0 a conditionally invariant one. */
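
/* A sketch of the life cycle (hypothetical values): a reg set once
   starts at 1; if its set becomes a candidate known equal to a
   constant it is marked -2 (or -1 if the value is unknown); after
   code motion it reads 0 if the move succeeded, or 1 again if it
   failed.  */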
170
171 static varray_type n_times_set;
172
173 /* Original value of n_times_set; same except that this value
174 is not set negative for a reg whose sets have been made candidates
175 and not set to 0 for a reg that is moved. */
176
177 static varray_type n_times_used;
178
179 /* Indexed by register number; 1 indicates that the register
180 cannot be moved or strength reduced. */
181
182 static varray_type may_not_optimize;
183
184 /* Nonzero means reg N has already been moved out of one loop.
185 This reduces the desire to move it out of another. */
186
187 static char *moved_once;
188
189 /* Array of MEMs that are stored in this loop. If there are too many to fit
190 here, we just turn on unknown_address_altered. */
191
192 #define NUM_STORES 30
193 static rtx loop_store_mems[NUM_STORES];
194
195 /* Index of first available slot in above array. */
196 static int loop_store_mems_idx;
197
198 typedef struct loop_mem_info {
199 rtx mem; /* The MEM itself. */
200 rtx reg; /* Corresponding pseudo, if any. */
201 int optimize; /* Nonzero if we can optimize access to this MEM. */
202 } loop_mem_info;
203
204 /* Array of MEMs that are used (read or written) in this loop, but
205 cannot be aliased by anything in this loop, except perhaps
206 themselves. In other words, if loop_mems[i] is altered during the
207 loop, it is altered by an expression that is rtx_equal_p to it. */
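
/* An illustrative case (hypothetical): for a global scalar g, a loop

	while (--n)
	  g += x;

   only ever refers to g's memory as g itself, so load_mems below can
   copy g into a pseudo before the loop, use the pseudo inside it, and
   store the pseudo back to g afterwards.  */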
208
209 static loop_mem_info *loop_mems;
210
211 /* The index of the next available slot in LOOP_MEMS. */
212
213 static int loop_mems_idx;
214
215 /* The number of elements allocated in LOOP_MEMS. */
216
217 static int loop_mems_allocated;
218
219 /* Nonzero if we don't know what MEMs were changed in the current loop.
220 This happens if the loop contains a call (in which case `loop_has_call'
221 will also be set) or if we store into more than NUM_STORES MEMs. */
222
223 static int unknown_address_altered;
224
225 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
226 static int num_movables;
227
228 /* Count of memory write instructions discovered in the loop. */
229 static int num_mem_sets;
230
231 /* Number of loops contained within the current one, including itself. */
232 static int loops_enclosed;
233
234 /* Bound on pseudo register number before loop optimization.
235 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
236 int max_reg_before_loop;
237
238 /* This obstack is used in product_cheap_p to allocate its rtl. It
239 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
240 If we used the same obstack that it did, we would be deallocating
241 that array. */
242
243 static struct obstack temp_obstack;
244
245 /* This is where the pointer to the obstack being used for RTL is stored. */
246
247 extern struct obstack *rtl_obstack;
248
249 #define obstack_chunk_alloc xmalloc
250 #define obstack_chunk_free free
251 \f
252 /* During the analysis of a loop, a chain of `struct movable's
253 is made to record all the movable insns found.
254 Then the entire chain can be scanned to decide which to move. */
255
256 struct movable
257 {
258 rtx insn; /* A movable insn */
259 rtx set_src; /* The expression this reg is set from. */
260 rtx set_dest; /* The destination of this SET. */
261 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
262 of any registers used within the LIBCALL. */
263 int consec; /* Number of consecutive following insns
264 that must be moved with this one. */
265 int regno; /* The register it sets */
266 short lifetime; /* lifetime of that register;
267 may be adjusted when matching movables
268 that load the same value are found. */
269 short savings; /* Number of insns we can move for this reg,
270 including other movables that force this
271 or match this one. */
272 unsigned int cond : 1; /* 1 if only conditionally movable */
273 unsigned int force : 1; /* 1 means MUST move this insn */
274 unsigned int global : 1; /* 1 means reg is live outside this loop */
275 /* If PARTIAL is 1, GLOBAL means something different:
276 that the reg is live outside the range from where it is set
277 to the following label. */
278 unsigned int done : 1; /* 1 inhibits further processing of this */
279
280 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
281 In particular, moving it does not make it
282 invariant. */
283 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
284 load SRC, rather than copying INSN. */
285 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
286 first insn of a consecutive sets group. */
287 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
288 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
289 that we should avoid changing when clearing
290 the rest of the reg. */
291 struct movable *match; /* First entry for same value */
292 struct movable *forces; /* An insn that must be moved if this is */
293 struct movable *next;
294 };
295
296 static struct movable *the_movables;
297
298 FILE *loop_dump_stream;
299
300 /* Forward declarations. */
301
302 static void find_and_verify_loops PROTO((rtx));
303 static void mark_loop_jump PROTO((rtx, int));
304 static void prescan_loop PROTO((rtx, rtx));
305 static int reg_in_basic_block_p PROTO((rtx, rtx));
306 static int consec_sets_invariant_p PROTO((rtx, int, rtx));
307 static rtx libcall_other_reg PROTO((rtx, rtx));
308 static int labels_in_range_p PROTO((rtx, int));
309 static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
310 int *, int));
311 static void note_addr_stored PROTO((rtx, rtx));
312 static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
313 static void scan_loop PROTO((rtx, rtx, int, int));
314 #if 0
315 static void replace_call_address PROTO((rtx, rtx, rtx));
316 #endif
317 static rtx skip_consec_insns PROTO((rtx, int));
318 static int libcall_benefit PROTO((rtx));
319 static void ignore_some_movables PROTO((struct movable *));
320 static void force_movables PROTO((struct movable *));
321 static void combine_movables PROTO((struct movable *, int));
322 static int regs_match_p PROTO((rtx, rtx, struct movable *));
323 static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
324 static void add_label_notes PROTO((rtx, rtx));
325 static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
326 static int count_nonfixed_reads PROTO((rtx));
327 static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int, int));
328 static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
329 static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
330 static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
331 static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
332 static void check_final_value PROTO((struct induction *, rtx, rtx));
333 static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
334 static void update_giv_derive PROTO((rtx));
335 static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
336 static rtx simplify_giv_expr PROTO((rtx, int *));
337 static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
338 static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
339 static int check_dbra_loop PROTO((rtx, int, rtx));
340 static rtx express_from_1 PROTO((rtx, rtx, rtx));
341 static rtx express_from PROTO((struct induction *, struct induction *));
342 static rtx combine_givs_p PROTO((struct induction *, struct induction *));
343 static void combine_givs PROTO((struct iv_class *));
344 static int product_cheap_p PROTO((rtx, rtx));
345 static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
346 static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
347 static int last_use_this_basic_block PROTO((rtx, rtx));
348 static void record_initial PROTO((rtx, rtx));
349 static void update_reg_last_use PROTO((rtx, rtx));
350 static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
351 static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
352 rtx, varray_type,
353 int *));
354 static void load_mems PROTO((rtx, rtx, rtx, rtx));
355 static int insert_loop_mem PROTO((rtx *, void *));
356 static int replace_loop_mem PROTO((rtx *, void *));
357 static int replace_label PROTO((rtx *, void *));
358
359 typedef struct rtx_and_int {
360 rtx r;
361 int i;
362 } rtx_and_int;
363
364 typedef struct rtx_pair {
365 rtx r1;
366 rtx r2;
367 } rtx_pair;
368
369 /* Nonzero iff INSN is between START and END, inclusive. */
370 #define INSN_IN_RANGE_P(INSN, START, END) \
371 (INSN_UID (INSN) < max_uid_for_loop \
372 && INSN_LUID (INSN) >= INSN_LUID (START) \
373 && INSN_LUID (INSN) <= INSN_LUID (END))
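
/* A typical use appears in scan_loop below:
   INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end) asks whether the
   label a jump targets lies inside the loop; the uid check guards
   against insns created after the luids were assigned.  */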
374
375 #ifdef HAIFA
376 /* This is extern from unroll.c */
377 extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
378
379 /* Two main functions for implementing bct:
380 the first to be called before loop unrolling, and the second after it. */
381 #ifdef HAVE_decrement_and_branch_on_count
382 static void analyze_loop_iterations PROTO((rtx, rtx));
383 static void insert_bct PROTO((rtx, rtx));
384
385 /* Auxiliary function that inserts the bct pattern into the loop */
386 static void instrument_loop_bct PROTO((rtx, rtx, rtx));
387 #endif /* HAVE_decrement_and_branch_on_count */
388 #endif /* HAIFA */
389
390 /* Indirect_jump_in_function is computed once per function. */
391 int indirect_jump_in_function = 0;
392 static int indirect_jump_in_function_p PROTO((rtx));
393
394 \f
395 /* Relative gain of eliminating various kinds of operations. */
396 static int add_cost;
397 #if 0
398 static int shift_cost;
399 static int mult_cost;
400 #endif
401
402 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
403 copy the value of the strength reduced giv to its original register. */
404 static int copy_cost;
405
406 /* Cost of using a register, to normalize the benefits of a giv. */
407 static int reg_address_cost;
408
409
410 void
411 init_loop ()
412 {
413 char *free_point = (char *) oballoc (1);
414 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
415
416 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
417
418 #ifdef ADDRESS_COST
419 reg_address_cost = ADDRESS_COST (reg);
420 #else
421 reg_address_cost = rtx_cost (reg, MEM);
422 #endif
423
424 /* We multiply by 2 to reconcile the difference in scale between
425 these two ways of computing costs. Otherwise the cost of a copy
426 will be far less than the cost of an add. */
427
428 copy_cost = 2 * 2;
429
430 /* Free the objects we just allocated. */
431 obfree (free_point);
432
433 /* Initialize the obstack used for rtl in product_cheap_p. */
434 gcc_obstack_init (&temp_obstack);
435 }
436 \f
437 /* Entry point of this file. Perform loop optimization
438 on the current function. F is the first insn of the function
439 and DUMPFILE is a stream for output of a trace of actions taken
440 (or 0 if none should be output). */
441
442 void
443 loop_optimize (f, dumpfile, unroll_p, bct_p)
444 /* f is the first instruction of a chain of insns for one function */
445 rtx f;
446 FILE *dumpfile;
447 int unroll_p, bct_p;
448 {
449 register rtx insn;
450 register int i;
451 rtx last_insn;
452
453 loop_dump_stream = dumpfile;
454
455 init_recog_no_volatile ();
456
457 max_reg_before_loop = max_reg_num ();
458
459 moved_once = (char *) alloca (max_reg_before_loop);
460 bzero (moved_once, max_reg_before_loop);
461
462 regs_may_share = 0;
463
464 /* Count the number of loops. */
465
466 max_loop_num = 0;
467 for (insn = f; insn; insn = NEXT_INSN (insn))
468 {
469 if (GET_CODE (insn) == NOTE
470 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
471 max_loop_num++;
472 }
473
474 /* Don't waste time if no loops. */
475 if (max_loop_num == 0)
476 return;
477
478 /* Get size to use for tables indexed by uids.
479 Leave some space for labels allocated by find_and_verify_loops. */
480 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
481
482 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
483 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
484
485 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
486 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
487
488 /* Allocate tables for recording each loop. We set each entry, so they need
489 not be zeroed. */
490 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
491 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
492 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
493 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
494 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
495 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
496
497 /* This is initialized by the unrolling code, so we go ahead
498 and clear it just in case we are not performing loop
499 unrolling. */
500 loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
501 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
502
503 #ifdef HAIFA
504 /* Allocate for BCT optimization */
505 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
506 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
507
508 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
509 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
510
511 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
512 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
513 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
514 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
515 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
516 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
517
518 loop_comparison_code
519 = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
520 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
521 #endif /* HAIFA */
522
523 /* Find and process each loop.
524 First, find them, and record them in order of their beginnings. */
525 find_and_verify_loops (f);
526
527 /* Now find all register lifetimes. This must be done after
528 find_and_verify_loops, because it might reorder the insns in the
529 function. */
530 reg_scan (f, max_reg_num (), 1);
531
532 /* This must occur after reg_scan so that registers created by gcse
533 will have entries in the register tables.
534
535 We could have added a call to reg_scan after gcse_main in toplev.c,
536 but moving this call to init_alias_analysis is more efficient. */
537 init_alias_analysis ();
538
539 /* See if we went too far. */
540 if (get_max_uid () > max_uid_for_loop)
541 abort ();
542 /* Now reset it to the actual size we need. See above. */
543 max_uid_for_loop = get_max_uid () + 1;
544
545 /* Compute the mapping from uids to luids.
546 LUIDs are numbers assigned to insns, like uids,
547 except that luids increase monotonically through the code.
548 Don't assign luids to line-number NOTEs, so that the distance in luids
549 between two insns is not affected by -g. */
550
551 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
552 {
553 last_insn = insn;
554 if (GET_CODE (insn) != NOTE
555 || NOTE_LINE_NUMBER (insn) <= 0)
556 uid_luid[INSN_UID (insn)] = ++i;
557 else
558 /* Give a line number note the same luid as preceding insn. */
559 uid_luid[INSN_UID (insn)] = i;
560 }
561
562 max_luid = i + 1;
563
564 /* Don't leave gaps in uid_luid for insns that have been
565 deleted. It is possible that the first or last insn
566 using some register has been deleted by cross-jumping.
567 Make sure that uid_luid for that former insn's uid
568 points to the general area where that insn used to be. */
569 for (i = 0; i < max_uid_for_loop; i++)
570 {
571 uid_luid[0] = uid_luid[i];
572 if (uid_luid[0] != 0)
573 break;
574 }
575 for (i = 0; i < max_uid_for_loop; i++)
576 if (uid_luid[i] == 0)
577 uid_luid[i] = uid_luid[i - 1];
578
579 /* Create a mapping from loops to BLOCK tree nodes. */
580 if (unroll_p && write_symbols != NO_DEBUG)
581 find_loop_tree_blocks ();
582
583 /* Determine if the function has an indirect jump. On some systems
584 this prevents low overhead loop instructions from being used. */
585 indirect_jump_in_function = indirect_jump_in_function_p (f);
586
587 /* Now scan the loops, last ones first, since this means inner ones are done
588 before outer ones. */
589 for (i = max_loop_num-1; i >= 0; i--)
590 if (! loop_invalid[i] && loop_number_loop_ends[i])
591 scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
592 unroll_p, bct_p);
593
594 /* If debugging and unrolling loops, we must replicate the tree nodes
595 corresponding to the blocks inside the loop, so that the original one
596 to one mapping will remain. */
597 if (unroll_p && write_symbols != NO_DEBUG)
598 unroll_block_trees ();
599
600 end_alias_analysis ();
601 }
602 \f
603 /* Returns the next insn, in execution order, after INSN. START and
604 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
605 respectively. LOOP_TOP, if non-NULL, is the top of the loop in the
606 insn-stream; it is used with loops that are entered near the
607 bottom. */
608
609 static rtx
610 next_insn_in_loop (insn, start, end, loop_top)
611 rtx insn;
612 rtx start;
613 rtx end;
614 rtx loop_top;
615 {
616 insn = NEXT_INSN (insn);
617
618 if (insn == end)
619 {
620 if (loop_top)
621 /* Go to the top of the loop, and continue there. */
622 insn = loop_top;
623 else
624 /* We're done. */
625 insn = NULL_RTX;
626 }
627
628 if (insn == start)
629 /* We're done. */
630 insn = NULL_RTX;
631
632 return insn;
633 }
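
/* The usual traversal, as used in scan_loop below:

	for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	     p != NULL_RTX;
	     p = next_insn_in_loop (p, scan_start, end, loop_top))
	  ...;
 */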
634
635 /* Optimize one loop whose start is LOOP_START and end is END.
636 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
637 NOTE_INSN_LOOP_END. */
638
639 /* ??? Could also move memory writes out of loops if the destination address
640 is invariant, the source is invariant, the memory write is not volatile,
641 and if we can prove that no read inside the loop can read this address
642 before the write occurs. If there is a read of this address after the
643 write, then we can also mark the memory read as invariant. */
644
645 static void
646 scan_loop (loop_start, end, unroll_p, bct_p)
647 rtx loop_start, end;
648 int unroll_p, bct_p;
649 {
650 register int i;
651 rtx p;
652 /* 1 if we are scanning insns that could be executed zero times. */
653 int maybe_never = 0;
654 /* 1 if we are scanning insns that might never be executed
655 due to a subroutine call which might exit before they are reached. */
656 int call_passed = 0;
657 /* For a rotated loop that is entered near the bottom,
658 this is the label at the top. Otherwise it is zero. */
659 rtx loop_top = 0;
660 /* Jump insn that enters the loop, or 0 if control drops in. */
661 rtx loop_entry_jump = 0;
662 /* Place in the loop where control enters. */
663 rtx scan_start;
664 /* Number of insns in the loop. */
665 int insn_count;
666 int in_libcall = 0;
667 int tem;
668 rtx temp;
669 /* The SET from an insn, if it is the only SET in the insn. */
670 rtx set, set1;
671 /* Chain describing insns movable in current loop. */
672 struct movable *movables = 0;
673 /* Last element in `movables' -- so we can add elements at the end. */
674 struct movable *last_movable = 0;
675 /* Ratio of extra register life span we can justify
676 for saving an instruction. More if loop doesn't call subroutines
677 since in that case saving an insn makes more difference
678 and more registers are available. */
679 int threshold;
680 /* If we have calls, contains the insn in which a register was used
681 if it was used exactly once; contains const0_rtx if it was used more
682 than once. */
683 varray_type reg_single_usage = 0;
684 /* Nonzero if we are scanning instructions in a sub-loop. */
685 int loop_depth = 0;
686 int nregs;
687
688 /* Determine whether this loop starts with a jump down to a test at
689 the end. This will occur for a small number of loops with a test
690 that is too complex to duplicate in front of the loop.
691
692 We search for the first insn or label in the loop, skipping NOTEs.
693 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
694 (because we might have a loop executed only once that contains a
695 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
696 (in case we have a degenerate loop).
697
698 Note that if we mistakenly think that a loop is entered at the top
699 when, in fact, it is entered at the exit test, the only effect will be
700 slightly poorer optimization. Making the opposite error can generate
701 incorrect code. Since very few loops now start with a jump to the
702 exit test, the code here to detect that case is very conservative. */
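
/* For instance, a rotated loop usually has this shape (a sketch):

	NOTE_INSN_LOOP_BEG
	jump L2			;; enter at the exit test
     L1:			;; top of the loop body
	...
     L2:			;; exit test
	conditional jump L1
	NOTE_INSN_LOOP_END

   in which case scan_start becomes L2 and loop_top becomes L1.  */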
703
704 for (p = NEXT_INSN (loop_start);
705 p != end
706 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
707 && (GET_CODE (p) != NOTE
708 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
709 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
710 p = NEXT_INSN (p))
711 ;
712
713 scan_start = p;
714
715 /* Set up variables describing this loop. */
716 prescan_loop (loop_start, end);
717 threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
718
719 /* If loop has a jump before the first label,
720 the true entry is the target of that jump.
721 Start scan from there.
722 But record in LOOP_TOP the place where the end-test jumps
723 back to so we can scan that after the end of the loop. */
724 if (GET_CODE (p) == JUMP_INSN)
725 {
726 loop_entry_jump = p;
727
728 /* Loop entry must be an unconditional jump (and not a RETURN). */
729 if (simplejump_p (p)
730 && JUMP_LABEL (p) != 0
731 /* Check to see whether the jump actually
732 jumps out of the loop (meaning it's no loop).
733 This case can happen for things like
734 do {..} while (0). If this label was generated previously
735 by loop, we can't tell anything about it and have to reject
736 the loop. */
737 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
738 {
739 loop_top = next_label (scan_start);
740 scan_start = JUMP_LABEL (p);
741 }
742 }
743
744 /* If SCAN_START was an insn created by loop, we don't know its luid
745 as required by loop_reg_used_before_p. So skip such loops. (This
746 test may never be true, but it's best to play it safe.)
747
748 Also, skip loops where we do not start scanning at a label. This
749 test also rejects loops starting with a JUMP_INSN that failed the
750 test above. */
751
752 if (INSN_UID (scan_start) >= max_uid_for_loop
753 || GET_CODE (scan_start) != CODE_LABEL)
754 {
755 if (loop_dump_stream)
756 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
757 INSN_UID (loop_start), INSN_UID (end));
758 return;
759 }
760
761 /* Count number of times each reg is set during this loop.
762 Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
763 the setting of register I. If this loop has calls, set
764 VARRAY_RTX (reg_single_usage, I). */
765
766 /* Allocate extra space for REGS that might be created by
767 load_mems. We allocate a little extra slop as well, in the hopes
768 that even after the moving of movables creates some new registers
769 we won't have to reallocate these arrays. However, we do grow
770 the arrays, if necessary, in load_mems_and_recount_loop_regs_set. */
771 nregs = max_reg_num () + loop_mems_idx + 16;
772 VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
773 VARRAY_INT_INIT (n_times_used, nregs, "n_times_used");
774 VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
775
776 if (loop_has_call)
777 VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");
778
779 count_loop_regs_set (loop_top ? loop_top : loop_start, end,
780 may_not_optimize, reg_single_usage, &insn_count, nregs);
781
782 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
783 {
784 VARRAY_CHAR (may_not_optimize, i) = 1;
785 VARRAY_INT (n_times_set, i) = 1;
786 }
787
788 #ifdef AVOID_CCMODE_COPIES
789 /* Don't try to move insns which set CC registers if we should not
790 create CCmode register copies. */
791 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
792 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
793 VARRAY_CHAR (may_not_optimize, i) = 1;
794 #endif
795
796 bcopy ((char *) &n_times_set->data,
797 (char *) &n_times_used->data, nregs * sizeof (int));
798
799 if (loop_dump_stream)
800 {
801 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
802 INSN_UID (loop_start), INSN_UID (end), insn_count);
803 if (loop_continue)
804 fprintf (loop_dump_stream, "Continue at insn %d.\n",
805 INSN_UID (loop_continue));
806 }
807
808 /* Scan through the loop finding insns that are safe to move.
809 Set n_times_set negative for the reg being set, so that
810 this reg will be considered invariant for subsequent insns.
811 We consider whether subsequent insns use the reg
812 in deciding whether it is worth actually moving.
813
814 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
815 and therefore it is possible that the insns we are scanning
816 would never be executed. At such times, we must make sure
817 that it is safe to execute the insn once instead of zero times.
818 When MAYBE_NEVER is 0, all insns will be executed at least once
819 so that is not a problem. */
820
821 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
822 p != NULL_RTX;
823 p = next_insn_in_loop (p, scan_start, end, loop_top))
824 {
825 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
826 && find_reg_note (p, REG_LIBCALL, NULL_RTX))
827 in_libcall = 1;
828 else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
829 && find_reg_note (p, REG_RETVAL, NULL_RTX))
830 in_libcall = 0;
831
832 if (GET_CODE (p) == INSN
833 && (set = single_set (p))
834 && GET_CODE (SET_DEST (set)) == REG
835 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
836 {
837 int tem1 = 0;
838 int tem2 = 0;
839 int move_insn = 0;
840 rtx src = SET_SRC (set);
841 rtx dependencies = 0;
842
843 /* Figure out what to use as a source of this insn. If a REG_EQUIV
844 note is given or if a REG_EQUAL note with a constant operand is
845 specified, use it as the source and mark that we should move
846 this insn by calling emit_move_insn rather than duplicating the
847 insn.
848
849 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
850 is present. */
851 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
852 if (temp)
853 src = XEXP (temp, 0), move_insn = 1;
854 else
855 {
856 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
857 if (temp && CONSTANT_P (XEXP (temp, 0)))
858 src = XEXP (temp, 0), move_insn = 1;
859 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
860 {
861 src = XEXP (temp, 0);
862 /* A libcall block can use regs that don't appear in
863 the equivalent expression. To move the libcall,
864 we must move those regs too. */
865 dependencies = libcall_other_reg (p, src);
866 }
867 }
868
869 /* Don't try to optimize a register that was made
870 by loop-optimization for an inner loop.
871 We don't know its life-span, so we can't compute the benefit. */
872 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
873 ;
874 else if (/* The register being set is a user variable or is used in
875 the exit test (this can cause the variable to be
876 used before it is set just like a
877 user-variable)... */
878 (REG_USERVAR_P (SET_DEST (set))
879 || REG_LOOP_TEST_P (SET_DEST (set)))
880 /* And the set is not guaranteed to be executed once
881 the loop starts, or the value before the set is
882 needed before the set occurs... */
883 && (maybe_never
884 || loop_reg_used_before_p (set, p, loop_start,
885 scan_start, end))
886 /* And the register is used in basic blocks other
887 than the one where it is set (meaning that
888 something after this point in the loop might
889 depend on its value before the set). */
890 && !reg_in_basic_block_p (p, SET_DEST (set)))
891 /* It is unsafe to move the set. The fact that these
892 three conditions are considered in conjunction means
893 that we are assuming various conditions, such as:
894
895 o It's OK to move a set of a variable which was not
896 created by the user and is not used in an exit test
897 even if the point where it is set would not be reached
898 during execution of the loop. */
899 ;
900 else if ((tem = invariant_p (src))
901 && (dependencies == 0
902 || (tem2 = invariant_p (dependencies)) != 0)
903 && (VARRAY_INT (n_times_set,
904 REGNO (SET_DEST (set))) == 1
905 || (tem1
906 = consec_sets_invariant_p
907 (SET_DEST (set),
908 VARRAY_INT (n_times_set, REGNO (SET_DEST (set))),
909 p)))
910 /* If the insn can cause a trap (such as divide by zero),
911 can't move it unless it's guaranteed to be executed
912 once the loop is entered. Even a function call might
913 prevent the trap insn from being reached
914 (since it might exit!) */
915 && ! ((maybe_never || call_passed)
916 && may_trap_p (src)))
917 {
918 register struct movable *m;
919 register int regno = REGNO (SET_DEST (set));
920
921 /* A potential lossage is the case where two insns
922 can be combined as long as they are both in the loop, but
923 we move one of them outside the loop. For large loops,
924 this can lose. The most common case of this is the address
925 of a function being called.
926
927 Therefore, if this register is marked as being used exactly
928 once if we are in a loop with calls (a "large loop"), see if
929 we can replace the usage of this register with the source
930 of this SET. If we can, delete this insn.
931
932 Don't do this if P has a REG_RETVAL note or if we have
933 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
934
935 if (reg_single_usage && VARRAY_RTX (reg_single_usage, regno) != 0
936 && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
937 && REGNO_FIRST_UID (regno) == INSN_UID (p)
938 && (REGNO_LAST_UID (regno)
939 == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
940 && VARRAY_INT (n_times_set, regno) == 1
941 && ! side_effects_p (SET_SRC (set))
942 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
943 && (! SMALL_REGISTER_CLASSES
944 || (! (GET_CODE (SET_SRC (set)) == REG
945 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
946 /* This test is not redundant; SET_SRC (set) might be
947 a call-clobbered register and the life of REGNO
948 might span a call. */
949 && ! modified_between_p (SET_SRC (set), p,
950 VARRAY_RTX
951 (reg_single_usage, regno))
952 && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
953 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
954 VARRAY_RTX
955 (reg_single_usage, regno)))
956 {
957 /* Replace any usage in a REG_EQUAL note. Must copy the
958 new source, so that we don't get rtx sharing between the
959 SET_SOURCE and REG_NOTES of insn p. */
960 REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
961 = replace_rtx (REG_NOTES (VARRAY_RTX
962 (reg_single_usage, regno)),
963 SET_DEST (set), copy_rtx (SET_SRC (set)));
964
965 PUT_CODE (p, NOTE);
966 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
967 NOTE_SOURCE_FILE (p) = 0;
968 VARRAY_INT (n_times_set, regno) = 0;
969 continue;
970 }
971
972 m = (struct movable *) alloca (sizeof (struct movable));
973 m->next = 0;
974 m->insn = p;
975 m->set_src = src;
976 m->dependencies = dependencies;
977 m->set_dest = SET_DEST (set);
978 m->force = 0;
979 m->consec = VARRAY_INT (n_times_set,
980 REGNO (SET_DEST (set))) - 1;
981 m->done = 0;
982 m->forces = 0;
983 m->partial = 0;
984 m->move_insn = move_insn;
985 m->move_insn_first = 0;
986 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
987 m->savemode = VOIDmode;
988 m->regno = regno;
989 /* Set M->cond if either invariant_p or consec_sets_invariant_p
990 returned 2 (only conditionally invariant). */
991 m->cond = ((tem | tem1 | tem2) > 1);
992 m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
993 || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
994 m->match = 0;
995 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
996 - uid_luid[REGNO_FIRST_UID (regno)]);
997 m->savings = VARRAY_INT (n_times_used, regno);
998 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
999 m->savings += libcall_benefit (p);
1000 VARRAY_INT (n_times_set, regno) = move_insn ? -2 : -1;
1001 /* Add M to the end of the chain MOVABLES. */
1002 if (movables == 0)
1003 movables = m;
1004 else
1005 last_movable->next = m;
1006 last_movable = m;
1007
1008 if (m->consec > 0)
1009 {
1010 /* It is possible for the first instruction to have a
1011 REG_EQUAL note but a non-invariant SET_SRC, so we must
1012 remember the status of the first instruction in case
1013 the last instruction doesn't have a REG_EQUAL note. */
1014 m->move_insn_first = m->move_insn;
1015
1016 /* Skip this insn, not checking REG_LIBCALL notes. */
1017 p = next_nonnote_insn (p);
1018 /* Skip the consecutive insns, if there are any. */
1019 p = skip_consec_insns (p, m->consec);
1020 /* Back up to the last insn of the consecutive group. */
1021 p = prev_nonnote_insn (p);
1022
1023 /* We must now reset m->move_insn, m->is_equiv, and possibly
1024 m->set_src to correspond to the effects of all the
1025 insns. */
1026 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1027 if (temp)
1028 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1029 else
1030 {
1031 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1032 if (temp && CONSTANT_P (XEXP (temp, 0)))
1033 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1034 else
1035 m->move_insn = 0;
1036
1037 }
1038 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1039 }
1040 }
1041 /* If this register is always set within a STRICT_LOW_PART
1042 or set to zero, then its high bytes are constant.
1043 So clear them outside the loop and within the loop
1044 just load the low bytes.
1045 We must check that the machine has an instruction to do so.
1046 Also, if the value loaded into the register
1047 depends on the same register, this cannot be done. */
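/* The pattern being matched looks like this (register numbers are
   made up for illustration):

	(set (reg:SI 100) (const_int 0))
	(set (strict_low_part (subreg:HI (reg:SI 100) 0)) (mem:HI ...))

   so the clearing insn can be hoisted out of the loop, leaving only
   the narrow load inside it.  */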
1048 else if (SET_SRC (set) == const0_rtx
1049 && GET_CODE (NEXT_INSN (p)) == INSN
1050 && (set1 = single_set (NEXT_INSN (p)))
1051 && GET_CODE (set1) == SET
1052 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1053 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1054 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1055 == SET_DEST (set))
1056 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1057 {
1058 register int regno = REGNO (SET_DEST (set));
1059 if (VARRAY_INT (n_times_set, regno) == 2)
1060 {
1061 register struct movable *m;
1062 m = (struct movable *) alloca (sizeof (struct movable));
1063 m->next = 0;
1064 m->insn = p;
1065 m->set_dest = SET_DEST (set);
1066 m->dependencies = 0;
1067 m->force = 0;
1068 m->consec = 0;
1069 m->done = 0;
1070 m->forces = 0;
1071 m->move_insn = 0;
1072 m->move_insn_first = 0;
1073 m->partial = 1;
1074 /* If the insn may not be executed on some iterations,
1075 we can't clear the whole reg; clear just high part.
1076 Not even if the reg is used only within this loop.
1077 Consider this:
1078 while (1)
1079 while (s != t) {
1080 if (foo ()) x = *s;
1081 use (x);
1082 }
1083 Clearing x before the inner loop could clobber a value
1084 being saved from the last time around the outer loop.
1085 However, if the reg is not used outside this loop
1086 and all uses of the register are in the same
1087 basic block as the store, there is no problem.
1088
1089 If this insn was made by loop, we don't know its
1090 INSN_LUID and hence must make a conservative
1091 assumption. */
1092 m->global = (INSN_UID (p) >= max_uid_for_loop
1093 || (uid_luid[REGNO_LAST_UID (regno)]
1094 > INSN_LUID (end))
1095 || (uid_luid[REGNO_FIRST_UID (regno)]
1096 < INSN_LUID (p))
1097 || (labels_in_range_p
1098 (p, uid_luid[REGNO_FIRST_UID (regno)])));
1099 if (maybe_never && m->global)
1100 m->savemode = GET_MODE (SET_SRC (set1));
1101 else
1102 m->savemode = VOIDmode;
1103 m->regno = regno;
1104 m->cond = 0;
1105 m->match = 0;
1106 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
1107 - uid_luid[REGNO_FIRST_UID (regno)]);
1108 m->savings = 1;
1109 VARRAY_INT (n_times_set, regno) = -1;
1110 /* Add M to the end of the chain MOVABLES. */
1111 if (movables == 0)
1112 movables = m;
1113 else
1114 last_movable->next = m;
1115 last_movable = m;
1116 }
1117 }
1118 }
1119 /* Past a call insn, we get to insns which might not be executed
1120 because the call might exit. This matters for insns that trap.
1121 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
1122 so they don't count. */
1123 else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
1124 call_passed = 1;
1125 /* Past a label or a jump, we get to insns for which we
1126 can't count on whether or how many times they will be
1127 executed during each iteration. Therefore, we can
1128 only move out sets of trivial variables
1129 (those not used after the loop). */
1130 /* Similar code appears twice in strength_reduce. */
1131 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1132 /* If we enter the loop in the middle, and scan around to the
1133 beginning, don't set maybe_never for that. This must be an
1134 unconditional jump, otherwise the code at the top of the
1135 loop might never be executed. Unconditional jumps are
1136 followed by a barrier and then the loop end.
1137 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
1138 && NEXT_INSN (NEXT_INSN (p)) == end
1139 && simplejump_p (p)))
1140 maybe_never = 1;
1141 else if (GET_CODE (p) == NOTE)
1142 {
1143 /* At the virtual top of a converted loop, insns are again known to
1144 be executed: logically, the loop begins here even though the exit
1145 code has been duplicated. */
1146 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1147 maybe_never = call_passed = 0;
1148 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1149 loop_depth++;
1150 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1151 loop_depth--;
1152 }
1153 }
1154
1155 /* If one movable subsumes another, ignore that other. */
1156
1157 ignore_some_movables (movables);
1158
1159 /* For each movable insn, see if the reg that it loads
1160 dies inside another insn that is itself only conditionally movable.
1161 If so, record that the second insn "forces" the first one,
1162 since the second can be moved only if the first is. */
1163
1164 force_movables (movables);
1165
1166 /* See if there are multiple movable insns that load the same value.
1167 If there are, make all but the first point at the first one
1168 through the `match' field, and add the priorities of them
1169 all together as the priority of the first. */
1170
1171 combine_movables (movables, nregs);
1172
1173 /* Now consider each movable insn to decide whether it is worth moving.
1174 Store 0 in n_times_set for each reg that is moved.
1175
1176 Generally this increases code size, so do not move movables when
1177 optimizing for code size. */
1178
1179 if (! optimize_size)
1180 move_movables (movables, threshold,
1181 insn_count, loop_start, end, nregs);
1182
1183 /* Any candidates still marked negative are those that were not moved.
1184 Change n_times_set to indicate that those are not actually invariant. */
1185 for (i = 0; i < nregs; i++)
1186 if (VARRAY_INT (n_times_set, i) < 0)
1187 VARRAY_INT (n_times_set, i) = VARRAY_INT (n_times_used, i);
1188
1189 /* Now that we've moved some things out of the loop, we are able to
1190 hoist even more memory references. There's no need to pass
1191 reg_single_usage this time, since we're done with it. */
1192 load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
1193 loop_start, 0,
1194 &insn_count);
1195
1196 if (flag_strength_reduce)
1197 {
1198 the_movables = movables;
1199 strength_reduce (scan_start, end, loop_top,
1200 insn_count, loop_start, end, unroll_p, bct_p);
1201 }
1202
1203 VARRAY_FREE (n_times_set);
1204 VARRAY_FREE (n_times_used);
1205 VARRAY_FREE (may_not_optimize);
1206 VARRAY_FREE (reg_single_usage);
1207 }
1208 \f
1209 /* Add elements to *OUTPUT to record all the pseudo-regs
1210 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1211
1212 void
1213 record_excess_regs (in_this, not_in_this, output)
1214 rtx in_this, not_in_this;
1215 rtx *output;
1216 {
1217 enum rtx_code code;
1218 char *fmt;
1219 int i;
1220
1221 code = GET_CODE (in_this);
1222
1223 switch (code)
1224 {
1225 case PC:
1226 case CC0:
1227 case CONST_INT:
1228 case CONST_DOUBLE:
1229 case CONST:
1230 case SYMBOL_REF:
1231 case LABEL_REF:
1232 return;
1233
1234 case REG:
1235 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1236 && ! reg_mentioned_p (in_this, not_in_this))
1237 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1238 return;
1239
1240 default:
1241 break;
1242 }
1243
1244 fmt = GET_RTX_FORMAT (code);
1245 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1246 {
1247 int j;
1248
1249 switch (fmt[i])
1250 {
1251 case 'E':
1252 for (j = 0; j < XVECLEN (in_this, i); j++)
1253 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1254 break;
1255
1256 case 'e':
1257 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1258 break;
1259 }
1260 }
1261 }
1262 \f
1263 /* Check what regs are referred to in the libcall block ending with INSN,
1264 aside from those mentioned in the equivalent value.
1265 If there are none, return 0.
1266 If there are one or more, return an EXPR_LIST containing all of them. */
1267
1268 static rtx
1269 libcall_other_reg (insn, equiv)
1270 rtx insn, equiv;
1271 {
1272 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1273 rtx p = XEXP (note, 0);
1274 rtx output = 0;
1275
1276 /* First, find all the regs used in the libcall block
1277 that are not mentioned as inputs to the result. */
1278
1279 while (p != insn)
1280 {
1281 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1282 || GET_CODE (p) == CALL_INSN)
1283 record_excess_regs (PATTERN (p), equiv, &output);
1284 p = NEXT_INSN (p);
1285 }
1286
1287 return output;
1288 }
1289 \f
1290 /* Return 1 if all uses of REG
1291 are between INSN and the end of the basic block. */
1292
1293 static int
1294 reg_in_basic_block_p (insn, reg)
1295 rtx insn, reg;
1296 {
1297 int regno = REGNO (reg);
1298 rtx p;
1299
1300 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1301 return 0;
1302
1303 /* Search this basic block for the already recorded last use of the reg. */
1304 for (p = insn; p; p = NEXT_INSN (p))
1305 {
1306 switch (GET_CODE (p))
1307 {
1308 case NOTE:
1309 break;
1310
1311 case INSN:
1312 case CALL_INSN:
1313 /* Ordinary insn: if this is the last use, we win. */
1314 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1315 return 1;
1316 break;
1317
1318 case JUMP_INSN:
1319 /* Jump insn: if this is the last use, we win. */
1320 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1321 return 1;
1322 /* Otherwise, it's the end of the basic block, so we lose. */
1323 return 0;
1324
1325 case CODE_LABEL:
1326 case BARRIER:
1327 /* It's the end of the basic block, so we lose. */
1328 return 0;
1329
1330 default:
1331 break;
1332 }
1333 }
1334
1335 /* The "last use" doesn't follow the "first use"?? */
1336 abort ();
1337 }
1338 \f
1339 /* Compute the benefit of eliminating the insns in the block whose
1340 last insn is LAST. This may be a group of insns used to compute a
1341 value directly or can contain a library call. */
1342
1343 static int
1344 libcall_benefit (last)
1345 rtx last;
1346 {
1347 rtx insn;
1348 int benefit = 0;
1349
1350 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1351 insn != last; insn = NEXT_INSN (insn))
1352 {
1353 if (GET_CODE (insn) == CALL_INSN)
1354 benefit += 10; /* Assume at least this many insns in a library
1355 routine. */
1356 else if (GET_CODE (insn) == INSN
1357 && GET_CODE (PATTERN (insn)) != USE
1358 && GET_CODE (PATTERN (insn)) != CLOBBER)
1359 benefit++;
1360 }
1361
1362 return benefit;
1363 }
1364 \f
1365 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1366
1367 static rtx
1368 skip_consec_insns (insn, count)
1369 rtx insn;
1370 int count;
1371 {
1372 for (; count > 0; count--)
1373 {
1374 rtx temp;
1375
1376 /* If first insn of libcall sequence, skip to end. */
1377 /* Do this at start of loop, since INSN is guaranteed to
1378 be an insn here. */
1379 if (GET_CODE (insn) != NOTE
1380 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1381 insn = XEXP (temp, 0);
1382
1383 do insn = NEXT_INSN (insn);
1384 while (GET_CODE (insn) == NOTE);
1385 }
1386
1387 return insn;
1388 }
1389
1390 /* Ignore any movable whose insn falls within a libcall
1391 which is part of another movable.
1392 We make use of the fact that the movable for the libcall value
1393 was made later and so appears later on the chain. */
1394
1395 static void
1396 ignore_some_movables (movables)
1397 struct movable *movables;
1398 {
1399 register struct movable *m, *m1;
1400
1401 for (m = movables; m; m = m->next)
1402 {
1403 /* Is this a movable for the value of a libcall? */
1404 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1405 if (note)
1406 {
1407 rtx insn;
1408 /* Check for earlier movables inside that range,
1409 and mark them invalid. We cannot use LUIDs here because
1410 insns created by loop.c for prior loops don't have LUIDs.
1411 Rather than reject all such insns from movables, we just
1412 explicitly check each insn in the libcall (since invariant
1413 libcalls aren't that common). */
1414 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1415 for (m1 = movables; m1 != m; m1 = m1->next)
1416 if (m1->insn == insn)
1417 m1->done = 1;
1418 }
1419 }
1420 }
1421
1422 /* For each movable insn, see if the reg that it loads
1423 dies inside another insn that is itself only conditionally movable.
1424 If so, record that the second insn "forces" the first one,
1425 since the second can be moved only if the first is. */
1426
1427 static void
1428 force_movables (movables)
1429 struct movable *movables;
1430 {
1431 register struct movable *m, *m1;
1432 for (m1 = movables; m1; m1 = m1->next)
1433 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1434 if (!m1->partial && !m1->done)
1435 {
1436 int regno = m1->regno;
1437 for (m = m1->next; m; m = m->next)
1438 /* ??? Could this be a bug? What if CSE caused the
1439 register of M1 to be used after this insn?
1440 Since CSE does not update regno_last_uid,
1441 this insn M->insn might not be where it dies.
1442 But very likely this doesn't matter; what matters is
1443 that M's reg is computed from M1's reg. */
1444 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1445 && !m->done)
1446 break;
1447 if (m != 0 && m->set_src == m1->set_dest
1448 /* If m->consec, m->set_src isn't valid. */
1449 && m->consec == 0)
1450 m = 0;
1451
1452 /* Increase the priority of moving the first insn
1453 since it permits the second to be moved as well. */
1454 if (m != 0)
1455 {
1456 m->forces = m1;
1457 m1->lifetime += m->lifetime;
1458 m1->savings += m->savings;
1459 }
1460 }
1461 }
1462 \f
1463 /* Find invariant expressions that are equal and can be combined into
1464 one register. */
1465
1466 static void
1467 combine_movables (movables, nregs)
1468 struct movable *movables;
1469 int nregs;
1470 {
1471 register struct movable *m;
1472 char *matched_regs = (char *) alloca (nregs);
1473 enum machine_mode mode;
1474
1475 /* Regs that are set more than once are not allowed to match
1476 or be matched. I'm no longer sure why not. */
1477 /* Perhaps testing m->consec_sets would be more appropriate here? */
1478
1479 for (m = movables; m; m = m->next)
1480 if (m->match == 0 && VARRAY_INT (n_times_used, m->regno) == 1 && !m->partial)
1481 {
1482 register struct movable *m1;
1483 int regno = m->regno;
1484
1485 bzero (matched_regs, nregs);
1486 matched_regs[regno] = 1;
1487
1488 /* We want later insns to match the first one. Don't make the first
1489 one match any later ones. So start this loop at m->next. */
1490 for (m1 = m->next; m1; m1 = m1->next)
1491 if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_used, m1->regno) == 1
1492 /* A reg used outside the loop mustn't be eliminated. */
1493 && !m1->global
1494 /* A reg used for zero-extending mustn't be eliminated. */
1495 && !m1->partial
1496 && (matched_regs[m1->regno]
1497 ||
1498 (
1499 /* Can combine regs with different modes loaded from the
1500 same constant only if the modes are the same or
1501 if both are integer modes with M wider or the same
1502 width as M1. The check for integer is redundant, but
1503 safe, since the only case of differing destination
1504 modes with equal sources is when both sources are
1505 VOIDmode, i.e., CONST_INT. */
1506 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1507 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1508 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1509 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1510 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1511 /* See if the source of M1 says it matches M. */
1512 && ((GET_CODE (m1->set_src) == REG
1513 && matched_regs[REGNO (m1->set_src)])
1514 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1515 movables))))
1516 && ((m->dependencies == m1->dependencies)
1517 || rtx_equal_p (m->dependencies, m1->dependencies)))
1518 {
1519 m->lifetime += m1->lifetime;
1520 m->savings += m1->savings;
1521 m1->done = 1;
1522 m1->match = m;
1523 matched_regs[m1->regno] = 1;
1524 }
1525 }
1526
1527 /* Now combine the regs used for zero-extension.
1528 This can be done for those not marked `global'
1529 provided their lives don't overlap. */
1530
1531 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1532 mode = GET_MODE_WIDER_MODE (mode))
1533 {
1534 register struct movable *m0 = 0;
1535
1536 /* Combine all the registers for extension from mode MODE.
1537 Don't combine any that are used outside this loop. */
1538 for (m = movables; m; m = m->next)
1539 if (m->partial && ! m->global
1540 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1541 {
1542 register struct movable *m1;
1543 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1544 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1545
1546 if (m0 == 0)
1547 {
1548 /* First one: don't check for overlap, just record it. */
1549 m0 = m;
1550 continue;
1551 }
1552
1553 /* Make sure they extend to the same mode.
1554 (Almost always true.) */
1555 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1556 continue;
1557
1558 /* We already have one: check for overlap with those
1559 already combined together. */
1560 for (m1 = movables; m1 != m; m1 = m1->next)
1561 if (m1 == m0 || (m1->partial && m1->match == m0))
1562 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1563 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1564 goto overlap;
1565
1566 /* No overlap: we can combine this with the others. */
1567 m0->lifetime += m->lifetime;
1568 m0->savings += m->savings;
1569 m->done = 1;
1570 m->match = m0;
1571
1572 overlap: ;
1573 }
1574 }
1575 }
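/* Illustrative sketch, not part of the original pass (variable names are
   hypothetical): combine_movables handles source like

	while (--n)
	  {
	    a = 100;
	    b = 100;
	    ...
	  }

   Both assignments are invariant loads of equal constants, so the movable
   for B gets its `match' field pointed at the movable for A, and
   move_movables later hoists a single load of 100 and maps B's pseudo
   onto A's register.  */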
1576 \f
1577 /* Return 1 if regs X and Y will become the same if moved. */
1578
1579 static int
1580 regs_match_p (x, y, movables)
1581 rtx x, y;
1582 struct movable *movables;
1583 {
1584 int xn = REGNO (x);
1585 int yn = REGNO (y);
1586 struct movable *mx, *my;
1587
1588 for (mx = movables; mx; mx = mx->next)
1589 if (mx->regno == xn)
1590 break;
1591
1592 for (my = movables; my; my = my->next)
1593 if (my->regno == yn)
1594 break;
1595
1596 return (mx && my
1597 && ((mx->match == my->match && mx->match != 0)
1598 || mx->match == my
1599 || mx == my->match));
1600 }
1601
1602 /* Return 1 if X and Y are identical-looking rtx's.
1603 This is the Lisp function EQUAL for rtx arguments.
1604
1605 If two registers are matching movables or a movable register and an
1606 equivalent constant, consider them equal. */
1607
1608 static int
1609 rtx_equal_for_loop_p (x, y, movables)
1610 rtx x, y;
1611 struct movable *movables;
1612 {
1613 register int i;
1614 register int j;
1615 register struct movable *m;
1616 register enum rtx_code code;
1617 register char *fmt;
1618
1619 if (x == y)
1620 return 1;
1621 if (x == 0 || y == 0)
1622 return 0;
1623
1624 code = GET_CODE (x);
1625
1626 /* If we have a register and a constant, they may sometimes be
1627 equal. */
1628 if (GET_CODE (x) == REG && VARRAY_INT (n_times_set, REGNO (x)) == -2
1629 && CONSTANT_P (y))
1630 {
1631 for (m = movables; m; m = m->next)
1632 if (m->move_insn && m->regno == REGNO (x)
1633 && rtx_equal_p (m->set_src, y))
1634 return 1;
1635 }
1636 else if (GET_CODE (y) == REG && VARRAY_INT (n_times_set, REGNO (y)) == -2
1637 && CONSTANT_P (x))
1638 {
1639 for (m = movables; m; m = m->next)
1640 if (m->move_insn && m->regno == REGNO (y)
1641 && rtx_equal_p (m->set_src, x))
1642 return 1;
1643 }
1644
1645 /* Otherwise, rtx's of different codes cannot be equal. */
1646 if (code != GET_CODE (y))
1647 return 0;
1648
1649 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1650 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1651
1652 if (GET_MODE (x) != GET_MODE (y))
1653 return 0;
1654
1655 /* These three types of rtx's can be compared nonrecursively. */
1656 if (code == REG)
1657 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1658
1659 if (code == LABEL_REF)
1660 return XEXP (x, 0) == XEXP (y, 0);
1661 if (code == SYMBOL_REF)
1662 return XSTR (x, 0) == XSTR (y, 0);
1663
1664 /* Compare the elements. If any pair of corresponding elements
1665 fails to match, return 0 for the whole thing. */
1666
1667 fmt = GET_RTX_FORMAT (code);
1668 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1669 {
1670 switch (fmt[i])
1671 {
1672 case 'w':
1673 if (XWINT (x, i) != XWINT (y, i))
1674 return 0;
1675 break;
1676
1677 case 'i':
1678 if (XINT (x, i) != XINT (y, i))
1679 return 0;
1680 break;
1681
1682 case 'E':
1683 /* Two vectors must have the same length. */
1684 if (XVECLEN (x, i) != XVECLEN (y, i))
1685 return 0;
1686
1687 /* And the corresponding elements must match. */
1688 for (j = 0; j < XVECLEN (x, i); j++)
1689 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1690 return 0;
1691 break;
1692
1693 case 'e':
1694 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1695 return 0;
1696 break;
1697
1698 case 's':
1699 if (strcmp (XSTR (x, i), XSTR (y, i)))
1700 return 0;
1701 break;
1702
1703 case 'u':
1704 /* These are just backpointers, so they don't matter. */
1705 break;
1706
1707 case '0':
1708 break;
1709
1710 /* It is believed that rtx's at this level will never
1711 contain anything but integers and other rtx's,
1712 except for within LABEL_REFs and SYMBOL_REFs. */
1713 default:
1714 abort ();
1715 }
1716 }
1717 return 1;
1718 }
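/* A hedged usage sketch, not in the original source.  It assumes a
   hypothetical pseudo 100 whose n_times_set entry is -2 and for which
   MOVABLES holds a move_insn movable with set_src (const_int 5); under
   those assumptions the REG/constant special case above applies.  */
#if 0
  rtx reg100 = gen_rtx_REG (SImode, 100);

  /* Returns 1: pseudo 100 will hold 5 once its load is hoisted.  */
  rtx_equal_for_loop_p (reg100, GEN_INT (5), movables);

  /* Returns 0: rtx_equal_p of (const_int 5) and (const_int 6) fails,
     and the REG vs CONST_INT code comparison then fails as well.  */
  rtx_equal_for_loop_p (reg100, GEN_INT (6), movables);
#endif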
1719 \f
1720 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1721 insns in INSNS which use that reference. */
1722
1723 static void
1724 add_label_notes (x, insns)
1725 rtx x;
1726 rtx insns;
1727 {
1728 enum rtx_code code = GET_CODE (x);
1729 int i, j;
1730 char *fmt;
1731 rtx insn;
1732
1733 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1734 {
1735 /* This code used to ignore labels that referred to dispatch tables to
1736 avoid flow generating (slightly) worse code.
1737
1738 We no longer ignore such label references (see LABEL_REF handling in
1739 mark_jump_label for additional information). */
1740 for (insn = insns; insn; insn = NEXT_INSN (insn))
1741 if (reg_mentioned_p (XEXP (x, 0), insn))
1742 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1743 REG_NOTES (insn));
1744 }
1745
1746 fmt = GET_RTX_FORMAT (code);
1747 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1748 {
1749 if (fmt[i] == 'e')
1750 add_label_notes (XEXP (x, i), insns);
1751 else if (fmt[i] == 'E')
1752 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1753 add_label_notes (XVECEXP (x, i, j), insns);
1754 }
1755 }
1756 \f
1757 /* Scan MOVABLES, and move the insns that deserve to be moved.
1758 If two matching movables are combined, replace one reg with the
1759 other throughout. */
1760
1761 static void
1762 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1763 struct movable *movables;
1764 int threshold;
1765 int insn_count;
1766 rtx loop_start;
1767 rtx end;
1768 int nregs;
1769 {
1770 rtx new_start = 0;
1771 register struct movable *m;
1772 register rtx p;
1773 /* Map of pseudo-register replacements to handle combining
1774 when we move several insns that load the same value
1775 into different pseudo-registers. */
1776 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1777 char *already_moved = (char *) alloca (nregs);
1778
1779 bzero (already_moved, nregs);
1780 bzero ((char *) reg_map, nregs * sizeof (rtx));
1781
1782 num_movables = 0;
1783
1784 for (m = movables; m; m = m->next)
1785 {
1786 /* Describe this movable insn. */
1787
1788 if (loop_dump_stream)
1789 {
1790 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1791 INSN_UID (m->insn), m->regno, m->lifetime);
1792 if (m->consec > 0)
1793 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1794 if (m->cond)
1795 fprintf (loop_dump_stream, "cond ");
1796 if (m->force)
1797 fprintf (loop_dump_stream, "force ");
1798 if (m->global)
1799 fprintf (loop_dump_stream, "global ");
1800 if (m->done)
1801 fprintf (loop_dump_stream, "done ");
1802 if (m->move_insn)
1803 fprintf (loop_dump_stream, "move-insn ");
1804 if (m->match)
1805 fprintf (loop_dump_stream, "matches %d ",
1806 INSN_UID (m->match->insn));
1807 if (m->forces)
1808 fprintf (loop_dump_stream, "forces %d ",
1809 INSN_UID (m->forces->insn));
1810 }
1811
1812 /* Count movables. Value used in heuristics in strength_reduce. */
1813 num_movables++;
1814
1815 /* Ignore the insn if it's already done (it matched something else).
1816 Otherwise, see if it is now safe to move. */
1817
1818 if (!m->done
1819 && (! m->cond
1820 || (1 == invariant_p (m->set_src)
1821 && (m->dependencies == 0
1822 || 1 == invariant_p (m->dependencies))
1823 && (m->consec == 0
1824 || 1 == consec_sets_invariant_p (m->set_dest,
1825 m->consec + 1,
1826 m->insn))))
1827 && (! m->forces || m->forces->done))
1828 {
1829 register int regno;
1830 register rtx p;
1831 int savings = m->savings;
1832
1833 /* We have an insn that is safe to move.
1834 Compute its desirability. */
1835
1836 p = m->insn;
1837 regno = m->regno;
1838
1839 if (loop_dump_stream)
1840 fprintf (loop_dump_stream, "savings %d ", savings);
1841
1842 if (moved_once[regno])
1843 {
1844 insn_count *= 2;
1845
1846 if (loop_dump_stream)
1847 fprintf (loop_dump_stream, "halved since already moved ");
1848 }
1849
1850 /* An insn MUST be moved if we already moved something else
1851 which is safe only if this one is moved too: that is,
1852 if already_moved[REGNO] is nonzero. */
1853
1854 /* An insn is desirable to move if the new lifetime of the
1855 register is no more than THRESHOLD times the old lifetime.
1856 If it's not desirable, it means the loop is so big
1857 that moving won't speed things up much,
1858 and it is liable to make register usage worse. */
1859
1860 /* It is also desirable to move if it can be moved at no
1861 extra cost because something else was already moved. */
1862
1863 if (already_moved[regno]
1864 || flag_move_all_movables
1865 || (threshold * savings * m->lifetime) >= insn_count
1866 || (m->forces && m->forces->done
1867 && VARRAY_INT (n_times_used, m->forces->regno) == 1))
1868 {
1869 int count;
1870 register struct movable *m1;
1871 rtx first;
1872
1873 /* Now move the insns that set the reg. */
1874
1875 if (m->partial && m->match)
1876 {
1877 rtx newpat, i1;
1878 rtx r1, r2;
1879 /* Find the end of this chain of matching regs.
1880 Thus, we load each reg in the chain from that one reg.
1881 And that reg is loaded with 0 directly,
1882 since it has ->match == 0. */
1883 for (m1 = m; m1->match; m1 = m1->match);
1884 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1885 SET_DEST (PATTERN (m1->insn)));
1886 i1 = emit_insn_before (newpat, loop_start);
1887
1888 /* Mark the moved, invariant reg as being allowed to
1889 share a hard reg with the other matching invariant. */
1890 REG_NOTES (i1) = REG_NOTES (m->insn);
1891 r1 = SET_DEST (PATTERN (m->insn));
1892 r2 = SET_DEST (PATTERN (m1->insn));
1893 regs_may_share
1894 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1895 gen_rtx_EXPR_LIST (VOIDmode, r2,
1896 regs_may_share));
1897 delete_insn (m->insn);
1898
1899 if (new_start == 0)
1900 new_start = i1;
1901
1902 if (loop_dump_stream)
1903 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1904 }
1905 /* If we are to re-generate the item being moved with a
1906 new move insn, first delete what we have and then emit
1907 the move insn before the loop. */
1908 else if (m->move_insn)
1909 {
1910 rtx i1, temp;
1911
1912 for (count = m->consec; count >= 0; count--)
1913 {
1914 /* If this is the first insn of a library call sequence,
1915 skip to the end. */
1916 if (GET_CODE (p) != NOTE
1917 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1918 p = XEXP (temp, 0);
1919
1920 /* If this is the last insn of a libcall sequence, then
1921 delete every insn in the sequence except the last.
1922 The last insn is handled in the normal manner. */
1923 if (GET_CODE (p) != NOTE
1924 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1925 {
1926 temp = XEXP (temp, 0);
1927 while (temp != p)
1928 temp = delete_insn (temp);
1929 }
1930
1931 temp = p;
1932 p = delete_insn (p);
1933
1934 /* simplify_giv_expr expects that it can walk the insns
1935 at m->insn forwards and see this old sequence we are
1936 tossing here. delete_insn does preserve the next
1937 pointers, but when we skip over a NOTE we must fix
1938 it up. Otherwise that code walks into the non-deleted
1939 insn stream. */
1940 while (p && GET_CODE (p) == NOTE)
1941 p = NEXT_INSN (temp) = NEXT_INSN (p);
1942 }
1943
1944 start_sequence ();
1945 emit_move_insn (m->set_dest, m->set_src);
1946 temp = get_insns ();
1947 end_sequence ();
1948
1949 add_label_notes (m->set_src, temp);
1950
1951 i1 = emit_insns_before (temp, loop_start);
1952 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1953 REG_NOTES (i1)
1954 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1955 m->set_src, REG_NOTES (i1));
1956
1957 if (loop_dump_stream)
1958 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1959
1960 /* The more regs we move, the less we like moving them. */
1961 threshold -= 3;
1962 }
1963 else
1964 {
1965 for (count = m->consec; count >= 0; count--)
1966 {
1967 rtx i1, temp;
1968
1969 /* If first insn of libcall sequence, skip to end. */
1970 /* Do this at start of loop, since p is guaranteed to
1971 be an insn here. */
1972 if (GET_CODE (p) != NOTE
1973 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1974 p = XEXP (temp, 0);
1975
1976 /* If last insn of libcall sequence, move all
1977 insns except the last before the loop. The last
1978 insn is handled in the normal manner. */
1979 if (GET_CODE (p) != NOTE
1980 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1981 {
1982 rtx fn_address = 0;
1983 rtx fn_reg = 0;
1984 rtx fn_address_insn = 0;
1985
1986 first = 0;
1987 for (temp = XEXP (temp, 0); temp != p;
1988 temp = NEXT_INSN (temp))
1989 {
1990 rtx body;
1991 rtx n;
1992 rtx next;
1993
1994 if (GET_CODE (temp) == NOTE)
1995 continue;
1996
1997 body = PATTERN (temp);
1998
1999 /* Find the next insn after TEMP,
2000 not counting USE or NOTE insns. */
2001 for (next = NEXT_INSN (temp); next != p;
2002 next = NEXT_INSN (next))
2003 if (! (GET_CODE (next) == INSN
2004 && GET_CODE (PATTERN (next)) == USE)
2005 && GET_CODE (next) != NOTE)
2006 break;
2007
2008 /* If that is the call, this may be the insn
2009 that loads the function address.
2010
2011 Extract the function address from the insn
2012 that loads it into a register.
2013 If this insn was cse'd, we get incorrect code.
2014
2015 So emit a new move insn that copies the
2016 function address into the register that the
2017 call insn will use. flow.c will delete any
2018 redundant stores that we have created. */
2019 if (GET_CODE (next) == CALL_INSN
2020 && GET_CODE (body) == SET
2021 && GET_CODE (SET_DEST (body)) == REG
2022 && (n = find_reg_note (temp, REG_EQUAL,
2023 NULL_RTX)))
2024 {
2025 fn_reg = SET_SRC (body);
2026 if (GET_CODE (fn_reg) != REG)
2027 fn_reg = SET_DEST (body);
2028 fn_address = XEXP (n, 0);
2029 fn_address_insn = temp;
2030 }
2031 /* We have the call insn.
2032 If it uses the register we suspect it might,
2033 load it with the correct address directly. */
2034 if (GET_CODE (temp) == CALL_INSN
2035 && fn_address != 0
2036 && reg_referenced_p (fn_reg, body))
2037 emit_insn_after (gen_move_insn (fn_reg,
2038 fn_address),
2039 fn_address_insn);
2040
2041 if (GET_CODE (temp) == CALL_INSN)
2042 {
2043 i1 = emit_call_insn_before (body, loop_start);
2044 /* Because the USAGE information potentially
2045 contains objects other than hard registers
2046 we need to copy it. */
2047 if (CALL_INSN_FUNCTION_USAGE (temp))
2048 CALL_INSN_FUNCTION_USAGE (i1)
2049 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2050 }
2051 else
2052 i1 = emit_insn_before (body, loop_start);
2053 if (first == 0)
2054 first = i1;
2055 if (temp == fn_address_insn)
2056 fn_address_insn = i1;
2057 REG_NOTES (i1) = REG_NOTES (temp);
2058 delete_insn (temp);
2059 }
2060 }
2061 if (m->savemode != VOIDmode)
2062 {
2063 /* P sets REG to zero; but we should clear only
2064 the bits that are not covered by the mode
2065 m->savemode. */
2066 rtx reg = m->set_dest;
2067 rtx sequence;
2068 rtx tem;
2069
2070 start_sequence ();
2071 tem = expand_binop
2072 (GET_MODE (reg), and_optab, reg,
2073 GEN_INT ((((HOST_WIDE_INT) 1
2074 << GET_MODE_BITSIZE (m->savemode)))
2075 - 1),
2076 reg, 1, OPTAB_LIB_WIDEN);
2077 if (tem == 0)
2078 abort ();
2079 if (tem != reg)
2080 emit_move_insn (reg, tem);
2081 sequence = gen_sequence ();
2082 end_sequence ();
2083 i1 = emit_insn_before (sequence, loop_start);
2084 }
2085 else if (GET_CODE (p) == CALL_INSN)
2086 {
2087 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2088 /* Because the USAGE information potentially
2089 contains objects other than hard registers
2090 we need to copy it. */
2091 if (CALL_INSN_FUNCTION_USAGE (p))
2092 CALL_INSN_FUNCTION_USAGE (i1)
2093 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2094 }
2095 else if (count == m->consec && m->move_insn_first)
2096 {
2097 /* The SET_SRC might not be invariant, so we must
2098 use the REG_EQUAL note. */
2099 start_sequence ();
2100 emit_move_insn (m->set_dest, m->set_src);
2101 temp = get_insns ();
2102 end_sequence ();
2103
2104 add_label_notes (m->set_src, temp);
2105
2106 i1 = emit_insns_before (temp, loop_start);
2107 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2108 REG_NOTES (i1)
2109 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2110 : REG_EQUAL),
2111 m->set_src, REG_NOTES (i1));
2112 }
2113 else
2114 i1 = emit_insn_before (PATTERN (p), loop_start);
2115
2116 if (REG_NOTES (i1) == 0)
2117 {
2118 REG_NOTES (i1) = REG_NOTES (p);
2119
2120 /* If there is a REG_EQUAL note present whose value
2121 is not loop invariant, then delete it, since it
2122 may cause problems with later optimization passes.
2123 It is possible for cse to create such notes
2124 as a result of record_jump_cond. */
2125
2126 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2127 && ! invariant_p (XEXP (temp, 0)))
2128 remove_note (i1, temp);
2129 }
2130
2131 if (new_start == 0)
2132 new_start = i1;
2133
2134 if (loop_dump_stream)
2135 fprintf (loop_dump_stream, " moved to %d",
2136 INSN_UID (i1));
2137
2138 /* If library call, now fix the REG_NOTES that contain
2139 insn pointers, namely REG_LIBCALL on FIRST
2140 and REG_RETVAL on I1. */
2141 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2142 {
2143 XEXP (temp, 0) = first;
2144 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2145 XEXP (temp, 0) = i1;
2146 }
2147
2148 temp = p;
2149 delete_insn (p);
2150 p = NEXT_INSN (p);
2151
2152 /* simplify_giv_expr expects that it can walk the insns
2153 at m->insn forwards and see this old sequence we are
2154 tossing here. delete_insn does preserve the next
2155 pointers, but when we skip over a NOTE we must fix
2156 it up. Otherwise that code walks into the non-deleted
2157 insn stream. */
2158 while (p && GET_CODE (p) == NOTE)
2159 p = NEXT_INSN (temp) = NEXT_INSN (p);
2160 }
2161
2162 /* The more regs we move, the less we like moving them. */
2163 threshold -= 3;
2164 }
2165
2166 /* Any other movable that loads the same register
2167 MUST be moved. */
2168 already_moved[regno] = 1;
2169
2170 /* This reg has been moved out of one loop. */
2171 moved_once[regno] = 1;
2172
2173 /* The reg set here is now invariant. */
2174 if (! m->partial)
2175 VARRAY_INT (n_times_set, regno) = 0;
2176
2177 m->done = 1;
2178
2179 /* Change the length-of-life info for the register
2180 to say it lives at least the full length of this loop.
2181 This will help guide optimizations in outer loops. */
2182
2183 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2184 /* This is the old insn before all the moved insns.
2185 We can't use the moved insn because it is out of range
2186 in uid_luid. Only the old insns have luids. */
2187 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2188 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2189 REGNO_LAST_UID (regno) = INSN_UID (end);
2190
2191 /* Combine with this moved insn any other matching movables. */
2192
2193 if (! m->partial)
2194 for (m1 = movables; m1; m1 = m1->next)
2195 if (m1->match == m)
2196 {
2197 rtx temp;
2198
2199 /* Schedule the reg loaded by M1
2200 for replacement so that it shares the reg of M.
2201 If the modes differ (only possible in restricted
2202 circumstances), make a SUBREG. */
2203 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2204 reg_map[m1->regno] = m->set_dest;
2205 else
2206 reg_map[m1->regno]
2207 = gen_lowpart_common (GET_MODE (m1->set_dest),
2208 m->set_dest);
2209
2210 /* Get rid of the matching insn
2211 and prevent further processing of it. */
2212 m1->done = 1;
2213
2214 /* If this is a library call, delete all insns except the last,
2215 which is deleted below. */
2216 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2217 NULL_RTX)))
2218 {
2219 for (temp = XEXP (temp, 0); temp != m1->insn;
2220 temp = NEXT_INSN (temp))
2221 delete_insn (temp);
2222 }
2223 delete_insn (m1->insn);
2224
2225 /* Any other movable that loads the same register
2226 MUST be moved. */
2227 already_moved[m1->regno] = 1;
2228
2229 /* The reg merged here is now invariant,
2230 if the reg it matches is invariant. */
2231 if (! m->partial)
2232 VARRAY_INT (n_times_set, m1->regno) = 0;
2233 }
2234 }
2235 else if (loop_dump_stream)
2236 fprintf (loop_dump_stream, "not desirable");
2237 }
2238 else if (loop_dump_stream && !m->match)
2239 fprintf (loop_dump_stream, "not safe");
2240
2241 if (loop_dump_stream)
2242 fprintf (loop_dump_stream, "\n");
2243 }
2244
2245 if (new_start == 0)
2246 new_start = loop_start;
2247
2248 /* Go through all the instructions in the loop, making
2249 all the register substitutions scheduled in REG_MAP. */
2250 for (p = new_start; p != end; p = NEXT_INSN (p))
2251 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2252 || GET_CODE (p) == CALL_INSN)
2253 {
2254 replace_regs (PATTERN (p), reg_map, nregs, 0);
2255 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2256 INSN_CODE (p) = -1;
2257 }
2258 }
2259 \f
2260 #if 0
2261 /* Scan X and replace the address of any MEM in it with ADDR.
2262 REG is the address that MEM should have before the replacement. */
2263
2264 static void
2265 replace_call_address (x, reg, addr)
2266 rtx x, reg, addr;
2267 {
2268 register enum rtx_code code;
2269 register int i;
2270 register char *fmt;
2271
2272 if (x == 0)
2273 return;
2274 code = GET_CODE (x);
2275 switch (code)
2276 {
2277 case PC:
2278 case CC0:
2279 case CONST_INT:
2280 case CONST_DOUBLE:
2281 case CONST:
2282 case SYMBOL_REF:
2283 case LABEL_REF:
2284 case REG:
2285 return;
2286
2287 case SET:
2288 /* Short cut for very common case. */
2289 replace_call_address (XEXP (x, 1), reg, addr);
2290 return;
2291
2292 case CALL:
2293 /* Short cut for very common case. */
2294 replace_call_address (XEXP (x, 0), reg, addr);
2295 return;
2296
2297 case MEM:
2298 /* If this MEM uses a reg other than the one we expected,
2299 something is wrong. */
2300 if (XEXP (x, 0) != reg)
2301 abort ();
2302 XEXP (x, 0) = addr;
2303 return;
2304
2305 default:
2306 break;
2307 }
2308
2309 fmt = GET_RTX_FORMAT (code);
2310 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2311 {
2312 if (fmt[i] == 'e')
2313 replace_call_address (XEXP (x, i), reg, addr);
2314 if (fmt[i] == 'E')
2315 {
2316 register int j;
2317 for (j = 0; j < XVECLEN (x, i); j++)
2318 replace_call_address (XVECEXP (x, i, j), reg, addr);
2319 }
2320 }
2321 }
2322 #endif
2323 \f
2324 /* Return the number of memory refs to addresses that vary
2325 in the rtx X. */
2326
2327 static int
2328 count_nonfixed_reads (x)
2329 rtx x;
2330 {
2331 register enum rtx_code code;
2332 register int i;
2333 register char *fmt;
2334 int value;
2335
2336 if (x == 0)
2337 return 0;
2338
2339 code = GET_CODE (x);
2340 switch (code)
2341 {
2342 case PC:
2343 case CC0:
2344 case CONST_INT:
2345 case CONST_DOUBLE:
2346 case CONST:
2347 case SYMBOL_REF:
2348 case LABEL_REF:
2349 case REG:
2350 return 0;
2351
2352 case MEM:
2353 return ((invariant_p (XEXP (x, 0)) != 1)
2354 + count_nonfixed_reads (XEXP (x, 0)));
2355
2356 default:
2357 break;
2358 }
2359
2360 value = 0;
2361 fmt = GET_RTX_FORMAT (code);
2362 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2363 {
2364 if (fmt[i] == 'e')
2365 value += count_nonfixed_reads (XEXP (x, i));
2366 if (fmt[i] == 'E')
2367 {
2368 register int j;
2369 for (j = 0; j < XVECLEN (x, i); j++)
2370 value += count_nonfixed_reads (XVECEXP (x, i, j));
2371 }
2372 }
2373 return value;
2374 }
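/* Example of the counting, using a hypothetical pseudo: for
   (mem:SI (reg 100)) where pseudo 100 is modified in the loop, the MEM
   case yields (invariant_p (address) != 1) == 1 plus 0 for the address
   itself, so the result is 1.  For (mem:SI (mem:SI (reg 100))) both
   addresses vary and the result is 2, while (mem:SI (symbol_ref "x"))
   has an invariant address and counts 0.  */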
2375
2376 \f
2377 #if 0
2378 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2379 Replace it with an instruction to load just the low bytes
2380 if the machine supports such an instruction,
2381 and insert above LOOP_START an instruction to clear the register. */
2382
2383 static void
2384 constant_high_bytes (p, loop_start)
2385 rtx p, loop_start;
2386 {
2387 register rtx new;
2388 register int insn_code_number;
2389
2390 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2391 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2392
2393 new = gen_rtx_SET (VOIDmode,
2394 gen_rtx_STRICT_LOW_PART (VOIDmode,
2395 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2396 SET_DEST (PATTERN (p)),
2397 0)),
2398 XEXP (SET_SRC (PATTERN (p)), 0));
2399 insn_code_number = recog (new, p);
2400
2401 if (insn_code_number)
2402 {
2403 register int i;
2404
2405 /* Clear destination register before the loop. */
2406 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2407 const0_rtx),
2408 loop_start);
2409
2410 /* Inside the loop, just load the low part. */
2411 PATTERN (p) = new;
2412 }
2413 }
2414 #endif
2415 \f
2416 /* Scan a loop setting the variables `unknown_address_altered',
2417 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2418 and `loop_has_volatile'. Also, fill in the arrays `loop_mems' and
2419 `loop_store_mems'. */
2420
2421 static void
2422 prescan_loop (start, end)
2423 rtx start, end;
2424 {
2425 register int level = 1;
2426 rtx insn;
2427 int loop_has_multiple_exit_targets = 0;
2428 /* The label after END. Jumping here is just like falling off the
2429 end of the loop. We use next_nonnote_insn instead of next_label
2430 as a hedge against the (pathological) case where some actual insn
2431 might end up between the two. */
2432 rtx exit_target = next_nonnote_insn (end);
2433 if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
2434 loop_has_multiple_exit_targets = 1;
2435
2436 unknown_address_altered = 0;
2437 loop_has_call = 0;
2438 loop_has_volatile = 0;
2439 loop_store_mems_idx = 0;
2440 loop_mems_idx = 0;
2441
2442 num_mem_sets = 0;
2443 loops_enclosed = 1;
2444 loop_continue = 0;
2445
2446 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2447 insn = NEXT_INSN (insn))
2448 {
2449 if (GET_CODE (insn) == NOTE)
2450 {
2451 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2452 {
2453 ++level;
2454 /* Count number of loops contained in this one. */
2455 loops_enclosed++;
2456 }
2457 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2458 {
2459 --level;
2460 if (level == 0)
2461 {
2462 end = insn;
2463 break;
2464 }
2465 }
2466 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2467 {
2468 if (level == 1)
2469 loop_continue = insn;
2470 }
2471 }
2472 else if (GET_CODE (insn) == CALL_INSN)
2473 {
2474 if (! CONST_CALL_P (insn))
2475 unknown_address_altered = 1;
2476 loop_has_call = 1;
2477 }
2478 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2479 {
2480 rtx label1 = NULL_RTX;
2481 rtx label2 = NULL_RTX;
2482
2483 if (volatile_refs_p (PATTERN (insn)))
2484 loop_has_volatile = 1;
2485
2486 note_stores (PATTERN (insn), note_addr_stored);
2487
2488 if (!loop_has_multiple_exit_targets
2489 && GET_CODE (insn) == JUMP_INSN
2490 && GET_CODE (PATTERN (insn)) == SET
2491 && SET_DEST (PATTERN (insn)) == pc_rtx)
2492 {
2493 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2494 {
2495 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2496 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2497 }
2498 else
2499 {
2500 label1 = SET_SRC (PATTERN (insn));
2501 }
2502
2503 do {
2504 if (label1 && label1 != pc_rtx)
2505 {
2506 if (GET_CODE (label1) != LABEL_REF)
2507 {
2508 /* Something tricky. */
2509 loop_has_multiple_exit_targets = 1;
2510 break;
2511 }
2512 else if (XEXP (label1, 0) != exit_target
2513 && LABEL_OUTSIDE_LOOP_P (label1))
2514 {
2515 /* A jump outside the current loop. */
2516 loop_has_multiple_exit_targets = 1;
2517 break;
2518 }
2519 }
2520
2521 label1 = label2;
2522 label2 = NULL_RTX;
2523 } while (label1);
2524 }
2525 }
2526 else if (GET_CODE (insn) == RETURN)
2527 loop_has_multiple_exit_targets = 1;
2528 }
2529
2530 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2531 if (/* We can't tell what MEMs are aliased by what. */
2532 !unknown_address_altered
2533 /* An exception thrown by a called function might land us
2534 anywhere. */
2535 && !loop_has_call
2536 /* We don't want loads for MEMs moved to a location before the
2537 one at which their stack memory becomes allocated. (Note
2538 that this is not a problem for malloc, etc., since those
2539 require actual function calls.) */
2540 && !current_function_calls_alloca
2541 /* There are ways to leave the loop other than falling off the
2542 end. */
2543 && !loop_has_multiple_exit_targets)
2544 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2545 insn = NEXT_INSN (insn))
2546 for_each_rtx (&insn, insert_loop_mem, 0);
2547 }
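/* A sketch of the exit-target test above, with a hypothetical label: a
   conditional jump inside the loop has a pattern such as

	(set (pc) (if_then_else (ne (reg 100) (const_int 0))
				(label_ref L1)
				(pc)))

   so LABEL1 becomes (label_ref L1) and LABEL2 the fall-through (pc).
   If L1 is marked LABEL_OUTSIDE_LOOP_P and is not the label just past
   END, the loop is recorded as having multiple exit targets.  */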
2548 \f
2549 /* Scan the function looking for loops. Record the start and end of each loop.
2550 Also mark as invalid loops any loops that contain a setjmp or are branched
2551 to from outside the loop. */
2552
2553 static void
2554 find_and_verify_loops (f)
2555 rtx f;
2556 {
2557 rtx insn, label;
2558 int current_loop = -1;
2559 int next_loop = -1;
2560 int loop;
2561
2562 /* If there are jumps to undefined labels,
2563 treat them as jumps out of any/all loops.
2564 This also avoids writing past end of tables when there are no loops. */
2565 uid_loop_num[0] = -1;
2566
2567 /* Find boundaries of loops, mark which loops are contained within
2568 loops, and invalidate loops that have setjmp. */
2569
2570 for (insn = f; insn; insn = NEXT_INSN (insn))
2571 {
2572 if (GET_CODE (insn) == NOTE)
2573 switch (NOTE_LINE_NUMBER (insn))
2574 {
2575 case NOTE_INSN_LOOP_BEG:
2576 loop_number_loop_starts[++next_loop] = insn;
2577 loop_number_loop_ends[next_loop] = 0;
2578 loop_outer_loop[next_loop] = current_loop;
2579 loop_invalid[next_loop] = 0;
2580 loop_number_exit_labels[next_loop] = 0;
2581 loop_number_exit_count[next_loop] = 0;
2582 current_loop = next_loop;
2583 break;
2584
2585 case NOTE_INSN_SETJMP:
2586 /* In this case, we must invalidate our current loop and any
2587 enclosing loop. */
2588 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2589 {
2590 loop_invalid[loop] = 1;
2591 if (loop_dump_stream)
2592 fprintf (loop_dump_stream,
2593 "\nLoop at %d ignored due to setjmp.\n",
2594 INSN_UID (loop_number_loop_starts[loop]));
2595 }
2596 break;
2597
2598 case NOTE_INSN_LOOP_END:
2599 if (current_loop == -1)
2600 abort ();
2601
2602 loop_number_loop_ends[current_loop] = insn;
2603 current_loop = loop_outer_loop[current_loop];
2604 break;
2605
2606 default:
2607 break;
2608 }
2609
2610 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2611 enclosing loop, but this doesn't matter. */
2612 uid_loop_num[INSN_UID (insn)] = current_loop;
2613 }
2614
2615 /* Any loop containing a label used in an initializer must be invalidated,
2616 because it can be jumped into from anywhere. */
2617
2618 for (label = forced_labels; label; label = XEXP (label, 1))
2619 {
2620 int loop_num;
2621
2622 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2623 loop_num != -1;
2624 loop_num = loop_outer_loop[loop_num])
2625 loop_invalid[loop_num] = 1;
2626 }
2627
2628 /* Any loop containing a label used for an exception handler must be
2629 invalidated, because it can be jumped into from anywhere. */
2630
2631 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2632 {
2633 int loop_num;
2634
2635 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2636 loop_num != -1;
2637 loop_num = loop_outer_loop[loop_num])
2638 loop_invalid[loop_num] = 1;
2639 }
2640
2641 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2642 loop that it is not contained within, that loop is marked invalid.
2643 If any INSN or CALL_INSN uses a label's address, then the loop containing
2644 that label is marked invalid, because it could be jumped into from
2645 anywhere.
2646
2647 Also look for blocks of code ending in an unconditional branch that
2648 exits the loop. If such a block is guarded by a conditional
2649 branch that jumps around it, move the block elsewhere (see below) and
2650 invert the jump to point to the code block. This may eliminate a
2651 label in our loop and will simplify processing by both us and a
2652 possible second cse pass. */
2653
2654 for (insn = f; insn; insn = NEXT_INSN (insn))
2655 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2656 {
2657 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2658
2659 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2660 {
2661 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2662 if (note)
2663 {
2664 int loop_num;
2665
2666 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2667 loop_num != -1;
2668 loop_num = loop_outer_loop[loop_num])
2669 loop_invalid[loop_num] = 1;
2670 }
2671 }
2672
2673 if (GET_CODE (insn) != JUMP_INSN)
2674 continue;
2675
2676 mark_loop_jump (PATTERN (insn), this_loop_num);
2677
2678 /* See if this is an unconditional branch outside the loop. */
2679 if (this_loop_num != -1
2680 && (GET_CODE (PATTERN (insn)) == RETURN
2681 || (simplejump_p (insn)
2682 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2683 != this_loop_num)))
2684 && get_max_uid () < max_uid_for_loop)
2685 {
2686 rtx p;
2687 rtx our_next = next_real_insn (insn);
2688 int dest_loop;
2689 int outer_loop = -1;
2690
2691 /* Go backwards until we reach the start of the loop, a label,
2692 or a JUMP_INSN. */
2693 for (p = PREV_INSN (insn);
2694 GET_CODE (p) != CODE_LABEL
2695 && ! (GET_CODE (p) == NOTE
2696 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2697 && GET_CODE (p) != JUMP_INSN;
2698 p = PREV_INSN (p))
2699 ;
2700
2701 /* Check for the case where we have a jump to an inner nested
2702 loop, and do not perform the optimization in that case. */
2703
2704 if (JUMP_LABEL (insn))
2705 {
2706 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2707 if (dest_loop != -1)
2708 {
2709 for (outer_loop = dest_loop; outer_loop != -1;
2710 outer_loop = loop_outer_loop[outer_loop])
2711 if (outer_loop == this_loop_num)
2712 break;
2713 }
2714 }
2715
2716 /* Make sure that the target of P is within the current loop. */
2717
2718 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2719 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2720 outer_loop = this_loop_num;
2721
2722 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2723 we have a block of code to try to move.
2724
2725 We look backward and then forward from the target of INSN
2726 to find a BARRIER at the same loop depth as the target.
2727 If we find such a BARRIER, we make a new label for the start
2728 of the block, invert the jump in P and point it to that label,
2729 and move the block of code to the spot we found. */
2730
2731 if (outer_loop == -1
2732 && GET_CODE (p) == JUMP_INSN
2733 && JUMP_LABEL (p) != 0
2734 /* Just ignore jumps to labels that were never emitted.
2735 These always indicate compilation errors. */
2736 && INSN_UID (JUMP_LABEL (p)) != 0
2737 && condjump_p (p)
2738 && ! simplejump_p (p)
2739 && next_real_insn (JUMP_LABEL (p)) == our_next)
2740 {
2741 rtx target
2742 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2743 int target_loop_num = uid_loop_num[INSN_UID (target)];
2744 rtx loc;
2745
2746 for (loc = target; loc; loc = PREV_INSN (loc))
2747 if (GET_CODE (loc) == BARRIER
2748 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2749 break;
2750
2751 if (loc == 0)
2752 for (loc = target; loc; loc = NEXT_INSN (loc))
2753 if (GET_CODE (loc) == BARRIER
2754 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2755 break;
2756
2757 if (loc)
2758 {
2759 rtx cond_label = JUMP_LABEL (p);
2760 rtx new_label = get_label_after (p);
2761
2762 /* Ensure our label doesn't go away. */
2763 LABEL_NUSES (cond_label)++;
2764
2765 /* Verify that uid_loop_num is large enough and that
2766 we can invert P. */
2767 if (invert_jump (p, new_label))
2768 {
2769 rtx q, r;
2770
2771 /* If no suitable BARRIER was found, create a suitable
2772 one before TARGET. Since TARGET is a fall through
2773 path, we'll need to insert a jump around our block
2774 and add a BARRIER before TARGET.
2775
2776 This creates an extra unconditional jump outside
2777 the loop. However, the benefits of removing rarely
2778 executed instructions from inside the loop usually
2779 outweigh the cost of the extra unconditional jump
2780 outside the loop. */
2781 if (loc == 0)
2782 {
2783 rtx temp;
2784
2785 temp = gen_jump (JUMP_LABEL (insn));
2786 temp = emit_jump_insn_before (temp, target);
2787 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2788 LABEL_NUSES (JUMP_LABEL (insn))++;
2789 loc = emit_barrier_before (target);
2790 }
2791
2792 /* Include the BARRIER after INSN and copy the
2793 block after LOC. */
2794 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2795 reorder_insns (new_label, NEXT_INSN (insn), loc);
2796
2797 /* All those insns are now in TARGET_LOOP_NUM. */
2798 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2799 q = NEXT_INSN (q))
2800 uid_loop_num[INSN_UID (q)] = target_loop_num;
2801
2802 /* The label jumped to by INSN is no longer a loop exit.
2803 Unless INSN does not have a label (e.g., it is a
2804 RETURN insn), search loop_number_exit_labels to find
2805 its label_ref, and remove it. Also turn off
2806 LABEL_OUTSIDE_LOOP_P bit. */
2807 if (JUMP_LABEL (insn))
2808 {
2809 int loop_num;
2810
2811 for (q = 0,
2812 r = loop_number_exit_labels[this_loop_num];
2813 r; q = r, r = LABEL_NEXTREF (r))
2814 if (XEXP (r, 0) == JUMP_LABEL (insn))
2815 {
2816 LABEL_OUTSIDE_LOOP_P (r) = 0;
2817 if (q)
2818 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2819 else
2820 loop_number_exit_labels[this_loop_num]
2821 = LABEL_NEXTREF (r);
2822 break;
2823 }
2824
2825 for (loop_num = this_loop_num;
2826 loop_num != -1 && loop_num != target_loop_num;
2827 loop_num = loop_outer_loop[loop_num])
2828 loop_number_exit_count[loop_num]--;
2829
2830 /* If we didn't find it, then something is wrong. */
2831 if (! r)
2832 abort ();
2833 }
2834
2835 /* P is now a jump outside the loop, so it must be put
2836 in loop_number_exit_labels, and marked as such.
2837 The easiest way to do this is to just call
2838 mark_loop_jump again for P. */
2839 mark_loop_jump (PATTERN (p), this_loop_num);
2840
2841 /* If INSN now jumps to the insn after it,
2842 delete INSN. */
2843 if (JUMP_LABEL (insn) != 0
2844 && (next_real_insn (JUMP_LABEL (insn))
2845 == next_real_insn (insn)))
2846 delete_insn (insn);
2847 }
2848
2849 /* Continue the loop after where the conditional
2850 branch used to jump, since the only branch insn
2851 in the block (if it still remains) is an inter-loop
2852 branch and hence needs no processing. */
2853 insn = NEXT_INSN (cond_label);
2854
2855 if (--LABEL_NUSES (cond_label) == 0)
2856 delete_insn (cond_label);
2857
2858 /* This loop will be continued with NEXT_INSN (insn). */
2859 insn = PREV_INSN (insn);
2860 }
2861 }
2862 }
2863 }
2864 }
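/* A hedged sketch of the block motion above (schematic assembly, not
   real RTL).  Before, inside the loop:

	  beq  L2	; P, the conditional branch around the block
	  jmp  Lexit	; INSN, an unconditional exit from the loop
	L2:	...

   After: P is inverted and retargeted at a fresh label heading the
   "jmp Lexit" block, and that block is moved next to a BARRIER at the
   same loop depth as its target, so the rarely executed exit branch no
   longer sits in the loop body; L2 is deleted if this was its only
   use.  */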
2865
2866 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2867 loops it is contained in, mark the target loop invalid.
2868
2869 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2870
2871 static void
2872 mark_loop_jump (x, loop_num)
2873 rtx x;
2874 int loop_num;
2875 {
2876 int dest_loop;
2877 int outer_loop;
2878 int i;
2879
2880 switch (GET_CODE (x))
2881 {
2882 case PC:
2883 case USE:
2884 case CLOBBER:
2885 case REG:
2886 case MEM:
2887 case CONST_INT:
2888 case CONST_DOUBLE:
2889 case RETURN:
2890 return;
2891
2892 case CONST:
2893 /* There could be a label reference in here. */
2894 mark_loop_jump (XEXP (x, 0), loop_num);
2895 return;
2896
2897 case PLUS:
2898 case MINUS:
2899 case MULT:
2900 mark_loop_jump (XEXP (x, 0), loop_num);
2901 mark_loop_jump (XEXP (x, 1), loop_num);
2902 return;
2903
2904 case SIGN_EXTEND:
2905 case ZERO_EXTEND:
2906 mark_loop_jump (XEXP (x, 0), loop_num);
2907 return;
2908
2909 case LABEL_REF:
2910 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2911
2912 /* Link together all labels that branch outside the loop. This
2913 is used by final_[bg]iv_value and the loop unrolling code. Also
2914 mark this LABEL_REF so we know that this branch should predict
2915 false. */
2916
2917 /* A check to make sure the label is not in an inner nested loop,
2918 since this does not count as a loop exit. */
2919 if (dest_loop != -1)
2920 {
2921 for (outer_loop = dest_loop; outer_loop != -1;
2922 outer_loop = loop_outer_loop[outer_loop])
2923 if (outer_loop == loop_num)
2924 break;
2925 }
2926 else
2927 outer_loop = -1;
2928
2929 if (loop_num != -1 && outer_loop == -1)
2930 {
2931 LABEL_OUTSIDE_LOOP_P (x) = 1;
2932 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2933 loop_number_exit_labels[loop_num] = x;
2934
2935 for (outer_loop = loop_num;
2936 outer_loop != -1 && outer_loop != dest_loop;
2937 outer_loop = loop_outer_loop[outer_loop])
2938 loop_number_exit_count[outer_loop]++;
2939 }
2940
2941 /* If this is inside a loop, but not in the current loop or one enclosed
2942 by it, it invalidates at least one loop. */
2943
2944 if (dest_loop == -1)
2945 return;
2946
2947 /* We must invalidate every nested loop containing the target of this
2948 label, except those that also contain the jump insn. */
2949
2950 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2951 {
2952 /* Stop when we reach a loop that also contains the jump insn. */
2953 for (outer_loop = loop_num; outer_loop != -1;
2954 outer_loop = loop_outer_loop[outer_loop])
2955 if (dest_loop == outer_loop)
2956 return;
2957
2958 /* If we get here, we know we need to invalidate a loop. */
2959 if (loop_dump_stream && ! loop_invalid[dest_loop])
2960 fprintf (loop_dump_stream,
2961 "\nLoop at %d ignored due to multiple entry points.\n",
2962 INSN_UID (loop_number_loop_starts[dest_loop]));
2963
2964 loop_invalid[dest_loop] = 1;
2965 }
2966 return;
2967
2968 case SET:
2969 /* If this is not setting pc, ignore. */
2970 if (SET_DEST (x) == pc_rtx)
2971 mark_loop_jump (SET_SRC (x), loop_num);
2972 return;
2973
2974 case IF_THEN_ELSE:
2975 mark_loop_jump (XEXP (x, 1), loop_num);
2976 mark_loop_jump (XEXP (x, 2), loop_num);
2977 return;
2978
2979 case PARALLEL:
2980 case ADDR_VEC:
2981 for (i = 0; i < XVECLEN (x, 0); i++)
2982 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2983 return;
2984
2985 case ADDR_DIFF_VEC:
2986 for (i = 0; i < XVECLEN (x, 1); i++)
2987 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2988 return;
2989
2990 default:
2991 /* Treat anything else (such as a symbol_ref)
2992 as a branch out of this loop, but not into any loop. */
2993
2994 if (loop_num != -1)
2995 {
2996 #ifdef HAIFA
2997 LABEL_OUTSIDE_LOOP_P (x) = 1;
2998 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2999 #endif /* HAIFA */
3000
3001 loop_number_exit_labels[loop_num] = x;
3002
3003 for (outer_loop = loop_num; outer_loop != -1;
3004 outer_loop = loop_outer_loop[outer_loop])
3005 loop_number_exit_count[outer_loop]++;
3006 }
3007 return;
3008 }
3009 }
3010 \f
3011 /* Return nonzero if there is a label in the range from
3012 insn INSN to and including the insn whose luid is END.
3013 INSN must have an assigned luid (i.e., it must not have
3014 been previously created by loop.c). */
3015
3016 static int
3017 labels_in_range_p (insn, end)
3018 rtx insn;
3019 int end;
3020 {
3021 while (insn && INSN_LUID (insn) <= end)
3022 {
3023 if (GET_CODE (insn) == CODE_LABEL)
3024 return 1;
3025 insn = NEXT_INSN (insn);
3026 }
3027
3028 return 0;
3029 }
3030
3031 /* Record that a memory reference X is being set. */
3032
3033 static void
3034 note_addr_stored (x, y)
3035 rtx x;
3036 rtx y ATTRIBUTE_UNUSED;
3037 {
3038 register int i;
3039
3040 if (x == 0 || GET_CODE (x) != MEM)
3041 return;
3042
3043 /* Count number of memory writes.
3044 This affects heuristics in strength_reduce. */
3045 num_mem_sets++;
3046
3047 /* BLKmode MEM means all memory is clobbered. */
3048 if (GET_MODE (x) == BLKmode)
3049 unknown_address_altered = 1;
3050
3051 if (unknown_address_altered)
3052 return;
3053
3054 for (i = 0; i < loop_store_mems_idx; i++)
3055 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
3056 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
3057 {
3058 /* We are storing at the same address as previously noted. Save the
3059 wider reference. */
3060 if (GET_MODE_SIZE (GET_MODE (x))
3061 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
3062 loop_store_mems[i] = x;
3063 break;
3064 }
3065
3066 if (i == NUM_STORES)
3067 unknown_address_altered = 1;
3068
3069 else if (i == loop_store_mems_idx)
3070 loop_store_mems[loop_store_mems_idx++] = x;
3071 }
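/* Example of the widening rule, with a hypothetical pseudo: if the loop
   stores first to (mem:HI (reg 100)) and later to (mem:SI (reg 100)),
   the existing table slot is replaced by the wider SImode reference, so
   later dependence tests in invariant_p are conservative for both
   stores.  A BLKmode store, or overflowing the NUM_STORES table,
   instead invalidates all of memory.  */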
3072 \f
3073 /* Return nonzero if the rtx X is invariant over the current loop.
3074
3075 The value is 2 if we refer to something only conditionally invariant.
3076
3077 If `unknown_address_altered' is nonzero, no memory ref is invariant.
3078 Otherwise, a memory ref is invariant if it does not conflict with
3079 anything stored in `loop_store_mems'. */
3080
3081 int
3082 invariant_p (x)
3083 register rtx x;
3084 {
3085 register int i;
3086 register enum rtx_code code;
3087 register char *fmt;
3088 int conditional = 0;
3089
3090 if (x == 0)
3091 return 1;
3092 code = GET_CODE (x);
3093 switch (code)
3094 {
3095 case CONST_INT:
3096 case CONST_DOUBLE:
3097 case SYMBOL_REF:
3098 case CONST:
3099 return 1;
3100
3101 case LABEL_REF:
3102 /* A LABEL_REF is normally invariant, however, if we are unrolling
3103 loops, and this label is inside the loop, then it isn't invariant.
3104 This is because each unrolled copy of the loop body will have
3105 a copy of this label. If this was invariant, then an insn loading
3106 the address of this label into a register might get moved outside
3107 the loop, and then each loop body would end up using the same label.
3108
3109 We don't know the loop bounds here though, so just fail for all
3110 labels. */
3111 if (flag_unroll_loops)
3112 return 0;
3113 else
3114 return 1;
3115
3116 case PC:
3117 case CC0:
3118 case UNSPEC_VOLATILE:
3119 return 0;
3120
3121 case REG:
3122 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3123 since the reg might be set by initialization within the loop. */
3124
3125 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3126 || x == arg_pointer_rtx)
3127 && ! current_function_has_nonlocal_goto)
3128 return 1;
3129
3130 if (loop_has_call
3131 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3132 return 0;
3133
3134 if (VARRAY_INT (n_times_set, REGNO (x)) < 0)
3135 return 2;
3136
3137 return VARRAY_INT (n_times_set, REGNO (x)) == 0;
3138
3139 case MEM:
3140 /* Volatile memory references must be rejected. Do this before
3141 checking for read-only items, so that volatile read-only items
3142 will be rejected also. */
3143 if (MEM_VOLATILE_P (x))
3144 return 0;
3145
3146 /* Read-only items (such as constants in a constant pool) are
3147 invariant if their address is. */
3148 if (RTX_UNCHANGING_P (x))
3149 break;
3150
3151 /* If we filled the table (or had a subroutine call), any location
3152 in memory could have been clobbered. */
3153 if (unknown_address_altered)
3154 return 0;
3155
3156 /* See if there is any dependence between a store and this load. */
3157 for (i = loop_store_mems_idx - 1; i >= 0; i--)
3158 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
3159 return 0;
3160
3161 /* It's not invalidated by a store in memory
3162 but we must still verify the address is invariant. */
3163 break;
3164
3165 case ASM_OPERANDS:
3166 /* Don't mess with insns declared volatile. */
3167 if (MEM_VOLATILE_P (x))
3168 return 0;
3169 break;
3170
3171 default:
3172 break;
3173 }
3174
3175 fmt = GET_RTX_FORMAT (code);
3176 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3177 {
3178 if (fmt[i] == 'e')
3179 {
3180 int tem = invariant_p (XEXP (x, i));
3181 if (tem == 0)
3182 return 0;
3183 if (tem == 2)
3184 conditional = 1;
3185 }
3186 else if (fmt[i] == 'E')
3187 {
3188 register int j;
3189 for (j = 0; j < XVECLEN (x, i); j++)
3190 {
3191 int tem = invariant_p (XVECEXP (x, i, j));
3192 if (tem == 0)
3193 return 0;
3194 if (tem == 2)
3195 conditional = 1;
3196 }
3197
3198 }
3199 }
3200
3201 return 1 + conditional;
3202 }
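/* Example of the return values, with a hypothetical pseudo: for
   X = (plus:SI (reg 100) (const_int 4)), invariant_p returns 1 when the
   n_times_set entry for 100 is 0 (the reg is never set in the loop),
   0 when the entry is positive, and 2, conditionally invariant, when
   the entry is negative because the reg's sets are themselves movable
   candidates currently under consideration.  */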
3203
3204 \f
3205 /* Return nonzero if all the insns in the loop that set REG
3206 are INSN and the immediately following insns,
3207 and if each of those insns sets REG in an invariant way
3208 (not counting uses of REG in them).
3209
3210 The value is 2 if some of these insns are only conditionally invariant.
3211
3212 We assume that INSN itself is the first set of REG
3213 and that its source is invariant. */
3214
3215 static int
3216 consec_sets_invariant_p (reg, n_sets, insn)
3217 int n_sets;
3218 rtx reg, insn;
3219 {
3220 register rtx p = insn;
3221 register int regno = REGNO (reg);
3222 rtx temp;
3223 /* Number of sets we have to insist on finding after INSN. */
3224 int count = n_sets - 1;
3225 int old = VARRAY_INT (n_times_set, regno);
3226 int value = 0;
3227 int this;
3228
3229 /* If N_SETS hit the limit, we can't rely on its value. */
3230 if (n_sets == 127)
3231 return 0;
3232
3233 VARRAY_INT (n_times_set, regno) = 0;
3234
3235 while (count > 0)
3236 {
3237 register enum rtx_code code;
3238 rtx set;
3239
3240 p = NEXT_INSN (p);
3241 code = GET_CODE (p);
3242
3243 /* If library call, skip to end of it. */
3244 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3245 p = XEXP (temp, 0);
3246
3247 this = 0;
3248 if (code == INSN
3249 && (set = single_set (p))
3250 && GET_CODE (SET_DEST (set)) == REG
3251 && REGNO (SET_DEST (set)) == regno)
3252 {
3253 this = invariant_p (SET_SRC (set));
3254 if (this != 0)
3255 value |= this;
3256 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3257 {
3258 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3259 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3260 notes are OK. */
3261 this = (CONSTANT_P (XEXP (temp, 0))
3262 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3263 && invariant_p (XEXP (temp, 0))));
3264 if (this != 0)
3265 value |= this;
3266 }
3267 }
3268 if (this != 0)
3269 count--;
3270 else if (code != NOTE)
3271 {
3272 VARRAY_INT (n_times_set, regno) = old;
3273 return 0;
3274 }
3275 }
3276
3277 VARRAY_INT (n_times_set, regno) = old;
3278 /* If invariant_p ever returned 2, we return 2. */
3279 return 1 + (value & 2);
3280 }
3281
3282 #if 0
3283 /* I don't think this condition is sufficient to allow INSN
3284 to be moved, so we no longer test it. */
3285
3286 /* Return 1 if all insns in the basic block of INSN and following INSN
3287 that set REG are invariant according to TABLE. */
3288
3289 static int
3290 all_sets_invariant_p (reg, insn, table)
3291 rtx reg, insn;
3292 short *table;
3293 {
3294 register rtx p = insn;
3295 register int regno = REGNO (reg);
3296
3297 while (1)
3298 {
3299 register enum rtx_code code;
3300 p = NEXT_INSN (p);
3301 code = GET_CODE (p);
3302 if (code == CODE_LABEL || code == JUMP_INSN)
3303 return 1;
3304 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3305 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3306 && REGNO (SET_DEST (PATTERN (p))) == regno)
3307 {
3308 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3309 return 0;
3310 }
3311 }
3312 }
3313 #endif /* 0 */
3314 \f
3315 /* Look at all uses (not sets) of registers in X. For each, if it is
3316 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3317 a different insn, set USAGE[REGNO] to const0_rtx. */
3318
3319 static void
3320 find_single_use_in_loop (insn, x, usage)
3321 rtx insn;
3322 rtx x;
3323 varray_type usage;
3324 {
3325 enum rtx_code code = GET_CODE (x);
3326 char *fmt = GET_RTX_FORMAT (code);
3327 int i, j;
3328
3329 if (code == REG)
3330 VARRAY_RTX (usage, REGNO (x))
3331 = (VARRAY_RTX (usage, REGNO (x)) != 0
3332 && VARRAY_RTX (usage, REGNO (x)) != insn)
3333 ? const0_rtx : insn;
3334
3335 else if (code == SET)
3336 {
3337 /* Don't count SET_DEST if it is a REG; otherwise count things
3338 in SET_DEST because if a register is partially modified, it won't
3339 show up as a potential movable so we don't care how USAGE is set
3340 for it. */
3341 if (GET_CODE (SET_DEST (x)) != REG)
3342 find_single_use_in_loop (insn, SET_DEST (x), usage);
3343 find_single_use_in_loop (insn, SET_SRC (x), usage);
3344 }
3345 else
3346 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3347 {
3348 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3349 find_single_use_in_loop (insn, XEXP (x, i), usage);
3350 else if (fmt[i] == 'E')
3351 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3352 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3353 }
3354 }
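/* Example of the usage-table transitions: a pseudo with no uses keeps
   USAGE 0; its first referencing insn I1 sets USAGE to I1; further
   references within I1 leave it unchanged; a reference from a different
   insn I2 collapses it to const0_rtx, meaning "used more than once".
   (I1 and I2 are hypothetical.)  */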
3355 \f
3356 /* Increment N_TIMES_SET at the index of each register
3357 that is modified by an insn between FROM and TO.
3358 If the value of an element of N_TIMES_SET becomes 127 or more,
3359 stop incrementing it, to avoid overflow.
3360
3361 Store in SINGLE_USAGE[I] the single insn in which register I is
3362 used, if it is only used once. Otherwise, it is set to 0 (for no
3363 uses) or const0_rtx for more than one use. This parameter may be zero,
3364 in which case this processing is not done.
3365
3366 Store in *COUNT_PTR the number of actual instructions
3367 in the loop. We use this to decide what is worth moving out. */
3368
3369 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3370 In that case, it is the insn that last set reg n. */
3371
3372 static void
3373 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3374 register rtx from, to;
3375 varray_type may_not_move;
3376 varray_type single_usage;
3377 int *count_ptr;
3378 int nregs;
3379 {
3380 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3381 register rtx insn;
3382 register int count = 0;
3383 register rtx dest;
3384
3385 bzero ((char *) last_set, nregs * sizeof (rtx));
3386 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3387 {
3388 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3389 {
3390 ++count;
3391
3392 /* If requested, record registers that have exactly one use. */
3393 if (single_usage)
3394 {
3395 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3396
3397 /* Include uses in REG_EQUAL notes. */
3398 if (REG_NOTES (insn))
3399 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3400 }
3401
3402 if (GET_CODE (PATTERN (insn)) == CLOBBER
3403 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3404 /* Don't move a reg that has an explicit clobber.
3405 We might do so sometimes, but it's not worth the pain. */
3406 VARRAY_CHAR (may_not_move, REGNO (XEXP (PATTERN (insn), 0))) = 1;
3407
3408 if (GET_CODE (PATTERN (insn)) == SET
3409 || GET_CODE (PATTERN (insn)) == CLOBBER)
3410 {
3411 dest = SET_DEST (PATTERN (insn));
3412 while (GET_CODE (dest) == SUBREG
3413 || GET_CODE (dest) == ZERO_EXTRACT
3414 || GET_CODE (dest) == SIGN_EXTRACT
3415 || GET_CODE (dest) == STRICT_LOW_PART)
3416 dest = XEXP (dest, 0);
3417 if (GET_CODE (dest) == REG)
3418 {
3419 register int regno = REGNO (dest);
3420 /* If this is the first setting of this reg
3421 in current basic block, and it was set before,
3422 it must be set in two basic blocks, so it cannot
3423 be moved out of the loop. */
3424 if (VARRAY_INT (n_times_set, regno) > 0
3425 && last_set[regno] == 0)
3426 VARRAY_CHAR (may_not_move, regno) = 1;
3427 /* If this is not first setting in current basic block,
3428 see if reg was used in between previous one and this.
3429 If so, neither one can be moved. */
3430 if (last_set[regno] != 0
3431 && reg_used_between_p (dest, last_set[regno], insn))
3432 VARRAY_CHAR (may_not_move, regno) = 1;
3433 if (VARRAY_INT (n_times_set, regno) < 127)
3434 ++VARRAY_INT (n_times_set, regno);
3435 last_set[regno] = insn;
3436 }
3437 }
3438 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3439 {
3440 register int i;
3441 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3442 {
3443 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3444 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3445 /* Don't move a reg that has an explicit clobber.
3446 It's not worth the pain to try to do it correctly. */
3447 VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
3448
3449 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3450 {
3451 dest = SET_DEST (x);
3452 while (GET_CODE (dest) == SUBREG
3453 || GET_CODE (dest) == ZERO_EXTRACT
3454 || GET_CODE (dest) == SIGN_EXTRACT
3455 || GET_CODE (dest) == STRICT_LOW_PART)
3456 dest = XEXP (dest, 0);
3457 if (GET_CODE (dest) == REG)
3458 {
3459 register int regno = REGNO (dest);
3460 if (VARRAY_INT (n_times_set, regno) > 0
3461 && last_set[regno] == 0)
3462 VARRAY_CHAR (may_not_move, regno) = 1;
3463 if (last_set[regno] != 0
3464 && reg_used_between_p (dest, last_set[regno], insn))
3465 VARRAY_CHAR (may_not_move, regno) = 1;
3466 if (VARRAY_INT (n_times_set, regno) < 127)
3467 ++VARRAY_INT (n_times_set, regno);
3468 last_set[regno] = insn;
3469 }
3470 }
3471 }
3472 }
3473 }
3474
3475 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3476 bzero ((char *) last_set, nregs * sizeof (rtx));
3477 }
3478 *count_ptr = count;
3479 }
3480 \f
3481 /* Given a loop that is bounded by LOOP_START and LOOP_END
3482 and that is entered at SCAN_START,
3483 return 1 if the register set in SET contained in insn INSN is used by
3484 any insn that precedes INSN in cyclic order starting
3485 from the loop entry point.
3486
3487 We don't want to use INSN_LUID here because if we restrict INSN to those
3488 that have a valid INSN_LUID, it means we cannot move an invariant out
3489 from an inner loop past two loops. */
3490
3491 static int
3492 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3493 rtx set, insn, loop_start, scan_start, loop_end;
3494 {
3495 rtx reg = SET_DEST (set);
3496 rtx p;
3497
3498 /* Scan forward checking for register usage. If we hit INSN, we
3499 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3500 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3501 {
3502 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3503 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3504 return 1;
3505
3506 if (p == loop_end)
3507 p = loop_start;
3508 }
3509
3510 return 0;
3511 }
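/* A note on the cyclic scan above (illustrative, not from the original
   source): insns are visited in execution order, wrapping from LOOP_END
   back to LOOP_START, e.g.

	SCAN_START -> ... -> LOOP_END -> LOOP_START -> ... -> INSN

   so "precedes INSN" means earlier as the loop actually runs, not
   earlier in the layout of the insn stream.  */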
3512 \f
3513 /* A "basic induction variable" or biv is a pseudo reg that is set
3514 (within this loop) only by incrementing or decrementing it. */
3515 /* A "general induction variable" or giv is a pseudo reg whose
3516 value is a linear function of a biv. */
3517
3518 /* Bivs are recognized by `basic_induction_var';
3519 Givs by `general_induction_var'. */
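/* For example (an illustrative C fragment, not from this file): in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   `i' is a biv, since within the loop it is set only by `i++'.  The
   address computed for `a[i]', conceptually `a + i*4', is a giv: a
   linear function (mult_val * biv + add_val) of the biv `i'.  */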
3520
3521 /* Indexed by register number, indicates whether or not register is an
3522 induction variable, and if so what type. */
3523
3524 enum iv_mode *reg_iv_type;
3525
3526 /* Indexed by register number, contains pointer to `struct induction'
3527 if register is an induction variable. This holds general info for
3528 all induction variables. */
3529
3530 struct induction **reg_iv_info;
3531
3532 /* Indexed by register number, contains pointer to `struct iv_class'
3533 if register is a basic induction variable. This holds info describing
3534 the class (a related group) of induction variables that the biv belongs
3535 to. */
3536
3537 struct iv_class **reg_biv_class;
3538
3539 /* The head of a list which links together (via the next field)
3540 every iv class for the current loop. */
3541
3542 struct iv_class *loop_iv_list;
3543
3544 /* Communication with routines called via `note_stores'. */
3545
3546 static rtx note_insn;
3547
3548 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3549
3550 static rtx addr_placeholder;
3551
3552 /* ??? Unfinished optimizations, and possible future optimizations,
3553 for the strength reduction code. */
3554
3555 /* ??? The interaction of biv elimination and recognition of 'constant'
3556 bivs may cause problems. */
3557
3558 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3559 performance problems.
3560
3561 Perhaps don't eliminate things that can be combined with an addressing
3562 mode. Find all givs that have the same biv, mult_val, and add_val;
3563 then for each giv, check to see if its only use dies in a following
3564 memory address. If so, generate a new memory address and check to see
3565 if it is valid. If it is valid, then store the modified memory address,
3566 otherwise, mark the giv as not done so that it will get its own iv. */
3567
3568 /* ??? Could try to optimize branches when it is known that a biv is always
3569 positive. */
3570
3571 /* ??? When replacing a biv in a compare insn, we should replace it with
3572 the closest giv so that an optimized branch can still be recognized
3573 by the combiner, e.g. the VAX acb insn. */
3574
3575 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3576 was rerun in loop_optimize whenever a register was added or moved.
3577 Also, some of the optimizations could be a little less conservative. */
3578 \f
3579 /* Perform strength reduction and induction variable elimination.
3580
3581 Pseudo registers created during this function will be beyond the last
3582 valid index in several tables including n_times_set and regno_last_uid.
3583 This does not cause a problem here, because the added registers cannot be
3584 givs outside of their loop, and hence will never be reconsidered.
3585 But scan_loop must check regnos to make sure they are in bounds.
3586
3587 SCAN_START is the first instruction in the loop, as the loop would
3588 actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
3589 the first instruction in the loop, as it is laid out in the
3590 instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG. */
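/* A sketch of the insn layout assumed above, for orientation only:

	LOOP_START:  NOTE_INSN_LOOP_BEG
	LOOP_TOP:    first body insn in layout order
	    ...      loop body
	END:         NOTE_INSN_LOOP_END

   SCAN_START is the first body insn in execution order; for a loop
   entered at the bottom it lies near END, and scanning wraps from END
   back to LOOP_TOP.  */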
3591
3592 static void
3593 strength_reduce (scan_start, end, loop_top, insn_count,
3594 loop_start, loop_end, unroll_p, bct_p)
3595 rtx scan_start;
3596 rtx end;
3597 rtx loop_top;
3598 int insn_count;
3599 rtx loop_start;
3600 rtx loop_end;
3601 int unroll_p, bct_p;
3602 {
3603 rtx p;
3604 rtx set;
3605 rtx inc_val;
3606 rtx mult_val;
3607 rtx dest_reg;
3608 /* This is 1 if current insn is not executed at least once for every loop
3609 iteration. */
3610 int not_every_iteration = 0;
3611 /* This is 1 if current insn may be executed more than once for every
3612 loop iteration. */
3613 int maybe_multiple = 0;
3614 /* Temporary list pointers for traversing loop_iv_list. */
3615 struct iv_class *bl, **backbl;
3616 /* Ratio of extra register life span we can justify
3617 for saving an instruction. More if loop doesn't call subroutines
3618 since in that case saving an insn makes more difference
3619 and more registers are available. */
3620 /* ??? could set this to last value of threshold in move_movables */
3621 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3622 /* Map of pseudo-register replacements. */
3623 rtx *reg_map;
3624 int call_seen;
3625 rtx test;
3626 rtx end_insert_before;
3627 int loop_depth = 0;
3628
3629 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3630 * sizeof (enum iv_mode));
3631 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3632 reg_iv_info = (struct induction **)
3633 alloca (max_reg_before_loop * sizeof (struct induction *));
3634 bzero ((char *) reg_iv_info, (max_reg_before_loop
3635 * sizeof (struct induction *)));
3636 reg_biv_class = (struct iv_class **)
3637 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3638 bzero ((char *) reg_biv_class, (max_reg_before_loop
3639 * sizeof (struct iv_class *)));
3640
3641 loop_iv_list = 0;
3642 addr_placeholder = gen_reg_rtx (Pmode);
3643
3644 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3645 must be put before this insn, so that they will appear in the right
3646 order (i.e. loop order).
3647
3648 If loop_end is the end of the current function, then emit a
3649 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3650 dummy note insn. */
3651 if (NEXT_INSN (loop_end) != 0)
3652 end_insert_before = NEXT_INSN (loop_end);
3653 else
3654 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3655
3656 /* Scan through loop to find all possible bivs. */
3657
3658 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
3659 p != NULL_RTX;
3660 p = next_insn_in_loop (p, scan_start, end, loop_top))
3661 {
3662 if (GET_CODE (p) == INSN
3663 && (set = single_set (p))
3664 && GET_CODE (SET_DEST (set)) == REG)
3665 {
3666 dest_reg = SET_DEST (set);
3667 if (REGNO (dest_reg) < max_reg_before_loop
3668 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3669 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3670 {
3671 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3672 dest_reg, p, &inc_val, &mult_val))
3673 {
3674 /* It is a possible basic induction variable.
3675 Create and initialize an induction structure for it. */
3676
3677 struct induction *v
3678 = (struct induction *) alloca (sizeof (struct induction));
3679
3680 record_biv (v, p, dest_reg, inc_val, mult_val,
3681 not_every_iteration, maybe_multiple);
3682 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3683 }
3684 else if (REGNO (dest_reg) < max_reg_before_loop)
3685 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3686 }
3687 }
3688
3689 /* Past CODE_LABEL, we get to insns that may be executed multiple
3690 times. The only way we can be sure that they can't is if every
3691 jump insn between here and the end of the loop either
3692 returns, exits the loop, is a forward jump, or is a jump
3693 to the loop start. */
3694
3695 if (GET_CODE (p) == CODE_LABEL)
3696 {
3697 rtx insn = p;
3698
3699 maybe_multiple = 0;
3700
3701 while (1)
3702 {
3703 insn = NEXT_INSN (insn);
3704 if (insn == scan_start)
3705 break;
3706 if (insn == end)
3707 {
3708 if (loop_top != 0)
3709 insn = loop_top;
3710 else
3711 break;
3712 if (insn == scan_start)
3713 break;
3714 }
3715
3716 if (GET_CODE (insn) == JUMP_INSN
3717 && GET_CODE (PATTERN (insn)) != RETURN
3718 && (! condjump_p (insn)
3719 || (JUMP_LABEL (insn) != 0
3720 && JUMP_LABEL (insn) != scan_start
3721 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3722 || INSN_UID (insn) >= max_uid_for_loop
3723 || (INSN_LUID (JUMP_LABEL (insn))
3724 < INSN_LUID (insn))))))
3725 {
3726 maybe_multiple = 1;
3727 break;
3728 }
3729 }
3730 }
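/* An illustrative case (hypothetical labels): MAYBE_MULTIPLE is set for
   insns such as the increment below, which an inner backward branch can
   re-execute several times within one iteration of this loop:

	top:    ...
	inner:  x = x + 1
	        ...
	        if (c) goto inner
	        ...
	        if (d) goto top     */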
3731
3732 /* Past a jump, we get to insns for which we can't count
3733 on whether they will be executed during each iteration. */
3734 /* This code appears twice in strength_reduce. There is also similar
3735 code in scan_loop. */
3736 if (GET_CODE (p) == JUMP_INSN
3737 /* If we enter the loop in the middle, and scan around to the
3738 beginning, don't set not_every_iteration for that.
3739 This can be any kind of jump, since we want to know if insns
3740 will be executed if the loop is executed. */
3741 && ! (JUMP_LABEL (p) == loop_top
3742 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3743 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3744 {
3745 rtx label = 0;
3746
3747 /* If this is a jump outside the loop, then it also doesn't
3748 matter. Check to see if the target of this branch is on the
3749 loop_number_exit_labels list. */
3750
3751 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3752 label;
3753 label = LABEL_NEXTREF (label))
3754 if (XEXP (label, 0) == JUMP_LABEL (p))
3755 break;
3756
3757 if (! label)
3758 not_every_iteration = 1;
3759 }
3760
3761 else if (GET_CODE (p) == NOTE)
3762 {
3763 /* At the virtual top of a converted loop, insns are again known to
3764 be executed each iteration: logically, the loop begins here
3765 even though the exit code has been duplicated. */
3766 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3767 not_every_iteration = 0;
3768 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3769 loop_depth++;
3770 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3771 loop_depth--;
3772 }
3773
3774 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3775 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3776 or not an insn is known to be executed each iteration of the
3777 loop, whether or not any iterations are known to occur.
3778
3779 Therefore, if we have just passed a label and have no more labels
3780 between here and the test insn of the loop, we know these insns
3781 will be executed each iteration. */
3782
3783 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3784 && no_labels_between_p (p, loop_end))
3785 not_every_iteration = 0;
3786 }
3787
3788 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3789 Make a sanity check against n_times_set. */
3790 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3791 {
3792 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3793 /* The above happens if the register was modified by a subreg, etc. */
3794 /* Make sure it is not recognized as a basic induction var: */
3795 || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
3796 /* If never incremented, it is invariant that we decided not to
3797 move. So leave it alone. */
3798 || ! bl->incremented)
3799 {
3800 if (loop_dump_stream)
3801 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3802 bl->regno,
3803 (reg_iv_type[bl->regno] != BASIC_INDUCT
3804 ? "not induction variable"
3805 : (! bl->incremented ? "never incremented"
3806 : "count error")));
3807
3808 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3809 *backbl = bl->next;
3810 }
3811 else
3812 {
3813 backbl = &bl->next;
3814
3815 if (loop_dump_stream)
3816 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3817 }
3818 }
3819
3820 /* Exit if there are no bivs. */
3821 if (! loop_iv_list)
3822 {
3823 /* We can still unroll the loop anyway, but indicate that there is no
3824 strength reduction info available. */
3825 if (unroll_p)
3826 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3827
3828 return;
3829 }
3830
3831 /* Find initial value for each biv by searching backwards from loop_start,
3832 halting at first label. Also record any test condition. */
3833
3834 call_seen = 0;
3835 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3836 {
3837 note_insn = p;
3838
3839 if (GET_CODE (p) == CALL_INSN)
3840 call_seen = 1;
3841
3842 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3843 || GET_CODE (p) == CALL_INSN)
3844 note_stores (PATTERN (p), record_initial);
3845
3846 /* Record any test of a biv that branches around the loop if there is
3847 no store between it and the start of the loop. We only care about
3848 tests with constants and registers, and only certain of those. */
3849 if (GET_CODE (p) == JUMP_INSN
3850 && JUMP_LABEL (p) != 0
3851 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3852 && (test = get_condition_for_loop (p)) != 0
3853 && GET_CODE (XEXP (test, 0)) == REG
3854 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3855 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3856 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3857 && bl->init_insn == 0)
3858 {
3859 /* If an NE test, we have an initial value! */
3860 if (GET_CODE (test) == NE)
3861 {
3862 bl->init_insn = p;
3863 bl->init_set = gen_rtx_SET (VOIDmode,
3864 XEXP (test, 0), XEXP (test, 1));
3865 }
3866 else
3867 bl->initial_test = test;
3868 }
3869 }
3870
3871 /* Look at each biv and see if we can say anything better about its
3872 initial value from any initializing insns set up above. (This is done
3873 in two passes to avoid missing SETs in a PARALLEL.) */
3874 for (bl = loop_iv_list; bl; bl = bl->next)
3875 {
3876 rtx src;
3877 rtx note;
3878
3879 if (! bl->init_insn)
3880 continue;
3881
3882 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3883 is a constant, use that value. */
3884 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3885 && CONSTANT_P (XEXP (note, 0)))
3886 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3887 && CONSTANT_P (XEXP (note, 0))))
3888 src = XEXP (note, 0);
3889 else
3890 src = SET_SRC (bl->init_set);
3891
3892 if (loop_dump_stream)
3893 fprintf (loop_dump_stream,
3894 "Biv %d initialized at insn %d: initial value ",
3895 bl->regno, INSN_UID (bl->init_insn));
3896
3897 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3898 || GET_MODE (src) == VOIDmode)
3899 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3900 {
3901 bl->initial_value = src;
3902
3903 if (loop_dump_stream)
3904 {
3905 if (GET_CODE (src) == CONST_INT)
3906 {
3907 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3908 fputc ('\n', loop_dump_stream);
3909 }
3910 else
3911 {
3912 print_rtl (loop_dump_stream, src);
3913 fprintf (loop_dump_stream, "\n");
3914 }
3915 }
3916 }
3917 else
3918 {
3919 /* The biv's initial value is not a simple move,
3920 so let it keep the initial value of "itself". */
3921
3922 if (loop_dump_stream)
3923 fprintf (loop_dump_stream, "is complex\n");
3924 }
3925 }
3926
3927 /* Search the loop for general induction variables. */
3928
3929 /* A register is a giv if: it is only set once, it is a function of a
3930 biv and a constant (or invariant), and it is not a biv. */
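/* Illustrative examples (hypothetical register numbers): with biv b,

	(set (reg 100) (mult (reg b) (const_int 4)))
	(set (reg 101) (plus (reg b) (reg 99)))

   are giv candidates (mult_val 4 / add_val 0, and mult_val 1 /
   add_val (reg 99) when reg 99 is invariant), provided each register
   is set only once (or by consecutive sets forming one giv).  */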
3931
3932 not_every_iteration = 0;
3933 loop_depth = 0;
3934 p = scan_start;
3935 while (1)
3936 {
3937 p = NEXT_INSN (p);
3938 /* At end of a straight-in loop, we are done.
3939 At end of a loop entered at the bottom, scan the top. */
3940 if (p == scan_start)
3941 break;
3942 if (p == end)
3943 {
3944 if (loop_top != 0)
3945 p = loop_top;
3946 else
3947 break;
3948 if (p == scan_start)
3949 break;
3950 }
3951
3952 /* Look for a general induction variable in a register. */
3953 if (GET_CODE (p) == INSN
3954 && (set = single_set (p))
3955 && GET_CODE (SET_DEST (set)) == REG
3956 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
3957 {
3958 rtx src_reg;
3959 rtx add_val;
3960 rtx mult_val;
3961 int benefit;
3962 rtx regnote = 0;
3963
3964 dest_reg = SET_DEST (set);
3965 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3966 continue;
3967
3968 if (/* SET_SRC is a giv. */
3969 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
3970 &mult_val, 0, &benefit)
3971 /* Equivalent expression is a giv. */
3972 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3973 && general_induction_var (XEXP (regnote, 0), &src_reg,
3974 &add_val, &mult_val, 0,
3975 &benefit)))
3976 /* Don't try to handle any regs made by loop optimization.
3977 We have nothing on them in regno_first_uid, etc. */
3978 && REGNO (dest_reg) < max_reg_before_loop
3979 /* Don't recognize a BASIC_INDUCT_VAR here. */
3980 && dest_reg != src_reg
3981 /* This must be the only place where the register is set. */
3982 && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
3983 /* or all sets must be consecutive and make a giv. */
3984 || (benefit = consec_sets_giv (benefit, p,
3985 src_reg, dest_reg,
3986 &add_val, &mult_val))))
3987 {
3988 int count;
3989 struct induction *v
3990 = (struct induction *) alloca (sizeof (struct induction));
3991 rtx temp;
3992
3993 /* If this is a library call, increase benefit. */
3994 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3995 benefit += libcall_benefit (p);
3996
3997 /* Skip the consecutive insns, if there are any. */
3998 for (count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
3999 count > 0; count--)
4000 {
4001 /* If first insn of libcall sequence, skip to end.
4002 Do this at start of loop, since INSN is guaranteed to
4003 be an insn here. */
4004 if (GET_CODE (p) != NOTE
4005 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
4006 p = XEXP (temp, 0);
4007
4008 do p = NEXT_INSN (p);
4009 while (GET_CODE (p) == NOTE);
4010 }
4011
4012 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
4013 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
4014 loop_end);
4015
4016 }
4017 }
4018
4019 #ifndef DONT_REDUCE_ADDR
4020 /* Look for givs which are memory addresses. */
4021 /* This resulted in worse code on a VAX 8600. I wonder if it
4022 still does. */
4023 if (GET_CODE (p) == INSN)
4024 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
4025 loop_end);
4026 #endif
4027
4028 /* Update the status of whether giv can derive other givs. This can
4029 change when we pass a label or an insn that updates a biv. */
4030 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4031 || GET_CODE (p) == CODE_LABEL)
4032 update_giv_derive (p);
4033
4034 /* Past a jump, we get to insns for which we can't count
4035 on whether they will be executed during each iteration. */
4036 /* This code appears twice in strength_reduce. There is also similar
4037 code in scan_loop. */
4038 if (GET_CODE (p) == JUMP_INSN
4039 /* If we enter the loop in the middle, and scan around to the
4040 beginning, don't set not_every_iteration for that.
4041 This can be any kind of jump, since we want to know if insns
4042 will be executed if the loop is executed. */
4043 && ! (JUMP_LABEL (p) == loop_top
4044 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
4045 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
4046 {
4047 rtx label = 0;
4048
4049 /* If this is a jump outside the loop, then it also doesn't
4050 matter. Check to see if the target of this branch is on the
4051 loop_number_exit_labels list. */
4052
4053 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
4054 label;
4055 label = LABEL_NEXTREF (label))
4056 if (XEXP (label, 0) == JUMP_LABEL (p))
4057 break;
4058
4059 if (! label)
4060 not_every_iteration = 1;
4061 }
4062
4063 else if (GET_CODE (p) == NOTE)
4064 {
4065 /* At the virtual top of a converted loop, insns are again known to
4066 be executed each iteration: logically, the loop begins here
4067 even though the exit code has been duplicated. */
4068 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
4069 not_every_iteration = 0;
4070 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4071 loop_depth++;
4072 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4073 loop_depth--;
4074 }
4075
4076 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4077 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4078 or not an insn is known to be executed each iteration of the
4079 loop, whether or not any iterations are known to occur.
4080
4081 Therefore, if we have just passed a label and have no more labels
4082 between here and the test insn of the loop, we know these insns
4083 will be executed each iteration. */
4084
4085 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
4086 && no_labels_between_p (p, loop_end))
4087 not_every_iteration = 0;
4088 }
4089
4090 /* Try to calculate and save the number of loop iterations. This is
4091 set to zero if the actual number can not be calculated. This must
4092 be called after all giv's have been identified, since otherwise it may
4093 fail if the iteration variable is a giv. */
4094
4095 loop_n_iterations = loop_iterations (loop_start, loop_end);
4096
4097 /* Now for each giv for which we still don't know whether or not it is
4098 replaceable, check to see if it is replaceable because its final value
4099 can be calculated. This must be done after loop_iterations is called,
4100 so that final_giv_value will work correctly. */
4101
4102 for (bl = loop_iv_list; bl; bl = bl->next)
4103 {
4104 struct induction *v;
4105
4106 for (v = bl->giv; v; v = v->next_iv)
4107 if (! v->replaceable && ! v->not_replaceable)
4108 check_final_value (v, loop_start, loop_end);
4109 }
4110
4111 /* Try to prove that the loop counter variable (if any) is always
4112 nonnegative; if so, record that fact with a REG_NONNEG note
4113 so that "decrement and branch until zero" insn can be used. */
4114 check_dbra_loop (loop_end, insn_count, loop_start);
4115
4116 #ifdef HAIFA
4117 /* Record the loop variables relevant for BCT optimization before unrolling
4118 the loop. Unrolling may update part of this information, and the
4119 correct data will be used for generating the BCT. */
4120 #ifdef HAVE_decrement_and_branch_on_count
4121 if (HAVE_decrement_and_branch_on_count && bct_p)
4122 analyze_loop_iterations (loop_start, loop_end);
4123 #endif
4124 #endif /* HAIFA */
4125
4126 /* Create reg_map to hold substitutions for replaceable giv regs. */
4127 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
4128 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
4129
4130 /* Examine each iv class for feasibility of strength reduction/induction
4131 variable elimination. */
4132
4133 for (bl = loop_iv_list; bl; bl = bl->next)
4134 {
4135 struct induction *v;
4136 int benefit;
4137 int all_reduced;
4138 rtx final_value = 0;
4139
4140 /* Test whether it will be possible to eliminate this biv
4141 provided all givs are reduced. This is possible if either
4142 the reg is not used outside the loop, or we can compute
4143 what its final value will be.
4144
4145 For architectures with a decrement_and_branch_until_zero insn,
4146 don't do this if we put a REG_NONNEG note on the endtest for
4147 this biv. */
4148
4149 /* Compare against bl->init_insn rather than loop_start.
4150 We aren't concerned with any uses of the biv between
4151 init_insn and loop_start since these won't be affected
4152 by the value of the biv elsewhere in the function, so
4153 long as init_insn doesn't use the biv itself.
4154 March 14, 1989 -- self@bayes.arc.nasa.gov */
4155
4156 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4157 && bl->init_insn
4158 && INSN_UID (bl->init_insn) < max_uid_for_loop
4159 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4160 #ifdef HAVE_decrement_and_branch_until_zero
4161 && ! bl->nonneg
4162 #endif
4163 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4164 || ((final_value = final_biv_value (bl, loop_start, loop_end))
4165 #ifdef HAVE_decrement_and_branch_until_zero
4166 && ! bl->nonneg
4167 #endif
4168 ))
4169 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
4170 threshold, insn_count);
4171 else
4172 {
4173 if (loop_dump_stream)
4174 {
4175 fprintf (loop_dump_stream,
4176 "Cannot eliminate biv %d.\n",
4177 bl->regno);
4178 fprintf (loop_dump_stream,
4179 "First use: insn %d, last use: insn %d.\n",
4180 REGNO_FIRST_UID (bl->regno),
4181 REGNO_LAST_UID (bl->regno));
4182 }
4183 }
4184
4185 /* Combine all giv's for this iv_class. */
4186 combine_givs (bl);
4187
4188 /* This will be true at the end, if all givs which depend on this
4189 biv have been strength reduced.
4190 We can't (currently) eliminate the biv unless this is so. */
4191 all_reduced = 1;
4192
4193 /* Check each giv in this class to see if we will benefit by reducing
4194 it. Skip giv's combined with others. */
4195 for (v = bl->giv; v; v = v->next_iv)
4196 {
4197 struct induction *tv;
4198
4199 if (v->ignore || v->same)
4200 continue;
4201
4202 benefit = v->benefit;
4203
4204 /* Reduce benefit if not replaceable, since we will insert
4205 a move-insn to replace the insn that calculates this giv.
4206 Don't do this unless the giv is a user variable, since it
4207 will often be marked non-replaceable because of the duplication
4208 of the exit code outside the loop. In such a case, the copies
4209 we insert are dead and will be deleted. So they don't have
4210 a cost. Similar situations exist. */
4211 /* ??? The new final_[bg]iv_value code does a much better job
4212 of finding replaceable giv's, and hence this code may no longer
4213 be necessary. */
4214 if (! v->replaceable && ! bl->eliminable
4215 && REG_USERVAR_P (v->dest_reg))
4216 benefit -= copy_cost;
4217
4218 /* Decrease the benefit to count the add-insns that we will
4219 insert to increment the reduced reg for the giv. */
4220 benefit -= add_cost * bl->biv_count;
4221
4222 /* Decide whether to strength-reduce this giv or to leave the code
4223 unchanged (recompute it from the biv each time it is used).
4224 This decision can be made independently for each giv. */
4225
4226 #ifdef AUTO_INC_DEC
4227 /* Attempt to guess whether autoincrement will handle some of the
4228 new add insns; if so, increase BENEFIT (undo the subtraction of
4229 add_cost that was done above). */
4230 if (v->giv_type == DEST_ADDR
4231 && GET_CODE (v->mult_val) == CONST_INT)
4232 {
4233 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4234 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4235 benefit += add_cost * bl->biv_count;
4236 #endif
4237 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4238 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4239 benefit += add_cost * bl->biv_count;
4240 #endif
4241 }
4242 #endif
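/* E.g. (illustrative): with post-increment addressing, a DEST_ADDR giv
   whose mult_val equals GET_MODE_SIZE (mem_mode) steps by exactly one
   element, as in the C fragment `*p++ = 0;', so flow can fold each add
   into the memory reference and the adds cost nothing.  */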
4243
4244 /* If an insn is not to be strength reduced, then set its ignore
4245 flag, and clear all_reduced. */
4246
4247 /* A giv that depends on a reversed biv must be reduced if it is
4248 used after the loop exit; otherwise, it would have the wrong
4249 value after the loop exit. To keep it simple, just reduce all
4250 such givs, whether or not we know they are used after the loop
4251 exit. */
4252
4253 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4254 && ! bl->reversed )
4255 {
4256 if (loop_dump_stream)
4257 fprintf (loop_dump_stream,
4258 "giv of insn %d not worth while, %d vs %d.\n",
4259 INSN_UID (v->insn),
4260 v->lifetime * threshold * benefit, insn_count);
4261 v->ignore = 1;
4262 all_reduced = 0;
4263 }
4264 else
4265 {
4266 /* Check that we can increment the reduced giv without a
4267 multiply insn. If not, reject it. */
4268
4269 for (tv = bl->biv; tv; tv = tv->next_iv)
4270 if (tv->mult_val == const1_rtx
4271 && ! product_cheap_p (tv->add_val, v->mult_val))
4272 {
4273 if (loop_dump_stream)
4274 fprintf (loop_dump_stream,
4275 "giv of insn %d: would need a multiply.\n",
4276 INSN_UID (v->insn));
4277 v->ignore = 1;
4278 all_reduced = 0;
4279 break;
4280 }
4281 }
4282 }
4283
4284 /* Reduce each giv that we decided to reduce. */
4285
4286 for (v = bl->giv; v; v = v->next_iv)
4287 {
4288 struct induction *tv;
4289 if (! v->ignore && v->same == 0)
4290 {
4291 int auto_inc_opt = 0;
4292
4293 v->new_reg = gen_reg_rtx (v->mode);
4294
4295 #ifdef AUTO_INC_DEC
4296 /* If the target has auto-increment addressing modes, and
4297 this is an address giv, then try to put the increment
4298 immediately after its use, so that flow can create an
4299 auto-increment addressing mode. */
4300 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4301 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4302 /* We don't handle reversed biv's because bl->biv->insn
4303 does not have a valid INSN_LUID. */
4304 && ! bl->reversed
4305 && v->always_executed && ! v->maybe_multiple
4306 && INSN_UID (v->insn) < max_uid_for_loop)
4307 {
4308 /* If other giv's have been combined with this one, then
4309 this will work only if all uses of the other giv's occur
4310 before this giv's insn. This is difficult to check.
4311
4312 We simplify this by looking for the common case where
4313 there is one DEST_REG giv, and this giv's insn is the
4314 last use of the dest_reg of that DEST_REG giv. If the
4315 increment occurs after the address giv, then we can
4316 perform the optimization. (Otherwise, the increment
4317 would have to go before other_giv, and we would not be
4318 able to combine it with the address giv to get an
4319 auto-inc address.) */
4320 if (v->combined_with)
4321 {
4322 struct induction *other_giv = 0;
4323
4324 for (tv = bl->giv; tv; tv = tv->next_iv)
4325 if (tv->same == v)
4326 {
4327 if (other_giv)
4328 break;
4329 else
4330 other_giv = tv;
4331 }
4332 if (! tv && other_giv
4333 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4334 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4335 == INSN_UID (v->insn))
4336 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4337 auto_inc_opt = 1;
4338 }
4339 /* Check for case where increment is before the address
4340 giv. Do this test in "loop order". */
4341 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4342 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4343 || (INSN_LUID (bl->biv->insn)
4344 > INSN_LUID (scan_start))))
4345 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4346 && (INSN_LUID (scan_start)
4347 < INSN_LUID (bl->biv->insn))))
4348 auto_inc_opt = -1;
4349 else
4350 auto_inc_opt = 1;
4351
4352 #ifdef HAVE_cc0
4353 {
4354 rtx prev;
4355
4356 /* We can't put an insn immediately after one setting
4357 cc0, or immediately before one using cc0. */
4358 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4359 || (auto_inc_opt == -1
4360 && (prev = prev_nonnote_insn (v->insn)) != 0
4361 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4362 && sets_cc0_p (PATTERN (prev))))
4363 auto_inc_opt = 0;
4364 }
4365 #endif
4366
4367 if (auto_inc_opt)
4368 v->auto_inc_opt = 1;
4369 }
4370 #endif
4371
4372 /* For each place where the biv is incremented, add an insn
4373 to increment the new, reduced reg for the giv. */
4374 for (tv = bl->biv; tv; tv = tv->next_iv)
4375 {
4376 rtx insert_before;
4377
4378 if (! auto_inc_opt)
4379 insert_before = tv->insn;
4380 else if (auto_inc_opt == 1)
4381 insert_before = NEXT_INSN (v->insn);
4382 else
4383 insert_before = v->insn;
4384
4385 if (tv->mult_val == const1_rtx)
4386 emit_iv_add_mult (tv->add_val, v->mult_val,
4387 v->new_reg, v->new_reg, insert_before);
4388 else /* tv->mult_val == const0_rtx */
4389 /* A multiply is acceptable here
4390 since this is presumed to be seldom executed. */
4391 emit_iv_add_mult (tv->add_val, v->mult_val,
4392 v->add_val, v->new_reg, insert_before);
4393 }
4394
4395 /* Add code at loop start to initialize giv's reduced reg. */
4396
4397 emit_iv_add_mult (bl->initial_value, v->mult_val,
4398 v->add_val, v->new_reg, loop_start);
4399 }
4400 }
4401
4402 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4403 as not reduced.
4404
4405 For each giv register that can be reduced now: if replaceable,
4406 substitute reduced reg wherever the old giv occurs;
4407 else add new move insn "giv_reg = reduced_reg".
4408
4409 Also check for givs whose first use is their definition and whose
4410 last use is the definition of another giv. Such a giv is likely
4411 dead and should not be used to eliminate a biv. */
4412 for (v = bl->giv; v; v = v->next_iv)
4413 {
4414 if (v->same && v->same->ignore)
4415 v->ignore = 1;
4416
4417 if (v->ignore)
4418 continue;
4419
4420 if (v->giv_type == DEST_REG
4421 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4422 {
4423 struct induction *v1;
4424
4425 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4426 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4427 v->maybe_dead = 1;
4428 }
4429
4430 /* Update expression if this was combined, in case other giv was
4431 replaced. */
4432 if (v->same)
4433 v->new_reg = replace_rtx (v->new_reg,
4434 v->same->dest_reg, v->same->new_reg);
4435
4436 if (v->giv_type == DEST_ADDR)
4437 /* Store reduced reg as the address in the memref where we found
4438 this giv. */
4439 validate_change (v->insn, v->location, v->new_reg, 0);
4440 else if (v->replaceable)
4441 {
4442 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4443
4444 #if 0
4445 /* I can no longer duplicate the original problem. Perhaps
4446 this is unnecessary now? */
4447
4448 /* Replaceable; it isn't strictly necessary to delete the old
4449 insn and emit a new one, because v->dest_reg is now dead.
4450
4451 However, especially when unrolling loops, the special
4452 handling for (set REG0 REG1) in the second cse pass may
4453 make v->dest_reg live again. To avoid this problem, emit
4454 an insn to set the original giv reg from the reduced giv.
4455 We can not delete the original insn, since it may be part
4456 of a LIBCALL, and the code in flow that eliminates dead
4457 libcalls will fail if it is deleted. */
4458 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4459 v->insn);
4460 #endif
4461 }
4462 else
4463 {
4464 /* Not replaceable; emit an insn to set the original giv reg from
4465 the reduced giv, same as above. */
4466 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4467 v->insn);
4468 }
4469
4470 /* When a loop is reversed, givs which depend on the reversed
4471 biv, and which are live outside the loop, must be set to their
4472 correct final value. This insn is only needed if the giv is
4473 not replaceable. The correct final value is the same as the
4474 value that the giv starts the reversed loop with. */
4475 if (bl->reversed && ! v->replaceable)
4476 emit_iv_add_mult (bl->initial_value, v->mult_val,
4477 v->add_val, v->dest_reg, end_insert_before);
4478 else if (v->final_value)
4479 {
4480 rtx insert_before;
4481
4482 /* If the loop has multiple exits, emit the insn before the
4483 loop to ensure that it will always be executed no matter
4484 how the loop exits. Otherwise, emit the insn after the loop,
4485 since this is slightly more efficient. */
4486 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4487 insert_before = loop_start;
4488 else
4489 insert_before = end_insert_before;
4490 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4491 insert_before);
4492
4493 #if 0
4494 /* If the insn to set the final value of the giv was emitted
4495 before the loop, then we must delete the insn inside the loop
4496 that sets it. If this is a LIBCALL, then we must delete
4497 every insn in the libcall. Note, however, that
4498 final_giv_value will only succeed when there are multiple
4499 exits if the giv is dead at each exit, hence it does not
4500 matter that the original insn remains because it is dead
4501 anyways. */
4502 /* Delete the insn inside the loop that sets the giv since
4503 the giv is now set before (or after) the loop. */
4504 delete_insn (v->insn);
4505 #endif
4506 }
4507
4508 if (loop_dump_stream)
4509 {
4510 fprintf (loop_dump_stream, "giv at %d reduced to ",
4511 INSN_UID (v->insn));
4512 print_rtl (loop_dump_stream, v->new_reg);
4513 fprintf (loop_dump_stream, "\n");
4514 }
4515 }
4516
4517 /* All the givs based on the biv bl have been reduced if they
4518 merit it. */
4519
4520 /* For each giv not marked as maybe dead that has been combined with a
4521 second giv, clear any "maybe dead" mark on that second giv.
4522 v->new_reg will either be or refer to the register of the giv it
4523 combined with.
4524
4525 Doing this clearing avoids problems in biv elimination where a
4526 giv's new_reg is a complex value that can't be put in the insn but
4527 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4528 Since the register will be used in either case, we'd prefer it be
4529 used from the simpler giv. */
4530
4531 for (v = bl->giv; v; v = v->next_iv)
4532 if (! v->maybe_dead && v->same)
4533 v->same->maybe_dead = 0;
4534
4535 /* Try to eliminate the biv, if it is a candidate.
4536 This won't work if ! all_reduced,
4537 since the givs we planned to use might not have been reduced.
4538
4539 We have to be careful that we didn't initially think we could eliminate
4540 this biv because of a giv that we now think may be dead and shouldn't
4541 be used as a biv replacement.
4542
4543 Also, there is the possibility that we may have a giv that looks
4544 like it can be used to eliminate a biv, but the resulting insn
4545 isn't valid. This can happen, for example, on the 88k, where a
4546 JUMP_INSN can compare a register only with zero. Attempts to
4547 replace it with a compare with a constant will fail.
4548
4549 Note that in cases where this call fails, we may have replaced some
4550 of the occurrences of the biv with a giv, but no harm was done in
4551 doing so in the rare cases where it can occur. */
4552
4553 if (all_reduced == 1 && bl->eliminable
4554 && maybe_eliminate_biv (bl, loop_start, end, 1,
4555 threshold, insn_count))
4557 {
4558 /* ??? If we created a new test to bypass the loop entirely,
4559 or otherwise drop straight in, based on this test, then
4560 we might want to rewrite it also. This way some later
4561 pass has more hope of removing the initialization of this
4562 biv entirely. */
4563
4564 /* If final_value != 0, then the biv may be used after loop end
4565 and we must emit an insn to set it just in case.
4566
4567 Reversed bivs already have an insn after the loop setting their
4568 value, so we don't need another one. We can't calculate the
4569 proper final value for such a biv here anyway. */
4570 if (final_value != 0 && ! bl->reversed)
4571 {
4572 rtx insert_before;
4573
4574 /* If the loop has multiple exits, emit the insn before the
4575 loop to ensure that it will always be executed no matter
4576 how the loop exits. Otherwise, emit the insn after the
4577 loop, since this is slightly more efficient. */
4578 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4579 insert_before = loop_start;
4580 else
4581 insert_before = end_insert_before;
4582
4583 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4584 insert_before);
4585 }
4586
4587 #if 0
4588 /* Delete all of the instructions inside the loop which set
4589 the biv, as they are all dead. It is safe to delete them,
4590 because an insn setting a biv will never be part of a libcall. */
4591 /* However, deleting them will invalidate the regno_last_uid info,
4592 so keeping them around is more convenient. Final_biv_value
4593 will only succeed when there are multiple exits if the biv
4594 is dead at each exit, hence it does not matter that the original
4595 insn remains, because it is dead anyways. */
4596 for (v = bl->biv; v; v = v->next_iv)
4597 delete_insn (v->insn);
4598 #endif
4599
4600 if (loop_dump_stream)
4601 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4602 bl->regno);
4603 }
4604 }
4605
4606 /* Go through all the instructions in the loop, making all the
4607 register substitutions scheduled in REG_MAP. */
4608
4609 for (p = loop_start; p != end; p = NEXT_INSN (p))
4610 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4611 || GET_CODE (p) == CALL_INSN)
4612 {
4613 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4614 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4615 INSN_CODE (p) = -1;
4616 }
4617
4618 /* Unroll loops from within strength reduction so that we can use the
4619 induction variable information that strength_reduce has already
4620 collected. */
4621
4622 if (unroll_p)
4623 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4624
4625 #ifdef HAIFA
4626 /* Instrument the loop with a BCT insn. */
4627 #ifdef HAVE_decrement_and_branch_on_count
4628 if (HAVE_decrement_and_branch_on_count && bct_p)
4629 insert_bct (loop_start, loop_end);
4630 #endif
4631 #endif /* HAIFA */
4632
4633 if (loop_dump_stream)
4634 fprintf (loop_dump_stream, "\n");
4635 }
4636 \f
4637 /* Return 1 if X is a valid source for an initial value (or as value being
4638 compared against in an initial test).
4639
4640 X must be either a register or constant and must not be clobbered between
4641 the current insn and the start of the loop.
4642
4643 INSN is the insn containing X. */
4644
4645 static int
4646 valid_initial_value_p (x, insn, call_seen, loop_start)
4647 rtx x;
4648 rtx insn;
4649 int call_seen;
4650 rtx loop_start;
4651 {
4652 if (CONSTANT_P (x))
4653 return 1;
4654
4655 /* Only consider pseudos we know about, initialized in insns whose
4656 luids we know. */
4657 if (GET_CODE (x) != REG
4658 || REGNO (x) >= max_reg_before_loop)
4659 return 0;
4660
4661 /* Don't use a call-clobbered register across a call which clobbers it.
4662 On some machines, don't use any hard registers at all. */
4663 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4664 && (SMALL_REGISTER_CLASSES
4665 || (call_used_regs[REGNO (x)] && call_seen)))
4666 return 0;
4667
4668 /* Don't use registers that have been clobbered before the start of the
4669 loop. */
4670 if (reg_set_between_p (x, insn, loop_start))
4671 return 0;
4672
4673 return 1;
4674 }
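/* Illustrative examples (hypothetical regno): (const_int 10) is always
   a valid initial value; (reg 104) is valid if it is a pseudo known to
   regscan and not set between INSN and the loop start; a call-used hard
   register is rejected when a call intervenes.  */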
4675 \f
4676 /* Scan X for memory refs and check each memory address
4677 as a possible giv. INSN is the insn whose pattern X comes from.
4678 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4679 every loop iteration. */
4680
4681 static void
4682 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4683 rtx x;
4684 rtx insn;
4685 int not_every_iteration;
4686 rtx loop_start, loop_end;
4687 {
4688 register int i, j;
4689 register enum rtx_code code;
4690 register char *fmt;
4691
4692 if (x == 0)
4693 return;
4694
4695 code = GET_CODE (x);
4696 switch (code)
4697 {
4698 case REG:
4699 case CONST_INT:
4700 case CONST:
4701 case CONST_DOUBLE:
4702 case SYMBOL_REF:
4703 case LABEL_REF:
4704 case PC:
4705 case CC0:
4706 case ADDR_VEC:
4707 case ADDR_DIFF_VEC:
4708 case USE:
4709 case CLOBBER:
4710 return;
4711
4712 case MEM:
4713 {
4714 rtx src_reg;
4715 rtx add_val;
4716 rtx mult_val;
4717 int benefit;
4718
4719 /* This code used to disable creating GIVs with mult_val == 1 and
4720 add_val == 0. However, this leads to lost optimizations when
4721 it comes time to combine a set of related DEST_ADDR GIVs, since
4722 this one would not be seen. */
4723
4724 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
4725 &mult_val, 1, &benefit))
4726 {
4727 /* Found one; record it. */
4728 struct induction *v
4729 = (struct induction *) oballoc (sizeof (struct induction));
4730
4731 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4732 add_val, benefit, DEST_ADDR, not_every_iteration,
4733 &XEXP (x, 0), loop_start, loop_end);
4734
4735 v->mem_mode = GET_MODE (x);
4736 }
4737 }
4738 return;
4739
4740 default:
4741 break;
4742 }
4743
4744 /* Recursively scan the subexpressions for other mem refs. */
4745
4746 fmt = GET_RTX_FORMAT (code);
4747 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4748 if (fmt[i] == 'e')
4749 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4750 loop_end);
4751 else if (fmt[i] == 'E')
4752 for (j = 0; j < XVECLEN (x, i); j++)
4753 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4754 loop_start, loop_end);
4755 }
4756 \f
4757 /* Fill in the data about one biv update.
4758 V is the `struct induction' in which we record the biv. (It is
4759 allocated by the caller, with alloca.)
4760 INSN is the insn that sets it.
4761 DEST_REG is the biv's reg.
4762
4763 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4764 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4765 being set to INC_VAL.
4766
4767 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4768 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4769 can be executed more than once per iteration. If MAYBE_MULTIPLE
4770 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4771 executed exactly once per iteration. */
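/* For example (illustrative pseudo-RTL): the update

	(set (reg b) (plus (reg b) (const_int 4)))

   is recorded with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
   while a plain copy (set (reg b) (reg c)) is recorded with
   MULT_VAL == const0_rtx and INC_VAL == (reg c).  */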
4772
4773 static void
4774 record_biv (v, insn, dest_reg, inc_val, mult_val,
4775 not_every_iteration, maybe_multiple)
4776 struct induction *v;
4777 rtx insn;
4778 rtx dest_reg;
4779 rtx inc_val;
4780 rtx mult_val;
4781 int not_every_iteration;
4782 int maybe_multiple;
4783 {
4784 struct iv_class *bl;
4785
4786 v->insn = insn;
4787 v->src_reg = dest_reg;
4788 v->dest_reg = dest_reg;
4789 v->mult_val = mult_val;
4790 v->add_val = inc_val;
4791 v->mode = GET_MODE (dest_reg);
4792 v->always_computable = ! not_every_iteration;
4793 v->always_executed = ! not_every_iteration;
4794 v->maybe_multiple = maybe_multiple;
4795
4796 /* Add this to the reg's iv_class, creating a class
4797 if this is the first incrementation of the reg. */
4798
4799 bl = reg_biv_class[REGNO (dest_reg)];
4800 if (bl == 0)
4801 {
4802 /* Create and initialize new iv_class. */
4803
4804 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4805
4806 bl->regno = REGNO (dest_reg);
4807 bl->biv = 0;
4808 bl->giv = 0;
4809 bl->biv_count = 0;
4810 bl->giv_count = 0;
4811
4812 /* Set initial value to the reg itself. */
4813 bl->initial_value = dest_reg;
4814 /* We haven't seen the initializing insn yet. */
4815 bl->init_insn = 0;
4816 bl->init_set = 0;
4817 bl->initial_test = 0;
4818 bl->incremented = 0;
4819 bl->eliminable = 0;
4820 bl->nonneg = 0;
4821 bl->reversed = 0;
4822 bl->total_benefit = 0;
4823
4824 /* Add this class to loop_iv_list. */
4825 bl->next = loop_iv_list;
4826 loop_iv_list = bl;
4827
4828 /* Put it in the array of biv register classes. */
4829 reg_biv_class[REGNO (dest_reg)] = bl;
4830 }
4831
4832 /* Update IV_CLASS entry for this biv. */
4833 v->next_iv = bl->biv;
4834 bl->biv = v;
4835 bl->biv_count++;
4836 if (mult_val == const1_rtx)
4837 bl->incremented = 1;
4838
4839 if (loop_dump_stream)
4840 {
4841 fprintf (loop_dump_stream,
4842 "Insn %d: possible biv, reg %d,",
4843 INSN_UID (insn), REGNO (dest_reg));
4844 if (GET_CODE (inc_val) == CONST_INT)
4845 {
4846 fprintf (loop_dump_stream, " const =");
4847 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4848 fputc ('\n', loop_dump_stream);
4849 }
4850 else
4851 {
4852 fprintf (loop_dump_stream, " const = ");
4853 print_rtl (loop_dump_stream, inc_val);
4854 fprintf (loop_dump_stream, "\n");
4855 }
4856 }
4857 }
4858 \f
4859 /* Fill in the data about one giv.
4860 V is the `struct induction' in which we record the giv. (It is
4861 allocated by the caller, with alloca.)
4862 INSN is the insn that sets it.
4863 BENEFIT estimates the savings from deleting this insn.
4864 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4865 into a register or is used as a memory address.
4866
4867 SRC_REG is the biv reg which the giv is computed from.
4868 DEST_REG is the giv's reg (if the giv is stored in a reg).
4869 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4870 LOCATION points to the place where this giv's value appears in INSN. */
4871
4872 static void
4873 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4874 type, not_every_iteration, location, loop_start, loop_end)
4875 struct induction *v;
4876 rtx insn;
4877 rtx src_reg;
4878 rtx dest_reg;
4879 rtx mult_val, add_val;
4880 int benefit;
4881 enum g_types type;
4882 int not_every_iteration;
4883 rtx *location;
4884 rtx loop_start, loop_end;
4885 {
4886 struct induction *b;
4887 struct iv_class *bl;
4888 rtx set = single_set (insn);
4889
4890 v->insn = insn;
4891 v->src_reg = src_reg;
4892 v->giv_type = type;
4893 v->dest_reg = dest_reg;
4894 v->mult_val = mult_val;
4895 v->add_val = add_val;
4896 v->benefit = benefit;
4897 v->location = location;
4898 v->cant_derive = 0;
4899 v->combined_with = 0;
4900 v->maybe_multiple = 0;
4901 v->maybe_dead = 0;
4902 v->derive_adjustment = 0;
4903 v->same = 0;
4904 v->ignore = 0;
4905 v->new_reg = 0;
4906 v->final_value = 0;
4907 v->same_insn = 0;
4908 v->auto_inc_opt = 0;
4909 v->unrolled = 0;
4910 v->shared = 0;
4911
4912 /* The v->always_computable field is used in update_giv_derive, to
4913 determine whether a giv can be used to derive another giv. For a
4914 DEST_REG giv, INSN computes a new value for the giv, so its value
4915 isn't computable if INSN isn't executed every iteration.
4916 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4917 it does not compute a new value. Hence the value is always computable
4918 regardless of whether INSN is executed each iteration. */
4919
4920 if (type == DEST_ADDR)
4921 v->always_computable = 1;
4922 else
4923 v->always_computable = ! not_every_iteration;
4924
4925 v->always_executed = ! not_every_iteration;
4926
4927 if (type == DEST_ADDR)
4928 {
4929 v->mode = GET_MODE (*location);
4930 v->lifetime = 1;
4931 v->times_used = 1;
4932 }
4933 else /* type == DEST_REG */
4934 {
4935 v->mode = GET_MODE (SET_DEST (set));
4936
4937 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4938 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4939
4940 v->times_used = VARRAY_INT (n_times_used, REGNO (dest_reg));
4941
4942 /* If the lifetime is zero, it means that this register is
4943 really a dead store. So mark this as a giv that can be
4944 ignored. This will not prevent the biv from being eliminated. */
4945 if (v->lifetime == 0)
4946 v->ignore = 1;
4947
4948 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4949 reg_iv_info[REGNO (dest_reg)] = v;
4950 }
4951
4952 /* Add the giv to the class of givs computed from one biv. */
4953
4954 bl = reg_biv_class[REGNO (src_reg)];
4955 if (bl)
4956 {
4957 v->next_iv = bl->giv;
4958 bl->giv = v;
4959 /* Don't count DEST_ADDR. This is supposed to count the number of
4960 insns that calculate givs. */
4961 if (type == DEST_REG)
4962 bl->giv_count++;
4963 bl->total_benefit += benefit;
4964 }
4965 else
4966 /* Fatal error, biv missing for this giv? */
4967 abort ();
4968
4969 if (type == DEST_ADDR)
4970 v->replaceable = 1;
4971 else
4972 {
4973 /* The giv can be replaced outright by the reduced register only if all
4974 of the following conditions are true:
4975 - the insn that sets the giv is always executed on any iteration
4976 on which the giv is used at all
4977 (there are two ways to deduce this:
4978 either the insn is executed on every iteration,
4979 or all uses follow that insn in the same basic block),
4980 - the giv is not used outside the loop
4981 - no assignments to the biv occur during the giv's lifetime. */
4982
4983 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4984 /* Previous line always fails if INSN was moved by loop opt. */
4985 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4986 && (! not_every_iteration
4987 || last_use_this_basic_block (dest_reg, insn)))
4988 {
4989 /* Now check that there are no assignments to the biv within the
4990 giv's lifetime. This requires two separate checks. */
4991
4992 /* Check each biv update, and fail if any are between the first
4993 and last use of the giv.
4994
4995 If this loop contains an inner loop that was unrolled, then
4996 the insn modifying the biv may have been emitted by the loop
4997 unrolling code, and hence does not have a valid luid. Just
4998 mark the giv as not replaceable in this case. Such a biv is not
4999 very useful anyway, because it is used in two different loops.
5000 It is very unlikely that we would be able to optimize the giv
5001 using this biv anyway. */
5002
5003 v->replaceable = 1;
5004 for (b = bl->biv; b; b = b->next_iv)
5005 {
5006 if (INSN_UID (b->insn) >= max_uid_for_loop
5007 || ((uid_luid[INSN_UID (b->insn)]
5008 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
5009 && (uid_luid[INSN_UID (b->insn)]
5010 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
5011 {
5012 v->replaceable = 0;
5013 v->not_replaceable = 1;
5014 break;
5015 }
5016 }
5017
5018 /* If there are any backwards branches that go from after the
5019 biv update to before it, then this giv is not replaceable. */
5020 if (v->replaceable)
5021 for (b = bl->biv; b; b = b->next_iv)
5022 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
5023 {
5024 v->replaceable = 0;
5025 v->not_replaceable = 1;
5026 break;
5027 }
5028 }
5029 else
5030 {
5031 /* May still be replaceable; we don't have enough info here to
5032 decide. */
5033 v->replaceable = 0;
5034 v->not_replaceable = 0;
5035 }
5036 }
5037
5038 /* Record whether the add_val contains a const_int, for later use by
5039 combine_givs. */
5040 {
5041 rtx tem = add_val;
5042
5043 v->no_const_addval = 1;
5044 if (tem == const0_rtx)
5045 ;
5046 else if (GET_CODE (tem) == CONST_INT)
5047 v->no_const_addval = 0;
5048 else if (GET_CODE (tem) == PLUS)
5049 {
5050 while (1)
5051 {
5052 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5053 tem = XEXP (tem, 0);
5054 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5055 tem = XEXP (tem, 1);
5056 else
5057 break;
5058 }
5059 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
5060 v->no_const_addval = 0;
5061 }
5062 }
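/* E.g. (illustrative): with add_val (plus (reg 64) (const_int 8)) the
   scan above ends with XEXP (tem, 1) == (const_int 8) and clears
   no_const_addval; with add_val (reg 64) alone it remains set.  */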
5063
5064 if (loop_dump_stream)
5065 {
5066 if (type == DEST_REG)
5067 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5068 INSN_UID (insn), REGNO (dest_reg));
5069 else
5070 fprintf (loop_dump_stream, "Insn %d: dest address",
5071 INSN_UID (insn));
5072
5073 fprintf (loop_dump_stream, " src reg %d benefit %d",
5074 REGNO (src_reg), v->benefit);
5075 fprintf (loop_dump_stream, " used %d lifetime %d",
5076 v->times_used, v->lifetime);
5077
5078 if (v->replaceable)
5079 fprintf (loop_dump_stream, " replaceable");
5080
5081 if (v->no_const_addval)
5082 fprintf (loop_dump_stream, " ncav");
5083
5084 if (GET_CODE (mult_val) == CONST_INT)
5085 {
5086 fprintf (loop_dump_stream, " mult ");
5087 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5088 }
5089 else
5090 {
5091 fprintf (loop_dump_stream, " mult ");
5092 print_rtl (loop_dump_stream, mult_val);
5093 }
5094
5095 if (GET_CODE (add_val) == CONST_INT)
5096 {
5097 fprintf (loop_dump_stream, " add ");
5098 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5099 }
5100 else
5101 {
5102 fprintf (loop_dump_stream, " add ");
5103 print_rtl (loop_dump_stream, add_val);
5104 }
5105 }
5106
5107 if (loop_dump_stream)
5108 fprintf (loop_dump_stream, "\n");
5109
5110 }
5111
5112
5113 /* All this does is determine whether a giv can be made replaceable because
5114 its final value can be calculated. This code can not be part of record_giv
5115 above, because final_giv_value requires that the number of loop iterations
5116 be known, and that can not be accurately calculated until after all givs
5117 have been identified. */
5118
5119 static void
5120 check_final_value (v, loop_start, loop_end)
5121 struct induction *v;
5122 rtx loop_start, loop_end;
5123 {
5124 struct iv_class *bl;
5125 rtx final_value = 0;
5126
5127 bl = reg_biv_class[REGNO (v->src_reg)];
5128
5129 /* DEST_ADDR givs will never reach here, because they are always marked
5130 replaceable above in record_giv. */
5131
5132 /* The giv can be replaced outright by the reduced register only if all
5133 of the following conditions are true:
5134 - the insn that sets the giv is always executed on any iteration
5135 on which the giv is used at all
5136 (there are two ways to deduce this:
5137 either the insn is executed on every iteration,
5138 or all uses follow that insn in the same basic block),
5139 - its final value can be calculated (this condition is different
5140 than the one above in record_giv)
5141 - no assignments to the biv occur during the giv's lifetime. */
5142
5143 #if 0
5144 /* This is only called now when replaceable is known to be false. */
5145 /* Clear replaceable, so that it won't confuse final_giv_value. */
5146 v->replaceable = 0;
5147 #endif
5148
5149 if ((final_value = final_giv_value (v, loop_start, loop_end))
5150 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5151 {
5152 int biv_increment_seen = 0;
5153 rtx p = v->insn;
5154 rtx last_giv_use;
5155
5156 v->replaceable = 1;
5157
5158 /* When trying to determine whether or not a biv increment occurs
5159 during the lifetime of the giv, we can ignore uses of the variable
5160 outside the loop because final_value is true. Hence we can not
5161 use regno_last_uid and regno_first_uid as above in record_giv. */
5162
5163 /* Search the loop to determine whether any assignments to the
5164 biv occur during the giv's lifetime. Start with the insn
5165 that sets the giv, and search around the loop until we come
5166 back to that insn again.
5167
5168 Also fail if there is a jump within the giv's lifetime that jumps
5169 to somewhere outside the lifetime but still within the loop. This
5170 catches spaghetti code where the execution order is not linear, and
5171 hence the above test fails. Here we assume that the giv lifetime
5172 does not extend from one iteration of the loop to the next, so as
5173 to make the test easier. Since the lifetime isn't known yet,
5174 this requires two loops. See also record_giv above. */
5175
5176 last_giv_use = v->insn;
5177
5178 while (1)
5179 {
5180 p = NEXT_INSN (p);
5181 if (p == loop_end)
5182 p = NEXT_INSN (loop_start);
5183 if (p == v->insn)
5184 break;
5185
5186 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5187 || GET_CODE (p) == CALL_INSN)
5188 {
5189 if (biv_increment_seen)
5190 {
5191 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5192 {
5193 v->replaceable = 0;
5194 v->not_replaceable = 1;
5195 break;
5196 }
5197 }
5198 else if (reg_set_p (v->src_reg, PATTERN (p)))
5199 biv_increment_seen = 1;
5200 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5201 last_giv_use = p;
5202 }
5203 }
5204
5205 /* Now that the lifetime of the giv is known, check for branches
5206 from within the lifetime to outside the lifetime if it is still
5207 replaceable. */
5208
5209 if (v->replaceable)
5210 {
5211 p = v->insn;
5212 while (1)
5213 {
5214 p = NEXT_INSN (p);
5215 if (p == loop_end)
5216 p = NEXT_INSN (loop_start);
5217 if (p == last_giv_use)
5218 break;
5219
5220 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5221 && LABEL_NAME (JUMP_LABEL (p))
5222 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5223 || (INSN_UID (v->insn) >= max_uid_for_loop)
5224 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5225 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5226 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5227 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5228 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5229 {
5230 v->replaceable = 0;
5231 v->not_replaceable = 1;
5232
5233 if (loop_dump_stream)
5234 fprintf (loop_dump_stream,
5235 "Found branch outside giv lifetime.\n");
5236
5237 break;
5238 }
5239 }
5240 }
5241
5242 /* If it is replaceable, then save the final value. */
5243 if (v->replaceable)
5244 v->final_value = final_value;
5245 }
5246
5247 if (loop_dump_stream && v->replaceable)
5248 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5249 INSN_UID (v->insn), REGNO (v->dest_reg));
5250 }
5251 \f
5252 /* Update the status of whether a giv can derive other givs.
5253
5254 We need to do something special if there is or may be an update to the biv
5255 between the time the giv is defined and the time it is used to derive
5256 another giv.
5257
5258 In addition, a giv that is only conditionally set is not allowed to
5259 derive another giv once a label has been passed.
5260
5261 The cases we look at are when a label or an update to a biv is passed. */
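/* A worked example with assumed values (not taken from any particular
   target): suppose a giv G is computed as G = B * 4 + 1, and the biv
   update B = B + 2 executes between the definition of G and a use of G
   to derive another giv.  The product 2 * 4 = 8 is recorded in
   giv->derive_adjustment; simplify_giv_expr later subtracts it, so that
   G expressed in terms of the updated biv, (B * 4 + 1) - 8, still
   equals the value G actually holds.  */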
5262
5263 static void
5264 update_giv_derive (p)
5265 rtx p;
5266 {
5267 struct iv_class *bl;
5268 struct induction *biv, *giv;
5269 rtx tem;
5270 int dummy;
5271
5272 /* Search all IV classes, then all bivs, and finally all givs.
5273
5274 There are three cases we are concerned with. First we have the situation
5275 of a giv that is only updated conditionally. In that case, it may not
5276 derive any givs after a label is passed.
5277
5278 The second case is when a biv update occurs, or may occur, after the
5279 definition of a giv. For certain biv updates (see below) that are
5280 known to occur between the giv definition and use, we can adjust the
5281 giv definition. For others, or when the biv update is conditional,
5282 we must prevent the giv from deriving any other givs. There are two
5283 sub-cases within this case.
5284
5285 If this is a label, we are concerned with any biv update that is done
5286 conditionally, since it may be done after the giv is defined followed by
5287 a branch here (actually, we need to pass both a jump and a label, but
5288 this extra tracking doesn't seem worth it).
5289
5290 If this is a jump, we are concerned about any biv update that may be
5291 executed multiple times. We are actually only concerned about
5292 backward jumps, but it is probably not worth performing the test
5293 on the jump again here.
5294
5295 If this is a biv update, we must adjust the giv status to show that a
5296 subsequent biv update was performed. If this adjustment cannot be done,
5297 the giv cannot derive further givs. */
5298
5299 for (bl = loop_iv_list; bl; bl = bl->next)
5300 for (biv = bl->biv; biv; biv = biv->next_iv)
5301 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5302 || biv->insn == p)
5303 {
5304 for (giv = bl->giv; giv; giv = giv->next_iv)
5305 {
5306 /* If cant_derive is already true, there is no point in
5307 checking all of these conditions again. */
5308 if (giv->cant_derive)
5309 continue;
5310
5311 /* If this giv is conditionally set and we have passed a label,
5312 it cannot derive anything. */
5313 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5314 giv->cant_derive = 1;
5315
5316 /* Skip givs that have mult_val == 0, since
5317 they are really invariants. Also skip those that are
5318 replaceable, since we know their lifetime doesn't contain
5319 any biv update. */
5320 else if (giv->mult_val == const0_rtx || giv->replaceable)
5321 continue;
5322
5323 /* The only way we can allow this giv to derive another
5324 is if this is a biv increment and we can form the product
5325 of biv->add_val and giv->mult_val. In this case, we will
5326 be able to compute a compensation. */
5327 else if (biv->insn == p)
5328 {
5329 tem = 0;
5330
5331 if (biv->mult_val == const1_rtx)
5332 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5333 biv->add_val,
5334 giv->mult_val),
5335 &dummy);
5336
5337 if (tem && giv->derive_adjustment)
5338 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5339 giv->derive_adjustment),
5340 &dummy);
5341 if (tem)
5342 giv->derive_adjustment = tem;
5343 else
5344 giv->cant_derive = 1;
5345 }
5346 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5347 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5348 giv->cant_derive = 1;
5349 }
5350 }
5351 }
5352 \f
5353 /* Check whether an insn is an increment legitimate for a basic induction var.
5354 X is the source of insn P, or a part of it.
5355 MODE is the mode in which X should be interpreted.
5356
5357 DEST_REG is the putative biv, also the destination of the insn.
5358 We accept patterns of these forms:
5359 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5360 REG = INVARIANT + REG
5361
5362 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5363 and store the additive term into *INC_VAL.
5364
5365 If X is an assignment of an invariant into DEST_REG, we set
5366 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5367
5368 We also want to detect a BIV when it corresponds to a variable
5369 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5370 of the variable may be a PLUS that adds a SUBREG of that variable to
5371 an invariant and then sign- or zero-extends the result of the PLUS
5372 into the variable.
5373
5374 Most GIVs in such cases will be in the promoted mode, since that is
5375 probably the natural computation mode (and almost certainly the mode
5376 used for addresses) on the machine. So we view the pseudo-reg containing
5377 the variable as the BIV, as if it were simply incremented.
5378
5379 Note that treating the entire pseudo as a BIV will result in making
5380 simple increments to any GIVs based on it. However, if the variable
5381 overflows in its declared mode but not its promoted mode, the result will
5382 be incorrect. This is acceptable if the variable is signed, since
5383 overflows in such cases are undefined, but not if it is unsigned, since
5384 those overflows are defined. So we only check for SIGN_EXTEND and
5385 not ZERO_EXTEND.
5386
5387 If we cannot find a biv, we return 0. */
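/* For illustration (hypothetical register numbers), two patterns accepted
   here: the insn (set (reg 100) (plus (reg 100) (const_int 4))) yields
   *INC_VAL = (const_int 4) and *MULT_VAL = const1_rtx, while
   (set (reg 100) (const_int 0)), accepted only inside the innermost loop
   as noted below, yields *INC_VAL = (const_int 0) and
   *MULT_VAL = const0_rtx.  */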
5388
5389 static int
5390 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5391 register rtx x;
5392 enum machine_mode mode;
5393 rtx p;
5394 rtx dest_reg;
5395 rtx *inc_val;
5396 rtx *mult_val;
5397 {
5398 register enum rtx_code code;
5399 rtx arg;
5400 rtx insn, set = 0;
5401
5402 code = GET_CODE (x);
5403 switch (code)
5404 {
5405 case PLUS:
5406 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5407 || (GET_CODE (XEXP (x, 0)) == SUBREG
5408 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5409 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5410 arg = XEXP (x, 1);
5411 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5412 || (GET_CODE (XEXP (x, 1)) == SUBREG
5413 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5414 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5415 arg = XEXP (x, 0);
5416 else
5417 return 0;
5418
5419 if (invariant_p (arg) != 1)
5420 return 0;
5421
5422 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5423 *mult_val = const1_rtx;
5424 return 1;
5425
5426 case SUBREG:
5427 /* If this is a SUBREG for a promoted variable, check the inner
5428 value. */
5429 if (SUBREG_PROMOTED_VAR_P (x))
5430 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5431 dest_reg, p, inc_val, mult_val);
5432 return 0;
5433
5434 case REG:
5435 /* If this register is assigned in a previous insn, look at its
5436 source, but don't go outside the loop or past a label. */
5437
5438 insn = p;
5439 while (1)
5440 {
5441 do {
5442 insn = PREV_INSN (insn);
5443 } while (insn && GET_CODE (insn) == NOTE
5444 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5445
5446 if (!insn)
5447 break;
5448 set = single_set (insn);
5449 if (set == 0)
5450 break;
5451
5452 if ((SET_DEST (set) == x
5453 || (GET_CODE (SET_DEST (set)) == SUBREG
5454 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5455 <= UNITS_PER_WORD)
5456 && SUBREG_REG (SET_DEST (set)) == x))
5457 && basic_induction_var (SET_SRC (set),
5458 (GET_MODE (SET_SRC (set)) == VOIDmode
5459 ? GET_MODE (x)
5460 : GET_MODE (SET_SRC (set))),
5461 dest_reg, insn,
5462 inc_val, mult_val))
5463 return 1;
5464 }
5465 /* ... fall through ... */
5466
5467 /* Can accept constant setting of biv only when inside innermost loop.
5468 Otherwise, a biv of an inner loop may be incorrectly recognized
5469 as a biv of the outer loop,
5470 causing code to be moved INTO the inner loop. */
5471 case MEM:
5472 if (invariant_p (x) != 1)
5473 return 0;
5474 case CONST_INT:
5475 case SYMBOL_REF:
5476 case CONST:
5477 /* convert_modes aborts if we try to convert to or from CCmode, so just
5478 exclude that case. It is very unlikely that a condition code value
5479 would be a useful iterator anyway. */
5480 if (loops_enclosed == 1
5481 && GET_MODE_CLASS (mode) != MODE_CC
5482 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5483 {
5484 /* Possible bug here? Perhaps we don't know the mode of X. */
5485 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5486 *mult_val = const0_rtx;
5487 return 1;
5488 }
5489 else
5490 return 0;
5491
5492 case SIGN_EXTEND:
5493 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5494 dest_reg, p, inc_val, mult_val);
5495
5496 case ASHIFTRT:
5497 /* Similar, since this can be a sign extension. */
5498 for (insn = PREV_INSN (p);
5499 (insn && GET_CODE (insn) == NOTE
5500 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5501 insn = PREV_INSN (insn))
5502 ;
5503
5504 if (insn)
5505 set = single_set (insn);
5506
5507 if (set && SET_DEST (set) == XEXP (x, 0)
5508 && GET_CODE (XEXP (x, 1)) == CONST_INT
5509 && INTVAL (XEXP (x, 1)) >= 0
5510 && GET_CODE (SET_SRC (set)) == ASHIFT
5511 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5512 return basic_induction_var (XEXP (SET_SRC (set), 0),
5513 GET_MODE (XEXP (x, 0)),
5514 dest_reg, insn, inc_val, mult_val);
5515 return 0;
5516
5517 default:
5518 return 0;
5519 }
5520 }
5521 \f
5522 /* A general induction variable (giv) is any quantity that is a linear
5523 function of a basic induction variable,
5524 i.e. giv = biv * mult_val + add_val.
5525 The coefficients can be any loop invariant quantity.
5526 A giv need not be computed directly from the biv;
5527 it can be computed by way of other givs. */
5528
5529 /* Determine whether X computes a giv.
5530 If it does, return a nonzero value
5531 which is the benefit from eliminating the computation of X;
5532 set *SRC_REG to the register of the biv that it is computed from;
5533 set *ADD_VAL and *MULT_VAL to the coefficients,
5534 such that the value of X is biv * mult + add. */
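/* For example (hypothetical register numbers), if (reg 100) is a biv, then
   the address computation (plus (mult (reg 100) (const_int 4)) (const_int 8))
   is a giv with *SRC_REG = (reg 100), *MULT_VAL = (const_int 4) and
   *ADD_VAL = (const_int 8) -- the usual shape of a word-sized array access
   at a constant offset.  */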
5535
5536 static int
5537 general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
5538 rtx x;
5539 rtx *src_reg;
5540 rtx *add_val;
5541 rtx *mult_val;
5542 int is_addr;
5543 int *pbenefit;
5544 {
5545 rtx orig_x = x;
5546 char *storage;
5547
5548 /* If this is an invariant, forget it, it isn't a giv. */
5549 if (invariant_p (x) == 1)
5550 return 0;
5551
5552 /* See if the expression could be a giv and get its form.
5553 Mark our place on the obstack in case we don't find a giv. */
5554 storage = (char *) oballoc (0);
5555 *pbenefit = 0;
5556 x = simplify_giv_expr (x, pbenefit);
5557 if (x == 0)
5558 {
5559 obfree (storage);
5560 return 0;
5561 }
5562
5563 switch (GET_CODE (x))
5564 {
5565 case USE:
5566 case CONST_INT:
5567 /* Since this is now an invariant and wasn't before, it must be a giv
5568 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5569 with. */
5570 *src_reg = loop_iv_list->biv->dest_reg;
5571 *mult_val = const0_rtx;
5572 *add_val = x;
5573 break;
5574
5575 case REG:
5576 /* This is equivalent to a BIV. */
5577 *src_reg = x;
5578 *mult_val = const1_rtx;
5579 *add_val = const0_rtx;
5580 break;
5581
5582 case PLUS:
5583 /* Either (plus (biv) (invar)) or
5584 (plus (mult (biv) (invar_1)) (invar_2)). */
5585 if (GET_CODE (XEXP (x, 0)) == MULT)
5586 {
5587 *src_reg = XEXP (XEXP (x, 0), 0);
5588 *mult_val = XEXP (XEXP (x, 0), 1);
5589 }
5590 else
5591 {
5592 *src_reg = XEXP (x, 0);
5593 *mult_val = const1_rtx;
5594 }
5595 *add_val = XEXP (x, 1);
5596 break;
5597
5598 case MULT:
5599 /* ADD_VAL is zero. */
5600 *src_reg = XEXP (x, 0);
5601 *mult_val = XEXP (x, 1);
5602 *add_val = const0_rtx;
5603 break;
5604
5605 default:
5606 abort ();
5607 }
5608
5609 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5610 one unless they are CONST_INT). */
5611 if (GET_CODE (*add_val) == USE)
5612 *add_val = XEXP (*add_val, 0);
5613 if (GET_CODE (*mult_val) == USE)
5614 *mult_val = XEXP (*mult_val, 0);
5615
5616 if (is_addr)
5617 {
5618 #ifdef ADDRESS_COST
5619 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
5620 #else
5621 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
5622 #endif
5623 }
5624 else
5625 *pbenefit += rtx_cost (orig_x, SET);
5626
5627 /* Always return true if this is a giv so it will be detected as such,
5628 even if the benefit is zero or negative. This allows elimination
5629 of bivs that might otherwise not be eliminated. */
5630 return 1;
5631 }
5632 \f
5633 /* Given an expression, X, try to form it as a linear function of a biv.
5634 We will canonicalize it to be of the form
5635 (plus (mult (BIV) (invar_1))
5636 (invar_2))
5637 with possible degeneracies.
5638
5639 The invariant expressions must each be of a form that can be used as a
5640 machine operand. We surround them with a USE rtx (a hack, but localized
5641 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5642 routine; it is the caller's responsibility to strip them.
5643
5644 If no such canonicalization is possible (i.e., two biv's are used or an
5645 expression that is neither invariant nor a biv or giv), this routine
5646 returns 0.
5647
5648 For a non-zero return, the result will have a code of CONST_INT, USE,
5649 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5650
5651 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
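/* Two illustrative rewrites performed below: (ashift X (const_int 2))
   becomes (mult X (const_int 4)), and (minus A B) becomes
   (plus A (mult B (const_int -1))); the PLUS and MULT cases then
   reassociate everything toward the canonical
   (plus (mult (BIV) (invar_1)) (invar_2)) form.  */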
5652
5653 static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
5654 static rtx sge_plus_constant PROTO ((rtx, rtx));
5655
5656 static rtx
5657 simplify_giv_expr (x, benefit)
5658 rtx x;
5659 int *benefit;
5660 {
5661 enum machine_mode mode = GET_MODE (x);
5662 rtx arg0, arg1;
5663 rtx tem;
5664
5665 /* If this is not an integer mode, or if we cannot do arithmetic in this
5666 mode, this can't be a giv. */
5667 if (mode != VOIDmode
5668 && (GET_MODE_CLASS (mode) != MODE_INT
5669 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5670 return NULL_RTX;
5671
5672 switch (GET_CODE (x))
5673 {
5674 case PLUS:
5675 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5676 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5677 if (arg0 == 0 || arg1 == 0)
5678 return NULL_RTX;
5679
5680 /* Put constant last, CONST_INT last if both constant. */
5681 if ((GET_CODE (arg0) == USE
5682 || GET_CODE (arg0) == CONST_INT)
5683 && ! ((GET_CODE (arg0) == USE
5684 && GET_CODE (arg1) == USE)
5685 || GET_CODE (arg1) == CONST_INT))
5686 tem = arg0, arg0 = arg1, arg1 = tem;
5687
5688 /* Handle addition of zero, then addition of an invariant. */
5689 if (arg1 == const0_rtx)
5690 return arg0;
5691 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5692 switch (GET_CODE (arg0))
5693 {
5694 case CONST_INT:
5695 case USE:
5696 /* Adding two invariants must result in an invariant, so enclose
5697 addition operation inside a USE and return it. */
5698 if (GET_CODE (arg0) == USE)
5699 arg0 = XEXP (arg0, 0);
5700 if (GET_CODE (arg1) == USE)
5701 arg1 = XEXP (arg1, 0);
5702
5703 if (GET_CODE (arg0) == CONST_INT)
5704 tem = arg0, arg0 = arg1, arg1 = tem;
5705 if (GET_CODE (arg1) == CONST_INT)
5706 tem = sge_plus_constant (arg0, arg1);
5707 else
5708 tem = sge_plus (mode, arg0, arg1);
5709
5710 if (GET_CODE (tem) != CONST_INT)
5711 tem = gen_rtx_USE (mode, tem);
5712 return tem;
5713
5714 case REG:
5715 case MULT:
5716 /* biv + invar or mult + invar. Return sum. */
5717 return gen_rtx_PLUS (mode, arg0, arg1);
5718
5719 case PLUS:
5720 /* (a + invar_1) + invar_2. Associate. */
5721 return simplify_giv_expr (
5722 gen_rtx_PLUS (mode, XEXP (arg0, 0),
5723 gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
5724 benefit);
5725
5726 default:
5727 abort ();
5728 }
5729
5730 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5731 MULT to reduce cases. */
5732 if (GET_CODE (arg0) == REG)
5733 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5734 if (GET_CODE (arg1) == REG)
5735 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5736
5737 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5738 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5739 Recurse to associate the second PLUS. */
5740 if (GET_CODE (arg1) == MULT)
5741 tem = arg0, arg0 = arg1, arg1 = tem;
5742
5743 if (GET_CODE (arg1) == PLUS)
5744 return simplify_giv_expr (gen_rtx_PLUS (mode,
5745 gen_rtx_PLUS (mode, arg0,
5746 XEXP (arg1, 0)),
5747 XEXP (arg1, 1)),
5748 benefit);
5749
5750 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5751 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5752 return NULL_RTX;
5753
5754 if (!rtx_equal_p (arg0, arg1))
5755 return NULL_RTX;
5756
5757 return simplify_giv_expr (gen_rtx_MULT (mode,
5758 XEXP (arg0, 0),
5759 gen_rtx_PLUS (mode,
5760 XEXP (arg0, 1),
5761 XEXP (arg1, 1))),
5762 benefit);
5763
5764 case MINUS:
5765 /* Handle "a - b" as "a + b * (-1)". */
5766 return simplify_giv_expr (gen_rtx_PLUS (mode,
5767 XEXP (x, 0),
5768 gen_rtx_MULT (mode, XEXP (x, 1),
5769 constm1_rtx)),
5770 benefit);
5771
5772 case MULT:
5773 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5774 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5775 if (arg0 == 0 || arg1 == 0)
5776 return NULL_RTX;
5777
5778 /* Put constant last, CONST_INT last if both constant. */
5779 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5780 && GET_CODE (arg1) != CONST_INT)
5781 tem = arg0, arg0 = arg1, arg1 = tem;
5782
5783 /* If second argument is not now constant, not giv. */
5784 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5785 return NULL_RTX;
5786
5787 /* Handle multiply by 0 or 1. */
5788 if (arg1 == const0_rtx)
5789 return const0_rtx;
5790
5791 else if (arg1 == const1_rtx)
5792 return arg0;
5793
5794 switch (GET_CODE (arg0))
5795 {
5796 case REG:
5797 /* biv * invar. Done. */
5798 return gen_rtx_MULT (mode, arg0, arg1);
5799
5800 case CONST_INT:
5801 /* Product of two constants. */
5802 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5803
5804 case USE:
5805 /* invar * invar. It is a giv, but very few of these will
5806 actually pay off, so limit to simple registers. */
5807 if (GET_CODE (arg1) != CONST_INT)
5808 return NULL_RTX;
5809
5810 arg0 = XEXP (arg0, 0);
5811 if (GET_CODE (arg0) == REG)
5812 tem = gen_rtx_MULT (mode, arg0, arg1);
5813 else if (GET_CODE (arg0) == MULT
5814 && GET_CODE (XEXP (arg0, 0)) == REG
5815 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
5816 {
5817 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
5818 GEN_INT (INTVAL (XEXP (arg0, 1))
5819 * INTVAL (arg1)));
5820 }
5821 else
5822 return NULL_RTX;
5823 return gen_rtx_USE (mode, tem);
5824
5825 case MULT:
5826 /* (a * invar_1) * invar_2. Associate. */
5827 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5828 gen_rtx_MULT (mode,
5829 XEXP (arg0, 1),
5830 arg1)),
5831 benefit);
5832
5833 case PLUS:
5834 /* (a + invar_1) * invar_2. Distribute. */
5835 return simplify_giv_expr (gen_rtx_PLUS (mode,
5836 gen_rtx_MULT (mode,
5837 XEXP (arg0, 0),
5838 arg1),
5839 gen_rtx_MULT (mode,
5840 XEXP (arg0, 1),
5841 arg1)),
5842 benefit);
5843
5844 default:
5845 abort ();
5846 }
5847
5848 case ASHIFT:
5849 /* Shift by constant is multiply by power of two. */
5850 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5851 return 0;
5852
5853 return simplify_giv_expr (gen_rtx_MULT (mode,
5854 XEXP (x, 0),
5855 GEN_INT ((HOST_WIDE_INT) 1
5856 << INTVAL (XEXP (x, 1)))),
5857 benefit);
5858
5859 case NEG:
5860 /* "-a" is "a * (-1)" */
5861 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5862 benefit);
5863
5864 case NOT:
5865 /* "~a" is "-a - 1". Silly, but easy. */
5866 return simplify_giv_expr (gen_rtx_MINUS (mode,
5867 gen_rtx_NEG (mode, XEXP (x, 0)),
5868 const1_rtx),
5869 benefit);
5870
5871 case USE:
5872 /* Already in proper form for invariant. */
5873 return x;
5874
5875 case REG:
5876 /* If this is a new register, we can't deal with it. */
5877 if (REGNO (x) >= max_reg_before_loop)
5878 return 0;
5879
5880 /* Check for biv or giv. */
5881 switch (reg_iv_type[REGNO (x)])
5882 {
5883 case BASIC_INDUCT:
5884 return x;
5885 case GENERAL_INDUCT:
5886 {
5887 struct induction *v = reg_iv_info[REGNO (x)];
5888
5889 /* Form expression from giv and add benefit. Ensure this giv
5890 can derive another and subtract any needed adjustment if so. */
5891 *benefit += v->benefit;
5892 if (v->cant_derive)
5893 return 0;
5894
5895 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5896 v->mult_val),
5897 v->add_val);
5898 if (v->derive_adjustment)
5899 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5900 return simplify_giv_expr (tem, benefit);
5901 }
5902
5903 default:
5904 /* If it isn't an induction variable, and it is invariant, we
5905 may be able to simplify things further by looking through
5906 the bits we just moved outside the loop. */
5907 if (invariant_p (x) == 1)
5908 {
5909 struct movable *m;
5910
5911 for (m = the_movables; m ; m = m->next)
5912 if (rtx_equal_p (x, m->set_dest))
5913 {
5914 /* Ok, we found a match. Substitute and simplify. */
5915
5916 /* If we match another movable, we must use that, as
5917 this one is going away. */
5918 if (m->match)
5919 return simplify_giv_expr (m->match->set_dest, benefit);
5920
5921 /* If consec is non-zero, this is a member of a group of
5922 instructions that were moved together. We handle this
5923 case only to the point of seeking to the last insn and
5924 looking for a REG_EQUAL. Fail if we don't find one. */
5925 if (m->consec != 0)
5926 {
5927 int i = m->consec;
5928 tem = m->insn;
5929 do { tem = NEXT_INSN (tem); } while (--i > 0);
5930
5931 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
5932 if (tem)
5933 tem = XEXP (tem, 0);
5934 }
5935 else
5936 {
5937 tem = single_set (m->insn);
5938 if (tem)
5939 tem = SET_SRC (tem);
5940 }
5941
5942 if (tem)
5943 {
5944 /* What we are most interested in is pointer
5945 arithmetic on invariants -- only take
5946 patterns we may be able to do something with. */
5947 if (GET_CODE (tem) == PLUS
5948 || GET_CODE (tem) == MULT
5949 || GET_CODE (tem) == ASHIFT
5950 || GET_CODE (tem) == CONST_INT
5951 || GET_CODE (tem) == SYMBOL_REF)
5952 {
5953 tem = simplify_giv_expr (tem, benefit);
5954 if (tem)
5955 return tem;
5956 }
5957 else if (GET_CODE (tem) == CONST
5958 && GET_CODE (XEXP (tem, 0)) == PLUS
5959 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
5960 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
5961 {
5962 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
5963 if (tem)
5964 return tem;
5965 }
5966 }
5967 break;
5968 }
5969 }
5970 break;
5971 }
5972
5973 /* Fall through to general case. */
5974 default:
5975 /* If invariant, return as USE (unless CONST_INT).
5976 Otherwise, not giv. */
5977 if (GET_CODE (x) == USE)
5978 x = XEXP (x, 0);
5979
5980 if (invariant_p (x) == 1)
5981 {
5982 if (GET_CODE (x) == CONST_INT)
5983 return x;
5984 if (GET_CODE (x) == CONST
5985 && GET_CODE (XEXP (x, 0)) == PLUS
5986 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5987 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5988 x = XEXP (x, 0);
5989 return gen_rtx_USE (mode, x);
5990 }
5991 else
5992 return 0;
5993 }
5994 }
5995
5996 /* This routine folds invariants such that there is only ever one
5997 CONST_INT in the summation. It is only used by simplify_giv_expr. */
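/* For instance, if X is (plus (reg R) (const_int 3)) and C is (const_int 5),
   the result is (plus (reg R) (const_int 8)); the two constants are folded
   instead of being left as a nested summation.  */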
5998
5999 static rtx
6000 sge_plus_constant (x, c)
6001 rtx x, c;
6002 {
6003 if (GET_CODE (x) == CONST_INT)
6004 return GEN_INT (INTVAL (x) + INTVAL (c));
6005 else if (GET_CODE (x) != PLUS)
6006 return gen_rtx_PLUS (GET_MODE (x), x, c);
6007 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6008 {
6009 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6010 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6011 }
6012 else if (GET_CODE (XEXP (x, 0)) == PLUS
6013 || GET_CODE (XEXP (x, 1)) != PLUS)
6014 {
6015 return gen_rtx_PLUS (GET_MODE (x),
6016 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6017 }
6018 else
6019 {
6020 return gen_rtx_PLUS (GET_MODE (x),
6021 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6022 }
6023 }
6024
6025 static rtx
6026 sge_plus (mode, x, y)
6027 enum machine_mode mode;
6028 rtx x, y;
6029 {
6030 while (GET_CODE (y) == PLUS)
6031 {
6032 rtx a = XEXP (y, 0);
6033 if (GET_CODE (a) == CONST_INT)
6034 x = sge_plus_constant (x, a);
6035 else
6036 x = gen_rtx_PLUS (mode, x, a);
6037 y = XEXP (y, 1);
6038 }
6039 if (GET_CODE (y) == CONST_INT)
6040 x = sge_plus_constant (x, y);
6041 else
6042 x = gen_rtx_PLUS (mode, x, y);
6043 return x;
6044 }
6045 \f
6046 /* Help detect a giv that is calculated by several consecutive insns;
6047 for example,
6048 giv = biv * M
6049 giv = giv + A
6050 The caller has already identified the first insn P as having a giv as dest;
6051 we check that all other insns that set the same register follow
6052 immediately after P, that they alter nothing else,
6053 and that the result of the last is still a giv.
6054
6055 The value is 0 if the reg set in P is not really a giv.
6056 Otherwise, the value is the amount gained by eliminating
6057 all the consecutive insns that compute the value.
6058
6059 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6060 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6061
6062 The coefficients of the ultimate giv value are stored in
6063 *MULT_VAL and *ADD_VAL. */
6064
6065 static int
6066 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6067 add_val, mult_val)
6068 int first_benefit;
6069 rtx p;
6070 rtx src_reg;
6071 rtx dest_reg;
6072 rtx *add_val;
6073 rtx *mult_val;
6074 {
6075 int count;
6076 enum rtx_code code;
6077 int benefit;
6078 rtx temp;
6079 rtx set;
6080
6081 /* Indicate that this is a giv so that we can update the value produced in
6082 each insn of the multi-insn sequence.
6083
6084 This induction structure will be used only by the call to
6085 general_induction_var below, so we can allocate it on our stack.
6086 If this is a giv, our caller will replace the induct var entry with
6087 a new induction structure. */
6088 struct induction *v
6089 = (struct induction *) alloca (sizeof (struct induction));
6090 v->src_reg = src_reg;
6091 v->mult_val = *mult_val;
6092 v->add_val = *add_val;
6093 v->benefit = first_benefit;
6094 v->cant_derive = 0;
6095 v->derive_adjustment = 0;
6096
6097 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
6098 reg_iv_info[REGNO (dest_reg)] = v;
6099
6100 count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
6101
6102 while (count > 0)
6103 {
6104 p = NEXT_INSN (p);
6105 code = GET_CODE (p);
6106
6107 /* If libcall, skip to end of call sequence. */
6108 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6109 p = XEXP (temp, 0);
6110
6111 if (code == INSN
6112 && (set = single_set (p))
6113 && GET_CODE (SET_DEST (set)) == REG
6114 && SET_DEST (set) == dest_reg
6115 && (general_induction_var (SET_SRC (set), &src_reg,
6116 add_val, mult_val, 0, &benefit)
6117 /* Giv created by equivalent expression. */
6118 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6119 && general_induction_var (XEXP (temp, 0), &src_reg,
6120 add_val, mult_val, 0, &benefit)))
6121 && src_reg == v->src_reg)
6122 {
6123 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6124 benefit += libcall_benefit (p);
6125
6126 count--;
6127 v->mult_val = *mult_val;
6128 v->add_val = *add_val;
6129 v->benefit = benefit;
6130 }
6131 else if (code != NOTE)
6132 {
6133 /* Allow insns that set something other than this giv to a
6134 constant. Such insns are needed on machines which cannot
6135 include long constants and should not disqualify a giv. */
6136 if (code == INSN
6137 && (set = single_set (p))
6138 && SET_DEST (set) != dest_reg
6139 && CONSTANT_P (SET_SRC (set)))
6140 continue;
6141
6142 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
6143 return 0;
6144 }
6145 }
6146
6147 return v->benefit;
6148 }
6149 \f
6150 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6151 represented by G1. If no such expression can be found, or it is clear that
6152 it cannot possibly be a valid address, 0 is returned.
6153
6154 To perform the computation, we note that
6155 G1 = x * v + a and
6156 G2 = y * v + b
6157 where `v' is the biv.
6158
6159 So G2 = (y/x) * G1 + (b - a*y/x).
6160
6161 Note that MULT = y/x.
6162
6163 Update: A and B are now allowed to be additive expressions such that
6164 B contains all variables in A. That is, computing B-A will not require
6165 subtracting variables. */
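/* A numeric example with arbitrary coefficients: for G1 = 2*v + 1 and
   G2 = 6*v + 7, MULT = 6/2 = 3 and the additive part is 7 - 1*3 = 4,
   so G2 = 3 * G1 + 4.  Substituting G1 back in gives
   3*(2*v + 1) + 4 = 6*v + 7, as required.  */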
6166
6167 static rtx
6168 express_from_1 (a, b, mult)
6169 rtx a, b, mult;
6170 {
6171 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6172
6173 if (mult == const0_rtx)
6174 return b;
6175
6176 /* If MULT is not 1, we cannot handle A with non-constants, since we
6177 would then be required to subtract multiples of the registers in A.
6178 This is theoretically possible, and may even apply to some Fortran
6179 constructs, but it is a lot of work and we do not attempt it here. */
6180
6181 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6182 return NULL_RTX;
6183
6184 /* In general these structures are sorted top to bottom (down the PLUS
6185 chain), but not left to right across the PLUS. If B is a higher
6186 order giv than A, we can strip one level and recurse. If A is higher
6187 order, we'll eventually bail out, but won't know that until the end.
6188 If they are the same, we'll strip one level around this loop. */
6189
6190 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6191 {
6192 rtx ra, rb, oa, ob, tmp;
6193
6194 ra = XEXP (a, 0), oa = XEXP (a, 1);
6195 if (GET_CODE (ra) == PLUS)
6196 tmp = ra, ra = oa, oa = tmp;
6197
6198 rb = XEXP (b, 0), ob = XEXP (b, 1);
6199 if (GET_CODE (rb) == PLUS)
6200 tmp = rb, rb = ob, ob = tmp;
6201
6202 if (rtx_equal_p (ra, rb))
6203 /* We matched: remove one reg completely. */
6204 a = oa, b = ob;
6205 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6206 /* An alternate match. */
6207 a = oa, b = rb;
6208 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6209 /* An alternate match. */
6210 a = ra, b = ob;
6211 else
6212 {
6213 /* Indicates an extra register in B. Strip one level from B and
6214 recurse, hoping B was the higher order expression. */
6215 ob = express_from_1 (a, ob, mult);
6216 if (ob == NULL_RTX)
6217 return NULL_RTX;
6218 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6219 }
6220 }
6221
6222 /* Here we are at the last level of A, go through the cases hoping to
6223 get rid of everything but a constant. */
6224
6225 if (GET_CODE (a) == PLUS)
6226 {
6227 rtx ra, oa;
6228
6229 ra = XEXP (a, 0), oa = XEXP (a, 1);
6230 if (rtx_equal_p (oa, b))
6231 oa = ra;
6232 else if (!rtx_equal_p (ra, b))
6233 return NULL_RTX;
6234
6235 if (GET_CODE (oa) != CONST_INT)
6236 return NULL_RTX;
6237
6238 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6239 }
6240 else if (GET_CODE (a) == CONST_INT)
6241 {
6242 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6243 }
6244 else if (GET_CODE (b) == PLUS)
6245 {
6246 if (rtx_equal_p (a, XEXP (b, 0)))
6247 return XEXP (b, 1);
6248 else if (rtx_equal_p (a, XEXP (b, 1)))
6249 return XEXP (b, 0);
6250 else
6251 return NULL_RTX;
6252 }
6253 else if (rtx_equal_p (a, b))
6254 return const0_rtx;
6255
6256 return NULL_RTX;
6257 }
6258
6259 static rtx
6260 express_from (g1, g2)
6261 struct induction *g1, *g2;
6262 {
6263 rtx mult, add;
6264
6265 /* The value that G1 will be multiplied by must be a constant integer. Also,
6266 the only chance we have of getting a valid address is if a*y/x (see above
6267 for notation) is also an integer. */
6268 if (GET_CODE (g1->mult_val) == CONST_INT
6269 && GET_CODE (g2->mult_val) == CONST_INT)
6270 {
6271 if (g1->mult_val == const0_rtx
6272 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6273 return NULL_RTX;
6274 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6275 }
6276 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6277 mult = const1_rtx;
6278 else
6279 {
6280 /* ??? Find out if the one is a multiple of the other? */
6281 return NULL_RTX;
6282 }
6283
6284 add = express_from_1 (g1->add_val, g2->add_val, mult);
6285 if (add == NULL_RTX)
6286 return NULL_RTX;
6287
6288 /* Form simplified final result. */
6289 if (mult == const0_rtx)
6290 return add;
6291 else if (mult == const1_rtx)
6292 mult = g1->dest_reg;
6293 else
6294 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6295
6296 if (add == const0_rtx)
6297 return mult;
6298 else
6299 return gen_rtx_PLUS (g2->mode, mult, add);
6300 }
6301 \f
6302 /* Return an rtx if giv G2 can be combined with G1. This means that G2 can use
6303 (either directly or via an address expression) a register used to represent
6304 G1. The returned rtx is an expression for G2 in terms of that register
6305 (normally just g1->dest_reg); the caller stores it in g2->new_reg. */
6306
6307 static rtx
6308 combine_givs_p (g1, g2)
6309 struct induction *g1, *g2;
6310 {
6311 rtx tem = express_from (g1, g2);
6312
6313 /* If these givs are identical, they can be combined. We use the results
6314 of express_from because the addends are not in a canonical form, so
6315 rtx_equal_p is a weaker test. */
6316 if (tem == const0_rtx)
6317 {
6318 return g1->dest_reg;
6319 }
6320
6321 /* If G2 can be expressed as a function of G1 and that function is valid
6322 as an address and no more expensive than using a register for G2,
6323 the expression of G2 in terms of G1 can be used. */
6324 if (tem != NULL_RTX
6325 && g2->giv_type == DEST_ADDR
6326 && memory_address_p (g2->mem_mode, tem)
6327 /* ??? Loses, especially with -fforce-addr, where *g2->location
6328 will always be a register, and so anything more complicated
6329 gets discarded. */
6330 #if 0
6331 #ifdef ADDRESS_COST
6332 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6333 #else
6334 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6335 #endif
6336 #endif
6337 )
6338 {
6339 return tem;
6340 }
6341
6342 return NULL_RTX;
6343 }
6344 \f
6345 struct combine_givs_stats
6346 {
6347 int giv_number;
6348 int total_benefit;
6349 };
6350
6351 static int
6352 cmp_combine_givs_stats (x, y)
6353 struct combine_givs_stats *x, *y;
6354 {
6355 int d;
6356 d = y->total_benefit - x->total_benefit;
6357 /* Stabilize the sort. */
6358 if (!d)
6359 d = x->giv_number - y->giv_number;
6360 return d;
6361 }
6362
6363 /* If one of these givs is a DEST_REG that was only used once, by the
6364 other giv, this is actually a single use. Return 0 if this is not
6365 the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
6366
6367 static int
6368 combine_givs_used_once (g1, g2)
6369 struct induction *g1, *g2;
6370 {
6371 if (g1->giv_type == DEST_REG
6372 && VARRAY_INT (n_times_used, REGNO (g1->dest_reg)) == 1
6373 && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
6374 return -1;
6375
6376 if (g2->giv_type == DEST_REG
6377 && VARRAY_INT (n_times_used, REGNO (g2->dest_reg)) == 1
6378 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
6379 return 1;
6380
6381 return 0;
6382 }
6383
6384 static int
6385 combine_givs_benefit_from (g1, g2)
6386 struct induction *g1, *g2;
6387 {
6388 int tmp = combine_givs_used_once (g1, g2);
6389 if (tmp < 0)
6390 return 0;
6391 else if (tmp > 0)
6392 return g2->benefit - g1->benefit;
6393 else
6394 return g2->benefit;
6395 }
6396
6397 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6398 any other. If so, point SAME to the giv combined with and set NEW_REG to
6399 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6400 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
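/* To make the scoring below concrete (with made-up numbers): a giv of
   benefit 10 with no constant addend starts at 10 + 1; if it can be
   combined with two other givs of benefit 6 each, its total becomes
   10 + 1 + (6 + 3) + (6 + 3) = 29, the 3 per mate being the extra
   weight for being reused more times.  */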
6401
6402 static void
6403 combine_givs (bl)
6404 struct iv_class *bl;
6405 {
6406 struct induction *g1, *g2, **giv_array;
6407 int i, j, k, giv_count;
6408 struct combine_givs_stats *stats;
6409 rtx *can_combine;
6410
6411 /* Count givs, because bl->giv_count is incorrect here. */
6412 giv_count = 0;
6413 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6414 if (!g1->ignore)
6415 giv_count++;
6416
6417 giv_array
6418 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6419 i = 0;
6420 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6421 if (!g1->ignore)
6422 giv_array[i++] = g1;
6423
6424 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
6425 bzero ((char *) stats, giv_count * sizeof (*stats));
6426
6427 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
6428 bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));
6429
6430 for (i = 0; i < giv_count; i++)
6431 {
6432 int this_benefit;
6433
6434 g1 = giv_array[i];
6435
6436 this_benefit = g1->benefit;
6437 /* Add an additional weight for zero addends. */
6438 if (g1->no_const_addval)
6439 this_benefit += 1;
6440 for (j = 0; j < giv_count; j++)
6441 {
6442 rtx this_combine;
6443
6444 g2 = giv_array[j];
6445 if (g1 != g2
6446 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6447 {
6448 can_combine[i*giv_count + j] = this_combine;
6449 this_benefit += combine_givs_benefit_from (g1, g2);
6450 /* Add an additional weight for being reused more times. */
6451 this_benefit += 3;
6452 }
6453 }
6454 stats[i].giv_number = i;
6455 stats[i].total_benefit = this_benefit;
6456 }
6457
6458 /* Iterate, combining until we can't. */
6459 restart:
6460 qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
6461
6462 if (loop_dump_stream)
6463 {
6464 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6465 for (k = 0; k < giv_count; k++)
6466 {
6467 g1 = giv_array[stats[k].giv_number];
6468 if (!g1->combined_with && !g1->same)
6469 fprintf (loop_dump_stream, " {%d, %d}",
6470 INSN_UID (giv_array[stats[k].giv_number]->insn),
6471 stats[k].total_benefit);
6472 }
6473 putc ('\n', loop_dump_stream);
6474 }
6475
6476 for (k = 0; k < giv_count; k++)
6477 {
6478 int g1_add_benefit = 0;
6479
6480 i = stats[k].giv_number;
6481 g1 = giv_array[i];
6482
6483 /* If it has already been combined, skip. */
6484 if (g1->combined_with || g1->same)
6485 continue;
6486
6487 for (j = 0; j < giv_count; j++)
6488 {
6489 g2 = giv_array[j];
6490 if (g1 != g2 && can_combine[i*giv_count + j]
6491 /* If it has already been combined, skip. */
6492 && ! g2->same && ! g2->combined_with)
6493 {
6494 int l;
6495
6496 g2->new_reg = can_combine[i*giv_count + j];
6497 g2->same = g1;
6498 g1->combined_with = 1;
6499 if (!combine_givs_used_once (g1, g2))
6500 g1->times_used += 1;
6501 g1->lifetime += g2->lifetime;
6502
6503 g1_add_benefit += combine_givs_benefit_from (g1, g2);
6504
6505 /* ??? The new final_[bg]iv_value code does a much better job
6506 of finding replaceable giv's, and hence this code may no
6507 longer be necessary. */
6508 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6509 g1_add_benefit -= copy_cost;
6510
6511 /* To help optimize the next set of combinations, remove
6512 this giv from the benefits of other potential mates. */
6513 for (l = 0; l < giv_count; ++l)
6514 {
6515 int m = stats[l].giv_number;
6516 if (can_combine[m*giv_count + j])
6517 {
6518 /* Remove additional weight for being reused. */
6519 stats[l].total_benefit -= 3 +
6520 combine_givs_benefit_from (giv_array[m], g2);
6521 }
6522 }
6523
6524 if (loop_dump_stream)
6525 fprintf (loop_dump_stream,
6526 "giv at %d combined with giv at %d\n",
6527 INSN_UID (g2->insn), INSN_UID (g1->insn));
6528 }
6529 }
6530
6531 /* To help optimize the next set of combinations, remove
6532 this giv from the benefits of other potential mates. */
6533 if (g1->combined_with)
6534 {
6535 for (j = 0; j < giv_count; ++j)
6536 {
6537 int m = stats[j].giv_number;
6538 if (can_combine[m*giv_count + j])
6539 {
6540 /* Remove additional weight for being reused. */
6541 stats[j].total_benefit -= 3 +
6542 combine_givs_benefit_from (giv_array[m], g1);
6543 }
6544 }
6545
6546 g1->benefit += g1_add_benefit;
6547
6548 /* We've finished with this giv, and everything it touched.
6549 Restart the combination so that the weights for the
6550 rest of the givs are properly taken into account. */
6551 /* ??? Ideally we would compact the arrays at this point, so
6552 as to not cover old ground. But sanely compacting
6553 can_combine is tricky. */
6554 goto restart;
6555 }
6556 }
6557 }
6558 \f
6559 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
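/* A typical call, shown only for illustration, is the one the
   strength-reduction code makes to initialize a reduced register
   before the loop:
   emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
   v->new_reg, loop_start);  */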
6560
6561 void
6562 emit_iv_add_mult (b, m, a, reg, insert_before)
6563 rtx b; /* initial value of basic induction variable */
6564 rtx m; /* multiplicative constant */
6565 rtx a; /* additive constant */
6566 rtx reg; /* destination register */
6567 rtx insert_before;
6568 {
6569 rtx seq;
6570 rtx result;
6571
6572 /* Prevent unexpected sharing of these rtx. */
6573 a = copy_rtx (a);
6574 b = copy_rtx (b);
6575
6576 /* Increase the lifetime of any invariants moved further in code. */
6577 update_reg_last_use (a, insert_before);
6578 update_reg_last_use (b, insert_before);
6579 update_reg_last_use (m, insert_before);
6580
6581 start_sequence ();
6582 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6583 if (reg != result)
6584 emit_move_insn (reg, result);
6585 seq = gen_sequence ();
6586 end_sequence ();
6587
6588 emit_insn_before (seq, insert_before);
6589
6590 /* It is entirely possible that the expansion created lots of new
6591 registers. Iterate over the sequence we just created and
6592 record them all. */
6593
6594 if (GET_CODE (seq) == SEQUENCE)
6595 {
6596 int i;
6597 for (i = 0; i < XVECLEN (seq, 0); ++i)
6598 {
6599 rtx set = single_set (XVECEXP (seq, 0, i));
6600 if (set && GET_CODE (SET_DEST (set)) == REG)
6601 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6602 }
6603 }
6604 else if (GET_CODE (seq) == SET
6605 && GET_CODE (SET_DEST (seq)) == REG)
6606 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
6607 }
6608 \f
6609 /* Test whether A * B can be computed without
6610 an actual multiply insn. Value is 1 if so. */
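/* For example, multiplying by (const_int 8) usually expands to a single
   shift insn and so is cheap, while multiplying by a large constant with
   many set bits may expand to a real multiply insn or a long shift/add
   sequence and so is not; the exact outcome is target dependent.  */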
6611
6612 static int
6613 product_cheap_p (a, b)
6614 rtx a;
6615 rtx b;
6616 {
6617 int i;
6618 rtx tmp;
6619 struct obstack *old_rtl_obstack = rtl_obstack;
6620 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6621 int win = 1;
6622
6623 /* If only one is constant, make it B. */
6624 if (GET_CODE (a) == CONST_INT)
6625 tmp = a, a = b, b = tmp;
6626
6627 /* If first constant, both constant, so don't need multiply. */
6628 if (GET_CODE (a) == CONST_INT)
6629 return 1;
6630
6631 /* If second not constant, neither is constant, so would need multiply. */
6632 if (GET_CODE (b) != CONST_INT)
6633 return 0;
6634
6635 /* One operand is constant, so might not need multiply insn. Generate the
6636 code for the multiply and see if a call, a multiply insn, or a long sequence
6637 of insns is generated. */
6638
6639 rtl_obstack = &temp_obstack;
6640 start_sequence ();
6641 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6642 tmp = gen_sequence ();
6643 end_sequence ();
6644
6645 if (GET_CODE (tmp) == SEQUENCE)
6646 {
6647 if (XVEC (tmp, 0) == 0)
6648 win = 1;
6649 else if (XVECLEN (tmp, 0) > 3)
6650 win = 0;
6651 else
6652 for (i = 0; i < XVECLEN (tmp, 0); i++)
6653 {
6654 rtx insn = XVECEXP (tmp, 0, i);
6655
6656 if (GET_CODE (insn) != INSN
6657 || (GET_CODE (PATTERN (insn)) == SET
6658 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6659 || (GET_CODE (PATTERN (insn)) == PARALLEL
6660 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6661 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6662 {
6663 win = 0;
6664 break;
6665 }
6666 }
6667 }
6668 else if (GET_CODE (tmp) == SET
6669 && GET_CODE (SET_SRC (tmp)) == MULT)
6670 win = 0;
6671 else if (GET_CODE (tmp) == PARALLEL
6672 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6673 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6674 win = 0;
6675
6676 /* Free any storage we obtained in generating this multiply and restore rtl
6677 allocation to its normal obstack. */
6678 obstack_free (&temp_obstack, storage);
6679 rtl_obstack = old_rtl_obstack;
6680
6681 return win;
6682 }
6683 \f
6684 /* Check to see if loop can be terminated by a "decrement and branch until
6685 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
6686 Also try reversing an increment loop to a decrement loop
6687 to see if the optimization can be performed.
6688 Value is nonzero if optimization was performed. */
6689
6690 /* This is useful even if the architecture doesn't have such an insn,
6691 because it might change a loop which increments from 0 to n to a loop
6692 which decrements from n to 0. A loop that decrements to zero is usually
6693 faster than one that increments from zero. */
6694
6695 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6696 such as approx_final_value, biv_total_increment, loop_iterations, and
6697 final_[bg]iv_value. */
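/* The source-level effect, sketched on an assumed example: a loop such as
   for (i = 0; i < n; i++) body; where i is used only as a counter is
   rewritten to count down, roughly for (i = n; --i >= 0; ) body; which a
   "decrement and branch until zero" instruction implements directly.  */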
6698
6699 static int
6700 check_dbra_loop (loop_end, insn_count, loop_start)
6701 rtx loop_end;
6702 int insn_count;
6703 rtx loop_start;
6704 {
6705 struct iv_class *bl;
6706 rtx reg;
6707 rtx jump_label;
6708 rtx final_value;
6709 rtx start_value;
6710 rtx new_add_val;
6711 rtx comparison;
6712 rtx before_comparison;
6713 rtx p;
6714 rtx jump;
6715 rtx first_compare;
6716 int compare_and_branch;
6717
6718 /* If last insn is a conditional branch, and the insn before tests a
6719 register value, try to optimize it. Otherwise, we can't do anything. */
6720
6721 jump = PREV_INSN (loop_end);
6722 comparison = get_condition_for_loop (jump);
6723 if (comparison == 0)
6724 return 0;
6725
6726 /* Try to compute whether the compare/branch at the loop end is one or
6727 two instructions. */
6728 get_condition (jump, &first_compare);
6729 if (first_compare == jump)
6730 compare_and_branch = 1;
6731 else if (first_compare == prev_nonnote_insn (jump))
6732 compare_and_branch = 2;
6733 else
6734 return 0;
6735
6736 /* Check all of the bivs to see if the compare uses one of them.
6737 Skip bivs set more than once because we can't guarantee that
6738 the biv will be zero on the last iteration. Also skip if the biv is
6739 used between its update and the test insn. */
6740
6741 for (bl = loop_iv_list; bl; bl = bl->next)
6742 {
6743 if (bl->biv_count == 1
6744 && bl->biv->dest_reg == XEXP (comparison, 0)
6745 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6746 first_compare))
6747 break;
6748 }
6749
6750 if (! bl)
6751 return 0;
6752
6753 /* Look for the case where the basic induction variable is always
6754 nonnegative, and equals zero on the last iteration.
6755 In this case, add a reg_note REG_NONNEG, which allows the
6756 m68k DBRA instruction to be used. */
6757
6758 if (((GET_CODE (comparison) == GT
6759 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6760 && INTVAL (XEXP (comparison, 1)) == -1)
6761 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6762 && GET_CODE (bl->biv->add_val) == CONST_INT
6763 && INTVAL (bl->biv->add_val) < 0)
6764 {
6765 /* Initial value must be greater than 0,
6766 init_val % -dec_value == 0 to ensure that it equals zero on
6767 the last iteration. */
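/* E.g. init_val 12 with add_val -4 steps through 12, 8, 4, 0, so
   12 % 4 == 0 guarantees the biv is exactly zero when the loop exits.  */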
6768
6769 if (GET_CODE (bl->initial_value) == CONST_INT
6770 && INTVAL (bl->initial_value) > 0
6771 && (INTVAL (bl->initial_value)
6772 % (-INTVAL (bl->biv->add_val))) == 0)
6773 {
6774 /* register always nonnegative, add REG_NOTE to branch */
6775 REG_NOTES (PREV_INSN (loop_end))
6776 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6777 REG_NOTES (PREV_INSN (loop_end)));
6778 bl->nonneg = 1;
6779
6780 return 1;
6781 }
6782
6783 /* If the decrement is 1 and the value was tested as >= 0 before
6784 the loop, then we can safely optimize. */
6785 for (p = loop_start; p; p = PREV_INSN (p))
6786 {
6787 if (GET_CODE (p) == CODE_LABEL)
6788 break;
6789 if (GET_CODE (p) != JUMP_INSN)
6790 continue;
6791
6792 before_comparison = get_condition_for_loop (p);
6793 if (before_comparison
6794 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6795 && GET_CODE (before_comparison) == LT
6796 && XEXP (before_comparison, 1) == const0_rtx
6797 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6798 && INTVAL (bl->biv->add_val) == -1)
6799 {
6800 REG_NOTES (PREV_INSN (loop_end))
6801 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6802 REG_NOTES (PREV_INSN (loop_end)));
6803 bl->nonneg = 1;
6804
6805 return 1;
6806 }
6807 }
6808 }
6809 else if (INTVAL (bl->biv->add_val) > 0)
6810 {
6811 /* Try to change inc to dec, so can apply above optimization. */
6812 /* Can do this if:
6813 all registers modified are induction variables or invariant,
6814 all memory references have non-overlapping addresses
6815 (obviously true if only one write);
6816 we allow 2 insns for the compare/jump at the end of the loop. */
6817 /* Also, we must avoid any instructions which use both the reversed
6818 biv and another biv. Such instructions will fail if the loop is
6819 reversed. We meet this condition by requiring that either
6820 no_use_except_counting is true, or else that there is only
6821 one biv. */
6822 int num_nonfixed_reads = 0;
6823 /* 1 if the iteration var is used only to count iterations. */
6824 int no_use_except_counting = 0;
6825 /* 1 if the loop has no memory store, or it has a single memory store
6826 which is reversible. */
6827 int reversible_mem_store = 1;
6828
6829 if (bl->giv_count == 0
6830 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6831 {
6832 rtx bivreg = regno_reg_rtx[bl->regno];
6833
6834 /* If there are no givs for this biv, and the only exit is the
6835 fall through at the end of the loop, then
6836 see if perhaps there are no uses except to count. */
6837 no_use_except_counting = 1;
6838 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6839 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6840 {
6841 rtx set = single_set (p);
6842
6843 if (set && GET_CODE (SET_DEST (set)) == REG
6844 && REGNO (SET_DEST (set)) == bl->regno)
6845 /* An insn that sets the biv is okay. */
6846 ;
6847 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6848 || p == prev_nonnote_insn (loop_end))
6849 /* Don't bother about the end test. */
6850 ;
6851 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6852 {
6853 no_use_except_counting = 0;
6854 break;
6855 }
6856 }
6857 }
6858
6859 if (no_use_except_counting)
6860 ; /* no need to worry about MEMs. */
6861 else if (num_mem_sets <= 1)
6862 {
6863 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6864 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6865 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6866
6867 /* If the loop has a single store, and the destination address is
6868 invariant, then we can't reverse the loop, because this address
6869 might then have the wrong value at loop exit.
6870 This would work if the source were invariant also; however, in that
6871 case, the insn should have been moved out of the loop. */
6872
6873 if (num_mem_sets == 1)
6874 reversible_mem_store
6875 = (! unknown_address_altered
6876 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6877 }
6878 else
6879 return 0;
6880
6881 /* This code only acts for innermost loops. Also it simplifies
6882 the memory address check by only reversing loops with
6883 zero or one memory access.
6884 Two memory accesses could involve parts of the same array,
6885 and that can't be reversed.
6886 If the biv is used only for counting, then we don't need to worry
6887 about all these things. */
6888
6889 if ((num_nonfixed_reads <= 1
6890 && !loop_has_call
6891 && !loop_has_volatile
6892 && reversible_mem_store
6893 && (bl->giv_count + bl->biv_count + num_mem_sets
6894 + num_movables + compare_and_branch == insn_count)
6895 && (bl == loop_iv_list && bl->next == 0))
6896 || no_use_except_counting)
6897 {
6898 rtx tem;
6899
6900 /* Loop can be reversed. */
6901 if (loop_dump_stream)
6902 fprintf (loop_dump_stream, "Can reverse loop\n");
6903
6904 /* Now check other conditions:
6905
6906 The increment must be a constant, as must the initial value,
6907 and the comparison code must be LT.
6908
6909 This test can probably be improved since +/- 1 in the constant
6910 can be obtained by changing LT to LE and vice versa; this is
6911 confusing. */
6912
6913 if (comparison
6914 /* for constants, LE gets turned into LT */
6915 && (GET_CODE (comparison) == LT
6916 || (GET_CODE (comparison) == LE
6917 && no_use_except_counting)))
6918 {
6919 HOST_WIDE_INT add_val, add_adjust, comparison_val;
6920 rtx initial_value, comparison_value;
6921 int nonneg = 0;
6922 enum rtx_code cmp_code;
6923 int comparison_const_width;
6924 unsigned HOST_WIDE_INT comparison_sign_mask;
6925 rtx vtop;
6926
6927 add_val = INTVAL (bl->biv->add_val);
6928 comparison_value = XEXP (comparison, 1);
6929 comparison_const_width
6930 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 1)));
6931 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
6932 comparison_const_width = HOST_BITS_PER_WIDE_INT;
6933 comparison_sign_mask
6934 = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
6935
6936 /* If the comparison value is not a loop invariant, then we
6937 can not reverse this loop.
6938
6939 ??? If the insns which initialize the comparison value as
6940 a whole compute an invariant result, then we could move
6941 them out of the loop and proceed with loop reversal. */
6942 if (!invariant_p (comparison_value))
6943 return 0;
6944
6945 if (GET_CODE (comparison_value) == CONST_INT)
6946 comparison_val = INTVAL (comparison_value);
6947 initial_value = bl->initial_value;
6948
6949 /* Normalize the initial value if it is an integer and
6950 has no other use except as a counter. This will allow
6951 a few more loops to be reversed. */
6952 if (no_use_except_counting
6953 && GET_CODE (comparison_value) == CONST_INT
6954 && GET_CODE (initial_value) == CONST_INT)
6955 {
6956 comparison_val = comparison_val - INTVAL (bl->initial_value);
6957 /* The code below requires comparison_val to be a multiple
6958 of add_val in order to do the loop reversal, so
6959 round up comparison_val to a multiple of add_val.
6960 Since comparison_value is constant, we know that the
6961 current comparison code is LT. */
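/* For instance (illustrative numbers only): with add_val == 4 and a
   normalized comparison_val of 10, the counter takes the values 0, 4,
   8, ..., so the exit test "< 10" is equivalent to "< 12"; the code
   below computes 10 + 4 - 1 == 13, then 13 - 13 % 4 == 12.  */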
6962 comparison_val = comparison_val + add_val - 1;
6963 comparison_val
6964 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
6965 /* We postpone overflow checks for COMPARISON_VAL here;
6966 even if there is an overflow, we might still be able to
6967 reverse the loop, if converting the loop exit test to
6968 NE is possible. */
6969 initial_value = const0_rtx;
6970 }
6971
6972 /* Check if there is a NOTE_INSN_LOOP_VTOP note. If there is,
6973 that means that this is a for or while style loop, with
6974 a loop exit test at the start. Thus, we can assume that
6975 the loop condition was true when the loop was entered.
6976 This allows us to change the loop exit condition to an
6977 equality test.
6978 We start at the end and search backwards for the previous
6979 NOTE. If there is no NOTE_INSN_LOOP_VTOP for this loop,
6980 the search will stop at the NOTE_INSN_LOOP_CONT. */
6981 vtop = loop_end;
6982 do
6983 vtop = PREV_INSN (vtop);
6984 while (GET_CODE (vtop) != NOTE
6985 || NOTE_LINE_NUMBER (vtop) > 0
6986 || NOTE_LINE_NUMBER (vtop) == NOTE_REPEATED_LINE_NUMBER
6987 || NOTE_LINE_NUMBER (vtop) == NOTE_INSN_DELETED);
6988 if (NOTE_LINE_NUMBER (vtop) != NOTE_INSN_LOOP_VTOP)
6989 vtop = NULL_RTX;
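/* A sketch of the insn stream this search assumes near the loop end
   (for a while-style loop; the exact layout comes from stmt.c):
       ... loop body ...
       NOTE_INSN_LOOP_CONT
       NOTE_INSN_LOOP_VTOP
       ... duplicated exit-test insns ...
       conditional jump back to the loop top
       NOTE_INSN_LOOP_END  */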
6990
6991 /* First check if we can do a vanilla loop reversal. */
6992 if (initial_value == const0_rtx
6993 /* If we have a decrement_and_branch_on_count, prefer
6994 the NE test, since this will allow that instruction to
6995 be generated. Note that we must use a vanilla loop
6996 reversal if the biv is used to calculate a giv or has
6997 a non-counting use. */
6998 #if ! defined (HAVE_decrement_and_branch_until_zero) && defined (HAVE_decrement_and_branch_on_count)
6999 && (! (add_val == 1 && vtop
7000 && (bl->biv_count == 0
7001 || no_use_except_counting)))
7002 #endif
7003 && GET_CODE (comparison_value) == CONST_INT
7004 /* Now do postponed overflow checks on COMPARISON_VAL. */
7005 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7006 & comparison_sign_mask))
7007 {
7008 /* Register will always be nonnegative, with value
7009 0 on last iteration */
7010 add_adjust = add_val;
7011 nonneg = 1;
7012 cmp_code = GE;
7013 }
7014 else if (add_val == 1 && vtop
7015 && (bl->biv_count == 0
7016 || no_use_except_counting))
7017 {
7018 add_adjust = 0;
7019 cmp_code = NE;
7020 }
7021 else
7022 return 0;
7023
7024 if (GET_CODE (comparison) == LE)
7025 add_adjust -= add_val;
7026
7027 /* If the initial value is not zero, or if the comparison
7028 value is not an exact multiple of the increment, then we
7029 can not reverse this loop. */
7030 if (initial_value == const0_rtx
7031 && GET_CODE (comparison_value) == CONST_INT)
7032 {
7033 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7034 return 0;
7035 }
7036 else
7037 {
7038 if (! no_use_except_counting || add_val != 1)
7039 return 0;
7040 }
7041
7042 final_value = comparison_value;
7043
7044 /* Reset these in case we normalized the initial value
7045 and comparison value above. */
7046 if (GET_CODE (comparison_value) == CONST_INT
7047 && GET_CODE (initial_value) == CONST_INT)
7048 {
7049 comparison_value = GEN_INT (comparison_val);
7050 final_value
7051 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7052 }
7053 bl->initial_value = initial_value;
7054
7055 /* Save some info needed to produce the new insns. */
7056 reg = bl->biv->dest_reg;
7057 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7058 if (jump_label == pc_rtx)
7059 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7060 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
7061
7062 /* Set start_value; if this is not a CONST_INT, we need
7063 to generate a SUB.
7064 Initialize biv to start_value before loop start.
7065 The old initializing insn will be deleted as a
7066 dead store by flow.c. */
7067 if (initial_value == const0_rtx
7068 && GET_CODE (comparison_value) == CONST_INT)
7069 {
7070 start_value = GEN_INT (comparison_val - add_adjust);
7071 emit_insn_before (gen_move_insn (reg, start_value),
7072 loop_start);
7073 }
7074 else if (GET_CODE (initial_value) == CONST_INT)
7075 {
7076 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7077 enum machine_mode mode = GET_MODE (reg);
7078 enum insn_code icode
7079 = add_optab->handlers[(int) mode].insn_code;
7080 if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7081 || ! ((*insn_operand_predicate[icode][1])
7082 (comparison_value, mode))
7083 || ! (*insn_operand_predicate[icode][2]) (offset, mode))
7084 return 0;
7085 start_value
7086 = gen_rtx_PLUS (mode, comparison_value, offset);
7087 emit_insn_before ((GEN_FCN (icode)
7088 (reg, comparison_value, offset)),
7089 loop_start);
7090 if (GET_CODE (comparison) == LE)
7091 final_value = gen_rtx_PLUS (mode, comparison_value,
7092 GEN_INT (add_val));
7093 }
7094 else if (! add_adjust)
7095 {
7096 enum machine_mode mode = GET_MODE (reg);
7097 enum insn_code icode
7098 = sub_optab->handlers[(int) mode].insn_code;
7099 if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7100 || ! ((*insn_operand_predicate[icode][1])
7101 (comparison_value, mode))
7102 || ! ((*insn_operand_predicate[icode][2])
7103 (initial_value, mode)))
7104 return 0;
7105 start_value
7106 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7107 emit_insn_before ((GEN_FCN (icode)
7108 (reg, comparison_value, initial_value)),
7109 loop_start);
7110 }
7111 else
7112 /* We could handle the other cases too, but it'll be
7113 better to have a testcase first. */
7114 return 0;
7115
7116 /* Add insn to decrement register, and delete insn
7117 that incremented the register. */
7118 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
7119 bl->biv->insn);
7120 delete_insn (bl->biv->insn);
7121
7122 /* Update biv info to reflect its new status. */
7123 bl->biv->insn = p;
7124 bl->initial_value = start_value;
7125 bl->biv->add_val = new_add_val;
7126
7127 /* Inc LABEL_NUSES so that delete_insn will
7128 not delete the label. */
7129 LABEL_NUSES (XEXP (jump_label, 0)) ++;
7130
7131 /* Emit an insn after the end of the loop to set the biv's
7132 proper exit value if it is used anywhere outside the loop. */
7133 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7134 || ! bl->init_insn
7135 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7136 emit_insn_after (gen_move_insn (reg, final_value),
7137 loop_end);
7138
7139 /* Delete compare/branch at end of loop. */
7140 delete_insn (PREV_INSN (loop_end));
7141 if (compare_and_branch == 2)
7142 delete_insn (first_compare);
7143
7144 /* Add new compare/branch insn at end of loop. */
7145 start_sequence ();
7146 emit_cmp_insn (reg, const0_rtx, cmp_code, NULL_RTX,
7147 GET_MODE (reg), 0, 0);
7148 emit_jump_insn ((*bcc_gen_fctn[(int) cmp_code])
7149 (XEXP (jump_label, 0)));
7150 tem = gen_sequence ();
7151 end_sequence ();
7152 emit_jump_insn_before (tem, loop_end);
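/* The replacement sequence looks roughly like this (illustrative RTL
   for a cc0 target; the REG and LABEL numbers are made up, and the
   exact insns depend on the port):
       (set (cc0) (compare (reg 65) (const_int 0)))
       (set (pc) (if_then_else (ge (cc0) (const_int 0))
                               (label_ref 23) (pc)))  */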
7153
7154 if (nonneg)
7155 {
7156 for (tem = PREV_INSN (loop_end);
7157 tem && GET_CODE (tem) != JUMP_INSN;
7158 tem = PREV_INSN (tem))
7159 ;
7160 if (tem)
7161 {
7162 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7163
7164 /* Increment of LABEL_NUSES done above. */
7165 /* Register is now always nonnegative,
7166 so add REG_NONNEG note to the branch. */
7167 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7168 REG_NOTES (tem));
7169 }
7170 bl->nonneg = 1;
7171 }
7172
7173 /* Mark that this biv has been reversed. Each giv which depends
7174 on this biv, and which is also live past the end of the loop
7175 will have to be fixed up. */
7176
7177 bl->reversed = 1;
7178
7179 if (loop_dump_stream)
7180 fprintf (loop_dump_stream,
7181 "Reversed loop and added reg_nonneg\n");
7182
7183 return 1;
7184 }
7185 }
7186 }
7187
7188 return 0;
7189 }
7190 \f
7191 /* Verify whether the biv BL appears to be eliminable,
7192 based on the insns in the loop that refer to it.
7193 LOOP_START is the first insn of the loop, and END is the end insn.
7194
7195 If ELIMINATE_P is non-zero, actually do the elimination.
7196
7197 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7198 determine whether invariant insns should be placed inside or at the
7199 start of the loop. */
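/* For example (an illustrative sketch): if the only remaining use of
   biv I is an exit test "I < 100", and a giv G = 4*I + BASE was reduced
   for an address, the test can be rewritten as "G < 4*100 + BASE", with
   the right-hand side computed outside the loop, leaving the biv dead.  */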
7200
7201 static int
7202 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
7203 struct iv_class *bl;
7204 rtx loop_start;
7205 rtx end;
7206 int eliminate_p;
7207 int threshold, insn_count;
7208 {
7209 rtx reg = bl->biv->dest_reg;
7210 rtx p;
7211
7212 /* Scan all insns in the loop, stopping if we find one that uses the
7213 biv in a way that we cannot eliminate. */
7214
7215 for (p = loop_start; p != end; p = NEXT_INSN (p))
7216 {
7217 enum rtx_code code = GET_CODE (p);
7218 rtx where = threshold >= insn_count ? loop_start : p;
7219
7220 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7221 && reg_mentioned_p (reg, PATTERN (p))
7222 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
7223 {
7224 if (loop_dump_stream)
7225 fprintf (loop_dump_stream,
7226 "Cannot eliminate biv %d: biv used in insn %d.\n",
7227 bl->regno, INSN_UID (p));
7228 break;
7229 }
7230 }
7231
7232 if (p == end)
7233 {
7234 if (loop_dump_stream)
7235 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7236 bl->regno, eliminate_p ? "was" : "can be");
7237 return 1;
7238 }
7239
7240 return 0;
7241 }
7242 \f
7243 /* If BL appears in X (part of the pattern of INSN), see if we can
7244 eliminate its use. If so, return 1. If not, return 0.
7245
7246 If BIV does not appear in X, return 1.
7247
7248 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
7249 where extra insns should be added. Depending on how many items have been
7250 moved out of the loop, it will either be before INSN or at the start of
7251 the loop. */
7252
7253 static int
7254 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
7255 rtx x, insn;
7256 struct iv_class *bl;
7257 int eliminate_p;
7258 rtx where;
7259 {
7260 enum rtx_code code = GET_CODE (x);
7261 rtx reg = bl->biv->dest_reg;
7262 enum machine_mode mode = GET_MODE (reg);
7263 struct induction *v;
7264 rtx arg, tem;
7265 #ifdef HAVE_cc0
7266 rtx new;
7267 #endif
7268 int arg_operand;
7269 char *fmt;
7270 int i, j;
7271
7272 switch (code)
7273 {
7274 case REG:
7275 /* If we haven't already been able to do something with this BIV,
7276 we can't eliminate it. */
7277 if (x == reg)
7278 return 0;
7279 return 1;
7280
7281 case SET:
7282 /* If this sets the BIV, it is not a problem. */
7283 if (SET_DEST (x) == reg)
7284 return 1;
7285
7286 /* If this is an insn that defines a giv, it is also ok because
7287 it will go away when the giv is reduced. */
7288 for (v = bl->giv; v; v = v->next_iv)
7289 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7290 return 1;
7291
7292 #ifdef HAVE_cc0
7293 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7294 {
7295 /* Can replace with any giv that was reduced and
7296 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7297 Require a constant for MULT_VAL, so we know it's nonzero.
7298 ??? We disable this optimization to avoid potential
7299 overflows. */
7300
7301 for (v = bl->giv; v; v = v->next_iv)
7302 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7303 && v->add_val == const0_rtx
7304 && ! v->ignore && ! v->maybe_dead && v->always_computable
7305 && v->mode == mode
7306 && 0)
7307 {
7308 /* If the giv V had the auto-inc address optimization applied
7309 to it, and INSN occurs between the giv insn and the biv
7310 insn, then we must adjust the value used here.
7311 This is rare, so we don't bother to do so. */
7312 if (v->auto_inc_opt
7313 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7314 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7315 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7316 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7317 continue;
7318
7319 if (! eliminate_p)
7320 return 1;
7321
7322 /* If the giv has the opposite direction of change,
7323 then reverse the comparison. */
7324 if (INTVAL (v->mult_val) < 0)
7325 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7326 const0_rtx, v->new_reg);
7327 else
7328 new = v->new_reg;
7329
7330 /* We can probably test that giv's reduced reg. */
7331 if (validate_change (insn, &SET_SRC (x), new, 0))
7332 return 1;
7333 }
7334
7335 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7336 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7337 Require a constant for MULT_VAL, so we know it's nonzero.
7338 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7339 overflow problem. */
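/* E.g. (a sketch, assuming a multiplier of 1): a test
       (set (cc0) (reg I))
   with a reduced giv G = I + PTR can become
       (set (cc0) (compare (reg G) (reg PTR)))
   since "I cmp 0" is equivalent to "G cmp PTR"; the same equivalence
   holds for any positive constant multiplier, absent overflow.  */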
7340
7341 for (v = bl->giv; v; v = v->next_iv)
7342 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7343 && ! v->ignore && ! v->maybe_dead && v->always_computable
7344 && v->mode == mode
7345 && (GET_CODE (v->add_val) == SYMBOL_REF
7346 || GET_CODE (v->add_val) == LABEL_REF
7347 || GET_CODE (v->add_val) == CONST
7348 || (GET_CODE (v->add_val) == REG
7349 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
7350 {
7351 /* If the giv V had the auto-inc address optimization applied
7352 to it, and INSN occurs between the giv insn and the biv
7353 insn, then we must adjust the value used here.
7354 This is rare, so we don't bother to do so. */
7355 if (v->auto_inc_opt
7356 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7357 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7358 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7359 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7360 continue;
7361
7362 if (! eliminate_p)
7363 return 1;
7364
7365 /* If the giv has the opposite direction of change,
7366 then reverse the comparison. */
7367 if (INTVAL (v->mult_val) < 0)
7368 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7369 v->new_reg);
7370 else
7371 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7372 copy_rtx (v->add_val));
7373
7374 /* Replace biv with the giv's reduced register. */
7375 update_reg_last_use (v->add_val, insn);
7376 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7377 return 1;
7378
7379 /* Insn doesn't support that constant or invariant. Copy it
7380 into a register (it will be a loop invariant.) */
7381 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7382
7383 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
7384 where);
7385
7386 /* Substitute the new register for its invariant value in
7387 the compare expression. */
7388 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7389 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7390 return 1;
7391 }
7392 }
7393 #endif
7394 break;
7395
7396 case COMPARE:
7397 case EQ: case NE:
7398 case GT: case GE: case GTU: case GEU:
7399 case LT: case LE: case LTU: case LEU:
7400 /* See if either argument is the biv. */
7401 if (XEXP (x, 0) == reg)
7402 arg = XEXP (x, 1), arg_operand = 1;
7403 else if (XEXP (x, 1) == reg)
7404 arg = XEXP (x, 0), arg_operand = 0;
7405 else
7406 break;
7407
7408 if (CONSTANT_P (arg))
7409 {
7410 /* First try to replace with any giv that has constant positive
7411 mult_val and constant add_val. We might be able to support
7412 negative mult_val, but it seems complex to do it in general. */
7413
7414 for (v = bl->giv; v; v = v->next_iv)
7415 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7416 && (GET_CODE (v->add_val) == SYMBOL_REF
7417 || GET_CODE (v->add_val) == LABEL_REF
7418 || GET_CODE (v->add_val) == CONST
7419 || (GET_CODE (v->add_val) == REG
7420 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
7421 && ! v->ignore && ! v->maybe_dead && v->always_computable
7422 && v->mode == mode)
7423 {
7424 /* If the giv V had the auto-inc address optimization applied
7425 to it, and INSN occurs between the giv insn and the biv
7426 insn, then we must adjust the value used here.
7427 This is rare, so we don't bother to do so. */
7428 if (v->auto_inc_opt
7429 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7430 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7431 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7432 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7433 continue;
7434
7435 if (! eliminate_p)
7436 return 1;
7437
7438 /* Replace biv with the giv's reduced reg. */
7439 XEXP (x, 1-arg_operand) = v->new_reg;
7440
7441 /* If all constants are actually constant integers and
7442 the derived constant can be directly placed in the COMPARE,
7443 do so. */
7444 if (GET_CODE (arg) == CONST_INT
7445 && GET_CODE (v->mult_val) == CONST_INT
7446 && GET_CODE (v->add_val) == CONST_INT
7447 && validate_change (insn, &XEXP (x, arg_operand),
7448 GEN_INT (INTVAL (arg)
7449 * INTVAL (v->mult_val)
7450 + INTVAL (v->add_val)), 0))
7451 return 1;
7452
7453 /* Otherwise, load it into a register. */
7454 tem = gen_reg_rtx (mode);
7455 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7456 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
7457 return 1;
7458
7459 /* If that failed, put back the change we made above. */
7460 XEXP (x, 1-arg_operand) = reg;
7461 }
7462
7463 /* Look for giv with positive constant mult_val and nonconst add_val.
7464 Insert insns to calculate new compare value.
7465 ??? Turn this off due to possible overflow. */
7466
7467 for (v = bl->giv; v; v = v->next_iv)
7468 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7469 && ! v->ignore && ! v->maybe_dead && v->always_computable
7470 && v->mode == mode
7471 && 0)
7472 {
7473 rtx tem;
7474
7475 /* If the giv V had the auto-inc address optimization applied
7476 to it, and INSN occurs between the giv insn and the biv
7477 insn, then we must adjust the value used here.
7478 This is rare, so we don't bother to do so. */
7479 if (v->auto_inc_opt
7480 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7481 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7482 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7483 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7484 continue;
7485
7486 if (! eliminate_p)
7487 return 1;
7488
7489 tem = gen_reg_rtx (mode);
7490
7491 /* Replace biv with giv's reduced register. */
7492 validate_change (insn, &XEXP (x, 1 - arg_operand),
7493 v->new_reg, 1);
7494
7495 /* Compute value to compare against. */
7496 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7497 /* Use it in this insn. */
7498 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7499 if (apply_change_group ())
7500 return 1;
7501 }
7502 }
7503 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
7504 {
7505 if (invariant_p (arg) == 1)
7506 {
7507 /* Look for giv with constant positive mult_val and nonconst
7508 add_val. Insert insns to compute new compare value.
7509 ??? Turn this off due to possible overflow. */
7510
7511 for (v = bl->giv; v; v = v->next_iv)
7512 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7513 && ! v->ignore && ! v->maybe_dead && v->always_computable
7514 && v->mode == mode
7515 && 0)
7516 {
7517 rtx tem;
7518
7519 /* If the giv V had the auto-inc address optimization applied
7520 to it, and INSN occurs between the giv insn and the biv
7521 insn, then we must adjust the value used here.
7522 This is rare, so we don't bother to do so. */
7523 if (v->auto_inc_opt
7524 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7525 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7526 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7527 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7528 continue;
7529
7530 if (! eliminate_p)
7531 return 1;
7532
7533 tem = gen_reg_rtx (mode);
7534
7535 /* Replace biv with giv's reduced register. */
7536 validate_change (insn, &XEXP (x, 1 - arg_operand),
7537 v->new_reg, 1);
7538
7539 /* Compute value to compare against. */
7540 emit_iv_add_mult (arg, v->mult_val, v->add_val,
7541 tem, where);
7542 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7543 if (apply_change_group ())
7544 return 1;
7545 }
7546 }
7547
7548 /* This code has problems. Basically, when deciding whether we
7549 will eliminate BL, you can't know whether a particular giv
7550 of ARG will be reduced. If it isn't going to be reduced,
7551 we can't eliminate BL. We can try forcing it to be reduced,
7552 but that can generate poor code.
7553
7554 The problem is that the benefit of reducing TV, below, should
7555 be increased if BL can actually be eliminated, but this means
7556 we might have to do a topological sort of the order in which
7557 we try to process bivs. It doesn't seem worthwhile to do
7558 this sort of thing now. */
7559
7560 #if 0
7561 /* Otherwise the reg compared with had better be a biv. */
7562 if (GET_CODE (arg) != REG
7563 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
7564 return 0;
7565
7566 /* Look for a pair of givs, one for each biv,
7567 with identical coefficients. */
7568 for (v = bl->giv; v; v = v->next_iv)
7569 {
7570 struct induction *tv;
7571
7572 if (v->ignore || v->maybe_dead || v->mode != mode)
7573 continue;
7574
7575 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
7576 if (! tv->ignore && ! tv->maybe_dead
7577 && rtx_equal_p (tv->mult_val, v->mult_val)
7578 && rtx_equal_p (tv->add_val, v->add_val)
7579 && tv->mode == mode)
7580 {
7581 /* If the giv V had the auto-inc address optimization applied
7582 to it, and INSN occurs between the giv insn and the biv
7583 insn, then we must adjust the value used here.
7584 This is rare, so we don't bother to do so. */
7585 if (v->auto_inc_opt
7586 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7587 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7588 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7589 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7590 continue;
7591
7592 if (! eliminate_p)
7593 return 1;
7594
7595 /* Replace biv with its giv's reduced reg. */
7596 XEXP (x, 1-arg_operand) = v->new_reg;
7597 /* Replace other operand with the other giv's
7598 reduced reg. */
7599 XEXP (x, arg_operand) = tv->new_reg;
7600 return 1;
7601 }
7602 }
7603 #endif
7604 }
7605
7606 /* If we get here, the biv can't be eliminated. */
7607 return 0;
7608
7609 case MEM:
7610 /* If this address is a DEST_ADDR giv, it doesn't matter if the
7611 biv is used in it, since it will be replaced. */
7612 for (v = bl->giv; v; v = v->next_iv)
7613 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
7614 return 1;
7615 break;
7616
7617 default:
7618 break;
7619 }
7620
7621 /* See if any subexpression fails elimination. */
7622 fmt = GET_RTX_FORMAT (code);
7623 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7624 {
7625 switch (fmt[i])
7626 {
7627 case 'e':
7628 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
7629 eliminate_p, where))
7630 return 0;
7631 break;
7632
7633 case 'E':
7634 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7635 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
7636 eliminate_p, where))
7637 return 0;
7638 break;
7639 }
7640 }
7641
7642 return 1;
7643 }
7644 \f
7645 /* Return nonzero if the last use of REG
7646 is in INSN or in an insn following INSN in the same basic block. */
7647
7648 static int
7649 last_use_this_basic_block (reg, insn)
7650 rtx reg;
7651 rtx insn;
7652 {
7653 rtx n;
7654 for (n = insn;
7655 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
7656 n = NEXT_INSN (n))
7657 {
7658 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
7659 return 1;
7660 }
7661 return 0;
7662 }
7663 \f
7664 /* Called via `note_stores' to record the initial value of a biv. Here we
7665 just record the location of the set and process it later. */
7666
7667 static void
7668 record_initial (dest, set)
7669 rtx dest;
7670 rtx set;
7671 {
7672 struct iv_class *bl;
7673
7674 if (GET_CODE (dest) != REG
7675 || REGNO (dest) >= max_reg_before_loop
7676 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
7677 return;
7678
7679 bl = reg_biv_class[REGNO (dest)];
7680
7681 /* If this is the first set found, record it. */
7682 if (bl->init_insn == 0)
7683 {
7684 bl->init_insn = note_insn;
7685 bl->init_set = set;
7686 }
7687 }
7688 \f
7689 /* If any of the registers in X are "old" and currently have a last use earlier
7690 than INSN, update them to have a last use of INSN. Their actual last use
7691 will be the previous insn but it will not have a valid uid_luid so we can't
7692 use it. */
7693
7694 static void
7695 update_reg_last_use (x, insn)
7696 rtx x;
7697 rtx insn;
7698 {
7699 /* Check for the case where INSN does not have a valid luid. In this case,
7700 there is no need to modify the regno_last_uid, as this can only happen
7701 when code is inserted after the loop_end to set a pseudo's final value,
7702 and hence this insn will never be the last use of x. */
7703 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
7704 && INSN_UID (insn) < max_uid_for_loop
7705 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
7706 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
7707 else
7708 {
7709 register int i, j;
7710 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
7711 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7712 {
7713 if (fmt[i] == 'e')
7714 update_reg_last_use (XEXP (x, i), insn);
7715 else if (fmt[i] == 'E')
7716 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7717 update_reg_last_use (XVECEXP (x, i, j), insn);
7718 }
7719 }
7720 }
7721 \f
7722 /* Given a jump insn JUMP, return the condition that will cause it to branch
7723 to its JUMP_LABEL. If the condition cannot be understood, or is an
7724 inequality floating-point comparison which needs to be reversed, 0 will
7725 be returned.
7726
7727 If EARLIEST is non-zero, it is a pointer to a place where the earliest
7728 insn used in locating the condition was found. If a replacement test
7729 of the condition is desired, it should be placed in front of that
7730 insn and we will be sure that the inputs are still valid.
7731
7732 The condition will be returned in a canonical form to simplify testing by
7733 callers. Specifically:
7734
7735 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
7736 (2) Both operands will be machine operands; (cc0) will have been replaced.
7737 (3) If an operand is a constant, it will be the second operand.
7738 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
7739 for GE, GEU, and LEU. */
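/* E.g., a jump conditioned on (le (reg X) (const_int 4)) is returned as
   (lt (reg X) (const_int 5)), and (geu (reg X) (const_int 1)) as
   (gtu (reg X) (const_int 0)).  */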
7740
7741 rtx
7742 get_condition (jump, earliest)
7743 rtx jump;
7744 rtx *earliest;
7745 {
7746 enum rtx_code code;
7747 rtx prev = jump;
7748 rtx set;
7749 rtx tem;
7750 rtx op0, op1;
7751 int reverse_code = 0;
7752 int did_reverse_condition = 0;
7753 enum machine_mode mode;
7754
7755 /* If this is not a standard conditional jump, we can't parse it. */
7756 if (GET_CODE (jump) != JUMP_INSN
7757 || ! condjump_p (jump) || simplejump_p (jump))
7758 return 0;
7759
7760 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7761 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7762 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7763 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7764
7765 if (earliest)
7766 *earliest = jump;
7767
7768 /* If this branches to JUMP_LABEL when the condition is false, reverse
7769 the condition. */
7770 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7771 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7772 code = reverse_condition (code), did_reverse_condition ^= 1;
7773
7774 /* If we are comparing a register with zero, see if the register is set
7775 in the previous insn to a COMPARE or a comparison operation. Perform
7776 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7777 in cse.c */
7778
7779 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7780 {
7781 /* Set non-zero when we find something of interest. */
7782 rtx x = 0;
7783
7784 #ifdef HAVE_cc0
7785 /* If comparison with cc0, import actual comparison from compare
7786 insn. */
7787 if (op0 == cc0_rtx)
7788 {
7789 if ((prev = prev_nonnote_insn (prev)) == 0
7790 || GET_CODE (prev) != INSN
7791 || (set = single_set (prev)) == 0
7792 || SET_DEST (set) != cc0_rtx)
7793 return 0;
7794
7795 op0 = SET_SRC (set);
7796 op1 = CONST0_RTX (GET_MODE (op0));
7797 if (earliest)
7798 *earliest = prev;
7799 }
7800 #endif
7801
7802 /* If this is a COMPARE, pick up the two things being compared. */
7803 if (GET_CODE (op0) == COMPARE)
7804 {
7805 op1 = XEXP (op0, 1);
7806 op0 = XEXP (op0, 0);
7807 continue;
7808 }
7809 else if (GET_CODE (op0) != REG)
7810 break;
7811
7812 /* Go back to the previous insn. Stop if it is not an INSN. We also
7813 stop if it isn't a single set or if it has a REG_INC note because
7814 we don't want to bother dealing with it. */
7815
7816 if ((prev = prev_nonnote_insn (prev)) == 0
7817 || GET_CODE (prev) != INSN
7818 || FIND_REG_INC_NOTE (prev, 0)
7819 || (set = single_set (prev)) == 0)
7820 break;
7821
7822 /* If this is setting OP0, get what it sets it to if it looks
7823 relevant. */
7824 if (rtx_equal_p (SET_DEST (set), op0))
7825 {
7826 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7827
7828 /* ??? We may not combine comparisons done in a CCmode with
7829 comparisons not done in a CCmode. This is to aid targets
7830 like Alpha that have an IEEE compliant EQ instruction, and
7831 a non-IEEE compliant BEQ instruction. The use of CCmode is
7832 actually artificial, simply to prevent the combination, but
7833 should not affect other platforms. */
7834
7835 if ((GET_CODE (SET_SRC (set)) == COMPARE
7836 || (((code == NE
7837 || (code == LT
7838 && GET_MODE_CLASS (inner_mode) == MODE_INT
7839 && (GET_MODE_BITSIZE (inner_mode)
7840 <= HOST_BITS_PER_WIDE_INT)
7841 && (STORE_FLAG_VALUE
7842 & ((HOST_WIDE_INT) 1
7843 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7844 #ifdef FLOAT_STORE_FLAG_VALUE
7845 || (code == LT
7846 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7847 && FLOAT_STORE_FLAG_VALUE < 0)
7848 #endif
7849 ))
7850 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7851 && ((GET_MODE_CLASS (mode) == MODE_CC)
7852 == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7853 x = SET_SRC (set);
7854 else if (((code == EQ
7855 || (code == GE
7856 && (GET_MODE_BITSIZE (inner_mode)
7857 <= HOST_BITS_PER_WIDE_INT)
7858 && GET_MODE_CLASS (inner_mode) == MODE_INT
7859 && (STORE_FLAG_VALUE
7860 & ((HOST_WIDE_INT) 1
7861 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7862 #ifdef FLOAT_STORE_FLAG_VALUE
7863 || (code == GE
7864 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7865 && FLOAT_STORE_FLAG_VALUE < 0)
7866 #endif
7867 ))
7868 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7869 && ((GET_MODE_CLASS (mode) == MODE_CC)
7870 == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7871 {
7872 /* We might have reversed a LT to get a GE here. But this wasn't
7873 actually the comparison of data, so we don't flag that we
7874 have had to reverse the condition. */
7875 did_reverse_condition ^= 1;
7876 reverse_code = 1;
7877 x = SET_SRC (set);
7878 }
7879 else
7880 break;
7881 }
7882
7883 else if (reg_set_p (op0, prev))
7884 /* If this sets OP0, but not directly, we have to give up. */
7885 break;
7886
7887 if (x)
7888 {
7889 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7890 code = GET_CODE (x);
7891 if (reverse_code)
7892 {
7893 code = reverse_condition (code);
7894 did_reverse_condition ^= 1;
7895 reverse_code = 0;
7896 }
7897
7898 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7899 if (earliest)
7900 *earliest = prev;
7901 }
7902 }
7903
7904 /* If constant is first, put it last. */
7905 if (CONSTANT_P (op0))
7906 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7907
7908 /* If OP0 is the result of a comparison, we weren't able to find what
7909 was really being compared, so fail. */
7910 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7911 return 0;
7912
7913 /* Canonicalize any ordered comparison with integers involving equality
7914 if we can do computations in the relevant mode and we do not
7915 overflow. */
7916
7917 if (GET_CODE (op1) == CONST_INT
7918 && GET_MODE (op0) != VOIDmode
7919 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7920 {
7921 HOST_WIDE_INT const_val = INTVAL (op1);
7922 unsigned HOST_WIDE_INT uconst_val = const_val;
7923 unsigned HOST_WIDE_INT max_val
7924 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7925
7926 switch (code)
7927 {
7928 case LE:
7929 if (const_val != max_val >> 1)
7930 code = LT, op1 = GEN_INT (const_val + 1);
7931 break;
7932
7933 /* When cross-compiling, const_val might be sign-extended from
7934 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7935 case GE:
7936 if ((const_val & max_val)
7937 != (((HOST_WIDE_INT) 1
7938 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7939 code = GT, op1 = GEN_INT (const_val - 1);
7940 break;
7941
7942 case LEU:
7943 if (uconst_val < max_val)
7944 code = LTU, op1 = GEN_INT (uconst_val + 1);
7945 break;
7946
7947 case GEU:
7948 if (uconst_val != 0)
7949 code = GTU, op1 = GEN_INT (uconst_val - 1);
7950 break;
7951
7952 default:
7953 break;
7954 }
7955 }
7956
7957 /* If this was floating-point and we reversed anything other than an
7958 EQ or NE, return zero. */
7959 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7960 && did_reverse_condition && code != NE && code != EQ
7961 && ! flag_fast_math
7962 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7963 return 0;
7964
7965 #ifdef HAVE_cc0
7966 /* Never return CC0; return zero instead. */
7967 if (op0 == cc0_rtx)
7968 return 0;
7969 #endif
7970
7971 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7972 }
7973
7974 /* Similar to the above routine, except that we also put an invariant last
7975 unless both operands are invariants. */
7976
7977 rtx
7978 get_condition_for_loop (x)
7979 rtx x;
7980 {
7981 rtx comparison = get_condition (x, NULL_PTR);
7982
7983 if (comparison == 0
7984 || ! invariant_p (XEXP (comparison, 0))
7985 || invariant_p (XEXP (comparison, 1)))
7986 return comparison;
7987
7988 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7989 XEXP (comparison, 1), XEXP (comparison, 0));
7990 }
7991
7992 #ifdef HAIFA
7993 /* Analyze a loop in order to instrument it with the use of count register.
7994 loop_start and loop_end are the first and last insns of the loop.
7995 This function works in cooperation with insert_bct ().
7996 loop_can_insert_bct[loop_num] is set according to whether the optimization
7997 is applicable to the loop. When it is applicable, the following variables
7998 are also set:
7999 loop_start_value[loop_num]
8000 loop_comparison_value[loop_num]
8001 loop_increment[loop_num]
8002 loop_comparison_code[loop_num] */
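/* e.g. (illustrative): a loop such as
       for (i = 0; i < n; i += 4)
   with no call and no tablejump in its body passes the checks below,
   provided N is loop-invariant and I has the machine's word mode.  */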
8003
8004 #ifdef HAVE_decrement_and_branch_on_count
8005 static void
8006 analyze_loop_iterations (loop_start, loop_end)
8007 rtx loop_start, loop_end;
8008 {
8009 rtx comparison, comparison_value;
8010 rtx iteration_var, initial_value, increment;
8011 enum rtx_code comparison_code;
8012
8013 rtx last_loop_insn;
8014 rtx insn;
8015 int i;
8016
8017 /* loop_variable mode */
8018 enum machine_mode original_mode;
8019
8020 /* find the number of the loop */
8021 int loop_num = uid_loop_num [INSN_UID (loop_start)];
8022
8023 /* we change our mind only when we are sure that the loop will be instrumented */
8024 loop_can_insert_bct[loop_num] = 0;
8025
8026 /* is the optimization suppressed? */
8027 if ( !flag_branch_on_count_reg )
8028 return;
8029
8030 /* make sure that count-reg is not in use */
8031 if (loop_used_count_register[loop_num]){
8032 if (loop_dump_stream)
8033 fprintf (loop_dump_stream,
8034 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
8035 loop_num);
8036 return;
8037 }
8038
8039 /* make sure that the function has no indirect jumps. */
8040 if (indirect_jump_in_function){
8041 if (loop_dump_stream)
8042 fprintf (loop_dump_stream,
8043 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
8044 loop_num);
8045 return;
8046 }
8047
8048 /* make sure that the last loop insn is a conditional jump */
8049 last_loop_insn = PREV_INSN (loop_end);
8050 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
8051 if (loop_dump_stream)
8052 fprintf (loop_dump_stream,
8053 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
8054 loop_num);
8055 return;
8056 }
8057
8058 /* First find the iteration variable. If the last insn is a conditional
8059 branch, and the insn preceding it tests a register value, make that
8060 register the iteration variable. */
8061
8062 /* We used to use prev_nonnote_insn here, but that fails because it might
8063 accidentally get the branch for a contained loop if the branch for this
8064 loop was deleted. We can only trust branches immediately before the
8065 loop_end. */
8066
8067 comparison = get_condition_for_loop (last_loop_insn);
8068 /* ??? Get_condition may switch position of induction variable and
8069 invariant register when it canonicalizes the comparison. */
8070
8071 if (comparison == 0) {
8072 if (loop_dump_stream)
8073 fprintf (loop_dump_stream,
8074 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
8075 loop_num);
8076 return;
8077 }
8078
8079 comparison_code = GET_CODE (comparison);
8080 iteration_var = XEXP (comparison, 0);
8081 comparison_value = XEXP (comparison, 1);
8082
8083 original_mode = GET_MODE (iteration_var);
8084 if (GET_MODE_CLASS (original_mode) != MODE_INT
8085 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
8086 if (loop_dump_stream)
8087 fprintf (loop_dump_stream,
8088 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
8089 loop_num);
8090 return;
8091 }
8092
8093 /* get info about loop bounds and increment */
8094 iteration_info (iteration_var, &initial_value, &increment,
8095 loop_start, loop_end);
8096
8097 /* make sure that all required loop data were found */
8098 if (!(initial_value && increment && comparison_value
8099 && invariant_p (comparison_value) && invariant_p (increment)
8100 && ! indirect_jump_in_function))
8101 {
8102 if (loop_dump_stream) {
8103 fprintf (loop_dump_stream,
8104 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
8105 if (!(initial_value && increment && comparison_value)) {
8106 fprintf (loop_dump_stream, "\tbounds not available: ");
8107 if ( ! initial_value )
8108 fprintf (loop_dump_stream, "initial ");
8109 if ( ! increment )
8110 fprintf (loop_dump_stream, "increment ");
8111 if ( ! comparison_value )
8112 fprintf (loop_dump_stream, "comparison ");
8113 fprintf (loop_dump_stream, "\n");
8114 }
8115 if (!invariant_p (comparison_value) || !invariant_p (increment))
8116 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
8117 }
8118 return;
8119 }
8120
8121 /* make sure that the increment is constant */
8122 if (GET_CODE (increment) != CONST_INT) {
8123 if (loop_dump_stream)
8124 fprintf (loop_dump_stream,
8125 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
8126 loop_num);
8127 return;
8128 }
8129
8130 /* make sure that the loop contains neither a function call nor a jump through
8131 a table (the count register might be altered by the called function, and
8132 might be used for a branch through a table). */
8133 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
8134 if (GET_CODE (insn) == CALL_INSN){
8135 if (loop_dump_stream)
8136 fprintf (loop_dump_stream,
8137 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
8138 loop_num);
8139 return;
8140 }
8141
8142 if (GET_CODE (insn) == JUMP_INSN
8143 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
8144 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
8145 if (loop_dump_stream)
8146 fprintf (loop_dump_stream,
8147 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
8148 loop_num);
8149 return;
8150 }
8151 }
8152
8153 /* At this point, we are sure that the loop can be instrumented with BCT.
8154 Some of the loops, however, will not be instrumented - the final decision
8155 is taken by insert_bct () */
8156 if (loop_dump_stream)
8157 fprintf (loop_dump_stream,
8158 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
8159 loop_num);
8160
8161 /* mark all enclosing loops that they cannot use count register */
8162 /* ???: In fact, since insert_bct may decide not to instrument this loop,
8163 marking here may prevent instrumenting an enclosing loop that could
8164 actually be instrumented. But since this is rare, it is safer to mark
8165 here in case the order of calling (analyze/insert)_bct would be changed. */
8166 for (i=loop_num; i != -1; i = loop_outer_loop[i])
8167 loop_used_count_register[i] = 1;
8168
8169 /* Set data structures which will be used by the instrumentation phase */
8170 loop_start_value[loop_num] = initial_value;
8171 loop_comparison_value[loop_num] = comparison_value;
8172 loop_increment[loop_num] = increment;
8173 loop_comparison_code[loop_num] = comparison_code;
8174 loop_can_insert_bct[loop_num] = 1;
8175 }
8176
8177
8178 /* instrument loop for insertion of bct instruction. We distinguish between
8179 loops with compile-time bounds and those with run-time bounds. The loop
8180 behaviour is analyzed according to the following characteristics/variables:
8181 ; Input variables:
8182 ; comparison-value: the value to which the iteration counter is compared.
8183 ; initial-value: iteration-counter initial value.
8184 ; increment: iteration-counter increment.
8185 ; Computed variables:
8186 ; increment-direction: the sign of the increment.
8187 ; compare-direction: '1' for GT, GTE, '-1' for LT, LTE, '0' for NE.
8188 ; range-direction: sign (comparison-value - initial-value)
8189 We give up on the following cases:
8190 ; loop variable overflow.
8191 ; run-time loop bounds with comparison code NE.
8192 */
8193
8194 static void
8195 insert_bct (loop_start, loop_end)
8196 rtx loop_start, loop_end;
8197 {
8198 rtx initial_value, comparison_value, increment;
8199 enum rtx_code comparison_code;
8200
8201 int increment_direction, compare_direction;
8202 int unsigned_p = 0;
8203
8204 /* if the loop condition is <= or >=, the number of iterations
8205 is 1 more than the range of the bounds of the loop */
8206 int add_iteration = 0;
8207
8208 /* the only machine mode we work with is the integer mode of the
8209 machine's word size */
8210 enum machine_mode loop_var_mode = word_mode;
8211
8212 int loop_num = uid_loop_num [INSN_UID (loop_start)];
8213
8214 /* get loop-variables. No need to check that these are valid - already
8215 checked in analyze_loop_iterations (). */
8216 comparison_code = loop_comparison_code[loop_num];
8217 initial_value = loop_start_value[loop_num];
8218 comparison_value = loop_comparison_value[loop_num];
8219 increment = loop_increment[loop_num];
8220
8221 /* check analyze_loop_iterations decision for this loop. */
8222 if (! loop_can_insert_bct[loop_num]){
8223 if (loop_dump_stream)
8224 fprintf (loop_dump_stream,
8225 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
8226 loop_num);
8227 return;
8228 }
8229
8230 /* It's impossible to instrument a completely unrolled loop. */
8231 if (loop_unroll_factor [loop_num] == -1)
8232 return;
8233
8234 /* make sure that the last loop insn is a conditional jump.
8235 This check is repeated from analyze_loop_iterations (),
8236 because unrolling might have changed that. */
8237 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
8238 || !condjump_p (PREV_INSN (loop_end))) {
8239 if (loop_dump_stream)
8240 fprintf (loop_dump_stream,
8241 "insert_bct: not instrumenting BCT because of invalid branch\n");
8242 return;
8243 }
8244
8245 /* fix increment in case loop was unrolled. */
8246 if (loop_unroll_factor [loop_num] > 1)
8247 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
8248
8249 /* determine properties and directions of the loop */
8250 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
8251 switch ( comparison_code ) {
8252 case LEU:
8253 unsigned_p = 1;
8254 /* fallthrough */
8255 case LE:
8256 compare_direction = 1;
8257 add_iteration = 1;
8258 break;
8259 case GEU:
8260 unsigned_p = 1;
8261 /* fallthrough */
8262 case GE:
8263 compare_direction = -1;
8264 add_iteration = 1;
8265 break;
8266 case EQ:
8267 /* in this case we cannot know the number of iterations */
8268 if (loop_dump_stream)
8269 fprintf (loop_dump_stream,
8270 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
8271 loop_num);
8272 return;
8273 case LTU:
8274 unsigned_p = 1;
8275 /* fallthrough */
8276 case LT:
8277 compare_direction = 1;
8278 break;
8279 case GTU:
8280 unsigned_p = 1;
8281 /* fallthrough */
8282 case GT:
8283 compare_direction = -1;
8284 break;
8285 case NE:
8286 compare_direction = 0;
8287 break;
8288 default:
8289 abort ();
8290 }
8291
8292
8293 /* make sure that the loop does not end by an overflow */
8294 if (compare_direction != increment_direction) {
8295 if (loop_dump_stream)
8296 fprintf (loop_dump_stream,
8297 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
8298 loop_num);
8299 return;
8300 }
8301
8302 /* try to instrument the loop. */
8303
8304 /* Handle the simpler case, where the bounds are known at compile time. */
8305 if (GET_CODE (initial_value) == CONST_INT
8306 && GET_CODE (comparison_value) == CONST_INT)
8307 {
8308 int n_iterations;
8309 int increment_value_abs = INTVAL (increment) * increment_direction;
8310
8311 /* check the relation between compare-val and initial-val */
8312 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
8313 int range_direction = (difference > 0) ? 1 : -1;
8314
8315 /* make sure the loop executes enough iterations to gain from BCT */
8316 if (difference > -3 && difference < 3) {
8317 if (loop_dump_stream)
8318 fprintf (loop_dump_stream,
8319 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
8320 loop_num);
8321 return;
8322 }
8323
8324 /* make sure that the loop executes at least once */
8325 if ((range_direction == 1 && compare_direction == -1)
8326 || (range_direction == -1 && compare_direction == 1))
8327 {
8328 if (loop_dump_stream)
8329 fprintf (loop_dump_stream,
8330 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
8331 loop_num);
8332 return;
8333 }
8334
8335 /* make sure that the loop does not end by an overflow. (With compile-time
8336 bounds we must have an additional check for overflow, because here
8337 we also support the compare code of 'NE'.) */
8338 if (comparison_code == NE
8339 && increment_direction != range_direction) {
8340 if (loop_dump_stream)
8341 fprintf (loop_dump_stream,
8342 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
8343 loop_num);
8344 return;
8345 }
8346
8347 /* Determine the number of iterations by:
8348 ;
8349 ; compare-val - initial-val + (increment -1) + additional-iteration
8350 ; num_iterations = -----------------------------------------------------------------
8351 ; increment
8352 */
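/* Worked example (illustrative numbers): for
       for (i = 0; i <= 10; i += 2)
   difference == 10, increment == 2 and add_iteration == 1, giving
   (10 + (2 - 1) + 1) / 2 == 6 iterations: i = 0, 2, 4, 6, 8, 10.  */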
8353 difference = (range_direction > 0) ? difference : -difference;
8354 #if 0
8355 fprintf (stderr, "difference is: %d\n", difference); /* @*/
8356 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
8357 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
8358 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
8359 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
8360 #endif
8361
8362 if (increment_value_abs == 0) {
8363 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
8364 abort ();
8365 }
8366 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
8367 / increment_value_abs;
8368
8369 #if 0
8370 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
8371 #endif
8372 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
8373
8374 /* Done with this loop. */
8375 return;
8376 }
8377
8378 /* Handle the more complex case, where the bounds are NOT known at compile
8379 time; here we generate a run-time calculation of the number of iterations. */
8380
8381 /* With runtime bounds, if the compare is of the form '!=' we give up */
8382 if (comparison_code == NE) {
8383 if (loop_dump_stream)
8384 fprintf (loop_dump_stream,
8385 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
8386 loop_num);
8387 return;
8388 }
8389
8390 else {
8391 /* We rely on the existence of a run-time guard to ensure that the
8392 loop executes at least once. */
8393 rtx sequence;
8394 rtx iterations_num_reg;
8395
8396 int increment_value_abs = INTVAL (increment) * increment_direction;
8397
8398 /* make sure that the increment is a power of two, otherwise an
8399 (expensive) divide would be needed. */
8400 if (exact_log2 (increment_value_abs) == -1)
8401 {
8402 if (loop_dump_stream)
8403 fprintf (loop_dump_stream,
8404 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
8405 return;
8406 }
8407
8408 /* compute the number of iterations */
8409 start_sequence ();
8410 {
8411 rtx temp_reg;
8412
8413 /* Again, the number of iterations is calculated by:
8414 ;
8415 ; compare-val - initial-val + (increment -1) + additional-iteration
8416 ; num_iterations = -----------------------------------------------------------------
8417 ; increment
8418 */
8419 /* ??? Do we have to call copy_rtx here before passing rtx to
8420 expand_binop? */
8421 if (compare_direction > 0) {
8422 /* <, <= :the loop variable is increasing */
8423 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
8424 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8425 }
8426 else {
8427 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
8428 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8429 }
8430
8431 if (increment_value_abs - 1 + add_iteration != 0)
8432 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
8433 GEN_INT (increment_value_abs - 1 + add_iteration),
8434 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8435
8436 if (increment_value_abs != 1)
8437 {
8438 /* ??? This will generate an expensive divide instruction for
8439 most targets. The original authors apparently expected this
8440 to be a shift, since they test for power-of-2 divisors above,
8441 but just naively generating a divide instruction will not give
8442 a shift. It happens to work for the PowerPC target because
8443 the rs6000.md file has a divide pattern that emits shifts.
8444 It will probably not work for any other target. */
8445 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
8446 temp_reg,
8447 GEN_INT (increment_value_abs),
8448 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8449 }
8450 else
8451 iterations_num_reg = temp_reg;
8452 }
8453 sequence = gen_sequence ();
8454 end_sequence ();
8455 emit_insn_before (sequence, loop_start);
8456 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
8457 }
8458 }
8459
8460 /* instrument loop by inserting a bct in it. This is done in the following way:
8461 1. A new register is created and assigned the hard register number of the count
8462 register.
8463 2. In the head of the loop the new variable is initialized by the value passed in the
8464 loop_num_iterations parameter.
8465 3. At the end of the loop, comparison of the register with 0 is generated.
8466 The created comparison follows the pattern defined for the
8467 decrement_and_branch_on_count insn, so this insn will be generated in the
8468 assembly generation phase.
8469 4. The compare&branch on the old variable is deleted. So, if the loop variable was
8470 not used elsewhere, it will be eliminated by data-flow analysis. */
8471
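/* As a sketch of the intent (not a guarantee): on a target such as
   rs6000, where COUNT_REGISTER_REGNUM names the CTR register, the
   compare-and-branch emitted below is meant to match the
   decrement_and_branch_on_count pattern and so end up as a single
   bdnz-style instruction.  */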
8472 static void
8473 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
8474 rtx loop_start, loop_end;
8475 rtx loop_num_iterations;
8476 {
8477 rtx temp_reg1, temp_reg2;
8478 rtx start_label;
8479
8480 rtx sequence;
8481 enum machine_mode loop_var_mode = word_mode;
8482
8483 if (HAVE_decrement_and_branch_on_count)
8484 {
8485 if (loop_dump_stream)
8486 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
8487
8488 /* Discard original jump to continue loop. Original compare result
8489 may still be live, so it cannot be discarded explicitly. */
8490 delete_insn (PREV_INSN (loop_end));
8491
8492 /* insert the label which will delimit the start of the loop */
8493 start_label = gen_label_rtx ();
8494 emit_label_after (start_label, loop_start);
8495
8496 /* insert initialization of the count register into the loop header */
8497 start_sequence ();
8498 temp_reg1 = gen_reg_rtx (loop_var_mode);
8499 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
8500
8501 /* this will be count register */
8502 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
8503 /* we have to move the value to the count register from a GPR
8504 because the rtx pointed to by loop_num_iterations could contain
8505 an expression which cannot be moved into the count register */
8506 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
8507
8508 sequence = gen_sequence ();
8509 end_sequence ();
8510 emit_insn_before (sequence, loop_start);
8511
8512 /* insert new comparison on the count register instead of the
8513 old one, generating the needed BCT pattern (that will be
8514 later recognized by assembly generation phase). */
8515 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2,
8516 start_label),
8517 loop_end);
8518 LABEL_NUSES (start_label)++;
8519 }
8520
8521 }
8522 #endif /* HAVE_decrement_and_branch_on_count */
8523
8524 #endif /* HAIFA */
8525
8526 /* Scan the function and determine whether it has indirect (computed) jumps.
8527
8528 This is taken mostly from flow.c; similar code exists elsewhere
8529 in the compiler. It may be useful to put this into rtlanal.c. */
8530 static int
8531 indirect_jump_in_function_p (start)
8532 rtx start;
8533 {
8534 rtx insn;
8535
8536 for (insn = start; insn; insn = NEXT_INSN (insn))
8537 if (computed_jump_p (insn))
8538 return 1;
8539
8540 return 0;
8541 }
8542
8543 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8544 documentation for LOOP_MEMS for the definition of `appropriate'.
8545 This function is called from prescan_loop via for_each_rtx. */
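/* Roughly speaking (see load_mems below): a candidate such as *P, where
   P is loop-invariant and the MEM is neither volatile nor BLKmode, may
   later be replaced by a pseudo register for the duration of the loop.  */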
8546
8547 static int
8548 insert_loop_mem (mem, data)
8549 rtx *mem;
8550 void *data;
8551 {
8552 int i;
8553 rtx m = *mem;
8554
8555 if (m == NULL_RTX)
8556 return 0;
8557
8558 switch (GET_CODE (m))
8559 {
8560 case MEM:
8561 break;
8562
8563 case CONST_DOUBLE:
8564 /* We're not interested in the MEM associated with a
8565 CONST_DOUBLE, so there's no need to traverse into this. */
8566 return -1;
8567
8568 default:
8569 /* This is not a MEM. */
8570 return 0;
8571 }
8572
8573 /* See if we've already seen this MEM. */
8574 for (i = 0; i < loop_mems_idx; ++i)
8575 if (rtx_equal_p (m, loop_mems[i].mem))
8576 {
8577 if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
8578 /* The modes of the two memory accesses are different. If
8579 this happens, something tricky is going on, and we just
8580 don't optimize accesses to this MEM. */
8581 loop_mems[i].optimize = 0;
8582
8583 return 0;
8584 }
8585
8586 /* Resize the array, if necessary. */
8587 if (loop_mems_idx == loop_mems_allocated)
8588 {
8589 if (loop_mems_allocated != 0)
8590 loop_mems_allocated *= 2;
8591 else
8592 loop_mems_allocated = 32;
8593
8594 loop_mems = (loop_mem_info*)
8595 xrealloc (loop_mems,
8596 loop_mems_allocated * sizeof (loop_mem_info));
8597 }
8598
8599 /* Actually insert the MEM. */
8600 loop_mems[loop_mems_idx].mem = m;
8601 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8602 because we can't put it in a register. We still store it in the
8603 table, though, so that if we see the same address later, but in a
8604 non-BLK mode, we'll not think we can optimize it at that point. */
8605 loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
8606 loop_mems[loop_mems_idx].reg = NULL_RTX;
8607 ++loop_mems_idx;
8608
8609 return 0;
8610 }

/* Like load_mems, but also ensures that N_TIMES_SET,
   MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
   values after load_mems.  */

static void
load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
                                     reg_single_usage, insn_count)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
     varray_type reg_single_usage;
     int *insn_count;
{
  int nregs = max_reg_num ();

  load_mems (scan_start, end, loop_top, start);

  /* Recalculate n_times_set and friends since load_mems may have
     created new registers.  */
  if (max_reg_num () > nregs)
    {
      int i;
      int old_nregs;

      old_nregs = nregs;
      nregs = max_reg_num ();

      if (nregs > n_times_set->num_elements)
        {
          /* Grow all the arrays.  */
          VARRAY_GROW (n_times_set, nregs);
          VARRAY_GROW (n_times_used, nregs);
          VARRAY_GROW (may_not_optimize, nregs);
          if (reg_single_usage)
            VARRAY_GROW (reg_single_usage, nregs);
        }
      /* Clear the arrays.  */
      bzero ((char *) &n_times_set->data, nregs * sizeof (int));
      bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
      if (reg_single_usage)
        bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));

      count_loop_regs_set (loop_top ? loop_top : start, end,
                           may_not_optimize, reg_single_usage,
                           insn_count, nregs);

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
          VARRAY_CHAR (may_not_optimize, i) = 1;
          VARRAY_INT (n_times_set, i) = 1;
        }

#ifdef AVOID_CCMODE_COPIES
      /* Don't try to move insns which set CC registers if we should not
         create CCmode register copies.  */
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
        if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
          VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

      /* Set n_times_used for the new registers.  */
      bcopy ((char *) (&n_times_set->data.i[0] + old_nregs),
             (char *) (&n_times_used->data.i[0] + old_nregs),
             (nregs - old_nregs) * sizeof (int));
    }
}

/* Move MEMs into registers for the duration of the loop.  SCAN_START
   is the first instruction in the loop (as it is executed).  The
   other parameters are as for next_insn_in_loop.  */

static void
load_mems (scan_start, end, loop_top, start)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
{
  int maybe_never = 0;
  int i;
  rtx p;
  rtx label = NULL_RTX;
  rtx end_label;

  if (loop_mems_idx > 0)
    {
      /* Nonzero if the next instruction may never be executed.  */
      int next_maybe_never = 0;

      /* Check to see if it's possible that some instructions in the
         loop are never executed.  */
      for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
           p != NULL_RTX && !maybe_never;
           p = next_insn_in_loop (p, scan_start, end, loop_top))
        {
          if (GET_CODE (p) == CODE_LABEL)
            maybe_never = 1;
          else if (GET_CODE (p) == JUMP_INSN
                   /* If we enter the loop in the middle, and scan
                      around to the beginning, don't set maybe_never
                      for that.  This must be an unconditional jump,
                      otherwise the code at the top of the loop might
                      never be executed.  Unconditional jumps are
                      followed by a barrier, then the loop end.  */
                   && ! (GET_CODE (p) == JUMP_INSN
                         && JUMP_LABEL (p) == loop_top
                         && NEXT_INSN (NEXT_INSN (p)) == end
                         && simplejump_p (p)))
            {
              if (!condjump_p (p))
                /* Something complicated.  */
                maybe_never = 1;
              else
                /* If there are any more instructions in the loop, they
                   might not be reached.  */
                next_maybe_never = 1;
            }
          else if (next_maybe_never)
            maybe_never = 1;
        }

      /* Actually move the MEMs.  */
      for (i = 0; i < loop_mems_idx; ++i)
        {
          int j;
          int written = 0;
          rtx reg;
          rtx mem = loop_mems[i].mem;

          if (MEM_VOLATILE_P (mem)
              || invariant_p (XEXP (mem, 0)) != 1)
            /* There's no telling whether or not MEM is modified.  */
            loop_mems[i].optimize = 0;

          /* Go through the MEMs written to in the loop to see if this
             one is aliased by one of them.  */
          for (j = 0; j < loop_store_mems_idx; ++j)
            {
              if (rtx_equal_p (mem, loop_store_mems[j]))
                written = 1;
              else if (true_dependence (loop_store_mems[j], VOIDmode,
                                        mem, rtx_varies_p))
                {
                  /* MEM is indeed aliased by this store.  */
                  loop_mems[i].optimize = 0;
                  break;
                }
            }

          /* If this MEM is written to, we must be sure that there
             are no reads from another MEM that aliases this one.  */
          if (loop_mems[i].optimize && written)
            {
              int j;

              for (j = 0; j < loop_mems_idx; ++j)
                {
                  if (j == i)
                    continue;
                  else if (true_dependence (mem,
                                            VOIDmode,
                                            loop_mems[j].mem,
                                            rtx_varies_p))
                    {
                      /* It's not safe to hoist loop_mems[i] out of
                         the loop because writes to it might not be
                         seen by reads from loop_mems[j].  */
                      loop_mems[i].optimize = 0;
                      break;
                    }
                }
            }

          if (maybe_never && may_trap_p (mem))
            /* We can't access the MEM outside the loop; it might
               cause a trap that wouldn't have happened otherwise.  */
            loop_mems[i].optimize = 0;

          if (!loop_mems[i].optimize)
            /* We thought we were going to lift this MEM out of the
               loop, but later discovered that we could not.  */
            continue;

          /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
             order to keep scan_loop from moving stores to this MEM
             out of the loop just because this REG is neither a
             user-variable nor used in the loop test.  */
          reg = gen_reg_rtx (GET_MODE (mem));
          REG_USERVAR_P (reg) = 1;
          loop_mems[i].reg = reg;

          /* Now, replace all references to the MEM with the
             corresponding pseudos.  */
          for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
               p != NULL_RTX;
               p = next_insn_in_loop (p, scan_start, end, loop_top))
            {
              rtx_and_int ri;
              ri.r = p;
              ri.i = i;
              for_each_rtx (&p, replace_loop_mem, &ri);
            }

          if (!apply_change_group ())
            /* We couldn't replace all occurrences of the MEM.  */
            loop_mems[i].optimize = 0;
          else
            {
              rtx set;

              /* Load the memory immediately before START, which is
                 the NOTE_LOOP_BEG.  */
              set = gen_rtx_SET (GET_MODE (reg), reg, mem);
              emit_insn_before (set, start);

              if (written)
                {
                  if (label == NULL_RTX)
                    {
                      /* We must compute the former
                         right-after-the-end label before we insert
                         the new one.  */
                      end_label = next_label (end);
                      label = gen_label_rtx ();
                      emit_label_after (label, end);
                    }

                  /* Store the memory immediately after END, which is
                     the NOTE_LOOP_END.  */
                  set = gen_rtx_SET (GET_MODE (reg), copy_rtx (mem), reg);
                  emit_insn_after (set, label);
                }

              if (loop_dump_stream)
                {
                  fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
                           REGNO (reg), (written ? "r/w" : "r/o"));
                  print_rtl (loop_dump_stream, mem);
                  fputc ('\n', loop_dump_stream);
                }
            }
        }
    }

  if (label != NULL_RTX)
    {
      /* Now, we need to replace all references to the previous exit
         label with the new one.  */
      rtx_pair rr;
      rr.r1 = end_label;
      rr.r2 = label;

      for (p = start; p != end; p = NEXT_INSN (p))
        {
          for_each_rtx (&p, replace_label, &rr);

          /* If this is a JUMP_INSN, then we also need to fix the
             JUMP_LABEL field.  This is not handled by for_each_rtx
             because it doesn't handle unprinted ('0') fields.  We need
             to update JUMP_LABEL because the immediately following
             unroll pass will use it.  replace_label would not work
             anyway, because it only handles LABEL_REFs.  */
          if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
            JUMP_LABEL (p) = label;
        }
    }
}
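
/* An illustrative sketch, in source form, of what load_mems does.
   Given a loop that reads and writes through a loop-invariant,
   non-volatile, unaliased address:

        for (i = 0; i < n; i++)
          *p += a[i];

   the MEM is replaced throughout the loop body by a new pseudo, a
   load is emitted before the NOTE_LOOP_BEG, and, because the MEM is
   written, a store is emitted after a new label past the
   NOTE_LOOP_END (to which all exit branches are redirected):

        tmp = *p;
        for (i = 0; i < n; i++)
          tmp += a[i];
        *p = tmp;
   */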

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually an
   rtx_and_int * describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  rtx_and_int *ri;
  rtx insn;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  ri = (rtx_and_int *) data;
  i = ri->i;

  if (!rtx_equal_p (loop_mems[i].mem, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  insn = ri->r;

  /* Actually replace the MEM.  */
  validate_change (insn, mem, loop_mems[i].reg, 1);

  return 0;
}
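
/* Note that validate_change is called above with its IN_GROUP argument
   nonzero, so each replacement is merely queued.  Nothing is committed
   until load_mems calls apply_change_group, which re-recognizes every
   changed insn and cancels the entire group if any replacement fails;
   that failure is what makes load_mems clear loop_mems[i].optimize.  */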

/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}