* loop.c (scan_loop): Honor AVOID_CCMODE_COPIES.
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
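/* For example (a hypothetical source-level sketch; the pass itself
   works on RTL, and `a', `x', `y', `t' and `p' are made-up names,
   assuming 4-byte array elements), given

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   moving invariants hoists the computation of x * y in front of the
   loop, and strength reduction replaces the multiplication hidden in
   the address a + i * 4 with a pointer incremented by 4 on each
   iteration, roughly:

	t = x * y;
	for (p = a; p < a + n; p++)
	  *p = t;  */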
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "obstack.h"
41 #include "expr.h"
42 #include "insn-config.h"
43 #include "insn-flags.h"
44 #include "regs.h"
45 #include "hard-reg-set.h"
46 #include "recog.h"
47 #include "flags.h"
48 #include "real.h"
49 #include "loop.h"
50 #include "except.h"
51 #include "toplev.h"
52
53 /* Vector mapping INSN_UIDs to luids.
54 The luids are like uids but increase monotonically always.
55 We use them to see whether a jump comes from outside a given loop. */
56
57 int *uid_luid;
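/* For example (hypothetical uids), insns appearing in the stream with
   uids 40, 7 and 23, in that order, are assigned the increasing luids
   1, 2 and 3: position in the stream, not the uid, determines the
   luid. */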
58
59 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
60 number the insn is contained in. */
61
62 int *uid_loop_num;
63
64 /* 1 + largest uid of any insn. */
65
66 int max_uid_for_loop;
67
68 /* 1 + luid of last insn. */
69
70 static int max_luid;
71
72 /* Number of loops detected in current function. Used as index to the
73 next few tables. */
74
75 static int max_loop_num;
76
77 /* Indexed by loop number, contains the first and last insn of each loop. */
78
79 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
80
81 /* For each loop, gives the containing loop number, -1 if none. */
82
83 int *loop_outer_loop;
84
85 #ifdef HAIFA
86 /* The main output of analyze_loop_iterations is placed here */
87
88 int *loop_can_insert_bct;
89
90 /* For each loop, determines whether any of its inner loops have used
91 the count register. */
92
93 int *loop_used_count_register;
94
95 /* loop parameters for arithmetic loops. These loops have a loop variable
96 which is initialized to loop_start_value, incremented in each iteration
97 by "loop_increment". At the end of the iteration the loop variable is
98 compared to the loop_comparison_value (using loop_comparison_code). */
99
100 rtx *loop_increment;
101 rtx *loop_comparison_value;
102 rtx *loop_start_value;
103 enum rtx_code *loop_comparison_code;
104 #endif /* HAIFA */
105
106 /* For each loop, keep track of its unrolling factor.
107 Potential values:
108 0: unrolled
109 1: not unrolled
110 -1: completely unrolled
111 >0: holds the exact unroll factor. */
112 int *loop_unroll_factor;
113
114 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
115 really a loop (an insn outside the loop branches into it). */
116
117 static char *loop_invalid;
118
119 /* Indexed by loop number, links together all LABEL_REFs which refer to
120 code labels outside the loop. Used by routines that need to know all
121 loop exits, such as final_biv_value and final_giv_value.
122
123 This does not include loop exits due to return instructions. This is
124 because all bivs and givs are pseudos, and hence must be dead after a
125 return, so the presence of a return does not affect any of the
126 optimizations that use this info. It is simpler to just not include return
127 instructions on this list. */
128
129 rtx *loop_number_exit_labels;
130
131 /* Indexed by loop number, counts the number of LABEL_REFs on
132 loop_number_exit_labels for this loop and all loops nested inside it. */
133
134 int *loop_number_exit_count;
135
136 /* Holds the number of loop iterations. It is zero if the number could not be
137 calculated. Must be unsigned since the number of iterations can
138 be as high as 2^wordsize-1. For loops with a wider iterator, this number
139 will be zero if the number of loop iterations is too large for an
140 unsigned integer to hold. */
141
142 unsigned HOST_WIDE_INT loop_n_iterations;
143
144 /* Nonzero if there is a subroutine call in the current loop. */
145
146 static int loop_has_call;
147
148 /* Nonzero if there is a volatile memory reference in the current
149 loop. */
150
151 static int loop_has_volatile;
152
153 /* loop_continue is the NOTE_INSN_LOOP_CONT of the
154 current loop. A continue statement will generate a branch to
155 NEXT_INSN (loop_continue). */
156
157 static rtx loop_continue;
158
159 /* Indexed by register number, contains the number of times the reg
160 is set during the loop being scanned.
161 During code motion, a negative value indicates a reg that has been
162 made a candidate; in particular -2 means that it is a candidate that
163 we know is equal to a constant and -1 means that it is a candidate
164 not known equal to a constant.
165 After code motion, regs moved have 0 (which is accurate now)
166 while the failed candidates have the original number of times set.
167
168 Therefore, at all times, == 0 indicates an invariant register;
169 < 0 a conditionally invariant one. */
170
171 static int *n_times_set;
172
173 /* Original value of n_times_set; same except that this value
174 is not set negative for a reg whose sets have been made candidates
175 and not set to 0 for a reg that is moved. */
176
177 static int *n_times_used;
178
179 /* Indexed by register number; 1 indicates that the register
180 cannot be moved or strength reduced. */
181
182 static char *may_not_optimize;
183
184 /* Nonzero means reg N has already been moved out of one loop.
185 This reduces the desire to move it out of another. */
186
187 static char *moved_once;
188
189 /* Array of MEMs that are stored in this loop. If there are too many to fit
190 here, we just turn on unknown_address_altered. */
191
192 #define NUM_STORES 30
193 static rtx loop_store_mems[NUM_STORES];
194
195 /* Index of first available slot in above array. */
196 static int loop_store_mems_idx;
197
198 typedef struct loop_mem_info {
199 rtx mem; /* The MEM itself. */
200 rtx reg; /* Corresponding pseudo, if any. */
201 int optimize; /* Nonzero if we can optimize access to this MEM. */
202 } loop_mem_info;
203
204 /* Array of MEMs that are used (read or written) in this loop, but
205 cannot be aliased by anything in this loop, except perhaps
206 themselves. In other words, if loop_mems[i] is altered during the
207 loop, it is altered by an expression that is rtx_equal_p to it. */
208
209 static loop_mem_info *loop_mems;
210
211 /* The index of the next available slot in LOOP_MEMS. */
212
213 static int loop_mems_idx;
214
215 /* The number of elements allocated in LOOP_MEMs. */
216
217 static int loop_mems_allocated;
218
219 /* Nonzero if we don't know what MEMs were changed in the current loop.
220 This happens if the loop contains a call (in which case `loop_has_call'
221 will also be set) or if we store into more than NUM_STORES MEMs. */
222
223 static int unknown_address_altered;
224
225 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
226 static int num_movables;
227
228 /* Count of memory write instructions discovered in the loop. */
229 static int num_mem_sets;
230
231 /* Number of loops contained within the current one, including itself. */
232 static int loops_enclosed;
233
234 /* Bound on pseudo register number before loop optimization.
235 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
236 int max_reg_before_loop;
237
238 /* This obstack is used in product_cheap_p to allocate its rtl. It
239 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
240 If we used the same obstack that it did, we would be deallocating
241 that array. */
242
243 static struct obstack temp_obstack;
244
245 /* This is where the pointer to the obstack being used for RTL is stored. */
246
247 extern struct obstack *rtl_obstack;
248
249 #define obstack_chunk_alloc xmalloc
250 #define obstack_chunk_free free
251 \f
252 /* During the analysis of a loop, a chain of `struct movable's
253 is made to record all the movable insns found.
254 Then the entire chain can be scanned to decide which to move. */
255
256 struct movable
257 {
258 rtx insn; /* A movable insn */
259 rtx set_src; /* The expression this reg is set from. */
260 rtx set_dest; /* The destination of this SET. */
261 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
262 of any registers used within the LIBCALL. */
263 int consec; /* Number of consecutive following insns
264 that must be moved with this one. */
265 int regno; /* The register it sets */
266 short lifetime; /* lifetime of that register;
267 may be adjusted when matching movables
268 that load the same value are found. */
269 short savings; /* Number of insns we can move for this reg,
270 including other movables that force this
271 or match this one. */
272 unsigned int cond : 1; /* 1 if only conditionally movable */
273 unsigned int force : 1; /* 1 means MUST move this insn */
274 unsigned int global : 1; /* 1 means reg is live outside this loop */
275 /* If PARTIAL is 1, GLOBAL means something different:
276 that the reg is live outside the range from where it is set
277 to the following label. */
278 unsigned int done : 1; /* 1 inhibits further processing of this */
279
280 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
281 In particular, moving it does not make it
282 invariant. */
283 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
284 load SRC, rather than copying INSN. */
285 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
286 first insn of a consecutive sets group. */
287 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
288 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
289 that we should avoid changing when clearing
290 the rest of the reg. */
291 struct movable *match; /* First entry for same value */
292 struct movable *forces; /* An insn that must be moved if this is */
293 struct movable *next;
294 };
295
296 static struct movable *the_movables;
297
298 FILE *loop_dump_stream;
299
300 /* Forward declarations. */
301
302 static void find_and_verify_loops PROTO((rtx));
303 static void mark_loop_jump PROTO((rtx, int));
304 static void prescan_loop PROTO((rtx, rtx));
305 static int reg_in_basic_block_p PROTO((rtx, rtx));
306 static int consec_sets_invariant_p PROTO((rtx, int, rtx));
307 static rtx libcall_other_reg PROTO((rtx, rtx));
308 static int labels_in_range_p PROTO((rtx, int));
309 static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
310 static void note_addr_stored PROTO((rtx, rtx));
311 static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
312 static void scan_loop PROTO((rtx, rtx, int));
313 #if 0
314 static void replace_call_address PROTO((rtx, rtx, rtx));
315 #endif
316 static rtx skip_consec_insns PROTO((rtx, int));
317 static int libcall_benefit PROTO((rtx));
318 static void ignore_some_movables PROTO((struct movable *));
319 static void force_movables PROTO((struct movable *));
320 static void combine_movables PROTO((struct movable *, int));
321 static int regs_match_p PROTO((rtx, rtx, struct movable *));
322 static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
323 static void add_label_notes PROTO((rtx, rtx));
324 static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
325 static int count_nonfixed_reads PROTO((rtx));
326 static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
327 static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
328 static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
329 static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
330 static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
331 static void check_final_value PROTO((struct induction *, rtx, rtx));
332 static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
333 static void update_giv_derive PROTO((rtx));
334 static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
335 static rtx simplify_giv_expr PROTO((rtx, int *));
336 static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
337 static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
338 static int check_dbra_loop PROTO((rtx, int, rtx));
339 static rtx express_from_1 PROTO((rtx, rtx, rtx));
340 static rtx express_from PROTO((struct induction *, struct induction *));
341 static rtx combine_givs_p PROTO((struct induction *, struct induction *));
342 static void combine_givs PROTO((struct iv_class *));
343 static int product_cheap_p PROTO((rtx, rtx));
344 static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
345 static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
346 static int last_use_this_basic_block PROTO((rtx, rtx));
347 static void record_initial PROTO((rtx, rtx));
348 static void update_reg_last_use PROTO((rtx, rtx));
349 static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
350 static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
351 rtx, rtx *, int *));
352 static void load_mems PROTO((rtx, rtx, rtx, rtx));
353 static int insert_loop_mem PROTO((rtx *, void *));
354 static int replace_loop_mem PROTO((rtx *, void *));
355 static int replace_label PROTO((rtx *, void *));
356
357 typedef struct rtx_and_int {
358 rtx r;
359 int i;
360 } rtx_and_int;
361
362 typedef struct rtx_pair {
363 rtx r1;
364 rtx r2;
365 } rtx_pair;
366
367 /* Nonzero iff INSN is between START and END, inclusive. */
368 #define INSN_IN_RANGE_P(INSN, START, END) \
369 (INSN_UID (INSN) < max_uid_for_loop \
370 && INSN_LUID (INSN) >= INSN_LUID (START) \
371 && INSN_LUID (INSN) <= INSN_LUID (END))
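
/* A typical use appears in scan_loop below: testing whether the
   JUMP_LABEL of a loop-entry jump still lies within the loop, as in
   INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end). The INSN_UID
   bound guards against insns created by an earlier run of loop, for
   which no luid is recorded. */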
372
373 #ifdef HAIFA
374 /* This is extern from unroll.c */
375 extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
376
377 /* Two main functions for implementing bct:
378 the first is called before loop unrolling, the second after. */
379 #ifdef HAVE_decrement_and_branch_on_count
380 static void analyze_loop_iterations PROTO((rtx, rtx));
381 static void insert_bct PROTO((rtx, rtx));
382
383 /* Auxiliary function that inserts the bct pattern into the loop */
384 static void instrument_loop_bct PROTO((rtx, rtx, rtx));
385 #endif /* HAVE_decrement_and_branch_on_count */
386 #endif /* HAIFA */
387
388 /* Indirect_jump_in_function is computed once per function. */
389 int indirect_jump_in_function = 0;
390 static int indirect_jump_in_function_p PROTO((rtx));
391
392 \f
393 /* Relative gain of eliminating various kinds of operations. */
394 static int add_cost;
395 #if 0
396 static int shift_cost;
397 static int mult_cost;
398 #endif
399
400 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
401 copy the value of the strength reduced giv to its original register. */
402 static int copy_cost;
403
404 /* Cost of using a register, to normalize the benefits of a giv. */
405 static int reg_address_cost;
406
407
408 void
409 init_loop ()
410 {
411 char *free_point = (char *) oballoc (1);
412 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
413
414 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
415
416 #ifdef ADDRESS_COST
417 reg_address_cost = ADDRESS_COST (reg);
418 #else
419 reg_address_cost = rtx_cost (reg, MEM);
420 #endif
421
422 /* We multiply by 2 to reconcile the difference in scale between
423 these two ways of computing costs. Otherwise the cost of a copy
424 will be far less than the cost of an add. */
425
426 copy_cost = 2 * 2;
427
428 /* Free the objects we just allocated. */
429 obfree (free_point);
430
431 /* Initialize the obstack used for rtl in product_cheap_p. */
432 gcc_obstack_init (&temp_obstack);
433 }
434 \f
435 /* Entry point of this file. Perform loop optimization
436 on the current function. F is the first insn of the function
437 and DUMPFILE is a stream for output of a trace of actions taken
438 (or 0 if none should be output). */
439
440 void
441 loop_optimize (f, dumpfile, unroll_p)
442 /* f is the first instruction of a chain of insns for one function */
443 rtx f;
444 FILE *dumpfile;
445 int unroll_p;
446 {
447 register rtx insn;
448 register int i;
449 rtx last_insn;
450
451 loop_dump_stream = dumpfile;
452
453 init_recog_no_volatile ();
454
455 max_reg_before_loop = max_reg_num ();
456
457 moved_once = (char *) alloca (max_reg_before_loop);
458 bzero (moved_once, max_reg_before_loop);
459
460 regs_may_share = 0;
461
462 /* Count the number of loops. */
463
464 max_loop_num = 0;
465 for (insn = f; insn; insn = NEXT_INSN (insn))
466 {
467 if (GET_CODE (insn) == NOTE
468 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
469 max_loop_num++;
470 }
471
472 /* Don't waste time if no loops. */
473 if (max_loop_num == 0)
474 return;
475
476 /* Get size to use for tables indexed by uids.
477 Leave some space for labels allocated by find_and_verify_loops. */
478 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
479
480 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
481 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
482
483 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
484 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
485
486 /* Allocate tables for recording each loop. We set each entry, so they need
487 not be zeroed. */
488 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
489 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
490 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
491 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
492 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
493 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
494
495 /* These are initialized by the unrolling code, so we go ahead
496 and clear them just in case we are not performing loop
497 unrolling. */
498 loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
499 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
500
501 #ifdef HAIFA
502 /* Allocate for BCT optimization */
503 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
504 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
505
506 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
507 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
508
509 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
510 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
511 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
512 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
513 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
514 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
515
516 loop_comparison_code
517 = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
518 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
519 #endif /* HAIFA */
520
521 /* Find and process each loop.
522 First, find them, and record them in order of their beginnings. */
523 find_and_verify_loops (f);
524
525 /* Now find all register lifetimes. This must be done after
526 find_and_verify_loops, because it might reorder the insns in the
527 function. */
528 reg_scan (f, max_reg_num (), 1);
529
530 /* This must occur after reg_scan so that registers created by gcse
531 will have entries in the register tables.
532
533 We could have added a call to reg_scan after gcse_main in toplev.c,
534 but moving this call to init_alias_analysis is more efficient. */
535 init_alias_analysis ();
536
537 /* See if we went too far. */
538 if (get_max_uid () > max_uid_for_loop)
539 abort ();
540 /* Now reset it to the actual size we need. See above. */
541 max_uid_for_loop = get_max_uid () + 1;
542
543 /* Compute the mapping from uids to luids.
544 LUIDs are numbers assigned to insns, like uids,
545 except that luids increase monotonically through the code.
546 Don't assign luids to line-number NOTEs, so that the distance in luids
547 between two insns is not affected by -g. */
548
549 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
550 {
551 last_insn = insn;
552 if (GET_CODE (insn) != NOTE
553 || NOTE_LINE_NUMBER (insn) <= 0)
554 uid_luid[INSN_UID (insn)] = ++i;
555 else
556 /* Give a line number note the same luid as preceding insn. */
557 uid_luid[INSN_UID (insn)] = i;
558 }
559
560 max_luid = i + 1;
561
562 /* Don't leave gaps in uid_luid for insns that have been
563 deleted. It is possible that the first or last insn
564 using some register has been deleted by cross-jumping.
565 Make sure that uid_luid for that former insn's uid
566 points to the general area where that insn used to be. */
567 for (i = 0; i < max_uid_for_loop; i++)
568 {
569 uid_luid[0] = uid_luid[i];
570 if (uid_luid[0] != 0)
571 break;
572 }
573 for (i = 0; i < max_uid_for_loop; i++)
574 if (uid_luid[i] == 0)
575 uid_luid[i] = uid_luid[i - 1];
576
577 /* Create a mapping from loops to BLOCK tree nodes. */
578 if (unroll_p && write_symbols != NO_DEBUG)
579 find_loop_tree_blocks ();
580
581 /* Determine if the function has an indirect jump. On some systems
582 this prevents low overhead loop instructions from being used. */
583 indirect_jump_in_function = indirect_jump_in_function_p (f);
584
585 /* Now scan the loops, last ones first, since this means inner ones are done
586 before outer ones. */
587 for (i = max_loop_num-1; i >= 0; i--)
588 if (! loop_invalid[i] && loop_number_loop_ends[i])
589 scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
590 unroll_p);
591
592 /* If debugging and unrolling loops, we must replicate the tree nodes
593 corresponding to the blocks inside the loop, so that the original one
594 to one mapping will remain. */
595 if (unroll_p && write_symbols != NO_DEBUG)
596 unroll_block_trees ();
597
598 end_alias_analysis ();
599 }
600 \f
601 /* Returns the next insn, in execution order, after INSN. START and
602 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
603 respectively. LOOP_TOP, if non-NULL, is the top of the loop in the
604 insn-stream; it is used with loops that are entered near the
605 bottom. */
606
607 static rtx
608 next_insn_in_loop (insn, start, end, loop_top)
609 rtx insn;
610 rtx start;
611 rtx end;
612 rtx loop_top;
613 {
614 insn = NEXT_INSN (insn);
615
616 if (insn == end)
617 {
618 if (loop_top)
619 /* Go to the top of the loop, and continue there. */
620 insn = loop_top;
621 else
622 /* We're done. */
623 insn = NULL_RTX;
624 }
625
626 if (insn == start)
627 /* We're done. */
628 insn = NULL_RTX;
629
630 return insn;
631 }
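
/* A typical use, as in scan_loop below, walks every insn of the loop
   in execution order starting at SCAN_START:

	for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	     p != NULL_RTX;
	     p = next_insn_in_loop (p, scan_start, end, loop_top))
	  ...process insn P...;  */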
632
633 /* Optimize one loop whose start is LOOP_START and end is END.
634 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
635 NOTE_INSN_LOOP_END. */
636
637 /* ??? Could also move memory writes out of loops if the destination address
638 is invariant, the source is invariant, the memory write is not volatile,
639 and if we can prove that no read inside the loop can read this address
640 before the write occurs. If there is a read of this address after the
641 write, then we can also mark the memory read as invariant. */
642
643 static void
644 scan_loop (loop_start, end, unroll_p)
645 rtx loop_start, end;
646 int unroll_p;
647 {
648 register int i;
649 rtx p;
650 /* 1 if we are scanning insns that could be executed zero times. */
651 int maybe_never = 0;
652 /* 1 if we are scanning insns that might never be executed
653 due to a subroutine call which might exit before they are reached. */
654 int call_passed = 0;
655 /* For a rotated loop that is entered near the bottom,
656 this is the label at the top. Otherwise it is zero. */
657 rtx loop_top = 0;
658 /* Jump insn that enters the loop, or 0 if control drops in. */
659 rtx loop_entry_jump = 0;
660 /* Place in the loop where control enters. */
661 rtx scan_start;
662 /* Number of insns in the loop. */
663 int insn_count;
664 int in_libcall = 0;
665 int tem;
666 rtx temp;
667 /* The SET from an insn, if it is the only SET in the insn. */
668 rtx set, set1;
669 /* Chain describing insns movable in current loop. */
670 struct movable *movables = 0;
671 /* Last element in `movables' -- so we can add elements at the end. */
672 struct movable *last_movable = 0;
673 /* Ratio of extra register life span we can justify
674 for saving an instruction. More if loop doesn't call subroutines
675 since in that case saving an insn makes more difference
676 and more registers are available. */
677 int threshold;
678 /* If we have calls, contains the insn in which a register was used
679 if it was used exactly once; contains const0_rtx if it was used more
680 than once. */
681 rtx *reg_single_usage = 0;
682 /* Nonzero if we are scanning instructions in a sub-loop. */
683 int loop_depth = 0;
684 int nregs;
685
686 /* Determine whether this loop starts with a jump down to a test at
687 the end. This will occur for a small number of loops with a test
688 that is too complex to duplicate in front of the loop.
689
690 We search for the first insn or label in the loop, skipping NOTEs.
691 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
692 (because we might have a loop executed only once that contains a
693 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
694 (in case we have a degenerate loop).
695
696 Note that if we mistakenly think that a loop is entered at the top
697 when, in fact, it is entered at the exit test, the only effect will be
698 slightly poorer optimization. Making the opposite error can generate
699 incorrect code. Since very few loops now start with a jump to the
700 exit test, the code here to detect that case is very conservative. */
701
702 for (p = NEXT_INSN (loop_start);
703 p != end
704 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
705 && (GET_CODE (p) != NOTE
706 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
707 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
708 p = NEXT_INSN (p))
709 ;
710
711 scan_start = p;
712
713 /* Set up variables describing this loop. */
714 prescan_loop (loop_start, end);
715 threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
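  /* For instance (hypothetical numbers), with 31 non-fixed registers a
     loop without calls gets threshold = 2 * (1 + 31) = 64, while a loop
     containing a call gets only 1 * 32 = 32: calls leave fewer free
     registers and make each saved insn matter less. */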
716
717 /* If loop has a jump before the first label,
718 the true entry is the target of that jump.
719 Start scan from there.
720 But record in LOOP_TOP the place where the end-test jumps
721 back to so we can scan that after the end of the loop. */
722 if (GET_CODE (p) == JUMP_INSN)
723 {
724 loop_entry_jump = p;
725
726 /* Loop entry must be an unconditional jump (and not a RETURN). */
727 if (simplejump_p (p)
728 && JUMP_LABEL (p) != 0
729 /* Check to see whether the jump actually
730 jumps out of the loop (meaning it's no loop).
731 This case can happen for things like
732 do {..} while (0). If this label was generated previously
733 by loop, we can't tell anything about it and have to reject
734 the loop. */
735 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
736 {
737 loop_top = next_label (scan_start);
738 scan_start = JUMP_LABEL (p);
739 }
740 }
741
742 /* If SCAN_START was an insn created by loop, we don't know its luid
743 as required by loop_reg_used_before_p. So skip such loops. (This
744 test may never be true, but it's best to play it safe.)
745
746 Also, skip loops where we do not start scanning at a label. This
747 test also rejects loops starting with a JUMP_INSN that failed the
748 test above. */
749
750 if (INSN_UID (scan_start) >= max_uid_for_loop
751 || GET_CODE (scan_start) != CODE_LABEL)
752 {
753 if (loop_dump_stream)
754 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
755 INSN_UID (loop_start), INSN_UID (end));
756 return;
757 }
758
759 /* Count number of times each reg is set during this loop.
760 Set may_not_optimize[I] if it is not safe to move out
761 the setting of register I. If this loop has calls, set
762 reg_single_usage[I]. */
763
764 /* Allocate extra space for REGS that might be created by
765 load_mems. */
766 nregs = max_reg_num () + loop_mems_idx;
767 n_times_set = (int *) alloca (nregs * sizeof (int));
768 n_times_used = (int *) alloca (nregs * sizeof (int));
769 may_not_optimize = (char *) alloca (nregs);
770 bzero ((char *) n_times_set, nregs * sizeof (int));
771 bzero (may_not_optimize, nregs);
772
773 if (loop_has_call)
774 {
775 reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
776 bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
777 }
778
779 count_loop_regs_set (loop_top ? loop_top : loop_start, end,
780 may_not_optimize, reg_single_usage, &insn_count, nregs);
781
782 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
783 may_not_optimize[i] = 1, n_times_set[i] = 1;
784
785 #ifdef AVOID_CCMODE_COPIES
786 /* Don't try to move insns which set CC registers if we should not
787 create CCmode register copies. */
789 for (i = FIRST_PSEUDO_REGISTER; i < nregs - loop_mems_idx; i++)
790 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
791 may_not_optimize[i] = 1;
792 #endif
793
794 bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
795
796 if (loop_dump_stream)
797 {
798 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
799 INSN_UID (loop_start), INSN_UID (end), insn_count);
800 if (loop_continue)
801 fprintf (loop_dump_stream, "Continue at insn %d.\n",
802 INSN_UID (loop_continue));
803 }
804
805 /* Scan through the loop finding insns that are safe to move.
806 Set n_times_set negative for the reg being set, so that
807 this reg will be considered invariant for subsequent insns.
808 We consider whether subsequent insns use the reg
809 in deciding whether it is worth actually moving.
810
811 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
812 and therefore it is possible that the insns we are scanning
813 would never be executed. At such times, we must make sure
814 that it is safe to execute the insn once instead of zero times.
815 When MAYBE_NEVER is 0, all insns will be executed at least once
816 so that is not a problem. */
817
818 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
819 p != NULL_RTX;
820 p = next_insn_in_loop (p, scan_start, end, loop_top))
821 {
822 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
823 && find_reg_note (p, REG_LIBCALL, NULL_RTX))
824 in_libcall = 1;
825 else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
826 && find_reg_note (p, REG_RETVAL, NULL_RTX))
827 in_libcall = 0;
828
829 if (GET_CODE (p) == INSN
830 && (set = single_set (p))
831 && GET_CODE (SET_DEST (set)) == REG
832 && ! may_not_optimize[REGNO (SET_DEST (set))])
833 {
834 int tem1 = 0;
835 int tem2 = 0;
836 int move_insn = 0;
837 rtx src = SET_SRC (set);
838 rtx dependencies = 0;
839
840 /* Figure out what to use as a source of this insn. If a REG_EQUIV
841 note is given or if a REG_EQUAL note with a constant operand is
842 specified, use it as the source and mark that we should move
843 this insn by calling emit_move_insn rather than duplicating the
844 insn.
845
846 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
847 is present. */
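	  /* For example (a hypothetical insn), if P is

	       (insn 42 ... (set (reg 100) (mult:SI (reg 101) (reg 102)))
		  (expr_list:REG_EQUAL (const_int 20) ...))

	     the constant from the REG_EQUAL note becomes SRC and
	     MOVE_INSN is set, so moving P emits a plain load of the
	     constant instead of a copy of the multiply. */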
848 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
849 if (temp)
850 src = XEXP (temp, 0), move_insn = 1;
851 else
852 {
853 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
854 if (temp && CONSTANT_P (XEXP (temp, 0)))
855 src = XEXP (temp, 0), move_insn = 1;
856 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
857 {
858 src = XEXP (temp, 0);
859 /* A libcall block can use regs that don't appear in
860 the equivalent expression. To move the libcall,
861 we must move those regs too. */
862 dependencies = libcall_other_reg (p, src);
863 }
864 }
865
866 /* Don't try to optimize a register that was made
867 by loop-optimization for an inner loop.
868 We don't know its life-span, so we can't compute the benefit. */
869 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
870 ;
871 /* In order to move a register, we need to have one of three cases:
872 (1) it is used only in the same basic block as the set
873 (2) it is not a user variable and it is not used in the
874 exit test (this can cause the variable to be used
875 before it is set just like a user-variable).
876 (3) the set is guaranteed to be executed once the loop starts,
877 and the reg is not used until after that. */
878 else if (! ((! maybe_never
879 && ! loop_reg_used_before_p (set, p, loop_start,
880 scan_start, end))
881 || (! REG_USERVAR_P (SET_DEST (set))
882 && ! REG_LOOP_TEST_P (SET_DEST (set)))
883 || reg_in_basic_block_p (p, SET_DEST (set))))
884 ;
885 else if ((tem = invariant_p (src))
886 && (dependencies == 0
887 || (tem2 = invariant_p (dependencies)) != 0)
888 && (n_times_set[REGNO (SET_DEST (set))] == 1
889 || (tem1
890 = consec_sets_invariant_p (SET_DEST (set),
891 n_times_set[REGNO (SET_DEST (set))],
892 p)))
893 /* If the insn can cause a trap (such as divide by zero),
894 can't move it unless it's guaranteed to be executed
895 once loop is entered. Even a function call might
896 prevent the trap insn from being reached
897 (since it might exit!) */
898 && ! ((maybe_never || call_passed)
899 && may_trap_p (src)))
900 {
901 register struct movable *m;
902 register int regno = REGNO (SET_DEST (set));
903
904 /* A potential lossage is where we have a case where two insns
905 can be combined as long as they are both in the loop, but
906 we move one of them outside the loop. For large loops,
907 this can lose. The most common case of this is the address
908 of a function being called.
909
910 Therefore, if this register is marked as being used exactly
911 once if we are in a loop with calls (a "large loop"), see if
912 we can replace the usage of this register with the source
913 of this SET. If we can, delete this insn.
914
915 Don't do this if P has a REG_RETVAL note or if we have
916 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
917
918 if (reg_single_usage && reg_single_usage[regno] != 0
919 && reg_single_usage[regno] != const0_rtx
920 && REGNO_FIRST_UID (regno) == INSN_UID (p)
921 && (REGNO_LAST_UID (regno)
922 == INSN_UID (reg_single_usage[regno]))
923 && n_times_set[REGNO (SET_DEST (set))] == 1
924 && ! side_effects_p (SET_SRC (set))
925 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
926 && (! SMALL_REGISTER_CLASSES
927 || (! (GET_CODE (SET_SRC (set)) == REG
928 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
929 /* This test is not redundant; SET_SRC (set) might be
930 a call-clobbered register and the life of REGNO
931 might span a call. */
932 && ! modified_between_p (SET_SRC (set), p,
933 reg_single_usage[regno])
934 && no_labels_between_p (p, reg_single_usage[regno])
935 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
936 reg_single_usage[regno]))
937 {
938 /* Replace any usage in a REG_EQUAL note. Must copy the
939 new source, so that we don't get rtx sharing between the
940 SET_SOURCE and REG_NOTES of insn p. */
941 REG_NOTES (reg_single_usage[regno])
942 = replace_rtx (REG_NOTES (reg_single_usage[regno]),
943 SET_DEST (set), copy_rtx (SET_SRC (set)));
944
945 PUT_CODE (p, NOTE);
946 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
947 NOTE_SOURCE_FILE (p) = 0;
948 n_times_set[regno] = 0;
949 continue;
950 }
951
952 m = (struct movable *) alloca (sizeof (struct movable));
953 m->next = 0;
954 m->insn = p;
955 m->set_src = src;
956 m->dependencies = dependencies;
957 m->set_dest = SET_DEST (set);
958 m->force = 0;
959 m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
960 m->done = 0;
961 m->forces = 0;
962 m->partial = 0;
963 m->move_insn = move_insn;
964 m->move_insn_first = 0;
965 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
966 m->savemode = VOIDmode;
967 m->regno = regno;
968 /* Set M->cond if either invariant_p or consec_sets_invariant_p
969 returned 2 (only conditionally invariant). */
970 m->cond = ((tem | tem1 | tem2) > 1);
971 m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
972 || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
973 m->match = 0;
974 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
975 - uid_luid[REGNO_FIRST_UID (regno)]);
976 m->savings = n_times_used[regno];
977 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
978 m->savings += libcall_benefit (p);
979 n_times_set[regno] = move_insn ? -2 : -1;
980 /* Add M to the end of the chain MOVABLES. */
981 if (movables == 0)
982 movables = m;
983 else
984 last_movable->next = m;
985 last_movable = m;
986
987 if (m->consec > 0)
988 {
989 /* It is possible for the first instruction to have a
990 REG_EQUAL note but a non-invariant SET_SRC, so we must
991 remember the status of the first instruction in case
992 the last instruction doesn't have a REG_EQUAL note. */
993 m->move_insn_first = m->move_insn;
994
995 /* Skip this insn, not checking REG_LIBCALL notes. */
996 p = next_nonnote_insn (p);
997 /* Skip the consecutive insns, if there are any. */
998 p = skip_consec_insns (p, m->consec);
999 /* Back up to the last insn of the consecutive group. */
1000 p = prev_nonnote_insn (p);
1001
1002 /* We must now reset m->move_insn, m->is_equiv, and possibly
1003 m->set_src to correspond to the effects of all the
1004 insns. */
1005 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1006 if (temp)
1007 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1008 else
1009 {
1010 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1011 if (temp && CONSTANT_P (XEXP (temp, 0)))
1012 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1013 else
1014 m->move_insn = 0;
1015
1016 }
1017 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1018 }
1019 }
1020 /* If this register is always set within a STRICT_LOW_PART
1021 or set to zero, then its high bytes are constant.
1022 So clear them outside the loop and within the loop
1023 just load the low bytes.
1024 We must check that the machine has an instruction to do so.
1025 Also, if the value loaded into the register
1026 depends on the same register, this cannot be done. */
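	  /* Concretely, the pair of insns matched here looks like (a
	     sketch with hypothetical modes):

	       (set (reg:SI r) (const_int 0))
	       (set (strict_low_part (subreg:HI (reg:SI r) 0)) ...)

	     where the source of the second set must not mention R. */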
1027 else if (SET_SRC (set) == const0_rtx
1028 && GET_CODE (NEXT_INSN (p)) == INSN
1029 && (set1 = single_set (NEXT_INSN (p)))
1030 && GET_CODE (set1) == SET
1031 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1032 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1033 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1034 == SET_DEST (set))
1035 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1036 {
1037 register int regno = REGNO (SET_DEST (set));
1038 if (n_times_set[regno] == 2)
1039 {
1040 register struct movable *m;
1041 m = (struct movable *) alloca (sizeof (struct movable));
1042 m->next = 0;
1043 m->insn = p;
1044 m->set_dest = SET_DEST (set);
1045 m->dependencies = 0;
1046 m->force = 0;
1047 m->consec = 0;
1048 m->done = 0;
1049 m->forces = 0;
1050 m->move_insn = 0;
1051 m->move_insn_first = 0;
1052 m->partial = 1;
1053 /* If the insn may not be executed on some cycles,
1054 we can't clear the whole reg; clear just high part.
1055 Not even if the reg is used only within this loop.
1056 Consider this:
1057 while (1)
1058 while (s != t) {
1059 if (foo ()) x = *s;
1060 use (x);
1061 }
1062 Clearing x before the inner loop could clobber a value
1063 being saved from the last time around the outer loop.
1064 However, if the reg is not used outside this loop
1065 and all uses of the register are in the same
1066 basic block as the store, there is no problem.
1067
1068 If this insn was made by loop, we don't know its
1069 INSN_LUID and hence must make a conservative
1070 assumption. */
1071 m->global = (INSN_UID (p) >= max_uid_for_loop
1072 || (uid_luid[REGNO_LAST_UID (regno)]
1073 > INSN_LUID (end))
1074 || (uid_luid[REGNO_FIRST_UID (regno)]
1075 < INSN_LUID (p))
1076 || (labels_in_range_p
1077 (p, uid_luid[REGNO_FIRST_UID (regno)])));
1078 if (maybe_never && m->global)
1079 m->savemode = GET_MODE (SET_SRC (set1));
1080 else
1081 m->savemode = VOIDmode;
1082 m->regno = regno;
1083 m->cond = 0;
1084 m->match = 0;
1085 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
1086 - uid_luid[REGNO_FIRST_UID (regno)]);
1087 m->savings = 1;
1088 n_times_set[regno] = -1;
1089 /* Add M to the end of the chain MOVABLES. */
1090 if (movables == 0)
1091 movables = m;
1092 else
1093 last_movable->next = m;
1094 last_movable = m;
1095 }
1096 }
1097 }
1098 /* Past a call insn, we get to insns which might not be executed
1099 because the call might exit. This matters for insns that trap.
1100 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
1101 so they don't count. */
1102 else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
1103 call_passed = 1;
1104 /* Past a label or a jump, we get to insns for which we
1105 can't count on whether or how many times they will be
1106 executed during each iteration. Therefore, we can
1107 only move out sets of trivial variables
1108 (those not used after the loop). */
1109 /* Similar code appears twice in strength_reduce. */
1110 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1111 /* If we enter the loop in the middle, and scan around to the
1112 beginning, don't set maybe_never for that. This must be an
1113 unconditional jump, otherwise the code at the top of the
1114 loop might never be executed. Unconditional jumps are
1115 followed by a barrier and then the loop end. */
1116 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
1117 && NEXT_INSN (NEXT_INSN (p)) == end
1118 && simplejump_p (p)))
1119 maybe_never = 1;
1120 else if (GET_CODE (p) == NOTE)
1121 {
1122 /* At the virtual top of a converted loop, insns are again known to
1123 be executed: logically, the loop begins here even though the exit
1124 code has been duplicated. */
1125 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1126 maybe_never = call_passed = 0;
1127 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1128 loop_depth++;
1129 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1130 loop_depth--;
1131 }
1132 }
1133
1134 /* If one movable subsumes another, ignore that other. */
1135
1136 ignore_some_movables (movables);
1137
1138 /* For each movable insn, see if the reg that it loads is last
1139 used by another conditionally movable insn.
1140 If so, record that the second insn "forces" the first one,
1141 since the second can be moved only if the first is. */
1142
1143 force_movables (movables);
1144
1145 /* See if there are multiple movable insns that load the same value.
1146 If there are, make all but the first point at the first one
1147 through the `match' field, and add the priorities of them
1148 all together as the priority of the first. */
1149
1150 combine_movables (movables, nregs);
1151
1152 /* Now consider each movable insn to decide whether it is worth moving.
1153 Store 0 in n_times_set for each reg that is moved.
1154
1155 Generally this increases code size, so do not move movables when
1156 optimizing for code size. */
1157
1158 if (! optimize_size)
1159 move_movables (movables, threshold,
1160 insn_count, loop_start, end, nregs);
1161
1162 /* Now candidates that still are negative are those not moved.
1163 Change n_times_set to indicate that those are not actually invariant. */
1164 for (i = 0; i < nregs; i++)
1165 if (n_times_set[i] < 0)
1166 n_times_set[i] = n_times_used[i];
1167
1168 /* Now that we've moved some things out of the loop, we are able to
1169 hoist even more memory references. There's no need to pass
1170 reg_single_usage this time, since we're done with it. */
1171 load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
1172 loop_start, 0,
1173 &insn_count);
1174
1175 if (flag_strength_reduce)
1176 {
1177 the_movables = movables;
1178 strength_reduce (scan_start, end, loop_top,
1179 insn_count, loop_start, end, unroll_p);
1180 }
1181 }
1182 \f
1183 /* Add elements to *OUTPUT to record all the pseudo-regs
1184 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1185
1186 void
1187 record_excess_regs (in_this, not_in_this, output)
1188 rtx in_this, not_in_this;
1189 rtx *output;
1190 {
1191 enum rtx_code code;
1192 char *fmt;
1193 int i;
1194
1195 code = GET_CODE (in_this);
1196
1197 switch (code)
1198 {
1199 case PC:
1200 case CC0:
1201 case CONST_INT:
1202 case CONST_DOUBLE:
1203 case CONST:
1204 case SYMBOL_REF:
1205 case LABEL_REF:
1206 return;
1207
1208 case REG:
1209 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1210 && ! reg_mentioned_p (in_this, not_in_this))
1211 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1212 return;
1213
1214 default:
1215 break;
1216 }
1217
1218 fmt = GET_RTX_FORMAT (code);
1219 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1220 {
1221 int j;
1222
1223 switch (fmt[i])
1224 {
1225 case 'E':
1226 for (j = 0; j < XVECLEN (in_this, i); j++)
1227 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1228 break;
1229
1230 case 'e':
1231 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1232 break;
1233 }
1234 }
1235 }
1236 \f
1237 /* Check what regs are referred to in the libcall block ending with INSN,
1238 aside from those mentioned in the equivalent value.
1239 If there are none, return 0.
1240 If there are one or more, return an EXPR_LIST containing all of them. */
1241
1242 static rtx
1243 libcall_other_reg (insn, equiv)
1244 rtx insn, equiv;
1245 {
1246 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1247 rtx p = XEXP (note, 0);
1248 rtx output = 0;
1249
1250 /* First, find all the regs used in the libcall block
1251 that are not mentioned as inputs to the result. */
1252
1253 while (p != insn)
1254 {
1255 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1256 || GET_CODE (p) == CALL_INSN)
1257 record_excess_regs (PATTERN (p), equiv, &output);
1258 p = NEXT_INSN (p);
1259 }
1260
1261 return output;
1262 }
1263 \f
1264 /* Return 1 if all uses of REG
1265 are between INSN and the end of the basic block. */
1266
1267 static int
1268 reg_in_basic_block_p (insn, reg)
1269 rtx insn, reg;
1270 {
1271 int regno = REGNO (reg);
1272 rtx p;
1273
1274 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1275 return 0;
1276
1277 /* Search this basic block for the already recorded last use of the reg. */
1278 for (p = insn; p; p = NEXT_INSN (p))
1279 {
1280 switch (GET_CODE (p))
1281 {
1282 case NOTE:
1283 break;
1284
1285 case INSN:
1286 case CALL_INSN:
1287 /* Ordinary insn: if this is the last use, we win. */
1288 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1289 return 1;
1290 break;
1291
1292 case JUMP_INSN:
1293 /* Jump insn: if this is the last use, we win. */
1294 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1295 return 1;
1296 /* Otherwise, it's the end of the basic block, so we lose. */
1297 return 0;
1298
1299 case CODE_LABEL:
1300 case BARRIER:
1301 /* It's the end of the basic block, so we lose. */
1302 return 0;
1303
1304 default:
1305 break;
1306 }
1307 }
1308
1309 /* The "last use" doesn't follow the "first use"?? */
1310 abort ();
1311 }
1312 \f
1313 /* Compute the benefit of eliminating the insns in the block whose
1314 last insn is LAST. This may be a group of insns used to compute a
1315 value directly or can contain a library call. */
1316
1317 static int
1318 libcall_benefit (last)
1319 rtx last;
1320 {
1321 rtx insn;
1322 int benefit = 0;
1323
1324 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1325 insn != last; insn = NEXT_INSN (insn))
1326 {
1327 if (GET_CODE (insn) == CALL_INSN)
1328 benefit += 10; /* Assume at least this many insns in a library
1329 routine. */
1330 else if (GET_CODE (insn) == INSN
1331 && GET_CODE (PATTERN (insn)) != USE
1332 && GET_CODE (PATTERN (insn)) != CLOBBER)
1333 benefit++;
1334 }
1335
1336 return benefit;
1337 }
1338 \f
1339 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1340
1341 static rtx
1342 skip_consec_insns (insn, count)
1343 rtx insn;
1344 int count;
1345 {
1346 for (; count > 0; count--)
1347 {
1348 rtx temp;
1349
1350 /* If first insn of libcall sequence, skip to end. */
1351 /* Do this at start of loop, since INSN is guaranteed to
1352 be an insn here. */
1353 if (GET_CODE (insn) != NOTE
1354 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1355 insn = XEXP (temp, 0);
1356
1357 do insn = NEXT_INSN (insn);
1358 while (GET_CODE (insn) == NOTE);
1359 }
1360
1361 return insn;
1362 }
1363
1364 /* Ignore any movable whose insn falls within a libcall
1365 which is part of another movable.
1366 We make use of the fact that the movable for the libcall value
1367 was made later and so appears later on the chain. */
1368
1369 static void
1370 ignore_some_movables (movables)
1371 struct movable *movables;
1372 {
1373 register struct movable *m, *m1;
1374
1375 for (m = movables; m; m = m->next)
1376 {
1377 /* Is this a movable for the value of a libcall? */
1378 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1379 if (note)
1380 {
1381 rtx insn;
1382 /* Check for earlier movables inside that range,
1383 and mark them invalid. We cannot use LUIDs here because
1384 insns created by loop.c for prior loops don't have LUIDs.
1385 Rather than reject all such insns from movables, we just
1386 explicitly check each insn in the libcall (since invariant
1387 libcalls aren't that common). */
1388 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1389 for (m1 = movables; m1 != m; m1 = m1->next)
1390 if (m1->insn == insn)
1391 m1->done = 1;
1392 }
1393 }
1394 }
1395
1396 /* For each movable insn, see if the reg that it loads is last
1397 used by another conditionally movable insn.
1398 If so, record that the second insn "forces" the first one,
1399 since the second can be moved only if the first is. */
1400
1401 static void
1402 force_movables (movables)
1403 struct movable *movables;
1404 {
1405 register struct movable *m, *m1;
1406 for (m1 = movables; m1; m1 = m1->next)
1407 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1408 if (!m1->partial && !m1->done)
1409 {
1410 int regno = m1->regno;
1411 for (m = m1->next; m; m = m->next)
1412 /* ??? Could this be a bug? What if CSE caused the
1413 register of M1 to be used after this insn?
1414 Since CSE does not update regno_last_uid,
1415 this insn M->insn might not be where it dies.
1416 But very likely this doesn't matter; what matters is
1417 that M's reg is computed from M1's reg. */
1418 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1419 && !m->done)
1420 break;
1421 if (m != 0 && m->set_src == m1->set_dest
1422 /* If m->consec, m->set_src isn't valid. */
1423 && m->consec == 0)
1424 m = 0;
1425
1426 /* Increase the priority of moving the first insn
1427 since it permits the second to be moved as well. */
1428 if (m != 0)
1429 {
1430 m->forces = m1;
1431 m1->lifetime += m->lifetime;
1432 m1->savings += m->savings;
1433 }
1434 }
1435 }
1436 \f
1437 /* Find invariant expressions that are equal and can be combined into
1438 one register. */
1439
1440 static void
1441 combine_movables (movables, nregs)
1442 struct movable *movables;
1443 int nregs;
1444 {
1445 register struct movable *m;
1446 char *matched_regs = (char *) alloca (nregs);
1447 enum machine_mode mode;
1448
1449 /* Regs that are set more than once are not allowed to match
1450 or be matched. I'm no longer sure why not. */
1451 /* Perhaps testing m->consec_sets would be more appropriate here? */
1452
1453 for (m = movables; m; m = m->next)
1454 if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
1455 {
1456 register struct movable *m1;
1457 int regno = m->regno;
1458
1459 bzero (matched_regs, nregs);
1460 matched_regs[regno] = 1;
1461
1462 /* We want later insns to match the first one. Don't make the first
1463 one match any later ones. So start this loop at m->next. */
1464 for (m1 = m->next; m1; m1 = m1->next)
1465 if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
1466 /* A reg used outside the loop mustn't be eliminated. */
1467 && !m1->global
1468 /* A reg used for zero-extending mustn't be eliminated. */
1469 && !m1->partial
1470 && (matched_regs[m1->regno]
1471 ||
1472 (
1473 /* Can combine regs with different modes loaded from the
1474 same constant only if the modes are the same or
1475 if both are integer modes with M wider or the same
1476 width as M1. The check for integer is redundant, but
1477 safe, since the only case of differing destination
1478 modes with equal sources is when both sources are
1479 VOIDmode, i.e., CONST_INT. */
1480 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1481 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1482 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1483 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1484 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1485 /* See if the source of M1 says it matches M. */
1486 && ((GET_CODE (m1->set_src) == REG
1487 && matched_regs[REGNO (m1->set_src)])
1488 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1489 movables))))
1490 && ((m->dependencies == m1->dependencies)
1491 || rtx_equal_p (m->dependencies, m1->dependencies)))
1492 {
1493 m->lifetime += m1->lifetime;
1494 m->savings += m1->savings;
1495 m1->done = 1;
1496 m1->match = m;
1497 matched_regs[m1->regno] = 1;
1498 }
1499 }
1500
1501 /* Now combine the regs used for zero-extension.
1502 This can be done for those not marked `global'
1503 provided their lives don't overlap. */
1504
1505 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1506 mode = GET_MODE_WIDER_MODE (mode))
1507 {
1508 register struct movable *m0 = 0;
1509
1510 /* Combine all the registers for extension from mode MODE.
1511 Don't combine any that are used outside this loop. */
1512 for (m = movables; m; m = m->next)
1513 if (m->partial && ! m->global
1514 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1515 {
1516 register struct movable *m1;
1517 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1518 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1519
1520 if (m0 == 0)
1521 {
1522 /* First one: don't check for overlap, just record it. */
1523 m0 = m;
1524 continue;
1525 }
1526
1527 /* Make sure they extend to the same mode.
1528 (Almost always true.) */
1529 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1530 continue;
1531
1532 /* We already have one: check for overlap with those
1533 already combined together. */
1534 for (m1 = movables; m1 != m; m1 = m1->next)
1535 if (m1 == m0 || (m1->partial && m1->match == m0))
1536 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1537 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1538 goto overlap;
1539
1540 /* No overlap: we can combine this with the others. */
1541 m0->lifetime += m->lifetime;
1542 m0->savings += m->savings;
1543 m->done = 1;
1544 m->match = m0;
1545
1546 overlap: ;
1547 }
1548 }
1549 }
1550 \f
1551 /* Return 1 if regs X and Y will become the same if moved. */
1552
1553 static int
1554 regs_match_p (x, y, movables)
1555 rtx x, y;
1556 struct movable *movables;
1557 {
1558 int xn = REGNO (x);
1559 int yn = REGNO (y);
1560 struct movable *mx, *my;
1561
1562 for (mx = movables; mx; mx = mx->next)
1563 if (mx->regno == xn)
1564 break;
1565
1566 for (my = movables; my; my = my->next)
1567 if (my->regno == yn)
1568 break;
1569
1570 return (mx && my
1571 && ((mx->match == my->match && mx->match != 0)
1572 || mx->match == my
1573 || mx == my->match));
1574 }
1575
1576 /* Return 1 if X and Y are identical-looking rtx's.
1577 This is the Lisp function EQUAL for rtx arguments.
1578
1579 If two registers are matching movables or a movable register and an
1580 equivalent constant, consider them equal. */
1581
1582 static int
1583 rtx_equal_for_loop_p (x, y, movables)
1584 rtx x, y;
1585 struct movable *movables;
1586 {
1587 register int i;
1588 register int j;
1589 register struct movable *m;
1590 register enum rtx_code code;
1591 register char *fmt;
1592
1593 if (x == y)
1594 return 1;
1595 if (x == 0 || y == 0)
1596 return 0;
1597
1598 code = GET_CODE (x);
1599
1600 /* If we have a register and a constant, they may sometimes be
1601 equal. */
1602 if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
1603 && CONSTANT_P (y))
1604 {
1605 for (m = movables; m; m = m->next)
1606 if (m->move_insn && m->regno == REGNO (x)
1607 && rtx_equal_p (m->set_src, y))
1608 return 1;
1609 }
1610 else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
1611 && CONSTANT_P (x))
1612 {
1613 for (m = movables; m; m = m->next)
1614 if (m->move_insn && m->regno == REGNO (y)
1615 && rtx_equal_p (m->set_src, x))
1616 return 1;
1617 }
1618
1619 /* Otherwise, rtx's of different codes cannot be equal. */
1620 if (code != GET_CODE (y))
1621 return 0;
1622
1623 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1624 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1625
1626 if (GET_MODE (x) != GET_MODE (y))
1627 return 0;
1628
1629 /* These three types of rtx's can be compared nonrecursively. */
1630 if (code == REG)
1631 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1632
1633 if (code == LABEL_REF)
1634 return XEXP (x, 0) == XEXP (y, 0);
1635 if (code == SYMBOL_REF)
1636 return XSTR (x, 0) == XSTR (y, 0);
1637
1638 /* Compare the elements. If any pair of corresponding elements
1639 fails to match, return 0 for the whole thing. */
1640
1641 fmt = GET_RTX_FORMAT (code);
1642 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1643 {
1644 switch (fmt[i])
1645 {
1646 case 'w':
1647 if (XWINT (x, i) != XWINT (y, i))
1648 return 0;
1649 break;
1650
1651 case 'i':
1652 if (XINT (x, i) != XINT (y, i))
1653 return 0;
1654 break;
1655
1656 case 'E':
1657 /* Two vectors must have the same length. */
1658 if (XVECLEN (x, i) != XVECLEN (y, i))
1659 return 0;
1660
1661 /* And the corresponding elements must match. */
1662 for (j = 0; j < XVECLEN (x, i); j++)
1663 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1664 return 0;
1665 break;
1666
1667 case 'e':
1668 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1669 return 0;
1670 break;
1671
1672 case 's':
1673 if (strcmp (XSTR (x, i), XSTR (y, i)))
1674 return 0;
1675 break;
1676
1677 case 'u':
1678 /* These are just backpointers, so they don't matter. */
1679 break;
1680
1681 case '0':
1682 break;
1683
1684 /* It is believed that rtx's at this level will never
1685 contain anything but integers and other rtx's,
1686 except within LABEL_REFs and SYMBOL_REFs. */
1687 default:
1688 abort ();
1689 }
1690 }
1691 return 1;
1692 }
1693 \f
1694 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1695 insns in INSNS which use that reference. */
1696
1697 static void
1698 add_label_notes (x, insns)
1699 rtx x;
1700 rtx insns;
1701 {
1702 enum rtx_code code = GET_CODE (x);
1703 int i, j;
1704 char *fmt;
1705 rtx insn;
1706
1707 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1708 {
1709 /* This code used to ignore labels that referred to dispatch tables to
1710 avoid flow generating (slightly) worse code.
1711
1712 We no longer ignore such label references (see LABEL_REF handling in
1713 mark_jump_label for additional information). */
1714 for (insn = insns; insn; insn = NEXT_INSN (insn))
1715 if (reg_mentioned_p (XEXP (x, 0), insn))
1716 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1717 REG_NOTES (insn));
1718 }
1719
1720 fmt = GET_RTX_FORMAT (code);
1721 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1722 {
1723 if (fmt[i] == 'e')
1724 add_label_notes (XEXP (x, i), insns);
1725 else if (fmt[i] == 'E')
1726 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1727 add_label_notes (XVECEXP (x, i, j), insns);
1728 }
1729 }
1730 \f
1731 /* Scan MOVABLES, and move the insns that deserve to be moved.
1732 If two matching movables are combined, replace one reg with the
1733 other throughout. */
1734
1735 static void
1736 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1737 struct movable *movables;
1738 int threshold;
1739 int insn_count;
1740 rtx loop_start;
1741 rtx end;
1742 int nregs;
1743 {
1744 rtx new_start = 0;
1745 register struct movable *m;
1746 register rtx p;
1747 /* Map of pseudo-register replacements to handle combining
1748 when we move several insns that load the same value
1749 into different pseudo-registers. */
1750 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1751 char *already_moved = (char *) alloca (nregs);
1752
1753 bzero (already_moved, nregs);
1754 bzero ((char *) reg_map, nregs * sizeof (rtx));
1755
1756 num_movables = 0;
1757
1758 for (m = movables; m; m = m->next)
1759 {
1760 /* Describe this movable insn. */
1761
1762 if (loop_dump_stream)
1763 {
1764 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1765 INSN_UID (m->insn), m->regno, m->lifetime);
1766 if (m->consec > 0)
1767 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1768 if (m->cond)
1769 fprintf (loop_dump_stream, "cond ");
1770 if (m->force)
1771 fprintf (loop_dump_stream, "force ");
1772 if (m->global)
1773 fprintf (loop_dump_stream, "global ");
1774 if (m->done)
1775 fprintf (loop_dump_stream, "done ");
1776 if (m->move_insn)
1777 fprintf (loop_dump_stream, "move-insn ");
1778 if (m->match)
1779 fprintf (loop_dump_stream, "matches %d ",
1780 INSN_UID (m->match->insn));
1781 if (m->forces)
1782 fprintf (loop_dump_stream, "forces %d ",
1783 INSN_UID (m->forces->insn));
1784 }
1785
1786 /* Count movables. Value used in heuristics in strength_reduce. */
1787 num_movables++;
1788
1789 /* Ignore the insn if it's already done (it matched something else).
1790 Otherwise, see if it is now safe to move. */
1791
1792 if (!m->done
1793 && (! m->cond
1794 || (1 == invariant_p (m->set_src)
1795 && (m->dependencies == 0
1796 || 1 == invariant_p (m->dependencies))
1797 && (m->consec == 0
1798 || 1 == consec_sets_invariant_p (m->set_dest,
1799 m->consec + 1,
1800 m->insn))))
1801 && (! m->forces || m->forces->done))
1802 {
1803 register int regno;
1804 register rtx p;
1805 int savings = m->savings;
1806
1807 /* We have an insn that is safe to move.
1808 Compute its desirability. */
1809
1810 p = m->insn;
1811 regno = m->regno;
1812
1813 if (loop_dump_stream)
1814 fprintf (loop_dump_stream, "savings %d ", savings);
1815
1816 if (moved_once[regno])
1817 {
1818 insn_count *= 2;
1819
1820 if (loop_dump_stream)
1821 fprintf (loop_dump_stream, "halved since already moved ");
1822 }
1823
1824 /* An insn MUST be moved if we already moved something else
1825 which is safe only if this one is moved too: that is,
1826 if already_moved[REGNO] is nonzero. */
1827
1828 /* An insn is desirable to move if the new lifetime of the
1829 register is no more than THRESHOLD times the old lifetime.
1830 If it's not desirable, it means the loop is so big
1831 that moving won't speed things up much,
1832 and it is liable to make register usage worse. */
1833
1834 /* It is also desirable to move if it can be moved at no
1835 extra cost because something else was already moved. */
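
/* A worked example with made-up numbers: with THRESHOLD 6, SAVINGS 2
and a LIFETIME of 10, the product 6*2*10 = 120 must be at least
INSN_COUNT for the move to look profitable below, so any loop of
up to 120 insns would qualify. */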
1836
1837 if (already_moved[regno]
1838 || flag_move_all_movables
1839 || (threshold * savings * m->lifetime) >= insn_count
1840 || (m->forces && m->forces->done
1841 && n_times_used[m->forces->regno] == 1))
1842 {
1843 int count;
1844 register struct movable *m1;
1845 rtx first;
1846
1847 /* Now move the insns that set the reg. */
1848
1849 if (m->partial && m->match)
1850 {
1851 rtx newpat, i1;
1852 rtx r1, r2;
1853 /* Find the end of this chain of matching regs.
1854 Thus, we load each reg in the chain from that one reg.
1855 And that reg is loaded with 0 directly,
1856 since it has ->match == 0. */
1857 for (m1 = m; m1->match; m1 = m1->match);
1858 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1859 SET_DEST (PATTERN (m1->insn)));
1860 i1 = emit_insn_before (newpat, loop_start);
1861
1862 /* Mark the moved, invariant reg as being allowed to
1863 share a hard reg with the other matching invariant. */
1864 REG_NOTES (i1) = REG_NOTES (m->insn);
1865 r1 = SET_DEST (PATTERN (m->insn));
1866 r2 = SET_DEST (PATTERN (m1->insn));
1867 regs_may_share
1868 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1869 gen_rtx_EXPR_LIST (VOIDmode, r2,
1870 regs_may_share));
1871 delete_insn (m->insn);
1872
1873 if (new_start == 0)
1874 new_start = i1;
1875
1876 if (loop_dump_stream)
1877 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1878 }
1879 /* If we are to re-generate the item being moved with a
1880 new move insn, first delete what we have and then emit
1881 the move insn before the loop. */
1882 else if (m->move_insn)
1883 {
1884 rtx i1, temp;
1885
1886 for (count = m->consec; count >= 0; count--)
1887 {
1888 /* If this is the first insn of a library call sequence,
1889 skip to the end. */
1890 if (GET_CODE (p) != NOTE
1891 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1892 p = XEXP (temp, 0);
1893
1894 /* If this is the last insn of a libcall sequence, then
1895 delete every insn in the sequence except the last.
1896 The last insn is handled in the normal manner. */
1897 if (GET_CODE (p) != NOTE
1898 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1899 {
1900 temp = XEXP (temp, 0);
1901 while (temp != p)
1902 temp = delete_insn (temp);
1903 }
1904
1905 p = delete_insn (p);
1906 while (p && GET_CODE (p) == NOTE)
1907 p = NEXT_INSN (p);
1908 }
1909
1910 start_sequence ();
1911 emit_move_insn (m->set_dest, m->set_src);
1912 temp = get_insns ();
1913 end_sequence ();
1914
1915 add_label_notes (m->set_src, temp);
1916
1917 i1 = emit_insns_before (temp, loop_start);
1918 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1919 REG_NOTES (i1)
1920 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1921 m->set_src, REG_NOTES (i1));
1922
1923 if (loop_dump_stream)
1924 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1925
1926 /* The more regs we move, the less we like moving them. */
1927 threshold -= 3;
1928 }
1929 else
1930 {
1931 for (count = m->consec; count >= 0; count--)
1932 {
1933 rtx i1, temp;
1934
1935 /* If first insn of libcall sequence, skip to end. */
1936 /* Do this at start of loop, since p is guaranteed to
1937 be an insn here. */
1938 if (GET_CODE (p) != NOTE
1939 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1940 p = XEXP (temp, 0);
1941
1942 /* If last insn of libcall sequence, move all
1943 insns except the last before the loop. The last
1944 insn is handled in the normal manner. */
1945 if (GET_CODE (p) != NOTE
1946 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1947 {
1948 rtx fn_address = 0;
1949 rtx fn_reg = 0;
1950 rtx fn_address_insn = 0;
1951
1952 first = 0;
1953 for (temp = XEXP (temp, 0); temp != p;
1954 temp = NEXT_INSN (temp))
1955 {
1956 rtx body;
1957 rtx n;
1958 rtx next;
1959
1960 if (GET_CODE (temp) == NOTE)
1961 continue;
1962
1963 body = PATTERN (temp);
1964
1965 /* Find the next insn after TEMP,
1966 not counting USE or NOTE insns. */
1967 for (next = NEXT_INSN (temp); next != p;
1968 next = NEXT_INSN (next))
1969 if (! (GET_CODE (next) == INSN
1970 && GET_CODE (PATTERN (next)) == USE)
1971 && GET_CODE (next) != NOTE)
1972 break;
1973
1974 /* If that is the call, this may be the insn
1975 that loads the function address.
1976
1977 Extract the function address from the insn
1978 that loads it into a register.
1979 If this insn was cse'd, we get incorrect code.
1980
1981 So emit a new move insn that copies the
1982 function address into the register that the
1983 call insn will use. flow.c will delete any
1984 redundant stores that we have created. */
1985 if (GET_CODE (next) == CALL_INSN
1986 && GET_CODE (body) == SET
1987 && GET_CODE (SET_DEST (body)) == REG
1988 && (n = find_reg_note (temp, REG_EQUAL,
1989 NULL_RTX)))
1990 {
1991 fn_reg = SET_SRC (body);
1992 if (GET_CODE (fn_reg) != REG)
1993 fn_reg = SET_DEST (body);
1994 fn_address = XEXP (n, 0);
1995 fn_address_insn = temp;
1996 }
1997 /* We have the call insn.
1998 If it uses the register we suspect it might,
1999 load it with the correct address directly. */
2000 if (GET_CODE (temp) == CALL_INSN
2001 && fn_address != 0
2002 && reg_referenced_p (fn_reg, body))
2003 emit_insn_after (gen_move_insn (fn_reg,
2004 fn_address),
2005 fn_address_insn);
2006
2007 if (GET_CODE (temp) == CALL_INSN)
2008 {
2009 i1 = emit_call_insn_before (body, loop_start);
2010 /* Because the USAGE information potentially
2011 contains objects other than hard registers
2012 we need to copy it. */
2013 if (CALL_INSN_FUNCTION_USAGE (temp))
2014 CALL_INSN_FUNCTION_USAGE (i1)
2015 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2016 }
2017 else
2018 i1 = emit_insn_before (body, loop_start);
2019 if (first == 0)
2020 first = i1;
2021 if (temp == fn_address_insn)
2022 fn_address_insn = i1;
2023 REG_NOTES (i1) = REG_NOTES (temp);
2024 delete_insn (temp);
2025 }
2026 }
2027 if (m->savemode != VOIDmode)
2028 {
2029 /* P sets REG to zero; but we should clear only
2030 the bits that are not covered by the mode
2031 m->savemode. */
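/* E.g. (hypothetically), if m->savemode is QImode, the AND
mask built below is (1 << 8) - 1 = 0xff, which preserves
the low byte and clears all higher bits. */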
2032 rtx reg = m->set_dest;
2033 rtx sequence;
2034 rtx tem;
2035
2036 start_sequence ();
2037 tem = expand_binop
2038 (GET_MODE (reg), and_optab, reg,
2039 GEN_INT ((((HOST_WIDE_INT) 1
2040 << GET_MODE_BITSIZE (m->savemode)))
2041 - 1),
2042 reg, 1, OPTAB_LIB_WIDEN);
2043 if (tem == 0)
2044 abort ();
2045 if (tem != reg)
2046 emit_move_insn (reg, tem);
2047 sequence = gen_sequence ();
2048 end_sequence ();
2049 i1 = emit_insn_before (sequence, loop_start);
2050 }
2051 else if (GET_CODE (p) == CALL_INSN)
2052 {
2053 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2054 /* Because the USAGE information potentially
2055 contains objects other than hard registers
2056 we need to copy it. */
2057 if (CALL_INSN_FUNCTION_USAGE (p))
2058 CALL_INSN_FUNCTION_USAGE (i1)
2059 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2060 }
2061 else if (count == m->consec && m->move_insn_first)
2062 {
2063 /* The SET_SRC might not be invariant, so we must
2064 use the REG_EQUAL note. */
2065 start_sequence ();
2066 emit_move_insn (m->set_dest, m->set_src);
2067 temp = get_insns ();
2068 end_sequence ();
2069
2070 add_label_notes (m->set_src, temp);
2071
2072 i1 = emit_insns_before (temp, loop_start);
2073 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2074 REG_NOTES (i1)
2075 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2076 : REG_EQUAL),
2077 m->set_src, REG_NOTES (i1));
2078 }
2079 else
2080 i1 = emit_insn_before (PATTERN (p), loop_start);
2081
2082 if (REG_NOTES (i1) == 0)
2083 {
2084 REG_NOTES (i1) = REG_NOTES (p);
2085
2086 /* If there is a REG_EQUAL note present whose value
2087 is not loop invariant, then delete it, since it
2088 may cause problems with later optimization passes.
2089 It is possible for cse to create such notes
2090 like this as a result of record_jump_cond. */
2091
2092 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2093 && ! invariant_p (XEXP (temp, 0)))
2094 remove_note (i1, temp);
2095 }
2096
2097 if (new_start == 0)
2098 new_start = i1;
2099
2100 if (loop_dump_stream)
2101 fprintf (loop_dump_stream, " moved to %d",
2102 INSN_UID (i1));
2103
2104 /* If library call, now fix the REG_NOTES that contain
2105 insn pointers, namely REG_LIBCALL on FIRST
2106 and REG_RETVAL on I1. */
2107 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2108 {
2109 XEXP (temp, 0) = first;
2110 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2111 XEXP (temp, 0) = i1;
2112 }
2113
2114 delete_insn (p);
2115 do p = NEXT_INSN (p);
2116 while (p && GET_CODE (p) == NOTE);
2117 }
2118
2119 /* The more regs we move, the less we like moving them. */
2120 threshold -= 3;
2121 }
2122
2123 /* Any other movable that loads the same register
2124 MUST be moved. */
2125 already_moved[regno] = 1;
2126
2127 /* This reg has been moved out of one loop. */
2128 moved_once[regno] = 1;
2129
2130 /* The reg set here is now invariant. */
2131 if (! m->partial)
2132 n_times_set[regno] = 0;
2133
2134 m->done = 1;
2135
2136 /* Change the length-of-life info for the register
2137 to say it lives at least the full length of this loop.
2138 This will help guide optimizations in outer loops. */
2139
2140 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2141 /* This is the old insn before all the moved insns.
2142 We can't use the moved insn because it is out of range
2143 in uid_luid. Only the old insns have luids. */
2144 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2145 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2146 REGNO_LAST_UID (regno) = INSN_UID (end);
2147
2148 /* Combine with this moved insn any other matching movables. */
2149
2150 if (! m->partial)
2151 for (m1 = movables; m1; m1 = m1->next)
2152 if (m1->match == m)
2153 {
2154 rtx temp;
2155
2156 /* Schedule the reg loaded by M1
2157 for replacement so that it shares the reg of M.
2158 If the modes differ (only possible in restricted
2159 circumstances), make a SUBREG. */
2160 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2161 reg_map[m1->regno] = m->set_dest;
2162 else
2163 reg_map[m1->regno]
2164 = gen_lowpart_common (GET_MODE (m1->set_dest),
2165 m->set_dest);
2166
2167 /* Get rid of the matching insn
2168 and prevent further processing of it. */
2169 m1->done = 1;
2170
2171 /* If this is a library call, delete all insns except the
2172 last, which is deleted below. */
2173 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2174 NULL_RTX)))
2175 {
2176 for (temp = XEXP (temp, 0); temp != m1->insn;
2177 temp = NEXT_INSN (temp))
2178 delete_insn (temp);
2179 }
2180 delete_insn (m1->insn);
2181
2182 /* Any other movable that loads the same register
2183 MUST be moved. */
2184 already_moved[m1->regno] = 1;
2185
2186 /* The reg merged here is now invariant,
2187 if the reg it matches is invariant. */
2188 if (! m->partial)
2189 n_times_set[m1->regno] = 0;
2190 }
2191 }
2192 else if (loop_dump_stream)
2193 fprintf (loop_dump_stream, "not desirable");
2194 }
2195 else if (loop_dump_stream && !m->match)
2196 fprintf (loop_dump_stream, "not safe");
2197
2198 if (loop_dump_stream)
2199 fprintf (loop_dump_stream, "\n");
2200 }
2201
2202 if (new_start == 0)
2203 new_start = loop_start;
2204
2205 /* Go through all the instructions in the loop, making
2206 all the register substitutions scheduled in REG_MAP. */
2207 for (p = new_start; p != end; p = NEXT_INSN (p))
2208 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2209 || GET_CODE (p) == CALL_INSN)
2210 {
2211 replace_regs (PATTERN (p), reg_map, nregs, 0);
2212 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2213 INSN_CODE (p) = -1;
2214 }
2215 }
2216 \f
2217 #if 0
2218 /* Scan X and replace the address of any MEM in it with ADDR.
2219 REG is the address that MEM should have before the replacement. */
2220
2221 static void
2222 replace_call_address (x, reg, addr)
2223 rtx x, reg, addr;
2224 {
2225 register enum rtx_code code;
2226 register int i;
2227 register char *fmt;
2228
2229 if (x == 0)
2230 return;
2231 code = GET_CODE (x);
2232 switch (code)
2233 {
2234 case PC:
2235 case CC0:
2236 case CONST_INT:
2237 case CONST_DOUBLE:
2238 case CONST:
2239 case SYMBOL_REF:
2240 case LABEL_REF:
2241 case REG:
2242 return;
2243
2244 case SET:
2245 /* Short cut for very common case. */
2246 replace_call_address (XEXP (x, 1), reg, addr);
2247 return;
2248
2249 case CALL:
2250 /* Short cut for very common case. */
2251 replace_call_address (XEXP (x, 0), reg, addr);
2252 return;
2253
2254 case MEM:
2255 /* If this MEM uses a reg other than the one we expected,
2256 something is wrong. */
2257 if (XEXP (x, 0) != reg)
2258 abort ();
2259 XEXP (x, 0) = addr;
2260 return;
2261
2262 default:
2263 break;
2264 }
2265
2266 fmt = GET_RTX_FORMAT (code);
2267 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2268 {
2269 if (fmt[i] == 'e')
2270 replace_call_address (XEXP (x, i), reg, addr);
2271 if (fmt[i] == 'E')
2272 {
2273 register int j;
2274 for (j = 0; j < XVECLEN (x, i); j++)
2275 replace_call_address (XVECEXP (x, i, j), reg, addr);
2276 }
2277 }
2278 }
2279 #endif
2280 \f
2281 /* Return the number of memory refs to addresses that vary
2282 in the rtx X. */
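
/* A hypothetical example: in (plus (mem (reg 100)) (mem (symbol_ref "x"))),
the first MEM counts when reg 100 varies within the loop, while the
second address is fixed, so the value returned would be 1. */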
2283
2284 static int
2285 count_nonfixed_reads (x)
2286 rtx x;
2287 {
2288 register enum rtx_code code;
2289 register int i;
2290 register char *fmt;
2291 int value;
2292
2293 if (x == 0)
2294 return 0;
2295
2296 code = GET_CODE (x);
2297 switch (code)
2298 {
2299 case PC:
2300 case CC0:
2301 case CONST_INT:
2302 case CONST_DOUBLE:
2303 case CONST:
2304 case SYMBOL_REF:
2305 case LABEL_REF:
2306 case REG:
2307 return 0;
2308
2309 case MEM:
2310 return ((invariant_p (XEXP (x, 0)) != 1)
2311 + count_nonfixed_reads (XEXP (x, 0)));
2312
2313 default:
2314 break;
2315 }
2316
2317 value = 0;
2318 fmt = GET_RTX_FORMAT (code);
2319 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2320 {
2321 if (fmt[i] == 'e')
2322 value += count_nonfixed_reads (XEXP (x, i));
2323 if (fmt[i] == 'E')
2324 {
2325 register int j;
2326 for (j = 0; j < XVECLEN (x, i); j++)
2327 value += count_nonfixed_reads (XVECEXP (x, i, j));
2328 }
2329 }
2330 return value;
2331 }
2332
2333 \f
2334 #if 0
2335 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2336 Replace it with an instruction to load just the low bytes
2337 if the machine supports such an instruction,
2338 and insert above LOOP_START an instruction to clear the register. */
2339
2340 static void
2341 constant_high_bytes (p, loop_start)
2342 rtx p, loop_start;
2343 {
2344 register rtx new;
2345 register int insn_code_number;
2346
2347 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2348 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2349
2350 new = gen_rtx_SET (VOIDmode,
2351 gen_rtx_STRICT_LOW_PART (VOIDmode,
2352 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2353 SET_DEST (PATTERN (p)),
2354 0)),
2355 XEXP (SET_SRC (PATTERN (p)), 0));
2356 insn_code_number = recog (new, p);
2357
2358 if (insn_code_number)
2359 {
2360 register int i;
2361
2362 /* Clear destination register before the loop. */
2363 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2364 const0_rtx),
2365 loop_start);
2366
2367 /* Inside the loop, just load the low part. */
2368 PATTERN (p) = new;
2369 }
2370 }
2371 #endif
2372 \f
2373 /* Scan a loop setting the variables `unknown_address_altered',
2374 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2375 and `loop_has_volatile'. Also, fill in the arrays `loop_mems' and
2376 `loop_store_mems'. */
2377
2378 static void
2379 prescan_loop (start, end)
2380 rtx start, end;
2381 {
2382 register int level = 1;
2383 rtx insn;
2384 int loop_has_multiple_exit_targets = 0;
2385 /* The label after END. Jumping here is just like falling off the
2386 end of the loop. We use next_nonnote_insn instead of next_label
2387 as a hedge against the (pathological) case where some actual insn
2388 might end up between the two. */
2389 rtx exit_target = next_nonnote_insn (end);
2390 if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
2391 loop_has_multiple_exit_targets = 1;
2392
2393 unknown_address_altered = 0;
2394 loop_has_call = 0;
2395 loop_has_volatile = 0;
2396 loop_store_mems_idx = 0;
2397 loop_mems_idx = 0;
2398
2399 num_mem_sets = 0;
2400 loops_enclosed = 1;
2401 loop_continue = 0;
2402
2403 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2404 insn = NEXT_INSN (insn))
2405 {
2406 if (GET_CODE (insn) == NOTE)
2407 {
2408 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2409 {
2410 ++level;
2411 /* Count number of loops contained in this one. */
2412 loops_enclosed++;
2413 }
2414 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2415 {
2416 --level;
2417 if (level == 0)
2418 {
2419 end = insn;
2420 break;
2421 }
2422 }
2423 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2424 {
2425 if (level == 1)
2426 loop_continue = insn;
2427 }
2428 }
2429 else if (GET_CODE (insn) == CALL_INSN)
2430 {
2431 if (! CONST_CALL_P (insn))
2432 unknown_address_altered = 1;
2433 loop_has_call = 1;
2434 }
2435 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2436 {
2437 rtx label1 = NULL_RTX;
2438 rtx label2 = NULL_RTX;
2439
2440 if (volatile_refs_p (PATTERN (insn)))
2441 loop_has_volatile = 1;
2442
2443 note_stores (PATTERN (insn), note_addr_stored);
2444
2445 if (!loop_has_multiple_exit_targets
2446 && GET_CODE (insn) == JUMP_INSN
2447 && GET_CODE (PATTERN (insn)) == SET
2448 && SET_DEST (PATTERN (insn)) == pc_rtx)
2449 {
2450 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2451 {
2452 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2453 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2454 }
2455 else
2456 {
2457 label1 = SET_SRC (PATTERN (insn));
2458 }
2459
2460 do {
2461 if (label1 && label1 != pc_rtx)
2462 {
2463 if (GET_CODE (label1) != LABEL_REF)
2464 {
2465 /* Something tricky. */
2466 loop_has_multiple_exit_targets = 1;
2467 break;
2468 }
2469 else if (XEXP (label1, 0) != exit_target
2470 && LABEL_OUTSIDE_LOOP_P (label1))
2471 {
2472 /* A jump outside the current loop. */
2473 loop_has_multiple_exit_targets = 1;
2474 break;
2475 }
2476 }
2477
2478 label1 = label2;
2479 label2 = NULL_RTX;
2480 } while (label1);
2481 }
2482 }
2483 else if (GET_CODE (insn) == RETURN)
2484 loop_has_multiple_exit_targets = 1;
2485 }
2486
2487 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2488 if (/* We can't tell what MEMs are aliased by what. */
2489 !unknown_address_altered
2490 /* An exception thrown by a called function might land us
2491 anywhere. */
2492 && !loop_has_call
2493 /* We don't want loads for MEMs moved to a location before the
2494 one at which their stack memory becomes allocated. (Note
2495 that this is not a problem for malloc, etc., since those
2496 require actual function calls.) */
2497 && !current_function_calls_alloca
2498 /* There are ways to leave the loop other than falling off the
2499 end. */
2500 && !loop_has_multiple_exit_targets)
2501 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2502 insn = NEXT_INSN (insn))
2503 for_each_rtx (&insn, insert_loop_mem, 0);
2504 }
2505 \f
2506 /* Scan the function looking for loops. Record the start and end of each loop.
2507 Also mark as invalid loops any loops that contain a setjmp or are branched
2508 to from outside the loop. */
2509
2510 static void
2511 find_and_verify_loops (f)
2512 rtx f;
2513 {
2514 rtx insn, label;
2515 int current_loop = -1;
2516 int next_loop = -1;
2517 int loop;
2518
2519 /* If there are jumps to undefined labels,
2520 treat them as jumps out of any/all loops.
2521 This also avoids writing past end of tables when there are no loops. */
2522 uid_loop_num[0] = -1;
2523
2524 /* Find boundaries of loops, mark which loops are contained within
2525 loops, and invalidate loops that have setjmp. */
2526
2527 for (insn = f; insn; insn = NEXT_INSN (insn))
2528 {
2529 if (GET_CODE (insn) == NOTE)
2530 switch (NOTE_LINE_NUMBER (insn))
2531 {
2532 case NOTE_INSN_LOOP_BEG:
2533 loop_number_loop_starts[++next_loop] = insn;
2534 loop_number_loop_ends[next_loop] = 0;
2535 loop_outer_loop[next_loop] = current_loop;
2536 loop_invalid[next_loop] = 0;
2537 loop_number_exit_labels[next_loop] = 0;
2538 loop_number_exit_count[next_loop] = 0;
2539 current_loop = next_loop;
2540 break;
2541
2542 case NOTE_INSN_SETJMP:
2543 /* In this case, we must invalidate our current loop and any
2544 enclosing loop. */
2545 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2546 {
2547 loop_invalid[loop] = 1;
2548 if (loop_dump_stream)
2549 fprintf (loop_dump_stream,
2550 "\nLoop at %d ignored due to setjmp.\n",
2551 INSN_UID (loop_number_loop_starts[loop]));
2552 }
2553 break;
2554
2555 case NOTE_INSN_LOOP_END:
2556 if (current_loop == -1)
2557 abort ();
2558
2559 loop_number_loop_ends[current_loop] = insn;
2560 current_loop = loop_outer_loop[current_loop];
2561 break;
2562
2563 default:
2564 break;
2565 }
2566
2567 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2568 enclosing loop, but this doesn't matter. */
2569 uid_loop_num[INSN_UID (insn)] = current_loop;
2570 }
2571
2572 /* Any loop containing a label used in an initializer must be invalidated,
2573 because it can be jumped into from anywhere. */
2574
2575 for (label = forced_labels; label; label = XEXP (label, 1))
2576 {
2577 int loop_num;
2578
2579 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2580 loop_num != -1;
2581 loop_num = loop_outer_loop[loop_num])
2582 loop_invalid[loop_num] = 1;
2583 }
2584
2585 /* Any loop containing a label used for an exception handler must be
2586 invalidated, because it can be jumped into from anywhere. */
2587
2588 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2589 {
2590 int loop_num;
2591
2592 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2593 loop_num != -1;
2594 loop_num = loop_outer_loop[loop_num])
2595 loop_invalid[loop_num] = 1;
2596 }
2597
2598 /* Now scan all insns in the function. If any JUMP_INSN branches into a
2599 loop that it is not contained within, that loop is marked invalid.
2600 If any INSN or CALL_INSN uses a label's address, then the loop containing
2601 that label is marked invalid, because it could be jumped into from
2602 anywhere.
2603
2604 Also look for blocks of code ending in an unconditional branch that
2605 exits the loop. If such a block is surrounded by a conditional
2606 branch around the block, move the block elsewhere (see below) and
2607 invert the jump to point to the code block. This may eliminate a
2608 label in our loop and will simplify processing by both us and a
2609 possible second cse pass. */
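
/* Schematically (a made-up example): a conditional branch around an
unconditional exit jump,

if (!cond) goto L1; goto exit; L1: ...

is rewritten as `if (cond) goto L2;' followed directly by the code
at L1, where L2 labels the `goto exit' block after it has been
moved out of the loop next to a BARRIER at the same loop depth as
its target. */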
2610
2611 for (insn = f; insn; insn = NEXT_INSN (insn))
2612 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2613 {
2614 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2615
2616 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2617 {
2618 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2619 if (note)
2620 {
2621 int loop_num;
2622
2623 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2624 loop_num != -1;
2625 loop_num = loop_outer_loop[loop_num])
2626 loop_invalid[loop_num] = 1;
2627 }
2628 }
2629
2630 if (GET_CODE (insn) != JUMP_INSN)
2631 continue;
2632
2633 mark_loop_jump (PATTERN (insn), this_loop_num);
2634
2635 /* See if this is an unconditional branch outside the loop. */
2636 if (this_loop_num != -1
2637 && (GET_CODE (PATTERN (insn)) == RETURN
2638 || (simplejump_p (insn)
2639 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2640 != this_loop_num)))
2641 && get_max_uid () < max_uid_for_loop)
2642 {
2643 rtx p;
2644 rtx our_next = next_real_insn (insn);
2645 int dest_loop;
2646 int outer_loop = -1;
2647
2648 /* Go backwards until we reach the start of the loop, a label,
2649 or a JUMP_INSN. */
2650 for (p = PREV_INSN (insn);
2651 GET_CODE (p) != CODE_LABEL
2652 && ! (GET_CODE (p) == NOTE
2653 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2654 && GET_CODE (p) != JUMP_INSN;
2655 p = PREV_INSN (p))
2656 ;
2657
2658 /* Check for the case where we have a jump to an inner nested
2659 loop, and do not perform the optimization in that case. */
2660
2661 if (JUMP_LABEL (insn))
2662 {
2663 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2664 if (dest_loop != -1)
2665 {
2666 for (outer_loop = dest_loop; outer_loop != -1;
2667 outer_loop = loop_outer_loop[outer_loop])
2668 if (outer_loop == this_loop_num)
2669 break;
2670 }
2671 }
2672
2673 /* Make sure that the target of P is within the current loop. */
2674
2675 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2676 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2677 outer_loop = this_loop_num;
2678
2679 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2680 we have a block of code to try to move.
2681
2682 We look backward and then forward from the target of INSN
2683 to find a BARRIER at the same loop depth as the target.
2684 If we find such a BARRIER, we make a new label for the start
2685 of the block, invert the jump in P and point it to that label,
2686 and move the block of code to the spot we found. */
2687
2688 if (outer_loop == -1
2689 && GET_CODE (p) == JUMP_INSN
2690 && JUMP_LABEL (p) != 0
2691 /* Just ignore jumps to labels that were never emitted.
2692 These always indicate compilation errors. */
2693 && INSN_UID (JUMP_LABEL (p)) != 0
2694 && condjump_p (p)
2695 && ! simplejump_p (p)
2696 && next_real_insn (JUMP_LABEL (p)) == our_next)
2697 {
2698 rtx target
2699 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2700 int target_loop_num = uid_loop_num[INSN_UID (target)];
2701 rtx loc;
2702
2703 for (loc = target; loc; loc = PREV_INSN (loc))
2704 if (GET_CODE (loc) == BARRIER
2705 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2706 break;
2707
2708 if (loc == 0)
2709 for (loc = target; loc; loc = NEXT_INSN (loc))
2710 if (GET_CODE (loc) == BARRIER
2711 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2712 break;
2713
2714 if (loc)
2715 {
2716 rtx cond_label = JUMP_LABEL (p);
2717 rtx new_label = get_label_after (p);
2718
2719 /* Ensure our label doesn't go away. */
2720 LABEL_NUSES (cond_label)++;
2721
2722 /* Verify that uid_loop_num is large enough and that
2723 we can invert P. */
2724 if (invert_jump (p, new_label))
2725 {
2726 rtx q, r;
2727
2728 /* If no suitable BARRIER was found, create a suitable
2729 one before TARGET. Since TARGET is a fall through
2730 path, we'll need to insert a jump around our block
2731 and add a BARRIER before TARGET.
2732
2733 This creates an extra unconditional jump outside
2734 the loop. However, the benefits of removing rarely
2735 executed instructions from inside the loop usually
2736 outweighs the cost of the extra unconditional jump
2737 outside the loop. */
2738 if (loc == 0)
2739 {
2740 rtx temp;
2741
2742 temp = gen_jump (JUMP_LABEL (insn));
2743 temp = emit_jump_insn_before (temp, target);
2744 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2745 LABEL_NUSES (JUMP_LABEL (insn))++;
2746 loc = emit_barrier_before (target);
2747 }
2748
2749 /* Include the BARRIER after INSN and copy the
2750 block after LOC. */
2751 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2752 reorder_insns (new_label, NEXT_INSN (insn), loc);
2753
2754 /* All those insns are now in TARGET_LOOP_NUM. */
2755 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2756 q = NEXT_INSN (q))
2757 uid_loop_num[INSN_UID (q)] = target_loop_num;
2758
2759 /* The label jumped to by INSN is no longer a loop exit.
2760 Unless INSN does not have a label (e.g., it is a
2761 RETURN insn), search loop_number_exit_labels to find
2762 its label_ref, and remove it. Also turn off
2763 LABEL_OUTSIDE_LOOP_P bit. */
2764 if (JUMP_LABEL (insn))
2765 {
2766 int loop_num;
2767
2768 for (q = 0,
2769 r = loop_number_exit_labels[this_loop_num];
2770 r; q = r, r = LABEL_NEXTREF (r))
2771 if (XEXP (r, 0) == JUMP_LABEL (insn))
2772 {
2773 LABEL_OUTSIDE_LOOP_P (r) = 0;
2774 if (q)
2775 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2776 else
2777 loop_number_exit_labels[this_loop_num]
2778 = LABEL_NEXTREF (r);
2779 break;
2780 }
2781
2782 for (loop_num = this_loop_num;
2783 loop_num != -1 && loop_num != target_loop_num;
2784 loop_num = loop_outer_loop[loop_num])
2785 loop_number_exit_count[loop_num]--;
2786
2787 /* If we didn't find it, then something is wrong. */
2788 if (! r)
2789 abort ();
2790 }
2791
2792 /* P is now a jump outside the loop, so it must be put
2793 in loop_number_exit_labels, and marked as such.
2794 The easiest way to do this is to just call
2795 mark_loop_jump again for P. */
2796 mark_loop_jump (PATTERN (p), this_loop_num);
2797
2798 /* If INSN now jumps to the insn after it,
2799 delete INSN. */
2800 if (JUMP_LABEL (insn) != 0
2801 && (next_real_insn (JUMP_LABEL (insn))
2802 == next_real_insn (insn)))
2803 delete_insn (insn);
2804 }
2805
2806 /* Continue the loop after where the conditional
2807 branch used to jump, since the only branch insn
2808 in the block (if it still remains) is an inter-loop
2809 branch and hence needs no processing. */
2810 insn = NEXT_INSN (cond_label);
2811
2812 if (--LABEL_NUSES (cond_label) == 0)
2813 delete_insn (cond_label);
2814
2815 /* This loop will be continued with NEXT_INSN (insn). */
2816 insn = PREV_INSN (insn);
2817 }
2818 }
2819 }
2820 }
2821 }
2822
2823 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2824 loops it is contained in, mark the target loop invalid.
2825
2826 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2827
2828 static void
2829 mark_loop_jump (x, loop_num)
2830 rtx x;
2831 int loop_num;
2832 {
2833 int dest_loop;
2834 int outer_loop;
2835 int i;
2836
2837 switch (GET_CODE (x))
2838 {
2839 case PC:
2840 case USE:
2841 case CLOBBER:
2842 case REG:
2843 case MEM:
2844 case CONST_INT:
2845 case CONST_DOUBLE:
2846 case RETURN:
2847 return;
2848
2849 case CONST:
2850 /* There could be a label reference in here. */
2851 mark_loop_jump (XEXP (x, 0), loop_num);
2852 return;
2853
2854 case PLUS:
2855 case MINUS:
2856 case MULT:
2857 mark_loop_jump (XEXP (x, 0), loop_num);
2858 mark_loop_jump (XEXP (x, 1), loop_num);
2859 return;
2860
2861 case SIGN_EXTEND:
2862 case ZERO_EXTEND:
2863 mark_loop_jump (XEXP (x, 0), loop_num);
2864 return;
2865
2866 case LABEL_REF:
2867 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2868
2869 /* Link together all labels that branch outside the loop. This
2870 is used by final_[bg]iv_value and the loop unrolling code. Also
2871 mark this LABEL_REF so we know that this branch should predict
2872 false. */
2873
2874 /* A check to make sure the label is not in an inner nested loop,
2875 since this does not count as a loop exit. */
2876 if (dest_loop != -1)
2877 {
2878 for (outer_loop = dest_loop; outer_loop != -1;
2879 outer_loop = loop_outer_loop[outer_loop])
2880 if (outer_loop == loop_num)
2881 break;
2882 }
2883 else
2884 outer_loop = -1;
2885
2886 if (loop_num != -1 && outer_loop == -1)
2887 {
2888 LABEL_OUTSIDE_LOOP_P (x) = 1;
2889 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2890 loop_number_exit_labels[loop_num] = x;
2891
2892 for (outer_loop = loop_num;
2893 outer_loop != -1 && outer_loop != dest_loop;
2894 outer_loop = loop_outer_loop[outer_loop])
2895 loop_number_exit_count[outer_loop]++;
2896 }
2897
2898 /* If this is inside a loop, but not in the current loop or one enclosed
2899 by it, it invalidates at least one loop. */
2900
2901 if (dest_loop == -1)
2902 return;
2903
2904 /* We must invalidate every nested loop containing the target of this
2905 label, except those that also contain the jump insn. */
2906
2907 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2908 {
2909 /* Stop when we reach a loop that also contains the jump insn. */
2910 for (outer_loop = loop_num; outer_loop != -1;
2911 outer_loop = loop_outer_loop[outer_loop])
2912 if (dest_loop == outer_loop)
2913 return;
2914
2915 /* If we get here, we know we need to invalidate a loop. */
2916 if (loop_dump_stream && ! loop_invalid[dest_loop])
2917 fprintf (loop_dump_stream,
2918 "\nLoop at %d ignored due to multiple entry points.\n",
2919 INSN_UID (loop_number_loop_starts[dest_loop]));
2920
2921 loop_invalid[dest_loop] = 1;
2922 }
2923 return;
2924
2925 case SET:
2926 /* If this is not setting pc, ignore. */
2927 if (SET_DEST (x) == pc_rtx)
2928 mark_loop_jump (SET_SRC (x), loop_num);
2929 return;
2930
2931 case IF_THEN_ELSE:
2932 mark_loop_jump (XEXP (x, 1), loop_num);
2933 mark_loop_jump (XEXP (x, 2), loop_num);
2934 return;
2935
2936 case PARALLEL:
2937 case ADDR_VEC:
2938 for (i = 0; i < XVECLEN (x, 0); i++)
2939 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2940 return;
2941
2942 case ADDR_DIFF_VEC:
2943 for (i = 0; i < XVECLEN (x, 1); i++)
2944 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2945 return;
2946
2947 default:
2948 /* Treat anything else (such as a symbol_ref)
2949 as a branch out of this loop, but not into any loop. */
2950
2951 if (loop_num != -1)
2952 {
2953 #ifdef HAIFA
2954 LABEL_OUTSIDE_LOOP_P (x) = 1;
2955 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2956 #endif /* HAIFA */
2957
2958 loop_number_exit_labels[loop_num] = x;
2959
2960 for (outer_loop = loop_num; outer_loop != -1;
2961 outer_loop = loop_outer_loop[outer_loop])
2962 loop_number_exit_count[outer_loop]++;
2963 }
2964 return;
2965 }
2966 }
2967 \f
2968 /* Return nonzero if there is a label in the range from
2969 insn INSN to and including the insn whose luid is END.
2970 INSN must have an assigned luid (i.e., it must not have
2971 been previously created by loop.c). */
2972
2973 static int
2974 labels_in_range_p (insn, end)
2975 rtx insn;
2976 int end;
2977 {
2978 while (insn && INSN_LUID (insn) <= end)
2979 {
2980 if (GET_CODE (insn) == CODE_LABEL)
2981 return 1;
2982 insn = NEXT_INSN (insn);
2983 }
2984
2985 return 0;
2986 }
2987
2988 /* Record that a memory reference X is being set. */
2989
2990 static void
2991 note_addr_stored (x, y)
2992 rtx x;
2993 rtx y ATTRIBUTE_UNUSED;
2994 {
2995 register int i;
2996
2997 if (x == 0 || GET_CODE (x) != MEM)
2998 return;
2999
3000 /* Count number of memory writes.
3001 This affects heuristics in strength_reduce. */
3002 num_mem_sets++;
3003
3004 /* BLKmode MEM means all memory is clobbered. */
3005 if (GET_MODE (x) == BLKmode)
3006 unknown_address_altered = 1;
3007
3008 if (unknown_address_altered)
3009 return;
3010
3011 for (i = 0; i < loop_store_mems_idx; i++)
3012 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
3013 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
3014 {
3015 /* We are storing at the same address as previously noted. Save the
3016 wider reference. */
3017 if (GET_MODE_SIZE (GET_MODE (x))
3018 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
3019 loop_store_mems[i] = x;
3020 break;
3021 }
3022
3023 if (i == NUM_STORES)
3024 unknown_address_altered = 1;
3025
3026 else if (i == loop_store_mems_idx)
3027 loop_store_mems[loop_store_mems_idx++] = x;
3028 }
3029 \f
3030 /* Return nonzero if the rtx X is invariant over the current loop.
3031
3032 The value is 2 if we refer to something only conditionally invariant.
3033
3034 If `unknown_address_altered' is nonzero, no memory ref is invariant.
3035 Otherwise, a memory ref is invariant if it does not conflict with
3036 anything stored in `loop_store_mems'. */
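
/* As a hypothetical example: (plus (reg 100) (const_int 4)) yields 1
if reg 100 is never set inside the loop, 2 if reg 100 is itself a
candidate for motion (its n_times_set entry is negative), and 0 if
reg 100 is assigned within the loop. */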
3037
3038 int
3039 invariant_p (x)
3040 register rtx x;
3041 {
3042 register int i;
3043 register enum rtx_code code;
3044 register char *fmt;
3045 int conditional = 0;
3046
3047 if (x == 0)
3048 return 1;
3049 code = GET_CODE (x);
3050 switch (code)
3051 {
3052 case CONST_INT:
3053 case CONST_DOUBLE:
3054 case SYMBOL_REF:
3055 case CONST:
3056 return 1;
3057
3058 case LABEL_REF:
3059 /* A LABEL_REF is normally invariant, however, if we are unrolling
3060 loops, and this label is inside the loop, then it isn't invariant.
3061 This is because each unrolled copy of the loop body will have
3062 a copy of this label. If this was invariant, then an insn loading
3063 the address of this label into a register might get moved outside
3064 the loop, and then each loop body would end up using the same label.
3065
3066 We don't know the loop bounds here though, so just fail for all
3067 labels. */
3068 if (flag_unroll_loops)
3069 return 0;
3070 else
3071 return 1;
3072
3073 case PC:
3074 case CC0:
3075 case UNSPEC_VOLATILE:
3076 return 0;
3077
3078 case REG:
3079 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3080 since the reg might be set by initialization within the loop. */
3081
3082 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3083 || x == arg_pointer_rtx)
3084 && ! current_function_has_nonlocal_goto)
3085 return 1;
3086
3087 if (loop_has_call
3088 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3089 return 0;
3090
3091 if (n_times_set[REGNO (x)] < 0)
3092 return 2;
3093
3094 return n_times_set[REGNO (x)] == 0;
3095
3096 case MEM:
3097 /* Volatile memory references must be rejected. Do this before
3098 checking for read-only items, so that volatile read-only items
3099 will be rejected also. */
3100 if (MEM_VOLATILE_P (x))
3101 return 0;
3102
3103 /* Read-only items (such as constants in a constant pool) are
3104 invariant if their address is. */
3105 if (RTX_UNCHANGING_P (x))
3106 break;
3107
3108 /* If we filled the table (or had a subroutine call), any location
3109 in memory could have been clobbered. */
3110 if (unknown_address_altered)
3111 return 0;
3112
3113 /* See if there is any dependence between a store and this load. */
3114 for (i = loop_store_mems_idx - 1; i >= 0; i--)
3115 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
3116 return 0;
3117
3118 /* It's not invalidated by a store in memory
3119 but we must still verify the address is invariant. */
3120 break;
3121
3122 case ASM_OPERANDS:
3123 /* Don't mess with insns declared volatile. */
3124 if (MEM_VOLATILE_P (x))
3125 return 0;
3126 break;
3127
3128 default:
3129 break;
3130 }
3131
3132 fmt = GET_RTX_FORMAT (code);
3133 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3134 {
3135 if (fmt[i] == 'e')
3136 {
3137 int tem = invariant_p (XEXP (x, i));
3138 if (tem == 0)
3139 return 0;
3140 if (tem == 2)
3141 conditional = 1;
3142 }
3143 else if (fmt[i] == 'E')
3144 {
3145 register int j;
3146 for (j = 0; j < XVECLEN (x, i); j++)
3147 {
3148 int tem = invariant_p (XVECEXP (x, i, j));
3149 if (tem == 0)
3150 return 0;
3151 if (tem == 2)
3152 conditional = 1;
3153 }
3154
3155 }
3156 }
3157
3158 return 1 + conditional;
3159 }
3160
3161 \f
3162 /* Return nonzero if all the insns in the loop that set REG
3163 are INSN and the immediately following insns,
3164 and if each of those insns sets REG in an invariant way
3165 (not counting uses of REG in them).
3166
3167 The value is 2 if some of these insns are only conditionally invariant.
3168
3169 We assume that INSN itself is the first set of REG
3170 and that its source is invariant. */
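
/* A typical (hypothetical) case on a RISC target is a constant
address built by two consecutive sets of the same register:

(set (reg 100) (high (symbol_ref "x")))
(set (reg 100) (lo_sum (reg 100) (symbol_ref "x")))

Both sets must be found, and each source must be invariant, for
the pair to be movable as a unit. */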
3171
3172 static int
3173 consec_sets_invariant_p (reg, n_sets, insn)
3174 int n_sets;
3175 rtx reg, insn;
3176 {
3177 register rtx p = insn;
3178 register int regno = REGNO (reg);
3179 rtx temp;
3180 /* Number of sets we have to insist on finding after INSN. */
3181 int count = n_sets - 1;
3182 int old = n_times_set[regno];
3183 int value = 0;
3184 int this;
3185
3186 /* If N_SETS hit the limit, we can't rely on its value. */
3187 if (n_sets == 127)
3188 return 0;
3189
3190 n_times_set[regno] = 0;
3191
3192 while (count > 0)
3193 {
3194 register enum rtx_code code;
3195 rtx set;
3196
3197 p = NEXT_INSN (p);
3198 code = GET_CODE (p);
3199
3200 /* If library call, skip to end of it. */
3201 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3202 p = XEXP (temp, 0);
3203
3204 this = 0;
3205 if (code == INSN
3206 && (set = single_set (p))
3207 && GET_CODE (SET_DEST (set)) == REG
3208 && REGNO (SET_DEST (set)) == regno)
3209 {
3210 this = invariant_p (SET_SRC (set));
3211 if (this != 0)
3212 value |= this;
3213 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3214 {
3215 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3216 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3217 notes are OK. */
3218 this = (CONSTANT_P (XEXP (temp, 0))
3219 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3220 && invariant_p (XEXP (temp, 0))));
3221 if (this != 0)
3222 value |= this;
3223 }
3224 }
3225 if (this != 0)
3226 count--;
3227 else if (code != NOTE)
3228 {
3229 n_times_set[regno] = old;
3230 return 0;
3231 }
3232 }
3233
3234 n_times_set[regno] = old;
3235 /* If invariant_p ever returned 2, we return 2. */
3236 return 1 + (value & 2);
3237 }
3238
3239 #if 0
3240 /* I don't think this condition is sufficient to allow INSN
3241 to be moved, so we no longer test it. */
3242
3243 /* Return 1 if all insns in the basic block of INSN and following INSN
3244 that set REG are invariant according to TABLE. */
3245
3246 static int
3247 all_sets_invariant_p (reg, insn, table)
3248 rtx reg, insn;
3249 short *table;
3250 {
3251 register rtx p = insn;
3252 register int regno = REGNO (reg);
3253
3254 while (1)
3255 {
3256 register enum rtx_code code;
3257 p = NEXT_INSN (p);
3258 code = GET_CODE (p);
3259 if (code == CODE_LABEL || code == JUMP_INSN)
3260 return 1;
3261 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3262 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3263 && REGNO (SET_DEST (PATTERN (p))) == regno)
3264 {
3265 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3266 return 0;
3267 }
3268 }
3269 }
3270 #endif /* 0 */
3271 \f
3272 /* Look at all uses (not sets) of registers in X. For each, if it is
3273 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3274 a different insn, set USAGE[REGNO] to const0_rtx. */
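
/* Thus, after the scan (a hypothetical trace): a register used in
exactly one insn has that insn in USAGE; one used by two different
insns has const0_rtx; and one that is only set, never used, still
has 0. */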
3275
3276 static void
3277 find_single_use_in_loop (insn, x, usage)
3278 rtx insn;
3279 rtx x;
3280 rtx *usage;
3281 {
3282 enum rtx_code code = GET_CODE (x);
3283 char *fmt = GET_RTX_FORMAT (code);
3284 int i, j;
3285
3286 if (code == REG)
3287 usage[REGNO (x)]
3288 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3289 ? const0_rtx : insn;
3290
3291 else if (code == SET)
3292 {
3293 /* Don't count SET_DEST if it is a REG; otherwise count things
3294 in SET_DEST because if a register is partially modified, it won't
3295 show up as a potential movable so we don't care how USAGE is set
3296 for it. */
3297 if (GET_CODE (SET_DEST (x)) != REG)
3298 find_single_use_in_loop (insn, SET_DEST (x), usage);
3299 find_single_use_in_loop (insn, SET_SRC (x), usage);
3300 }
3301 else
3302 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3303 {
3304 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3305 find_single_use_in_loop (insn, XEXP (x, i), usage);
3306 else if (fmt[i] == 'E')
3307 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3308 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3309 }
3310 }
3311 \f
3312 /* Increment N_TIMES_SET at the index of each register
3313 that is modified by an insn between FROM and TO.
3314 If the value of an element of N_TIMES_SET becomes 127 or more,
3315 stop incrementing it, to avoid overflow.
3316
3317 Store in SINGLE_USAGE[I] the single insn in which register I is
3318 used, if it is only used once. Otherwise, it is set to 0 (for no
3319 uses) or const0_rtx for more than one use. This parameter may be zero,
3320 in which case this processing is not done.
3321
3322 Store in *COUNT_PTR the number of actual instructions
3323 in the loop. We use this to decide what is worth moving out. */
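
/* As a hypothetical trace: a register assigned once per iteration in
a single basic block ends with N_TIMES_SET of 1 and stays a
candidate, while one set in two separate basic blocks, or used
between two of its sets, is flagged in MAY_NOT_MOVE. */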
3324
3325 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3326 In that case, it is the insn that last set reg n. */
3327
3328 static void
3329 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3330 register rtx from, to;
3331 char *may_not_move;
3332 rtx *single_usage;
3333 int *count_ptr;
3334 int nregs;
3335 {
3336 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3337 register rtx insn;
3338 register int count = 0;
3339 register rtx dest;
3340
3341 bzero ((char *) last_set, nregs * sizeof (rtx));
3342 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3343 {
3344 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3345 {
3346 ++count;
3347
3348 /* If requested, record registers that have exactly one use. */
3349 if (single_usage)
3350 {
3351 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3352
3353 /* Include uses in REG_EQUAL notes. */
3354 if (REG_NOTES (insn))
3355 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3356 }
3357
3358 if (GET_CODE (PATTERN (insn)) == CLOBBER
3359 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3360 /* Don't move a reg that has an explicit clobber.
3361 We might do so sometimes, but it's not worth the pain. */
3362 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3363
3364 if (GET_CODE (PATTERN (insn)) == SET
3365 || GET_CODE (PATTERN (insn)) == CLOBBER)
3366 {
3367 dest = SET_DEST (PATTERN (insn));
3368 while (GET_CODE (dest) == SUBREG
3369 || GET_CODE (dest) == ZERO_EXTRACT
3370 || GET_CODE (dest) == SIGN_EXTRACT
3371 || GET_CODE (dest) == STRICT_LOW_PART)
3372 dest = XEXP (dest, 0);
3373 if (GET_CODE (dest) == REG)
3374 {
3375 register int regno = REGNO (dest);
3376 /* If this is the first setting of this reg
3377 in current basic block, and it was set before,
3378 it must be set in two basic blocks, so it cannot
3379 be moved out of the loop. */
3380 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3381 may_not_move[regno] = 1;
3382 /* If this is not first setting in current basic block,
3383 see if reg was used in between previous one and this.
3384 If so, neither one can be moved. */
3385 if (last_set[regno] != 0
3386 && reg_used_between_p (dest, last_set[regno], insn))
3387 may_not_move[regno] = 1;
3388 if (n_times_set[regno] < 127)
3389 ++n_times_set[regno];
3390 last_set[regno] = insn;
3391 }
3392 }
3393 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3394 {
3395 register int i;
3396 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3397 {
3398 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3399 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3400 /* Don't move a reg that has an explicit clobber.
3401 It's not worth the pain to try to do it correctly. */
3402 may_not_move[REGNO (XEXP (x, 0))] = 1;
3403
3404 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3405 {
3406 dest = SET_DEST (x);
3407 while (GET_CODE (dest) == SUBREG
3408 || GET_CODE (dest) == ZERO_EXTRACT
3409 || GET_CODE (dest) == SIGN_EXTRACT
3410 || GET_CODE (dest) == STRICT_LOW_PART)
3411 dest = XEXP (dest, 0);
3412 if (GET_CODE (dest) == REG)
3413 {
3414 register int regno = REGNO (dest);
3415 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3416 may_not_move[regno] = 1;
3417 if (last_set[regno] != 0
3418 && reg_used_between_p (dest, last_set[regno], insn))
3419 may_not_move[regno] = 1;
3420 if (n_times_set[regno] < 127)
3421 ++n_times_set[regno];
3422 last_set[regno] = insn;
3423 }
3424 }
3425 }
3426 }
3427 }
3428
3429 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3430 bzero ((char *) last_set, nregs * sizeof (rtx));
3431 }
3432 *count_ptr = count;
3433 }
3434 \f
3435 /* Given a loop that is bounded by LOOP_START and LOOP_END
3436 and that is entered at SCAN_START,
3437 return 1 if the register set in SET contained in insn INSN is used by
3438 any insn that precedes INSN in cyclic order starting
3439 from the loop entry point.
3440
3441 We don't want to use INSN_LUID here because if we restrict INSN to those
3442 that have a valid INSN_LUID, it means we cannot move an invariant out
3443 from an inner loop past two loops. */
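
/* Picture (hypothetically) a rotated loop entered at SCAN_START in
the middle of its body: the scan runs forward from SCAN_START and
wraps from LOOP_END back to LOOP_START, so "precedes INSN" means
execution order from the entry point, not layout order. */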
3444
3445 static int
3446 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3447 rtx set, insn, loop_start, scan_start, loop_end;
3448 {
3449 rtx reg = SET_DEST (set);
3450 rtx p;
3451
3452 /* Scan forward checking for register usage. If we hit INSN, we
3453 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3454 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3455 {
3456 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3457 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3458 return 1;
3459
3460 if (p == loop_end)
3461 p = loop_start;
3462 }
3463
3464 return 0;
3465 }
3466 \f
3467 /* A "basic induction variable" or biv is a pseudo reg that is set
3468 (within this loop) only by incrementing or decrementing it. */
3469 /* A "general induction variable" or giv is a pseudo reg whose
3470 value is a linear function of a biv. */
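
/* A source-level sketch: in

for (i = 0; i < n; i++)
sum += a[i];

`i' is a biv (set only by the increment), and the address
`a + 4*i' computed for the array reference is a giv (assuming
4-byte elements). */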
3471
3472 /* Bivs are recognized by `basic_induction_var';
3473 Givs by `general_induction_var'. */
3474
3475 /* Indexed by register number, indicates whether or not register is an
3476 induction variable, and if so what type. */
3477
3478 enum iv_mode *reg_iv_type;
3479
3480 /* Indexed by register number, contains pointer to `struct induction'
3481 if register is an induction variable. This holds general info for
3482 all induction variables. */
3483
3484 struct induction **reg_iv_info;
3485
3486 /* Indexed by register number, contains pointer to `struct iv_class'
3487 if register is a basic induction variable. This holds info describing
3488 the class (a related group) of induction variables that the biv belongs
3489 to. */
3490
3491 struct iv_class **reg_biv_class;
3492
3493 /* The head of a list which links together (via the next field)
3494 every iv class for the current loop. */
3495
3496 struct iv_class *loop_iv_list;
3497
3498 /* Communication with routines called via `note_stores'. */
3499
3500 static rtx note_insn;
3501
3502 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3503
3504 static rtx addr_placeholder;
3505
3506 /* ??? Unfinished optimizations, and possible future optimizations,
3507 for the strength reduction code. */
3508
3509 /* ??? The interaction of biv elimination, and recognition of 'constant'
3510 bivs, may cause problems. */
3511
3512 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3513 performance problems.
3514
3515 Perhaps don't eliminate things that can be combined with an addressing
3516 mode. Find all givs that have the same biv, mult_val, and add_val;
3517 then for each giv, check to see if its only use dies in a following
3518 memory address. If so, generate a new memory address and check to see
3519 if it is valid. If it is valid, then store the modified memory address,
3520 otherwise, mark the giv as not done so that it will get its own iv. */
3521
3522 /* ??? Could try to optimize branches when it is known that a biv is always
3523 positive. */
3524
3525 /* ??? When replacing a biv in a compare insn, we should replace it with the
3526 closest giv so that an optimized branch can still be recognized by the
3527 combiner, e.g. the VAX acb insn. */
3528
3529 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3530 was rerun in loop_optimize whenever a register was added or moved.
3531 Also, some of the optimizations could be a little less conservative. */
3532 \f
3533 /* Perform strength reduction and induction variable elimination.
3534
3535 Pseudo registers created during this function will be beyond the last
3536 valid index in several tables including n_times_set and regno_last_uid.
3537 This does not cause a problem here, because the added registers cannot be
3538 givs outside of their loop, and hence will never be reconsidered.
3539 But scan_loop must check regnos to make sure they are in bounds.
3540
3541 SCAN_START is the first instruction in the loop, as the loop would
3542 actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
3543 the first instruction in the loop, as it is laid out in the
3544 instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG. */
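/* A rough sketch of the net effect, using illustrative C-level names:
   a giv recomputed on each iteration as

     g = i * 4 + a;

   can be replaced by a new register `g2' (hypothetical name) set to
   `i0 * 4 + a' before the loop, where `i0' is the initial value of the
   biv `i', and incremented by 4 wherever `i' is incremented by 1, so the
   multiply disappears from the loop body.  */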
3545
3546 static void
3547 strength_reduce (scan_start, end, loop_top, insn_count,
3548 loop_start, loop_end, unroll_p)
3549 rtx scan_start;
3550 rtx end;
3551 rtx loop_top;
3552 int insn_count;
3553 rtx loop_start;
3554 rtx loop_end;
3555 int unroll_p;
3556 {
3557 rtx p;
3558 rtx set;
3559 rtx inc_val;
3560 rtx mult_val;
3561 rtx dest_reg;
3562 /* This is 1 if the current insn may not be executed at least once on every
3563 loop iteration. */
3564 int not_every_iteration = 0;
3565 /* This is 1 if the current insn may be executed more than once per
3566 loop iteration. */
3567 int maybe_multiple = 0;
3568 /* Temporary list pointers for traversing loop_iv_list. */
3569 struct iv_class *bl, **backbl;
3570 /* Ratio of extra register life span we can justify
3571 for saving an instruction. Larger if the loop doesn't call subroutines,
3572 since in that case saving an insn makes more of a difference
3573 and more registers are available. */
3574 /* ??? Could set this to the last value of threshold in move_movables. */
3575 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3576 /* Map of pseudo-register replacements. */
3577 rtx *reg_map;
3578 int call_seen;
3579 rtx test;
3580 rtx end_insert_before;
3581 int loop_depth = 0;
3582
3583 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3584 * sizeof (enum iv_mode));
3585 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3586 reg_iv_info = (struct induction **)
3587 alloca (max_reg_before_loop * sizeof (struct induction *));
3588 bzero ((char *) reg_iv_info, (max_reg_before_loop
3589 * sizeof (struct induction *)));
3590 reg_biv_class = (struct iv_class **)
3591 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3592 bzero ((char *) reg_biv_class, (max_reg_before_loop
3593 * sizeof (struct iv_class *)));
3594
3595 loop_iv_list = 0;
3596 addr_placeholder = gen_reg_rtx (Pmode);
3597
3598 /* Save the insn immediately after loop_end. Insns inserted after loop_end
3599 must be put before this insn, so that they will appear in the right
3600 order (i.e. loop order).
3601
3602 If loop_end is the end of the current function, then emit a
3603 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3604 dummy note insn. */
3605 if (NEXT_INSN (loop_end) != 0)
3606 end_insert_before = NEXT_INSN (loop_end);
3607 else
3608 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3609
3610 /* Scan through loop to find all possible bivs. */
3611
3612 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
3613 p != NULL_RTX;
3614 p = next_insn_in_loop (p, scan_start, end, loop_top))
3615 {
3616 if (GET_CODE (p) == INSN
3617 && (set = single_set (p))
3618 && GET_CODE (SET_DEST (set)) == REG)
3619 {
3620 dest_reg = SET_DEST (set);
3621 if (REGNO (dest_reg) < max_reg_before_loop
3622 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3623 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3624 {
3625 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3626 dest_reg, p, &inc_val, &mult_val))
3627 {
3628 /* It is a possible basic induction variable.
3629 Create and initialize an induction structure for it. */
3630
3631 struct induction *v
3632 = (struct induction *) alloca (sizeof (struct induction));
3633
3634 record_biv (v, p, dest_reg, inc_val, mult_val,
3635 not_every_iteration, maybe_multiple);
3636 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3637 }
3638 else if (REGNO (dest_reg) < max_reg_before_loop)
3639 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3640 }
3641 }
3642
3643 /* Past CODE_LABEL, we get to insns that may be executed multiple
3644 times. The only way we can be sure that they can't is if every
3645 jump insn between here and the end of the loop either
3646 returns, exits the loop, is a forward jump, or is a jump
3647 to the loop start. */
3648
3649 if (GET_CODE (p) == CODE_LABEL)
3650 {
3651 rtx insn = p;
3652
3653 maybe_multiple = 0;
3654
3655 while (1)
3656 {
3657 insn = NEXT_INSN (insn);
3658 if (insn == scan_start)
3659 break;
3660 if (insn == end)
3661 {
3662 if (loop_top != 0)
3663 insn = loop_top;
3664 else
3665 break;
3666 if (insn == scan_start)
3667 break;
3668 }
3669
3670 if (GET_CODE (insn) == JUMP_INSN
3671 && GET_CODE (PATTERN (insn)) != RETURN
3672 && (! condjump_p (insn)
3673 || (JUMP_LABEL (insn) != 0
3674 && JUMP_LABEL (insn) != scan_start
3675 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3676 || INSN_UID (insn) >= max_uid_for_loop
3677 || (INSN_LUID (JUMP_LABEL (insn))
3678 < INSN_LUID (insn))))))
3679 {
3680 maybe_multiple = 1;
3681 break;
3682 }
3683 }
3684 }
3685
3686 /* Past a jump, we get to insns for which we can't count
3687 on whether they will be executed during each iteration. */
3688 /* This code appears twice in strength_reduce. There is also similar
3689 code in scan_loop. */
3690 if (GET_CODE (p) == JUMP_INSN
3691 /* If we enter the loop in the middle, and scan around to the
3692 beginning, don't set not_every_iteration for that.
3693 This can be any kind of jump, since we want to know if insns
3694 will be executed if the loop is executed. */
3695 && ! (JUMP_LABEL (p) == loop_top
3696 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3697 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3698 {
3699 rtx label = 0;
3700
3701 /* If this is a jump outside the loop, then it also doesn't
3702 matter. Check to see if the target of this branch is on the
3703 loop_number_exit_labels list. */
3704
3705 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3706 label;
3707 label = LABEL_NEXTREF (label))
3708 if (XEXP (label, 0) == JUMP_LABEL (p))
3709 break;
3710
3711 if (! label)
3712 not_every_iteration = 1;
3713 }
3714
3715 else if (GET_CODE (p) == NOTE)
3716 {
3717 /* At the virtual top of a converted loop, insns are again known to
3718 be executed each iteration: logically, the loop begins here
3719 even though the exit code has been duplicated. */
3720 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3721 not_every_iteration = 0;
3722 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3723 loop_depth++;
3724 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3725 loop_depth--;
3726 }
3727
3728 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3729 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3730 or not an insn is known to be executed each iteration of the
3731 loop, whether or not any iterations are known to occur.
3732
3733 Therefore, if we have just passed a label and have no more labels
3734 between here and the test insn of the loop, we know these insns
3735 will be executed each iteration. */
3736
3737 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3738 && no_labels_between_p (p, loop_end))
3739 not_every_iteration = 0;
3740 }
3741
3742 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3743 Make a sanity check against n_times_set. */
3744 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3745 {
3746 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3747 /* Above happens if the register is modified by a subreg, etc. */
3748 /* Make sure it is not recognized as a basic induction var: */
3749 || n_times_set[bl->regno] != bl->biv_count
3750 /* If never incremented, it is an invariant that we decided not to
3751 move, so leave it alone. */
3752 || ! bl->incremented)
3753 {
3754 if (loop_dump_stream)
3755 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3756 bl->regno,
3757 (reg_iv_type[bl->regno] != BASIC_INDUCT
3758 ? "not induction variable"
3759 : (! bl->incremented ? "never incremented"
3760 : "count error")));
3761
3762 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3763 *backbl = bl->next;
3764 }
3765 else
3766 {
3767 backbl = &bl->next;
3768
3769 if (loop_dump_stream)
3770 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3771 }
3772 }
3773
3774 /* Exit if there are no bivs. */
3775 if (! loop_iv_list)
3776 {
3777 /* We can still unroll the loop anyway, but indicate that there is no
3778 strength reduction info available. */
3779 if (unroll_p)
3780 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3781
3782 return;
3783 }
3784
3785 /* Find initial value for each biv by searching backwards from loop_start,
3786 halting at first label. Also record any test condition. */
3787
3788 call_seen = 0;
3789 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3790 {
3791 note_insn = p;
3792
3793 if (GET_CODE (p) == CALL_INSN)
3794 call_seen = 1;
3795
3796 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3797 || GET_CODE (p) == CALL_INSN)
3798 note_stores (PATTERN (p), record_initial);
3799
3800 /* Record any test of a biv that branches around the loop if there is no
3801 store between it and the start of the loop. We only care about tests
3802 with constants and registers, and only certain of those. */
3803 if (GET_CODE (p) == JUMP_INSN
3804 && JUMP_LABEL (p) != 0
3805 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3806 && (test = get_condition_for_loop (p)) != 0
3807 && GET_CODE (XEXP (test, 0)) == REG
3808 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3809 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3810 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3811 && bl->init_insn == 0)
3812 {
3813 /* If an NE test, we have an initial value! */
3814 if (GET_CODE (test) == NE)
3815 {
3816 bl->init_insn = p;
3817 bl->init_set = gen_rtx_SET (VOIDmode,
3818 XEXP (test, 0), XEXP (test, 1));
3819 }
3820 else
3821 bl->initial_test = test;
3822 }
3823 }
3824
3825 /* Look at each biv and see if we can say anything better about its
3826 initial value from any initializing insns set up above. (This is done
3827 in two passes to avoid missing SETs in a PARALLEL.) */
3828 for (bl = loop_iv_list; bl; bl = bl->next)
3829 {
3830 rtx src;
3831 rtx note;
3832
3833 if (! bl->init_insn)
3834 continue;
3835
3836 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3837 is a constant, use the value of that. */
3838 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3839 && CONSTANT_P (XEXP (note, 0)))
3840 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3841 && CONSTANT_P (XEXP (note, 0))))
3842 src = XEXP (note, 0);
3843 else
3844 src = SET_SRC (bl->init_set);
3845
3846 if (loop_dump_stream)
3847 fprintf (loop_dump_stream,
3848 "Biv %d initialized at insn %d: initial value ",
3849 bl->regno, INSN_UID (bl->init_insn));
3850
3851 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3852 || GET_MODE (src) == VOIDmode)
3853 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3854 {
3855 bl->initial_value = src;
3856
3857 if (loop_dump_stream)
3858 {
3859 if (GET_CODE (src) == CONST_INT)
3860 {
3861 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3862 fputc ('\n', loop_dump_stream);
3863 }
3864 else
3865 {
3866 print_rtl (loop_dump_stream, src);
3867 fprintf (loop_dump_stream, "\n");
3868 }
3869 }
3870 }
3871 else
3872 {
3873 /* The biv's initial value is not a simple move,
3874 so let it keep its initial value of "itself". */
3875
3876 if (loop_dump_stream)
3877 fprintf (loop_dump_stream, "is complex\n");
3878 }
3879 }
3880
3881 /* Search the loop for general induction variables. */
3882
3883 /* A register is a giv if: it is only set once, it is a function of a
3884 biv and a constant (or invariant), and it is not a biv. */
3885
3886 not_every_iteration = 0;
3887 loop_depth = 0;
3888 p = scan_start;
3889 while (1)
3890 {
3891 p = NEXT_INSN (p);
3892 /* At end of a straight-in loop, we are done.
3893 At end of a loop entered at the bottom, scan the top. */
3894 if (p == scan_start)
3895 break;
3896 if (p == end)
3897 {
3898 if (loop_top != 0)
3899 p = loop_top;
3900 else
3901 break;
3902 if (p == scan_start)
3903 break;
3904 }
3905
3906 /* Look for a general induction variable in a register. */
3907 if (GET_CODE (p) == INSN
3908 && (set = single_set (p))
3909 && GET_CODE (SET_DEST (set)) == REG
3910 && ! may_not_optimize[REGNO (SET_DEST (set))])
3911 {
3912 rtx src_reg;
3913 rtx add_val;
3914 rtx mult_val;
3915 int benefit;
3916 rtx regnote = 0;
3917
3918 dest_reg = SET_DEST (set);
3919 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3920 continue;
3921
3922 if (/* SET_SRC is a giv. */
3923 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
3924 &mult_val, 0, &benefit)
3925 /* Equivalent expression is a giv. */
3926 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3927 && general_induction_var (XEXP (regnote, 0), &src_reg,
3928 &add_val, &mult_val, 0,
3929 &benefit)))
3930 /* Don't try to handle any regs made by loop optimization.
3931 We have nothing on them in regno_first_uid, etc. */
3932 && REGNO (dest_reg) < max_reg_before_loop
3933 /* Don't recognize a basic induction variable here. */
3934 && dest_reg != src_reg
3935 /* This must be the only place where the register is set. */
3936 && (n_times_set[REGNO (dest_reg)] == 1
3937 /* or all sets must be consecutive and make a giv. */
3938 || (benefit = consec_sets_giv (benefit, p,
3939 src_reg, dest_reg,
3940 &add_val, &mult_val))))
3941 {
3942 int count;
3943 struct induction *v
3944 = (struct induction *) alloca (sizeof (struct induction));
3945 rtx temp;
3946
3947 /* If this is a library call, increase benefit. */
3948 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3949 benefit += libcall_benefit (p);
3950
3951 /* Skip the consecutive insns, if there are any. */
3952 for (count = n_times_set[REGNO (dest_reg)] - 1;
3953 count > 0; count--)
3954 {
3955 /* If first insn of libcall sequence, skip to end.
3956 Do this at start of loop, since INSN is guaranteed to
3957 be an insn here. */
3958 if (GET_CODE (p) != NOTE
3959 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3960 p = XEXP (temp, 0);
3961
3962 do p = NEXT_INSN (p);
3963 while (GET_CODE (p) == NOTE);
3964 }
3965
3966 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3967 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3968 loop_end);
3969
3970 }
3971 }
3972
3973 #ifndef DONT_REDUCE_ADDR
3974 /* Look for givs which are memory addresses. */
3975 /* This resulted in worse code on a VAX 8600. I wonder if it
3976 still does. */
3977 if (GET_CODE (p) == INSN)
3978 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3979 loop_end);
3980 #endif
3981
3982 /* Update the status of whether giv can derive other givs. This can
3983 change when we pass a label or an insn that updates a biv. */
3984 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3985 || GET_CODE (p) == CODE_LABEL)
3986 update_giv_derive (p);
3987
3988 /* Past a jump, we get to insns for which we can't count
3989 on whether they will be executed during each iteration. */
3990 /* This code appears twice in strength_reduce. There is also similar
3991 code in scan_loop. */
3992 if (GET_CODE (p) == JUMP_INSN
3993 /* If we enter the loop in the middle, and scan around to the
3994 beginning, don't set not_every_iteration for that.
3995 This can be any kind of jump, since we want to know if insns
3996 will be executed if the loop is executed. */
3997 && ! (JUMP_LABEL (p) == loop_top
3998 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3999 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
4000 {
4001 rtx label = 0;
4002
4003 /* If this is a jump outside the loop, then it also doesn't
4004 matter. Check to see if the target of this branch is on the
4005 loop_number_exit_labels list. */
4006
4007 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
4008 label;
4009 label = LABEL_NEXTREF (label))
4010 if (XEXP (label, 0) == JUMP_LABEL (p))
4011 break;
4012
4013 if (! label)
4014 not_every_iteration = 1;
4015 }
4016
4017 else if (GET_CODE (p) == NOTE)
4018 {
4019 /* At the virtual top of a converted loop, insns are again known to
4020 be executed each iteration: logically, the loop begins here
4021 even though the exit code has been duplicated. */
4022 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
4023 not_every_iteration = 0;
4024 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4025 loop_depth++;
4026 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4027 loop_depth--;
4028 }
4029
4030 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4031 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4032 or not an insn is known to be executed each iteration of the
4033 loop, whether or not any iterations are known to occur.
4034
4035 Therefore, if we have just passed a label and have no more labels
4036 between here and the test insn of the loop, we know these insns
4037 will be executed each iteration. */
4038
4039 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
4040 && no_labels_between_p (p, loop_end))
4041 not_every_iteration = 0;
4042 }
4043
4044 /* Try to calculate and save the number of loop iterations. This is
4045 set to zero if the actual number can not be calculated. This must
4046 be called after all giv's have been identified, since otherwise it may
4047 fail if the iteration variable is a giv. */
4048
4049 loop_n_iterations = loop_iterations (loop_start, loop_end);
4050
4051 /* Now for each giv for which we still don't know whether or not it is
4052 replaceable, check to see if it is replaceable because its final value
4053 can be calculated. This must be done after loop_iterations is called,
4054 so that final_giv_value will work correctly. */
4055
4056 for (bl = loop_iv_list; bl; bl = bl->next)
4057 {
4058 struct induction *v;
4059
4060 for (v = bl->giv; v; v = v->next_iv)
4061 if (! v->replaceable && ! v->not_replaceable)
4062 check_final_value (v, loop_start, loop_end);
4063 }
4064
4065 /* Try to prove that the loop counter variable (if any) is always
4066 nonnegative; if so, record that fact with a REG_NONNEG note
4067 so that "decrement and branch until zero" insn can be used. */
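/* Sketch of the idea, with illustrative names: a loop counting upward,

     i = 0; do { ... i++; } while (i < n);

   may be reversed to count downward,

     i = n; do { ... } while (--i >= 0);

   when `i' is used only for counting, so the comparison against `n'
   disappears.  */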
4068 check_dbra_loop (loop_end, insn_count, loop_start);
4069
4070 #ifdef HAIFA
4071 /* Record loop variables relevant for BCT optimization before unrolling
4072 the loop. Unrolling may update part of this information, and the
4073 correct data will be used for generating the BCT. */
4074 #ifdef HAVE_decrement_and_branch_on_count
4075 if (HAVE_decrement_and_branch_on_count)
4076 analyze_loop_iterations (loop_start, loop_end);
4077 #endif
4078 #endif /* HAIFA */
4079
4080 /* Create reg_map to hold substitutions for replaceable giv regs. */
4081 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
4082 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
4083
4084 /* Examine each iv class for feasibility of strength reduction/induction
4085 variable elimination. */
4086
4087 for (bl = loop_iv_list; bl; bl = bl->next)
4088 {
4089 struct induction *v;
4090 int benefit;
4091 int all_reduced;
4092 rtx final_value = 0;
4093
4094 /* Test whether it will be possible to eliminate this biv
4095 provided all givs are reduced. This is possible if either
4096 the reg is not used outside the loop, or we can compute
4097 what its final value will be.
4098
4099 For architectures with a decrement_and_branch_until_zero insn,
4100 don't do this if we put a REG_NONNEG note on the endtest for
4101 this biv. */
4102
4103 /* Compare against bl->init_insn rather than loop_start.
4104 We aren't concerned with any uses of the biv between
4105 init_insn and loop_start since these won't be affected
4106 by the value of the biv elsewhere in the function, so
4107 long as init_insn doesn't use the biv itself.
4108 March 14, 1989 -- self@bayes.arc.nasa.gov */
4109
4110 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4111 && bl->init_insn
4112 && INSN_UID (bl->init_insn) < max_uid_for_loop
4113 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4114 #ifdef HAVE_decrement_and_branch_until_zero
4115 && ! bl->nonneg
4116 #endif
4117 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4118 || ((final_value = final_biv_value (bl, loop_start, loop_end))
4119 #ifdef HAVE_decrement_and_branch_until_zero
4120 && ! bl->nonneg
4121 #endif
4122 ))
4123 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
4124 threshold, insn_count);
4125 else
4126 {
4127 if (loop_dump_stream)
4128 {
4129 fprintf (loop_dump_stream,
4130 "Cannot eliminate biv %d.\n",
4131 bl->regno);
4132 fprintf (loop_dump_stream,
4133 "First use: insn %d, last use: insn %d.\n",
4134 REGNO_FIRST_UID (bl->regno),
4135 REGNO_LAST_UID (bl->regno));
4136 }
4137 }
4138
4139 /* Combine all giv's for this iv_class. */
4140 combine_givs (bl);
4141
4142 /* This will be true at the end, if all givs which depend on this
4143 biv have been strength reduced.
4144 We can't (currently) eliminate the biv unless this is so. */
4145 all_reduced = 1;
4146
4147 /* Check each giv in this class to see if we will benefit by reducing
4148 it. Skip giv's combined with others. */
4149 for (v = bl->giv; v; v = v->next_iv)
4150 {
4151 struct induction *tv;
4152
4153 if (v->ignore || v->same)
4154 continue;
4155
4156 benefit = v->benefit;
4157
4158 /* Reduce benefit if not replaceable, since we will insert
4159 a move-insn to replace the insn that calculates this giv.
4160 Don't do this unless the giv is a user variable, since it
4161 will often be marked non-replaceable because of the duplication
4162 of the exit code outside the loop. In such a case, the copies
4163 we insert are dead and will be deleted. So they don't have
4164 a cost. Similar situations exist. */
4165 /* ??? The new final_[bg]iv_value code does a much better job
4166 of finding replaceable giv's, and hence this code may no longer
4167 be necessary. */
4168 if (! v->replaceable && ! bl->eliminable
4169 && REG_USERVAR_P (v->dest_reg))
4170 benefit -= copy_cost;
4171
4172 /* Decrease the benefit to count the add-insns that we will
4173 insert to increment the reduced reg for the giv. */
4174 benefit -= add_cost * bl->biv_count;
4175
4176 /* Decide whether to strength-reduce this giv or to leave the code
4177 unchanged (recompute it from the biv each time it is used).
4178 This decision can be made independently for each giv. */
4179
4180 #ifdef AUTO_INC_DEC
4181 /* Attempt to guess whether autoincrement will handle some of the
4182 new add insns; if so, increase BENEFIT (undo the subtraction of
4183 add_cost that was done above). */
4184 if (v->giv_type == DEST_ADDR
4185 && GET_CODE (v->mult_val) == CONST_INT)
4186 {
4187 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4188 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4189 benefit += add_cost * bl->biv_count;
4190 #endif
4191 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4192 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4193 benefit += add_cost * bl->biv_count;
4194 #endif
4195 }
4196 #endif
4197
4198 /* If an insn is not to be strength reduced, then set its ignore
4199 flag, and clear all_reduced. */
4200
4201 /* A giv that depends on a reversed biv must be reduced if it is
4202 used after the loop exit; otherwise, it would have the wrong
4203 value after the loop exit. To keep it simple, just reduce all
4204 such giv's whether or not we know they are used after the loop
4205 exit. */
4206
4207 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4208 && ! bl->reversed)
4209 {
4210 if (loop_dump_stream)
4211 fprintf (loop_dump_stream,
4212 "giv of insn %d not worth while, %d vs %d.\n",
4213 INSN_UID (v->insn),
4214 v->lifetime * threshold * benefit, insn_count);
4215 v->ignore = 1;
4216 all_reduced = 0;
4217 }
4218 else
4219 {
4220 /* Check that we can increment the reduced giv without a
4221 multiply insn. If not, reject it. */
4222
4223 for (tv = bl->biv; tv; tv = tv->next_iv)
4224 if (tv->mult_val == const1_rtx
4225 && ! product_cheap_p (tv->add_val, v->mult_val))
4226 {
4227 if (loop_dump_stream)
4228 fprintf (loop_dump_stream,
4229 "giv of insn %d: would need a multiply.\n",
4230 INSN_UID (v->insn));
4231 v->ignore = 1;
4232 all_reduced = 0;
4233 break;
4234 }
4235 }
4236 }
4237
4238 /* Reduce each giv that we decided to reduce. */
4239
4240 for (v = bl->giv; v; v = v->next_iv)
4241 {
4242 struct induction *tv;
4243 if (! v->ignore && v->same == 0)
4244 {
4245 int auto_inc_opt = 0;
4246
4247 v->new_reg = gen_reg_rtx (v->mode);
4248
4249 #ifdef AUTO_INC_DEC
4250 /* If the target has auto-increment addressing modes, and
4251 this is an address giv, then try to put the increment
4252 immediately after its use, so that flow can create an
4253 auto-increment addressing mode. */
4254 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4255 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4256 /* We don't handle reversed biv's because bl->biv->insn
4257 does not have a valid INSN_LUID. */
4258 && ! bl->reversed
4259 && v->always_executed && ! v->maybe_multiple
4260 && INSN_UID (v->insn) < max_uid_for_loop)
4261 {
4262 /* If other giv's have been combined with this one, then
4263 this will work only if all uses of the other giv's occur
4264 before this giv's insn. This is difficult to check.
4265
4266 We simplify this by looking for the common case where
4267 there is one DEST_REG giv, and this giv's insn is the
4268 last use of the dest_reg of that DEST_REG giv. If the
4269 increment occurs after the address giv, then we can
4270 perform the optimization. (Otherwise, the increment
4271 would have to go before other_giv, and we would not be
4272 able to combine it with the address giv to get an
4273 auto-inc address.) */
4274 if (v->combined_with)
4275 {
4276 struct induction *other_giv = 0;
4277
4278 for (tv = bl->giv; tv; tv = tv->next_iv)
4279 if (tv->same == v)
4280 {
4281 if (other_giv)
4282 break;
4283 else
4284 other_giv = tv;
4285 }
4286 if (! tv && other_giv
4287 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4288 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4289 == INSN_UID (v->insn))
4290 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4291 auto_inc_opt = 1;
4292 }
4293 /* Check for case where increment is before the address
4294 giv. Do this test in "loop order". */
4295 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4296 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4297 || (INSN_LUID (bl->biv->insn)
4298 > INSN_LUID (scan_start))))
4299 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4300 && (INSN_LUID (scan_start)
4301 < INSN_LUID (bl->biv->insn))))
4302 auto_inc_opt = -1;
4303 else
4304 auto_inc_opt = 1;
4305
4306 #ifdef HAVE_cc0
4307 {
4308 rtx prev;
4309
4310 /* We can't put an insn immediately after one setting
4311 cc0, or immediately before one using cc0. */
4312 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4313 || (auto_inc_opt == -1
4314 && (prev = prev_nonnote_insn (v->insn)) != 0
4315 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4316 && sets_cc0_p (PATTERN (prev))))
4317 auto_inc_opt = 0;
4318 }
4319 #endif
4320
4321 if (auto_inc_opt)
4322 v->auto_inc_opt = 1;
4323 }
4324 #endif
4325
4326 /* For each place where the biv is incremented, add an insn
4327 to increment the new, reduced reg for the giv. */
4328 for (tv = bl->biv; tv; tv = tv->next_iv)
4329 {
4330 rtx insert_before;
4331
4332 if (! auto_inc_opt)
4333 insert_before = tv->insn;
4334 else if (auto_inc_opt == 1)
4335 insert_before = NEXT_INSN (v->insn);
4336 else
4337 insert_before = v->insn;
4338
4339 if (tv->mult_val == const1_rtx)
4340 emit_iv_add_mult (tv->add_val, v->mult_val,
4341 v->new_reg, v->new_reg, insert_before);
4342 else /* tv->mult_val == const0_rtx */
4343 /* A multiply is acceptable here
4344 since this is presumed to be seldom executed. */
4345 emit_iv_add_mult (tv->add_val, v->mult_val,
4346 v->add_val, v->new_reg, insert_before);
4347 }
4348
4349 /* Add code at loop start to initialize giv's reduced reg. */
4350
4351 emit_iv_add_mult (bl->initial_value, v->mult_val,
4352 v->add_val, v->new_reg, loop_start);
4353 }
4354 }
4355
4356 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4357 as not reduced.
4358
4359 For each giv register that can be reduced now: if replaceable,
4360 substitute reduced reg wherever the old giv occurs;
4361 else add new move insn "giv_reg = reduced_reg".
4362
4363 Also check for givs whose first use is their definition and whose
4364 last use is the definition of another giv. If so, it is likely
4365 dead and should not be used to eliminate a biv. */
4366 for (v = bl->giv; v; v = v->next_iv)
4367 {
4368 if (v->same && v->same->ignore)
4369 v->ignore = 1;
4370
4371 if (v->ignore)
4372 continue;
4373
4374 if (v->giv_type == DEST_REG
4375 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4376 {
4377 struct induction *v1;
4378
4379 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4380 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4381 v->maybe_dead = 1;
4382 }
4383
4384 /* Update expression if this was combined, in case other giv was
4385 replaced. */
4386 if (v->same)
4387 v->new_reg = replace_rtx (v->new_reg,
4388 v->same->dest_reg, v->same->new_reg);
4389
4390 if (v->giv_type == DEST_ADDR)
4391 /* Store reduced reg as the address in the memref where we found
4392 this giv. */
4393 validate_change (v->insn, v->location, v->new_reg, 0);
4394 else if (v->replaceable)
4395 {
4396 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4397
4398 #if 0
4399 /* I can no longer duplicate the original problem. Perhaps
4400 this is unnecessary now? */
4401
4402 /* Replaceable; it isn't strictly necessary to delete the old
4403 insn and emit a new one, because v->dest_reg is now dead.
4404
4405 However, especially when unrolling loops, the special
4406 handling for (set REG0 REG1) in the second cse pass may
4407 make v->dest_reg live again. To avoid this problem, emit
4408 an insn to set the original giv reg from the reduced giv.
4409 We can not delete the original insn, since it may be part
4410 of a LIBCALL, and the code in flow that eliminates dead
4411 libcalls will fail if it is deleted. */
4412 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4413 v->insn);
4414 #endif
4415 }
4416 else
4417 {
4418 /* Not replaceable; emit an insn to set the original giv reg from
4419 the reduced giv, same as above. */
4420 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4421 v->insn);
4422 }
4423
4424 /* When a loop is reversed, givs which depend on the reversed
4425 biv, and which are live outside the loop, must be set to their
4426 correct final value. This insn is only needed if the giv is
4427 not replaceable. The correct final value is the same as the
4428 value that the giv starts the reversed loop with. */
4429 if (bl->reversed && ! v->replaceable)
4430 emit_iv_add_mult (bl->initial_value, v->mult_val,
4431 v->add_val, v->dest_reg, end_insert_before);
4432 else if (v->final_value)
4433 {
4434 rtx insert_before;
4435
4436 /* If the loop has multiple exits, emit the insn before the
4437 loop to ensure that it will always be executed no matter
4438 how the loop exits. Otherwise, emit the insn after the loop,
4439 since this is slightly more efficient. */
4440 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4441 insert_before = loop_start;
4442 else
4443 insert_before = end_insert_before;
4444 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4445 insert_before);
4446
4447 #if 0
4448 /* If the insn to set the final value of the giv was emitted
4449 before the loop, then we must delete the insn inside the loop
4450 that sets it. If this is a LIBCALL, then we must delete
4451 every insn in the libcall. Note, however, that
4452 final_giv_value will only succeed when there are multiple
4453 exits if the giv is dead at each exit, hence it does not
4454 matter that the original insn remains because it is dead
4455 anyway. */
4456 /* Delete the insn inside the loop that sets the giv since
4457 the giv is now set before (or after) the loop. */
4458 delete_insn (v->insn);
4459 #endif
4460 }
4461
4462 if (loop_dump_stream)
4463 {
4464 fprintf (loop_dump_stream, "giv at %d reduced to ",
4465 INSN_UID (v->insn));
4466 print_rtl (loop_dump_stream, v->new_reg);
4467 fprintf (loop_dump_stream, "\n");
4468 }
4469 }
4470
4471 /* All the givs based on the biv bl have been reduced if they
4472 merit it. */
4473
4474 /* For each giv not marked as maybe dead that has been combined with a
4475 second giv, clear any "maybe dead" mark on that second giv.
4476 v->new_reg will either be or refer to the register of the giv it
4477 combined with.
4478
4479 Doing this clearing avoids problems in biv elimination where a
4480 giv's new_reg is a complex value that can't be put in the insn but
4481 the giv it was combined with (whose new_reg is a reg) is marked maybe_dead.
4482 Since the register will be used in either case, we'd prefer it be
4483 used from the simpler giv. */
4484
4485 for (v = bl->giv; v; v = v->next_iv)
4486 if (! v->maybe_dead && v->same)
4487 v->same->maybe_dead = 0;
4488
4489 /* Try to eliminate the biv, if it is a candidate.
4490 This won't work if ! all_reduced,
4491 since the givs we planned to use might not have been reduced.
4492
4493 We have to be careful that we didn't initially think we could eliminate
4494 this biv because of a giv that we now think may be dead and shouldn't
4495 be used as a biv replacement.
4496
4497 Also, there is the possibility that we may have a giv that looks
4498 like it can be used to eliminate a biv, but the resulting insn
4499 isn't valid. This can happen, for example, on the 88k, where a
4500 JUMP_INSN can compare a register only with zero. Attempts to
4501 replace it with a compare with a constant will fail.
4502
4503 Note that in cases where this call fails, we may have replaced some
4504 of the occurrences of the biv with a giv, but no harm was done in
4505 doing so in the rare cases where it can occur. */
4506
4507 if (all_reduced == 1 && bl->eliminable
4508 && maybe_eliminate_biv (bl, loop_start, end, 1,
4509 threshold, insn_count))
4510
4511 {
4512 /* ??? If we created a new test to bypass the loop entirely,
4513 or otherwise drop straight in, based on this test, then
4514 we might want to rewrite it also. This way some later
4515 pass has more hope of removing the initialization of this
4516 biv entirely. */
4517
4518 /* If final_value != 0, then the biv may be used after loop end
4519 and we must emit an insn to set it just in case.
4520
4521 Reversed bivs already have an insn after the loop setting their
4522 value, so we don't need another one. We can't calculate the
4523 proper final value for such a biv here anyway. */
4524 if (final_value != 0 && ! bl->reversed)
4525 {
4526 rtx insert_before;
4527
4528 /* If the loop has multiple exits, emit the insn before the
4529 loop to ensure that it will always be executed no matter
4530 how the loop exits. Otherwise, emit the insn after the
4531 loop, since this is slightly more efficient. */
4532 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4533 insert_before = loop_start;
4534 else
4535 insert_before = end_insert_before;
4536
4537 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4538 insert_before);
4539 }
4540
4541 #if 0
4542 /* Delete all of the instructions inside the loop which set
4543 the biv, as they are all dead. It is safe to delete them,
4544 because an insn setting a biv will never be part of a libcall. */
4545 /* However, deleting them will invalidate the regno_last_uid info,
4546 so keeping them around is more convenient. Final_biv_value
4547 will only succeed when there are multiple exits if the biv
4548 is dead at each exit, hence it does not matter that the original
4549 insn remains, because it is dead anyway. */
4550 for (v = bl->biv; v; v = v->next_iv)
4551 delete_insn (v->insn);
4552 #endif
4553
4554 if (loop_dump_stream)
4555 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4556 bl->regno);
4557 }
4558 }
4559
4560 /* Go through all the instructions in the loop, making all the
4561 register substitutions scheduled in REG_MAP. */
4562
4563 for (p = loop_start; p != end; p = NEXT_INSN (p))
4564 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4565 || GET_CODE (p) == CALL_INSN)
4566 {
4567 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4568 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4569 INSN_CODE (p) = -1;
4570 }
4571
4572 /* Unroll loops from within strength reduction so that we can use the
4573 induction variable information that strength_reduce has already
4574 collected. */
4575
4576 if (unroll_p)
4577 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4578
4579 #ifdef HAIFA
4580 /* Instrument the loop with a BCT insn. */
4581 #ifdef HAVE_decrement_and_branch_on_count
4582 if (HAVE_decrement_and_branch_on_count)
4583 insert_bct (loop_start, loop_end);
4584 #endif
4585 #endif /* HAIFA */
4586
4587 if (loop_dump_stream)
4588 fprintf (loop_dump_stream, "\n");
4589 }
4590 \f
4591 /* Return 1 if X is a valid source for an initial value (or as a value
4592 being compared against in an initial test).
4593
4594 X must be either a register or constant and must not be clobbered between
4595 the current insn and the start of the loop.
4596
4597 INSN is the insn containing X. */
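/* For example, (const_int 10) is always a valid initial value; a hard
   register that is call-used is rejected if a call intervenes; and any
   register that is set between INSN and the loop start is rejected.  */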
4598
4599 static int
4600 valid_initial_value_p (x, insn, call_seen, loop_start)
4601 rtx x;
4602 rtx insn;
4603 int call_seen;
4604 rtx loop_start;
4605 {
4606 if (CONSTANT_P (x))
4607 return 1;
4608
4609 /* Only consider pseudos we know about initialized in insns whose luids
4610 we know. */
4611 if (GET_CODE (x) != REG
4612 || REGNO (x) >= max_reg_before_loop)
4613 return 0;
4614
4615 /* Don't use a call-clobbered register across a call which clobbers it. On
4616 some machines, don't use any hard registers at all. */
4617 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4618 && (SMALL_REGISTER_CLASSES
4619 || (call_used_regs[REGNO (x)] && call_seen)))
4620 return 0;
4621
4622 /* Don't use registers that have been clobbered before the start of the
4623 loop. */
4624 if (reg_set_between_p (x, insn, loop_start))
4625 return 0;
4626
4627 return 1;
4628 }
4629 \f
4630 /* Scan X for memory refs and check each memory address
4631 as a possible giv. INSN is the insn whose pattern X comes from.
4632 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4633 every loop iteration. */
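/* For example, in an insn containing (mem:SI (plus:SI (reg i) (const_int 4))),
   the address (plus (reg i) (const_int 4)) is a candidate DEST_ADDR giv,
   assuming (reg i) is a biv.  */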
4634
4635 static void
4636 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4637 rtx x;
4638 rtx insn;
4639 int not_every_iteration;
4640 rtx loop_start, loop_end;
4641 {
4642 register int i, j;
4643 register enum rtx_code code;
4644 register char *fmt;
4645
4646 if (x == 0)
4647 return;
4648
4649 code = GET_CODE (x);
4650 switch (code)
4651 {
4652 case REG:
4653 case CONST_INT:
4654 case CONST:
4655 case CONST_DOUBLE:
4656 case SYMBOL_REF:
4657 case LABEL_REF:
4658 case PC:
4659 case CC0:
4660 case ADDR_VEC:
4661 case ADDR_DIFF_VEC:
4662 case USE:
4663 case CLOBBER:
4664 return;
4665
4666 case MEM:
4667 {
4668 rtx src_reg;
4669 rtx add_val;
4670 rtx mult_val;
4671 int benefit;
4672
4673 /* This code used to disable creating GIVs with mult_val == 1 and
4674 add_val == 0. However, this leads to lost optimizations when
4675 it comes time to combine a set of related DEST_ADDR GIVs, since
4676 this one would not be seen. */
4677
4678 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
4679 &mult_val, 1, &benefit))
4680 {
4681 /* Found one; record it. */
4682 struct induction *v
4683 = (struct induction *) oballoc (sizeof (struct induction));
4684
4685 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4686 add_val, benefit, DEST_ADDR, not_every_iteration,
4687 &XEXP (x, 0), loop_start, loop_end);
4688
4689 v->mem_mode = GET_MODE (x);
4690 }
4691 }
4692 return;
4693
4694 default:
4695 break;
4696 }
4697
4698 /* Recursively scan the subexpressions for other mem refs. */
4699
4700 fmt = GET_RTX_FORMAT (code);
4701 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4702 if (fmt[i] == 'e')
4703 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4704 loop_end);
4705 else if (fmt[i] == 'E')
4706 for (j = 0; j < XVECLEN (x, i); j++)
4707 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4708 loop_start, loop_end);
4709 }
4710 \f
4711 /* Fill in the data about one biv update.
4712 V is the `struct induction' in which we record the biv. (It is
4713 allocated by the caller, with alloca.)
4714 INSN is the insn that sets it.
4715 DEST_REG is the biv's reg.
4716
4717 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4718 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4719 being set to INC_VAL.
4720
4721 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4722 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4723 can be executed more than once per iteration. If MAYBE_MULTIPLE
4724 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4725 executed exactly once per iteration. */
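/* For example, a biv update `i = i + 4' is recorded with MULT_VAL
   const1_rtx and INC_VAL (const_int 4), while a plain set `i = 10'
   is recorded with MULT_VAL const0_rtx and INC_VAL (const_int 10).  */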
4726
4727 static void
4728 record_biv (v, insn, dest_reg, inc_val, mult_val,
4729 not_every_iteration, maybe_multiple)
4730 struct induction *v;
4731 rtx insn;
4732 rtx dest_reg;
4733 rtx inc_val;
4734 rtx mult_val;
4735 int not_every_iteration;
4736 int maybe_multiple;
4737 {
4738 struct iv_class *bl;
4739
4740 v->insn = insn;
4741 v->src_reg = dest_reg;
4742 v->dest_reg = dest_reg;
4743 v->mult_val = mult_val;
4744 v->add_val = inc_val;
4745 v->mode = GET_MODE (dest_reg);
4746 v->always_computable = ! not_every_iteration;
4747 v->always_executed = ! not_every_iteration;
4748 v->maybe_multiple = maybe_multiple;
4749
4750 /* Add this to the reg's iv_class, creating a class
4751 if this is the first incrementation of the reg. */
4752
4753 bl = reg_biv_class[REGNO (dest_reg)];
4754 if (bl == 0)
4755 {
4756 /* Create and initialize new iv_class. */
4757
4758 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4759
4760 bl->regno = REGNO (dest_reg);
4761 bl->biv = 0;
4762 bl->giv = 0;
4763 bl->biv_count = 0;
4764 bl->giv_count = 0;
4765
4766 /* Set initial value to the reg itself. */
4767 bl->initial_value = dest_reg;
4768 /* We haven't seen the initializing insn yet. */
4769 bl->init_insn = 0;
4770 bl->init_set = 0;
4771 bl->initial_test = 0;
4772 bl->incremented = 0;
4773 bl->eliminable = 0;
4774 bl->nonneg = 0;
4775 bl->reversed = 0;
4776 bl->total_benefit = 0;
4777
4778 /* Add this class to loop_iv_list. */
4779 bl->next = loop_iv_list;
4780 loop_iv_list = bl;
4781
4782 /* Put it in the array of biv register classes. */
4783 reg_biv_class[REGNO (dest_reg)] = bl;
4784 }
4785
4786 /* Update IV_CLASS entry for this biv. */
4787 v->next_iv = bl->biv;
4788 bl->biv = v;
4789 bl->biv_count++;
4790 if (mult_val == const1_rtx)
4791 bl->incremented = 1;
4792
4793 if (loop_dump_stream)
4794 {
4795 fprintf (loop_dump_stream,
4796 "Insn %d: possible biv, reg %d,",
4797 INSN_UID (insn), REGNO (dest_reg));
4798 if (GET_CODE (inc_val) == CONST_INT)
4799 {
4800 fprintf (loop_dump_stream, " const =");
4801 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4802 fputc ('\n', loop_dump_stream);
4803 }
4804 else
4805 {
4806 fprintf (loop_dump_stream, " const = ");
4807 print_rtl (loop_dump_stream, inc_val);
4808 fprintf (loop_dump_stream, "\n");
4809 }
4810 }
4811 }
4812 \f
4813 /* Fill in the data about one giv.
4814 V is the `struct induction' in which we record the giv. (It is
4815 allocated by the caller, with alloca.)
4816 INSN is the insn that sets it.
4817 BENEFIT estimates the savings from deleting this insn.
4818 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4819 into a register or is used as a memory address.
4820
4821 SRC_REG is the biv reg which the giv is computed from.
4822 DEST_REG is the giv's reg (if the giv is stored in a reg).
4823 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4824 LOCATION points to the place where this giv's value appears in INSN. */
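/* For example, for a DEST_REG giv set by `g = i*3 + 10' where `i' is a
   biv, SRC_REG is the reg for `i', MULT_VAL is (const_int 3), and ADD_VAL
   is (const_int 10).  For a DEST_ADDR giv, DEST_REG is the dummy
   addr_placeholder and LOCATION points at the address inside INSN.  */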
4825
4826 static void
4827 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4828 type, not_every_iteration, location, loop_start, loop_end)
4829 struct induction *v;
4830 rtx insn;
4831 rtx src_reg;
4832 rtx dest_reg;
4833 rtx mult_val, add_val;
4834 int benefit;
4835 enum g_types type;
4836 int not_every_iteration;
4837 rtx *location;
4838 rtx loop_start, loop_end;
4839 {
4840 struct induction *b;
4841 struct iv_class *bl;
4842 rtx set = single_set (insn);
4843
4844 v->insn = insn;
4845 v->src_reg = src_reg;
4846 v->giv_type = type;
4847 v->dest_reg = dest_reg;
4848 v->mult_val = mult_val;
4849 v->add_val = add_val;
4850 v->benefit = benefit;
4851 v->location = location;
4852 v->cant_derive = 0;
4853 v->combined_with = 0;
4854 v->maybe_multiple = 0;
4855 v->maybe_dead = 0;
4856 v->derive_adjustment = 0;
4857 v->same = 0;
4858 v->ignore = 0;
4859 v->new_reg = 0;
4860 v->final_value = 0;
4861 v->same_insn = 0;
4862 v->auto_inc_opt = 0;
4863 v->unrolled = 0;
4864 v->shared = 0;
4865
4866 /* The v->always_computable field is used in update_giv_derive, to
4867 determine whether a giv can be used to derive another giv. For a
4868 DEST_REG giv, INSN computes a new value for the giv, so its value
4869 isn't computable if INSN isn't executed every iteration.
4870 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4871 it does not compute a new value. Hence the value is always computable
4872 regardless of whether INSN is executed each iteration. */
4873
4874 if (type == DEST_ADDR)
4875 v->always_computable = 1;
4876 else
4877 v->always_computable = ! not_every_iteration;
4878
4879 v->always_executed = ! not_every_iteration;
4880
4881 if (type == DEST_ADDR)
4882 {
4883 v->mode = GET_MODE (*location);
4884 v->lifetime = 1;
4885 v->times_used = 1;
4886 }
4887 else /* type == DEST_REG */
4888 {
4889 v->mode = GET_MODE (SET_DEST (set));
4890
4891 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4892 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4893
4894 v->times_used = n_times_used[REGNO (dest_reg)];
4895
4896 /* If the lifetime is zero, it means that this register is
4897 really a dead store. So mark this as a giv that can be
4898 ignored. This will not prevent the biv from being eliminated. */
4899 if (v->lifetime == 0)
4900 v->ignore = 1;
4901
4902 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4903 reg_iv_info[REGNO (dest_reg)] = v;
4904 }
4905
4906 /* Add the giv to the class of givs computed from one biv. */
4907
4908 bl = reg_biv_class[REGNO (src_reg)];
4909 if (bl)
4910 {
4911 v->next_iv = bl->giv;
4912 bl->giv = v;
4913 /* Don't count DEST_ADDR. This is supposed to count the number of
4914 insns that calculate givs. */
4915 if (type == DEST_REG)
4916 bl->giv_count++;
4917 bl->total_benefit += benefit;
4918 }
4919 else
4920 /* Fatal error, biv missing for this giv? */
4921 abort ();
4922
4923 if (type == DEST_ADDR)
4924 v->replaceable = 1;
4925 else
4926 {
4927 /* The giv can be replaced outright by the reduced register only if all
4928 of the following conditions are true:
4929 - the insn that sets the giv is always executed on any iteration
4930 on which the giv is used at all
4931 (there are two ways to deduce this:
4932 either the insn is executed on every iteration,
4933 or all uses follow that insn in the same basic block),
4934 - the giv is not used outside the loop
4935 - no assignments to the biv occur during the giv's lifetime. */
4936
4937 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4938 /* Previous line always fails if INSN was moved by loop opt. */
4939 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4940 && (! not_every_iteration
4941 || last_use_this_basic_block (dest_reg, insn)))
4942 {
4943 /* Now check that there are no assignments to the biv within the
4944 giv's lifetime. This requires two separate checks. */
4945
4946 /* Check each biv update, and fail if any are between the first
4947 and last use of the giv.
4948
4949 If this loop contains an inner loop that was unrolled, then
4950 the insn modifying the biv may have been emitted by the loop
4951 unrolling code, and hence does not have a valid luid. Just
4952 mark the giv as not replaceable in this case. Such a biv is not very
4953 useful anyway, because it is used in two different loops.
4954 It is very unlikely that we would be able to optimize the giv
4955 using this biv anyway. */
4956
4957 v->replaceable = 1;
4958 for (b = bl->biv; b; b = b->next_iv)
4959 {
4960 if (INSN_UID (b->insn) >= max_uid_for_loop
4961 || ((uid_luid[INSN_UID (b->insn)]
4962 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4963 && (uid_luid[INSN_UID (b->insn)]
4964 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4965 {
4966 v->replaceable = 0;
4967 v->not_replaceable = 1;
4968 break;
4969 }
4970 }
4971
4972 /* If there are any backwards branches that go from after the
4973 biv update to before it, then this giv is not replaceable. */
4974 if (v->replaceable)
4975 for (b = bl->biv; b; b = b->next_iv)
4976 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4977 {
4978 v->replaceable = 0;
4979 v->not_replaceable = 1;
4980 break;
4981 }
4982 }
4983 else
4984 {
4985 /* It may still be replaceable; we don't have enough info here to
4986 decide. */
4987 v->replaceable = 0;
4988 v->not_replaceable = 0;
4989 }
4990 }
4991
4992 /* Record whether the add_val contains a const_int, for later use by
4993 combine_givs. */
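/* E.g., an add_val of (plus (reg a) (const_int 8)) clears no_const_addval,
   while an add_val of just (reg a) leaves it set.  */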
4994 {
4995 rtx tem = add_val;
4996
4997 v->no_const_addval = 1;
4998 if (tem == const0_rtx)
4999 ;
5000 else if (GET_CODE (tem) == CONST_INT)
5001 v->no_const_addval = 0;
5002 else if (GET_CODE (tem) == PLUS)
5003 {
5004 while (1)
5005 {
5006 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5007 tem = XEXP (tem, 0);
5008 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5009 tem = XEXP (tem, 1);
5010 else
5011 break;
5012 }
5013 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
5014 v->no_const_addval = 0;
5015 }
5016 }
5017
5018 if (loop_dump_stream)
5019 {
5020 if (type == DEST_REG)
5021 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5022 INSN_UID (insn), REGNO (dest_reg));
5023 else
5024 fprintf (loop_dump_stream, "Insn %d: dest address",
5025 INSN_UID (insn));
5026
5027 fprintf (loop_dump_stream, " src reg %d benefit %d",
5028 REGNO (src_reg), v->benefit);
5029 fprintf (loop_dump_stream, " used %d lifetime %d",
5030 v->times_used, v->lifetime);
5031
5032 if (v->replaceable)
5033 fprintf (loop_dump_stream, " replaceable");
5034
5035 if (v->no_const_addval)
5036 fprintf (loop_dump_stream, " ncav");
5037
5038 if (GET_CODE (mult_val) == CONST_INT)
5039 {
5040 fprintf (loop_dump_stream, " mult ");
5041 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5042 }
5043 else
5044 {
5045 fprintf (loop_dump_stream, " mult ");
5046 print_rtl (loop_dump_stream, mult_val);
5047 }
5048
5049 if (GET_CODE (add_val) == CONST_INT)
5050 {
5051 fprintf (loop_dump_stream, " add ");
5052 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5053 }
5054 else
5055 {
5056 fprintf (loop_dump_stream, " add ");
5057 print_rtl (loop_dump_stream, add_val);
5058 }
5059 }
5060
5061 if (loop_dump_stream)
5062 fprintf (loop_dump_stream, "\n");
5063
5064 }
5065
5066
5067 /* All this does is determine whether a giv can be made replaceable because
5068 its final value can be calculated. This code can not be part of record_giv
5069 above, because final_giv_value requires that the number of loop iterations
5070 be known, and that can not be accurately calculated until after all givs
5071 have been identified. */
5072
5073 static void
5074 check_final_value (v, loop_start, loop_end)
5075 struct induction *v;
5076 rtx loop_start, loop_end;
5077 {
5078 struct iv_class *bl;
5079 rtx final_value = 0;
5080
5081 bl = reg_biv_class[REGNO (v->src_reg)];
5082
5083 /* DEST_ADDR givs will never reach here, because they are always marked
5084 replaceable above in record_giv. */
5085
5086 /* The giv can be replaced outright by the reduced register only if all
5087 of the following conditions are true:
5088 - the insn that sets the giv is always executed on any iteration
5089 on which the giv is used at all
5090 (there are two ways to deduce this:
5091 either the insn is executed on every iteration,
5092 or all uses follow that insn in the same basic block),
5093 - its final value can be calculated (this condition is different
5094 than the one above in record_giv)
5095 - no assignments to the biv occur during the giv's lifetime. */
5096
5097 #if 0
5098 /* This is only called now when replaceable is known to be false. */
5099 /* Clear replaceable, so that it won't confuse final_giv_value. */
5100 v->replaceable = 0;
5101 #endif
5102
5103 if ((final_value = final_giv_value (v, loop_start, loop_end))
5104 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5105 {
5106 int biv_increment_seen = 0;
5107 rtx p = v->insn;
5108 rtx last_giv_use;
5109
5110 v->replaceable = 1;
5111
5112 /* When trying to determine whether or not a biv increment occurs
5113 during the lifetime of the giv, we can ignore uses of the variable
5114 outside the loop, because the giv has a known final value. Hence we
5115 can not use regno_last_uid and regno_first_uid as above in record_giv. */
5116
5117 /* Search the loop to determine whether any assignments to the
5118 biv occur during the giv's lifetime. Start with the insn
5119 that sets the giv, and search around the loop until we come
5120 back to that insn again.
5121
5122 Also fail if there is a jump within the giv's lifetime that jumps
5123 to somewhere outside the lifetime but still within the loop. This
5124 catches spaghetti code where the execution order is not linear, and
5125 hence the above test fails. Here we assume that the giv lifetime
5126 does not extend from one iteration of the loop to the next, so as
5127 to make the test easier. Since the lifetime isn't known yet,
5128 this requires two loops. See also record_giv above. */
5129
5130 last_giv_use = v->insn;
5131
5132 while (1)
5133 {
5134 p = NEXT_INSN (p);
5135 if (p == loop_end)
5136 p = NEXT_INSN (loop_start);
5137 if (p == v->insn)
5138 break;
5139
5140 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5141 || GET_CODE (p) == CALL_INSN)
5142 {
5143 if (biv_increment_seen)
5144 {
5145 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5146 {
5147 v->replaceable = 0;
5148 v->not_replaceable = 1;
5149 break;
5150 }
5151 }
5152 else if (reg_set_p (v->src_reg, PATTERN (p)))
5153 biv_increment_seen = 1;
5154 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5155 last_giv_use = p;
5156 }
5157 }
5158
5159 /* Now that the lifetime of the giv is known, check for branches
5160 from within the lifetime to outside the lifetime if it is still
5161 replaceable. */
5162
5163 if (v->replaceable)
5164 {
5165 p = v->insn;
5166 while (1)
5167 {
5168 p = NEXT_INSN (p);
5169 if (p == loop_end)
5170 p = NEXT_INSN (loop_start);
5171 if (p == last_giv_use)
5172 break;
5173
5174 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5175 && LABEL_NAME (JUMP_LABEL (p))
5176 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5177 || (INSN_UID (v->insn) >= max_uid_for_loop)
5178 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5179 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5180 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5181 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5182 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5183 {
5184 v->replaceable = 0;
5185 v->not_replaceable = 1;
5186
5187 if (loop_dump_stream)
5188 fprintf (loop_dump_stream,
5189 "Found branch outside giv lifetime.\n");
5190
5191 break;
5192 }
5193 }
5194 }
5195
5196 /* If it is replaceable, then save the final value. */
5197 if (v->replaceable)
5198 v->final_value = final_value;
5199 }
5200
5201 if (loop_dump_stream && v->replaceable)
5202 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5203 INSN_UID (v->insn), REGNO (v->dest_reg));
5204 }
5205 \f
5206 /* Update the status of whether a giv can derive other givs.
5207
5208 We need to do something special if there is or may be an update to the biv
5209 between the time the giv is defined and the time it is used to derive
5210 another giv.
5211
5212 In addition, a giv that is only conditionally set is not allowed to
5213 derive another giv once a label has been passed.
5214
5215 The cases we look at are when a label or an update to a biv is passed. */
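
/* Illustrative sketch: suppose g = b * 3 + 2 is recorded as a giv of
biv B, and B is then incremented by 4 within g's lifetime. A giv
derived from g after that increment must compensate by 3 * 4 = 12,
the product of biv->add_val and giv->mult_val that is computed below
and accumulated in derive_adjustment. */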
5216
5217 static void
5218 update_giv_derive (p)
5219 rtx p;
5220 {
5221 struct iv_class *bl;
5222 struct induction *biv, *giv;
5223 rtx tem;
5224 int dummy;
5225
5226 /* Search all IV classes, then all bivs, and finally all givs.
5227
5228 There are three cases we are concerned with. First we have the situation
5229 of a giv that is only updated conditionally. In that case, it may not
5230 derive any givs after a label is passed.
5231
5232 The second case is when a biv update occurs, or may occur, after the
5233 definition of a giv. For certain biv updates (see below) that are
5234 known to occur between the giv definition and use, we can adjust the
5235 giv definition. For others, or when the biv update is conditional,
5236 we must prevent the giv from deriving any other givs. There are two
5237 sub-cases within this case.
5238
5239 If this is a label, we are concerned with any biv update that is done
5240 conditionally, since it may be done after the giv is defined followed by
5241 a branch here (actually, we need to pass both a jump and a label, but
5242 this extra tracking doesn't seem worth it).
5243
5244 If this is a jump, we are concerned about any biv update that may be
5245 executed multiple times. We are actually only concerned about
5246 backward jumps, but it is probably not worth performing the test
5247 on the jump again here.
5248
5249 If this is a biv update, we must adjust the giv status to show that a
5250 subsequent biv update was performed. If this adjustment cannot be done,
5251 the giv cannot derive further givs. */
5252
5253 for (bl = loop_iv_list; bl; bl = bl->next)
5254 for (biv = bl->biv; biv; biv = biv->next_iv)
5255 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5256 || biv->insn == p)
5257 {
5258 for (giv = bl->giv; giv; giv = giv->next_iv)
5259 {
5260 /* If cant_derive is already true, there is no point in
5261 checking all of these conditions again. */
5262 if (giv->cant_derive)
5263 continue;
5264
5265 /* If this giv is conditionally set and we have passed a label,
5266 it cannot derive anything. */
5267 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5268 giv->cant_derive = 1;
5269
5270 /* Skip givs that have mult_val == 0, since
5271 they are really invariants. Also skip those that are
5272 replaceable, since we know their lifetime doesn't contain
5273 any biv update. */
5274 else if (giv->mult_val == const0_rtx || giv->replaceable)
5275 continue;
5276
5277 /* The only way we can allow this giv to derive another
5278 is if this is a biv increment and we can form the product
5279 of biv->add_val and giv->mult_val. In this case, we will
5280 be able to compute a compensation. */
5281 else if (biv->insn == p)
5282 {
5283 tem = 0;
5284
5285 if (biv->mult_val == const1_rtx)
5286 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5287 biv->add_val,
5288 giv->mult_val),
5289 &dummy);
5290
5291 if (tem && giv->derive_adjustment)
5292 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5293 giv->derive_adjustment),
5294 &dummy);
5295 if (tem)
5296 giv->derive_adjustment = tem;
5297 else
5298 giv->cant_derive = 1;
5299 }
5300 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5301 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5302 giv->cant_derive = 1;
5303 }
5304 }
5305 }
5306 \f
5307 /* Check whether an insn is an increment legitimate for a basic induction var.
5308 X is the source of insn P, or a part of it.
5309 MODE is the mode in which X should be interpreted.
5310
5311 DEST_REG is the putative biv, also the destination of the insn.
5312 We accept patterns of these forms:
5313 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5314 REG = INVARIANT + REG
5315
5316 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5317 and store the additive term into *INC_VAL.
5318
5319 If X is an assignment of an invariant into DEST_REG, we set
5320 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5321
5322 We also want to detect a BIV when it corresponds to a variable
5323 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5324 of the variable may be a PLUS that adds a SUBREG of that variable to
5325 an invariant and then sign- or zero-extends the result of the PLUS
5326 into the variable.
5327
5328 Most GIVs in such cases will be in the promoted mode, since that is
5329 probably the natural computation mode (and almost certainly the mode
5330 used for addresses) on the machine. So we view the pseudo-reg containing
5331 the variable as the BIV, as if it were simply incremented.
5332
5333 Note that treating the entire pseudo as a BIV will result in making
5334 simple increments to any GIVs based on it. However, if the variable
5335 overflows in its declared mode but not its promoted mode, the result will
5336 be incorrect. This is acceptable if the variable is signed, since
5337 overflows in such cases are undefined, but not if it is unsigned, since
5338 those overflows are defined. So we only check for SIGN_EXTEND and
5339 not ZERO_EXTEND.
5340
5341 If we cannot find a biv, we return 0. */
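
/* Illustrative sketch: an increment insn whose pattern is
(set (reg 100) (plus:SI (reg 100) (const_int 4)))
matches the first form above, so we return 1 with *INC_VAL set to
(const_int 4) and *MULT_VAL set to const1_rtx. (The register number
and mode are invented for the example.) */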
5342
5343 static int
5344 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5345 register rtx x;
5346 enum machine_mode mode;
5347 rtx dest_reg;
5348 rtx p;
5349 rtx *inc_val;
5350 rtx *mult_val;
5351 {
5352 register enum rtx_code code;
5353 rtx arg;
5354 rtx insn, set = 0;
5355
5356 code = GET_CODE (x);
5357 switch (code)
5358 {
5359 case PLUS:
5360 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5361 || (GET_CODE (XEXP (x, 0)) == SUBREG
5362 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5363 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5364 arg = XEXP (x, 1);
5365 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5366 || (GET_CODE (XEXP (x, 1)) == SUBREG
5367 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5368 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5369 arg = XEXP (x, 0);
5370 else
5371 return 0;
5372
5373 if (invariant_p (arg) != 1)
5374 return 0;
5375
5376 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5377 *mult_val = const1_rtx;
5378 return 1;
5379
5380 case SUBREG:
5381 /* If this is a SUBREG for a promoted variable, check the inner
5382 value. */
5383 if (SUBREG_PROMOTED_VAR_P (x))
5384 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5385 dest_reg, p, inc_val, mult_val);
5386 return 0;
5387
5388 case REG:
5389 /* If this register is assigned in a previous insn, look at its
5390 source, but don't go outside the loop or past a label. */
5391
5392 insn = p;
5393 while (1)
5394 {
5395 do {
5396 insn = PREV_INSN (insn);
5397 } while (insn && GET_CODE (insn) == NOTE
5398 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5399
5400 if (!insn)
5401 break;
5402 set = single_set (insn);
5403 if (set == 0)
5404 break;
5405
5406 if ((SET_DEST (set) == x
5407 || (GET_CODE (SET_DEST (set)) == SUBREG
5408 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5409 <= UNITS_PER_WORD)
5410 && SUBREG_REG (SET_DEST (set)) == x))
5411 && basic_induction_var (SET_SRC (set),
5412 (GET_MODE (SET_SRC (set)) == VOIDmode
5413 ? GET_MODE (x)
5414 : GET_MODE (SET_SRC (set))),
5415 dest_reg, insn,
5416 inc_val, mult_val))
5417 return 1;
5418 }
5419 /* ... fall through ... */
5420
5421 /* Can accept constant setting of biv only when inside innermost loop.
5422 Otherwise, a biv of an inner loop may be incorrectly recognized
5423 as a biv of the outer loop,
5424 causing code to be moved INTO the inner loop. */
5425 case MEM:
5426 if (invariant_p (x) != 1)
5427 return 0;
5428 case CONST_INT:
5429 case SYMBOL_REF:
5430 case CONST:
5431 /* convert_modes aborts if we try to convert to or from CCmode, so just
5432 exclude that case. It is very unlikely that a condition code value
5433 would be a useful iterator anyway. */
5434 if (loops_enclosed == 1
5435 && GET_MODE_CLASS (mode) != MODE_CC
5436 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5437 {
5438 /* Possible bug here? Perhaps we don't know the mode of X. */
5439 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5440 *mult_val = const0_rtx;
5441 return 1;
5442 }
5443 else
5444 return 0;
5445
5446 case SIGN_EXTEND:
5447 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5448 dest_reg, p, inc_val, mult_val);
5449
5450 case ASHIFTRT:
5451 /* Similar, since this can be a sign extension. */
5452 for (insn = PREV_INSN (p);
5453 (insn && GET_CODE (insn) == NOTE
5454 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5455 insn = PREV_INSN (insn))
5456 ;
5457
5458 if (insn)
5459 set = single_set (insn);
5460
5461 if (set && SET_DEST (set) == XEXP (x, 0)
5462 && GET_CODE (XEXP (x, 1)) == CONST_INT
5463 && INTVAL (XEXP (x, 1)) >= 0
5464 && GET_CODE (SET_SRC (set)) == ASHIFT
5465 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5466 return basic_induction_var (XEXP (SET_SRC (set), 0),
5467 GET_MODE (XEXP (x, 0)),
5468 dest_reg, insn, inc_val, mult_val);
5469 return 0;
5470
5471 default:
5472 return 0;
5473 }
5474 }
5475 \f
5476 /* A general induction variable (giv) is any quantity that is a linear
5477 function of a basic induction variable,
5478 i.e. giv = biv * mult_val + add_val.
5479 The coefficients can be any loop invariant quantity.
5480 A giv need not be computed directly from the biv;
5481 it can be computed by way of other givs. */
5482
5483 /* Determine whether X computes a giv.
5484 If it does, return a nonzero value
5485 which is the benefit from eliminating the computation of X;
5486 set *SRC_REG to the register of the biv that it is computed from;
5487 set *ADD_VAL and *MULT_VAL to the coefficients,
5488 such that the value of X is biv * mult + add. */
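
/* Illustrative sketch: if (reg 100) is a biv, then
(plus:SI (mult:SI (reg 100) (const_int 3)) (const_int 7))
computes a giv; we would set *SRC_REG to (reg 100), *MULT_VAL to
(const_int 3), and *ADD_VAL to (const_int 7). */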
5489
5490 static int
5491 general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
5492 rtx x;
5493 rtx *src_reg;
5494 rtx *add_val;
5495 rtx *mult_val;
5496 int is_addr;
5497 int *pbenefit;
5498 {
5499 rtx orig_x = x;
5500 char *storage;
5501
5502 /* If this is an invariant, forget it, it isn't a giv. */
5503 if (invariant_p (x) == 1)
5504 return 0;
5505
5506 /* See if the expression could be a giv and get its form.
5507 Mark our place on the obstack in case we don't find a giv. */
5508 storage = (char *) oballoc (0);
5509 *pbenefit = 0;
5510 x = simplify_giv_expr (x, pbenefit);
5511 if (x == 0)
5512 {
5513 obfree (storage);
5514 return 0;
5515 }
5516
5517 switch (GET_CODE (x))
5518 {
5519 case USE:
5520 case CONST_INT:
5521 /* Since this is now an invariant and wasn't before, it must be a giv
5522 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5523 with. */
5524 *src_reg = loop_iv_list->biv->dest_reg;
5525 *mult_val = const0_rtx;
5526 *add_val = x;
5527 break;
5528
5529 case REG:
5530 /* This is equivalent to a BIV. */
5531 *src_reg = x;
5532 *mult_val = const1_rtx;
5533 *add_val = const0_rtx;
5534 break;
5535
5536 case PLUS:
5537 /* Either (plus (biv) (invar)) or
5538 (plus (mult (biv) (invar_1)) (invar_2)). */
5539 if (GET_CODE (XEXP (x, 0)) == MULT)
5540 {
5541 *src_reg = XEXP (XEXP (x, 0), 0);
5542 *mult_val = XEXP (XEXP (x, 0), 1);
5543 }
5544 else
5545 {
5546 *src_reg = XEXP (x, 0);
5547 *mult_val = const1_rtx;
5548 }
5549 *add_val = XEXP (x, 1);
5550 break;
5551
5552 case MULT:
5553 /* ADD_VAL is zero. */
5554 *src_reg = XEXP (x, 0);
5555 *mult_val = XEXP (x, 1);
5556 *add_val = const0_rtx;
5557 break;
5558
5559 default:
5560 abort ();
5561 }
5562
5563 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5564 one unless they are CONST_INT). */
5565 if (GET_CODE (*add_val) == USE)
5566 *add_val = XEXP (*add_val, 0);
5567 if (GET_CODE (*mult_val) == USE)
5568 *mult_val = XEXP (*mult_val, 0);
5569
5570 if (is_addr)
5571 {
5572 #ifdef ADDRESS_COST
5573 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
5574 #else
5575 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
5576 #endif
5577 }
5578 else
5579 *pbenefit += rtx_cost (orig_x, SET);
5580
5581 /* Always return true if this is a giv so it will be detected as such,
5582 even if the benefit is zero or negative. This allows elimination
5583 of bivs that might otherwise not be eliminated. */
5584 return 1;
5585 }
5586 \f
5587 /* Given an expression, X, try to form it as a linear function of a biv.
5588 We will canonicalize it to be of the form
5589 (plus (mult (BIV) (invar_1))
5590 (invar_2))
5591 with possible degeneracies.
5592
5593 The invariant expressions must each be of a form that can be used as a
5594 machine operand. We surround them with a USE rtx (a hack, but localized
5595 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5596 routine; it is the caller's responsibility to strip them.
5597
5598 If no such canonicalization is possible (i.e., two biv's are used, or an
5599 expression appears that is neither invariant nor a biv nor a giv), this
5600 routine returns 0.
5601
5602 For a non-zero return, the result will have a code of CONST_INT, USE,
5603 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5604
5605 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
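
/* Illustrative sketch: given biv (reg 100), the expression
(plus:SI (reg 100) (mult:SI (reg 100) (const_int 2)))
is canonicalized by rewriting the bare REG as biv * 1 and then
distributing over the sum, yielding
(mult:SI (reg 100) (const_int 3)). */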
5606
5607 static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
5608 static rtx sge_plus_constant PROTO ((rtx, rtx));
5609
5610 static rtx
5611 simplify_giv_expr (x, benefit)
5612 rtx x;
5613 int *benefit;
5614 {
5615 enum machine_mode mode = GET_MODE (x);
5616 rtx arg0, arg1;
5617 rtx tem;
5618
5619 /* If this is not an integer mode, or if we cannot do arithmetic in this
5620 mode, this can't be a giv. */
5621 if (mode != VOIDmode
5622 && (GET_MODE_CLASS (mode) != MODE_INT
5623 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5624 return NULL_RTX;
5625
5626 switch (GET_CODE (x))
5627 {
5628 case PLUS:
5629 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5630 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5631 if (arg0 == 0 || arg1 == 0)
5632 return NULL_RTX;
5633
5634 /* Put constant last, CONST_INT last if both constant. */
5635 if ((GET_CODE (arg0) == USE
5636 || GET_CODE (arg0) == CONST_INT)
5637 && ! ((GET_CODE (arg0) == USE
5638 && GET_CODE (arg1) == USE)
5639 || GET_CODE (arg1) == CONST_INT))
5640 tem = arg0, arg0 = arg1, arg1 = tem;
5641
5642 /* Handle addition of zero, then addition of an invariant. */
5643 if (arg1 == const0_rtx)
5644 return arg0;
5645 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5646 switch (GET_CODE (arg0))
5647 {
5648 case CONST_INT:
5649 case USE:
5650 /* Adding two invariants must result in an invariant, so enclose
5651 addition operation inside a USE and return it. */
5652 if (GET_CODE (arg0) == USE)
5653 arg0 = XEXP (arg0, 0);
5654 if (GET_CODE (arg1) == USE)
5655 arg1 = XEXP (arg1, 0);
5656
5657 if (GET_CODE (arg0) == CONST_INT)
5658 tem = arg0, arg0 = arg1, arg1 = tem;
5659 if (GET_CODE (arg1) == CONST_INT)
5660 tem = sge_plus_constant (arg0, arg1);
5661 else
5662 tem = sge_plus (mode, arg0, arg1);
5663
5664 if (GET_CODE (tem) != CONST_INT)
5665 tem = gen_rtx_USE (mode, tem);
5666 return tem;
5667
5668 case REG:
5669 case MULT:
5670 /* biv + invar or mult + invar. Return sum. */
5671 return gen_rtx_PLUS (mode, arg0, arg1);
5672
5673 case PLUS:
5674 /* (a + invar_1) + invar_2. Associate. */
5675 return simplify_giv_expr (
5676 gen_rtx_PLUS (mode, XEXP (arg0, 0),
5677 gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
5678 benefit);
5679
5680 default:
5681 abort ();
5682 }
5683
5684 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5685 MULT to reduce cases. */
5686 if (GET_CODE (arg0) == REG)
5687 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5688 if (GET_CODE (arg1) == REG)
5689 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5690
5691 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5692 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5693 Recurse to associate the second PLUS. */
5694 if (GET_CODE (arg1) == MULT)
5695 tem = arg0, arg0 = arg1, arg1 = tem;
5696
5697 if (GET_CODE (arg1) == PLUS)
5698 return simplify_giv_expr (gen_rtx_PLUS (mode,
5699 gen_rtx_PLUS (mode, arg0,
5700 XEXP (arg1, 0)),
5701 XEXP (arg1, 1)),
5702 benefit);
5703
5704 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5705 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5706 return NULL_RTX;
5707
5708 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5709 return NULL_RTX;
5710
5711 return simplify_giv_expr (gen_rtx_MULT (mode,
5712 XEXP (arg0, 0),
5713 gen_rtx_PLUS (mode,
5714 XEXP (arg0, 1),
5715 XEXP (arg1, 1))),
5716 benefit);
5717
5718 case MINUS:
5719 /* Handle "a - b" as "a + b * (-1)". */
5720 return simplify_giv_expr (gen_rtx_PLUS (mode,
5721 XEXP (x, 0),
5722 gen_rtx_MULT (mode, XEXP (x, 1),
5723 constm1_rtx)),
5724 benefit);
5725
5726 case MULT:
5727 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5728 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5729 if (arg0 == 0 || arg1 == 0)
5730 return NULL_RTX;
5731
5732 /* Put constant last, CONST_INT last if both constant. */
5733 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5734 && GET_CODE (arg1) != CONST_INT)
5735 tem = arg0, arg0 = arg1, arg1 = tem;
5736
5737 /* If second argument is not now constant, not giv. */
5738 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5739 return NULL_RTX;
5740
5741 /* Handle multiply by 0 or 1. */
5742 if (arg1 == const0_rtx)
5743 return const0_rtx;
5744
5745 else if (arg1 == const1_rtx)
5746 return arg0;
5747
5748 switch (GET_CODE (arg0))
5749 {
5750 case REG:
5751 /* biv * invar. Done. */
5752 return gen_rtx_MULT (mode, arg0, arg1);
5753
5754 case CONST_INT:
5755 /* Product of two constants. */
5756 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5757
5758 case USE:
5759 /* invar * invar. It is a giv, but very few of these will
5760 actually pay off, so limit to simple registers. */
5761 if (GET_CODE (arg1) != CONST_INT)
5762 return NULL_RTX;
5763
5764 arg0 = XEXP (arg0, 0);
5765 if (GET_CODE (arg0) == REG)
5766 tem = gen_rtx_MULT (mode, arg0, arg1);
5767 else if (GET_CODE (arg0) == MULT
5768 && GET_CODE (XEXP (arg0, 0)) == REG
5769 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
5770 {
5771 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
5772 GEN_INT (INTVAL (XEXP (arg0, 1))
5773 * INTVAL (arg1)));
5774 }
5775 else
5776 return NULL_RTX;
5777 return gen_rtx_USE (mode, tem);
5778
5779 case MULT:
5780 /* (a * invar_1) * invar_2. Associate. */
5781 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5782 gen_rtx_MULT (mode,
5783 XEXP (arg0, 1),
5784 arg1)),
5785 benefit);
5786
5787 case PLUS:
5788 /* (a + invar_1) * invar_2. Distribute. */
5789 return simplify_giv_expr (gen_rtx_PLUS (mode,
5790 gen_rtx_MULT (mode,
5791 XEXP (arg0, 0),
5792 arg1),
5793 gen_rtx_MULT (mode,
5794 XEXP (arg0, 1),
5795 arg1)),
5796 benefit);
5797
5798 default:
5799 abort ();
5800 }
5801
5802 case ASHIFT:
5803 /* Shift by constant is multiply by power of two. */
5804 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5805 return 0;
5806
5807 return simplify_giv_expr (gen_rtx_MULT (mode,
5808 XEXP (x, 0),
5809 GEN_INT ((HOST_WIDE_INT) 1
5810 << INTVAL (XEXP (x, 1)))),
5811 benefit);
5812
5813 case NEG:
5814 /* "-a" is "a * (-1)" */
5815 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5816 benefit);
5817
5818 case NOT:
5819 /* "~a" is "-a - 1". Silly, but easy. */
5820 return simplify_giv_expr (gen_rtx_MINUS (mode,
5821 gen_rtx_NEG (mode, XEXP (x, 0)),
5822 const1_rtx),
5823 benefit);
5824
5825 case USE:
5826 /* Already in proper form for invariant. */
5827 return x;
5828
5829 case REG:
5830 /* If this is a new register, we can't deal with it. */
5831 if (REGNO (x) >= max_reg_before_loop)
5832 return 0;
5833
5834 /* Check for biv or giv. */
5835 switch (reg_iv_type[REGNO (x)])
5836 {
5837 case BASIC_INDUCT:
5838 return x;
5839 case GENERAL_INDUCT:
5840 {
5841 struct induction *v = reg_iv_info[REGNO (x)];
5842
5843 /* Form expression from giv and add benefit. Ensure this giv
5844 can derive another and subtract any needed adjustment if so. */
5845 *benefit += v->benefit;
5846 if (v->cant_derive)
5847 return 0;
5848
5849 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5850 v->mult_val),
5851 v->add_val);
5852 if (v->derive_adjustment)
5853 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5854 return simplify_giv_expr (tem, benefit);
5855 }
5856
5857 default:
5858 /* If it isn't an induction variable, and it is invariant, we
5859 may be able to simplify things further by looking through
5860 the bits we just moved outside the loop. */
5861 if (invariant_p (x) == 1)
5862 {
5863 struct movable *m;
5864
5865 for (m = the_movables; m ; m = m->next)
5866 if (rtx_equal_p (x, m->set_dest))
5867 {
5868 /* Ok, we found a match. Substitute and simplify. */
5869
5870 /* If we match another movable, we must use that, as
5871 this one is going away. */
5872 if (m->match)
5873 return simplify_giv_expr (m->match->set_dest, benefit);
5874
5875 /* If consec is non-zero, this is a member of a group of
5876 instructions that were moved together. We handle this
5877 case only to the point of seeking to the last insn and
5878 looking for a REG_EQUAL. Fail if we don't find one. */
5879 if (m->consec != 0)
5880 {
5881 int i = m->consec;
5882 tem = m->insn;
5883 do { tem = NEXT_INSN (tem); } while (--i > 0);
5884
5885 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
5886 if (tem)
5887 tem = XEXP (tem, 0);
5888 }
5889 else
5890 {
5891 tem = single_set (m->insn);
5892 if (tem)
5893 tem = SET_SRC (tem);
5894 }
5895
5896 if (tem)
5897 {
5898 /* What we are most interested in is pointer
5899 arithmetic on invariants -- only take
5900 patterns we may be able to do something with. */
5901 if (GET_CODE (tem) == PLUS
5902 || GET_CODE (tem) == MULT
5903 || GET_CODE (tem) == ASHIFT
5904 || GET_CODE (tem) == CONST_INT
5905 || GET_CODE (tem) == SYMBOL_REF)
5906 {
5907 tem = simplify_giv_expr (tem, benefit);
5908 if (tem)
5909 return tem;
5910 }
5911 else if (GET_CODE (tem) == CONST
5912 && GET_CODE (XEXP (tem, 0)) == PLUS
5913 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
5914 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
5915 {
5916 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
5917 if (tem)
5918 return tem;
5919 }
5920 }
5921 break;
5922 }
5923 }
5924 break;
5925 }
5926
5927 /* Fall through to general case. */
5928 default:
5929 /* If invariant, return as USE (unless CONST_INT).
5930 Otherwise, not giv. */
5931 if (GET_CODE (x) == USE)
5932 x = XEXP (x, 0);
5933
5934 if (invariant_p (x) == 1)
5935 {
5936 if (GET_CODE (x) == CONST_INT)
5937 return x;
5938 if (GET_CODE (x) == CONST
5939 && GET_CODE (XEXP (x, 0)) == PLUS
5940 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5941 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5942 x = XEXP (x, 0);
5943 return gen_rtx_USE (mode, x);
5944 }
5945 else
5946 return 0;
5947 }
5948 }
5949
5950 /* This routine folds invariants such that there is only ever one
5951 CONST_INT in the summation. It is only used by simplify_giv_expr. */
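
/* Illustrative sketch:
sge_plus_constant ((plus X (const_int 3)), (const_int 4))
yields (plus X (const_int 7)), rather than a nested sum containing
two CONST_INTs. */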
5952
5953 static rtx
5954 sge_plus_constant (x, c)
5955 rtx x, c;
5956 {
5957 if (GET_CODE (x) == CONST_INT)
5958 return GEN_INT (INTVAL (x) + INTVAL (c));
5959 else if (GET_CODE (x) != PLUS)
5960 return gen_rtx_PLUS (GET_MODE (x), x, c);
5961 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5962 {
5963 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
5964 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
5965 }
5966 else if (GET_CODE (XEXP (x, 0)) == PLUS
5967 || GET_CODE (XEXP (x, 1)) != PLUS)
5968 {
5969 return gen_rtx_PLUS (GET_MODE (x),
5970 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
5971 }
5972 else
5973 {
5974 return gen_rtx_PLUS (GET_MODE (x),
5975 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
5976 }
5977 }
5978
5979 static rtx
5980 sge_plus (mode, x, y)
5981 enum machine_mode mode;
5982 rtx x, y;
5983 {
5984 while (GET_CODE (y) == PLUS)
5985 {
5986 rtx a = XEXP (y, 0);
5987 if (GET_CODE (a) == CONST_INT)
5988 x = sge_plus_constant (x, a);
5989 else
5990 x = gen_rtx_PLUS (mode, x, a);
5991 y = XEXP (y, 1);
5992 }
5993 if (GET_CODE (y) == CONST_INT)
5994 x = sge_plus_constant (x, y);
5995 else
5996 x = gen_rtx_PLUS (mode, x, y);
5997 return x;
5998 }
5999 \f
6000 /* Help detect a giv that is calculated by several consecutive insns;
6001 for example,
6002 giv = biv * M
6003 giv = giv + A
6004 The caller has already identified the first insn P as having a giv as dest;
6005 we check that all other insns that set the same register follow
6006 immediately after P, that they alter nothing else,
6007 and that the result of the last is still a giv.
6008
6009 The value is 0 if the reg set in P is not really a giv.
6010 Otherwise, the value is the amount gained by eliminating
6011 all the consecutive insns that compute the value.
6012
6013 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6014 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6015
6016 The coefficients of the ultimate giv value are stored in
6017 *MULT_VAL and *ADD_VAL. */
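
/* Illustrative sketch: for the two-insn sequence
(set (reg 101) (mult:SI (reg 100) (const_int 4)))
(set (reg 101) (plus:SI (reg 101) (const_int 8)))
where (reg 100) is the biv, the ultimate giv value is biv * 4 + 8,
so *MULT_VAL becomes (const_int 4) and *ADD_VAL becomes
(const_int 8). */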
6018
6019 static int
6020 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6021 add_val, mult_val)
6022 int first_benefit;
6023 rtx p;
6024 rtx src_reg;
6025 rtx dest_reg;
6026 rtx *add_val;
6027 rtx *mult_val;
6028 {
6029 int count;
6030 enum rtx_code code;
6031 int benefit;
6032 rtx temp;
6033 rtx set;
6034
6035 /* Indicate that this is a giv so that we can update the value produced in
6036 each insn of the multi-insn sequence.
6037
6038 This induction structure will be used only by the call to
6039 general_induction_var below, so we can allocate it on our stack.
6040 If this is a giv, our caller will replace the induct var entry with
6041 a new induction structure. */
6042 struct induction *v
6043 = (struct induction *) alloca (sizeof (struct induction));
6044 v->src_reg = src_reg;
6045 v->mult_val = *mult_val;
6046 v->add_val = *add_val;
6047 v->benefit = first_benefit;
6048 v->cant_derive = 0;
6049 v->derive_adjustment = 0;
6050
6051 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
6052 reg_iv_info[REGNO (dest_reg)] = v;
6053
6054 count = n_times_set[REGNO (dest_reg)] - 1;
6055
6056 while (count > 0)
6057 {
6058 p = NEXT_INSN (p);
6059 code = GET_CODE (p);
6060
6061 /* If libcall, skip to end of call sequence. */
6062 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6063 p = XEXP (temp, 0);
6064
6065 if (code == INSN
6066 && (set = single_set (p))
6067 && GET_CODE (SET_DEST (set)) == REG
6068 && SET_DEST (set) == dest_reg
6069 && (general_induction_var (SET_SRC (set), &src_reg,
6070 add_val, mult_val, 0, &benefit)
6071 /* Giv created by equivalent expression. */
6072 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6073 && general_induction_var (XEXP (temp, 0), &src_reg,
6074 add_val, mult_val, 0, &benefit)))
6075 && src_reg == v->src_reg)
6076 {
6077 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6078 benefit += libcall_benefit (p);
6079
6080 count--;
6081 v->mult_val = *mult_val;
6082 v->add_val = *add_val;
6083 v->benefit = benefit;
6084 }
6085 else if (code != NOTE)
6086 {
6087 /* Allow insns that set something other than this giv to a
6088 constant. Such insns are needed on machines which cannot
6089 include long constants and should not disqualify a giv. */
6090 if (code == INSN
6091 && (set = single_set (p))
6092 && SET_DEST (set) != dest_reg
6093 && CONSTANT_P (SET_SRC (set)))
6094 continue;
6095
6096 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
6097 return 0;
6098 }
6099 }
6100
6101 return v->benefit;
6102 }
6103 \f
6104 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6105 represented by G1. If no such expression can be found, or it is clear that
6106 it cannot possibly be a valid address, 0 is returned.
6107
6108 To perform the computation, we note that
6109 G1 = x * v + a and
6110 G2 = y * v + b
6111 where `v' is the biv.
6112
6113 So G2 = (y/x) * G1 + (b - a*y/x).
6114
6115 Note that MULT = y/x.
6116
6117 Update: A and B are now allowed to be additive expressions such that
6118 B contains all variables in A. That is, computing B-A will not require
6119 subtracting variables. */
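
/* Illustrative sketch: with biv v, if G1 = 2*v + 1 and G2 = 6*v + 7,
then MULT = 6/2 = 3 and G2 = 3 * G1 + 4, since 3 * (2*v + 1) + 4
equals 6*v + 7. */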
6120
6121 static rtx
6122 express_from_1 (a, b, mult)
6123 rtx a, b, mult;
6124 {
6125 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6126
6127 if (mult == const0_rtx)
6128 return b;
6129
6130 /* If MULT is not 1, we cannot handle A with non-constants, since we
6131 would then be required to subtract multiples of the registers in A.
6132 This is theoretically possible, and may even apply to some Fortran
6133 constructs, but it is a lot of work and we do not attempt it here. */
6134
6135 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6136 return NULL_RTX;
6137
6138 /* In general these structures are sorted top to bottom (down the PLUS
6139 chain), but not left to right across the PLUS. If B is a higher
6140 order giv than A, we can strip one level and recurse. If A is higher
6141 order, we'll eventually bail out, but won't know that until the end.
6142 If they are the same, we'll strip one level around this loop. */
6143
6144 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6145 {
6146 rtx ra, rb, oa, ob, tmp;
6147
6148 ra = XEXP (a, 0), oa = XEXP (a, 1);
6149 if (GET_CODE (ra) == PLUS)
6150 tmp = ra, ra = oa, oa = tmp;
6151
6152 rb = XEXP (b, 0), ob = XEXP (b, 1);
6153 if (GET_CODE (rb) == PLUS)
6154 tmp = rb, rb = ob, ob = tmp;
6155
6156 if (rtx_equal_p (ra, rb))
6157 /* We matched: remove one reg completely. */
6158 a = oa, b = ob;
6159 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6160 /* An alternate match. */
6161 a = oa, b = rb;
6162 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6163 /* An alternate match. */
6164 a = ra, b = ob;
6165 else
6166 {
6167 /* Indicates an extra register in B. Strip one level from B and
6168 recurse, hoping B was the higher order expression. */
6169 ob = express_from_1 (a, ob, mult);
6170 if (ob == NULL_RTX)
6171 return NULL_RTX;
6172 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6173 }
6174 }
6175
6176 /* Here we are at the last level of A, go through the cases hoping to
6177 get rid of everything but a constant. */
6178
6179 if (GET_CODE (a) == PLUS)
6180 {
6181 rtx ra, oa;
6182
6183 ra = XEXP (a, 0), oa = XEXP (a, 1);
6184 if (rtx_equal_p (oa, b))
6185 oa = ra;
6186 else if (!rtx_equal_p (ra, b))
6187 return NULL_RTX;
6188
6189 if (GET_CODE (oa) != CONST_INT)
6190 return NULL_RTX;
6191
6192 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6193 }
6194 else if (GET_CODE (a) == CONST_INT)
6195 {
6196 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6197 }
6198 else if (GET_CODE (b) == PLUS)
6199 {
6200 if (rtx_equal_p (a, XEXP (b, 0)))
6201 return XEXP (b, 1);
6202 else if (rtx_equal_p (a, XEXP (b, 1)))
6203 return XEXP (b, 0);
6204 else
6205 return NULL_RTX;
6206 }
6207 else if (rtx_equal_p (a, b))
6208 return const0_rtx;
6209
6210 return NULL_RTX;
6211 }
6212
6213 static rtx
6214 express_from (g1, g2)
6215 struct induction *g1, *g2;
6216 {
6217 rtx mult, add;
6218
6219 /* The value that G1 will be multiplied by must be a constant integer. Also,
6220 the only chance we have of getting a valid address is if y/x (see the
6221 notation above express_from_1) is also an integer. */
6222 if (GET_CODE (g1->mult_val) == CONST_INT
6223 && GET_CODE (g2->mult_val) == CONST_INT)
6224 {
6225 if (g1->mult_val == const0_rtx
6226 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6227 return NULL_RTX;
6228 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6229 }
6230 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6231 mult = const1_rtx;
6232 else
6233 {
6234 /* ??? Find out if the one is a multiple of the other? */
6235 return NULL_RTX;
6236 }
6237
6238 add = express_from_1 (g1->add_val, g2->add_val, mult);
6239 if (add == NULL_RTX)
6240 return NULL_RTX;
6241
6242 /* Form simplified final result. */
6243 if (mult == const0_rtx)
6244 return add;
6245 else if (mult == const1_rtx)
6246 mult = g1->dest_reg;
6247 else
6248 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6249
6250 if (add == const0_rtx)
6251 return mult;
6252 else
6253 return gen_rtx_PLUS (g2->mode, mult, add);
6254 }
6255 \f
6256 /* If giv G2 can be combined with G1, return a representation of G1
6257 (normally just g1->dest_reg) that the caller can store in g2->new_reg;
6258 otherwise return 0. Combining means that G2 can use (either directly
6259 or via an address expression) a register used to represent G1. */
6260
6261 static rtx
6262 combine_givs_p (g1, g2)
6263 struct induction *g1, *g2;
6264 {
6265 rtx tem = express_from (g1, g2);
6266
6267 /* If these givs are identical, they can be combined. We use the results
6268 of express_from because the addends are not in a canonical form, so
6269 rtx_equal_p is a weaker test. */
6270 if (tem == const0_rtx)
6271 {
6272 return g1->dest_reg;
6273 }
6274
6275 /* If G2 can be expressed as a function of G1 and that function is valid
6276 as an address and no more expensive than using a register for G2,
6277 the expression of G2 in terms of G1 can be used. */
6278 if (tem != NULL_RTX
6279 && g2->giv_type == DEST_ADDR
6280 && memory_address_p (g2->mem_mode, tem)
6281 /* ??? Loses, especially with -fforce-addr, where *g2->location
6282 will always be a register, and so anything more complicated
6283 gets discarded. */
6284 #if 0
6285 #ifdef ADDRESS_COST
6286 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6287 #else
6288 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6289 #endif
6290 #endif
6291 )
6292 {
6293 return tem;
6294 }
6295
6296 return NULL_RTX;
6297 }
6298 \f
6299 struct combine_givs_stats
6300 {
6301 int giv_number;
6302 int total_benefit;
6303 };
6304
6305 static int
6306 cmp_combine_givs_stats (x, y)
6307 struct combine_givs_stats *x, *y;
6308 {
6309 int d;
6310 d = y->total_benefit - x->total_benefit;
6311 /* Stabilize the sort. */
6312 if (!d)
6313 d = x->giv_number - y->giv_number;
6314 return d;
6315 }
6316
6317 /* If one of these givs is a DEST_REG that was only used once, by the
6318 other giv, this is actually a single use. Return 0 if this is not
6319 the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
6320
6321 static int
6322 combine_givs_used_once (g1, g2)
6323 struct induction *g1, *g2;
6324 {
6325 if (g1->giv_type == DEST_REG
6326 && n_times_used[REGNO (g1->dest_reg)] == 1
6327 && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
6328 return -1;
6329
6330 if (g2->giv_type == DEST_REG
6331 && n_times_used[REGNO (g2->dest_reg)] == 1
6332 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
6333 return 1;
6334
6335 return 0;
6336 }
6337
6338 static int
6339 combine_givs_benefit_from (g1, g2)
6340 struct induction *g1, *g2;
6341 {
6342 int tmp = combine_givs_used_once (g1, g2);
6343 if (tmp < 0)
6344 return 0;
6345 else if (tmp > 0)
6346 return g2->benefit - g1->benefit;
6347 else
6348 return g2->benefit;
6349 }
6350
6351 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6352 any other. If so, point SAME to the giv combined with and set NEW_REG to
6353 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6354 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
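
/* Illustrative sketch: if G1 = 4*i and G2 = 4*i + 4, express_from
gives G2 = G1 + 4; if that expression is valid as an address (for a
DEST_ADDR giv), G2 is combined with G1 and only G1 needs to be
reduced. */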
6355
6356 static void
6357 combine_givs (bl)
6358 struct iv_class *bl;
6359 {
6360 struct induction *g1, *g2, **giv_array;
6361 int i, j, k, giv_count;
6362 struct combine_givs_stats *stats;
6363 rtx *can_combine;
6364
6365 /* Count givs, because bl->giv_count is incorrect here. */
6366 giv_count = 0;
6367 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6368 if (!g1->ignore)
6369 giv_count++;
6370
6371 giv_array
6372 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6373 i = 0;
6374 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6375 if (!g1->ignore)
6376 giv_array[i++] = g1;
6377
6378 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
6379 bzero ((char *) stats, giv_count * sizeof (*stats));
6380
6381 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof (rtx));
6382 bzero ((char *) can_combine, giv_count * giv_count * sizeof (rtx));
6383
6384 for (i = 0; i < giv_count; i++)
6385 {
6386 int this_benefit;
6387
6388 g1 = giv_array[i];
6389
6390 this_benefit = g1->benefit;
6391 /* Add an additional weight for zero addends. */
6392 if (g1->no_const_addval)
6393 this_benefit += 1;
6394 for (j = 0; j < giv_count; j++)
6395 {
6396 rtx this_combine;
6397
6398 g2 = giv_array[j];
6399 if (g1 != g2
6400 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6401 {
6402 can_combine[i*giv_count + j] = this_combine;
6403 this_benefit += combine_givs_benefit_from (g1, g2);
6404 /* Add an additional weight for being reused more times. */
6405 this_benefit += 3;
6406 }
6407 }
6408 stats[i].giv_number = i;
6409 stats[i].total_benefit = this_benefit;
6410 }
6411
6412 /* Iterate, combining until we can't. */
6413 restart:
6414 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6415
6416 if (loop_dump_stream)
6417 {
6418 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6419 for (k = 0; k < giv_count; k++)
6420 {
6421 g1 = giv_array[stats[k].giv_number];
6422 if (!g1->combined_with && !g1->same)
6423 fprintf (loop_dump_stream, " {%d, %d}",
6424 INSN_UID (giv_array[stats[k].giv_number]->insn),
6425 stats[k].total_benefit);
6426 }
6427 putc ('\n', loop_dump_stream);
6428 }
6429
6430 for (k = 0; k < giv_count; k++)
6431 {
6432 int g1_add_benefit = 0;
6433
6434 i = stats[k].giv_number;
6435 g1 = giv_array[i];
6436
6437 /* If it has already been combined, skip. */
6438 if (g1->combined_with || g1->same)
6439 continue;
6440
6441 for (j = 0; j < giv_count; j++)
6442 {
6443 g2 = giv_array[j];
6444 if (g1 != g2 && can_combine[i*giv_count + j]
6445 /* If it has already been combined, skip. */
6446 && ! g2->same && ! g2->combined_with)
6447 {
6448 int l;
6449
6450 g2->new_reg = can_combine[i*giv_count + j];
6451 g2->same = g1;
6452 g1->combined_with = 1;
6453 if (!combine_givs_used_once (g1, g2))
6454 g1->times_used += 1;
6455 g1->lifetime += g2->lifetime;
6456
6457 g1_add_benefit += combine_givs_benefit_from (g1, g2);
6458
6459 /* ??? The new final_[bg]iv_value code does a much better job
6460 of finding replaceable giv's, and hence this code may no
6461 longer be necessary. */
6462 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6463 g1_add_benefit -= copy_cost;
6464
6465 /* To help optimize the next set of combinations, remove
6466 this giv from the benefits of other potential mates. */
6467 for (l = 0; l < giv_count; ++l)
6468 {
6469 int m = stats[l].giv_number;
6470 if (can_combine[m*giv_count + j])
6471 {
6472 /* Remove additional weight for being reused. */
6473 stats[l].total_benefit -= 3 +
6474 combine_givs_benefit_from (giv_array[m], g2);
6475 }
6476 }
6477
6478 if (loop_dump_stream)
6479 fprintf (loop_dump_stream,
6480 "giv at %d combined with giv at %d\n",
6481 INSN_UID (g2->insn), INSN_UID (g1->insn));
6482 }
6483 }
6484
6485 /* To help optimize the next set of combinations, remove
6486 this giv from the benefits of other potential mates. */
6487 if (g1->combined_with)
6488 {
6489 for (j = 0; j < giv_count; ++j)
6490 {
6491 int m = stats[j].giv_number;
6492 if (can_combine[m*giv_count + i])
6493 {
6494 /* Remove additional weight for being reused. */
6495 stats[j].total_benefit -= 3 +
6496 combine_givs_benefit_from (giv_array[m], g1);
6497 }
6498 }
6499
6500 g1->benefit += g1_add_benefit;
6501
6502 /* We've finished with this giv, and everything it touched.
6503 Restart the combination so that proper weights for the
6504 rest of the givs are properly taken into account. */
6505 /* ??? Ideally we would compact the arrays at this point, so
6506 as to not cover old ground. But sanely compacting
6507 can_combine is tricky. */
6508 goto restart;
6509 }
6510 }
6511 }
6512 \f
6513 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
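
/* For example, a caller reducing a giv might initialize the new
register before the loop with a call such as
emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
v->new_reg, loop_start);
(an illustrative call; see the uses in strength_reduce). */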
6514
6515 void
6516 emit_iv_add_mult (b, m, a, reg, insert_before)
6517 rtx b; /* initial value of basic induction variable */
6518 rtx m; /* multiplicative constant */
6519 rtx a; /* additive constant */
6520 rtx reg; /* destination register */
6521 rtx insert_before;
6522 {
6523 rtx seq;
6524 rtx result;
6525
6526 /* Prevent unexpected sharing of these rtx. */
6527 a = copy_rtx (a);
6528 b = copy_rtx (b);
6529
6530 /* Increase the lifetime of any invariants moved further in code. */
6531 update_reg_last_use (a, insert_before);
6532 update_reg_last_use (b, insert_before);
6533 update_reg_last_use (m, insert_before);
6534
6535 start_sequence ();
6536 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6537 if (reg != result)
6538 emit_move_insn (reg, result);
6539 seq = gen_sequence ();
6540 end_sequence ();
6541
6542 emit_insn_before (seq, insert_before);
6543
6544 /* It is entirely possible that the expansion created lots of new
6545 registers. Iterate over the sequence we just created and
6546 record them all. */
6547
6548 if (GET_CODE (seq) == SEQUENCE)
6549 {
6550 int i;
6551 for (i = 0; i < XVECLEN (seq, 0); ++i)
6552 {
6553 rtx set = single_set (XVECEXP (seq, 0, i));
6554 if (set && GET_CODE (SET_DEST (set)) == REG)
6555 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6556 }
6557 }
6558 else if (GET_CODE (seq) == SET
6559 && GET_CODE (SET_DEST (seq)) == REG)
6560 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
6561 }
6562 \f
6563 /* Test whether A * B can be computed without
6564 an actual multiply insn. Value is 1 if so. */
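
/* For example, multiplying by 8 usually expands to a single shift and
is therefore cheap, while multiplying by a large constant may expand
to a real mult insn or a libcall and is not. (Illustrative; the
outcome depends on what expand_mult generates for the target.) */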
6565
6566 static int
6567 product_cheap_p (a, b)
6568 rtx a;
6569 rtx b;
6570 {
6571 int i;
6572 rtx tmp;
6573 struct obstack *old_rtl_obstack = rtl_obstack;
6574 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6575 int win = 1;
6576
6577 /* If only one is constant, make it B. */
6578 if (GET_CODE (a) == CONST_INT)
6579 tmp = a, a = b, b = tmp;
6580
6581 /* If first constant, both constant, so don't need multiply. */
6582 if (GET_CODE (a) == CONST_INT)
6583 return 1;
6584
6585 /* If second not constant, neither is constant, so would need multiply. */
6586 if (GET_CODE (b) != CONST_INT)
6587 return 0;
6588
6589 /* One operand is constant, so we might not need a multiply insn. Generate
6590 the code for the multiply and see whether a call, a multiply insn, or a
6591 long sequence of insns is generated. */
6592
6593 rtl_obstack = &temp_obstack;
6594 start_sequence ();
6595 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6596 tmp = gen_sequence ();
6597 end_sequence ();
6598
6599 if (GET_CODE (tmp) == SEQUENCE)
6600 {
6601 if (XVEC (tmp, 0) == 0)
6602 win = 1;
6603 else if (XVECLEN (tmp, 0) > 3)
6604 win = 0;
6605 else
6606 for (i = 0; i < XVECLEN (tmp, 0); i++)
6607 {
6608 rtx insn = XVECEXP (tmp, 0, i);
6609
6610 if (GET_CODE (insn) != INSN
6611 || (GET_CODE (PATTERN (insn)) == SET
6612 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6613 || (GET_CODE (PATTERN (insn)) == PARALLEL
6614 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6615 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6616 {
6617 win = 0;
6618 break;
6619 }
6620 }
6621 }
6622 else if (GET_CODE (tmp) == SET
6623 && GET_CODE (SET_SRC (tmp)) == MULT)
6624 win = 0;
6625 else if (GET_CODE (tmp) == PARALLEL
6626 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6627 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6628 win = 0;
6629
6630 /* Free any storage we obtained in generating this multiply and restore rtl
6631 allocation to its normal obstack. */
6632 obstack_free (&temp_obstack, storage);
6633 rtl_obstack = old_rtl_obstack;
6634
6635 return win;
6636 }
6637 \f
6638 /* Check to see if loop can be terminated by a "decrement and branch until
6639 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
6640 Also try reversing an increment loop to a decrement loop
6641 to see if the optimization can be performed.
6642 Value is nonzero if optimization was performed. */
6643
6644 /* This is useful even if the architecture doesn't have such an insn,
6645 because it might change a loop which increments from 0 to n to a loop
6646 which decrements from n to 0. A loop that decrements to zero is usually
6647 faster than one that increments from zero. */
6648
6649 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6650 such as approx_final_value, biv_total_increment, loop_iterations, and
6651 final_[bg]iv_value. */
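
/* Illustrative source-level sketch (the transformation itself is done
on rtl): a counting loop such as
for (i = 0; i < 16; i++) body;
in which i is used only to count iterations becomes
for (i = 15; i >= 0; i--) body;
so that the exit test is a compare against zero. */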
6652
6653 static int
6654 check_dbra_loop (loop_end, insn_count, loop_start)
6655 rtx loop_end;
6656 int insn_count;
6657 rtx loop_start;
6658 {
6659 struct iv_class *bl;
6660 rtx reg;
6661 rtx jump_label;
6662 rtx final_value;
6663 rtx start_value;
6664 rtx new_add_val;
6665 rtx comparison;
6666 rtx before_comparison;
6667 rtx p;
6668 rtx jump;
6669 rtx first_compare;
6670 int compare_and_branch;
6671
6672 /* If last insn is a conditional branch, and the insn before tests a
6673 register value, try to optimize it. Otherwise, we can't do anything. */
6674
6675 jump = PREV_INSN (loop_end);
6676 comparison = get_condition_for_loop (jump);
6677 if (comparison == 0)
6678 return 0;
6679
6680 /* Try to compute whether the compare/branch at the loop end is one or
6681 two instructions. */
6682 get_condition (jump, &first_compare);
6683 if (first_compare == jump)
6684 compare_and_branch = 1;
6685 else if (first_compare == prev_nonnote_insn (jump))
6686 compare_and_branch = 2;
6687 else
6688 return 0;
6689
6690 /* Check all of the bivs to see if the compare uses one of them.
6691 Skip biv's set more than once because we can't guarantee that
6692 it will be zero on the last iteration. Also skip if the biv is
6693 used between its update and the test insn. */
6694
6695 for (bl = loop_iv_list; bl; bl = bl->next)
6696 {
6697 if (bl->biv_count == 1
6698 && bl->biv->dest_reg == XEXP (comparison, 0)
6699 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6700 first_compare))
6701 break;
6702 }
6703
6704 if (! bl)
6705 return 0;
6706
6707 /* Look for the case where the basic induction variable is always
6708 nonnegative, and equals zero on the last iteration.
6709 In this case, add a reg_note REG_NONNEG, which allows the
6710 m68k DBRA instruction to be used. */
6711
6712 if (((GET_CODE (comparison) == GT
6713 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6714 && INTVAL (XEXP (comparison, 1)) == -1)
6715 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6716 && GET_CODE (bl->biv->add_val) == CONST_INT
6717 && INTVAL (bl->biv->add_val) < 0)
6718 {
6719 /* The initial value must be greater than 0, and
6720 init_val % -dec_value == 0 must hold so that the biv equals
6721 zero on the last iteration. */
6722
6723 if (GET_CODE (bl->initial_value) == CONST_INT
6724 && INTVAL (bl->initial_value) > 0
6725 && (INTVAL (bl->initial_value)
6726 % (-INTVAL (bl->biv->add_val))) == 0)
6727 {
6728 /* Register is always nonnegative; add a REG_NONNEG note to the branch. */
6729 REG_NOTES (PREV_INSN (loop_end))
6730 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6731 REG_NOTES (PREV_INSN (loop_end)));
6732 bl->nonneg = 1;
6733
6734 return 1;
6735 }
6736
6737 /* If the decrement is 1 and the value was tested as >= 0 before
6738 the loop, then we can safely optimize. */
6739 for (p = loop_start; p; p = PREV_INSN (p))
6740 {
6741 if (GET_CODE (p) == CODE_LABEL)
6742 break;
6743 if (GET_CODE (p) != JUMP_INSN)
6744 continue;
6745
6746 before_comparison = get_condition_for_loop (p);
6747 if (before_comparison
6748 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6749 && GET_CODE (before_comparison) == LT
6750 && XEXP (before_comparison, 1) == const0_rtx
6751 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6752 && INTVAL (bl->biv->add_val) == -1)
6753 {
6754 REG_NOTES (PREV_INSN (loop_end))
6755 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6756 REG_NOTES (PREV_INSN (loop_end)));
6757 bl->nonneg = 1;
6758
6759 return 1;
6760 }
6761 }
6762 }
6763 else if (num_mem_sets <= 1)
6764 {
6765 /* Try to change inc to dec, so can apply above optimization. */
6766 /* Can do this if:
6767 all registers modified are induction variables or invariant,
6768 all memory references have non-overlapping addresses
6769 (obviously true if only one write)
6770 allow 2 insns for the compare/jump at the end of the loop. */
6771 /* Also, we must avoid any instructions which use both the reversed
6772 biv and another biv. Such instructions will fail if the loop is
6773 reversed. We meet this condition by requiring that either
6774 no_use_except_counting is true, or else that there is only
6775 one biv. */
6776 int num_nonfixed_reads = 0;
6777 /* 1 if the iteration var is used only to count iterations. */
6778 int no_use_except_counting = 0;
6779 /* 1 if the loop has no memory store, or it has a single memory store
6780 which is reversible. */
6781 int reversible_mem_store = 1;
6782
6783 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6784 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6785 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6786
6787 if (bl->giv_count == 0
6788 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6789 {
6790 rtx bivreg = regno_reg_rtx[bl->regno];
6791
6792 /* If there are no givs for this biv, and the only exit is the
6793 fall through at the end of the loop, then
6794 see if perhaps there are no uses except to count. */
6795 no_use_except_counting = 1;
6796 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6797 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6798 {
6799 rtx set = single_set (p);
6800
6801 if (set && GET_CODE (SET_DEST (set)) == REG
6802 && REGNO (SET_DEST (set)) == bl->regno)
6803 /* An insn that sets the biv is okay. */
6804 ;
6805 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6806 || p == prev_nonnote_insn (loop_end))
6807 /* Don't bother about the end test. */
6808 ;
6809 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6810 /* Any other use of the biv is no good. */
6811 {
6812 no_use_except_counting = 0;
6813 break;
6814 }
6815 }
6816 }
6817
6818 /* If the loop has a single store, and the destination address is
6819 invariant, then we can't reverse the loop, because this address
6820 might then have the wrong value at loop exit.
6821 This would work if the source were invariant also; however, in that
6822 case the insn should have been moved out of the loop. */
6823
6824 if (num_mem_sets == 1)
6825 reversible_mem_store
6826 = (! unknown_address_altered
6827 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6828
6829 /* This code only acts for innermost loops. Also it simplifies
6830 the memory address check by only reversing loops with
6831 zero or one memory access.
6832 Two memory accesses could involve parts of the same array,
6833 and that can't be reversed. */
6834
6835 if (num_nonfixed_reads <= 1
6836 && !loop_has_call
6837 && !loop_has_volatile
6838 && reversible_mem_store
6839 && (no_use_except_counting
6840 || ((bl->giv_count + bl->biv_count + num_mem_sets
6841 + num_movables + compare_and_branch == insn_count)
6842 && (bl == loop_iv_list && bl->next == 0))))
6843 {
6844 rtx tem;
6845
6846 /* Loop can be reversed. */
6847 if (loop_dump_stream)
6848 fprintf (loop_dump_stream, "Can reverse loop\n");
6849
6850 /* Now check other conditions:
6851
6852 The increment must be a constant, as must the initial value,
6853 and the comparison code must be LT.
6854
6855 This test can probably be improved since +/- 1 in the constant
6856 can be obtained by changing LT to LE and vice versa; this is
6857 confusing. */
6858
6859 if (comparison
6860 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6861 /* LE gets turned into LT */
6862 && GET_CODE (comparison) == LT
6863 && GET_CODE (bl->initial_value) == CONST_INT)
6864 {
6865 HOST_WIDE_INT add_val, comparison_val;
6866 rtx initial_value;
6867
6868 add_val = INTVAL (bl->biv->add_val);
6869 comparison_val = INTVAL (XEXP (comparison, 1));
6870 final_value = XEXP (comparison, 1);
6871 initial_value = bl->initial_value;
6872
6873 /* Normalize the initial value if it is an integer and
6874 has no other use except as a counter. This will allow
6875 a few more loops to be reversed. */
6876 if (no_use_except_counting
6877 && GET_CODE (initial_value) == CONST_INT)
6878 {
6879 comparison_val = comparison_val - INTVAL (bl->initial_value);
6880 /* Check for overflow. If comparison_val ends up as a
6881 negative value, then we can't reverse the loop. */
6882 if (comparison_val >= 0)
6883 initial_value = const0_rtx;
6884 }
6885
6886 /* If the initial value is not zero, or if the comparison
6887 value is not an exact multiple of the increment, then we
6888 cannot reverse this loop. */
6889 if (initial_value != const0_rtx
6890 || (comparison_val % add_val) != 0)
6891 return 0;
6892
6893 /* Reset these in case we normalized the initial value
6894 and comparison value above. */
6895 bl->initial_value = initial_value;
6896 XEXP (comparison, 1) = GEN_INT (comparison_val);
6897
6898 /* Register will always be nonnegative, with value
6899 0 on the last iteration if the loop is reversed. */
6900
6901 /* Save some info needed to produce the new insns. */
6902 reg = bl->biv->dest_reg;
6903 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6904 if (jump_label == pc_rtx)
6905 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6906 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6907
6908 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6909 - INTVAL (bl->biv->add_val));
6910
6911 /* Initialize biv to start_value before loop start.
6912 The old initializing insn will be deleted as a
6913 dead store by flow.c. */
6914 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6915
6916 /* Add insn to decrement register, and delete insn
6917 that incremented the register. */
6918 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6919 bl->biv->insn);
6920 delete_insn (bl->biv->insn);
6921
6922 /* Update biv info to reflect its new status. */
6923 bl->biv->insn = p;
6924 bl->initial_value = start_value;
6925 bl->biv->add_val = new_add_val;
6926
6927 /* Inc LABEL_NUSES so that delete_insn will
6928 not delete the label. */
6929 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6930
6931 /* Emit an insn after the end of the loop to set the biv's
6932 proper exit value if it is used anywhere outside the loop. */
6933 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
6934 || ! bl->init_insn
6935 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6936 emit_insn_after (gen_move_insn (reg, final_value),
6937 loop_end);
6938
6939 /* Delete compare/branch at end of loop. */
6940 delete_insn (PREV_INSN (loop_end));
6941 if (compare_and_branch == 2)
6942 delete_insn (first_compare);
6943
6944 /* Add new compare/branch insn at end of loop. */
6945 start_sequence ();
6946 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6947 GET_MODE (reg), 0, 0);
6948 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6949 tem = gen_sequence ();
6950 end_sequence ();
6951 emit_jump_insn_before (tem, loop_end);
6952
6953 for (tem = PREV_INSN (loop_end);
6954 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6955 ;
6956 if (tem)
6957 {
6958 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6959
6960 /* Increment of LABEL_NUSES done above. */
6961 /* Register is now always nonnegative,
6962 so add REG_NONNEG note to the branch. */
6963 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6964 REG_NOTES (tem));
6965 }
6966
6967 bl->nonneg = 1;
6968
6969 /* Mark that this biv has been reversed. Each giv that depends
6970 on this biv and that is also live past the end of the loop
6971 will have to be fixed up. */
6972
6973 bl->reversed = 1;
6974
6975 if (loop_dump_stream)
6976 fprintf (loop_dump_stream,
6977 "Reversed loop and added reg_nonneg\n");
6978
6979 return 1;
6980 }
6981 }
6982 }
6983
6984 return 0;
6985 }
6986 \f
6987 /* Verify whether the biv BL appears to be eliminable,
6988 based on the insns in the loop that refer to it.
6989 LOOP_START is the first insn of the loop, and END is the end insn.
6990
6991 If ELIMINATE_P is non-zero, actually do the elimination.
6992
6993 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6994 determine whether invariant insns should be placed inside or at the
6995 start of the loop. */
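   For instance, if the only remaining use of biv I is an exit test
   I < 100, and a giv G = 4*I has been reduced to a new pseudo, the
   test can be rewritten as G < 400, after which the now-dead
   increment of I can be deleted.  */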
6996
6997 static int
6998 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6999 struct iv_class *bl;
7000 rtx loop_start;
7001 rtx end;
7002 int eliminate_p;
7003 int threshold, insn_count;
7004 {
7005 rtx reg = bl->biv->dest_reg;
7006 rtx p;
7007
7008 /* Scan all insns in the loop, stopping if we find one that uses the
7009 biv in a way that we cannot eliminate. */
7010
7011 for (p = loop_start; p != end; p = NEXT_INSN (p))
7012 {
7013 enum rtx_code code = GET_CODE (p);
7014 rtx where = threshold >= insn_count ? loop_start : p;
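       /* WHERE is where any replacement insns will be emitted: at the
 	  loop start when the loop is small (INSN_COUNT at most
 	  THRESHOLD), and just before the use at P otherwise.  */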
7015
7016 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7017 && reg_mentioned_p (reg, PATTERN (p))
7018 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
7019 {
7020 if (loop_dump_stream)
7021 fprintf (loop_dump_stream,
7022 "Cannot eliminate biv %d: biv used in insn %d.\n",
7023 bl->regno, INSN_UID (p));
7024 break;
7025 }
7026 }
7027
7028 if (p == end)
7029 {
7030 if (loop_dump_stream)
7031 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7032 bl->regno, eliminate_p ? "was" : "can be");
7033 return 1;
7034 }
7035
7036 return 0;
7037 }
7038 \f
7039 /* If BL appears in X (part of the pattern of INSN), see if we can
7040 eliminate its use. If so, return 1. If not, return 0.
7041
7042 If BIV does not appear in X, return 1.
7043
7044 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
7045 where extra insns should be added. Depending on how many items have been
7046 moved out of the loop, it will either be before INSN or at the start of
7047 the loop. */
7048
7049 static int
7050 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
7051 rtx x, insn;
7052 struct iv_class *bl;
7053 int eliminate_p;
7054 rtx where;
7055 {
7056 enum rtx_code code = GET_CODE (x);
7057 rtx reg = bl->biv->dest_reg;
7058 enum machine_mode mode = GET_MODE (reg);
7059 struct induction *v;
7060 rtx arg, tem;
7061 #ifdef HAVE_cc0
7062 rtx new;
7063 #endif
7064 int arg_operand;
7065 char *fmt;
7066 int i, j;
7067
7068 switch (code)
7069 {
7070 case REG:
7071 /* If we haven't already been able to do something with this BIV,
7072 we can't eliminate it. */
7073 if (x == reg)
7074 return 0;
7075 return 1;
7076
7077 case SET:
7078 /* If this sets the BIV, it is not a problem. */
7079 if (SET_DEST (x) == reg)
7080 return 1;
7081
7082 /* If this is an insn that defines a giv, it is also ok because
7083 it will go away when the giv is reduced. */
7084 for (v = bl->giv; v; v = v->next_iv)
7085 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7086 return 1;
7087
7088 #ifdef HAVE_cc0
7089 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7090 {
7091 /* Can replace with any giv that was reduced and
7092 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7093 Require a constant for MULT_VAL, so we know it's nonzero.
7094 ??? We disable this optimization to avoid potential
7095 overflows. */
7096
7097 for (v = bl->giv; v; v = v->next_iv)
7098 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7099 && v->add_val == const0_rtx
7100 && ! v->ignore && ! v->maybe_dead && v->always_computable
7101 && v->mode == mode
7102 && 0)
7103 {
7104 /* If the giv V had the auto-inc address optimization applied
7105 to it, and INSN occurs between the giv insn and the biv
7106 insn, then we must adjust the value used here.
7107 This is rare, so we don't bother to do so. */
7108 if (v->auto_inc_opt
7109 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7110 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7111 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7112 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7113 continue;
7114
7115 if (! eliminate_p)
7116 return 1;
7117
7118 /* If the giv has the opposite direction of change,
7119 then reverse the comparison. */
7120 if (INTVAL (v->mult_val) < 0)
7121 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7122 const0_rtx, v->new_reg);
7123 else
7124 new = v->new_reg;
7125
7126 	      /* We can probably test that giv's reduced reg in place of
7126 		 the biv.  */
7127 if (validate_change (insn, &SET_SRC (x), new, 0))
7128 return 1;
7129 }
7130
7131 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7132 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7133 Require a constant for MULT_VAL, so we know it's nonzero.
7134 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7135 overflow problem. */
7136
7137 for (v = bl->giv; v; v = v->next_iv)
7138 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7139 && ! v->ignore && ! v->maybe_dead && v->always_computable
7140 && v->mode == mode
7141 && (GET_CODE (v->add_val) == SYMBOL_REF
7142 || GET_CODE (v->add_val) == LABEL_REF
7143 || GET_CODE (v->add_val) == CONST
7144 || (GET_CODE (v->add_val) == REG
7145 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
7146 {
7147 /* If the giv V had the auto-inc address optimization applied
7148 to it, and INSN occurs between the giv insn and the biv
7149 insn, then we must adjust the value used here.
7150 This is rare, so we don't bother to do so. */
7151 if (v->auto_inc_opt
7152 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7153 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7154 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7155 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7156 continue;
7157
7158 if (! eliminate_p)
7159 return 1;
7160
7161 /* If the giv has the opposite direction of change,
7162 then reverse the comparison. */
7163 if (INTVAL (v->mult_val) < 0)
7164 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7165 v->new_reg);
7166 else
7167 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7168 copy_rtx (v->add_val));
7169
7170 /* Replace biv with the giv's reduced register. */
7171 update_reg_last_use (v->add_val, insn);
7172 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7173 return 1;
7174
7175 /* Insn doesn't support that constant or invariant. Copy it
7176 	    into a register (it will be a loop invariant).  */
7177 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7178
7179 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
7180 where);
7181
7182 /* Substitute the new register for its invariant value in
7183 the compare expression. */
7184 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7185 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7186 return 1;
7187 }
7188 }
7189 #endif
7190 break;
7191
7192 case COMPARE:
7193 case EQ: case NE:
7194 case GT: case GE: case GTU: case GEU:
7195 case LT: case LE: case LTU: case LEU:
7196 /* See if either argument is the biv. */
7197 if (XEXP (x, 0) == reg)
7198 arg = XEXP (x, 1), arg_operand = 1;
7199 else if (XEXP (x, 1) == reg)
7200 arg = XEXP (x, 0), arg_operand = 0;
7201 else
7202 break;
7203
7204 if (CONSTANT_P (arg))
7205 {
7206 /* First try to replace with any giv that has constant positive
7207 mult_val and constant add_val. We might be able to support
7208 negative mult_val, but it seems complex to do it in general. */
7209
7210 for (v = bl->giv; v; v = v->next_iv)
7211 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7212 && (GET_CODE (v->add_val) == SYMBOL_REF
7213 || GET_CODE (v->add_val) == LABEL_REF
7214 || GET_CODE (v->add_val) == CONST
7215 || (GET_CODE (v->add_val) == REG
7216 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
7217 && ! v->ignore && ! v->maybe_dead && v->always_computable
7218 && v->mode == mode)
7219 {
7220 /* If the giv V had the auto-inc address optimization applied
7221 to it, and INSN occurs between the giv insn and the biv
7222 insn, then we must adjust the value used here.
7223 This is rare, so we don't bother to do so. */
7224 if (v->auto_inc_opt
7225 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7226 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7227 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7228 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7229 continue;
7230
7231 if (! eliminate_p)
7232 return 1;
7233
7234 /* Replace biv with the giv's reduced reg. */
7235 XEXP (x, 1-arg_operand) = v->new_reg;
7236
7237 /* If all constants are actually constant integers and
7238 the derived constant can be directly placed in the COMPARE,
7239 do so. */
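 	      /* E.g., a test of the biv against 100, with a reduced giv
 		 computing 4*biv + 8, can become a test of the giv's new
 		 register against 408.  */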
7240 if (GET_CODE (arg) == CONST_INT
7241 && GET_CODE (v->mult_val) == CONST_INT
7242 && GET_CODE (v->add_val) == CONST_INT
7243 && validate_change (insn, &XEXP (x, arg_operand),
7244 GEN_INT (INTVAL (arg)
7245 * INTVAL (v->mult_val)
7246 + INTVAL (v->add_val)), 0))
7247 return 1;
7248
7249 /* Otherwise, load it into a register. */
7250 tem = gen_reg_rtx (mode);
7251 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7252 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
7253 return 1;
7254
7255 /* If that failed, put back the change we made above. */
7256 XEXP (x, 1-arg_operand) = reg;
7257 }
7258
7259 /* Look for giv with positive constant mult_val and nonconst add_val.
7260 Insert insns to calculate new compare value.
7261 ??? Turn this off due to possible overflow. */
7262
7263 for (v = bl->giv; v; v = v->next_iv)
7264 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7265 && ! v->ignore && ! v->maybe_dead && v->always_computable
7266 && v->mode == mode
7267 && 0)
7268 {
7269 rtx tem;
7270
7271 /* If the giv V had the auto-inc address optimization applied
7272 to it, and INSN occurs between the giv insn and the biv
7273 insn, then we must adjust the value used here.
7274 This is rare, so we don't bother to do so. */
7275 if (v->auto_inc_opt
7276 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7277 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7278 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7279 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7280 continue;
7281
7282 if (! eliminate_p)
7283 return 1;
7284
7285 tem = gen_reg_rtx (mode);
7286
7287 /* Replace biv with giv's reduced register. */
7288 validate_change (insn, &XEXP (x, 1 - arg_operand),
7289 v->new_reg, 1);
7290
7291 /* Compute value to compare against. */
7292 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7293 /* Use it in this insn. */
7294 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7295 if (apply_change_group ())
7296 return 1;
7297 }
7298 }
7299 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
7300 {
7301 if (invariant_p (arg) == 1)
7302 {
7303 /* Look for giv with constant positive mult_val and nonconst
7304 add_val. Insert insns to compute new compare value.
7305 ??? Turn this off due to possible overflow. */
7306
7307 for (v = bl->giv; v; v = v->next_iv)
7308 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7309 && ! v->ignore && ! v->maybe_dead && v->always_computable
7310 && v->mode == mode
7311 && 0)
7312 {
7313 rtx tem;
7314
7315 /* If the giv V had the auto-inc address optimization applied
7316 to it, and INSN occurs between the giv insn and the biv
7317 insn, then we must adjust the value used here.
7318 This is rare, so we don't bother to do so. */
7319 if (v->auto_inc_opt
7320 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7321 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7322 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7323 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7324 continue;
7325
7326 if (! eliminate_p)
7327 return 1;
7328
7329 tem = gen_reg_rtx (mode);
7330
7331 /* Replace biv with giv's reduced register. */
7332 validate_change (insn, &XEXP (x, 1 - arg_operand),
7333 v->new_reg, 1);
7334
7335 /* Compute value to compare against. */
7336 emit_iv_add_mult (arg, v->mult_val, v->add_val,
7337 tem, where);
7338 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7339 if (apply_change_group ())
7340 return 1;
7341 }
7342 }
7343
7344 	  /* This code has problems.  Basically, when seeing whether we
7345 	     will eliminate BL, you can't know whether a particular giv
7346 	     of ARG will be reduced.  If it isn't going to be reduced,
7347 	     we can't eliminate BL.  We can try forcing it to be reduced,
7348 	     but that can generate poor code.
7349
7350 	     The problem is that the benefit of reducing TV, below, should
7351 	     be increased if BL can actually be eliminated, but this means
7352 	     we might have to do a topological sort of the order in which
7353 	     we try to process bivs.  It doesn't seem worthwhile to do
7354 	     this sort of thing now.  */
7355
7356 #if 0
7357 /* Otherwise the reg compared with had better be a biv. */
7358 if (GET_CODE (arg) != REG
7359 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
7360 return 0;
7361
7362 /* Look for a pair of givs, one for each biv,
7363 with identical coefficients. */
7364 for (v = bl->giv; v; v = v->next_iv)
7365 {
7366 struct induction *tv;
7367
7368 if (v->ignore || v->maybe_dead || v->mode != mode)
7369 continue;
7370
7371 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
7372 if (! tv->ignore && ! tv->maybe_dead
7373 && rtx_equal_p (tv->mult_val, v->mult_val)
7374 && rtx_equal_p (tv->add_val, v->add_val)
7375 && tv->mode == mode)
7376 {
7377 /* If the giv V had the auto-inc address optimization applied
7378 to it, and INSN occurs between the giv insn and the biv
7379 insn, then we must adjust the value used here.
7380 This is rare, so we don't bother to do so. */
7381 if (v->auto_inc_opt
7382 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7383 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7384 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7385 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7386 continue;
7387
7388 if (! eliminate_p)
7389 return 1;
7390
7391 /* Replace biv with its giv's reduced reg. */
7392 XEXP (x, 1-arg_operand) = v->new_reg;
7393 /* Replace other operand with the other giv's
7394 reduced reg. */
7395 XEXP (x, arg_operand) = tv->new_reg;
7396 return 1;
7397 }
7398 }
7399 #endif
7400 }
7401
7402 /* If we get here, the biv can't be eliminated. */
7403 return 0;
7404
7405 case MEM:
7406 /* If this address is a DEST_ADDR giv, it doesn't matter if the
7407 biv is used in it, since it will be replaced. */
7408 for (v = bl->giv; v; v = v->next_iv)
7409 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
7410 return 1;
7411 break;
7412
7413 default:
7414 break;
7415 }
7416
7417 /* See if any subexpression fails elimination. */
7418 fmt = GET_RTX_FORMAT (code);
7419 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7420 {
7421 switch (fmt[i])
7422 {
7423 case 'e':
7424 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
7425 eliminate_p, where))
7426 return 0;
7427 break;
7428
7429 case 'E':
7430 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7431 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
7432 eliminate_p, where))
7433 return 0;
7434 break;
7435 }
7436 }
7437
7438 return 1;
7439 }
7440 \f
7441 /* Return nonzero if the last use of REG
7442 is in an insn following INSN in the same basic block. */
7443
7444 static int
7445 last_use_this_basic_block (reg, insn)
7446 rtx reg;
7447 rtx insn;
7448 {
7449 rtx n;
7450 for (n = insn;
7451 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
7452 n = NEXT_INSN (n))
7453 {
7454 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
7455 return 1;
7456 }
7457 return 0;
7458 }
7459 \f
7460 /* Called via `note_stores' to record the initial value of a biv. Here we
7461 just record the location of the set and process it later. */
7462
7463 static void
7464 record_initial (dest, set)
7465 rtx dest;
7466 rtx set;
7467 {
7468 struct iv_class *bl;
7469
7470 if (GET_CODE (dest) != REG
7471 || REGNO (dest) >= max_reg_before_loop
7472 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
7473 return;
7474
7475 bl = reg_biv_class[REGNO (dest)];
7476
7477 /* If this is the first set found, record it. */
7478 if (bl->init_insn == 0)
7479 {
7480 bl->init_insn = note_insn;
7481 bl->init_set = set;
7482 }
7483 }
7484 \f
7485 /* If any of the registers in X are "old" and currently have a last use earlier
7486 than INSN, update them to have a last use of INSN. Their actual last use
7487 will be the previous insn but it will not have a valid uid_luid so we can't
7488 use it. */
7489
7490 static void
7491 update_reg_last_use (x, insn)
7492 rtx x;
7493 rtx insn;
7494 {
7495 /* Check for the case where INSN does not have a valid luid. In this case,
7496 there is no need to modify the regno_last_uid, as this can only happen
7497 when code is inserted after the loop_end to set a pseudo's final value,
7498 and hence this insn will never be the last use of x. */
7499 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
7500 && INSN_UID (insn) < max_uid_for_loop
7501 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
7502 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
7503 else
7504 {
7505 register int i, j;
7506 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
7507 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7508 {
7509 if (fmt[i] == 'e')
7510 update_reg_last_use (XEXP (x, i), insn);
7511 else if (fmt[i] == 'E')
7512 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7513 update_reg_last_use (XVECEXP (x, i, j), insn);
7514 }
7515 }
7516 }
7517 \f
7518 /* Given a jump insn JUMP, return the condition that will cause it to branch
7519 to its JUMP_LABEL. If the condition cannot be understood, or is an
7520 inequality floating-point comparison which needs to be reversed, 0 will
7521 be returned.
7522
7523 If EARLIEST is non-zero, it is a pointer to a place where the earliest
7524 insn used in locating the condition was found. If a replacement test
7525 of the condition is desired, it should be placed in front of that
7526 insn and we will be sure that the inputs are still valid.
7527
7528 The condition will be returned in a canonical form to simplify testing by
7529 callers. Specifically:
7530
7531 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
7532 (2) Both operands will be machine operands; (cc0) will have been replaced.
7533 (3) If an operand is a constant, it will be the second operand.
7534 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
7535 for GE, GEU, and LEU. */
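/* For example, on a 32-bit target a branch guarded by (le i 4) is
   returned as (lt i 5), (geu i 1) as (gtu i 0), and (gt 7 i) is first
   swapped to (lt i 7) so that the constant ends up second.  */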
7536
7537 rtx
7538 get_condition (jump, earliest)
7539 rtx jump;
7540 rtx *earliest;
7541 {
7542 enum rtx_code code;
7543 rtx prev = jump;
7544 rtx set;
7545 rtx tem;
7546 rtx op0, op1;
7547 int reverse_code = 0;
7548 int did_reverse_condition = 0;
7549 enum machine_mode mode;
7550
7551 /* If this is not a standard conditional jump, we can't parse it. */
7552 if (GET_CODE (jump) != JUMP_INSN
7553 || ! condjump_p (jump) || simplejump_p (jump))
7554 return 0;
7555
7556 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7557 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7558 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7559 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7560
7561 if (earliest)
7562 *earliest = jump;
7563
7564 /* If this branches to JUMP_LABEL when the condition is false, reverse
7565 the condition. */
7566 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7567 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7568 code = reverse_condition (code), did_reverse_condition ^= 1;
7569
7570 /* If we are comparing a register with zero, see if the register is set
7571 in the previous insn to a COMPARE or a comparison operation. Perform
7572 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7573    in cse.c.  */
7574
7575 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7576 {
7577 /* Set non-zero when we find something of interest. */
7578 rtx x = 0;
7579
7580 #ifdef HAVE_cc0
7581 /* If comparison with cc0, import actual comparison from compare
7582 insn. */
7583 if (op0 == cc0_rtx)
7584 {
7585 if ((prev = prev_nonnote_insn (prev)) == 0
7586 || GET_CODE (prev) != INSN
7587 || (set = single_set (prev)) == 0
7588 || SET_DEST (set) != cc0_rtx)
7589 return 0;
7590
7591 op0 = SET_SRC (set);
7592 op1 = CONST0_RTX (GET_MODE (op0));
7593 if (earliest)
7594 *earliest = prev;
7595 }
7596 #endif
7597
7598 /* If this is a COMPARE, pick up the two things being compared. */
7599 if (GET_CODE (op0) == COMPARE)
7600 {
7601 op1 = XEXP (op0, 1);
7602 op0 = XEXP (op0, 0);
7603 continue;
7604 }
7605 else if (GET_CODE (op0) != REG)
7606 break;
7607
7608 /* Go back to the previous insn. Stop if it is not an INSN. We also
7609 stop if it isn't a single set or if it has a REG_INC note because
7610 we don't want to bother dealing with it. */
7611
7612 if ((prev = prev_nonnote_insn (prev)) == 0
7613 || GET_CODE (prev) != INSN
7614 || FIND_REG_INC_NOTE (prev, 0)
7615 || (set = single_set (prev)) == 0)
7616 break;
7617
7618 /* If this is setting OP0, get what it sets it to if it looks
7619 relevant. */
7620 if (rtx_equal_p (SET_DEST (set), op0))
7621 {
7622 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7623
7624 /* ??? We may not combine comparisons done in a CCmode with
7625 comparisons not done in a CCmode. This is to aid targets
7626 like Alpha that have an IEEE compliant EQ instruction, and
7627 a non-IEEE compliant BEQ instruction. The use of CCmode is
7628 actually artificial, simply to prevent the combination, but
7629 should not affect other platforms. */
7630
7631 if ((GET_CODE (SET_SRC (set)) == COMPARE
7632 || (((code == NE
7633 || (code == LT
7634 && GET_MODE_CLASS (inner_mode) == MODE_INT
7635 && (GET_MODE_BITSIZE (inner_mode)
7636 <= HOST_BITS_PER_WIDE_INT)
7637 && (STORE_FLAG_VALUE
7638 & ((HOST_WIDE_INT) 1
7639 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7640 #ifdef FLOAT_STORE_FLAG_VALUE
7641 || (code == LT
7642 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7643 && FLOAT_STORE_FLAG_VALUE < 0)
7644 #endif
7645 ))
7646 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7647 && ((GET_MODE_CLASS (mode) == MODE_CC)
7648 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7649 x = SET_SRC (set);
7650 else if (((code == EQ
7651 || (code == GE
7652 && (GET_MODE_BITSIZE (inner_mode)
7653 <= HOST_BITS_PER_WIDE_INT)
7654 && GET_MODE_CLASS (inner_mode) == MODE_INT
7655 && (STORE_FLAG_VALUE
7656 & ((HOST_WIDE_INT) 1
7657 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7658 #ifdef FLOAT_STORE_FLAG_VALUE
7659 || (code == GE
7660 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7661 && FLOAT_STORE_FLAG_VALUE < 0)
7662 #endif
7663 ))
7664 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7665 && ((GET_MODE_CLASS (mode) == MODE_CC)
7666 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7667 {
7668 /* We might have reversed a LT to get a GE here. But this wasn't
7669 actually the comparison of data, so we don't flag that we
7670 have had to reverse the condition. */
7671 did_reverse_condition ^= 1;
7672 reverse_code = 1;
7673 x = SET_SRC (set);
7674 }
7675 else
7676 break;
7677 }
7678
7679 else if (reg_set_p (op0, prev))
7680 /* If this sets OP0, but not directly, we have to give up. */
7681 break;
7682
7683 if (x)
7684 {
7685 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7686 code = GET_CODE (x);
7687 if (reverse_code)
7688 {
7689 code = reverse_condition (code);
7690 did_reverse_condition ^= 1;
7691 reverse_code = 0;
7692 }
7693
7694 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7695 if (earliest)
7696 *earliest = prev;
7697 }
7698 }
7699
7700 /* If constant is first, put it last. */
7701 if (CONSTANT_P (op0))
7702 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7703
7704 /* If OP0 is the result of a comparison, we weren't able to find what
7705 was really being compared, so fail. */
7706 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7707 return 0;
7708
7709 /* Canonicalize any ordered comparison with integers involving equality
7710 if we can do computations in the relevant mode and we do not
7711 overflow. */
7712
7713 if (GET_CODE (op1) == CONST_INT
7714 && GET_MODE (op0) != VOIDmode
7715 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7716 {
7717 HOST_WIDE_INT const_val = INTVAL (op1);
7718 unsigned HOST_WIDE_INT uconst_val = const_val;
7719 unsigned HOST_WIDE_INT max_val
7720 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7721
7722 switch (code)
7723 {
7724 case LE:
7725 if (const_val != max_val >> 1)
7726 code = LT, op1 = GEN_INT (const_val + 1);
7727 break;
7728
7729 /* When cross-compiling, const_val might be sign-extended from
7730 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7731 case GE:
7732 if ((const_val & max_val)
7733 != (((HOST_WIDE_INT) 1
7734 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7735 code = GT, op1 = GEN_INT (const_val - 1);
7736 break;
7737
7738 case LEU:
7739 if (uconst_val < max_val)
7740 code = LTU, op1 = GEN_INT (uconst_val + 1);
7741 break;
7742
7743 case GEU:
7744 if (uconst_val != 0)
7745 code = GTU, op1 = GEN_INT (uconst_val - 1);
7746 break;
7747
7748 default:
7749 break;
7750 }
7751 }
7752
7753 /* If this was floating-point and we reversed anything other than an
7754 EQ or NE, return zero. */
7755 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7756 && did_reverse_condition && code != NE && code != EQ
7757 && ! flag_fast_math
7758 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7759 return 0;
7760
7761 #ifdef HAVE_cc0
7762 /* Never return CC0; return zero instead. */
7763 if (op0 == cc0_rtx)
7764 return 0;
7765 #endif
7766
7767 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7768 }
7769
7770 /* Similar to above routine, except that we also put an invariant last
7771 unless both operands are invariants. */
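/* E.g., (lt n i), where N is loop-invariant and I is not, comes back
   as (gt i n), so the non-invariant operand appears first.  */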
7772
7773 rtx
7774 get_condition_for_loop (x)
7775 rtx x;
7776 {
7777 rtx comparison = get_condition (x, NULL_PTR);
7778
7779 if (comparison == 0
7780 || ! invariant_p (XEXP (comparison, 0))
7781 || invariant_p (XEXP (comparison, 1)))
7782 return comparison;
7783
7784 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7785 XEXP (comparison, 1), XEXP (comparison, 0));
7786 }
7787
7788 #ifdef HAIFA
7789 /* Analyze a loop in order to instrument it with the use of a count register.
7790 loop_start and loop_end are the first and last insns of the loop.
7791 This function works in cooperation with insert_bct ().
7792 loop_can_insert_bct[loop_num] is set according to whether the optimization
7793 is applicable to the loop. When it is applicable, the following variables
7794 are also set:
7795 loop_start_value[loop_num]
7796 loop_comparison_value[loop_num]
7797 loop_increment[loop_num]
7798 loop_comparison_code[loop_num] */
7799
7800 #ifdef HAVE_decrement_and_branch_on_count
7801 static void
7802 analyze_loop_iterations (loop_start, loop_end)
7803 rtx loop_start, loop_end;
7804 {
7805 rtx comparison, comparison_value;
7806 rtx iteration_var, initial_value, increment;
7807 enum rtx_code comparison_code;
7808
7809 rtx last_loop_insn;
7810 rtx insn;
7811 int i;
7812
7813 /* loop_variable mode */
7814 enum machine_mode original_mode;
7815
7816 /* find the number of the loop */
7817 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7818
7819   /* we change our mind only when we are sure that the loop will be instrumented */
7820 loop_can_insert_bct[loop_num] = 0;
7821
7822   /* is the optimization suppressed?  */
7823 if ( !flag_branch_on_count_reg )
7824 return;
7825
7826 /* make sure that count-reg is not in use */
7827 if (loop_used_count_register[loop_num]){
7828 if (loop_dump_stream)
7829 fprintf (loop_dump_stream,
7830 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7831 loop_num);
7832 return;
7833 }
7834
7835 /* make sure that the function has no indirect jumps. */
7836 if (indirect_jump_in_function){
7837 if (loop_dump_stream)
7838 fprintf (loop_dump_stream,
7839 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7840 loop_num);
7841 return;
7842 }
7843
7844 /* make sure that the last loop insn is a conditional jump */
7845 last_loop_insn = PREV_INSN (loop_end);
7846 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7847 if (loop_dump_stream)
7848 fprintf (loop_dump_stream,
7849 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7850 loop_num);
7851 return;
7852 }
7853
7854 /* First find the iteration variable. If the last insn is a conditional
7855 branch, and the insn preceding it tests a register value, make that
7856 register the iteration variable. */
7857
7858 /* We used to use prev_nonnote_insn here, but that fails because it might
7859 accidentally get the branch for a contained loop if the branch for this
7860 loop was deleted. We can only trust branches immediately before the
7861 loop_end. */
7862
7863 comparison = get_condition_for_loop (last_loop_insn);
7864 /* ??? Get_condition may switch position of induction variable and
7865 invariant register when it canonicalizes the comparison. */
7866
7867 if (comparison == 0) {
7868 if (loop_dump_stream)
7869 fprintf (loop_dump_stream,
7870 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7871 loop_num);
7872 return;
7873 }
7874
7875 comparison_code = GET_CODE (comparison);
7876 iteration_var = XEXP (comparison, 0);
7877 comparison_value = XEXP (comparison, 1);
7878
7879 original_mode = GET_MODE (iteration_var);
7880 if (GET_MODE_CLASS (original_mode) != MODE_INT
7881 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7882 if (loop_dump_stream)
7883 fprintf (loop_dump_stream,
7884 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7885 loop_num);
7886 return;
7887 }
7888
7889 /* get info about loop bounds and increment */
7890 iteration_info (iteration_var, &initial_value, &increment,
7891 loop_start, loop_end);
7892
7893 /* make sure that all required loop data were found */
7894 if (!(initial_value && increment && comparison_value
7895 && invariant_p (comparison_value) && invariant_p (increment)
7896 && ! indirect_jump_in_function))
7897 {
7898 if (loop_dump_stream) {
7899 fprintf (loop_dump_stream,
7900 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7901 if (!(initial_value && increment && comparison_value)) {
7902 fprintf (loop_dump_stream, "\tbounds not available: ");
7903 if ( ! initial_value )
7904 fprintf (loop_dump_stream, "initial ");
7905 if ( ! increment )
7906 fprintf (loop_dump_stream, "increment ");
7907 if ( ! comparison_value )
7908 fprintf (loop_dump_stream, "comparison ");
7909 fprintf (loop_dump_stream, "\n");
7910 }
7911 if (!invariant_p (comparison_value) || !invariant_p (increment))
7912 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7913 }
7914 return;
7915 }
7916
7917 /* make sure that the increment is constant */
7918 if (GET_CODE (increment) != CONST_INT) {
7919 if (loop_dump_stream)
7920 fprintf (loop_dump_stream,
7921 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7922 loop_num);
7923 return;
7924 }
7925
7926   /* make sure that the loop contains neither a function call nor a jump
7927      on a table (the count register might be altered by the called function,
7928      and might be used for a branch on a table).  */
7929 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7930 if (GET_CODE (insn) == CALL_INSN){
7931 if (loop_dump_stream)
7932 fprintf (loop_dump_stream,
7933 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7934 loop_num);
7935 return;
7936 }
7937
7938 if (GET_CODE (insn) == JUMP_INSN
7939 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7940 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7941 if (loop_dump_stream)
7942 fprintf (loop_dump_stream,
7943 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7944 loop_num);
7945 return;
7946 }
7947 }
7948
7949 /* At this point, we are sure that the loop can be instrumented with BCT.
7950 Some of the loops, however, will not be instrumented - the final decision
7951 is taken by insert_bct () */
7952 if (loop_dump_stream)
7953 fprintf (loop_dump_stream,
7954 	     "analyze_loop_iterations: loop %d can be BCT instrumented.\n",
7955 loop_num);
7956
7957   /* mark all enclosing loops as unable to use the count register */
7958 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7959 marking here may prevent instrumenting an enclosing loop that could
7960 actually be instrumented. But since this is rare, it is safer to mark
7961 here in case the order of calling (analyze/insert)_bct would be changed. */
7962 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7963 loop_used_count_register[i] = 1;
7964
7965 /* Set data structures which will be used by the instrumentation phase */
7966 loop_start_value[loop_num] = initial_value;
7967 loop_comparison_value[loop_num] = comparison_value;
7968 loop_increment[loop_num] = increment;
7969 loop_comparison_code[loop_num] = comparison_code;
7970 loop_can_insert_bct[loop_num] = 1;
7971 }
7972
7973
7974 /* instrument the loop for insertion of the bct instruction.  We distinguish
7975    between loops with compile-time bounds and those with run-time bounds.  The
7976    loop behavior is analyzed according to the following characteristics/variables:
7977    ; Input variables:
7978    ;   comparison-value: the value to which the iteration counter is compared.
7979    ;   initial-value: iteration-counter initial value.
7980    ;   increment: iteration-counter increment.
7981    ; Computed variables:
7982    ;   increment-direction: the sign of the increment.
7983    ;   compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
7984    ;   range-direction: sign (comparison-value - initial-value)
7985    We give up on the following cases:
7986    ; loop variable overflow.
7987    ; run-time loop bounds with comparison code NE.
7988 */
7989
7990 static void
7991 insert_bct (loop_start, loop_end)
7992 rtx loop_start, loop_end;
7993 {
7994 rtx initial_value, comparison_value, increment;
7995 enum rtx_code comparison_code;
7996
7997 int increment_direction, compare_direction;
7998 int unsigned_p = 0;
7999
8000   /* if the loop condition is <= or >=, the number of iterations
8001      is one more than the range of the bounds of the loop */
8002 int add_iteration = 0;
8003
8004   /* the only machine mode we work with is the integer mode of the
8005      machine's word size */
8006 enum machine_mode loop_var_mode = SImode;
8007
8008 int loop_num = uid_loop_num [INSN_UID (loop_start)];
8009
8010 /* get loop-variables. No need to check that these are valid - already
8011 checked in analyze_loop_iterations (). */
8012 comparison_code = loop_comparison_code[loop_num];
8013 initial_value = loop_start_value[loop_num];
8014 comparison_value = loop_comparison_value[loop_num];
8015 increment = loop_increment[loop_num];
8016
8017 /* check analyze_loop_iterations decision for this loop. */
8018 if (! loop_can_insert_bct[loop_num]){
8019 if (loop_dump_stream)
8020 fprintf (loop_dump_stream,
8021 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
8022 loop_num);
8023 return;
8024 }
8025
8026   /* It's impossible to instrument a completely unrolled loop.  */
8027 if (loop_unroll_factor [loop_num] == -1)
8028 return;
8029
8030   /* make sure that the last loop insn is a conditional jump.
8031 This check is repeated from analyze_loop_iterations (),
8032 because unrolling might have changed that. */
8033 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
8034 || !condjump_p (PREV_INSN (loop_end))) {
8035 if (loop_dump_stream)
8036 fprintf (loop_dump_stream,
8037 "insert_bct: not instrumenting BCT because of invalid branch\n");
8038 return;
8039 }
8040
8041 /* fix increment in case loop was unrolled. */
8042 if (loop_unroll_factor [loop_num] > 1)
8043 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
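  /* E.g., a loop that originally stepped by 1 and was unrolled 4 times
     now advances by 4 per pass through the unrolled body.  */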
8044
8045 /* determine properties and directions of the loop */
8046 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
8047 switch ( comparison_code ) {
8048 case LEU:
8049 unsigned_p = 1;
8050 /* fallthrough */
8051 case LE:
8052 compare_direction = 1;
8053 add_iteration = 1;
8054 break;
8055 case GEU:
8056 unsigned_p = 1;
8057 /* fallthrough */
8058 case GE:
8059 compare_direction = -1;
8060 add_iteration = 1;
8061 break;
8062 case EQ:
8063 /* in this case we cannot know the number of iterations */
8064 if (loop_dump_stream)
8065 fprintf (loop_dump_stream,
8066 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
8067 loop_num);
8068 return;
8069 case LTU:
8070 unsigned_p = 1;
8071 /* fallthrough */
8072 case LT:
8073 compare_direction = 1;
8074 break;
8075 case GTU:
8076 unsigned_p = 1;
8077 /* fallthrough */
8078 case GT:
8079 compare_direction = -1;
8080 break;
8081 case NE:
8082 compare_direction = 0;
8083 break;
8084 default:
8085 abort ();
8086 }
8087
8088
8089 /* make sure that the loop does not end by an overflow */
8090 if (compare_direction != increment_direction) {
8091 if (loop_dump_stream)
8092 fprintf (loop_dump_stream,
8093 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
8094 loop_num);
8095 return;
8096 }
8097
8098 /* try to instrument the loop. */
8099
8100 /* Handle the simpler case, where the bounds are known at compile time. */
8101 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
8102 {
8103 int n_iterations;
8104 int increment_value_abs = INTVAL (increment) * increment_direction;
8105
8106 /* check the relation between compare-val and initial-val */
8107 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
8108 int range_direction = (difference > 0) ? 1 : -1;
8109
8110 /* make sure the loop executes enough iterations to gain from BCT */
8111 if (difference > -3 && difference < 3) {
8112 if (loop_dump_stream)
8113 fprintf (loop_dump_stream,
8114 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
8115 loop_num);
8116 return;
8117 }
8118
8119 /* make sure that the loop executes at least once */
8120 if ((range_direction == 1 && compare_direction == -1)
8121 || (range_direction == -1 && compare_direction == 1))
8122 {
8123 if (loop_dump_stream)
8124 fprintf (loop_dump_stream,
8125 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
8126 loop_num);
8127 return;
8128 }
8129
8130 /* make sure that the loop does not end by an overflow (in compile time
8131 bounds we must have an additional check for overflow, because here
8132 	 we also support the compare code of 'NE').  */
8133 if (comparison_code == NE
8134 && increment_direction != range_direction) {
8135 if (loop_dump_stream)
8136 fprintf (loop_dump_stream,
8137 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
8138 loop_num);
8139 return;
8140 }
8141
8142 /* Determine the number of iterations by:
8143 ;
8144 ; compare-val - initial-val + (increment -1) + additional-iteration
8145 ; num_iterations = -----------------------------------------------------------------
8146 ; increment
8147 */
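      /* For example, for a loop equivalent to
 	 "for (i = 0; i <= 10; i += 4)": difference = 10, increment = 4,
 	 add_iteration = 1, so num_iterations = (10 + 3 + 1) / 4 = 3,
 	 matching i = 0, 4, 8.  */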
8148 difference = (range_direction > 0) ? difference : -difference;
8149 #if 0
8150 fprintf (stderr, "difference is: %d\n", difference); /* @*/
8151 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
8152 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
8153 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
8154 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
8155 #endif
8156
8157 if (increment_value_abs == 0) {
8158 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
8159 abort ();
8160 }
8161 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
8162 / increment_value_abs;
8163
8164 #if 0
8165 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
8166 #endif
8167 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
8168
8169 /* Done with this loop. */
8170 return;
8171 }
8172
8173   /* Handle the more complex case, where the bounds are NOT known at compile time.  */
8174   /* In this case we generate a run-time calculation of the number of iterations.  */
8175
8176 /* With runtime bounds, if the compare is of the form '!=' we give up */
8177 if (comparison_code == NE) {
8178 if (loop_dump_stream)
8179 fprintf (loop_dump_stream,
8180 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
8181 loop_num);
8182 return;
8183 }
8184
8185 else {
8186     /* We rely on the existence of a run-time guard to ensure that the
8187 loop executes at least once. */
8188 rtx sequence;
8189 rtx iterations_num_reg;
8190
8191 int increment_value_abs = INTVAL (increment) * increment_direction;
8192
8193     /* make sure that the increment is a power of two, otherwise an
8194        (expensive) divide is needed.  */
8195 if (exact_log2 (increment_value_abs) == -1)
8196 {
8197 if (loop_dump_stream)
8198 fprintf (loop_dump_stream,
8199 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
8200 return;
8201 }
8202
8203 /* compute the number of iterations */
8204 start_sequence ();
8205 {
8206 rtx temp_reg;
8207
8208 /* Again, the number of iterations is calculated by:
8209 ;
8210 ; compare-val - initial-val + (increment -1) + additional-iteration
8211 ; num_iterations = -----------------------------------------------------------------
8212 ; increment
8213 */
8214 /* ??? Do we have to call copy_rtx here before passing rtx to
8215 expand_binop? */
8216 if (compare_direction > 0) {
8217 /* <, <= :the loop variable is increasing */
8218 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
8219 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8220 }
8221 else {
8222 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
8223 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
8224 }
8225
8226 if (increment_value_abs - 1 + add_iteration != 0)
8227 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
8228 GEN_INT (increment_value_abs - 1 + add_iteration),
8229 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8230
8231 if (increment_value_abs != 1)
8232 {
8233 /* ??? This will generate an expensive divide instruction for
8234 most targets. The original authors apparently expected this
8235 to be a shift, since they test for power-of-2 divisors above,
8236 but just naively generating a divide instruction will not give
8237 a shift. It happens to work for the PowerPC target because
8238 the rs6000.md file has a divide pattern that emits shifts.
8239 It will probably not work for any other target. */
8240 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
8241 temp_reg,
8242 GEN_INT (increment_value_abs),
8243 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8244 }
8245 else
8246 iterations_num_reg = temp_reg;
8247 }
8248 sequence = gen_sequence ();
8249 end_sequence ();
8250 emit_insn_before (sequence, loop_start);
8251 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
8252 }
8253 }
8254
8255 /* instrument the loop by inserting a bct in it.  This is done in the following way:
8256    1. A new register is created and assigned the hard register number of the count
8257     register.
8258    2. In the head of the loop the new variable is initialized to the value passed in the
8259     loop_num_iterations parameter.
8260    3. At the end of the loop, a comparison of the register with 0 is generated.
8261     The created comparison follows the pattern defined for the
8262     decrement_and_branch_on_count insn, so this insn will be generated in the assembly
8263     generation phase.
8264    4. The compare&branch on the old variable is deleted.  So, if the loop variable was
8265     not used elsewhere, it will be eliminated by data-flow analysis.  */
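/* Schematically, a loop that previously ended with a compare of the
   loop variable and a conditional branch now loads the iteration count
   into the count register in its header and ends with a single
   decrement_and_branch_on_count pattern (e.g. bdnz on PowerPC).  */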
8266
8267 static void
8268 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
8269 rtx loop_start, loop_end;
8270 rtx loop_num_iterations;
8271 {
8272 rtx temp_reg1, temp_reg2;
8273 rtx start_label;
8274
8275 rtx sequence;
8276 enum machine_mode loop_var_mode = SImode;
8277
8278 if (HAVE_decrement_and_branch_on_count)
8279 {
8280 if (loop_dump_stream)
8281 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
8282
8283       /* eliminate the check on the old variable (the branch and the
8283 	  compare before it) */
8284 delete_insn (PREV_INSN (loop_end));
8285 delete_insn (PREV_INSN (loop_end));
8286
8287 /* insert the label which will delimit the start of the loop */
8288 start_label = gen_label_rtx ();
8289 emit_label_after (start_label, loop_start);
8290
8291 /* insert initialization of the count register into the loop header */
8292 start_sequence ();
8293 temp_reg1 = gen_reg_rtx (loop_var_mode);
8294 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
8295
8296       /* this will be the count register */
8297       temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
8298       /* we have to move the value to the count register from a GPR
8299 	 because the rtx pointed to by loop_num_iterations could contain
8300 	 an expression which cannot be moved into the count register */
8301 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
8302
8303 sequence = gen_sequence ();
8304 end_sequence ();
8305 emit_insn_after (sequence, loop_start);
8306
8307 /* insert new comparison on the count register instead of the
8308 old one, generating the needed BCT pattern (that will be
8309 	 later recognized by the assembly generation phase).  */
8310 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
8311 loop_end);
8312 LABEL_NUSES (start_label)++;
8313 }
8314
8315 }
8316 #endif /* HAVE_decrement_and_branch_on_count */
8317
8318 #endif /* HAIFA */
8319
8320 /* Scan the function and determine whether it has indirect (computed) jumps.
8321
8322 This is taken mostly from flow.c; similar code exists elsewhere
8323 in the compiler. It may be useful to put this into rtlanal.c. */
8324 static int
8325 indirect_jump_in_function_p (start)
8326 rtx start;
8327 {
8328 rtx insn;
8329
8330 for (insn = start; insn; insn = NEXT_INSN (insn))
8331 if (computed_jump_p (insn))
8332 return 1;
8333
8334 return 0;
8335 }
8336
8337 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8338 documentation for LOOP_MEMS for the definition of `appropriate'.
8339 This function is called from prescan_loop via for_each_rtx. */
8340
8341 static int
8342 insert_loop_mem (mem, data)
8343 rtx *mem;
8344 void *data;
8345 {
8346 int i;
8347 rtx m = *mem;
8348
8349 if (m == NULL_RTX)
8350 return 0;
8351
8352 switch (GET_CODE (m))
8353 {
8354 case MEM:
8355 break;
8356
8357 case CONST_DOUBLE:
8358 /* We're not interested in the MEM associated with a
8359 CONST_DOUBLE, so there's no need to traverse into this. */
8360 return -1;
8361
8362 default:
8363 /* This is not a MEM. */
8364 return 0;
8365 }
8366
8367 /* See if we've already seen this MEM. */
8368 for (i = 0; i < loop_mems_idx; ++i)
8369 if (rtx_equal_p (m, loop_mems[i].mem))
8370 {
8371 if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
8372 /* The modes of the two memory accesses are different. If
8373 this happens, something tricky is going on, and we just
8374 don't optimize accesses to this MEM. */
8375 loop_mems[i].optimize = 0;
8376
8377 return 0;
8378 }
8379
8380 /* Resize the array, if necessary. */
8381 if (loop_mems_idx == loop_mems_allocated)
8382 {
8383 if (loop_mems_allocated != 0)
8384 loop_mems_allocated *= 2;
8385 else
8386 loop_mems_allocated = 32;
8387
8388 loop_mems = (loop_mem_info*)
8389 xrealloc (loop_mems,
8390 loop_mems_allocated * sizeof (loop_mem_info));
8391 }
8392
8393 /* Actually insert the MEM. */
8394 loop_mems[loop_mems_idx].mem = m;
8395 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8396 because we can't put it in a register. We still store it in the
8397 table, though, so that if we see the same address later, but in a
8398 non-BLK mode, we'll not think we can optimize it at that point. */
8399 loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
8400 loop_mems[loop_mems_idx].reg = NULL_RTX;
8401   ++loop_mems_idx;

  /* for_each_rtx callbacks must return a value; zero tells it to
     continue the traversal.  */
  return 0;
8402 }
8403
8404 /* Like load_mems, but also ensures that N_TIMES_SET,
8405 MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
8406 values after load_mems. */
8407
8408 static void
8409 load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
8410 reg_single_usage, insn_count)
8411 rtx scan_start;
8412 rtx end;
8413 rtx loop_top;
8414 rtx start;
8415 rtx *reg_single_usage;
8416 int *insn_count;
8417 {
8418 int nregs = max_reg_num ();
8419
8420 load_mems (scan_start, end, loop_top, start);
8421
8422 /* Recalculate n_times_set and friends since load_mems may have
8423 created new registers. */
8424 if (max_reg_num () > nregs)
8425 {
8426 int i;
8427 int old_nregs;
8428
8429 old_nregs = nregs;
8430 nregs = max_reg_num ();
8431
8432 /* Note that we assume here that enough room was allocated in
8433 	 the various arrays to accommodate the extra registers created
8434 by load_mems. */
8435 bzero ((char *) n_times_set, nregs * sizeof (int));
8436 bzero (may_not_optimize, nregs);
8437 if (loop_has_call && reg_single_usage)
8438 bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
8439
8440 count_loop_regs_set (loop_top ? loop_top : start, end,
8441 may_not_optimize, reg_single_usage,
8442 insn_count, nregs);
8443
8444 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8445 may_not_optimize[i] = 1, n_times_set[i] = 1;
8446
8447 /* Set n_times_used for the new registers. */
8448 bcopy ((char *) (n_times_set + old_nregs),
8449 (char *) (n_times_used + old_nregs),
8450 (nregs - old_nregs) * sizeof (int));
8451 }
8452 }
8453
8454 /* Move MEMs into registers for the duration of the loop. SCAN_START
8455 is the first instruction in the loop (as it is executed). The
8456 other parameters are as for next_insn_in_loop. */
8457
8458 static void
8459 load_mems (scan_start, end, loop_top, start)
8460 rtx scan_start;
8461 rtx end;
8462 rtx loop_top;
8463 rtx start;
8464 {
8465 int maybe_never = 0;
8466 int i;
8467 rtx p;
8468 rtx label = NULL_RTX;
8469 rtx end_label;
8470
8471 if (loop_mems_idx > 0)
8472 {
8473 /* Nonzero if the next instruction may never be executed. */
8474 int next_maybe_never = 0;
8475
8476 /* Check to see if it's possible that some instructions in the
8477 loop are never executed. */
8478 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
8479 p != NULL_RTX && !maybe_never;
8480 p = next_insn_in_loop (p, scan_start, end, loop_top))
8481 {
8482 if (GET_CODE (p) == CODE_LABEL)
8483 maybe_never = 1;
8484 else if (GET_CODE (p) == JUMP_INSN
8485 /* If we enter the loop in the middle, and scan
8486 around to the beginning, don't set maybe_never
8487 for that. This must be an unconditional jump,
8488 otherwise the code at the top of the loop might
8489 never be executed. Unconditional jumps are
8490 		      followed by a barrier and then the loop end.  */
8491 && ! (GET_CODE (p) == JUMP_INSN
8492 && JUMP_LABEL (p) == loop_top
8493 && NEXT_INSN (NEXT_INSN (p)) == end
8494 && simplejump_p (p)))
8495 {
8496 if (!condjump_p (p))
8497 /* Something complicated. */
8498 maybe_never = 1;
8499 else
8500 /* If there are any more instructions in the loop, they
8501 might not be reached. */
8502 next_maybe_never = 1;
8503 }
8504 else if (next_maybe_never)
8505 maybe_never = 1;
8506 }
8507
8508 /* Actually move the MEMs. */
8509 for (i = 0; i < loop_mems_idx; ++i)
8510 {
8511 int j;
8512 int written = 0;
8513 rtx reg;
8514 rtx mem = loop_mems[i].mem;
8515
8516 if (MEM_VOLATILE_P (mem)
8517 || invariant_p (XEXP (mem, 0)) != 1)
8518 /* There's no telling whether or not MEM is modified. */
8519 loop_mems[i].optimize = 0;
8520
8521 /* Go through the MEMs written to in the loop to see if this
8522 one is aliased by one of them. */
8523 for (j = 0; j < loop_store_mems_idx; ++j)
8524 {
8525 if (rtx_equal_p (mem, loop_store_mems[j]))
8526 written = 1;
8527 else if (true_dependence (loop_store_mems[j], VOIDmode,
8528 mem, rtx_varies_p))
8529 {
8530 /* MEM is indeed aliased by this store. */
8531 loop_mems[i].optimize = 0;
8532 break;
8533 }
8534 }
8535
8536 /* If this MEM is written to, we must be sure that there
8537 are no reads from another MEM that aliases this one. */
8538 if (loop_mems[i].optimize && written)
8539 {
8540 int j;
8541
8542 for (j = 0; j < loop_mems_idx; ++j)
8543 {
8544 if (j == i)
8545 continue;
8546 else if (true_dependence (mem,
8547 VOIDmode,
8548 loop_mems[j].mem,
8549 rtx_varies_p))
8550 {
8551 /* It's not safe to hoist loop_mems[i] out of
8552 the loop because writes to it might not be
8553 seen by reads from loop_mems[j]. */
8554 loop_mems[i].optimize = 0;
8555 break;
8556 }
8557 }
8558 }
8559
8560 if (maybe_never && may_trap_p (mem))
8561 /* We can't access the MEM outside the loop; it might
8562 cause a trap that wouldn't have happened otherwise. */
8563 loop_mems[i].optimize = 0;
8564
8565 if (!loop_mems[i].optimize)
8566 /* We thought we were going to lift this MEM out of the
8567 loop, but later discovered that we could not. */
8568 continue;
8569
8570 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
8571 order to keep scan_loop from moving stores to this MEM
8572 out of the loop just because this REG is neither a
8573 user-variable nor used in the loop test. */
8574 reg = gen_reg_rtx (GET_MODE (mem));
8575 REG_USERVAR_P (reg) = 1;
8576 loop_mems[i].reg = reg;
8577
8578 /* Now, replace all references to the MEM with the
8579 	     corresponding pseudos.  */
8580 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
8581 p != NULL_RTX;
8582 p = next_insn_in_loop (p, scan_start, end, loop_top))
8583 {
8584 rtx_and_int ri;
8585 ri.r = p;
8586 ri.i = i;
8587 for_each_rtx (&p, replace_loop_mem, &ri);
8588 }
8589
8590 if (!apply_change_group ())
8591 /* We couldn't replace all occurrences of the MEM. */
8592 loop_mems[i].optimize = 0;
8593 else
8594 {
8595 rtx set;
8596
8597 /* Load the memory immediately before START, which is
8598 the NOTE_LOOP_BEG. */
8599 set = gen_rtx_SET (GET_MODE (reg), reg, mem);
8600 emit_insn_before (set, start);
8601
8602 if (written)
8603 {
8604 if (label == NULL_RTX)
8605 {
8606 /* We must compute the former
8607 right-after-the-end label before we insert
8608 the new one. */
8609 end_label = next_label (end);
8610 label = gen_label_rtx ();
8611 emit_label_after (label, end);
8612 }
8613
8614 /* Store the memory immediately after END, which is
8615 the NOTE_LOOP_END. */
8616 set = gen_rtx_SET (GET_MODE (reg), mem, reg);
8617 emit_insn_after (set, label);
8618 }
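 	      /* The net effect: the loop reads MEM into REG once before
 		 START (the NOTE_LOOP_BEG) and, when MEM is written in
 		 the loop, stores REG back once at the new label just
 		 after END (the NOTE_LOOP_END).  */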
8619
8620 if (loop_dump_stream)
8621 {
8622 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
8623 REGNO (reg), (written ? "r/w" : "r/o"));
8624 print_rtl (loop_dump_stream, mem);
8625 fputc ('\n', loop_dump_stream);
8626 }
8627 }
8628 }
8629 }
8630
8631 if (label != NULL_RTX)
8632 {
8633 /* Now, we need to replace all references to the previous exit
8634 label with the new one. */
8635 rtx_pair rr;
8636 rr.r1 = end_label;
8637 rr.r2 = label;
8638
8639 for (p = start; p != end; p = NEXT_INSN (p))
8640 for_each_rtx (&p, replace_label, &rr);
8641 }
8642 }
8643
8644 /* Replace MEM with its associated pseudo register. This function is
8645 called from load_mems via for_each_rtx. DATA is actually an
8646 rtx_and_int * describing the instruction currently being scanned
8647 and the MEM we are currently replacing. */
8648
8649 static int
8650 replace_loop_mem (mem, data)
8651 rtx *mem;
8652 void *data;
8653 {
8654 rtx_and_int *ri;
8655 rtx insn;
8656 int i;
8657 rtx m = *mem;
8658
8659 if (m == NULL_RTX)
8660 return 0;
8661
8662 switch (GET_CODE (m))
8663 {
8664 case MEM:
8665 break;
8666
8667 case CONST_DOUBLE:
8668 /* We're not interested in the MEM associated with a
8669 CONST_DOUBLE, so there's no need to traverse into one. */
8670 return -1;
8671
8672 default:
8673 /* This is not a MEM. */
8674 return 0;
8675 }
8676
8677 ri = (rtx_and_int*) data;
8678 i = ri->i;
8679
8680 if (!rtx_equal_p (loop_mems[i].mem, m))
8681 /* This is not the MEM we are currently replacing. */
8682 return 0;
8683
8684 insn = ri->r;
8685
8686 /* Actually replace the MEM. */
8687 validate_change (insn, mem, loop_mems[i].reg, 1);
8688
8689 return 0;
8690 }
8691
8692 /* Replace occurrences of the old exit label for the loop with the new
8693 one. DATA is an rtx_pair containing the old and new labels,
8694 respectively. */
8695
8696 static int
8697 replace_label (x, data)
8698 rtx *x;
8699 void *data;
8700 {
8701 rtx l = *x;
8702 rtx old_label = ((rtx_pair*) data)->r1;
8703 rtx new_label = ((rtx_pair*) data)->r2;
8704
8705 if (l == NULL_RTX)
8706 return 0;
8707
8708 if (GET_CODE (l) != LABEL_REF)
8709 return 0;
8710
8711 if (XEXP (l, 0) != old_label)
8712 return 0;
8713
8714 XEXP (l, 0) = new_label;
8715 ++LABEL_NUSES (new_label);
8716 --LABEL_NUSES (old_label);
8717
8718 return 0;
8719 }
8720
8721