/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the entire
   register once before the loop and merely copy the low part within
   the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
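
/* As a rough source-level illustration (the pass itself works on RTL),
   given a loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   moving invariants hoists the computation of x * y out of the loop,
   and strength reduction replaces the multiplication hidden in the
   address calculation a + i * sizeof (a[0]) with a pointer that is
   simply incremented each iteration:

	t = x * y;
	for (p = &a[0]; p < &a[n]; p++)
	  *p = t;

   All names here are invented for the illustration.  */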

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, nonzero if any of its inner loops uses the count
   register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
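
/* Schematically, an arithmetic loop of the form

	for (i = START; i OP VALUE; i += INC)
	  ...

   has loop_start_value START, loop_increment INC, loop_comparison_value
   VALUE, and loop_comparison_code OP.  (Source-level sketch only; the
   analysis itself works on the insn stream.)  */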
#endif /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled.
     -1: completely unrolled
     >0: holds the unroll exact factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;
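
/* For example, a pseudo set exactly once in the loop starts out with a
   count of 1 here; if it becomes a candidate known equal to a constant,
   the entry changes to -2, and if its insn is ultimately moved out of
   the loop the entry ends up 0.  */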

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                   first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO(());
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
#ifdef ADDRESS_COST
static rtx express_from PROTO((struct induction *, struct induction *));
#endif
static int combine_givs_p PROTO((struct induction *, struct induction *));
#ifdef GIV_SORT_CRITERION
static int giv_sort PROTO((struct induction **, struct induction **));
#endif
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));

#ifdef HAIFA
/* This is extern from unroll.c */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));

/* Two main functions for implementing bct:
   the first to be called before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
#endif /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */
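
  /* For example, insns with uids 7, 3 and 9 appearing in that order are
     given luids 1, 2 and 3, and a line-number note between the last two
     would share luid 2 with the insn preceding it.  (The uids here are
     invented for the illustration.)  */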

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
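
  /* Schematically, such a rotated loop looks like this in the insn
     stream (purely illustrative):

        NOTE_INSN_LOOP_BEG
        jump to L2              <- loop_entry_jump
        L1:                     <- loop_top
          ... loop body ...
        L2:                     <- scan_start
          exit test; conditional jump back to L1
        NOTE_INSN_LOOP_END  */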

  for (p = NEXT_INSN (loop_start);
       p != end
       && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
       && (GET_CODE (p) != NOTE
           || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
               && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier, then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg it loads dies in another
     conditionally movable insn.  If so, record that the second insn
     "forces" the first one, since the second can be moved only if
     the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg it loads dies in another
   conditionally movable insn.  If so, record that the second insn
   "forces" the first one, since the second can be moved only if
   the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */
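
/* For instance, if the movables record that (reg 64) and (reg 70) load
   the same value, then (plus:SI (reg:SI 64) (const_int 4)) and
   (plus:SI (reg:SI 70) (const_int 4)) compare equal here even though
   rtx_equal_p would say otherwise.  (Register numbers invented for the
   example.)  */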
1491
1492 static int
1493 rtx_equal_for_loop_p (x, y, movables)
1494 rtx x, y;
1495 struct movable *movables;
1496 {
1497 register int i;
1498 register int j;
1499 register struct movable *m;
1500 register enum rtx_code code;
1501 register char *fmt;
1502
1503 if (x == y)
1504 return 1;
1505 if (x == 0 || y == 0)
1506 return 0;
1507
1508 code = GET_CODE (x);
1509
1510 /* If we have a register and a constant, they may sometimes be
1511 equal. */
1512 if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
1513 && CONSTANT_P (y))
1514 {
1515 for (m = movables; m; m = m->next)
1516 if (m->move_insn && m->regno == REGNO (x)
1517 && rtx_equal_p (m->set_src, y))
1518 return 1;
1519 }
1520 else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
1521 && CONSTANT_P (x))
1522 {
1523 for (m = movables; m; m = m->next)
1524 if (m->move_insn && m->regno == REGNO (y)
1525 && rtx_equal_p (m->set_src, x))
1526 return 1;
1527 }
1528
1529 /* Otherwise, rtx's of different codes cannot be equal. */
1530 if (code != GET_CODE (y))
1531 return 0;
1532
1533 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1534 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1535
1536 if (GET_MODE (x) != GET_MODE (y))
1537 return 0;
1538
1539 /* These three types of rtx's can be compared nonrecursively. */
1540 if (code == REG)
1541 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1542
1543 if (code == LABEL_REF)
1544 return XEXP (x, 0) == XEXP (y, 0);
1545 if (code == SYMBOL_REF)
1546 return XSTR (x, 0) == XSTR (y, 0);
1547
1548 /* Compare the elements. If any pair of corresponding elements
1549 fail to match, return 0 for the whole things. */
1550
1551 fmt = GET_RTX_FORMAT (code);
1552 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1553 {
1554 switch (fmt[i])
1555 {
1556 case 'w':
1557 if (XWINT (x, i) != XWINT (y, i))
1558 return 0;
1559 break;
1560
1561 case 'i':
1562 if (XINT (x, i) != XINT (y, i))
1563 return 0;
1564 break;
1565
1566 case 'E':
1567 /* Two vectors must have the same length. */
1568 if (XVECLEN (x, i) != XVECLEN (y, i))
1569 return 0;
1570
1571 /* And the corresponding elements must match. */
1572 for (j = 0; j < XVECLEN (x, i); j++)
1573 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1574 return 0;
1575 break;
1576
1577 case 'e':
1578 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1579 return 0;
1580 break;
1581
1582 case 's':
1583 if (strcmp (XSTR (x, i), XSTR (y, i)))
1584 return 0;
1585 break;
1586
1587 case 'u':
1588 /* These are just backpointers, so they don't matter. */
1589 break;
1590
1591 case '0':
1592 break;
1593
1594 /* It is believed that rtx's at this level will never
1595 contain anything but integers and other rtx's,
1596 except for within LABEL_REFs and SYMBOL_REFs. */
1597 default:
1598 abort ();
1599 }
1600 }
1601 return 1;
1602 }
1603 \f
1604 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1605 insns in INSNS which use thet reference. */
1606
1607 static void
1608 add_label_notes (x, insns)
1609 rtx x;
1610 rtx insns;
1611 {
1612 enum rtx_code code = GET_CODE (x);
1613 int i, j;
1614 char *fmt;
1615 rtx insn;
1616
1617 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1618 {
1619 rtx next = next_real_insn (XEXP (x, 0));
1620
1621 /* Don't record labels that refer to dispatch tables.
1622 This is not necessary, since the tablejump references the same label.
1623 And if we did record them, flow.c would make worse code. */
1624 if (next == 0
1625 || ! (GET_CODE (next) == JUMP_INSN
1626 && (GET_CODE (PATTERN (next)) == ADDR_VEC
1627 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
1628 {
1629 for (insn = insns; insn; insn = NEXT_INSN (insn))
1630 if (reg_mentioned_p (XEXP (x, 0), insn))
1631 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1632 REG_NOTES (insn));
1633 }
1634 return;
1635 }
1636
1637 fmt = GET_RTX_FORMAT (code);
1638 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1639 {
1640 if (fmt[i] == 'e')
1641 add_label_notes (XEXP (x, i), insns);
1642 else if (fmt[i] == 'E')
1643 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1644 add_label_notes (XVECEXP (x, i, j), insns);
1645 }
1646 }
1647 \f
1648 /* Scan MOVABLES, and move the insns that deserve to be moved.
1649 If two matching movables are combined, replace one reg with the
1650 other throughout. */
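
/* Schematically (the pseudo-register numbers are illustrative): if
   the loop body contains two matching invariant loads

       (set (reg 66) (const_int 10))
       (set (reg 67) (const_int 10))

   then one load is emitted just before the loop, the other insn is
   deleted, and the reg_map built below rewrites uses of (reg 67)
   into uses of (reg 66) throughout the loop. */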
1651
1652 static void
1653 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1654 struct movable *movables;
1655 int threshold;
1656 int insn_count;
1657 rtx loop_start;
1658 rtx end;
1659 int nregs;
1660 {
1661 rtx new_start = 0;
1662 register struct movable *m;
1663 register rtx p;
1664 /* Map of pseudo-register replacements to handle combining
1665 when we move several insns that load the same value
1666 into different pseudo-registers. */
1667 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1668 char *already_moved = (char *) alloca (nregs);
1669
1670 bzero (already_moved, nregs);
1671 bzero ((char *) reg_map, nregs * sizeof (rtx));
1672
1673 num_movables = 0;
1674
1675 for (m = movables; m; m = m->next)
1676 {
1677 /* Describe this movable insn. */
1678
1679 if (loop_dump_stream)
1680 {
1681 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1682 INSN_UID (m->insn), m->regno, m->lifetime);
1683 if (m->consec > 0)
1684 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1685 if (m->cond)
1686 fprintf (loop_dump_stream, "cond ");
1687 if (m->force)
1688 fprintf (loop_dump_stream, "force ");
1689 if (m->global)
1690 fprintf (loop_dump_stream, "global ");
1691 if (m->done)
1692 fprintf (loop_dump_stream, "done ");
1693 if (m->move_insn)
1694 fprintf (loop_dump_stream, "move-insn ");
1695 if (m->match)
1696 fprintf (loop_dump_stream, "matches %d ",
1697 INSN_UID (m->match->insn));
1698 if (m->forces)
1699 fprintf (loop_dump_stream, "forces %d ",
1700 INSN_UID (m->forces->insn));
1701 }
1702
1703 /* Count movables. Value used in heuristics in strength_reduce. */
1704 num_movables++;
1705
1706 /* Ignore the insn if it's already done (it matched something else).
1707 Otherwise, see if it is now safe to move. */
1708
1709 if (!m->done
1710 && (! m->cond
1711 || (1 == invariant_p (m->set_src)
1712 && (m->dependencies == 0
1713 || 1 == invariant_p (m->dependencies))
1714 && (m->consec == 0
1715 || 1 == consec_sets_invariant_p (m->set_dest,
1716 m->consec + 1,
1717 m->insn))))
1718 && (! m->forces || m->forces->done))
1719 {
1720 register int regno;
1721 register rtx p;
1722 int savings = m->savings;
1723
1724 /* We have an insn that is safe to move.
1725 Compute its desirability. */
1726
1727 p = m->insn;
1728 regno = m->regno;
1729
1730 if (loop_dump_stream)
1731 fprintf (loop_dump_stream, "savings %d ", savings);
1732
1733 if (moved_once[regno])
1734 {
1735 insn_count *= 2;
1736
1737 if (loop_dump_stream)
1738 fprintf (loop_dump_stream, "halved since already moved ");
1739 }
1740
1741 /* An insn MUST be moved if we already moved something else
1742 which is safe only if this one is moved too: that is,
1743 if already_moved[REGNO] is nonzero. */
1744
1745 /* An insn is desirable to move if the new lifetime of the
1746 register is no more than THRESHOLD times the old lifetime.
1747 If it's not desirable, it means the loop is so big
1748 that moving won't speed things up much,
1749 and it is liable to make register usage worse. */
1750
1751 /* It is also desirable to move if it can be moved at no
1752 extra cost because something else was already moved. */
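
/* A rough illustration of the test below (the numbers are
   illustrative only): an insn with savings 1 whose register lives
   across 10 insns is moved only if threshold * 1 * 10 >= insn_count,
   i.e. only when the loop body is small enough; already_moved and
   flag_move_all_movables override the test. */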
1753
1754 if (already_moved[regno]
1755 || flag_move_all_movables
1756 || (threshold * savings * m->lifetime) >= insn_count
1757 || (m->forces && m->forces->done
1758 && n_times_used[m->forces->regno] == 1))
1759 {
1760 int count;
1761 register struct movable *m1;
1762 rtx first;
1763
1764 /* Now move the insns that set the reg. */
1765
1766 if (m->partial && m->match)
1767 {
1768 rtx newpat, i1;
1769 rtx r1, r2;
1770 /* Find the end of this chain of matching regs.
1771 Thus, we load each reg in the chain from that one reg.
1772 And that reg is loaded with 0 directly,
1773 since it has ->match == 0. */
1774 for (m1 = m; m1->match; m1 = m1->match);
1775 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1776 SET_DEST (PATTERN (m1->insn)));
1777 i1 = emit_insn_before (newpat, loop_start);
1778
1779 /* Mark the moved, invariant reg as being allowed to
1780 share a hard reg with the other matching invariant. */
1781 REG_NOTES (i1) = REG_NOTES (m->insn);
1782 r1 = SET_DEST (PATTERN (m->insn));
1783 r2 = SET_DEST (PATTERN (m1->insn));
1784 regs_may_share
1785 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1786 gen_rtx_EXPR_LIST (VOIDmode, r2,
1787 regs_may_share));
1788 delete_insn (m->insn);
1789
1790 if (new_start == 0)
1791 new_start = i1;
1792
1793 if (loop_dump_stream)
1794 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1795 }
1796 /* If we are to re-generate the item being moved with a
1797 new move insn, first delete what we have and then emit
1798 the move insn before the loop. */
1799 else if (m->move_insn)
1800 {
1801 rtx i1, temp;
1802
1803 for (count = m->consec; count >= 0; count--)
1804 {
1805 /* If this is the first insn of a library call sequence,
1806 skip to the end. */
1807 if (GET_CODE (p) != NOTE
1808 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1809 p = XEXP (temp, 0);
1810
1811 /* If this is the last insn of a libcall sequence, then
1812 delete every insn in the sequence except the last.
1813 The last insn is handled in the normal manner. */
1814 if (GET_CODE (p) != NOTE
1815 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1816 {
1817 temp = XEXP (temp, 0);
1818 while (temp != p)
1819 temp = delete_insn (temp);
1820 }
1821
1822 p = delete_insn (p);
1823 while (p && GET_CODE (p) == NOTE)
1824 p = NEXT_INSN (p);
1825 }
1826
1827 start_sequence ();
1828 emit_move_insn (m->set_dest, m->set_src);
1829 temp = get_insns ();
1830 end_sequence ();
1831
1832 add_label_notes (m->set_src, temp);
1833
1834 i1 = emit_insns_before (temp, loop_start);
1835 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1836 REG_NOTES (i1)
1837 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1838 m->set_src, REG_NOTES (i1));
1839
1840 if (loop_dump_stream)
1841 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1842
1843 /* The more regs we move, the less we like moving them. */
1844 threshold -= 3;
1845 }
1846 else
1847 {
1848 for (count = m->consec; count >= 0; count--)
1849 {
1850 rtx i1, temp;
1851
1852 /* If first insn of libcall sequence, skip to end. */
1853 /* Do this at start of loop, since p is guaranteed to
1854 be an insn here. */
1855 if (GET_CODE (p) != NOTE
1856 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1857 p = XEXP (temp, 0);
1858
1859 /* If last insn of libcall sequence, move all
1860 insns except the last before the loop. The last
1861 insn is handled in the normal manner. */
1862 if (GET_CODE (p) != NOTE
1863 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1864 {
1865 rtx fn_address = 0;
1866 rtx fn_reg = 0;
1867 rtx fn_address_insn = 0;
1868
1869 first = 0;
1870 for (temp = XEXP (temp, 0); temp != p;
1871 temp = NEXT_INSN (temp))
1872 {
1873 rtx body;
1874 rtx n;
1875 rtx next;
1876
1877 if (GET_CODE (temp) == NOTE)
1878 continue;
1879
1880 body = PATTERN (temp);
1881
1882 /* Find the next insn after TEMP,
1883 not counting USE or NOTE insns. */
1884 for (next = NEXT_INSN (temp); next != p;
1885 next = NEXT_INSN (next))
1886 if (! (GET_CODE (next) == INSN
1887 && GET_CODE (PATTERN (next)) == USE)
1888 && GET_CODE (next) != NOTE)
1889 break;
1890
1891 /* If that is the call, this may be the insn
1892 that loads the function address.
1893
1894 Extract the function address from the insn
1895 that loads it into a register.
1896 If this insn was cse'd, we get incorrect code.
1897
1898 So emit a new move insn that copies the
1899 function address into the register that the
1900 call insn will use. flow.c will delete any
1901 redundant stores that we have created. */
1902 if (GET_CODE (next) == CALL_INSN
1903 && GET_CODE (body) == SET
1904 && GET_CODE (SET_DEST (body)) == REG
1905 && (n = find_reg_note (temp, REG_EQUAL,
1906 NULL_RTX)))
1907 {
1908 fn_reg = SET_SRC (body);
1909 if (GET_CODE (fn_reg) != REG)
1910 fn_reg = SET_DEST (body);
1911 fn_address = XEXP (n, 0);
1912 fn_address_insn = temp;
1913 }
1914 /* We have the call insn.
1915 If it uses the register we suspect it might,
1916 load it with the correct address directly. */
1917 if (GET_CODE (temp) == CALL_INSN
1918 && fn_address != 0
1919 && reg_referenced_p (fn_reg, body))
1920 emit_insn_after (gen_move_insn (fn_reg,
1921 fn_address),
1922 fn_address_insn);
1923
1924 if (GET_CODE (temp) == CALL_INSN)
1925 {
1926 i1 = emit_call_insn_before (body, loop_start);
1927 /* Because the USAGE information potentially
1928 contains objects other than hard registers,
1929 we need to copy it. */
1930 if (CALL_INSN_FUNCTION_USAGE (temp))
1931 CALL_INSN_FUNCTION_USAGE (i1)
1932 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1933 }
1934 else
1935 i1 = emit_insn_before (body, loop_start);
1936 if (first == 0)
1937 first = i1;
1938 if (temp == fn_address_insn)
1939 fn_address_insn = i1;
1940 REG_NOTES (i1) = REG_NOTES (temp);
1941 delete_insn (temp);
1942 }
1943 }
1944 if (m->savemode != VOIDmode)
1945 {
1946 /* P sets REG to zero; but we should clear only
1947 the bits that are not covered by the mode
1948 m->savemode. */
1949 rtx reg = m->set_dest;
1950 rtx sequence;
1951 rtx tem;
1952
1953 start_sequence ();
1954 tem = expand_binop
1955 (GET_MODE (reg), and_optab, reg,
1956 GEN_INT ((((HOST_WIDE_INT) 1
1957 << GET_MODE_BITSIZE (m->savemode)))
1958 - 1),
1959 reg, 1, OPTAB_LIB_WIDEN);
1960 if (tem == 0)
1961 abort ();
1962 if (tem != reg)
1963 emit_move_insn (reg, tem);
1964 sequence = gen_sequence ();
1965 end_sequence ();
1966 i1 = emit_insn_before (sequence, loop_start);
1967 }
1968 else if (GET_CODE (p) == CALL_INSN)
1969 {
1970 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1971 /* Because the USAGE information potentially
1972 contains objects other than hard registers,
1973 we need to copy it. */
1974 if (CALL_INSN_FUNCTION_USAGE (p))
1975 CALL_INSN_FUNCTION_USAGE (i1)
1976 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1977 }
1978 else if (count == m->consec && m->move_insn_first)
1979 {
1980 /* The SET_SRC might not be invariant, so we must
1981 use the REG_EQUAL note. */
1982 start_sequence ();
1983 emit_move_insn (m->set_dest, m->set_src);
1984 temp = get_insns ();
1985 end_sequence ();
1986
1987 add_label_notes (m->set_src, temp);
1988
1989 i1 = emit_insns_before (temp, loop_start);
1990 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1991 REG_NOTES (i1)
1992 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1993 : REG_EQUAL),
1994 m->set_src, REG_NOTES (i1));
1995 }
1996 else
1997 i1 = emit_insn_before (PATTERN (p), loop_start);
1998
1999 if (REG_NOTES (i1) == 0)
2000 {
2001 REG_NOTES (i1) = REG_NOTES (p);
2002
2003 /* If there is a REG_EQUAL note present whose value
2004 is not loop invariant, then delete it, since it
2005 may cause problems with later optimization passes.
2006 It is possible for cse to create such notes
2007 like this as a result of record_jump_cond. */
2008
2009 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2010 && ! invariant_p (XEXP (temp, 0)))
2011 remove_note (i1, temp);
2012 }
2013
2014 if (new_start == 0)
2015 new_start = i1;
2016
2017 if (loop_dump_stream)
2018 fprintf (loop_dump_stream, " moved to %d",
2019 INSN_UID (i1));
2020
2021 /* If library call, now fix the REG_NOTES that contain
2022 insn pointers, namely REG_LIBCALL on FIRST
2023 and REG_RETVAL on I1. */
2024 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2025 {
2026 XEXP (temp, 0) = first;
2027 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2028 XEXP (temp, 0) = i1;
2029 }
2030
2031 delete_insn (p);
2032 do p = NEXT_INSN (p);
2033 while (p && GET_CODE (p) == NOTE);
2034 }
2035
2036 /* The more regs we move, the less we like moving them. */
2037 threshold -= 3;
2038 }
2039
2040 /* Any other movable that loads the same register
2041 MUST be moved. */
2042 already_moved[regno] = 1;
2043
2044 /* This reg has been moved out of one loop. */
2045 moved_once[regno] = 1;
2046
2047 /* The reg set here is now invariant. */
2048 if (! m->partial)
2049 n_times_set[regno] = 0;
2050
2051 m->done = 1;
2052
2053 /* Change the length-of-life info for the register
2054 to say it lives at least the full length of this loop.
2055 This will help guide optimizations in outer loops. */
2056
2057 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2058 /* This is the old insn before all the moved insns.
2059 We can't use the moved insn because it is out of range
2060 in uid_luid. Only the old insns have luids. */
2061 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2062 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2063 REGNO_LAST_UID (regno) = INSN_UID (end);
2064
2065 /* Combine with this moved insn any other matching movables. */
2066
2067 if (! m->partial)
2068 for (m1 = movables; m1; m1 = m1->next)
2069 if (m1->match == m)
2070 {
2071 rtx temp;
2072
2073 /* Schedule the reg loaded by M1
2074 for replacement so that it shares the reg of M.
2075 If the modes differ (only possible in restricted
2076 circumstances), make a SUBREG. */
2077 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2078 reg_map[m1->regno] = m->set_dest;
2079 else
2080 reg_map[m1->regno]
2081 = gen_lowpart_common (GET_MODE (m1->set_dest),
2082 m->set_dest);
2083
2084 /* Get rid of the matching insn
2085 and prevent further processing of it. */
2086 m1->done = 1;
2087
2088 /* If library call, delete all insns except the last, which
2089 is deleted below. */
2090 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2091 NULL_RTX)))
2092 {
2093 for (temp = XEXP (temp, 0); temp != m1->insn;
2094 temp = NEXT_INSN (temp))
2095 delete_insn (temp);
2096 }
2097 delete_insn (m1->insn);
2098
2099 /* Any other movable that loads the same register
2100 MUST be moved. */
2101 already_moved[m1->regno] = 1;
2102
2103 /* The reg merged here is now invariant,
2104 if the reg it matches is invariant. */
2105 if (! m->partial)
2106 n_times_set[m1->regno] = 0;
2107 }
2108 }
2109 else if (loop_dump_stream)
2110 fprintf (loop_dump_stream, "not desirable");
2111 }
2112 else if (loop_dump_stream && !m->match)
2113 fprintf (loop_dump_stream, "not safe");
2114
2115 if (loop_dump_stream)
2116 fprintf (loop_dump_stream, "\n");
2117 }
2118
2119 if (new_start == 0)
2120 new_start = loop_start;
2121
2122 /* Go through all the instructions in the loop, making
2123 all the register substitutions scheduled in REG_MAP. */
2124 for (p = new_start; p != end; p = NEXT_INSN (p))
2125 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2126 || GET_CODE (p) == CALL_INSN)
2127 {
2128 replace_regs (PATTERN (p), reg_map, nregs, 0);
2129 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2130 INSN_CODE (p) = -1;
2131 }
2132 }
2133 \f
2134 #if 0
2135 /* Scan X and replace the address of any MEM in it with ADDR.
2136 REG is the address that MEM should have before the replacement. */
2137
2138 static void
2139 replace_call_address (x, reg, addr)
2140 rtx x, reg, addr;
2141 {
2142 register enum rtx_code code;
2143 register int i;
2144 register char *fmt;
2145
2146 if (x == 0)
2147 return;
2148 code = GET_CODE (x);
2149 switch (code)
2150 {
2151 case PC:
2152 case CC0:
2153 case CONST_INT:
2154 case CONST_DOUBLE:
2155 case CONST:
2156 case SYMBOL_REF:
2157 case LABEL_REF:
2158 case REG:
2159 return;
2160
2161 case SET:
2162 /* Short cut for very common case. */
2163 replace_call_address (XEXP (x, 1), reg, addr);
2164 return;
2165
2166 case CALL:
2167 /* Short cut for very common case. */
2168 replace_call_address (XEXP (x, 0), reg, addr);
2169 return;
2170
2171 case MEM:
2172 /* If this MEM uses a reg other than the one we expected,
2173 something is wrong. */
2174 if (XEXP (x, 0) != reg)
2175 abort ();
2176 XEXP (x, 0) = addr;
2177 return;
2178
2179 default:
2180 break;
2181 }
2182
2183 fmt = GET_RTX_FORMAT (code);
2184 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2185 {
2186 if (fmt[i] == 'e')
2187 replace_call_address (XEXP (x, i), reg, addr);
2188 if (fmt[i] == 'E')
2189 {
2190 register int j;
2191 for (j = 0; j < XVECLEN (x, i); j++)
2192 replace_call_address (XVECEXP (x, i, j), reg, addr);
2193 }
2194 }
2195 }
2196 #endif
2197 \f
2198 /* Return the number of memory refs to addresses that vary
2199 in the rtx X. */
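
/* For example (the register number is illustrative, and (reg 64) is
   assumed to vary in the loop): (mem (reg 64)) counts as one nonfixed
   read, (mem (mem (reg 64))) counts as two, and
   (mem (symbol_ref "x")) counts as zero. */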
2200
2201 static int
2202 count_nonfixed_reads (x)
2203 rtx x;
2204 {
2205 register enum rtx_code code;
2206 register int i;
2207 register char *fmt;
2208 int value;
2209
2210 if (x == 0)
2211 return 0;
2212
2213 code = GET_CODE (x);
2214 switch (code)
2215 {
2216 case PC:
2217 case CC0:
2218 case CONST_INT:
2219 case CONST_DOUBLE:
2220 case CONST:
2221 case SYMBOL_REF:
2222 case LABEL_REF:
2223 case REG:
2224 return 0;
2225
2226 case MEM:
2227 return ((invariant_p (XEXP (x, 0)) != 1)
2228 + count_nonfixed_reads (XEXP (x, 0)));
2229
2230 default:
2231 break;
2232 }
2233
2234 value = 0;
2235 fmt = GET_RTX_FORMAT (code);
2236 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2237 {
2238 if (fmt[i] == 'e')
2239 value += count_nonfixed_reads (XEXP (x, i));
2240 if (fmt[i] == 'E')
2241 {
2242 register int j;
2243 for (j = 0; j < XVECLEN (x, i); j++)
2244 value += count_nonfixed_reads (XVECEXP (x, i, j));
2245 }
2246 }
2247 return value;
2248 }
2249
2250 \f
2251 #if 0
2252 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2253 Replace it with an instruction to load just the low bytes
2254 if the machine supports such an instruction,
2255 and insert above LOOP_START an instruction to clear the register. */
2256
2257 static void
2258 constant_high_bytes (p, loop_start)
2259 rtx p, loop_start;
2260 {
2261 register rtx new;
2262 register int insn_code_number;
2263
2264 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2265 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2266
2267 new = gen_rtx_SET (VOIDmode,
2268 gen_rtx_STRICT_LOW_PART (VOIDmode,
2269 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2270 SET_DEST (PATTERN (p)),
2271 0)),
2272 XEXP (SET_SRC (PATTERN (p)), 0));
2273 insn_code_number = recog (new, p);
2274
2275 if (insn_code_number)
2276 {
2277 register int i;
2278
2279 /* Clear destination register before the loop. */
2280 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2281 const0_rtx),
2282 loop_start);
2283
2284 /* Inside the loop, just load the low part. */
2285 PATTERN (p) = new;
2286 }
2287 }
2288 #endif
2289 \f
2290 /* Scan a loop setting the variables `unknown_address_altered',
2291 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2292 and `loop_has_volatile'.
2293 Also, fill in the array `loop_store_mems'. */
2294
2295 static void
2296 prescan_loop (start, end)
2297 rtx start, end;
2298 {
2299 register int level = 1;
2300 register rtx insn;
2301
2302 unknown_address_altered = 0;
2303 loop_has_call = 0;
2304 loop_has_volatile = 0;
2305 loop_store_mems_idx = 0;
2306
2307 num_mem_sets = 0;
2308 loops_enclosed = 1;
2309 loop_continue = 0;
2310
2311 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2312 insn = NEXT_INSN (insn))
2313 {
2314 if (GET_CODE (insn) == NOTE)
2315 {
2316 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2317 {
2318 ++level;
2319 /* Count number of loops contained in this one. */
2320 loops_enclosed++;
2321 }
2322 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2323 {
2324 --level;
2325 if (level == 0)
2326 {
2327 end = insn;
2328 break;
2329 }
2330 }
2331 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2332 {
2333 if (level == 1)
2334 loop_continue = insn;
2335 }
2336 }
2337 else if (GET_CODE (insn) == CALL_INSN)
2338 {
2339 if (! CONST_CALL_P (insn))
2340 unknown_address_altered = 1;
2341 loop_has_call = 1;
2342 }
2343 else
2344 {
2345 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2346 {
2347 if (volatile_refs_p (PATTERN (insn)))
2348 loop_has_volatile = 1;
2349
2350 note_stores (PATTERN (insn), note_addr_stored);
2351 }
2352 }
2353 }
2354 }
2355 \f
2356 /* Scan the function looking for loops. Record the start and end of each loop.
2357 Also mark as invalid any loops that contain a setjmp or are branched
2358 to from outside the loop. */
2359
2360 static void
2361 find_and_verify_loops (f)
2362 rtx f;
2363 {
2364 rtx insn, label;
2365 int current_loop = -1;
2366 int next_loop = -1;
2367 int loop;
2368
2369 /* If there are jumps to undefined labels,
2370 treat them as jumps out of any/all loops.
2371 This also avoids writing past end of tables when there are no loops. */
2372 uid_loop_num[0] = -1;
2373
2374 /* Find boundaries of loops, mark which loops are contained within
2375 loops, and invalidate loops that have setjmp. */
2376
2377 for (insn = f; insn; insn = NEXT_INSN (insn))
2378 {
2379 if (GET_CODE (insn) == NOTE)
2380 switch (NOTE_LINE_NUMBER (insn))
2381 {
2382 case NOTE_INSN_LOOP_BEG:
2383 loop_number_loop_starts[++next_loop] = insn;
2384 loop_number_loop_ends[next_loop] = 0;
2385 loop_outer_loop[next_loop] = current_loop;
2386 loop_invalid[next_loop] = 0;
2387 loop_number_exit_labels[next_loop] = 0;
2388 loop_number_exit_count[next_loop] = 0;
2389 current_loop = next_loop;
2390 break;
2391
2392 case NOTE_INSN_SETJMP:
2393 /* In this case, we must invalidate our current loop and any
2394 enclosing loop. */
2395 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2396 {
2397 loop_invalid[loop] = 1;
2398 if (loop_dump_stream)
2399 fprintf (loop_dump_stream,
2400 "\nLoop at %d ignored due to setjmp.\n",
2401 INSN_UID (loop_number_loop_starts[loop]));
2402 }
2403 break;
2404
2405 case NOTE_INSN_LOOP_END:
2406 if (current_loop == -1)
2407 abort ();
2408
2409 loop_number_loop_ends[current_loop] = insn;
2410 current_loop = loop_outer_loop[current_loop];
2411 break;
2412
2413 default:
2414 break;
2415 }
2416
2417 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2418 enclosing loop, but this doesn't matter. */
2419 uid_loop_num[INSN_UID (insn)] = current_loop;
2420 }
2421
2422 /* Any loop containing a label used in an initializer must be invalidated,
2423 because it can be jumped into from anywhere. */
2424
2425 for (label = forced_labels; label; label = XEXP (label, 1))
2426 {
2427 int loop_num;
2428
2429 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2430 loop_num != -1;
2431 loop_num = loop_outer_loop[loop_num])
2432 loop_invalid[loop_num] = 1;
2433 }
2434
2435 /* Any loop containing a label used for an exception handler must be
2436 invalidated, because it can be jumped into from anywhere. */
2437
2438 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2439 {
2440 int loop_num;
2441
2442 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2443 loop_num != -1;
2444 loop_num = loop_outer_loop[loop_num])
2445 loop_invalid[loop_num] = 1;
2446 }
2447
2448 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2449 loop that it is not contained within, that loop is marked invalid.
2450 If any INSN or CALL_INSN uses a label's address, then the loop containing
2451 that label is marked invalid, because it could be jumped into from
2452 anywhere.
2453
2454 Also look for blocks of code ending in an unconditional branch that
2455 exits the loop. If such a block is surrounded by a conditional
2456 branch around the block, move the block elsewhere (see below) and
2457 invert the jump to point to the code block. This may eliminate a
2458 label in our loop and will simplify processing by both us and a
2459 possible second cse pass. */
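
/* Schematically (the labels are illustrative), a sequence inside the
   loop such as

       if (cond) goto L1;
       <block ending in "goto exit">
     L1: ...

   is rewritten by inverting the conditional jump so that it targets a
   new label at the head of the block, and then moving the block,
   which only exits the loop, to just after a BARRIER outside the
   loop. */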
2460
2461 for (insn = f; insn; insn = NEXT_INSN (insn))
2462 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2463 {
2464 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2465
2466 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2467 {
2468 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2469 if (note)
2470 {
2471 int loop_num;
2472
2473 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2474 loop_num != -1;
2475 loop_num = loop_outer_loop[loop_num])
2476 loop_invalid[loop_num] = 1;
2477 }
2478 }
2479
2480 if (GET_CODE (insn) != JUMP_INSN)
2481 continue;
2482
2483 mark_loop_jump (PATTERN (insn), this_loop_num);
2484
2485 /* See if this is an unconditional branch outside the loop. */
2486 if (this_loop_num != -1
2487 && (GET_CODE (PATTERN (insn)) == RETURN
2488 || (simplejump_p (insn)
2489 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2490 != this_loop_num)))
2491 && get_max_uid () < max_uid_for_loop)
2492 {
2493 rtx p;
2494 rtx our_next = next_real_insn (insn);
2495 int dest_loop;
2496 int outer_loop = -1;
2497
2498 /* Go backwards until we reach the start of the loop, a label,
2499 or a JUMP_INSN. */
2500 for (p = PREV_INSN (insn);
2501 GET_CODE (p) != CODE_LABEL
2502 && ! (GET_CODE (p) == NOTE
2503 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2504 && GET_CODE (p) != JUMP_INSN;
2505 p = PREV_INSN (p))
2506 ;
2507
2508 /* Check for the case where we have a jump to an inner nested
2509 loop, and do not perform the optimization in that case. */
2510
2511 if (JUMP_LABEL (insn))
2512 {
2513 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2514 if (dest_loop != -1)
2515 {
2516 for (outer_loop = dest_loop; outer_loop != -1;
2517 outer_loop = loop_outer_loop[outer_loop])
2518 if (outer_loop == this_loop_num)
2519 break;
2520 }
2521 }
2522
2523 /* Make sure that the target of P is within the current loop. */
2524
2525 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2526 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2527 outer_loop = this_loop_num;
2528
2529 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2530 we have a block of code to try to move.
2531
2532 We look backward and then forward from the target of INSN
2533 to find a BARRIER at the same loop depth as the target.
2534 If we find such a BARRIER, we make a new label for the start
2535 of the block, invert the jump in P and point it to that label,
2536 and move the block of code to the spot we found. */
2537
2538 if (outer_loop == -1
2539 && GET_CODE (p) == JUMP_INSN
2540 && JUMP_LABEL (p) != 0
2541 /* Just ignore jumps to labels that were never emitted.
2542 These always indicate compilation errors. */
2543 && INSN_UID (JUMP_LABEL (p)) != 0
2544 && condjump_p (p)
2545 && ! simplejump_p (p)
2546 && next_real_insn (JUMP_LABEL (p)) == our_next)
2547 {
2548 rtx target
2549 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2550 int target_loop_num = uid_loop_num[INSN_UID (target)];
2551 rtx loc;
2552
2553 for (loc = target; loc; loc = PREV_INSN (loc))
2554 if (GET_CODE (loc) == BARRIER
2555 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2556 break;
2557
2558 if (loc == 0)
2559 for (loc = target; loc; loc = NEXT_INSN (loc))
2560 if (GET_CODE (loc) == BARRIER
2561 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2562 break;
2563
2564 if (loc)
2565 {
2566 rtx cond_label = JUMP_LABEL (p);
2567 rtx new_label = get_label_after (p);
2568
2569 /* Ensure our label doesn't go away. */
2570 LABEL_NUSES (cond_label)++;
2571
2572 /* Verify that uid_loop_num is large enough and that
2573 we can invert P. */
2574 if (invert_jump (p, new_label))
2575 {
2576 rtx q, r;
2577
2578 /* If no suitable BARRIER was found, create a suitable
2579 one before TARGET. Since TARGET is a fall through
2580 path, we'll need to insert a jump around our block
2581 and add a BARRIER before TARGET.
2582
2583 This creates an extra unconditional jump outside
2584 the loop. However, the benefits of removing rarely
2585 executed instructions from inside the loop usually
2586 outweigh the cost of the extra unconditional jump
2587 outside the loop. */
2588 if (loc == 0)
2589 {
2590 rtx temp;
2591
2592 temp = gen_jump (JUMP_LABEL (insn));
2593 temp = emit_jump_insn_before (temp, target);
2594 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2595 LABEL_NUSES (JUMP_LABEL (insn))++;
2596 loc = emit_barrier_before (target);
2597 }
2598
2599 /* Include the BARRIER after INSN and copy the
2600 block after LOC. */
2601 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2602 reorder_insns (new_label, NEXT_INSN (insn), loc);
2603
2604 /* All those insns are now in TARGET_LOOP_NUM. */
2605 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2606 q = NEXT_INSN (q))
2607 uid_loop_num[INSN_UID (q)] = target_loop_num;
2608
2609 /* The label jumped to by INSN is no longer a loop exit.
2610 Unless INSN does not have a label (e.g., it is a
2611 RETURN insn), search loop_number_exit_labels to find
2612 its label_ref, and remove it. Also turn off
2613 LABEL_OUTSIDE_LOOP_P bit. */
2614 if (JUMP_LABEL (insn))
2615 {
2616 int loop_num;
2617
2618 for (q = 0,
2619 r = loop_number_exit_labels[this_loop_num];
2620 r; q = r, r = LABEL_NEXTREF (r))
2621 if (XEXP (r, 0) == JUMP_LABEL (insn))
2622 {
2623 LABEL_OUTSIDE_LOOP_P (r) = 0;
2624 if (q)
2625 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2626 else
2627 loop_number_exit_labels[this_loop_num]
2628 = LABEL_NEXTREF (r);
2629 break;
2630 }
2631
2632 for (loop_num = this_loop_num;
2633 loop_num != -1 && loop_num != target_loop_num;
2634 loop_num = loop_outer_loop[loop_num])
2635 loop_number_exit_count[loop_num]--;
2636
2637 /* If we didn't find it, then something is wrong. */
2638 if (! r)
2639 abort ();
2640 }
2641
2642 /* P is now a jump outside the loop, so it must be put
2643 in loop_number_exit_labels, and marked as such.
2644 The easiest way to do this is to just call
2645 mark_loop_jump again for P. */
2646 mark_loop_jump (PATTERN (p), this_loop_num);
2647
2648 /* If INSN now jumps to the insn after it,
2649 delete INSN. */
2650 if (JUMP_LABEL (insn) != 0
2651 && (next_real_insn (JUMP_LABEL (insn))
2652 == next_real_insn (insn)))
2653 delete_insn (insn);
2654 }
2655
2656 /* Continue the loop after where the conditional
2657 branch used to jump, since the only branch insn
2658 in the block (if it still remains) is an inter-loop
2659 branch and hence needs no processing. */
2660 insn = NEXT_INSN (cond_label);
2661
2662 if (--LABEL_NUSES (cond_label) == 0)
2663 delete_insn (cond_label);
2664
2665 /* This loop will be continued with NEXT_INSN (insn). */
2666 insn = PREV_INSN (insn);
2667 }
2668 }
2669 }
2670 }
2671 }
2672
2673 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2674 loops it is contained in, mark the target loop invalid.
2675
2676 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
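
/* Schematically: given the nesting  loop A { loop B { L1: ... } }
   and a jump to L1 from inside A but outside B, loop B acquires a
   second entry point and is marked invalid, while A is left alone,
   since A contains the jump insn itself. */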
2677
2678 static void
2679 mark_loop_jump (x, loop_num)
2680 rtx x;
2681 int loop_num;
2682 {
2683 int dest_loop;
2684 int outer_loop;
2685 int i;
2686
2687 switch (GET_CODE (x))
2688 {
2689 case PC:
2690 case USE:
2691 case CLOBBER:
2692 case REG:
2693 case MEM:
2694 case CONST_INT:
2695 case CONST_DOUBLE:
2696 case RETURN:
2697 return;
2698
2699 case CONST:
2700 /* There could be a label reference in here. */
2701 mark_loop_jump (XEXP (x, 0), loop_num);
2702 return;
2703
2704 case PLUS:
2705 case MINUS:
2706 case MULT:
2707 mark_loop_jump (XEXP (x, 0), loop_num);
2708 mark_loop_jump (XEXP (x, 1), loop_num);
2709 return;
2710
2711 case SIGN_EXTEND:
2712 case ZERO_EXTEND:
2713 mark_loop_jump (XEXP (x, 0), loop_num);
2714 return;
2715
2716 case LABEL_REF:
2717 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2718
2719 /* Link together all labels that branch outside the loop. This
2720 is used by final_[bg]iv_value and the loop unrolling code. Also
2721 mark this LABEL_REF so we know that this branch should predict
2722 false. */
2723
2724 /* A check to make sure the label is not in an inner nested loop,
2725 since this does not count as a loop exit. */
2726 if (dest_loop != -1)
2727 {
2728 for (outer_loop = dest_loop; outer_loop != -1;
2729 outer_loop = loop_outer_loop[outer_loop])
2730 if (outer_loop == loop_num)
2731 break;
2732 }
2733 else
2734 outer_loop = -1;
2735
2736 if (loop_num != -1 && outer_loop == -1)
2737 {
2738 LABEL_OUTSIDE_LOOP_P (x) = 1;
2739 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2740 loop_number_exit_labels[loop_num] = x;
2741
2742 for (outer_loop = loop_num;
2743 outer_loop != -1 && outer_loop != dest_loop;
2744 outer_loop = loop_outer_loop[outer_loop])
2745 loop_number_exit_count[outer_loop]++;
2746 }
2747
2748 /* If this is inside a loop, but not in the current loop or one enclosed
2749 by it, it invalidates at least one loop. */
2750
2751 if (dest_loop == -1)
2752 return;
2753
2754 /* We must invalidate every nested loop containing the target of this
2755 label, except those that also contain the jump insn. */
2756
2757 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2758 {
2759 /* Stop when we reach a loop that also contains the jump insn. */
2760 for (outer_loop = loop_num; outer_loop != -1;
2761 outer_loop = loop_outer_loop[outer_loop])
2762 if (dest_loop == outer_loop)
2763 return;
2764
2765 /* If we get here, we know we need to invalidate a loop. */
2766 if (loop_dump_stream && ! loop_invalid[dest_loop])
2767 fprintf (loop_dump_stream,
2768 "\nLoop at %d ignored due to multiple entry points.\n",
2769 INSN_UID (loop_number_loop_starts[dest_loop]));
2770
2771 loop_invalid[dest_loop] = 1;
2772 }
2773 return;
2774
2775 case SET:
2776 /* If this is not setting pc, ignore. */
2777 if (SET_DEST (x) == pc_rtx)
2778 mark_loop_jump (SET_SRC (x), loop_num);
2779 return;
2780
2781 case IF_THEN_ELSE:
2782 mark_loop_jump (XEXP (x, 1), loop_num);
2783 mark_loop_jump (XEXP (x, 2), loop_num);
2784 return;
2785
2786 case PARALLEL:
2787 case ADDR_VEC:
2788 for (i = 0; i < XVECLEN (x, 0); i++)
2789 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2790 return;
2791
2792 case ADDR_DIFF_VEC:
2793 for (i = 0; i < XVECLEN (x, 1); i++)
2794 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2795 return;
2796
2797 default:
2798 /* Treat anything else (such as a symbol_ref)
2799 as a branch out of this loop, but not into any loop. */
2800
2801 if (loop_num != -1)
2802 {
2803 #ifdef HAIFA
2804 LABEL_OUTSIDE_LOOP_P (x) = 1;
2805 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2806 #endif /* HAIFA */
2807
2808 loop_number_exit_labels[loop_num] = x;
2809
2810 for (outer_loop = loop_num; outer_loop != -1;
2811 outer_loop = loop_outer_loop[outer_loop])
2812 loop_number_exit_count[outer_loop]++;
2813 }
2814 return;
2815 }
2816 }
2817 \f
2818 /* Return nonzero if there is a label in the range from
2819 insn INSN to and including the insn whose luid is END.
2820 INSN must have an assigned luid (i.e., it must not have
2821 been previously created by loop.c). */
2822
2823 static int
2824 labels_in_range_p (insn, end)
2825 rtx insn;
2826 int end;
2827 {
2828 while (insn && INSN_LUID (insn) <= end)
2829 {
2830 if (GET_CODE (insn) == CODE_LABEL)
2831 return 1;
2832 insn = NEXT_INSN (insn);
2833 }
2834
2835 return 0;
2836 }
2837
2838 /* Record that a memory reference X is being set. */
2839
2840 static void
2841 note_addr_stored (x, y)
2842 rtx x;
2843 rtx y ATTRIBUTE_UNUSED;
2844 {
2845 register int i;
2846
2847 if (x == 0 || GET_CODE (x) != MEM)
2848 return;
2849
2850 /* Count number of memory writes.
2851 This affects heuristics in strength_reduce. */
2852 num_mem_sets++;
2853
2854 /* BLKmode MEM means all memory is clobbered. */
2855 if (GET_MODE (x) == BLKmode)
2856 unknown_address_altered = 1;
2857
2858 if (unknown_address_altered)
2859 return;
2860
2861 for (i = 0; i < loop_store_mems_idx; i++)
2862 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2863 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2864 {
2865 /* We are storing at the same address as previously noted. Save the
2866 wider reference. */
2867 if (GET_MODE_SIZE (GET_MODE (x))
2868 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2869 loop_store_mems[i] = x;
2870 break;
2871 }
2872
2873 if (i == NUM_STORES)
2874 unknown_address_altered = 1;
2875
2876 else if (i == loop_store_mems_idx)
2877 loop_store_mems[loop_store_mems_idx++] = x;
2878 }
2879 \f
2880 /* Return nonzero if the rtx X is invariant over the current loop.
2881
2882 The value is 2 if we refer to something only conditionally invariant.
2883
2884 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2885 Otherwise, a memory ref is invariant if it does not conflict with
2886 anything stored in `loop_store_mems'. */
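
/* For example (the register number is illustrative):
   (plus (reg 64) (const_int 4)) is invariant if (reg 64) is never set
   within the loop.  If (reg 64) is set only by insns that are
   themselves candidates for moving (n_times_set[64] < 0), the value
   returned is 2: invariant provided those candidates really are
   moved. */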
2887
2888 int
2889 invariant_p (x)
2890 register rtx x;
2891 {
2892 register int i;
2893 register enum rtx_code code;
2894 register char *fmt;
2895 int conditional = 0;
2896
2897 if (x == 0)
2898 return 1;
2899 code = GET_CODE (x);
2900 switch (code)
2901 {
2902 case CONST_INT:
2903 case CONST_DOUBLE:
2904 case SYMBOL_REF:
2905 case CONST:
2906 return 1;
2907
2908 case LABEL_REF:
2909 /* A LABEL_REF is normally invariant, however, if we are unrolling
2910 loops, and this label is inside the loop, then it isn't invariant.
2911 This is because each unrolled copy of the loop body will have
2912 a copy of this label. If this was invariant, then an insn loading
2913 the address of this label into a register might get moved outside
2914 the loop, and then each loop body would end up using the same label.
2915
2916 We don't know the loop bounds here though, so just fail for all
2917 labels. */
2918 if (flag_unroll_loops)
2919 return 0;
2920 else
2921 return 1;
2922
2923 case PC:
2924 case CC0:
2925 case UNSPEC_VOLATILE:
2926 return 0;
2927
2928 case REG:
2929 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2930 since the reg might be set by initialization within the loop. */
2931
2932 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2933 || x == arg_pointer_rtx)
2934 && ! current_function_has_nonlocal_goto)
2935 return 1;
2936
2937 if (loop_has_call
2938 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2939 return 0;
2940
2941 if (n_times_set[REGNO (x)] < 0)
2942 return 2;
2943
2944 return n_times_set[REGNO (x)] == 0;
2945
2946 case MEM:
2947 /* Volatile memory references must be rejected. Do this before
2948 checking for read-only items, so that volatile read-only items
2949 will be rejected also. */
2950 if (MEM_VOLATILE_P (x))
2951 return 0;
2952
2953 /* Read-only items (such as constants in a constant pool) are
2954 invariant if their address is. */
2955 if (RTX_UNCHANGING_P (x))
2956 break;
2957
2958 /* If we filled the table (or had a subroutine call), any location
2959 in memory could have been clobbered. */
2960 if (unknown_address_altered)
2961 return 0;
2962
2963 /* See if there is any dependence between a store and this load. */
2964 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2965 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2966 return 0;
2967
2968 /* It's not invalidated by a store in memory
2969 but we must still verify the address is invariant. */
2970 break;
2971
2972 case ASM_OPERANDS:
2973 /* Don't mess with insns declared volatile. */
2974 if (MEM_VOLATILE_P (x))
2975 return 0;
2976 break;
2977
2978 default:
2979 break;
2980 }
2981
2982 fmt = GET_RTX_FORMAT (code);
2983 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2984 {
2985 if (fmt[i] == 'e')
2986 {
2987 int tem = invariant_p (XEXP (x, i));
2988 if (tem == 0)
2989 return 0;
2990 if (tem == 2)
2991 conditional = 1;
2992 }
2993 else if (fmt[i] == 'E')
2994 {
2995 register int j;
2996 for (j = 0; j < XVECLEN (x, i); j++)
2997 {
2998 int tem = invariant_p (XVECEXP (x, i, j));
2999 if (tem == 0)
3000 return 0;
3001 if (tem == 2)
3002 conditional = 1;
3003 }
3004
3005 }
3006 }
3007
3008 return 1 + conditional;
3009 }
3010
3011 \f
3012 /* Return nonzero if all the insns in the loop that set REG
3013 are INSN and the immediately following insns,
3014 and if each of those insns sets REG in an invariant way
3015 (not counting uses of REG in them).
3016
3017 The value is 2 if some of these insns are only conditionally invariant.
3018
3019 We assume that INSN itself is the first set of REG
3020 and that its source is invariant. */
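
/* Schematically (the RISC-style address split is illustrative): a
   pseudo initialized by the two consecutive insns

       (set (reg 64) (high (symbol_ref "x")))
       (set (reg 64) (lo_sum (reg 64) (symbol_ref "x")))

   has two sets whose sources are invariant (uses of the reg itself
   do not count against this), so the pair can be treated as one
   movable unit. */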
3021
3022 static int
3023 consec_sets_invariant_p (reg, n_sets, insn)
3024 int n_sets;
3025 rtx reg, insn;
3026 {
3027 register rtx p = insn;
3028 register int regno = REGNO (reg);
3029 rtx temp;
3030 /* Number of sets we have to insist on finding after INSN. */
3031 int count = n_sets - 1;
3032 int old = n_times_set[regno];
3033 int value = 0;
3034 int this;
3035
3036 /* If N_SETS hit the limit, we can't rely on its value. */
3037 if (n_sets == 127)
3038 return 0;
3039
3040 n_times_set[regno] = 0;
3041
3042 while (count > 0)
3043 {
3044 register enum rtx_code code;
3045 rtx set;
3046
3047 p = NEXT_INSN (p);
3048 code = GET_CODE (p);
3049
3050 /* If library call, skip to end of it. */
3051 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3052 p = XEXP (temp, 0);
3053
3054 this = 0;
3055 if (code == INSN
3056 && (set = single_set (p))
3057 && GET_CODE (SET_DEST (set)) == REG
3058 && REGNO (SET_DEST (set)) == regno)
3059 {
3060 this = invariant_p (SET_SRC (set));
3061 if (this != 0)
3062 value |= this;
3063 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3064 {
3065 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3066 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3067 notes are OK. */
3068 this = (CONSTANT_P (XEXP (temp, 0))
3069 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3070 && invariant_p (XEXP (temp, 0))));
3071 if (this != 0)
3072 value |= this;
3073 }
3074 }
3075 if (this != 0)
3076 count--;
3077 else if (code != NOTE)
3078 {
3079 n_times_set[regno] = old;
3080 return 0;
3081 }
3082 }
3083
3084 n_times_set[regno] = old;
3085 /* If invariant_p ever returned 2, we return 2. */
3086 return 1 + (value & 2);
3087 }
3088
3089 #if 0
3090 /* I don't think this condition is sufficient to allow INSN
3091 to be moved, so we no longer test it. */
3092
3093 /* Return 1 if all insns in the basic block of INSN and following INSN
3094 that set REG are invariant according to TABLE. */
3095
3096 static int
3097 all_sets_invariant_p (reg, insn, table)
3098 rtx reg, insn;
3099 short *table;
3100 {
3101 register rtx p = insn;
3102 register int regno = REGNO (reg);
3103
3104 while (1)
3105 {
3106 register enum rtx_code code;
3107 p = NEXT_INSN (p);
3108 code = GET_CODE (p);
3109 if (code == CODE_LABEL || code == JUMP_INSN)
3110 return 1;
3111 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3112 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3113 && REGNO (SET_DEST (PATTERN (p))) == regno)
3114 {
3115 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3116 return 0;
3117 }
3118 }
3119 }
3120 #endif /* 0 */
3121 \f
3122 /* Look at all uses (not sets) of registers in X. For each, if it is
3123 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3124 a different insn, set USAGE[REGNO] to const0_rtx. */
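
/* For example (the register number is illustrative): if (reg 64) is
   used only in insn I, USAGE[64] ends up equal to I; a second use in
   a different insn demotes USAGE[64] to const0_rtx; a register with
   no uses leaves USAGE[64] == 0. */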
3125
3126 static void
3127 find_single_use_in_loop (insn, x, usage)
3128 rtx insn;
3129 rtx x;
3130 rtx *usage;
3131 {
3132 enum rtx_code code = GET_CODE (x);
3133 char *fmt = GET_RTX_FORMAT (code);
3134 int i, j;
3135
3136 if (code == REG)
3137 usage[REGNO (x)]
3138 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3139 ? const0_rtx : insn;
3140
3141 else if (code == SET)
3142 {
3143 /* Don't count SET_DEST if it is a REG; otherwise count things
3144 in SET_DEST because if a register is partially modified, it won't
3145 show up as a potential movable so we don't care how USAGE is set
3146 for it. */
3147 if (GET_CODE (SET_DEST (x)) != REG)
3148 find_single_use_in_loop (insn, SET_DEST (x), usage);
3149 find_single_use_in_loop (insn, SET_SRC (x), usage);
3150 }
3151 else
3152 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3153 {
3154 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3155 find_single_use_in_loop (insn, XEXP (x, i), usage);
3156 else if (fmt[i] == 'E')
3157 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3158 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3159 }
3160 }
3161 \f
3162 /* Increment N_TIMES_SET at the index of each register
3163 that is modified by an insn between FROM and TO.
3164 If the value of an element of N_TIMES_SET becomes 127 or more,
3165 stop incrementing it, to avoid overflow.
3166
3167 Store in SINGLE_USAGE[I] the single insn in which register I is
3168 used, if it is only used once. Otherwise, it is set to 0 (for no
3169 uses) or const0_rtx for more than one use. This parameter may be zero,
3170 in which case this processing is not done.
3171
3172 Store in *COUNT_PTR the number of actual instructions
3173 in the loop. We use this to decide what is worth moving out. */
3174
3175 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3176 In that case, it is the insn that last set reg n. */
3177
3178 static void
3179 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3180 register rtx from, to;
3181 char *may_not_move;
3182 rtx *single_usage;
3183 int *count_ptr;
3184 int nregs;
3185 {
3186 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3187 register rtx insn;
3188 register int count = 0;
3189 register rtx dest;
3190
3191 bzero ((char *) last_set, nregs * sizeof (rtx));
3192 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3193 {
3194 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3195 {
3196 ++count;
3197
3198 /* If requested, record registers that have exactly one use. */
3199 if (single_usage)
3200 {
3201 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3202
3203 /* Include uses in REG_EQUAL notes. */
3204 if (REG_NOTES (insn))
3205 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3206 }
3207
3208 if (GET_CODE (PATTERN (insn)) == CLOBBER
3209 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3210 /* Don't move a reg that has an explicit clobber.
3211 We might do so sometimes, but it's not worth the pain. */
3212 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3213
3214 if (GET_CODE (PATTERN (insn)) == SET
3215 || GET_CODE (PATTERN (insn)) == CLOBBER)
3216 {
3217 dest = SET_DEST (PATTERN (insn));
3218 while (GET_CODE (dest) == SUBREG
3219 || GET_CODE (dest) == ZERO_EXTRACT
3220 || GET_CODE (dest) == SIGN_EXTRACT
3221 || GET_CODE (dest) == STRICT_LOW_PART)
3222 dest = XEXP (dest, 0);
3223 if (GET_CODE (dest) == REG)
3224 {
3225 register int regno = REGNO (dest);
3226 /* If this is the first setting of this reg
3227 in current basic block, and it was set before,
3228 it must be set in two basic blocks, so it cannot
3229 be moved out of the loop. */
3230 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3231 may_not_move[regno] = 1;
3232 /* If this is not first setting in current basic block,
3233 see if reg was used in between previous one and this.
3234 If so, neither one can be moved. */
3235 if (last_set[regno] != 0
3236 && reg_used_between_p (dest, last_set[regno], insn))
3237 may_not_move[regno] = 1;
3238 if (n_times_set[regno] < 127)
3239 ++n_times_set[regno];
3240 last_set[regno] = insn;
3241 }
3242 }
3243 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3244 {
3245 register int i;
3246 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3247 {
3248 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3249 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3250 /* Don't move a reg that has an explicit clobber.
3251 It's not worth the pain to try to do it correctly. */
3252 may_not_move[REGNO (XEXP (x, 0))] = 1;
3253
3254 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3255 {
3256 dest = SET_DEST (x);
3257 while (GET_CODE (dest) == SUBREG
3258 || GET_CODE (dest) == ZERO_EXTRACT
3259 || GET_CODE (dest) == SIGN_EXTRACT
3260 || GET_CODE (dest) == STRICT_LOW_PART)
3261 dest = XEXP (dest, 0);
3262 if (GET_CODE (dest) == REG)
3263 {
3264 register int regno = REGNO (dest);
3265 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3266 may_not_move[regno] = 1;
3267 if (last_set[regno] != 0
3268 && reg_used_between_p (dest, last_set[regno], insn))
3269 may_not_move[regno] = 1;
3270 if (n_times_set[regno] < 127)
3271 ++n_times_set[regno];
3272 last_set[regno] = insn;
3273 }
3274 }
3275 }
3276 }
3277 }
3278
3279 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3280 bzero ((char *) last_set, nregs * sizeof (rtx));
3281 }
3282 *count_ptr = count;
3283 }
3284 \f
3285 /* Given a loop that is bounded by LOOP_START and LOOP_END
3286 and that is entered at SCAN_START,
3287 return 1 if the register set in SET contained in insn INSN is used by
3288 any insn that precedes INSN in cyclic order starting
3289 from the loop entry point.
3290
3291 We don't want to use INSN_LUID here because if we restrict INSN to those
3292 that have a valid INSN_LUID, it means we cannot move an invariant out
3293 from an inner loop past two loops. */
3294
3295 static int
3296 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3297 rtx set, insn, loop_start, scan_start, loop_end;
3298 {
3299 rtx reg = SET_DEST (set);
3300 rtx p;
3301
3302 /* Scan forward checking for register usage. If we hit INSN, we
3303 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3304 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3305 {
3306 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3307 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3308 return 1;
3309
3310 if (p == loop_end)
3311 p = loop_start;
3312 }
3313
3314 return 0;
3315 }
3316 \f
3317 /* A "basic induction variable" or biv is a pseudo reg that is set
3318 (within this loop) only by incrementing or decrementing it. */
3319 /* A "general induction variable" or giv is a pseudo reg whose
3320 value is a linear function of a biv. */
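
/* Schematically, in source terms (the element size is illustrative):
   in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter I is a biv, since it is set only by i = i + 1, and the
   address a + 4*i computed for a[i] with 4-byte elements is a giv,
   since its value is a linear function of the biv I. */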
3321
3322 /* Bivs are recognized by `basic_induction_var';
3323 Givs by `general_induction_var'. */
3324
3325 /* Indexed by register number, indicates whether or not register is an
3326 induction variable, and if so what type. */
3327
3328 enum iv_mode *reg_iv_type;
3329
3330 /* Indexed by register number, contains pointer to `struct induction'
3331 if register is an induction variable. This holds general info for
3332 all induction variables. */
3333
3334 struct induction **reg_iv_info;
3335
3336 /* Indexed by register number, contains pointer to `struct iv_class'
3337 if register is a basic induction variable. This holds info describing
3338 the class (a related group) of induction variables that the biv belongs
3339 to. */
3340
3341 struct iv_class **reg_biv_class;
3342
3343 /* The head of a list which links together (via the next field)
3344 every iv class for the current loop. */
3345
3346 struct iv_class *loop_iv_list;
3347
3348 /* Communication with routines called via `note_stores'. */
3349
3350 static rtx note_insn;
3351
3352 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3353
3354 static rtx addr_placeholder;
3355
3356 /* ??? Unfinished optimizations, and possible future optimizations,
3357 for the strength reduction code. */
3358
3359 /* ??? There is one more optimization you might be interested in doing: to
3360 allocate pseudo registers for frequently-accessed memory locations.
3361 If the same memory location is referenced each time around, it might
3362 be possible to copy it into a register before and out after.
3363 This is especially useful when the memory location is a variable that
3364 lives in a stack slot because its address is taken somewhere. If the
3365 loop doesn't contain a function call and the variable isn't volatile,
3366 it is safe to keep the value in a register for the duration of the
3367 loop. One tricky thing is that the copying of the value back from the
3368 register has to be done on all exits from the loop. You need to check that
3369 all the exits from the loop go to the same place. */
3370
3371 /* ??? The interaction of biv elimination, and recognition of 'constant'
3372 bivs, may cause problems. */
3373
3374 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3375 performance problems.
3376
3377 Perhaps don't eliminate things that can be combined with an addressing
3378 mode. Find all givs that have the same biv, mult_val, and add_val;
3379 then for each giv, check to see if its only use dies in a following
3380 memory address. If so, generate a new memory address and check to see
3381 if it is valid. If it is valid, then store the modified memory address,
3382 otherwise, mark the giv as not done so that it will get its own iv. */
3383
3384 /* ??? Could try to optimize branches when it is known that a biv is always
3385 positive. */
3386
3387 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3388 giv so that an optimized branch can still be recognized by the combiner,
3389 e.g. the VAX acb insn. */
3390
3391 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3392 was rerun in loop_optimize whenever a register was added or moved.
3393 Also, some of the optimizations could be a little less conservative. */
3394 \f
3395 /* Perform strength reduction and induction variable elimination. */
3396
3397 /* Pseudo registers created during this function will be beyond the last
3398 valid index in several tables including n_times_set and regno_last_uid.
3399 This does not cause a problem here, because the added registers cannot be
3400 givs outside of their loop, and hence will never be reconsidered.
3401 But scan_loop must check regnos to make sure they are in bounds. */
3402
3403 static void
3404 strength_reduce (scan_start, end, loop_top, insn_count,
3405 loop_start, loop_end, unroll_p)
3406 rtx scan_start;
3407 rtx end;
3408 rtx loop_top;
3409 int insn_count;
3410 rtx loop_start;
3411 rtx loop_end;
3412 int unroll_p;
3413 {
3414 rtx p;
3415 rtx set;
3416 rtx inc_val;
3417 rtx mult_val;
3418 rtx dest_reg;
3419 /* This is 1 if current insn is not executed at least once for every loop
3420 iteration. */
3421 int not_every_iteration = 0;
3422 /* This is 1 if current insn may be executed more than once for every
3423 loop iteration. */
3424 int maybe_multiple = 0;
3425 /* Temporary list pointers for traversing loop_iv_list. */
3426 struct iv_class *bl, **backbl;
3427 /* Ratio of extra register life span we can justify
3428 for saving an instruction. More if loop doesn't call subroutines
3429 since in that case saving an insn makes more difference
3430 and more registers are available. */
3431 /* ??? Could set this to the last value of threshold in move_movables. */
3432 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3433 /* Map of pseudo-register replacements. */
3434 rtx *reg_map;
3435 int call_seen;
3436 rtx test;
3437 rtx end_insert_before;
3438 int loop_depth = 0;
3439
3440 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3441 * sizeof (enum iv_mode));
3442 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3443 reg_iv_info = (struct induction **)
3444 alloca (max_reg_before_loop * sizeof (struct induction *));
3445 bzero ((char *) reg_iv_info, (max_reg_before_loop
3446 * sizeof (struct induction *)));
3447 reg_biv_class = (struct iv_class **)
3448 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3449 bzero ((char *) reg_biv_class, (max_reg_before_loop
3450 * sizeof (struct iv_class *)));
3451
3452 loop_iv_list = 0;
3453 addr_placeholder = gen_reg_rtx (Pmode);
3454
3455 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3456 must be put before this insn, so that they will appear in the right
3457 order (i.e. loop order).
3458
3459 If loop_end is the end of the current function, then emit a
3460 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3461 dummy note insn. */
3462 if (NEXT_INSN (loop_end) != 0)
3463 end_insert_before = NEXT_INSN (loop_end);
3464 else
3465 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3466
3467 /* Scan through loop to find all possible bivs. */
3468
3469 p = scan_start;
3470 while (1)
3471 {
3472 p = NEXT_INSN (p);
3473 /* At end of a straight-in loop, we are done.
3474 At end of a loop entered at the bottom, scan the top. */
3475 if (p == scan_start)
3476 break;
3477 if (p == end)
3478 {
3479 if (loop_top != 0)
3480 p = loop_top;
3481 else
3482 break;
3483 if (p == scan_start)
3484 break;
3485 }
3486
3487 if (GET_CODE (p) == INSN
3488 && (set = single_set (p))
3489 && GET_CODE (SET_DEST (set)) == REG)
3490 {
3491 dest_reg = SET_DEST (set);
3492 if (REGNO (dest_reg) < max_reg_before_loop
3493 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3494 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3495 {
3496 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3497 dest_reg, p, &inc_val, &mult_val))
3498 {
3499 /* It is a possible basic induction variable.
3500 Create and initialize an induction structure for it. */
3501
3502 struct induction *v
3503 = (struct induction *) alloca (sizeof (struct induction));
3504
3505 record_biv (v, p, dest_reg, inc_val, mult_val,
3506 not_every_iteration, maybe_multiple);
3507 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3508 }
3509 else if (REGNO (dest_reg) < max_reg_before_loop)
3510 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3511 }
3512 }
3513
3514 /* Past CODE_LABEL, we get to insns that may be executed multiple
3515 times. The only way we can be sure that they can't is if every
3516 jump insn between here and the end of the loop either
3517 returns, exits the loop, is a forward jump, or is a jump
3518 to the loop start. */
3519
3520 if (GET_CODE (p) == CODE_LABEL)
3521 {
3522 rtx insn = p;
3523
3524 maybe_multiple = 0;
3525
3526 while (1)
3527 {
3528 insn = NEXT_INSN (insn);
3529 if (insn == scan_start)
3530 break;
3531 if (insn == end)
3532 {
3533 if (loop_top != 0)
3534 insn = loop_top;
3535 else
3536 break;
3537 if (insn == scan_start)
3538 break;
3539 }
3540
3541 if (GET_CODE (insn) == JUMP_INSN
3542 && GET_CODE (PATTERN (insn)) != RETURN
3543 && (! condjump_p (insn)
3544 || (JUMP_LABEL (insn) != 0
3545 && JUMP_LABEL (insn) != scan_start
3546 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3547 || INSN_UID (insn) >= max_uid_for_loop
3548 || (INSN_LUID (JUMP_LABEL (insn))
3549 < INSN_LUID (insn))))))
3550 {
3551 maybe_multiple = 1;
3552 break;
3553 }
3554 }
3555 }
3556
3557 /* Past a jump, we get to insns for which we can't count
3558 on whether they will be executed during each iteration. */
3559 /* This code appears twice in strength_reduce. There is also similar
3560 code in scan_loop. */
3561 if (GET_CODE (p) == JUMP_INSN
3562 /* If we enter the loop in the middle, and scan around to the
3563 beginning, don't set not_every_iteration for that.
3564 This can be any kind of jump, since we want to know if insns
3565 will be executed if the loop is executed. */
3566 && ! (JUMP_LABEL (p) == loop_top
3567 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3568 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3569 {
3570 rtx label = 0;
3571
/* If this is a jump outside the loop, then it also doesn't
matter.  Check to see if the target of this branch is on the
loop_number_exit_labels list.  */
3575
3576 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3577 label;
3578 label = LABEL_NEXTREF (label))
3579 if (XEXP (label, 0) == JUMP_LABEL (p))
3580 break;
3581
3582 if (! label)
3583 not_every_iteration = 1;
3584 }
3585
3586 else if (GET_CODE (p) == NOTE)
3587 {
3588 /* At the virtual top of a converted loop, insns are again known to
3589 be executed each iteration: logically, the loop begins here
3590 even though the exit code has been duplicated. */
3591 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3592 not_every_iteration = 0;
3593 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3594 loop_depth++;
3595 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3596 loop_depth--;
3597 }
3598
3599 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3600 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3601 or not an insn is known to be executed each iteration of the
3602 loop, whether or not any iterations are known to occur.
3603
3604 Therefore, if we have just passed a label and have no more labels
3605 between here and the test insn of the loop, we know these insns
3606 will be executed each iteration. */
3607
3608 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3609 && no_labels_between_p (p, loop_end))
3610 not_every_iteration = 0;
3611 }
3612
3613 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3614 Make a sanity check against n_times_set. */
3615 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3616 {
3617 if (reg_iv_type[bl->regno] != BASIC_INDUCT
/* Above happens if the register was modified by a subreg, etc.  */
/* Make sure it is not recognized as a basic induction var: */
|| n_times_set[bl->regno] != bl->biv_count
/* If never incremented, it is an invariant that we decided not to
move.  So leave it alone.  */
3623 || ! bl->incremented)
3624 {
3625 if (loop_dump_stream)
3626 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3627 bl->regno,
3628 (reg_iv_type[bl->regno] != BASIC_INDUCT
3629 ? "not induction variable"
3630 : (! bl->incremented ? "never incremented"
3631 : "count error")));
3632
3633 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3634 *backbl = bl->next;
3635 }
3636 else
3637 {
3638 backbl = &bl->next;
3639
3640 if (loop_dump_stream)
3641 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3642 }
3643 }
3644
3645 /* Exit if there are no bivs. */
3646 if (! loop_iv_list)
3647 {
/* We can still unroll the loop anyway, but indicate that there is no
strength reduction info available.  */
3650 if (unroll_p)
3651 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3652
3653 return;
3654 }
3655
3656 /* Find initial value for each biv by searching backwards from loop_start,
3657 halting at first label. Also record any test condition. */
3658
3659 call_seen = 0;
3660 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3661 {
3662 note_insn = p;
3663
3664 if (GET_CODE (p) == CALL_INSN)
3665 call_seen = 1;
3666
3667 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3668 || GET_CODE (p) == CALL_INSN)
3669 note_stores (PATTERN (p), record_initial);
3670
/* Record any test of a biv that branches around the loop if there is no
store between it and the start of the loop.  We only care about tests
with constants and registers, and only certain of those.  */
3674 if (GET_CODE (p) == JUMP_INSN
3675 && JUMP_LABEL (p) != 0
3676 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3677 && (test = get_condition_for_loop (p)) != 0
3678 && GET_CODE (XEXP (test, 0)) == REG
3679 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3680 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3681 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3682 && bl->init_insn == 0)
3683 {
3684 /* If an NE test, we have an initial value! */
3685 if (GET_CODE (test) == NE)
3686 {
3687 bl->init_insn = p;
3688 bl->init_set = gen_rtx_SET (VOIDmode,
3689 XEXP (test, 0), XEXP (test, 1));
3690 }
3691 else
3692 bl->initial_test = test;
3693 }
3694 }
3695
/* Look at each biv and see if we can say anything better about its
initial value from any initializing insns set up above.  (This is done
in two passes to avoid missing SETs in a PARALLEL.)  */
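/* For example, an init insn `(set (reg i) (reg tmp))' carrying a note
   `(REG_EQUAL (const_int 0))' lets us record zero, rather than the
   register copy, as the biv's initial value.  */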
3699 for (bl = loop_iv_list; bl; bl = bl->next)
3700 {
3701 rtx src;
3702 rtx note;
3703
3704 if (! bl->init_insn)
3705 continue;
3706
/* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
is a constant, use that value.  */
3709 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3710 && CONSTANT_P (XEXP (note, 0)))
3711 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3712 && CONSTANT_P (XEXP (note, 0))))
3713 src = XEXP (note, 0);
3714 else
3715 src = SET_SRC (bl->init_set);
3716
3717 if (loop_dump_stream)
3718 fprintf (loop_dump_stream,
3719 "Biv %d initialized at insn %d: initial value ",
3720 bl->regno, INSN_UID (bl->init_insn));
3721
3722 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3723 || GET_MODE (src) == VOIDmode)
3724 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3725 {
3726 bl->initial_value = src;
3727
3728 if (loop_dump_stream)
3729 {
3730 if (GET_CODE (src) == CONST_INT)
3731 {
3732 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3733 fputc ('\n', loop_dump_stream);
3734 }
3735 else
3736 {
3737 print_rtl (loop_dump_stream, src);
3738 fprintf (loop_dump_stream, "\n");
3739 }
3740 }
3741 }
3742 else
3743 {
/* The biv's initial value is not a simple move, so let the biv keep
its own register as its "initial value".  */
3746
3747 if (loop_dump_stream)
3748 fprintf (loop_dump_stream, "is complex\n");
3749 }
3750 }
3751
3752 /* Search the loop for general induction variables. */
3753
3754 /* A register is a giv if: it is only set once, it is a function of a
3755 biv and a constant (or invariant), and it is not a biv. */
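/* For example, with biv `i', an insn computing `x = 4*i + 12' makes `x'
   a giv with src_reg == i, mult_val == 4 and add_val == 12.  */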
3756
3757 not_every_iteration = 0;
3758 loop_depth = 0;
3759 p = scan_start;
3760 while (1)
3761 {
3762 p = NEXT_INSN (p);
3763 /* At end of a straight-in loop, we are done.
3764 At end of a loop entered at the bottom, scan the top. */
3765 if (p == scan_start)
3766 break;
3767 if (p == end)
3768 {
3769 if (loop_top != 0)
3770 p = loop_top;
3771 else
3772 break;
3773 if (p == scan_start)
3774 break;
3775 }
3776
3777 /* Look for a general induction variable in a register. */
3778 if (GET_CODE (p) == INSN
3779 && (set = single_set (p))
3780 && GET_CODE (SET_DEST (set)) == REG
3781 && ! may_not_optimize[REGNO (SET_DEST (set))])
3782 {
3783 rtx src_reg;
3784 rtx add_val;
3785 rtx mult_val;
3786 int benefit;
3787 rtx regnote = 0;
3788
3789 dest_reg = SET_DEST (set);
3790 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3791 continue;
3792
3793 if (/* SET_SRC is a giv. */
3794 ((benefit = general_induction_var (SET_SRC (set),
3795 &src_reg, &add_val,
3796 &mult_val))
3797 /* Equivalent expression is a giv. */
3798 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3799 && (benefit = general_induction_var (XEXP (regnote, 0),
3800 &src_reg,
3801 &add_val, &mult_val))))
3802 /* Don't try to handle any regs made by loop optimization.
3803 We have nothing on them in regno_first_uid, etc. */
3804 && REGNO (dest_reg) < max_reg_before_loop
3805 /* Don't recognize a BASIC_INDUCT_VAR here. */
3806 && dest_reg != src_reg
3807 /* This must be the only place where the register is set. */
3808 && (n_times_set[REGNO (dest_reg)] == 1
3809 /* or all sets must be consecutive and make a giv. */
3810 || (benefit = consec_sets_giv (benefit, p,
3811 src_reg, dest_reg,
3812 &add_val, &mult_val))))
3813 {
3814 int count;
3815 struct induction *v
3816 = (struct induction *) alloca (sizeof (struct induction));
3817 rtx temp;
3818
3819 /* If this is a library call, increase benefit. */
3820 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3821 benefit += libcall_benefit (p);
3822
3823 /* Skip the consecutive insns, if there are any. */
3824 for (count = n_times_set[REGNO (dest_reg)] - 1;
3825 count > 0; count--)
3826 {
/* If first insn of libcall sequence, skip to end.
Do this at the start of the loop, since P is guaranteed to
be an insn here.  */
3830 if (GET_CODE (p) != NOTE
3831 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3832 p = XEXP (temp, 0);
3833
3834 do p = NEXT_INSN (p);
3835 while (GET_CODE (p) == NOTE);
3836 }
3837
3838 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3839 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3840 loop_end);
3841
3842 }
3843 }
3844
3845 #ifndef DONT_REDUCE_ADDR
3846 /* Look for givs which are memory addresses. */
3847 /* This resulted in worse code on a VAX 8600. I wonder if it
3848 still does. */
3849 if (GET_CODE (p) == INSN)
3850 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3851 loop_end);
3852 #endif
3853
/* Update the status of whether a giv can derive other givs.  This can
change when we pass a label or an insn that updates a biv.  */
3856 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3857 || GET_CODE (p) == CODE_LABEL)
3858 update_giv_derive (p);
3859
3860 /* Past a jump, we get to insns for which we can't count
3861 on whether they will be executed during each iteration. */
3862 /* This code appears twice in strength_reduce. There is also similar
3863 code in scan_loop. */
3864 if (GET_CODE (p) == JUMP_INSN
3865 /* If we enter the loop in the middle, and scan around to the
3866 beginning, don't set not_every_iteration for that.
3867 This can be any kind of jump, since we want to know if insns
3868 will be executed if the loop is executed. */
3869 && ! (JUMP_LABEL (p) == loop_top
3870 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3871 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3872 {
3873 rtx label = 0;
3874
/* If this is a jump outside the loop, then it also doesn't
matter.  Check to see if the target of this branch is on the
loop_number_exit_labels list.  */
3878
3879 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3880 label;
3881 label = LABEL_NEXTREF (label))
3882 if (XEXP (label, 0) == JUMP_LABEL (p))
3883 break;
3884
3885 if (! label)
3886 not_every_iteration = 1;
3887 }
3888
3889 else if (GET_CODE (p) == NOTE)
3890 {
3891 /* At the virtual top of a converted loop, insns are again known to
3892 be executed each iteration: logically, the loop begins here
3893 even though the exit code has been duplicated. */
3894 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3895 not_every_iteration = 0;
3896 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3897 loop_depth++;
3898 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3899 loop_depth--;
3900 }
3901
3902 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3903 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3904 or not an insn is known to be executed each iteration of the
3905 loop, whether or not any iterations are known to occur.
3906
3907 Therefore, if we have just passed a label and have no more labels
3908 between here and the test insn of the loop, we know these insns
3909 will be executed each iteration. */
3910
3911 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3912 && no_labels_between_p (p, loop_end))
3913 not_every_iteration = 0;
3914 }
3915
3916 /* Try to calculate and save the number of loop iterations. This is
3917 set to zero if the actual number can not be calculated. This must
3918 be called after all giv's have been identified, since otherwise it may
3919 fail if the iteration variable is a giv. */
3920
3921 loop_n_iterations = loop_iterations (loop_start, loop_end);
3922
3923 /* Now for each giv for which we still don't know whether or not it is
3924 replaceable, check to see if it is replaceable because its final value
3925 can be calculated. This must be done after loop_iterations is called,
3926 so that final_giv_value will work correctly. */
3927
3928 for (bl = loop_iv_list; bl; bl = bl->next)
3929 {
3930 struct induction *v;
3931
3932 for (v = bl->giv; v; v = v->next_iv)
3933 if (! v->replaceable && ! v->not_replaceable)
3934 check_final_value (v, loop_start, loop_end);
3935 }
3936
3937 /* Try to prove that the loop counter variable (if any) is always
3938 nonnegative; if so, record that fact with a REG_NONNEG note
3939 so that "decrement and branch until zero" insn can be used. */
3940 check_dbra_loop (loop_end, insn_count, loop_start);
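/* For instance, a loop stepping `i' from N down to 0 whose endtest is
   `i >= 0' can use one decrement-and-branch insn once `i' is known to
   stay nonnegative.  */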
3941
3942 #ifdef HAIFA
/* Record loop variables relevant for BCT optimization before unrolling
the loop.  Unrolling may update part of this information, and the
correct data will be used for generating the BCT.  */
3946 #ifdef HAVE_decrement_and_branch_on_count
3947 if (HAVE_decrement_and_branch_on_count)
3948 analyze_loop_iterations (loop_start, loop_end);
3949 #endif
3950 #endif /* HAIFA */
3951
3952 /* Create reg_map to hold substitutions for replaceable giv regs. */
3953 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3954 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3955
3956 /* Examine each iv class for feasibility of strength reduction/induction
3957 variable elimination. */
3958
3959 for (bl = loop_iv_list; bl; bl = bl->next)
3960 {
3961 struct induction *v;
3962 int benefit;
3963 int all_reduced;
3964 rtx final_value = 0;
3965
3966 /* Test whether it will be possible to eliminate this biv
3967 provided all givs are reduced. This is possible if either
3968 the reg is not used outside the loop, or we can compute
3969 what its final value will be.
3970
3971 For architectures with a decrement_and_branch_until_zero insn,
3972 don't do this if we put a REG_NONNEG note on the endtest for
3973 this biv. */
3974
3975 /* Compare against bl->init_insn rather than loop_start.
3976 We aren't concerned with any uses of the biv between
3977 init_insn and loop_start since these won't be affected
3978 by the value of the biv elsewhere in the function, so
3979 long as init_insn doesn't use the biv itself.
3980 March 14, 1989 -- self@bayes.arc.nasa.gov */
3981
3982 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3983 && bl->init_insn
3984 && INSN_UID (bl->init_insn) < max_uid_for_loop
3985 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3986 #ifdef HAVE_decrement_and_branch_until_zero
3987 && ! bl->nonneg
3988 #endif
3989 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3990 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3991 #ifdef HAVE_decrement_and_branch_until_zero
3992 && ! bl->nonneg
3993 #endif
3994 ))
3995 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3996 threshold, insn_count);
3997 else
3998 {
3999 if (loop_dump_stream)
4000 {
4001 fprintf (loop_dump_stream,
4002 "Cannot eliminate biv %d.\n",
4003 bl->regno);
4004 fprintf (loop_dump_stream,
4005 "First use: insn %d, last use: insn %d.\n",
4006 REGNO_FIRST_UID (bl->regno),
4007 REGNO_LAST_UID (bl->regno));
4008 }
4009 }
4010
4011 /* Combine all giv's for this iv_class. */
4012 combine_givs (bl);
4013
4014 /* This will be true at the end, if all givs which depend on this
4015 biv have been strength reduced.
4016 We can't (currently) eliminate the biv unless this is so. */
4017 all_reduced = 1;
4018
4019 /* Check each giv in this class to see if we will benefit by reducing
4020 it. Skip giv's combined with others. */
4021 for (v = bl->giv; v; v = v->next_iv)
4022 {
4023 struct induction *tv;
4024
4025 if (v->ignore || v->same)
4026 continue;
4027
4028 benefit = v->benefit;
4029
4030 /* Reduce benefit if not replaceable, since we will insert
4031 a move-insn to replace the insn that calculates this giv.
4032 Don't do this unless the giv is a user variable, since it
4033 will often be marked non-replaceable because of the duplication
4034 of the exit code outside the loop. In such a case, the copies
4035 we insert are dead and will be deleted. So they don't have
4036 a cost. Similar situations exist. */
4037 /* ??? The new final_[bg]iv_value code does a much better job
4038 of finding replaceable giv's, and hence this code may no longer
4039 be necessary. */
4040 if (! v->replaceable && ! bl->eliminable
4041 && REG_USERVAR_P (v->dest_reg))
4042 benefit -= copy_cost;
4043
4044 /* Decrease the benefit to count the add-insns that we will
4045 insert to increment the reduced reg for the giv. */
4046 benefit -= add_cost * bl->biv_count;
4047
4048 /* Decide whether to strength-reduce this giv or to leave the code
4049 unchanged (recompute it from the biv each time it is used).
4050 This decision can be made independently for each giv. */
4051
4052 #ifdef AUTO_INC_DEC
4053 /* Attempt to guess whether autoincrement will handle some of the
4054 new add insns; if so, increase BENEFIT (undo the subtraction of
4055 add_cost that was done above). */
4056 if (v->giv_type == DEST_ADDR
4057 && GET_CODE (v->mult_val) == CONST_INT)
4058 {
4059 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4060 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4061 benefit += add_cost * bl->biv_count;
4062 #endif
4063 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4064 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4065 benefit += add_cost * bl->biv_count;
4066 #endif
4067 }
4068 #endif
4069
4070 /* If an insn is not to be strength reduced, then set its ignore
4071 flag, and clear all_reduced. */
4072
/* A giv that depends on a reversed biv must be reduced if it is
used after the loop exit; otherwise it would have the wrong
value after the loop exit.  To keep things simple, just reduce
all such givs, whether or not we know they are used after the
loop exit.  */
4078
4079 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4080 && ! bl->reversed )
4081 {
4082 if (loop_dump_stream)
4083 fprintf (loop_dump_stream,
4084 "giv of insn %d not worth while, %d vs %d.\n",
4085 INSN_UID (v->insn),
4086 v->lifetime * threshold * benefit, insn_count);
4087 v->ignore = 1;
4088 all_reduced = 0;
4089 }
4090 else
4091 {
4092 /* Check that we can increment the reduced giv without a
4093 multiply insn. If not, reject it. */
4094
4095 for (tv = bl->biv; tv; tv = tv->next_iv)
4096 if (tv->mult_val == const1_rtx
4097 && ! product_cheap_p (tv->add_val, v->mult_val))
4098 {
4099 if (loop_dump_stream)
4100 fprintf (loop_dump_stream,
4101 "giv of insn %d: would need a multiply.\n",
4102 INSN_UID (v->insn));
4103 v->ignore = 1;
4104 all_reduced = 0;
4105 break;
4106 }
4107 }
4108 }
4109
4110 /* Reduce each giv that we decided to reduce. */
4111
4112 for (v = bl->giv; v; v = v->next_iv)
4113 {
4114 struct induction *tv;
4115 if (! v->ignore && v->same == 0)
4116 {
4117 int auto_inc_opt = 0;
4118
4119 v->new_reg = gen_reg_rtx (v->mode);
4120
4121 #ifdef AUTO_INC_DEC
4122 /* If the target has auto-increment addressing modes, and
4123 this is an address giv, then try to put the increment
4124 immediately after its use, so that flow can create an
4125 auto-increment addressing mode. */
4126 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4127 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4128 /* We don't handle reversed biv's because bl->biv->insn
4129 does not have a valid INSN_LUID. */
4130 && ! bl->reversed
4131 && v->always_executed && ! v->maybe_multiple
4132 && INSN_UID (v->insn) < max_uid_for_loop)
4133 {
4134 /* If other giv's have been combined with this one, then
4135 this will work only if all uses of the other giv's occur
4136 before this giv's insn. This is difficult to check.
4137
4138 We simplify this by looking for the common case where
4139 there is one DEST_REG giv, and this giv's insn is the
4140 last use of the dest_reg of that DEST_REG giv. If the
4141 increment occurs after the address giv, then we can
4142 perform the optimization. (Otherwise, the increment
4143 would have to go before other_giv, and we would not be
4144 able to combine it with the address giv to get an
4145 auto-inc address.) */
4146 if (v->combined_with)
4147 {
4148 struct induction *other_giv = 0;
4149
4150 for (tv = bl->giv; tv; tv = tv->next_iv)
4151 if (tv->same == v)
4152 {
4153 if (other_giv)
4154 break;
4155 else
4156 other_giv = tv;
4157 }
4158 if (! tv && other_giv
4159 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4160 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4161 == INSN_UID (v->insn))
4162 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4163 auto_inc_opt = 1;
4164 }
4165 /* Check for case where increment is before the address
4166 giv. Do this test in "loop order". */
4167 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4168 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4169 || (INSN_LUID (bl->biv->insn)
4170 > INSN_LUID (scan_start))))
4171 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4172 && (INSN_LUID (scan_start)
4173 < INSN_LUID (bl->biv->insn))))
4174 auto_inc_opt = -1;
4175 else
4176 auto_inc_opt = 1;
4177
4178 #ifdef HAVE_cc0
4179 {
4180 rtx prev;
4181
4182 /* We can't put an insn immediately after one setting
4183 cc0, or immediately before one using cc0. */
4184 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4185 || (auto_inc_opt == -1
4186 && (prev = prev_nonnote_insn (v->insn)) != 0
4187 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4188 && sets_cc0_p (PATTERN (prev))))
4189 auto_inc_opt = 0;
4190 }
4191 #endif
4192
4193 if (auto_inc_opt)
4194 v->auto_inc_opt = 1;
4195 }
4196 #endif
4197
4198 /* For each place where the biv is incremented, add an insn
4199 to increment the new, reduced reg for the giv. */
4200 for (tv = bl->biv; tv; tv = tv->next_iv)
4201 {
4202 rtx insert_before;
4203
4204 if (! auto_inc_opt)
4205 insert_before = tv->insn;
4206 else if (auto_inc_opt == 1)
4207 insert_before = NEXT_INSN (v->insn);
4208 else
4209 insert_before = v->insn;
4210
4211 if (tv->mult_val == const1_rtx)
4212 emit_iv_add_mult (tv->add_val, v->mult_val,
4213 v->new_reg, v->new_reg, insert_before);
4214 else /* tv->mult_val == const0_rtx */
4215 /* A multiply is acceptable here
4216 since this is presumed to be seldom executed. */
4217 emit_iv_add_mult (tv->add_val, v->mult_val,
4218 v->add_val, v->new_reg, insert_before);
4219 }
4220
4221 /* Add code at loop start to initialize giv's reduced reg. */
4222
4223 emit_iv_add_mult (bl->initial_value, v->mult_val,
4224 v->add_val, v->new_reg, loop_start);
4225 }
4226 }
4227
4228 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4229 as not reduced.
4230
4231 For each giv register that can be reduced now: if replaceable,
4232 substitute reduced reg wherever the old giv occurs;
4233 else add new move insn "giv_reg = reduced_reg".
4234
4235 Also check for givs whose first use is their definition and whose
4236 last use is the definition of another giv. If so, it is likely
4237 dead and should not be used to eliminate a biv. */
4238 for (v = bl->giv; v; v = v->next_iv)
4239 {
4240 if (v->same && v->same->ignore)
4241 v->ignore = 1;
4242
4243 if (v->ignore)
4244 continue;
4245
4246 if (v->giv_type == DEST_REG
4247 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4248 {
4249 struct induction *v1;
4250
4251 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4252 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4253 v->maybe_dead = 1;
4254 }
4255
4256 /* Update expression if this was combined, in case other giv was
4257 replaced. */
4258 if (v->same)
4259 v->new_reg = replace_rtx (v->new_reg,
4260 v->same->dest_reg, v->same->new_reg);
4261
4262 if (v->giv_type == DEST_ADDR)
4263 /* Store reduced reg as the address in the memref where we found
4264 this giv. */
4265 validate_change (v->insn, v->location, v->new_reg, 0);
4266 else if (v->replaceable)
4267 {
4268 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4269
4270 #if 0
4271 /* I can no longer duplicate the original problem. Perhaps
4272 this is unnecessary now? */
4273
4274 /* Replaceable; it isn't strictly necessary to delete the old
4275 insn and emit a new one, because v->dest_reg is now dead.
4276
4277 However, especially when unrolling loops, the special
4278 handling for (set REG0 REG1) in the second cse pass may
4279 make v->dest_reg live again. To avoid this problem, emit
4280 an insn to set the original giv reg from the reduced giv.
4281 We can not delete the original insn, since it may be part
4282 of a LIBCALL, and the code in flow that eliminates dead
4283 libcalls will fail if it is deleted. */
4284 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4285 v->insn);
4286 #endif
4287 }
4288 else
4289 {
4290 /* Not replaceable; emit an insn to set the original giv reg from
4291 the reduced giv, same as above. */
4292 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4293 v->insn);
4294 }
4295
4296 /* When a loop is reversed, givs which depend on the reversed
4297 biv, and which are live outside the loop, must be set to their
4298 correct final value. This insn is only needed if the giv is
4299 not replaceable. The correct final value is the same as the
4300 value that the giv starts the reversed loop with. */
4301 if (bl->reversed && ! v->replaceable)
4302 emit_iv_add_mult (bl->initial_value, v->mult_val,
4303 v->add_val, v->dest_reg, end_insert_before);
4304 else if (v->final_value)
4305 {
4306 rtx insert_before;
4307
4308 /* If the loop has multiple exits, emit the insn before the
4309 loop to ensure that it will always be executed no matter
4310 how the loop exits. Otherwise, emit the insn after the loop,
4311 since this is slightly more efficient. */
4312 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4313 insert_before = loop_start;
4314 else
4315 insert_before = end_insert_before;
4316 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4317 insert_before);
4318
4319 #if 0
4320 /* If the insn to set the final value of the giv was emitted
4321 before the loop, then we must delete the insn inside the loop
4322 that sets it. If this is a LIBCALL, then we must delete
4323 every insn in the libcall. Note, however, that
4324 final_giv_value will only succeed when there are multiple
4325 exits if the giv is dead at each exit, hence it does not
4326 matter that the original insn remains because it is dead
4327 anyways. */
4328 /* Delete the insn inside the loop that sets the giv since
4329 the giv is now set before (or after) the loop. */
4330 delete_insn (v->insn);
4331 #endif
4332 }
4333
4334 if (loop_dump_stream)
4335 {
4336 fprintf (loop_dump_stream, "giv at %d reduced to ",
4337 INSN_UID (v->insn));
4338 print_rtl (loop_dump_stream, v->new_reg);
4339 fprintf (loop_dump_stream, "\n");
4340 }
4341 }
4342
4343 /* All the givs based on the biv bl have been reduced if they
4344 merit it. */
4345
4346 /* For each giv not marked as maybe dead that has been combined with a
4347 second giv, clear any "maybe dead" mark on that second giv.
4348 v->new_reg will either be or refer to the register of the giv it
4349 combined with.
4350
4351 Doing this clearing avoids problems in biv elimination where a
4352 giv's new_reg is a complex value that can't be put in the insn but
4353 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4354 Since the register will be used in either case, we'd prefer it be
4355 used from the simpler giv. */
4356
4357 for (v = bl->giv; v; v = v->next_iv)
4358 if (! v->maybe_dead && v->same)
4359 v->same->maybe_dead = 0;
4360
4361 /* Try to eliminate the biv, if it is a candidate.
4362 This won't work if ! all_reduced,
4363 since the givs we planned to use might not have been reduced.
4364
4365 We have to be careful that we didn't initially think we could eliminate
4366 this biv because of a giv that we now think may be dead and shouldn't
4367 be used as a biv replacement.
4368
4369 Also, there is the possibility that we may have a giv that looks
4370 like it can be used to eliminate a biv, but the resulting insn
4371 isn't valid. This can happen, for example, on the 88k, where a
4372 JUMP_INSN can compare a register only with zero. Attempts to
4373 replace it with a compare with a constant will fail.
4374
4375 Note that in cases where this call fails, we may have replaced some
4376 of the occurrences of the biv with a giv, but no harm was done in
4377 doing so in the rare cases where it can occur. */
4378
4379 if (all_reduced == 1 && bl->eliminable
4380 && maybe_eliminate_biv (bl, loop_start, end, 1,
4381 threshold, insn_count))
4382
4383 {
4384 /* ?? If we created a new test to bypass the loop entirely,
4385 or otherwise drop straight in, based on this test, then
4386 we might want to rewrite it also. This way some later
4387 pass has more hope of removing the initialization of this
4388 biv entirely. */
4389
4390 /* If final_value != 0, then the biv may be used after loop end
4391 and we must emit an insn to set it just in case.
4392
Reversed bivs already have an insn after the loop setting their
value, so we don't need another one.  We can't calculate the
proper final value for such a biv here anyway.  */
4396 if (final_value != 0 && ! bl->reversed)
4397 {
4398 rtx insert_before;
4399
4400 /* If the loop has multiple exits, emit the insn before the
4401 loop to ensure that it will always be executed no matter
4402 how the loop exits. Otherwise, emit the insn after the
4403 loop, since this is slightly more efficient. */
4404 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4405 insert_before = loop_start;
4406 else
4407 insert_before = end_insert_before;
4408
emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
insert_before);
4411 }
4412
4413 #if 0
/* Delete all of the instructions inside the loop which set
the biv, as they are all dead.  It is safe to delete them,
because an insn setting a biv will never be part of a libcall.  */
4417 /* However, deleting them will invalidate the regno_last_uid info,
4418 so keeping them around is more convenient. Final_biv_value
4419 will only succeed when there are multiple exits if the biv
4420 is dead at each exit, hence it does not matter that the original
4421 insn remains, because it is dead anyways. */
4422 for (v = bl->biv; v; v = v->next_iv)
4423 delete_insn (v->insn);
4424 #endif
4425
4426 if (loop_dump_stream)
4427 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4428 bl->regno);
4429 }
4430 }
4431
4432 /* Go through all the instructions in the loop, making all the
4433 register substitutions scheduled in REG_MAP. */
4434
4435 for (p = loop_start; p != end; p = NEXT_INSN (p))
4436 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4437 || GET_CODE (p) == CALL_INSN)
4438 {
4439 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4440 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4441 INSN_CODE (p) = -1;
4442 }
4443
4444 /* Unroll loops from within strength reduction so that we can use the
4445 induction variable information that strength_reduce has already
4446 collected. */
4447
4448 if (unroll_p)
4449 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4450
4451 #ifdef HAIFA
/* Instrument the loop with a BCT insn.  */
4453 #ifdef HAVE_decrement_and_branch_on_count
4454 if (HAVE_decrement_and_branch_on_count)
4455 insert_bct (loop_start, loop_end);
4456 #endif
4457 #endif /* HAIFA */
4458
4459 if (loop_dump_stream)
4460 fprintf (loop_dump_stream, "\n");
4461 }
4462 \f
4463 /* Return 1 if X is a valid source for an initial value (or as value being
4464 compared against in an initial test).
4465
4466 X must be either a register or constant and must not be clobbered between
4467 the current insn and the start of the loop.
4468
4469 INSN is the insn containing X. */
4470
4471 static int
4472 valid_initial_value_p (x, insn, call_seen, loop_start)
4473 rtx x;
4474 rtx insn;
4475 int call_seen;
4476 rtx loop_start;
4477 {
4478 if (CONSTANT_P (x))
4479 return 1;
4480
/* Only consider pseudos we know about, initialized in insns whose luids
we know.  */
4483 if (GET_CODE (x) != REG
4484 || REGNO (x) >= max_reg_before_loop)
4485 return 0;
4486
/* Don't use a call-clobbered register across a call which clobbers it.  On
some machines, don't use any hard registers at all.  */
4489 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4490 && (SMALL_REGISTER_CLASSES
4491 || (call_used_regs[REGNO (x)] && call_seen)))
4492 return 0;
4493
4494 /* Don't use registers that have been clobbered before the start of the
4495 loop. */
4496 if (reg_set_between_p (x, insn, loop_start))
4497 return 0;
4498
4499 return 1;
4500 }
4501 \f
4502 /* Scan X for memory refs and check each memory address
4503 as a possible giv. INSN is the insn whose pattern X comes from.
4504 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4505 every loop iteration. */
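/* For example, an address like (plus (reg i) (const_int 4)) inside a
   MEM in INSN may qualify as a DEST_ADDR giv based on the biv `i'.  */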
4506
4507 static void
4508 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4509 rtx x;
4510 rtx insn;
4511 int not_every_iteration;
4512 rtx loop_start, loop_end;
4513 {
4514 register int i, j;
4515 register enum rtx_code code;
4516 register char *fmt;
4517
4518 if (x == 0)
4519 return;
4520
4521 code = GET_CODE (x);
4522 switch (code)
4523 {
4524 case REG:
4525 case CONST_INT:
4526 case CONST:
4527 case CONST_DOUBLE:
4528 case SYMBOL_REF:
4529 case LABEL_REF:
4530 case PC:
4531 case CC0:
4532 case ADDR_VEC:
4533 case ADDR_DIFF_VEC:
4534 case USE:
4535 case CLOBBER:
4536 return;
4537
4538 case MEM:
4539 {
4540 rtx src_reg;
4541 rtx add_val;
4542 rtx mult_val;
4543 int benefit;
4544
4545 benefit = general_induction_var (XEXP (x, 0),
4546 &src_reg, &add_val, &mult_val);
4547
4548 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4549 Such a giv isn't useful. */
4550 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4551 {
4552 /* Found one; record it. */
4553 struct induction *v
4554 = (struct induction *) oballoc (sizeof (struct induction));
4555
4556 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4557 add_val, benefit, DEST_ADDR, not_every_iteration,
4558 &XEXP (x, 0), loop_start, loop_end);
4559
4560 v->mem_mode = GET_MODE (x);
4561 }
4562 }
4563 return;
4564
4565 default:
4566 break;
4567 }
4568
4569 /* Recursively scan the subexpressions for other mem refs. */
4570
4571 fmt = GET_RTX_FORMAT (code);
4572 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4573 if (fmt[i] == 'e')
4574 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4575 loop_end);
4576 else if (fmt[i] == 'E')
4577 for (j = 0; j < XVECLEN (x, i); j++)
4578 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4579 loop_start, loop_end);
4580 }
4581 \f
4582 /* Fill in the data about one biv update.
4583 V is the `struct induction' in which we record the biv. (It is
4584 allocated by the caller, with alloca.)
4585 INSN is the insn that sets it.
4586 DEST_REG is the biv's reg.
4587
4588 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4589 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4590 being set to INC_VAL.
4591
NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4593 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4594 can be executed more than once per iteration. If MAYBE_MULTIPLE
4595 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4596 executed exactly once per iteration. */
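/* Thus an increment `i = i + 4' arrives here with MULT_VAL == const1_rtx
   and INC_VAL == (const_int 4), while a plain assignment `i = 17' arrives
   with MULT_VAL == const0_rtx and INC_VAL == (const_int 17).  */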
4597
4598 static void
4599 record_biv (v, insn, dest_reg, inc_val, mult_val,
4600 not_every_iteration, maybe_multiple)
4601 struct induction *v;
4602 rtx insn;
4603 rtx dest_reg;
4604 rtx inc_val;
4605 rtx mult_val;
4606 int not_every_iteration;
4607 int maybe_multiple;
4608 {
4609 struct iv_class *bl;
4610
4611 v->insn = insn;
4612 v->src_reg = dest_reg;
4613 v->dest_reg = dest_reg;
4614 v->mult_val = mult_val;
4615 v->add_val = inc_val;
4616 v->mode = GET_MODE (dest_reg);
4617 v->always_computable = ! not_every_iteration;
4618 v->always_executed = ! not_every_iteration;
4619 v->maybe_multiple = maybe_multiple;
4620
4621 /* Add this to the reg's iv_class, creating a class
4622 if this is the first incrementation of the reg. */
4623
4624 bl = reg_biv_class[REGNO (dest_reg)];
4625 if (bl == 0)
4626 {
4627 /* Create and initialize new iv_class. */
4628
4629 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4630
4631 bl->regno = REGNO (dest_reg);
4632 bl->biv = 0;
4633 bl->giv = 0;
4634 bl->biv_count = 0;
4635 bl->giv_count = 0;
4636
4637 /* Set initial value to the reg itself. */
4638 bl->initial_value = dest_reg;
/* We haven't seen the initializing insn yet.  */
4640 bl->init_insn = 0;
4641 bl->init_set = 0;
4642 bl->initial_test = 0;
4643 bl->incremented = 0;
4644 bl->eliminable = 0;
4645 bl->nonneg = 0;
4646 bl->reversed = 0;
4647 bl->total_benefit = 0;
4648
4649 /* Add this class to loop_iv_list. */
4650 bl->next = loop_iv_list;
4651 loop_iv_list = bl;
4652
4653 /* Put it in the array of biv register classes. */
4654 reg_biv_class[REGNO (dest_reg)] = bl;
4655 }
4656
4657 /* Update IV_CLASS entry for this biv. */
4658 v->next_iv = bl->biv;
4659 bl->biv = v;
4660 bl->biv_count++;
4661 if (mult_val == const1_rtx)
4662 bl->incremented = 1;
4663
4664 if (loop_dump_stream)
4665 {
4666 fprintf (loop_dump_stream,
4667 "Insn %d: possible biv, reg %d,",
4668 INSN_UID (insn), REGNO (dest_reg));
4669 if (GET_CODE (inc_val) == CONST_INT)
4670 {
4671 fprintf (loop_dump_stream, " const =");
4672 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4673 fputc ('\n', loop_dump_stream);
4674 }
4675 else
4676 {
4677 fprintf (loop_dump_stream, " const = ");
4678 print_rtl (loop_dump_stream, inc_val);
4679 fprintf (loop_dump_stream, "\n");
4680 }
4681 }
4682 }
4683 \f
4684 /* Fill in the data about one giv.
4685 V is the `struct induction' in which we record the giv. (It is
4686 allocated by the caller, with alloca.)
4687 INSN is the insn that sets it.
4688 BENEFIT estimates the savings from deleting this insn.
4689 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4690 into a register or is used as a memory address.
4691
4692 SRC_REG is the biv reg which the giv is computed from.
4693 DEST_REG is the giv's reg (if the giv is stored in a reg).
4694 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4695 LOCATION points to the place where this giv's value appears in INSN. */
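/* For example, `q = p + 4*i' (with biv `i' and invariant `p') is recorded
   as a DEST_REG giv for `q', whereas the address `p + 4*i' appearing
   inside a MEM is recorded as a DEST_ADDR giv whose LOCATION is the
   address slot of that MEM.  */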
4696
4697 static void
4698 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4699 type, not_every_iteration, location, loop_start, loop_end)
4700 struct induction *v;
4701 rtx insn;
4702 rtx src_reg;
4703 rtx dest_reg;
4704 rtx mult_val, add_val;
4705 int benefit;
4706 enum g_types type;
4707 int not_every_iteration;
4708 rtx *location;
4709 rtx loop_start, loop_end;
4710 {
4711 struct induction *b;
4712 struct iv_class *bl;
4713 rtx set = single_set (insn);
4714
4715 v->insn = insn;
4716 v->src_reg = src_reg;
4717 v->giv_type = type;
4718 v->dest_reg = dest_reg;
4719 v->mult_val = mult_val;
4720 v->add_val = add_val;
4721 v->benefit = benefit;
4722 v->location = location;
4723 v->cant_derive = 0;
4724 v->combined_with = 0;
4725 v->maybe_multiple = 0;
4726 v->maybe_dead = 0;
4727 v->derive_adjustment = 0;
4728 v->same = 0;
4729 v->ignore = 0;
4730 v->new_reg = 0;
4731 v->final_value = 0;
4732 v->same_insn = 0;
4733 v->auto_inc_opt = 0;
4734 v->unrolled = 0;
4735 v->shared = 0;
4736
4737 /* The v->always_computable field is used in update_giv_derive, to
4738 determine whether a giv can be used to derive another giv. For a
DEST_REG giv, INSN computes a new value for the giv, so its value
isn't computable if INSN isn't executed every iteration.
4741 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4742 it does not compute a new value. Hence the value is always computable
4743 regardless of whether INSN is executed each iteration. */
4744
4745 if (type == DEST_ADDR)
4746 v->always_computable = 1;
4747 else
4748 v->always_computable = ! not_every_iteration;
4749
4750 v->always_executed = ! not_every_iteration;
4751
4752 if (type == DEST_ADDR)
4753 {
4754 v->mode = GET_MODE (*location);
4755 v->lifetime = 1;
4756 v->times_used = 1;
4757 }
4758 else /* type == DEST_REG */
4759 {
4760 v->mode = GET_MODE (SET_DEST (set));
4761
4762 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4763 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4764
4765 v->times_used = n_times_used[REGNO (dest_reg)];
4766
4767 /* If the lifetime is zero, it means that this register is
4768 really a dead store. So mark this as a giv that can be
4769 ignored. This will not prevent the biv from being eliminated. */
4770 if (v->lifetime == 0)
4771 v->ignore = 1;
4772
4773 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4774 reg_iv_info[REGNO (dest_reg)] = v;
4775 }
4776
4777 /* Add the giv to the class of givs computed from one biv. */
4778
4779 bl = reg_biv_class[REGNO (src_reg)];
4780 if (bl)
4781 {
4782 v->next_iv = bl->giv;
4783 bl->giv = v;
4784 /* Don't count DEST_ADDR. This is supposed to count the number of
4785 insns that calculate givs. */
4786 if (type == DEST_REG)
4787 bl->giv_count++;
4788 bl->total_benefit += benefit;
4789 }
4790 else
4791 /* Fatal error, biv missing for this giv? */
4792 abort ();
4793
4794 if (type == DEST_ADDR)
4795 v->replaceable = 1;
4796 else
4797 {
4798 /* The giv can be replaced outright by the reduced register only if all
4799 of the following conditions are true:
4800 - the insn that sets the giv is always executed on any iteration
4801 on which the giv is used at all
4802 (there are two ways to deduce this:
4803 either the insn is executed on every iteration,
4804 or all uses follow that insn in the same basic block),
4805 - the giv is not used outside the loop
4806 - no assignments to the biv occur during the giv's lifetime. */
4807
4808 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4809 /* Previous line always fails if INSN was moved by loop opt. */
4810 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4811 && (! not_every_iteration
4812 || last_use_this_basic_block (dest_reg, insn)))
4813 {
4814 /* Now check that there are no assignments to the biv within the
4815 giv's lifetime. This requires two separate checks. */
4816
4817 /* Check each biv update, and fail if any are between the first
4818 and last use of the giv.
4819
4820 If this loop contains an inner loop that was unrolled, then
4821 the insn modifying the biv may have been emitted by the loop
4822 unrolling code, and hence does not have a valid luid. Just
4823 mark the biv as not replaceable in this case. It is not very
4824 useful as a biv, because it is used in two different loops.
4825 It is very unlikely that we would be able to optimize the giv
4826 using this biv anyways. */
4827
4828 v->replaceable = 1;
4829 for (b = bl->biv; b; b = b->next_iv)
4830 {
4831 if (INSN_UID (b->insn) >= max_uid_for_loop
4832 || ((uid_luid[INSN_UID (b->insn)]
4833 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4834 && (uid_luid[INSN_UID (b->insn)]
4835 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4836 {
4837 v->replaceable = 0;
4838 v->not_replaceable = 1;
4839 break;
4840 }
4841 }
4842
4843 /* If there are any backwards branches that go from after the
4844 biv update to before it, then this giv is not replaceable. */
4845 if (v->replaceable)
4846 for (b = bl->biv; b; b = b->next_iv)
4847 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4848 {
4849 v->replaceable = 0;
4850 v->not_replaceable = 1;
4851 break;
4852 }
4853 }
4854 else
4855 {
4856 /* May still be replaceable, we don't have enough info here to
4857 decide. */
4858 v->replaceable = 0;
4859 v->not_replaceable = 0;
4860 }
4861 }
4862
4863 if (loop_dump_stream)
4864 {
4865 if (type == DEST_REG)
4866 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4867 INSN_UID (insn), REGNO (dest_reg));
4868 else
4869 fprintf (loop_dump_stream, "Insn %d: dest address",
4870 INSN_UID (insn));
4871
4872 fprintf (loop_dump_stream, " src reg %d benefit %d",
4873 REGNO (src_reg), v->benefit);
4874 fprintf (loop_dump_stream, " used %d lifetime %d",
4875 v->times_used, v->lifetime);
4876
4877 if (v->replaceable)
4878 fprintf (loop_dump_stream, " replaceable");
4879
4880 if (GET_CODE (mult_val) == CONST_INT)
4881 {
4882 fprintf (loop_dump_stream, " mult ");
4883 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4884 }
4885 else
4886 {
4887 fprintf (loop_dump_stream, " mult ");
4888 print_rtl (loop_dump_stream, mult_val);
4889 }
4890
4891 if (GET_CODE (add_val) == CONST_INT)
4892 {
4893 fprintf (loop_dump_stream, " add ");
4894 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4895 }
4896 else
4897 {
4898 fprintf (loop_dump_stream, " add ");
4899 print_rtl (loop_dump_stream, add_val);
4900 }
4901 }
4902
4903 if (loop_dump_stream)
4904 fprintf (loop_dump_stream, "\n");
4905
4906 }
4907
4908
4909 /* All this does is determine whether a giv can be made replaceable because
4910 its final value can be calculated. This code can not be part of record_giv
4911 above, because final_giv_value requires that the number of loop iterations
4912 be known, and that can not be accurately calculated until after all givs
4913 have been identified. */
4914
4915 static void
4916 check_final_value (v, loop_start, loop_end)
4917 struct induction *v;
4918 rtx loop_start, loop_end;
4919 {
4920 struct iv_class *bl;
4921 rtx final_value = 0;
4922
4923 bl = reg_biv_class[REGNO (v->src_reg)];
4924
4925 /* DEST_ADDR givs will never reach here, because they are always marked
4926 replaceable above in record_giv. */
4927
4928 /* The giv can be replaced outright by the reduced register only if all
4929 of the following conditions are true:
4930 - the insn that sets the giv is always executed on any iteration
4931 on which the giv is used at all
4932 (there are two ways to deduce this:
4933 either the insn is executed on every iteration,
4934 or all uses follow that insn in the same basic block),
4935 - its final value can be calculated (this condition is different
4936 than the one above in record_giv)
4937 - no assignments to the biv occur during the giv's lifetime. */
4938
4939 #if 0
4940 /* This is only called now when replaceable is known to be false. */
4941 /* Clear replaceable, so that it won't confuse final_giv_value. */
4942 v->replaceable = 0;
4943 #endif
4944
4945 if ((final_value = final_giv_value (v, loop_start, loop_end))
4946 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4947 {
4948 int biv_increment_seen = 0;
4949 rtx p = v->insn;
4950 rtx last_giv_use;
4951
4952 v->replaceable = 1;
4953
4954 /* When trying to determine whether or not a biv increment occurs
4955 during the lifetime of the giv, we can ignore uses of the variable
4956 outside the loop because final_value is true. Hence we can not
4957 use regno_last_uid and regno_first_uid as above in record_giv. */
4958
4959 /* Search the loop to determine whether any assignments to the
4960 biv occur during the giv's lifetime. Start with the insn
4961 that sets the giv, and search around the loop until we come
4962 back to that insn again.
4963
4964 Also fail if there is a jump within the giv's lifetime that jumps
4965 to somewhere outside the lifetime but still within the loop. This
4966 catches spaghetti code where the execution order is not linear, and
4967 hence the above test fails. Here we assume that the giv lifetime
4968 does not extend from one iteration of the loop to the next, so as
4969 to make the test easier. Since the lifetime isn't known yet,
4970 this requires two loops. See also record_giv above. */
4971
4972 last_giv_use = v->insn;
4973
4974 while (1)
4975 {
4976 p = NEXT_INSN (p);
4977 if (p == loop_end)
4978 p = NEXT_INSN (loop_start);
4979 if (p == v->insn)
4980 break;
4981
4982 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4983 || GET_CODE (p) == CALL_INSN)
4984 {
4985 if (biv_increment_seen)
4986 {
4987 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4988 {
4989 v->replaceable = 0;
4990 v->not_replaceable = 1;
4991 break;
4992 }
4993 }
4994 else if (reg_set_p (v->src_reg, PATTERN (p)))
4995 biv_increment_seen = 1;
4996 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4997 last_giv_use = p;
4998 }
4999 }
5000
5001 /* Now that the lifetime of the giv is known, check for branches
5002 from within the lifetime to outside the lifetime if it is still
5003 replaceable. */
5004
5005 if (v->replaceable)
5006 {
5007 p = v->insn;
5008 while (1)
5009 {
5010 p = NEXT_INSN (p);
5011 if (p == loop_end)
5012 p = NEXT_INSN (loop_start);
5013 if (p == last_giv_use)
5014 break;
5015
5016 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5017 && LABEL_NAME (JUMP_LABEL (p))
5018 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5019 || (INSN_UID (v->insn) >= max_uid_for_loop)
5020 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5021 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5022 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
5023 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5024 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5025 {
5026 v->replaceable = 0;
5027 v->not_replaceable = 1;
5028
5029 if (loop_dump_stream)
5030 fprintf (loop_dump_stream,
5031 "Found branch outside giv lifetime.\n");
5032
5033 break;
5034 }
5035 }
5036 }
5037
5038 /* If it is replaceable, then save the final value. */
5039 if (v->replaceable)
5040 v->final_value = final_value;
5041 }
5042
5043 if (loop_dump_stream && v->replaceable)
5044 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5045 INSN_UID (v->insn), REGNO (v->dest_reg));
5046 }
5047 \f
5048 /* Update the status of whether a giv can derive other givs.
5049
5050 We need to do something special if there is or may be an update to the biv
5051 between the time the giv is defined and the time it is used to derive
5052 another giv.
5053
5054 In addition, a giv that is only conditionally set is not allowed to
5055 derive another giv once a label has been passed.
5056
5057 The cases we look at are when a label or an update to a biv is passed. */
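/* For example, if giv `g == 3*b' is computed before a biv update
   `b = b + 2', a giv derived from `g' after that update would be off by
   3*2 == 6; derive_adjustment records that compensation.  */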
5058
5059 static void
5060 update_giv_derive (p)
5061 rtx p;
5062 {
5063 struct iv_class *bl;
5064 struct induction *biv, *giv;
5065 rtx tem;
5066 int dummy;
5067
5068 /* Search all IV classes, then all bivs, and finally all givs.
5069
5070 There are three cases we are concerned with. First we have the situation
5071 of a giv that is only updated conditionally. In that case, it may not
5072 derive any givs after a label is passed.
5073
5074 The second case is when a biv update occurs, or may occur, after the
5075 definition of a giv. For certain biv updates (see below) that are
5076 known to occur between the giv definition and use, we can adjust the
5077 giv definition. For others, or when the biv update is conditional,
5078 we must prevent the giv from deriving any other givs. There are two
5079 sub-cases within this case.
5080
5081 If this is a label, we are concerned with any biv update that is done
5082 conditionally, since it may be done after the giv is defined followed by
5083 a branch here (actually, we need to pass both a jump and a label, but
5084 this extra tracking doesn't seem worth it).
5085
5086 If this is a jump, we are concerned about any biv update that may be
5087 executed multiple times. We are actually only concerned about
5088 backward jumps, but it is probably not worth performing the test
5089 on the jump again here.
5090
5091 If this is a biv update, we must adjust the giv status to show that a
5092 subsequent biv update was performed. If this adjustment cannot be done,
5093 the giv cannot derive further givs. */
5094
5095 for (bl = loop_iv_list; bl; bl = bl->next)
5096 for (biv = bl->biv; biv; biv = biv->next_iv)
5097 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5098 || biv->insn == p)
5099 {
5100 for (giv = bl->giv; giv; giv = giv->next_iv)
5101 {
5102 /* If cant_derive is already true, there is no point in
5103 checking all of these conditions again. */
5104 if (giv->cant_derive)
5105 continue;
5106
5107 /* If this giv is conditionally set and we have passed a label,
5108 it cannot derive anything. */
5109 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5110 giv->cant_derive = 1;
5111
5112 /* Skip givs that have mult_val == 0, since
5113 they are really invariants. Also skip those that are
5114 replaceable, since we know their lifetime doesn't contain
5115 any biv update. */
5116 else if (giv->mult_val == const0_rtx || giv->replaceable)
5117 continue;
5118
5119 /* The only way we can allow this giv to derive another
5120 is if this is a biv increment and we can form the product
5121 of biv->add_val and giv->mult_val. In this case, we will
5122 be able to compute a compensation. */
5123 else if (biv->insn == p)
5124 {
5125 tem = 0;
5126
5127 if (biv->mult_val == const1_rtx)
5128 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5129 biv->add_val,
5130 giv->mult_val),
5131 &dummy);
5132
5133 if (tem && giv->derive_adjustment)
5134 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5135 giv->derive_adjustment),
5136 &dummy);
5137 if (tem)
5138 giv->derive_adjustment = tem;
5139 else
5140 giv->cant_derive = 1;
5141 }
5142 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5143 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5144 giv->cant_derive = 1;
5145 }
5146 }
5147 }
5148 \f
5149 /* Check whether an insn is an increment legitimate for a basic induction var.
5150 X is the source of insn P, or a part of it.
5151 MODE is the mode in which X should be interpreted.
5152
5153 DEST_REG is the putative biv, also the destination of the insn.
5154 We accept patterns of these forms:
5155 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5156 REG = INVARIANT + REG
5157
5158 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5159 and store the additive term into *INC_VAL.
5160
5161 If X is an assignment of an invariant into DEST_REG, we set
5162 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5163
5164 We also want to detect a BIV when it corresponds to a variable
5165 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5166 of the variable may be a PLUS that adds a SUBREG of that variable to
5167 an invariant and then sign- or zero-extends the result of the PLUS
5168 into the variable.
5169
5170 Most GIVs in such cases will be in the promoted mode, since that is
5171 probably the natural computation mode (and almost certainly the mode
5172 used for addresses) on the machine. So we view the pseudo-reg containing
5173 the variable as the BIV, as if it were simply incremented.
5174
5175 Note that treating the entire pseudo as a BIV will result in making
5176 simple increments to any GIVs based on it. However, if the variable
5177 overflows in its declared mode but not its promoted mode, the result will
5178 be incorrect. This is acceptable if the variable is signed, since
5179 overflows in such cases are undefined, but not if it is unsigned, since
5180 those overflows are defined. So we only check for SIGN_EXTEND and
5181 not ZERO_EXTEND.
5182
5183 If we cannot find a biv, we return 0. */
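
/* For example (schematic RTL, for illustration only), an insn

	(set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))

   makes (reg 100) a biv with *INC_VAL = (const_int 4) and
   *MULT_VAL = const1_rtx, while, inside the innermost loop,

	(set (reg:SI 100) (const_int 0))

   is reported with *MULT_VAL = const0_rtx and the constant in
   *INC_VAL.  In the promoted-variable case the increment looks
   roughly like

	(set (reg:DI 100)
	     (sign_extend:DI (plus:SI (subreg:SI (reg:DI 100) 0)
				      (const_int 4))))

   and we still treat (reg 100) itself as the biv.  */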
5184
5185 static int
5186 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5187 register rtx x;
5188 enum machine_mode mode;
5189 rtx dest_reg;
5190 rtx p;
5191 rtx *inc_val;
5192 rtx *mult_val;
5193 {
5194 register enum rtx_code code;
5195 rtx arg;
5196 rtx insn, set = 0;
5197
5198 code = GET_CODE (x);
5199 switch (code)
5200 {
5201 case PLUS:
5202 if (XEXP (x, 0) == dest_reg
5203 || (GET_CODE (XEXP (x, 0)) == SUBREG
5204 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5205 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5206 arg = XEXP (x, 1);
5207 else if (XEXP (x, 1) == dest_reg
5208 || (GET_CODE (XEXP (x, 1)) == SUBREG
5209 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5210 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5211 arg = XEXP (x, 0);
5212 else
5213 return 0;
5214
5215 if (invariant_p (arg) != 1)
5216 return 0;
5217
5218 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5219 *mult_val = const1_rtx;
5220 return 1;
5221
5222 case SUBREG:
5223 /* If this is a SUBREG for a promoted variable, check the inner
5224 value. */
5225 if (SUBREG_PROMOTED_VAR_P (x))
5226 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5227 dest_reg, p, inc_val, mult_val);
5228 return 0;
5229
5230 case REG:
5231 /* If this register is assigned in the previous insn, look at its
5232 source, but don't go outside the loop or past a label. */
5233
5234 for (insn = PREV_INSN (p);
5235 (insn && GET_CODE (insn) == NOTE
5236 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5237 insn = PREV_INSN (insn))
5238 ;
5239
5240 if (insn)
5241 set = single_set (insn);
5242
5243 if (set != 0
5244 && (SET_DEST (set) == x
5245 || (GET_CODE (SET_DEST (set)) == SUBREG
5246 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5247 <= UNITS_PER_WORD)
5248 && SUBREG_REG (SET_DEST (set)) == x)))
5249 return basic_induction_var (SET_SRC (set),
5250 (GET_MODE (SET_SRC (set)) == VOIDmode
5251 ? GET_MODE (x)
5252 : GET_MODE (SET_SRC (set))),
5253 dest_reg, insn,
5254 inc_val, mult_val);
5255 /* ... fall through ... */
5256
5257 /* Can accept constant setting of biv only when inside innermost loop.
5258 Otherwise, a biv of an inner loop may be incorrectly recognized
5259 as a biv of the outer loop,
5260 causing code to be moved INTO the inner loop. */
5261 case MEM:
5262 if (invariant_p (x) != 1)
5263 return 0;
5264 case CONST_INT:
5265 case SYMBOL_REF:
5266 case CONST:
5267 /* convert_modes aborts if we try to convert to or from CCmode, so just
5268 exclude that case. It is very unlikely that a condition code value
5269 would be a useful iterator anyway. */
5270 if (loops_enclosed == 1
5271 && GET_MODE_CLASS (mode) != MODE_CC
5272 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5273 {
5274 /* Possible bug here? Perhaps we don't know the mode of X. */
5275 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5276 *mult_val = const0_rtx;
5277 return 1;
5278 }
5279 else
5280 return 0;
5281
5282 case SIGN_EXTEND:
5283 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5284 dest_reg, p, inc_val, mult_val);
5285 case ASHIFTRT:
5286 /* Similar, since this can be a sign extension. */
5287 for (insn = PREV_INSN (p);
5288 (insn && GET_CODE (insn) == NOTE
5289 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5290 insn = PREV_INSN (insn))
5291 ;
5292
5293 if (insn)
5294 set = single_set (insn);
5295
5296 if (set && SET_DEST (set) == XEXP (x, 0)
5297 && GET_CODE (XEXP (x, 1)) == CONST_INT
5298 && INTVAL (XEXP (x, 1)) >= 0
5299 && GET_CODE (SET_SRC (set)) == ASHIFT
5300 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5301 return basic_induction_var (XEXP (SET_SRC (set), 0),
5302 GET_MODE (XEXP (x, 0)),
5303 dest_reg, insn, inc_val, mult_val);
5304 return 0;
5305
5306 default:
5307 return 0;
5308 }
5309 }
5310 \f
5311 /* A general induction variable (giv) is any quantity that is a linear
5312 function of a basic induction variable,
5313 i.e. giv = biv * mult_val + add_val.
5314 The coefficients can be any loop invariant quantity.
5315 A giv need not be computed directly from the biv;
5316 it can be computed by way of other givs. */
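
/* For instance, in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter I is a biv, and the address of a[i], computed as
   a + 4*i on a machine with 4-byte ints, is a giv with
   mult_val == 4 and add_val == a.  */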
5317
5318 /* Determine whether X computes a giv.
5319 If it does, return a nonzero value
5320 which is the benefit from eliminating the computation of X;
5321 set *SRC_REG to the register of the biv that it is computed from;
5322 set *ADD_VAL and *MULT_VAL to the coefficients,
5323 such that the value of X is biv * mult + add; */
5324
5325 static int
5326 general_induction_var (x, src_reg, add_val, mult_val)
5327 rtx x;
5328 rtx *src_reg;
5329 rtx *add_val;
5330 rtx *mult_val;
5331 {
5332 rtx orig_x = x;
5333 int benefit = 0;
5334 char *storage;
5335
5336 /* If this is an invariant, forget it, it isn't a giv. */
5337 if (invariant_p (x) == 1)
5338 return 0;
5339
5340 /* See if the expression could be a giv and get its form.
5341 Mark our place on the obstack in case we don't find a giv. */
5342 storage = (char *) oballoc (0);
5343 x = simplify_giv_expr (x, &benefit);
5344 if (x == 0)
5345 {
5346 obfree (storage);
5347 return 0;
5348 }
5349
5350 switch (GET_CODE (x))
5351 {
5352 case USE:
5353 case CONST_INT:
5354 /* Since this is now an invariant and wasn't before, it must be a giv
5355 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5356 with. */
5357 *src_reg = loop_iv_list->biv->dest_reg;
5358 *mult_val = const0_rtx;
5359 *add_val = x;
5360 break;
5361
5362 case REG:
5363 /* This is equivalent to a BIV. */
5364 *src_reg = x;
5365 *mult_val = const1_rtx;
5366 *add_val = const0_rtx;
5367 break;
5368
5369 case PLUS:
5370 /* Either (plus (biv) (invar)) or
5371 (plus (mult (biv) (invar_1)) (invar_2)). */
5372 if (GET_CODE (XEXP (x, 0)) == MULT)
5373 {
5374 *src_reg = XEXP (XEXP (x, 0), 0);
5375 *mult_val = XEXP (XEXP (x, 0), 1);
5376 }
5377 else
5378 {
5379 *src_reg = XEXP (x, 0);
5380 *mult_val = const1_rtx;
5381 }
5382 *add_val = XEXP (x, 1);
5383 break;
5384
5385 case MULT:
5386 /* ADD_VAL is zero. */
5387 *src_reg = XEXP (x, 0);
5388 *mult_val = XEXP (x, 1);
5389 *add_val = const0_rtx;
5390 break;
5391
5392 default:
5393 abort ();
5394 }
5395
5396 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5397 one unless they are CONST_INT). */
5398 if (GET_CODE (*add_val) == USE)
5399 *add_val = XEXP (*add_val, 0);
5400 if (GET_CODE (*mult_val) == USE)
5401 *mult_val = XEXP (*mult_val, 0);
5402
5403 benefit += rtx_cost (orig_x, SET);
5404
5405 /* Always return some benefit if this is a giv so it will be detected
5406 as such. This allows elimination of bivs that might otherwise
5407 not be eliminated. */
5408 return benefit == 0 ? 1 : benefit;
5409 }
5410 \f
5411 /* Given an expression, X, try to form it as a linear function of a biv.
5412 We will canonicalize it to be of the form
5413 (plus (mult (BIV) (invar_1))
5414 (invar_2))
5415 with possible degeneracies.
5416
5417 The invariant expressions must each be of a form that can be used as a
5418 machine operand. We surround them with a USE rtx (a hack, but localized
5419 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5420 routine; it is the caller's responsibility to strip them.
5421
5422 If no such canonicalization is possible (i.e., two biv's are used or an
5423 expression that is neither invariant nor a biv or giv), this routine
5424 returns 0.
5425
5426 For a non-zero return, the result will have a code of CONST_INT, USE,
5427 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5428
5429 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
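
/* As a worked example (assuming I is a biv and B is loop-invariant),
   the expression (3*I + B) + 4*I is canonicalized here to

	(plus (mult (reg I) (const_int 7))
	      (use (reg B)))

   with the invariant B wrapped in a USE as described above.  */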
5430
5431 static rtx
5432 simplify_giv_expr (x, benefit)
5433 rtx x;
5434 int *benefit;
5435 {
5436 enum machine_mode mode = GET_MODE (x);
5437 rtx arg0, arg1;
5438 rtx tem;
5439
5440 /* If this is not an integer mode, or if we cannot do arithmetic in this
5441 mode, this can't be a giv. */
5442 if (mode != VOIDmode
5443 && (GET_MODE_CLASS (mode) != MODE_INT
5444 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5445 return 0;
5446
5447 switch (GET_CODE (x))
5448 {
5449 case PLUS:
5450 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5451 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5452 if (arg0 == 0 || arg1 == 0)
5453 return 0;
5454
5455 /* Put constant last, CONST_INT last if both constant. */
5456 if ((GET_CODE (arg0) == USE
5457 || GET_CODE (arg0) == CONST_INT)
5458 && GET_CODE (arg1) != CONST_INT)
5459 tem = arg0, arg0 = arg1, arg1 = tem;
5460
5461 /* Handle addition of zero, then addition of an invariant. */
5462 if (arg1 == const0_rtx)
5463 return arg0;
5464 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5465 switch (GET_CODE (arg0))
5466 {
5467 case CONST_INT:
5468 case USE:
5469 /* Both invariant. Only valid if sum is machine operand.
5470 First strip off possible USE on the operands. */
5471 if (GET_CODE (arg0) == USE)
5472 arg0 = XEXP (arg0, 0);
5473
5474 if (GET_CODE (arg1) == USE)
5475 arg1 = XEXP (arg1, 0);
5476
5477 tem = 0;
5478 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5479 {
5480 tem = plus_constant (arg0, INTVAL (arg1));
5481 if (GET_CODE (tem) != CONST_INT)
5482 tem = gen_rtx_USE (mode, tem);
5483 }
5484 else
5485 {
5486 /* Adding two invariants must result in an invariant,
5487 so enclose addition operation inside a USE and
5488 return it. */
5489 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5490 }
5491
5492 return tem;
5493
5494 case REG:
5495 case MULT:
5496 /* biv + invar or mult + invar. Return sum. */
5497 return gen_rtx_PLUS (mode, arg0, arg1);
5498
5499 case PLUS:
5500 /* (a + invar_1) + invar_2. Associate. */
5501 return simplify_giv_expr (gen_rtx_PLUS (mode,
5502 XEXP (arg0, 0),
5503 gen_rtx_PLUS (mode,
5504 XEXP (arg0, 1), arg1)),
5505 benefit);
5506
5507 default:
5508 abort ();
5509 }
5510
5511 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5512 MULT to reduce cases. */
5513 if (GET_CODE (arg0) == REG)
5514 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5515 if (GET_CODE (arg1) == REG)
5516 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5517
5518 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5519 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5520 Recurse to associate the second PLUS. */
5521 if (GET_CODE (arg1) == MULT)
5522 tem = arg0, arg0 = arg1, arg1 = tem;
5523
5524 if (GET_CODE (arg1) == PLUS)
5525 return simplify_giv_expr (gen_rtx_PLUS (mode,
5526 gen_rtx_PLUS (mode, arg0,
5527 XEXP (arg1, 0)),
5528 XEXP (arg1, 1)),
5529 benefit);
5530
5531 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5532 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5533 abort ();
5534
5535 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5536 return 0;
5537
5538 return simplify_giv_expr (gen_rtx_MULT (mode,
5539 XEXP (arg0, 0),
5540 gen_rtx_PLUS (mode,
5541 XEXP (arg0, 1),
5542 XEXP (arg1, 1))),
5543 benefit);
5544
5545 case MINUS:
5546 /* Handle "a - b" as "a + b * (-1)". */
5547 return simplify_giv_expr (gen_rtx_PLUS (mode,
5548 XEXP (x, 0),
5549 gen_rtx_MULT (mode, XEXP (x, 1),
5550 constm1_rtx)),
5551 benefit);
5552
5553 case MULT:
5554 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5555 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5556 if (arg0 == 0 || arg1 == 0)
5557 return 0;
5558
5559 /* Put constant last, CONST_INT last if both constant. */
5560 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5561 && GET_CODE (arg1) != CONST_INT)
5562 tem = arg0, arg0 = arg1, arg1 = tem;
5563
5564 /* If second argument is not now constant, not giv. */
5565 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5566 return 0;
5567
5568 /* Handle multiply by 0 or 1. */
5569 if (arg1 == const0_rtx)
5570 return const0_rtx;
5571
5572 else if (arg1 == const1_rtx)
5573 return arg0;
5574
5575 switch (GET_CODE (arg0))
5576 {
5577 case REG:
5578 /* biv * invar. Done. */
5579 return gen_rtx_MULT (mode, arg0, arg1);
5580
5581 case CONST_INT:
5582 /* Product of two constants. */
5583 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5584
5585 case USE:
5586 /* invar * invar. Not giv. */
5587 return 0;
5588
5589 case MULT:
5590 /* (a * invar_1) * invar_2. Associate. */
5591 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5592 gen_rtx_MULT (mode,
5593 XEXP (arg0, 1),
5594 arg1)),
5595 benefit);
5596
5597 case PLUS:
5598 /* (a + invar_1) * invar_2. Distribute. */
5599 return simplify_giv_expr (gen_rtx_PLUS (mode,
5600 gen_rtx_MULT (mode,
5601 XEXP (arg0, 0),
5602 arg1),
5603 gen_rtx_MULT (mode,
5604 XEXP (arg0, 1),
5605 arg1)),
5606 benefit);
5607
5608 default:
5609 abort ();
5610 }
5611
5612 case ASHIFT:
5613 /* Shift by constant is multiply by power of two. */
5614 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5615 return 0;
5616
5617 return simplify_giv_expr (gen_rtx_MULT (mode,
5618 XEXP (x, 0),
5619 GEN_INT ((HOST_WIDE_INT) 1
5620 << INTVAL (XEXP (x, 1)))),
5621 benefit);
5622
5623 case NEG:
5624 /* "-a" is "a * (-1)" */
5625 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5626 benefit);
5627
5628 case NOT:
5629 /* "~a" is "-a - 1". Silly, but easy. */
5630 return simplify_giv_expr (gen_rtx_MINUS (mode,
5631 gen_rtx_NEG (mode, XEXP (x, 0)),
5632 const1_rtx),
5633 benefit);
5634
5635 case USE:
5636 /* Already in proper form for invariant. */
5637 return x;
5638
5639 case REG:
5640 /* If this is a new register, we can't deal with it. */
5641 if (REGNO (x) >= max_reg_before_loop)
5642 return 0;
5643
5644 /* Check for biv or giv. */
5645 switch (reg_iv_type[REGNO (x)])
5646 {
5647 case BASIC_INDUCT:
5648 return x;
5649 case GENERAL_INDUCT:
5650 {
5651 struct induction *v = reg_iv_info[REGNO (x)];
5652
5653 /* Form expression from giv and add benefit. Ensure this giv
5654 can derive another and subtract any needed adjustment if so. */
5655 *benefit += v->benefit;
5656 if (v->cant_derive)
5657 return 0;
5658
5659 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5660 v->mult_val),
5661 v->add_val);
5662 if (v->derive_adjustment)
5663 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5664 return simplify_giv_expr (tem, benefit);
5665 }
5666
5667 default:
5668 break;
5669 }
5670
5671 /* Fall through to general case. */
5672 default:
5673 /* If invariant, return as USE (unless CONST_INT).
5674 Otherwise, not giv. */
5675 if (GET_CODE (x) == USE)
5676 x = XEXP (x, 0);
5677
5678 if (invariant_p (x) == 1)
5679 {
5680 if (GET_CODE (x) == CONST_INT)
5681 return x;
5682 else
5683 return gen_rtx_USE (mode, x);
5684 }
5685 else
5686 return 0;
5687 }
5688 }
5689 \f
5690 /* Help detect a giv that is calculated by several consecutive insns;
5691 for example,
5692 giv = biv * M
5693 giv = giv + A
5694 The caller has already identified the first insn P as having a giv as dest;
5695 we check that all other insns that set the same register follow
5696 immediately after P, that they alter nothing else,
5697 and that the result of the last is still a giv.
5698
5699 The value is 0 if the reg set in P is not really a giv.
5700 Otherwise, the value is the amount gained by eliminating
5701 all the consecutive insns that compute the value.
5702
5703 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5704 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5705
5706 The coefficients of the ultimate giv value are stored in
5707 *MULT_VAL and *ADD_VAL. */
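
/* Schematically, the sequence handled here looks like

	r = i * 4;		first insn P, benefit FIRST_BENEFIT
	r = r + 16;		consecutive insn, still sets only R

   and the final coefficients reported are *MULT_VAL = 4 and
   *ADD_VAL = 16 (an invented example, for illustration only).  */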
5708
5709 static int
5710 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5711 add_val, mult_val)
5712 int first_benefit;
5713 rtx p;
5714 rtx src_reg;
5715 rtx dest_reg;
5716 rtx *add_val;
5717 rtx *mult_val;
5718 {
5719 int count;
5720 enum rtx_code code;
5721 int benefit;
5722 rtx temp;
5723 rtx set;
5724
5725 /* Indicate that this is a giv so that we can update the value produced in
5726 each insn of the multi-insn sequence.
5727
5728 This induction structure will be used only by the call to
5729 general_induction_var below, so we can allocate it on our stack.
5730 If this is a giv, our caller will replace the induct var entry with
5731 a new induction structure. */
5732 struct induction *v
5733 = (struct induction *) alloca (sizeof (struct induction));
5734 v->src_reg = src_reg;
5735 v->mult_val = *mult_val;
5736 v->add_val = *add_val;
5737 v->benefit = first_benefit;
5738 v->cant_derive = 0;
5739 v->derive_adjustment = 0;
5740
5741 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5742 reg_iv_info[REGNO (dest_reg)] = v;
5743
5744 count = n_times_set[REGNO (dest_reg)] - 1;
5745
5746 while (count > 0)
5747 {
5748 p = NEXT_INSN (p);
5749 code = GET_CODE (p);
5750
5751 /* If libcall, skip to end of call sequence. */
5752 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5753 p = XEXP (temp, 0);
5754
5755 if (code == INSN
5756 && (set = single_set (p))
5757 && GET_CODE (SET_DEST (set)) == REG
5758 && SET_DEST (set) == dest_reg
5759 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5760 add_val, mult_val))
5761 /* Giv created by equivalent expression. */
5762 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5763 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5764 add_val, mult_val))))
5765 && src_reg == v->src_reg)
5766 {
5767 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5768 benefit += libcall_benefit (p);
5769
5770 count--;
5771 v->mult_val = *mult_val;
5772 v->add_val = *add_val;
5773 v->benefit = benefit;
5774 }
5775 else if (code != NOTE)
5776 {
5777 /* Allow insns that set something other than this giv to a
5778 constant. Such insns are needed on machines which cannot
5779 directly encode large constants, and should not disqualify a giv. */
5780 if (code == INSN
5781 && (set = single_set (p))
5782 && SET_DEST (set) != dest_reg
5783 && CONSTANT_P (SET_SRC (set)))
5784 continue;
5785
5786 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5787 return 0;
5788 }
5789 }
5790
5791 return v->benefit;
5792 }
5793 \f
5794 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5795 represented by G1. If no such expression can be found, or it is clear that
5796 it cannot possibly be a valid address, 0 is returned.
5797
5798 To perform the computation, we note that
5799 G1 = a * v + b and
5800 G2 = c * v + d
5801 where `v' is the biv.
5802
5803 So G2 = (c/a) * G1 + (d - b*c/a) */
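
/* A worked instance of the algebra above (numbers invented):
   with G1 = 4*v + 8 and G2 = 8*v + 4, we get c/a = 2 and
   d - b*c/a = 4 - 16 = -12, so G2 = 2*G1 - 12.  */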
5804
5805 #ifdef ADDRESS_COST
5806 static rtx
5807 express_from (g1, g2)
5808 struct induction *g1, *g2;
5809 {
5810 rtx mult, add;
5811
5812 /* The value that G1 will be multiplied by must be a constant integer. Also,
5813 the only chance we have of getting a valid address is if b*c/a (see above
5814 for notation) is also an integer. */
5815 if (GET_CODE (g1->mult_val) != CONST_INT
5816 || GET_CODE (g2->mult_val) != CONST_INT
5817 || GET_CODE (g1->add_val) != CONST_INT
5818 || g1->mult_val == const0_rtx
5819 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5820 return 0;
5821
5822 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5823 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5824
5825 /* Form simplified final result. */
5826 if (mult == const0_rtx)
5827 return add;
5828 else if (mult == const1_rtx)
5829 mult = g1->dest_reg;
5830 else
5831 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5832
5833 if (add == const0_rtx)
5834 return mult;
5835 else
5836 return gen_rtx_PLUS (g2->mode, mult, add);
5837 }
5838 #endif
5839 \f
5840 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5841 (either directly or via an address expression) a register used to represent
5842 G1. Set g2->new_reg to a representation of G1 (normally just
5843 g1->dest_reg). */
5844
5845 static int
5846 combine_givs_p (g1, g2)
5847 struct induction *g1, *g2;
5848 {
5849 #ifdef ADDRESS_COST
5850 rtx tem;
5851 #endif
5852
5853 /* If these givs are identical, they can be combined. */
5854 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5855 && rtx_equal_p (g1->add_val, g2->add_val))
5856 {
5857 g2->new_reg = g1->dest_reg;
5858 return 1;
5859 }
5860
5861 #ifdef ADDRESS_COST
5862 /* If G2 can be expressed as a function of G1 and that function is valid
5863 as an address and no more expensive than using a register for G2,
5864 the expression of G2 in terms of G1 can be used. */
5865 if (g2->giv_type == DEST_ADDR
5866 && (tem = express_from (g1, g2)) != 0
5867 && memory_address_p (g2->mem_mode, tem)
5868 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5869 {
5870 g2->new_reg = tem;
5871 return 1;
5872 }
5873 #endif
5874
5875 return 0;
5876 }
5877 \f
5878 #ifdef GIV_SORT_CRITERION
5879 /* Compare two givs and sort the most desirable one for combinations first.
5880 This is used only in one qsort call below. */
5881
5882 static int
5883 giv_sort (x, y)
5884 struct induction **x, **y;
5885 {
5886 GIV_SORT_CRITERION (*x, *y);
5887
5888 return 0;
5889 }
5890 #endif
5891
5892 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5893 any other. If so, point SAME to the giv combined with and set NEW_REG to
5894 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5895 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5896
5897 static void
5898 combine_givs (bl)
5899 struct iv_class *bl;
5900 {
5901 struct induction *g1, *g2, **giv_array;
5902 int i, j, giv_count, pass;
5903
5904 /* Count givs, because bl->giv_count is incorrect here. */
5905 giv_count = 0;
5906 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5907 giv_count++;
5908
5909 giv_array
5910 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5911 i = 0;
5912 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5913 giv_array[i++] = g1;
5914
5915 #ifdef GIV_SORT_CRITERION
5916 /* Sort the givs if GIV_SORT_CRITERION is defined.
5917 This is usually defined for processors which lack
5918 negative register offsets so more givs may be combined. */
5919
5920 if (loop_dump_stream)
5921 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5922
5923 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5924 #endif
5925
5926 for (i = 0; i < giv_count; i++)
5927 {
5928 g1 = giv_array[i];
5929 for (pass = 0; pass <= 1; pass++)
5930 for (j = 0; j < giv_count; j++)
5931 {
5932 g2 = giv_array[j];
5933 if (g1 != g2
5934 /* First try to combine with replaceable givs, then all givs. */
5935 && (g1->replaceable || pass == 1)
5936 /* If either has already been combined or is to be ignored, can't
5937 combine. */
5938 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5939 /* If something has been based on G2, G2 cannot itself be based
5940 on something else. */
5941 && ! g2->combined_with
5942 && combine_givs_p (g1, g2))
5943 {
5944 /* g2->new_reg set by `combine_givs_p' */
5945 g2->same = g1;
5946 g1->combined_with = 1;
5947
5948 /* If one of these givs is a DEST_REG that was only used
5949 once, by the other giv, this is actually a single use.
5950 The DEST_REG has the correct cost, while the other giv
5951 counts the REG use too often. */
5952 if (g2->giv_type == DEST_REG
5953 && n_times_used[REGNO (g2->dest_reg)] == 1
5954 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5955 g1->benefit = g2->benefit;
5956 else if (g1->giv_type != DEST_REG
5957 || n_times_used[REGNO (g1->dest_reg)] != 1
5958 || ! reg_mentioned_p (g1->dest_reg,
5959 PATTERN (g2->insn)))
5960 {
5961 g1->benefit += g2->benefit;
5962 g1->times_used += g2->times_used;
5963 }
5964 /* ??? The new final_[bg]iv_value code does a much better job
5965 of finding replaceable giv's, and hence this code may no
5966 longer be necessary. */
5967 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5968 g1->benefit -= copy_cost;
5969 g1->lifetime += g2->lifetime;
5970
5971 if (loop_dump_stream)
5972 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5973 INSN_UID (g2->insn), INSN_UID (g1->insn));
5974 }
5975 }
5976 }
5977 }
5978 \f
5979 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
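
/* The sequence emitted is, schematically,

	reg = b;
	reg = reg * m;
	reg = reg + a;

   though expand_mult_add may produce something tighter (for example
   a single multiply-add, or a shift-and-add sequence when M is a
   power of two).  */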
5980
5981 void
5982 emit_iv_add_mult (b, m, a, reg, insert_before)
5983 rtx b; /* initial value of basic induction variable */
5984 rtx m; /* multiplicative constant */
5985 rtx a; /* additive constant */
5986 rtx reg; /* destination register */
5987 rtx insert_before;
5988 {
5989 rtx seq;
5990 rtx result;
5991
5992 /* Prevent unexpected sharing of these rtx. */
5993 a = copy_rtx (a);
5994 b = copy_rtx (b);
5995
5996 /* Increase the lifetime of any invariants moved further in code. */
5997 update_reg_last_use (a, insert_before);
5998 update_reg_last_use (b, insert_before);
5999 update_reg_last_use (m, insert_before);
6000
6001 start_sequence ();
6002 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6003 if (reg != result)
6004 emit_move_insn (reg, result);
6005 seq = gen_sequence ();
6006 end_sequence ();
6007
6008 emit_insn_before (seq, insert_before);
6009
6010 record_base_value (REGNO (reg), b, 0);
6011 }
6012 \f
6013 /* Test whether A * B can be computed without
6014 an actual multiply insn. Value is 1 if so. */
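
/* For example, on a typical 32-bit target, A * 5 can be expanded as

	tmp = a << 2;
	tmp = tmp + a;

   two cheap insns and no multiply, so the product is "cheap"; a
   multiply by a large prime would instead expand to a real multiply
   insn (or a libcall) and be rejected.  (Illustration only; the
   actual sequence depends on the target's expand_mult.)  */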
6015
6016 static int
6017 product_cheap_p (a, b)
6018 rtx a;
6019 rtx b;
6020 {
6021 int i;
6022 rtx tmp;
6023 struct obstack *old_rtl_obstack = rtl_obstack;
6024 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6025 int win = 1;
6026
6027 /* If only one is constant, make it B. */
6028 if (GET_CODE (a) == CONST_INT)
6029 tmp = a, a = b, b = tmp;
6030
6031 /* If first constant, both constant, so don't need multiply. */
6032 if (GET_CODE (a) == CONST_INT)
6033 return 1;
6034
6035 /* If second not constant, neither is constant, so would need multiply. */
6036 if (GET_CODE (b) != CONST_INT)
6037 return 0;
6038
6039 /* One operand is constant, so might not need multiply insn. Generate the
6040 code for the multiply and see if a call or multiply, or long sequence
6041 of insns is generated. */
6042
6043 rtl_obstack = &temp_obstack;
6044 start_sequence ();
6045 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6046 tmp = gen_sequence ();
6047 end_sequence ();
6048
6049 if (GET_CODE (tmp) == SEQUENCE)
6050 {
6051 if (XVEC (tmp, 0) == 0)
6052 win = 1;
6053 else if (XVECLEN (tmp, 0) > 3)
6054 win = 0;
6055 else
6056 for (i = 0; i < XVECLEN (tmp, 0); i++)
6057 {
6058 rtx insn = XVECEXP (tmp, 0, i);
6059
6060 if (GET_CODE (insn) != INSN
6061 || (GET_CODE (PATTERN (insn)) == SET
6062 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6063 || (GET_CODE (PATTERN (insn)) == PARALLEL
6064 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6065 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6066 {
6067 win = 0;
6068 break;
6069 }
6070 }
6071 }
6072 else if (GET_CODE (tmp) == SET
6073 && GET_CODE (SET_SRC (tmp)) == MULT)
6074 win = 0;
6075 else if (GET_CODE (tmp) == PARALLEL
6076 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6077 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6078 win = 0;
6079
6080 /* Free any storage we obtained in generating this multiply and restore rtl
6081 allocation to its normal obstack. */
6082 obstack_free (&temp_obstack, storage);
6083 rtl_obstack = old_rtl_obstack;
6084
6085 return win;
6086 }
6087 \f
6088 /* Check to see if loop can be terminated by a "decrement and branch until
6089 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
6090 Also try reversing an increment loop to a decrement loop
6091 to see if the optimization can be performed.
6092 Value is nonzero if optimization was performed. */
6093
6094 /* This is useful even if the architecture doesn't have such an insn,
6095 because it might change a loop which increments from 0 to n into a loop
6096 which decrements from n to 0. A loop that decrements to zero is usually
6097 faster than one that increments from zero. */
6098
6099 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6100 such as approx_final_value, biv_total_increment, loop_iterations, and
6101 final_[bg]iv_value. */
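
/* In source terms the reversal attempted below turns, roughly,

	for (i = 0; i < 10; i++)	into	for (i = 9; i >= 0; i--)
	  body;					  body;

   (an illustration, assuming I is used only to count iterations);
   the exit test then compares against zero, where a
   decrement-and-branch-until-zero insn can be used.  */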
6102
6103 static int
6104 check_dbra_loop (loop_end, insn_count, loop_start)
6105 rtx loop_end;
6106 int insn_count;
6107 rtx loop_start;
6108 {
6109 struct iv_class *bl;
6110 rtx reg;
6111 rtx jump_label;
6112 rtx final_value;
6113 rtx start_value;
6114 rtx new_add_val;
6115 rtx comparison;
6116 rtx before_comparison;
6117 rtx p;
6118 rtx jump;
6119 rtx first_compare;
6120 int compare_and_branch;
6121
6122 /* If last insn is a conditional branch, and the insn before tests a
6123 register value, try to optimize it. Otherwise, we can't do anything. */
6124
6125 jump = PREV_INSN (loop_end);
6126 comparison = get_condition_for_loop (jump);
6127 if (comparison == 0)
6128 return 0;
6129
6130 /* Try to compute whether the compare/branch at the loop end is one or
6131 two instructions. */
6132 get_condition (jump, &first_compare);
6133 if (first_compare == jump)
6134 compare_and_branch = 1;
6135 else if (first_compare == prev_nonnote_insn (jump))
6136 compare_and_branch = 2;
6137 else
6138 return 0;
6139
6140 /* Check all of the bivs to see if the compare uses one of them.
6141 Skip biv's set more than once because we can't guarantee that
6142 it will be zero on the last iteration. Also skip if the biv is
6143 used between its update and the test insn. */
6144
6145 for (bl = loop_iv_list; bl; bl = bl->next)
6146 {
6147 if (bl->biv_count == 1
6148 && bl->biv->dest_reg == XEXP (comparison, 0)
6149 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6150 first_compare))
6151 break;
6152 }
6153
6154 if (! bl)
6155 return 0;
6156
6157 /* Look for the case where the basic induction variable is always
6158 nonnegative, and equals zero on the last iteration.
6159 In this case, add a reg_note REG_NONNEG, which allows the
6160 m68k DBRA instruction to be used. */
6161
6162 if (((GET_CODE (comparison) == GT
6163 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6164 && INTVAL (XEXP (comparison, 1)) == -1)
6165 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6166 && GET_CODE (bl->biv->add_val) == CONST_INT
6167 && INTVAL (bl->biv->add_val) < 0)
6168 {
6169 /* Initial value must be greater than 0, and
6170 init_val % -dec_value == 0, to ensure that the biv equals zero on
6171 the last iteration. */
6172
6173 if (GET_CODE (bl->initial_value) == CONST_INT
6174 && INTVAL (bl->initial_value) > 0
6175 && (INTVAL (bl->initial_value)
6176 % (-INTVAL (bl->biv->add_val))) == 0)
6177 {
6178 /* register always nonnegative, add REG_NOTE to branch */
6179 REG_NOTES (PREV_INSN (loop_end))
6180 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6181 REG_NOTES (PREV_INSN (loop_end)));
6182 bl->nonneg = 1;
6183
6184 return 1;
6185 }
6186
6187 /* If the decrement is 1 and the value was tested as >= 0 before
6188 the loop, then we can safely optimize. */
6189 for (p = loop_start; p; p = PREV_INSN (p))
6190 {
6191 if (GET_CODE (p) == CODE_LABEL)
6192 break;
6193 if (GET_CODE (p) != JUMP_INSN)
6194 continue;
6195
6196 before_comparison = get_condition_for_loop (p);
6197 if (before_comparison
6198 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6199 && GET_CODE (before_comparison) == LT
6200 && XEXP (before_comparison, 1) == const0_rtx
6201 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6202 && INTVAL (bl->biv->add_val) == -1)
6203 {
6204 REG_NOTES (PREV_INSN (loop_end))
6205 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6206 REG_NOTES (PREV_INSN (loop_end)));
6207 bl->nonneg = 1;
6208
6209 return 1;
6210 }
6211 }
6212 }
6213 else if (num_mem_sets <= 1)
6214 {
6215 /* Try to change inc to dec, so can apply above optimization. */
6216 /* Can do this if:
6217 all registers modified are induction variables or invariant,
6218 all memory references have non-overlapping addresses
6219 (obviously true if only one write)
6220 and we allow 2 insns for the compare/jump at the end of the loop. */
6221 /* Also, we must avoid any instructions which use both the reversed
6222 biv and another biv. Such instructions will fail if the loop is
6223 reversed. We meet this condition by requiring that either
6224 no_use_except_counting is true, or else that there is only
6225 one biv. */
6226 int num_nonfixed_reads = 0;
6227 /* 1 if the iteration var is used only to count iterations. */
6228 int no_use_except_counting = 0;
6229 /* 1 if the loop has no memory store, or it has a single memory store
6230 which is reversible. */
6231 int reversible_mem_store = 1;
6232
6233 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6234 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6235 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6236
6237 if (bl->giv_count == 0
6238 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6239 {
6240 rtx bivreg = regno_reg_rtx[bl->regno];
6241
6242 /* If there are no givs for this biv, and the only exit is the
6243 fall through at the end of the loop, then
6244 see if perhaps there are no uses except to count. */
6245 no_use_except_counting = 1;
6246 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6247 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6248 {
6249 rtx set = single_set (p);
6250
6251 if (set && GET_CODE (SET_DEST (set)) == REG
6252 && REGNO (SET_DEST (set)) == bl->regno)
6253 /* An insn that sets the biv is okay. */
6254 ;
6255 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6256 || p == prev_nonnote_insn (loop_end))
6257 /* Don't bother about the end test. */
6258 ;
6259 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6260 /* Any other use of the biv is no good. */
6261 {
6262 no_use_except_counting = 0;
6263 break;
6264 }
6265 }
6266 }
6267
6268 /* If the loop has a single store, and the destination address is
6269 invariant, then we can't reverse the loop, because this address
6270 might then have the wrong value at loop exit.
6271 This would work if the source was invariant also, however, in that
6272 case, the insn should have been moved out of the loop. */
6273
6274 if (num_mem_sets == 1)
6275 reversible_mem_store
6276 = (! unknown_address_altered
6277 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6278
6279 /* This code only acts for innermost loops. Also it simplifies
6280 the memory address check by only reversing loops with
6281 zero or one memory access.
6282 Two memory accesses could involve parts of the same array,
6283 and that can't be reversed. */
6284
6285 if (num_nonfixed_reads <= 1
6286 && !loop_has_call
6287 && !loop_has_volatile
6288 && reversible_mem_store
6289 && (no_use_except_counting
6290 || ((bl->giv_count + bl->biv_count + num_mem_sets
6291 + num_movables + compare_and_branch == insn_count)
6292 && (bl == loop_iv_list && bl->next == 0))))
6293 {
6294 rtx tem;
6295
6296 /* Loop can be reversed. */
6297 if (loop_dump_stream)
6298 fprintf (loop_dump_stream, "Can reverse loop\n");
6299
6300 /* Now check other conditions:
6301
6302 The increment must be a constant, as must the initial value,
6303 and the comparison code must be LT.
6304
6305 This test can probably be improved since +/- 1 in the constant
6306 can be obtained by changing LT to LE and vice versa; this is
6307 confusing. */
6308
6309 if (comparison
6310 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6311 /* LE gets turned into LT */
6312 && GET_CODE (comparison) == LT
6313 && GET_CODE (bl->initial_value) == CONST_INT)
6314 {
6315 HOST_WIDE_INT add_val, comparison_val;
6316 rtx initial_value;
6317
6318 add_val = INTVAL (bl->biv->add_val);
6319 comparison_val = INTVAL (XEXP (comparison, 1));
6320 final_value = XEXP (comparison, 1);
6321 initial_value = bl->initial_value;
6322
6323 /* Normalize the initial value if it is an integer and
6324 has no other use except as a counter. This will allow
6325 a few more loops to be reversed. */
6326 if (no_use_except_counting
6327 && GET_CODE (initial_value) == CONST_INT)
6328 {
6329 comparison_val = comparison_val - INTVAL (bl->initial_value);
6330 /* Check for overflow. If comparison_val ends up as a
6331 negative value, then we can't reverse the loop. */
6332 if (comparison_val >= 0)
6333 initial_value = const0_rtx;
6334 }
6335
6336 /* If the initial value is not zero, or if the comparison
6337 value is not an exact multiple of the increment, then we
6338 cannot reverse this loop. */
6339 if (initial_value != const0_rtx
6340 || (comparison_val % add_val) != 0)
6341 return 0;
6342
6343 /* Reset these in case we normalized the initial value
6344 and comparison value above. */
6345 bl->initial_value = initial_value;
6346 XEXP (comparison, 1) = GEN_INT (comparison_val);
6347
6348 /* Register will always be nonnegative, with value
6349 0 on last iteration if loop reversed */
6350
6351 /* Save some info needed to produce the new insns. */
6352 reg = bl->biv->dest_reg;
6353 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6354 if (jump_label == pc_rtx)
6355 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6356 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6357
6358 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6359 - INTVAL (bl->biv->add_val));
6360
6361 /* Initialize biv to start_value before loop start.
6362 The old initializing insn will be deleted as a
6363 dead store by flow.c. */
6364 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6365
6366 /* Add insn to decrement register, and delete insn
6367 that incremented the register. */
6368 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6369 bl->biv->insn);
6370 delete_insn (bl->biv->insn);
6371
6372 /* Update biv info to reflect its new status. */
6373 bl->biv->insn = p;
6374 bl->initial_value = start_value;
6375 bl->biv->add_val = new_add_val;
6376
6377 /* Inc LABEL_NUSES so that delete_insn will
6378 not delete the label. */
6379 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6380
6381 /* Emit an insn after the end of the loop to set the biv's
6382 proper exit value if it is used anywhere outside the loop. */
6383 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
6384 || ! bl->init_insn
6385 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6386 emit_insn_after (gen_move_insn (reg, final_value),
6387 loop_end);
6388
6389 /* Delete compare/branch at end of loop. */
6390 delete_insn (PREV_INSN (loop_end));
6391 if (compare_and_branch == 2)
6392 delete_insn (first_compare);
6393
6394 /* Add new compare/branch insn at end of loop. */
6395 start_sequence ();
6396 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6397 GET_MODE (reg), 0, 0);
6398 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6399 tem = gen_sequence ();
6400 end_sequence ();
6401 emit_jump_insn_before (tem, loop_end);
6402
6403 for (tem = PREV_INSN (loop_end);
6404 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6405 ;
6406 if (tem)
6407 {
6408 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6409
6410 /* Increment of LABEL_NUSES done above. */
6411 /* Register is now always nonnegative,
6412 so add REG_NONNEG note to the branch. */
6413 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6414 REG_NOTES (tem));
6415 }
6416
6417 bl->nonneg = 1;
6418
6419 /* Mark that this biv has been reversed. Each giv which depends
6420 on this biv, and which is also live past the end of the loop
6421 will have to be fixed up. */
6422
6423 bl->reversed = 1;
6424
6425 if (loop_dump_stream)
6426 fprintf (loop_dump_stream,
6427 "Reversed loop and added reg_nonneg\n");
6428
6429 return 1;
6430 }
6431 }
6432 }
6433
6434 return 0;
6435 }
6436 \f
6437 /* Verify whether the biv BL appears to be eliminable,
6438 based on the insns in the loop that refer to it.
6439 LOOP_START is the first insn of the loop, and END is the end insn.
6440
6441 If ELIMINATE_P is non-zero, actually do the elimination.
6442
6443 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6444 determine whether invariant insns should be placed inside or at the
6445 start of the loop. */
6446
6447 static int
6448 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6449 struct iv_class *bl;
6450 rtx loop_start;
6451 rtx end;
6452 int eliminate_p;
6453 int threshold, insn_count;
6454 {
6455 rtx reg = bl->biv->dest_reg;
6456 rtx p;
6457
6458 /* Scan all insns in the loop, stopping if we find one that uses the
6459 biv in a way that we cannot eliminate. */
6460
6461 for (p = loop_start; p != end; p = NEXT_INSN (p))
6462 {
6463 enum rtx_code code = GET_CODE (p);
6464 rtx where = threshold >= insn_count ? loop_start : p;
6465
6466 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6467 && reg_mentioned_p (reg, PATTERN (p))
6468 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6469 {
6470 if (loop_dump_stream)
6471 fprintf (loop_dump_stream,
6472 "Cannot eliminate biv %d: biv used in insn %d.\n",
6473 bl->regno, INSN_UID (p));
6474 break;
6475 }
6476 }
6477
6478 if (p == end)
6479 {
6480 if (loop_dump_stream)
6481 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6482 bl->regno, eliminate_p ? "was" : "can be");
6483 return 1;
6484 }
6485
6486 return 0;
6487 }
6488 \f
6489 /* If BL appears in X (part of the pattern of INSN), see if we can
6490 eliminate its use. If so, return 1. If not, return 0.
6491
6492 If BIV does not appear in X, return 1.
6493
6494 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6495 where extra insns should be added. Depending on how many items have been
6496 moved out of the loop, it will either be before INSN or at the start of
6497 the loop. */
6498
6499 static int
6500 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6501 rtx x, insn;
6502 struct iv_class *bl;
6503 int eliminate_p;
6504 rtx where;
6505 {
6506 enum rtx_code code = GET_CODE (x);
6507 rtx reg = bl->biv->dest_reg;
6508 enum machine_mode mode = GET_MODE (reg);
6509 struct induction *v;
6510 rtx arg, tem;
6511 #ifdef HAVE_cc0
6512 rtx new;
6513 #endif
6514 int arg_operand;
6515 char *fmt;
6516 int i, j;
6517
6518 switch (code)
6519 {
6520 case REG:
6521 /* If we haven't already been able to do something with this BIV,
6522 we can't eliminate it. */
6523 if (x == reg)
6524 return 0;
6525 return 1;
6526
6527 case SET:
6528 /* If this sets the BIV, it is not a problem. */
6529 if (SET_DEST (x) == reg)
6530 return 1;
6531
6532 /* If this is an insn that defines a giv, it is also ok because
6533 it will go away when the giv is reduced. */
6534 for (v = bl->giv; v; v = v->next_iv)
6535 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6536 return 1;
6537
6538 #ifdef HAVE_cc0
6539 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6540 {
6541 /* Can replace with any giv that was reduced and
6542 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6543 Require a constant for MULT_VAL, so we know it's nonzero.
6544 ??? We disable this optimization to avoid potential
6545 overflows. */
6546
6547 for (v = bl->giv; v; v = v->next_iv)
6548 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6549 && v->add_val == const0_rtx
6550 && ! v->ignore && ! v->maybe_dead && v->always_computable
6551 && v->mode == mode
6552 && 0)
6553 {
6554 /* If the giv V had the auto-inc address optimization applied
6555 to it, and INSN occurs between the giv insn and the biv
6556 insn, then we must adjust the value used here.
6557 This is rare, so we don't bother to do so. */
6558 if (v->auto_inc_opt
6559 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6560 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6561 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6562 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6563 continue;
6564
6565 if (! eliminate_p)
6566 return 1;
6567
6568 /* If the giv has the opposite direction of change,
6569 then reverse the comparison. */
6570 if (INTVAL (v->mult_val) < 0)
6571 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6572 const0_rtx, v->new_reg);
6573 else
6574 new = v->new_reg;
6575
6576 /* We can probably test that giv's reduced reg. */
6577 if (validate_change (insn, &SET_SRC (x), new, 0))
6578 return 1;
6579 }
6580
6581 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6582 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6583 Require a constant for MULT_VAL, so we know it's nonzero.
6584 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6585 overflow problem. */
6586
6587 for (v = bl->giv; v; v = v->next_iv)
6588 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6589 && ! v->ignore && ! v->maybe_dead && v->always_computable
6590 && v->mode == mode
6591 && (GET_CODE (v->add_val) == SYMBOL_REF
6592 || GET_CODE (v->add_val) == LABEL_REF
6593 || GET_CODE (v->add_val) == CONST
6594 || (GET_CODE (v->add_val) == REG
6595 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6596 {
6597 /* If the giv V had the auto-inc address optimization applied
6598 to it, and INSN occurs between the giv insn and the biv
6599 insn, then we must adjust the value used here.
6600 This is rare, so we don't bother to do so. */
6601 if (v->auto_inc_opt
6602 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6603 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6604 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6605 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6606 continue;
6607
6608 if (! eliminate_p)
6609 return 1;
6610
6611 /* If the giv has the opposite direction of change,
6612 then reverse the comparison. */
6613 if (INTVAL (v->mult_val) < 0)
6614 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6615 v->new_reg);
6616 else
6617 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6618 copy_rtx (v->add_val));
6619
6620 /* Replace biv with the giv's reduced register. */
6621 update_reg_last_use (v->add_val, insn);
6622 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6623 return 1;
6624
6625 /* Insn doesn't support that constant or invariant. Copy it
6626 into a register (it will be a loop invariant.) */
6627 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6628
6629 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6630 where);
6631
6632 /* Substitute the new register for its invariant value in
6633 the compare expression. */
6634 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6635 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6636 return 1;
6637 }
6638 }
6639 #endif
6640 break;
6641
6642 case COMPARE:
6643 case EQ: case NE:
6644 case GT: case GE: case GTU: case GEU:
6645 case LT: case LE: case LTU: case LEU:
6646 /* See if either argument is the biv. */
6647 if (XEXP (x, 0) == reg)
6648 arg = XEXP (x, 1), arg_operand = 1;
6649 else if (XEXP (x, 1) == reg)
6650 arg = XEXP (x, 0), arg_operand = 0;
6651 else
6652 break;
6653
6654 if (CONSTANT_P (arg))
6655 {
6656 /* First try to replace with any giv that has constant positive
6657 mult_val and constant add_val. We might be able to support
6658 negative mult_val, but it seems complex to do it in general. */
6659
6660 for (v = bl->giv; v; v = v->next_iv)
6661 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6662 && (GET_CODE (v->add_val) == SYMBOL_REF
6663 || GET_CODE (v->add_val) == LABEL_REF
6664 || GET_CODE (v->add_val) == CONST
6665 || (GET_CODE (v->add_val) == REG
6666 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6667 && ! v->ignore && ! v->maybe_dead && v->always_computable
6668 && v->mode == mode)
6669 {
6670 /* If the giv V had the auto-inc address optimization applied
6671 to it, and INSN occurs between the giv insn and the biv
6672 insn, then we must adjust the value used here.
6673 This is rare, so we don't bother to do so. */
6674 if (v->auto_inc_opt
6675 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6676 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6677 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6678 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6679 continue;
6680
6681 if (! eliminate_p)
6682 return 1;
6683
6684 /* Replace biv with the giv's reduced reg. */
6685 XEXP (x, 1-arg_operand) = v->new_reg;
6686
6687 /* If all constants are actually constant integers and
6688 the derived constant can be directly placed in the COMPARE,
6689 do so. */
6690 if (GET_CODE (arg) == CONST_INT
6691 && GET_CODE (v->mult_val) == CONST_INT
6692 && GET_CODE (v->add_val) == CONST_INT
6693 && validate_change (insn, &XEXP (x, arg_operand),
6694 GEN_INT (INTVAL (arg)
6695 * INTVAL (v->mult_val)
6696 + INTVAL (v->add_val)), 0))
6697 return 1;
6698
6699 /* Otherwise, load it into a register. */
6700 tem = gen_reg_rtx (mode);
6701 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6702 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6703 return 1;
6704
6705 /* If that failed, put back the change we made above. */
6706 XEXP (x, 1-arg_operand) = reg;
6707 }
6708
6709 /* Look for giv with positive constant mult_val and nonconst add_val.
6710 Insert insns to calculate new compare value.
6711 ??? Turn this off due to possible overflow. */
6712
6713 for (v = bl->giv; v; v = v->next_iv)
6714 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6715 && ! v->ignore && ! v->maybe_dead && v->always_computable
6716 && v->mode == mode
6717 && 0)
6718 {
6719 rtx tem;
6720
6721 /* If the giv V had the auto-inc address optimization applied
6722 to it, and INSN occurs between the giv insn and the biv
6723 insn, then we must adjust the value used here.
6724 This is rare, so we don't bother to do so. */
6725 if (v->auto_inc_opt
6726 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6727 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6728 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6729 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6730 continue;
6731
6732 if (! eliminate_p)
6733 return 1;
6734
6735 tem = gen_reg_rtx (mode);
6736
6737 /* Replace biv with giv's reduced register. */
6738 validate_change (insn, &XEXP (x, 1 - arg_operand),
6739 v->new_reg, 1);
6740
6741 /* Compute value to compare against. */
6742 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6743 /* Use it in this insn. */
6744 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6745 if (apply_change_group ())
6746 return 1;
6747 }
6748 }
6749 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6750 {
6751 if (invariant_p (arg) == 1)
6752 {
6753 /* Look for giv with constant positive mult_val and nonconst
6754 add_val. Insert insns to compute new compare value.
6755 ??? Turn this off due to possible overflow. */
6756
6757 for (v = bl->giv; v; v = v->next_iv)
6758 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6759 && ! v->ignore && ! v->maybe_dead && v->always_computable
6760 && v->mode == mode
6761 && 0)
6762 {
6763 rtx tem;
6764
6765 /* If the giv V had the auto-inc address optimization applied
6766 to it, and INSN occurs between the giv insn and the biv
6767 insn, then we must adjust the value used here.
6768 This is rare, so we don't bother to do so. */
6769 if (v->auto_inc_opt
6770 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6771 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6772 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6773 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6774 continue;
6775
6776 if (! eliminate_p)
6777 return 1;
6778
6779 tem = gen_reg_rtx (mode);
6780
6781 /* Replace biv with giv's reduced register. */
6782 validate_change (insn, &XEXP (x, 1 - arg_operand),
6783 v->new_reg, 1);
6784
6785 /* Compute value to compare against. */
6786 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6787 tem, where);
6788 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6789 if (apply_change_group ())
6790 return 1;
6791 }
6792 }
6793
6794 /* This code has problems. Basically, when deciding whether we
6795 will eliminate BL, you can't know whether a particular giv
6796 of ARG will be reduced. If it isn't going to be reduced,
6797 we can't eliminate BL. We can try forcing it to be reduced,
6798 but that can generate poor code.
6799
6800 The problem is that the benefit of reducing TV, below, should
6801 be increased if BL can actually be eliminated, but this means
6802 we might have to do a topological sort of the order in which
6803 we try to process biv. It doesn't seem worthwhile to do
6804 this sort of thing now. */
6805
6806 #if 0
6807 /* Otherwise the reg compared with had better be a biv. */
6808 if (GET_CODE (arg) != REG
6809 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6810 return 0;
6811
6812 /* Look for a pair of givs, one for each biv,
6813 with identical coefficients. */
6814 for (v = bl->giv; v; v = v->next_iv)
6815 {
6816 struct induction *tv;
6817
6818 if (v->ignore || v->maybe_dead || v->mode != mode)
6819 continue;
6820
6821 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6822 if (! tv->ignore && ! tv->maybe_dead
6823 && rtx_equal_p (tv->mult_val, v->mult_val)
6824 && rtx_equal_p (tv->add_val, v->add_val)
6825 && tv->mode == mode)
6826 {
6827 /* If the giv V had the auto-inc address optimization applied
6828 to it, and INSN occurs between the giv insn and the biv
6829 insn, then we must adjust the value used here.
6830 This is rare, so we don't bother to do so. */
6831 if (v->auto_inc_opt
6832 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6833 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6834 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6835 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6836 continue;
6837
6838 if (! eliminate_p)
6839 return 1;
6840
6841 /* Replace biv with its giv's reduced reg. */
6842 XEXP (x, 1-arg_operand) = v->new_reg;
6843 /* Replace other operand with the other giv's
6844 reduced reg. */
6845 XEXP (x, arg_operand) = tv->new_reg;
6846 return 1;
6847 }
6848 }
6849 #endif
6850 }
6851
6852 /* If we get here, the biv can't be eliminated. */
6853 return 0;
6854
6855 case MEM:
6856 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6857 biv is used in it, since it will be replaced. */
6858 for (v = bl->giv; v; v = v->next_iv)
6859 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6860 return 1;
6861 break;
6862
6863 default:
6864 break;
6865 }
6866
6867 /* See if any subexpression fails elimination. */
6868 fmt = GET_RTX_FORMAT (code);
6869 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6870 {
6871 switch (fmt[i])
6872 {
6873 case 'e':
6874 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6875 eliminate_p, where))
6876 return 0;
6877 break;
6878
6879 case 'E':
6880 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6881 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6882 eliminate_p, where))
6883 return 0;
6884 break;
6885 }
6886 }
6887
6888 return 1;
6889 }
6890 \f
6891 /* Return nonzero if the last use of REG
6892 is in an insn following INSN in the same basic block. */
6893
6894 static int
6895 last_use_this_basic_block (reg, insn)
6896 rtx reg;
6897 rtx insn;
6898 {
6899 rtx n;
6900 for (n = insn;
6901 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6902 n = NEXT_INSN (n))
6903 {
6904 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6905 return 1;
6906 }
6907 return 0;
6908 }
6909 \f
6910 /* Called via `note_stores' to record the initial value of a biv. Here we
6911 just record the location of the set and process it later. */
6912
6913 static void
6914 record_initial (dest, set)
6915 rtx dest;
6916 rtx set;
6917 {
6918 struct iv_class *bl;
6919
6920 if (GET_CODE (dest) != REG
6921 || REGNO (dest) >= max_reg_before_loop
6922 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6923 return;
6924
6925 bl = reg_biv_class[REGNO (dest)];
6926
6927 /* If this is the first set found, record it. */
6928 if (bl->init_insn == 0)
6929 {
6930 bl->init_insn = note_insn;
6931 bl->init_set = set;
6932 }
6933 }
6934 \f
6935 /* If any of the registers in X are "old" and currently have a last use earlier
6936 than INSN, update them to have a last use of INSN. Their actual last use
6937 will be the previous insn but it will not have a valid uid_luid so we can't
6938 use it. */
6939
6940 static void
6941 update_reg_last_use (x, insn)
6942 rtx x;
6943 rtx insn;
6944 {
6945 /* Check for the case where INSN does not have a valid luid. In this case,
6946 there is no need to modify the regno_last_uid, as this can only happen
6947 when code is inserted after the loop_end to set a pseudo's final value,
6948 and hence this insn will never be the last use of x. */
6949 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6950 && INSN_UID (insn) < max_uid_for_loop
6951 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6952 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6953 else
6954 {
6955 register int i, j;
6956 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6957 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6958 {
6959 if (fmt[i] == 'e')
6960 update_reg_last_use (XEXP (x, i), insn);
6961 else if (fmt[i] == 'E')
6962 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6963 update_reg_last_use (XVECEXP (x, i, j), insn);
6964 }
6965 }
6966 }
6967 \f
6968 /* Given a jump insn JUMP, return the condition that will cause it to branch
6969 to its JUMP_LABEL. If the condition cannot be understood, or is an
6970 inequality floating-point comparison which needs to be reversed, 0 will
6971 be returned.
6972
6973 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6974 insn used in locating the condition was found. If a replacement test
6975 of the condition is desired, it should be placed in front of that
6976 insn and we will be sure that the inputs are still valid.
6977
6978 The condition will be returned in a canonical form to simplify testing by
6979 callers. Specifically:
6980
6981 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6982 (2) Both operands will be machine operands; (cc0) will have been replaced.
6983 (3) If an operand is a constant, it will be the second operand.
6984 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6985 for GE, GEU, and LEU. */
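
/* For example (illustrative): a branch testing (le (reg) (const_int 4))
   is returned as (lt (reg) (const_int 5)), and one testing
   (gt (const_int 4) (reg)) comes back as (lt (reg) (const_int 4)).  */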
6986
6987 rtx
6988 get_condition (jump, earliest)
6989 rtx jump;
6990 rtx *earliest;
6991 {
6992 enum rtx_code code;
6993 rtx prev = jump;
6994 rtx set;
6995 rtx tem;
6996 rtx op0, op1;
6997 int reverse_code = 0;
6998 int did_reverse_condition = 0;
6999 enum machine_mode mode;
7000
7001 /* If this is not a standard conditional jump, we can't parse it. */
7002 if (GET_CODE (jump) != JUMP_INSN
7003 || ! condjump_p (jump) || simplejump_p (jump))
7004 return 0;
7005
7006 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7007 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7008 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7009 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7010
7011 if (earliest)
7012 *earliest = jump;
7013
7014 /* If this branches to JUMP_LABEL when the condition is false, reverse
7015 the condition. */
7016 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7017 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7018 code = reverse_condition (code), did_reverse_condition ^= 1;
7019
7020 /* If we are comparing a register with zero, see if the register is set
7021 in the previous insn to a COMPARE or a comparison operation. Perform
7022 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7023 in cse.c */
7024
7025 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7026 {
7027 /* Set non-zero when we find something of interest. */
7028 rtx x = 0;
7029
7030 #ifdef HAVE_cc0
7031 /* If comparison with cc0, import actual comparison from compare
7032 insn. */
7033 if (op0 == cc0_rtx)
7034 {
7035 if ((prev = prev_nonnote_insn (prev)) == 0
7036 || GET_CODE (prev) != INSN
7037 || (set = single_set (prev)) == 0
7038 || SET_DEST (set) != cc0_rtx)
7039 return 0;
7040
7041 op0 = SET_SRC (set);
7042 op1 = CONST0_RTX (GET_MODE (op0));
7043 if (earliest)
7044 *earliest = prev;
7045 }
7046 #endif
7047
7048 /* If this is a COMPARE, pick up the two things being compared. */
7049 if (GET_CODE (op0) == COMPARE)
7050 {
7051 op1 = XEXP (op0, 1);
7052 op0 = XEXP (op0, 0);
7053 continue;
7054 }
7055 else if (GET_CODE (op0) != REG)
7056 break;
7057
7058 /* Go back to the previous insn. Stop if it is not an INSN. We also
7059 stop if it isn't a single set or if it has a REG_INC note because
7060 we don't want to bother dealing with it. */
7061
7062 if ((prev = prev_nonnote_insn (prev)) == 0
7063 || GET_CODE (prev) != INSN
7064 || FIND_REG_INC_NOTE (prev, 0)
7065 || (set = single_set (prev)) == 0)
7066 break;
7067
7068 /* If this is setting OP0, get what it sets it to if it looks
7069 relevant. */
7070 if (rtx_equal_p (SET_DEST (set), op0))
7071 {
7072 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7073
7074 /* ??? We may not combine comparisons done in a CCmode with
7075 comparisons not done in a CCmode. This is to aid targets
7076 like Alpha that have an IEEE compliant EQ instruction, and
7077 a non-IEEE compliant BEQ instruction. The use of CCmode is
7078 actually artificial, simply to prevent the combination, but
7079 should not affect other platforms. */
7080
7081 if ((GET_CODE (SET_SRC (set)) == COMPARE
7082 || (((code == NE
7083 || (code == LT
7084 && GET_MODE_CLASS (inner_mode) == MODE_INT
7085 && (GET_MODE_BITSIZE (inner_mode)
7086 <= HOST_BITS_PER_WIDE_INT)
7087 && (STORE_FLAG_VALUE
7088 & ((HOST_WIDE_INT) 1
7089 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7090 #ifdef FLOAT_STORE_FLAG_VALUE
7091 || (code == LT
7092 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7093 && FLOAT_STORE_FLAG_VALUE < 0)
7094 #endif
7095 ))
7096 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7097 && ((GET_MODE_CLASS (mode) == MODE_CC)
7098 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7099 x = SET_SRC (set);
7100 else if (((code == EQ
7101 || (code == GE
7102 && (GET_MODE_BITSIZE (inner_mode)
7103 <= HOST_BITS_PER_WIDE_INT)
7104 && GET_MODE_CLASS (inner_mode) == MODE_INT
7105 && (STORE_FLAG_VALUE
7106 & ((HOST_WIDE_INT) 1
7107 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7108 #ifdef FLOAT_STORE_FLAG_VALUE
7109 || (code == GE
7110 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7111 && FLOAT_STORE_FLAG_VALUE < 0)
7112 #endif
7113 ))
7114 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7115 && ((GET_MODE_CLASS (mode) == MODE_CC)
7116 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7117 {
7118 /* We might have reversed a LT to get a GE here. But this wasn't
7119 actually the comparison of data, so we don't flag that we
7120 have had to reverse the condition. */
7121 did_reverse_condition ^= 1;
7122 reverse_code = 1;
7123 x = SET_SRC (set);
7124 }
7125 else
7126 break;
7127 }
7128
7129 else if (reg_set_p (op0, prev))
7130 /* If this sets OP0, but not directly, we have to give up. */
7131 break;
7132
7133 if (x)
7134 {
7135 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7136 code = GET_CODE (x);
7137 if (reverse_code)
7138 {
7139 code = reverse_condition (code);
7140 did_reverse_condition ^= 1;
7141 reverse_code = 0;
7142 }
7143
7144 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7145 if (earliest)
7146 *earliest = prev;
7147 }
7148 }
7149
7150 /* If constant is first, put it last. */
7151 if (CONSTANT_P (op0))
7152 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7153
7154 /* If OP0 is the result of a comparison, we weren't able to find what
7155 was really being compared, so fail. */
7156 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7157 return 0;
7158
7159 /* Canonicalize any ordered comparison with integers involving equality
7160 if we can do computations in the relevant mode and we do not
7161 overflow. */
7162
7163 if (GET_CODE (op1) == CONST_INT
7164 && GET_MODE (op0) != VOIDmode
7165 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7166 {
7167 HOST_WIDE_INT const_val = INTVAL (op1);
7168 unsigned HOST_WIDE_INT uconst_val = const_val;
7169 unsigned HOST_WIDE_INT max_val
7170 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7171
7172 switch (code)
7173 {
7174 case LE:
7175 if (const_val != max_val >> 1)
7176 code = LT, op1 = GEN_INT (const_val + 1);
7177 break;
7178
7179 /* When cross-compiling, const_val might be sign-extended from
7180 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7181 case GE:
7182 if ((const_val & max_val)
7183 != (((HOST_WIDE_INT) 1
7184 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7185 code = GT, op1 = GEN_INT (const_val - 1);
7186 break;
7187
7188 case LEU:
7189 if (uconst_val < max_val)
7190 code = LTU, op1 = GEN_INT (uconst_val + 1);
7191 break;
7192
7193 case GEU:
7194 if (uconst_val != 0)
7195 code = GTU, op1 = GEN_INT (uconst_val - 1);
7196 break;
7197
7198 default:
7199 break;
7200 }
7201 }
7202
7203 /* If this was floating-point and we reversed anything other than an
7204 EQ or NE, return zero. */
7205 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7206 && did_reverse_condition && code != NE && code != EQ
7207 && ! flag_fast_math
7208 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7209 return 0;
7210
7211 #ifdef HAVE_cc0
7212 /* Never return CC0; return zero instead. */
7213 if (op0 == cc0_rtx)
7214 return 0;
7215 #endif
7216
7217 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7218 }
7219
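#if 0
/* Illustrative use of get_condition (a sketch; JUMP is assumed to be the
   conditional jump ending a loop).  */
static void
example_use (jump)
     rtx jump;
{
  rtx earliest;
  rtx cond = get_condition (jump, &earliest);

  if (cond != 0)
    /* e.g. cond might be (lt (reg) (const_int 5)); a replacement test
       could be emitted just before EARLIEST.  */
    ;
}
#endif
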
/* Similar to the routine above, except that we also put an invariant last
   unless both operands are invariants.  */
7222
7223 rtx
7224 get_condition_for_loop (x)
7225 rtx x;
7226 {
7227 rtx comparison = get_condition (x, NULL_PTR);
7228
7229 if (comparison == 0
7230 || ! invariant_p (XEXP (comparison, 0))
7231 || invariant_p (XEXP (comparison, 1)))
7232 return comparison;
7233
7234 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7235 XEXP (comparison, 1), XEXP (comparison, 0));
7236 }
7237
7238 #ifdef HAIFA
/* Analyze a loop in order to instrument it with the use of a count register.
7240 loop_start and loop_end are the first and last insns of the loop.
7241 This function works in cooperation with insert_bct ().
7242 loop_can_insert_bct[loop_num] is set according to whether the optimization
7243 is applicable to the loop. When it is applicable, the following variables
7244 are also set:
7245 loop_start_value[loop_num]
7246 loop_comparison_value[loop_num]
7247 loop_increment[loop_num]
7248 loop_comparison_code[loop_num] */
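
/* For instance (illustrative), given a loop of the form

     for (i = 0; i < n; i += 4)
       ...

   this analysis would record loop_start_value 0, loop_increment 4,
   loop_comparison_value n and loop_comparison_code LT.  */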
7249
7250 #ifdef HAVE_decrement_and_branch_on_count
static void
analyze_loop_iterations (loop_start, loop_end)
7253 rtx loop_start, loop_end;
7254 {
7255 rtx comparison, comparison_value;
7256 rtx iteration_var, initial_value, increment;
7257 enum rtx_code comparison_code;
7258
7259 rtx last_loop_insn;
7260 rtx insn;
7261 int i;
7262
7263 /* loop_variable mode */
7264 enum machine_mode original_mode;
7265
7266 /* find the number of the loop */
7267 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7268
  /* we change our mind only when we are sure that the loop will be instrumented */
7270 loop_can_insert_bct[loop_num] = 0;
7271
  /* check whether the optimization is suppressed */
  if (! flag_branch_on_count_reg)
7274 return;
7275
7276 /* make sure that count-reg is not in use */
7277 if (loop_used_count_register[loop_num]){
7278 if (loop_dump_stream)
7279 fprintf (loop_dump_stream,
7280 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7281 loop_num);
7282 return;
7283 }
7284
7285 /* make sure that the function has no indirect jumps. */
7286 if (indirect_jump_in_function){
7287 if (loop_dump_stream)
7288 fprintf (loop_dump_stream,
7289 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7290 loop_num);
7291 return;
7292 }
7293
7294 /* make sure that the last loop insn is a conditional jump */
7295 last_loop_insn = PREV_INSN (loop_end);
7296 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7297 if (loop_dump_stream)
7298 fprintf (loop_dump_stream,
7299 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7300 loop_num);
7301 return;
7302 }
7303
7304 /* First find the iteration variable. If the last insn is a conditional
7305 branch, and the insn preceding it tests a register value, make that
7306 register the iteration variable. */
7307
7308 /* We used to use prev_nonnote_insn here, but that fails because it might
7309 accidentally get the branch for a contained loop if the branch for this
7310 loop was deleted. We can only trust branches immediately before the
7311 loop_end. */
7312
7313 comparison = get_condition_for_loop (last_loop_insn);
7314 /* ??? Get_condition may switch position of induction variable and
7315 invariant register when it canonicalizes the comparison. */
7316
7317 if (comparison == 0) {
7318 if (loop_dump_stream)
7319 fprintf (loop_dump_stream,
7320 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7321 loop_num);
7322 return;
7323 }
7324
7325 comparison_code = GET_CODE (comparison);
7326 iteration_var = XEXP (comparison, 0);
7327 comparison_value = XEXP (comparison, 1);
7328
7329 original_mode = GET_MODE (iteration_var);
7330 if (GET_MODE_CLASS (original_mode) != MODE_INT
7331 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7332 if (loop_dump_stream)
7333 fprintf (loop_dump_stream,
	     "analyze_loop_iterations %d: BCT instrumentation failed: loop variable not word-size integer\n",
7335 loop_num);
7336 return;
7337 }
7338
7339 /* get info about loop bounds and increment */
7340 iteration_info (iteration_var, &initial_value, &increment,
7341 loop_start, loop_end);
7342
7343 /* make sure that all required loop data were found */
7344 if (!(initial_value && increment && comparison_value
7345 && invariant_p (comparison_value) && invariant_p (increment)
7346 && ! indirect_jump_in_function))
7347 {
7348 if (loop_dump_stream) {
7349 fprintf (loop_dump_stream,
7350 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7351 if (!(initial_value && increment && comparison_value)) {
7352 fprintf (loop_dump_stream, "\tbounds not available: ");
7353 if ( ! initial_value )
7354 fprintf (loop_dump_stream, "initial ");
7355 if ( ! increment )
7356 fprintf (loop_dump_stream, "increment ");
7357 if ( ! comparison_value )
7358 fprintf (loop_dump_stream, "comparison ");
7359 fprintf (loop_dump_stream, "\n");
7360 }
7361 if (!invariant_p (comparison_value) || !invariant_p (increment))
7362 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7363 }
7364 return;
7365 }
7366
7367 /* make sure that the increment is constant */
7368 if (GET_CODE (increment) != CONST_INT) {
7369 if (loop_dump_stream)
7370 fprintf (loop_dump_stream,
7371 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7372 loop_num);
7373 return;
7374 }
7375
  /* make sure that the loop contains neither a function call nor a jump on
     table (the count register might be altered by the called function, and
     might be used for the branch on table).  */
7379 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7380 if (GET_CODE (insn) == CALL_INSN){
7381 if (loop_dump_stream)
7382 fprintf (loop_dump_stream,
7383 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7384 loop_num);
7385 return;
7386 }
7387
7388 if (GET_CODE (insn) == JUMP_INSN
7389 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7390 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7391 if (loop_dump_stream)
7392 fprintf (loop_dump_stream,
7393 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7394 loop_num);
7395 return;
7396 }
7397 }
7398
  /* At this point, we are sure that the loop can be instrumented with BCT.
     Some of the loops, however, will not be instrumented - the final
     decision is made by insert_bct () */
7402 if (loop_dump_stream)
7403 fprintf (loop_dump_stream,
	     "analyze_loop_iterations: loop %d can be BCT instrumented.\n",
7405 loop_num);
7406
7407 /* mark all enclosing loops that they cannot use count register */
  /* mark all enclosing loops as unable to use the count register */
7409 marking here may prevent instrumenting an enclosing loop that could
7410 actually be instrumented. But since this is rare, it is safer to mark
7411 here in case the order of calling (analyze/insert)_bct would be changed. */
  for (i = loop_num; i != -1; i = loop_outer_loop[i])
7413 loop_used_count_register[i] = 1;
7414
7415 /* Set data structures which will be used by the instrumentation phase */
7416 loop_start_value[loop_num] = initial_value;
7417 loop_comparison_value[loop_num] = comparison_value;
7418 loop_increment[loop_num] = increment;
7419 loop_comparison_code[loop_num] = comparison_code;
7420 loop_can_insert_bct[loop_num] = 1;
7421 }
7422
7423
/* Instrument loop for insertion of the bct instruction.  We distinguish
   between loops with compile-time bounds and those with run-time bounds.
   The loop behavior is analyzed according to the following
   characteristics/variables:
7427 ; Input variables:
7428 ; comparison-value: the value to which the iteration counter is compared.
7429 ; initial-value: iteration-counter initial value.
7430 ; increment: iteration-counter increment.
7431 ; Computed variables:
7432 ; increment-direction: the sign of the increment.
7433 ; compare-direction: '1' for GT, GTE, '-1' for LT, LTE, '0' for NE.
7434 ; range-direction: sign (comparison-value - initial-value)
7435 We give up on the following cases:
7436 ; loop variable overflow.
7437 ; run-time loop bounds with comparison code NE.
7438 */
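
/* Illustrative instance (hypothetical): for "for (i = 0; i <= n; i++)"
   the increment-direction is 1, the compare-direction is 1 (LE) and,
   provided n >= 0, the range-direction is also 1.  */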
7439
7440 static void
7441 insert_bct (loop_start, loop_end)
7442 rtx loop_start, loop_end;
7443 {
7444 rtx initial_value, comparison_value, increment;
7445 enum rtx_code comparison_code;
7446
7447 int increment_direction, compare_direction;
7448 int unsigned_p = 0;
7449
  /* if the loop condition is <= or >=, the number of iterations
     is 1 more than the range of the bounds of the loop */
7452 int add_iteration = 0;
7453
  /* the only machine mode we work with is a word-size integer
     (hardcoded here as SImode) */
7456 enum machine_mode loop_var_mode = SImode;
7457
7458 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7459
7460 /* get loop-variables. No need to check that these are valid - already
7461 checked in analyze_loop_iterations (). */
7462 comparison_code = loop_comparison_code[loop_num];
7463 initial_value = loop_start_value[loop_num];
7464 comparison_value = loop_comparison_value[loop_num];
7465 increment = loop_increment[loop_num];
7466
7467 /* check analyze_loop_iterations decision for this loop. */
7468 if (! loop_can_insert_bct[loop_num]){
7469 if (loop_dump_stream)
7470 fprintf (loop_dump_stream,
	       "insert_bct: [%d] - analyze_loop_iterations () decided not to instrument\n",
7472 loop_num);
7473 return;
7474 }
7475
  /* It's impossible to instrument a completely unrolled loop. */
7477 if (loop_unroll_factor [loop_num] == -1)
7478 return;
7479
  /* make sure that the last loop insn is a conditional jump.
7481 This check is repeated from analyze_loop_iterations (),
7482 because unrolling might have changed that. */
7483 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7484 || !condjump_p (PREV_INSN (loop_end))) {
7485 if (loop_dump_stream)
7486 fprintf (loop_dump_stream,
7487 "insert_bct: not instrumenting BCT because of invalid branch\n");
7488 return;
7489 }
7490
7491 /* fix increment in case loop was unrolled. */
7492 if (loop_unroll_factor [loop_num] > 1)
    increment = GEN_INT (INTVAL (increment) * loop_unroll_factor[loop_num]);
7494
7495 /* determine properties and directions of the loop */
  increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
  switch (comparison_code) {
7498 case LEU:
7499 unsigned_p = 1;
7500 /* fallthrough */
7501 case LE:
7502 compare_direction = 1;
7503 add_iteration = 1;
7504 break;
7505 case GEU:
7506 unsigned_p = 1;
7507 /* fallthrough */
7508 case GE:
7509 compare_direction = -1;
7510 add_iteration = 1;
7511 break;
7512 case EQ:
7513 /* in this case we cannot know the number of iterations */
7514 if (loop_dump_stream)
7515 fprintf (loop_dump_stream,
7516 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7517 loop_num);
7518 return;
7519 case LTU:
7520 unsigned_p = 1;
7521 /* fallthrough */
7522 case LT:
7523 compare_direction = 1;
7524 break;
7525 case GTU:
7526 unsigned_p = 1;
7527 /* fallthrough */
7528 case GT:
7529 compare_direction = -1;
7530 break;
7531 case NE:
7532 compare_direction = 0;
7533 break;
7534 default:
7535 abort ();
7536 }
7537
7538
7539 /* make sure that the loop does not end by an overflow */
7540 if (compare_direction != increment_direction) {
7541 if (loop_dump_stream)
7542 fprintf (loop_dump_stream,
7543 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7544 loop_num);
7545 return;
7546 }
7547
7548 /* try to instrument the loop. */
7549
7550 /* Handle the simpler case, where the bounds are known at compile time. */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
7552 {
7553 int n_iterations;
7554 int increment_value_abs = INTVAL (increment) * increment_direction;
7555
7556 /* check the relation between compare-val and initial-val */
7557 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7558 int range_direction = (difference > 0) ? 1 : -1;
7559
7560 /* make sure the loop executes enough iterations to gain from BCT */
7561 if (difference > -3 && difference < 3) {
7562 if (loop_dump_stream)
7563 fprintf (loop_dump_stream,
7564 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7565 loop_num);
7566 return;
7567 }
7568
7569 /* make sure that the loop executes at least once */
7570 if ((range_direction == 1 && compare_direction == -1)
7571 || (range_direction == -1 && compare_direction == 1))
7572 {
7573 if (loop_dump_stream)
7574 fprintf (loop_dump_stream,
7575 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7576 loop_num);
7577 return;
7578 }
7579
      /* make sure that the loop does not end by an overflow (with
	 compile-time bounds we must have an additional check for overflow,
	 because here we also support the comparison code NE) */
7583 if (comparison_code == NE
7584 && increment_direction != range_direction) {
7585 if (loop_dump_stream)
7586 fprintf (loop_dump_stream,
7587 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7588 loop_num);
7589 return;
7590 }
7591
7592 /* Determine the number of iterations by:
7593 ;
7594 ; compare-val - initial-val + (increment -1) + additional-iteration
7595 ; num_iterations = -----------------------------------------------------------------
7596 ; increment
7597 */
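      /* Worked instance (for a hypothetical loop "for (i = 0; i <= 10; i += 2)"):
	 difference = 10, increment_value_abs = 2, add_iteration = 1, so
	 n_iterations = (10 + (2 - 1) + 1) / 2 = 6, i.e. i = 0,2,4,6,8,10.  */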
7598 difference = (range_direction > 0) ? difference : -difference;
7599 #if 0
7600 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7601 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7602 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7603 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7604 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7605 #endif
7606
7607 if (increment_value_abs == 0) {
7608 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7609 abort ();
7610 }
7611 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7612 / increment_value_abs;
7613
7614 #if 0
7615 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7616 #endif
7617 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7618
7619 /* Done with this loop. */
7620 return;
7621 }
7622
  /* Handle the more complex case, where the bounds are NOT known at compile
     time; in this case we generate a run-time calculation of the number of
     iterations.  */
7625
  /* With run-time bounds, if the comparison code is NE we give up */
7627 if (comparison_code == NE) {
7628 if (loop_dump_stream)
7629 fprintf (loop_dump_stream,
7630 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7631 loop_num);
7632 return;
7633 }
7634
7635 else {
7636 /* We rely on the existence of run-time guard to ensure that the
7637 loop executes at least once. */
7638 rtx sequence;
7639 rtx iterations_num_reg;
7640
7641 int increment_value_abs = INTVAL (increment) * increment_direction;
7642
7643 /* make sure that the increment is a power of two, otherwise (an
7644 expensive) divide is needed. */
7645 if (exact_log2 (increment_value_abs) == -1)
7646 {
7647 if (loop_dump_stream)
7648 fprintf (loop_dump_stream,
7649 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7650 return;
7651 }
7652
7653 /* compute the number of iterations */
7654 start_sequence ();
7655 {
7656 rtx temp_reg;
7657
7658 /* Again, the number of iterations is calculated by:
7659 ;
7660 ; compare-val - initial-val + (increment -1) + additional-iteration
7661 ; num_iterations = -----------------------------------------------------------------
7662 ; increment
7663 */
7664 /* ??? Do we have to call copy_rtx here before passing rtx to
7665 expand_binop? */
7666 if (compare_direction > 0) {
	/* <, <= : the loop variable is increasing */
7668 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7669 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7670 }
7671 else {
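	/* >, >= : the loop variable is decreasing */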
7672 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7673 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7674 }
7675
7676 if (increment_value_abs - 1 + add_iteration != 0)
7677 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7678 GEN_INT (increment_value_abs - 1 + add_iteration),
7679 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7680
7681 if (increment_value_abs != 1)
7682 {
7683 /* ??? This will generate an expensive divide instruction for
7684 most targets. The original authors apparently expected this
7685 to be a shift, since they test for power-of-2 divisors above,
7686 but just naively generating a divide instruction will not give
7687 a shift. It happens to work for the PowerPC target because
7688 the rs6000.md file has a divide pattern that emits shifts.
7689 It will probably not work for any other target. */
7690 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7691 temp_reg,
7692 GEN_INT (increment_value_abs),
7693 NULL_RTX, 0, OPTAB_LIB_WIDEN);
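#if 0
	  /* A possible fix (an untested sketch): the divisor is known to
	     be a power of two and the dividend is non-negative at run
	     time, so a logical shift right would do.  */
	  iterations_num_reg
	    = expand_binop (loop_var_mode, lshr_optab, temp_reg,
			    GEN_INT (exact_log2 (increment_value_abs)),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);
#endif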
7694 }
7695 else
7696 iterations_num_reg = temp_reg;
7697 }
7698 sequence = gen_sequence ();
7699 end_sequence ();
7700 emit_insn_before (sequence, loop_start);
7701 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7702 }
7703 }
7704
/* Instrument loop by inserting a bct in it.  This is done in the following
   way:
   1. A new register is created and assigned the hard register number of the
      count register.
   2. In the head of the loop the new register is initialized with the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is
      generated.  The created comparison follows the pattern defined for the
      decrement_and_branch_on_count insn, so this insn will be recognized in
      the assembly generation phase.
   4. The compare and branch on the old variable are deleted.  So, if the
      loop variable was not used elsewhere, it will be eliminated by
      data-flow analysis.  */
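
/* Illustrative effect in C-like terms (a sketch):

     before:  loop:  body; i += inc; if (i CMP bound) goto loop;
     after:   ctr = n_iterations;
	      loop:  body; i += inc; if (--ctr != 0) goto loop;

   Only the final compare and branch are replaced; if the old loop variable
   is not used elsewhere, its remaining updates become dead and data-flow
   analysis removes them.  */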
7716
7717 static void
7718 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7719 rtx loop_start, loop_end;
7720 rtx loop_num_iterations;
7721 {
7722 rtx temp_reg1, temp_reg2;
7723 rtx start_label;
7724
7725 rtx sequence;
7726 enum machine_mode loop_var_mode = SImode;
7727
7728 if (HAVE_decrement_and_branch_on_count)
7729 {
7730 if (loop_dump_stream)
7731 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7732
7733 /* eliminate the check on the old variable */
7734 delete_insn (PREV_INSN (loop_end));
7735 delete_insn (PREV_INSN (loop_end));
7736
7737 /* insert the label which will delimit the start of the loop */
7738 start_label = gen_label_rtx ();
7739 emit_label_after (start_label, loop_start);
7740
7741 /* insert initialization of the count register into the loop header */
7742 start_sequence ();
7743 temp_reg1 = gen_reg_rtx (loop_var_mode);
7744 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7745
      /* this will be the count register */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* we have to move the value to the count register from a GPR
	 because the rtx pointed to by loop_num_iterations could contain
	 an expression which cannot be moved into the count register */
7751 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7752
7753 sequence = gen_sequence ();
7754 end_sequence ();
7755 emit_insn_after (sequence, loop_start);
7756
7757 /* insert new comparison on the count register instead of the
7758 old one, generating the needed BCT pattern (that will be
7759 later recognized by assembly generation phase). */
7760 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7761 loop_end);
7762 LABEL_NUSES (start_label)++;
7763 }
7764
7765 }
7766 #endif /* HAVE_decrement_and_branch_on_count */
7767
7768 #endif /* HAIFA */
7769
7770 /* Scan the function and determine whether it has indirect (computed) jumps.
7771
7772 This is taken mostly from flow.c; similar code exists elsewhere
7773 in the compiler. It may be useful to put this into rtlanal.c. */
7774 static int
7775 indirect_jump_in_function_p (start)
7776 rtx start;
7777 {
7778 rtx insn;
7779
7780 for (insn = start; insn; insn = NEXT_INSN (insn))
7781 if (computed_jump_p (insn))
7782 return 1;
7783
7784 return 0;
7785 }