1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
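
/* As an illustrative sketch (not drawn from any particular test case),
   a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y + i;

   is conceptually rewritten so that the invariant product is computed
   only once:

	t = x * y;
	for (i = 0; i < n; i++)
	  a[i] = t + i;

   Likewise, a loop that repeatedly zero-extends a narrow value into a
   wider register is changed to clear the register once before the loop
   and copy only the low part inside it.  */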
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56
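/* Handy summaries of a pseudo register's life relative to LOOP:
   LOOP_REG_LIFETIME is the distance, in luids, between the first and last
   uses of pseudo REGNO, and LOOP_REG_GLOBAL_P is nonzero if that range
   extends outside the loop, i.e. the register is also used before the
   loop's start or after its end.  */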
57 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
58 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
59
60 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
61 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
62 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
63
64
65 /* Vector mapping INSN_UIDs to luids.
66 The luids are like uids but increase monotonically always.
67 We use them to see whether a jump comes from outside a given loop. */
68
69 int *uid_luid;
70
71 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
72 number the insn is contained in. */
73
74 struct loop **uid_loop;
75
76 /* 1 + largest uid of any insn. */
77
78 int max_uid_for_loop;
79
80 /* 1 + luid of last insn. */
81
82 static int max_luid;
83
84 /* Number of loops detected in current function. Used as index to the
85 next few tables. */
86
87 static int max_loop_num;
88
89 /* Bound on pseudo register number before loop optimization.
90 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
91 unsigned int max_reg_before_loop;
92
93 /* The value to pass to the next call of reg_scan_update. */
94 static int loop_max_reg;
95
96 #define obstack_chunk_alloc xmalloc
97 #define obstack_chunk_free free
98 \f
99 /* During the analysis of a loop, a chain of `struct movable's
100 is made to record all the movable insns found.
101 Then the entire chain can be scanned to decide which to move. */
102
103 struct movable
104 {
105 rtx insn; /* A movable insn */
106 rtx set_src; /* The expression this reg is set from. */
107 rtx set_dest; /* The destination of this SET. */
108 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
109 of any registers used within the LIBCALL. */
110 int consec; /* Number of consecutive following insns
111 that must be moved with this one. */
112 unsigned int regno; /* The register it sets */
113 short lifetime; /* lifetime of that register;
114 may be adjusted when matching movables
115 that load the same value are found. */
116 short savings; /* Number of insns we can move for this reg,
117 including other movables that force this
118 or match this one. */
119 unsigned int cond : 1; /* 1 if only conditionally movable */
120 unsigned int force : 1; /* 1 means MUST move this insn */
121 unsigned int global : 1; /* 1 means reg is live outside this loop */
122 /* If PARTIAL is 1, GLOBAL means something different:
123 that the reg is live outside the range from where it is set
124 to the following label. */
125 unsigned int done : 1; /* 1 inhibits further processing of this */
126
127 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
128 In particular, moving it does not make it
129 invariant. */
130 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
131 load SRC, rather than copying INSN. */
132 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
133 first insn of a consecutive sets group. */
134 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
135 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
136 that we should avoid changing when clearing
137 the rest of the reg. */
138 struct movable *match; /* First entry for same value */
139 struct movable *forces; /* An insn that must be moved if this is */
140 struct movable *next;
141 };
142
143
144 FILE *loop_dump_stream;
145
146 /* Forward declarations. */
147
148 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
149 static void mark_loop_jump PARAMS ((rtx, struct loop *));
150 static void prescan_loop PARAMS ((struct loop *));
151 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
152 static int consec_sets_invariant_p PARAMS ((const struct loop *,
153 rtx, int, rtx));
154 static int labels_in_range_p PARAMS ((rtx, int));
155 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
156 static void note_addr_stored PARAMS ((rtx, rtx, void *));
157 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
158 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
159 static void scan_loop PARAMS ((struct loop*, int));
160 #if 0
161 static void replace_call_address PARAMS ((rtx, rtx, rtx));
162 #endif
163 static rtx skip_consec_insns PARAMS ((rtx, int));
164 static int libcall_benefit PARAMS ((rtx));
165 static void ignore_some_movables PARAMS ((struct loop_movables *));
166 static void force_movables PARAMS ((struct loop_movables *));
167 static void combine_movables PARAMS ((struct loop_movables *,
168 struct loop_regs *));
169 static int num_unmoved_movables PARAMS ((const struct loop *));
170 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
171 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
172 struct loop_regs *));
173 static void add_label_notes PARAMS ((rtx, rtx));
174 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
175 int, int));
176 static void loop_movables_add PARAMS((struct loop_movables *,
177 struct movable *));
178 static void loop_movables_free PARAMS((struct loop_movables *));
179 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
180 static void loop_bivs_find PARAMS((struct loop *));
181 static void loop_bivs_init_find PARAMS((struct loop *));
182 static void loop_bivs_check PARAMS((struct loop *));
183 static void loop_givs_find PARAMS((struct loop *));
184 static void loop_givs_check PARAMS((struct loop *));
185 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
186 int, int));
187 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
188 struct induction *, rtx));
189 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
190 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
191 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
192 rtx *));
193 static void loop_ivs_free PARAMS((struct loop *));
194 static void strength_reduce PARAMS ((struct loop *, int));
195 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
196 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
197 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
198 static void record_biv PARAMS ((struct loop *, struct induction *,
199 rtx, rtx, rtx, rtx, rtx *,
200 int, int));
201 static void check_final_value PARAMS ((const struct loop *,
202 struct induction *));
203 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
204 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
205 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
206 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
207 static void record_giv PARAMS ((const struct loop *, struct induction *,
208 rtx, rtx, rtx, rtx, rtx, rtx, int,
209 enum g_types, int, int, rtx *));
210 static void update_giv_derive PARAMS ((const struct loop *, rtx));
211 static void check_ext_dependant_givs PARAMS ((struct iv_class *,
212 struct loop_info *));
213 static int basic_induction_var PARAMS ((const struct loop *, rtx,
214 enum machine_mode, rtx, rtx,
215 rtx *, rtx *, rtx **));
216 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
217 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
218 rtx *, rtx *, rtx *, int, int *,
219 enum machine_mode));
220 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
221 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
222 static int check_dbra_loop PARAMS ((struct loop *, int));
223 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
224 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
225 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
226 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
227 static int product_cheap_p PARAMS ((rtx, rtx));
228 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
229 int, int, int));
230 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
231 struct iv_class *, int,
232 basic_block, rtx));
233 static int last_use_this_basic_block PARAMS ((rtx, rtx));
234 static void record_initial PARAMS ((rtx, rtx, void *));
235 static void update_reg_last_use PARAMS ((rtx, rtx));
236 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
237 static void loop_regs_scan PARAMS ((const struct loop *, int));
238 static int count_insns_in_loop PARAMS ((const struct loop *));
239 static void load_mems PARAMS ((const struct loop *));
240 static int insert_loop_mem PARAMS ((rtx *, void *));
241 static int replace_loop_mem PARAMS ((rtx *, void *));
242 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
243 static int replace_loop_reg PARAMS ((rtx *, void *));
244 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
245 static void note_reg_stored PARAMS ((rtx, rtx, void *));
246 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
247 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
248 unsigned int));
249 static int replace_label PARAMS ((rtx *, void *));
250 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
251 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
252 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
253 static void loop_regs_update PARAMS ((const struct loop *, rtx));
254 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
255
256 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
257 rtx, rtx));
258 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
259 basic_block, rtx, rtx));
260 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
261 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
262
263 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
264 static void loop_delete_insns PARAMS ((rtx, rtx));
265 void debug_ivs PARAMS ((const struct loop *));
266 void debug_iv_class PARAMS ((const struct iv_class *));
267 void debug_biv PARAMS ((const struct induction *));
268 void debug_giv PARAMS ((const struct induction *));
269 void debug_loop PARAMS ((const struct loop *));
270 void debug_loops PARAMS ((const struct loops *));
271
272 typedef struct rtx_pair
273 {
274 rtx r1;
275 rtx r2;
276 } rtx_pair;
277
278 typedef struct loop_replace_args
279 {
280 rtx match;
281 rtx replacement;
282 rtx insn;
283 } loop_replace_args;
284
285 /* Nonzero iff INSN is between START and END, inclusive. */
286 #define INSN_IN_RANGE_P(INSN, START, END) \
287 (INSN_UID (INSN) < max_uid_for_loop \
288 && INSN_LUID (INSN) >= INSN_LUID (START) \
289 && INSN_LUID (INSN) <= INSN_LUID (END))
290
291 /* Indirect_jump_in_function is computed once per function. */
292 static int indirect_jump_in_function;
293 static int indirect_jump_in_function_p PARAMS ((rtx));
294
295 static int compute_luids PARAMS ((rtx, rtx, int));
296
297 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
298 struct induction *,
299 rtx));
300 \f
301 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
302 copy the value of the strength reduced giv to its original register. */
303 static int copy_cost;
304
305 /* Cost of using a register, to normalize the benefits of a giv. */
306 static int reg_address_cost;
307
308 void
309 init_loop ()
310 {
311 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
312
313 reg_address_cost = address_cost (reg, SImode);
314
315 copy_cost = COSTS_N_INSNS (1);
316 }
317 \f
318 /* Compute the mapping from uids to luids.
319 LUIDs are numbers assigned to insns, like uids,
320 except that luids increase monotonically through the code.
321 Start at insn START and stop just before END. Assign LUIDs
322 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
323 static int
324 compute_luids (start, end, prev_luid)
325 rtx start, end;
326 int prev_luid;
327 {
328 int i;
329 rtx insn;
330
331 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
332 {
333 if (INSN_UID (insn) >= max_uid_for_loop)
334 continue;
335 /* Don't assign luids to line-number NOTEs, so that the distance in
336 luids between two insns is not affected by -g. */
337 if (GET_CODE (insn) != NOTE
338 || NOTE_LINE_NUMBER (insn) <= 0)
339 uid_luid[INSN_UID (insn)] = ++i;
340 else
341 /* Give a line number note the same luid as preceding insn. */
342 uid_luid[INSN_UID (insn)] = i;
343 }
344 return i + 1;
345 }
346 \f
347 /* Entry point of this file. Perform loop optimization
348 on the current function. F is the first insn of the function
349 and DUMPFILE is a stream for output of a trace of actions taken
350 (or 0 if none should be output). */
351
352 void
353 loop_optimize (f, dumpfile, flags)
354 /* f is the first instruction of a chain of insns for one function */
355 rtx f;
356 FILE *dumpfile;
357 int flags;
358 {
359 register rtx insn;
360 register int i;
361 struct loops loops_data;
362 struct loops *loops = &loops_data;
363 struct loop_info *loops_info;
364
365 loop_dump_stream = dumpfile;
366
367 init_recog_no_volatile ();
368
369 max_reg_before_loop = max_reg_num ();
370 loop_max_reg = max_reg_before_loop;
371
372 regs_may_share = 0;
373
374 /* Count the number of loops. */
375
376 max_loop_num = 0;
377 for (insn = f; insn; insn = NEXT_INSN (insn))
378 {
379 if (GET_CODE (insn) == NOTE
380 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
381 max_loop_num++;
382 }
383
384 /* Don't waste time if no loops. */
385 if (max_loop_num == 0)
386 return;
387
388 loops->num = max_loop_num;
389
390 /* Get size to use for tables indexed by uids.
391 Leave some space for labels allocated by find_and_verify_loops. */
392 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
393
394 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
395 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
396 sizeof (struct loop *));
397
398 /* Allocate storage for array of loops. */
399 loops->array = (struct loop *)
400 xcalloc (loops->num, sizeof (struct loop));
401
402 /* Find and process each loop.
403 First, find them, and record them in order of their beginnings. */
404 find_and_verify_loops (f, loops);
405
406 /* Allocate and initialize auxiliary loop information. */
407 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
408 for (i = 0; i < loops->num; i++)
409 loops->array[i].aux = loops_info + i;
410
411 /* Now find all register lifetimes. This must be done after
412 find_and_verify_loops, because it might reorder the insns in the
413 function. */
414 reg_scan (f, max_reg_before_loop, 1);
415
416 /* This must occur after reg_scan so that registers created by gcse
417 will have entries in the register tables.
418
419 We could have added a call to reg_scan after gcse_main in toplev.c,
420 but moving this call to init_alias_analysis is more efficient. */
421 init_alias_analysis ();
422
  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
425 if (get_max_uid () > max_uid_for_loop)
426 abort ();
427 /* Now reset it to the actual size we need. See above. */
428 max_uid_for_loop = get_max_uid ();
429
430 /* find_and_verify_loops has already called compute_luids, but it
431 might have rearranged code afterwards, so we need to recompute
432 the luids now. */
433 max_luid = compute_luids (f, NULL_RTX, 0);
434
435 /* Don't leave gaps in uid_luid for insns that have been
436 deleted. It is possible that the first or last insn
437 using some register has been deleted by cross-jumping.
438 Make sure that uid_luid for that former insn's uid
439 points to the general area where that insn used to be. */
440 for (i = 0; i < max_uid_for_loop; i++)
441 {
442 uid_luid[0] = uid_luid[i];
443 if (uid_luid[0] != 0)
444 break;
445 }
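  /* (The preceding loop copied the first nonzero luid into element 0 so
     that the backfill below has a value to propagate even for leading
     zeros.)  Now replace each remaining zero entry with the luid of the
     previous uid, so a deleted insn's uid maps into the surrounding area.  */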
446 for (i = 0; i < max_uid_for_loop; i++)
447 if (uid_luid[i] == 0)
448 uid_luid[i] = uid_luid[i - 1];
449
450 /* Determine if the function has indirect jump. On some systems
451 this prevents low overhead loop instructions from being used. */
452 indirect_jump_in_function = indirect_jump_in_function_p (f);
453
454 /* Now scan the loops, last ones first, since this means inner ones are done
455 before outer ones. */
456 for (i = max_loop_num - 1; i >= 0; i--)
457 {
458 struct loop *loop = &loops->array[i];
459
460 if (! loop->invalid && loop->end)
461 scan_loop (loop, flags);
462 }
463
464 /* If there were lexical blocks inside the loop, they have been
465 replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
466 and NOTE_INSN_BLOCK_END for each such block. We must duplicate
467 the BLOCKs as well. */
468 if (write_symbols != NO_DEBUG)
469 reorder_blocks ();
470
471 end_alias_analysis ();
472
473 /* Clean up. */
474 free (uid_luid);
475 free (uid_loop);
476 free (loops_info);
477 free (loops->array);
478 }
479 \f
/* Returns the next insn, in execution order, after INSN.  LOOP->START and
   LOOP->END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
482 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
483 insn-stream; it is used with loops that are entered near the
484 bottom. */
485
486 static rtx
487 next_insn_in_loop (loop, insn)
488 const struct loop *loop;
489 rtx insn;
490 {
491 insn = NEXT_INSN (insn);
492
493 if (insn == loop->end)
494 {
495 if (loop->top)
496 /* Go to the top of the loop, and continue there. */
497 insn = loop->top;
498 else
499 /* We're done. */
500 insn = NULL_RTX;
501 }
502
503 if (insn == loop->scan_start)
504 /* We're done. */
505 insn = NULL_RTX;
506
507 return insn;
508 }
509
510 /* Optimize one loop described by LOOP. */
511
512 /* ??? Could also move memory writes out of loops if the destination address
513 is invariant, the source is invariant, the memory write is not volatile,
514 and if we can prove that no read inside the loop can read this address
515 before the write occurs. If there is a read of this address after the
516 write, then we can also mark the memory read as invariant. */
517
518 static void
519 scan_loop (loop, flags)
520 struct loop *loop;
521 int flags;
522 {
523 struct loop_info *loop_info = LOOP_INFO (loop);
524 struct loop_regs *regs = LOOP_REGS (loop);
525 register int i;
526 rtx loop_start = loop->start;
527 rtx loop_end = loop->end;
528 rtx p;
529 /* 1 if we are scanning insns that could be executed zero times. */
530 int maybe_never = 0;
531 /* 1 if we are scanning insns that might never be executed
532 due to a subroutine call which might exit before they are reached. */
533 int call_passed = 0;
534 /* Jump insn that enters the loop, or 0 if control drops in. */
535 rtx loop_entry_jump = 0;
536 /* Number of insns in the loop. */
537 int insn_count;
538 int tem;
539 rtx temp, update_start, update_end;
540 /* The SET from an insn, if it is the only SET in the insn. */
541 rtx set, set1;
542 /* Chain describing insns movable in current loop. */
543 struct loop_movables *movables = LOOP_MOVABLES (loop);
544 /* Ratio of extra register life span we can justify
545 for saving an instruction. More if loop doesn't call subroutines
546 since in that case saving an insn makes more difference
547 and more registers are available. */
548 int threshold;
549 /* Nonzero if we are scanning instructions in a sub-loop. */
550 int loop_depth = 0;
551
552 loop->top = 0;
553
554 movables->head = 0;
555 movables->last = 0;
556
557 /* Determine whether this loop starts with a jump down to a test at
558 the end. This will occur for a small number of loops with a test
559 that is too complex to duplicate in front of the loop.
560
561 We search for the first insn or label in the loop, skipping NOTEs.
562 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
563 (because we might have a loop executed only once that contains a
564 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
565 (in case we have a degenerate loop).
566
567 Note that if we mistakenly think that a loop is entered at the top
568 when, in fact, it is entered at the exit test, the only effect will be
569 slightly poorer optimization. Making the opposite error can generate
570 incorrect code. Since very few loops now start with a jump to the
571 exit test, the code here to detect that case is very conservative. */
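
  /* As a rough sketch (register and label numbers are made up), a loop
     that is entered at its exit test has an insn layout like this:

	NOTE_INSN_LOOP_BEG
	jump L2			<- loop_entry_jump (found below)
     L1:			<- becomes loop->top
	... loop body ...
     L2:			<- becomes loop->scan_start
	exit test; conditional jump back to L1
	NOTE_INSN_LOOP_END  */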
572
573 for (p = NEXT_INSN (loop_start);
574 p != loop_end
575 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
576 && (GET_CODE (p) != NOTE
577 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
578 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
579 p = NEXT_INSN (p))
580 ;
581
582 loop->scan_start = p;
583
584 /* If loop end is the end of the current function, then emit a
585 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
586 note insn. This is the position we use when sinking insns out of
587 the loop. */
588 if (NEXT_INSN (loop->end) != 0)
589 loop->sink = NEXT_INSN (loop->end);
590 else
591 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
592
593 /* Set up variables describing this loop. */
594 prescan_loop (loop);
595 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
596
597 /* If loop has a jump before the first label,
598 the true entry is the target of that jump.
599 Start scan from there.
600 But record in LOOP->TOP the place where the end-test jumps
601 back to so we can scan that after the end of the loop. */
602 if (GET_CODE (p) == JUMP_INSN)
603 {
604 loop_entry_jump = p;
605
      /* Loop entry must be an unconditional jump (and not a RETURN).  */
607 if (any_uncondjump_p (p)
608 && JUMP_LABEL (p) != 0
609 /* Check to see whether the jump actually
610 jumps out of the loop (meaning it's no loop).
611 This case can happen for things like
612 do {..} while (0). If this label was generated previously
613 by loop, we can't tell anything about it and have to reject
614 the loop. */
615 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
616 {
617 loop->top = next_label (loop->scan_start);
618 loop->scan_start = JUMP_LABEL (p);
619 }
620 }
621
622 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
623 as required by loop_reg_used_before_p. So skip such loops. (This
624 test may never be true, but it's best to play it safe.)
625
626 Also, skip loops where we do not start scanning at a label. This
627 test also rejects loops starting with a JUMP_INSN that failed the
628 test above. */
629
630 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
631 || GET_CODE (loop->scan_start) != CODE_LABEL)
632 {
633 if (loop_dump_stream)
634 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
635 INSN_UID (loop_start), INSN_UID (loop_end));
636 return;
637 }
638
639 /* Allocate extra space for REGs that might be created by load_mems.
640 We allocate a little extra slop as well, in the hopes that we
641 won't have to reallocate the regs array. */
642 loop_regs_scan (loop, loop_info->mems_idx + 16);
643 insn_count = count_insns_in_loop (loop);
644
645 if (loop_dump_stream)
646 {
647 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
648 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
649 if (loop->cont)
650 fprintf (loop_dump_stream, "Continue at insn %d.\n",
651 INSN_UID (loop->cont));
652 }
653
654 /* Scan through the loop finding insns that are safe to move.
655 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
656 this reg will be considered invariant for subsequent insns.
657 We consider whether subsequent insns use the reg
658 in deciding whether it is worth actually moving.
659
660 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
661 and therefore it is possible that the insns we are scanning
662 would never be executed. At such times, we must make sure
663 that it is safe to execute the insn once instead of zero times.
664 When MAYBE_NEVER is 0, all insns will be executed at least once
665 so that is not a problem. */
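
  /* For example (purely illustrative): in a body containing

	 if (d != 0)
	   q = n / d;

     the division may trap, so once MAYBE_NEVER has been set by the
     conditional jump it must not be hoisted in front of the loop, where
     it would be executed unconditionally.  */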
666
667 for (p = next_insn_in_loop (loop, loop->scan_start);
668 p != NULL_RTX;
669 p = next_insn_in_loop (loop, p))
670 {
671 if (GET_CODE (p) == INSN
672 && (set = single_set (p))
673 && GET_CODE (SET_DEST (set)) == REG
674 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
675 {
676 int tem1 = 0;
677 int tem2 = 0;
678 int move_insn = 0;
679 rtx src = SET_SRC (set);
680 rtx dependencies = 0;
681
682 /* Figure out what to use as a source of this insn. If a REG_EQUIV
683 note is given or if a REG_EQUAL note with a constant operand is
684 specified, use it as the source and mark that we should move
685 this insn by calling emit_move_insn rather that duplicating the
686 insn.
687
688 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
689 is present. */
690 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
691 if (temp)
692 src = XEXP (temp, 0), move_insn = 1;
693 else
694 {
695 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
696 if (temp && CONSTANT_P (XEXP (temp, 0)))
697 src = XEXP (temp, 0), move_insn = 1;
698 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
699 {
700 src = XEXP (temp, 0);
701 /* A libcall block can use regs that don't appear in
702 the equivalent expression. To move the libcall,
703 we must move those regs too. */
704 dependencies = libcall_other_reg (p, src);
705 }
706 }
707
708 /* Don't try to optimize a register that was made
709 by loop-optimization for an inner loop.
710 We don't know its life-span, so we can't compute the benefit. */
711 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
712 ;
713 else if (/* The register is used in basic blocks other
714 than the one where it is set (meaning that
715 something after this point in the loop might
716 depend on its value before the set). */
717 ! reg_in_basic_block_p (p, SET_DEST (set))
	       /* And the set is not guaranteed to be executed once
719 the loop starts, or the value before the set is
720 needed before the set occurs...
721
722 ??? Note we have quadratic behaviour here, mitigated
723 by the fact that the previous test will often fail for
724 large loops. Rather than re-scanning the entire loop
725 each time for register usage, we should build tables
726 of the register usage and use them here instead. */
727 && (maybe_never
728 || loop_reg_used_before_p (loop, set, p)))
729 /* It is unsafe to move the set.
730
731 This code used to consider it OK to move a set of a variable
732 which was not created by the user and not used in an exit test.
733 That behavior is incorrect and was removed. */
734 ;
735 else if ((tem = loop_invariant_p (loop, src))
736 && (dependencies == 0
737 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
738 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
739 || (tem1
740 = consec_sets_invariant_p
741 (loop, SET_DEST (set),
742 regs->array[REGNO (SET_DEST (set))].set_in_loop,
743 p)))
744 /* If the insn can cause a trap (such as divide by zero),
745 can't move it unless it's guaranteed to be executed
746 once loop is entered. Even a function call might
747 prevent the trap insn from being reached
748 (since it might exit!) */
749 && ! ((maybe_never || call_passed)
750 && may_trap_p (src)))
751 {
752 register struct movable *m;
753 register int regno = REGNO (SET_DEST (set));
754
755 /* A potential lossage is where we have a case where two insns
756 can be combined as long as they are both in the loop, but
757 we move one of them outside the loop. For large loops,
758 this can lose. The most common case of this is the address
759 of a function being called.
760
761 Therefore, if this register is marked as being used exactly
762 once if we are in a loop with calls (a "large loop"), see if
763 we can replace the usage of this register with the source
764 of this SET. If we can, delete this insn.
765
766 Don't do this if P has a REG_RETVAL note or if we have
767 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
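
	      /* A hypothetical illustration: if the loop body contains

		     (set (reg 200) (symbol_ref "foo"))
		     ...
		     (call_insn (call (mem (reg 200)) ...))

		 and reg 200 is used nowhere else, the symbol_ref is
		 substituted directly into the call and the set is deleted,
		 instead of hoisting the set out of the loop.  */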
768
769 if (loop_info->has_call
770 && regs->array[regno].single_usage != 0
771 && regs->array[regno].single_usage != const0_rtx
772 && REGNO_FIRST_UID (regno) == INSN_UID (p)
773 && (REGNO_LAST_UID (regno)
774 == INSN_UID (regs->array[regno].single_usage))
775 && regs->array[regno].set_in_loop == 1
776 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
777 && ! side_effects_p (SET_SRC (set))
778 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
779 && (! SMALL_REGISTER_CLASSES
780 || (! (GET_CODE (SET_SRC (set)) == REG
781 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
782 /* This test is not redundant; SET_SRC (set) might be
783 a call-clobbered register and the life of REGNO
784 might span a call. */
785 && ! modified_between_p (SET_SRC (set), p,
786 regs->array[regno].single_usage)
787 && no_labels_between_p (p, regs->array[regno].single_usage)
788 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
789 regs->array[regno].single_usage))
790 {
791 /* Replace any usage in a REG_EQUAL note. Must copy the
792 new source, so that we don't get rtx sharing between the
793 SET_SOURCE and REG_NOTES of insn p. */
794 REG_NOTES (regs->array[regno].single_usage)
795 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
796 SET_DEST (set), copy_rtx (SET_SRC (set)));
797
798 PUT_CODE (p, NOTE);
799 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
800 NOTE_SOURCE_FILE (p) = 0;
801 regs->array[regno].set_in_loop = 0;
802 continue;
803 }
804
805 m = (struct movable *) xmalloc (sizeof (struct movable));
806 m->next = 0;
807 m->insn = p;
808 m->set_src = src;
809 m->dependencies = dependencies;
810 m->set_dest = SET_DEST (set);
811 m->force = 0;
812 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
813 m->done = 0;
814 m->forces = 0;
815 m->partial = 0;
816 m->move_insn = move_insn;
817 m->move_insn_first = 0;
818 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
819 m->savemode = VOIDmode;
820 m->regno = regno;
821 /* Set M->cond if either loop_invariant_p
822 or consec_sets_invariant_p returned 2
823 (only conditionally invariant). */
824 m->cond = ((tem | tem1 | tem2) > 1);
825 m->global = LOOP_REG_GLOBAL_P (loop, regno);
826 m->match = 0;
827 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
828 m->savings = regs->array[regno].n_times_set;
829 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
830 m->savings += libcall_benefit (p);
831 regs->array[regno].set_in_loop = move_insn ? -2 : -1;
832 /* Add M to the end of the chain MOVABLES. */
833 loop_movables_add (movables, m);
834
835 if (m->consec > 0)
836 {
837 /* It is possible for the first instruction to have a
838 REG_EQUAL note but a non-invariant SET_SRC, so we must
839 remember the status of the first instruction in case
840 the last instruction doesn't have a REG_EQUAL note. */
841 m->move_insn_first = m->move_insn;
842
843 /* Skip this insn, not checking REG_LIBCALL notes. */
844 p = next_nonnote_insn (p);
845 /* Skip the consecutive insns, if there are any. */
846 p = skip_consec_insns (p, m->consec);
847 /* Back up to the last insn of the consecutive group. */
848 p = prev_nonnote_insn (p);
849
850 /* We must now reset m->move_insn, m->is_equiv, and possibly
851 m->set_src to correspond to the effects of all the
852 insns. */
853 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
854 if (temp)
855 m->set_src = XEXP (temp, 0), m->move_insn = 1;
856 else
857 {
858 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
859 if (temp && CONSTANT_P (XEXP (temp, 0)))
860 m->set_src = XEXP (temp, 0), m->move_insn = 1;
861 else
862 m->move_insn = 0;
863
864 }
865 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
866 }
867 }
868 /* If this register is always set within a STRICT_LOW_PART
869 or set to zero, then its high bytes are constant.
870 So clear them outside the loop and within the loop
871 just load the low bytes.
872 We must check that the machine has an instruction to do so.
873 Also, if the value loaded into the register
874 depends on the same register, this cannot be done. */
875 else if (SET_SRC (set) == const0_rtx
876 && GET_CODE (NEXT_INSN (p)) == INSN
877 && (set1 = single_set (NEXT_INSN (p)))
878 && GET_CODE (set1) == SET
879 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
880 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
881 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
882 == SET_DEST (set))
883 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
884 {
885 register int regno = REGNO (SET_DEST (set));
886 if (regs->array[regno].set_in_loop == 2)
887 {
888 register struct movable *m;
889 m = (struct movable *) xmalloc (sizeof (struct movable));
890 m->next = 0;
891 m->insn = p;
892 m->set_dest = SET_DEST (set);
893 m->dependencies = 0;
894 m->force = 0;
895 m->consec = 0;
896 m->done = 0;
897 m->forces = 0;
898 m->move_insn = 0;
899 m->move_insn_first = 0;
900 m->partial = 1;
901 /* If the insn may not be executed on some cycles,
902 we can't clear the whole reg; clear just high part.
903 Not even if the reg is used only within this loop.
904 Consider this:
905 while (1)
906 while (s != t) {
907 if (foo ()) x = *s;
908 use (x);
909 }
910 Clearing x before the inner loop could clobber a value
911 being saved from the last time around the outer loop.
912 However, if the reg is not used outside this loop
913 and all uses of the register are in the same
914 basic block as the store, there is no problem.
915
916 If this insn was made by loop, we don't know its
917 INSN_LUID and hence must make a conservative
918 assumption. */
919 m->global = (INSN_UID (p) >= max_uid_for_loop
920 || LOOP_REG_GLOBAL_P (loop, regno)
921 || (labels_in_range_p
922 (p, REGNO_FIRST_LUID (regno))));
923 if (maybe_never && m->global)
924 m->savemode = GET_MODE (SET_SRC (set1));
925 else
926 m->savemode = VOIDmode;
927 m->regno = regno;
928 m->cond = 0;
929 m->match = 0;
930 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
931 m->savings = 1;
932 regs->array[regno].set_in_loop = -1;
933 /* Add M to the end of the chain MOVABLES. */
934 loop_movables_add (movables, m);
935 }
936 }
937 }
938 /* Past a call insn, we get to insns which might not be executed
939 because the call might exit. This matters for insns that trap.
940 Constant and pure call insns always return, so they don't count. */
941 else if (GET_CODE (p) == CALL_INSN && ! CONST_CALL_P (p))
942 call_passed = 1;
943 /* Past a label or a jump, we get to insns for which we
944 can't count on whether or how many times they will be
945 executed during each iteration. Therefore, we can
946 only move out sets of trivial variables
947 (those not used after the loop). */
948 /* Similar code appears twice in strength_reduce. */
949 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
950 /* If we enter the loop in the middle, and scan around to the
951 beginning, don't set maybe_never for that. This must be an
952 unconditional jump, otherwise the code at the top of the
953 loop might never be executed. Unconditional jumps are
		  followed by a barrier and then the loop end.  */
955 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
956 && NEXT_INSN (NEXT_INSN (p)) == loop_end
957 && any_uncondjump_p (p)))
958 maybe_never = 1;
959 else if (GET_CODE (p) == NOTE)
960 {
961 /* At the virtual top of a converted loop, insns are again known to
962 be executed: logically, the loop begins here even though the exit
963 code has been duplicated. */
964 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
965 maybe_never = call_passed = 0;
966 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
967 loop_depth++;
968 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
969 loop_depth--;
970 }
971 }
972
973 /* If one movable subsumes another, ignore that other. */
974
975 ignore_some_movables (movables);
976
  /* For each movable insn, see if the reg that it loads
     dies in another conditionally movable insn, i.e., its last use is there.
979 If so, record that the second insn "forces" the first one,
980 since the second can be moved only if the first is. */
981
982 force_movables (movables);
983
984 /* See if there are multiple movable insns that load the same value.
985 If there are, make all but the first point at the first one
986 through the `match' field, and add the priorities of them
987 all together as the priority of the first. */
988
989 combine_movables (movables, regs);
990
991 /* Now consider each movable insn to decide whether it is worth moving.
992 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
993
     Generally this increases code size, so do not move movables when
995 optimizing for code size. */
996
997 if (! optimize_size)
998 move_movables (loop, movables, threshold, insn_count);
999
1000 /* Now candidates that still are negative are those not moved.
1001 Change regs->array[I].set_in_loop to indicate that those are not actually
1002 invariant. */
1003 for (i = 0; i < regs->num; i++)
1004 if (regs->array[i].set_in_loop < 0)
1005 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1006
1007 /* Now that we've moved some things out of the loop, we might be able to
1008 hoist even more memory references. */
1009 load_mems (loop);
1010
1011 /* Recalculate regs->array if load_mems has created new registers. */
1012 if (max_reg_num () > regs->num)
1013 loop_regs_scan (loop, 0);
1014
1015 for (update_start = loop_start;
1016 PREV_INSN (update_start)
1017 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1018 update_start = PREV_INSN (update_start))
1019 ;
1020 update_end = NEXT_INSN (loop_end);
1021
1022 reg_scan_update (update_start, update_end, loop_max_reg);
1023 loop_max_reg = max_reg_num ();
1024
1025 if (flag_strength_reduce)
1026 {
1027 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1028 /* Ensure our label doesn't go away. */
1029 LABEL_NUSES (update_end)++;
1030
1031 strength_reduce (loop, flags);
1032
1033 reg_scan_update (update_start, update_end, loop_max_reg);
1034 loop_max_reg = max_reg_num ();
1035
1036 if (update_end && GET_CODE (update_end) == CODE_LABEL
1037 && --LABEL_NUSES (update_end) == 0)
1038 delete_insn (update_end);
1039 }
1040
1041
  /* The movable information was required through strength reduction above;
     it is no longer needed, so free it.  */
1043 loop_movables_free (movables);
1044
1045 free (regs->array);
1046 regs->array = 0;
1047 regs->num = 0;
1048 }
1049 \f
1050 /* Add elements to *OUTPUT to record all the pseudo-regs
1051 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1052
1053 void
1054 record_excess_regs (in_this, not_in_this, output)
1055 rtx in_this, not_in_this;
1056 rtx *output;
1057 {
1058 enum rtx_code code;
1059 const char *fmt;
1060 int i;
1061
1062 code = GET_CODE (in_this);
1063
1064 switch (code)
1065 {
1066 case PC:
1067 case CC0:
1068 case CONST_INT:
1069 case CONST_DOUBLE:
1070 case CONST:
1071 case SYMBOL_REF:
1072 case LABEL_REF:
1073 return;
1074
1075 case REG:
1076 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1077 && ! reg_mentioned_p (in_this, not_in_this))
1078 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1079 return;
1080
1081 default:
1082 break;
1083 }
1084
1085 fmt = GET_RTX_FORMAT (code);
1086 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1087 {
1088 int j;
1089
1090 switch (fmt[i])
1091 {
1092 case 'E':
1093 for (j = 0; j < XVECLEN (in_this, i); j++)
1094 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1095 break;
1096
1097 case 'e':
1098 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1099 break;
1100 }
1101 }
1102 }
1103 \f
1104 /* Check what regs are referred to in the libcall block ending with INSN,
1105 aside from those mentioned in the equivalent value.
1106 If there are none, return 0.
1107 If there are one or more, return an EXPR_LIST containing all of them. */
1108
1109 rtx
1110 libcall_other_reg (insn, equiv)
1111 rtx insn, equiv;
1112 {
1113 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1114 rtx p = XEXP (note, 0);
1115 rtx output = 0;
1116
1117 /* First, find all the regs used in the libcall block
1118 that are not mentioned as inputs to the result. */
1119
1120 while (p != insn)
1121 {
1122 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1123 || GET_CODE (p) == CALL_INSN)
1124 record_excess_regs (PATTERN (p), equiv, &output);
1125 p = NEXT_INSN (p);
1126 }
1127
1128 return output;
1129 }
1130 \f
1131 /* Return 1 if all uses of REG
1132 are between INSN and the end of the basic block. */
1133
1134 static int
1135 reg_in_basic_block_p (insn, reg)
1136 rtx insn, reg;
1137 {
1138 int regno = REGNO (reg);
1139 rtx p;
1140
1141 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1142 return 0;
1143
1144 /* Search this basic block for the already recorded last use of the reg. */
1145 for (p = insn; p; p = NEXT_INSN (p))
1146 {
1147 switch (GET_CODE (p))
1148 {
1149 case NOTE:
1150 break;
1151
1152 case INSN:
1153 case CALL_INSN:
1154 /* Ordinary insn: if this is the last use, we win. */
1155 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1156 return 1;
1157 break;
1158
1159 case JUMP_INSN:
1160 /* Jump insn: if this is the last use, we win. */
1161 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1162 return 1;
1163 /* Otherwise, it's the end of the basic block, so we lose. */
1164 return 0;
1165
1166 case CODE_LABEL:
1167 case BARRIER:
1168 /* It's the end of the basic block, so we lose. */
1169 return 0;
1170
1171 default:
1172 break;
1173 }
1174 }
1175
1176 /* The "last use" that was recorded can't be found after the first
1177 use. This can happen when the last use was deleted while
1178 processing an inner loop, this inner loop was then completely
1179 unrolled, and the outer loop is always exited after the inner loop,
1180 so that everything after the first use becomes a single basic block. */
1181 return 1;
1182 }
1183 \f
1184 /* Compute the benefit of eliminating the insns in the block whose
1185 last insn is LAST. This may be a group of insns used to compute a
1186 value directly or can contain a library call. */
1187
1188 static int
1189 libcall_benefit (last)
1190 rtx last;
1191 {
1192 rtx insn;
1193 int benefit = 0;
1194
1195 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1196 insn != last; insn = NEXT_INSN (insn))
1197 {
1198 if (GET_CODE (insn) == CALL_INSN)
1199 benefit += 10; /* Assume at least this many insns in a library
1200 routine. */
1201 else if (GET_CODE (insn) == INSN
1202 && GET_CODE (PATTERN (insn)) != USE
1203 && GET_CODE (PATTERN (insn)) != CLOBBER)
1204 benefit++;
1205 }
1206
1207 return benefit;
1208 }
1209 \f
1210 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1211
1212 static rtx
1213 skip_consec_insns (insn, count)
1214 rtx insn;
1215 int count;
1216 {
1217 for (; count > 0; count--)
1218 {
1219 rtx temp;
1220
1221 /* If first insn of libcall sequence, skip to end. */
1222 /* Do this at start of loop, since INSN is guaranteed to
1223 be an insn here. */
1224 if (GET_CODE (insn) != NOTE
1225 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1226 insn = XEXP (temp, 0);
1227
1228 do
1229 insn = NEXT_INSN (insn);
1230 while (GET_CODE (insn) == NOTE);
1231 }
1232
1233 return insn;
1234 }
1235
1236 /* Ignore any movable whose insn falls within a libcall
1237 which is part of another movable.
1238 We make use of the fact that the movable for the libcall value
1239 was made later and so appears later on the chain. */
1240
1241 static void
1242 ignore_some_movables (movables)
1243 struct loop_movables *movables;
1244 {
1245 register struct movable *m, *m1;
1246
1247 for (m = movables->head; m; m = m->next)
1248 {
1249 /* Is this a movable for the value of a libcall? */
1250 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1251 if (note)
1252 {
1253 rtx insn;
1254 /* Check for earlier movables inside that range,
1255 and mark them invalid. We cannot use LUIDs here because
1256 insns created by loop.c for prior loops don't have LUIDs.
1257 Rather than reject all such insns from movables, we just
1258 explicitly check each insn in the libcall (since invariant
1259 libcalls aren't that common). */
1260 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1261 for (m1 = movables->head; m1 != m; m1 = m1->next)
1262 if (m1->insn == insn)
1263 m1->done = 1;
1264 }
1265 }
1266 }
1267
/* For each movable insn, see if the reg that it loads
   dies in another conditionally movable insn, i.e., its last use is there.
1270 If so, record that the second insn "forces" the first one,
1271 since the second can be moved only if the first is. */
1272
1273 static void
1274 force_movables (movables)
1275 struct loop_movables *movables;
1276 {
1277 register struct movable *m, *m1;
1278 for (m1 = movables->head; m1; m1 = m1->next)
1279 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1280 if (!m1->partial && !m1->done)
1281 {
1282 int regno = m1->regno;
1283 for (m = m1->next; m; m = m->next)
1284 /* ??? Could this be a bug? What if CSE caused the
1285 register of M1 to be used after this insn?
1286 Since CSE does not update regno_last_uid,
1287 this insn M->insn might not be where it dies.
1288 But very likely this doesn't matter; what matters is
1289 that M's reg is computed from M1's reg. */
1290 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1291 && !m->done)
1292 break;
1293 if (m != 0 && m->set_src == m1->set_dest
1294 /* If m->consec, m->set_src isn't valid. */
1295 && m->consec == 0)
1296 m = 0;
1297
	/* Increase the priority of moving the first insn
1299 since it permits the second to be moved as well. */
1300 if (m != 0)
1301 {
1302 m->forces = m1;
1303 m1->lifetime += m->lifetime;
1304 m1->savings += m->savings;
1305 }
1306 }
1307 }
1308 \f
1309 /* Find invariant expressions that are equal and can be combined into
1310 one register. */
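
/* For instance (a made-up example), if the loop body contains

	(set (reg 101) (const_int 64))
	...
	(set (reg 117) (const_int 64))

   and both insns are movable, the second is recorded as matching the first
   through the `match' field, so only one register need be loaded before the
   loop and reg 117 can later be replaced by reg 101.  */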
1311
1312 static void
1313 combine_movables (movables, regs)
1314 struct loop_movables *movables;
1315 struct loop_regs *regs;
1316 {
1317 register struct movable *m;
1318 char *matched_regs = (char *) xmalloc (regs->num);
1319 enum machine_mode mode;
1320
1321 /* Regs that are set more than once are not allowed to match
1322 or be matched. I'm no longer sure why not. */
1323 /* Perhaps testing m->consec_sets would be more appropriate here? */
1324
1325 for (m = movables->head; m; m = m->next)
1326 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1327 && !m->partial)
1328 {
1329 register struct movable *m1;
1330 int regno = m->regno;
1331
1332 memset (matched_regs, 0, regs->num);
1333 matched_regs[regno] = 1;
1334
1335 /* We want later insns to match the first one. Don't make the first
1336 one match any later ones. So start this loop at m->next. */
1337 for (m1 = m->next; m1; m1 = m1->next)
1338 if (m != m1 && m1->match == 0
1339 && regs->array[m1->regno].n_times_set == 1
1340 /* A reg used outside the loop mustn't be eliminated. */
1341 && !m1->global
1342 /* A reg used for zero-extending mustn't be eliminated. */
1343 && !m1->partial
1344 && (matched_regs[m1->regno]
1345 ||
1346 (
1347 /* Can combine regs with different modes loaded from the
1348 same constant only if the modes are the same or
1349 if both are integer modes with M wider or the same
1350 width as M1. The check for integer is redundant, but
1351 safe, since the only case of differing destination
1352 modes with equal sources is when both sources are
1353 VOIDmode, i.e., CONST_INT. */
1354 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1355 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1356 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1357 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1358 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1359 /* See if the source of M1 says it matches M. */
1360 && ((GET_CODE (m1->set_src) == REG
1361 && matched_regs[REGNO (m1->set_src)])
1362 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1363 movables, regs))))
1364 && ((m->dependencies == m1->dependencies)
1365 || rtx_equal_p (m->dependencies, m1->dependencies)))
1366 {
1367 m->lifetime += m1->lifetime;
1368 m->savings += m1->savings;
1369 m1->done = 1;
1370 m1->match = m;
1371 matched_regs[m1->regno] = 1;
1372 }
1373 }
1374
1375 /* Now combine the regs used for zero-extension.
1376 This can be done for those not marked `global'
1377 provided their lives don't overlap. */
1378
1379 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1380 mode = GET_MODE_WIDER_MODE (mode))
1381 {
1382 register struct movable *m0 = 0;
1383
1384 /* Combine all the registers for extension from mode MODE.
1385 Don't combine any that are used outside this loop. */
1386 for (m = movables->head; m; m = m->next)
1387 if (m->partial && ! m->global
1388 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1389 {
1390 register struct movable *m1;
1391 int first = REGNO_FIRST_LUID (m->regno);
1392 int last = REGNO_LAST_LUID (m->regno);
1393
1394 if (m0 == 0)
1395 {
1396 /* First one: don't check for overlap, just record it. */
1397 m0 = m;
1398 continue;
1399 }
1400
1401 /* Make sure they extend to the same mode.
1402 (Almost always true.) */
1403 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1404 continue;
1405
1406 /* We already have one: check for overlap with those
1407 already combined together. */
1408 for (m1 = movables->head; m1 != m; m1 = m1->next)
1409 if (m1 == m0 || (m1->partial && m1->match == m0))
1410 if (! (REGNO_FIRST_LUID (m1->regno) > last
1411 || REGNO_LAST_LUID (m1->regno) < first))
1412 goto overlap;
1413
1414 /* No overlap: we can combine this with the others. */
1415 m0->lifetime += m->lifetime;
1416 m0->savings += m->savings;
1417 m->done = 1;
1418 m->match = m0;
1419
1420 overlap:
1421 ;
1422 }
1423 }
1424
1425 /* Clean up. */
1426 free (matched_regs);
1427 }
1428
1429 /* Returns the number of movable instructions in LOOP that were not
1430 moved outside the loop. */
1431
1432 static int
1433 num_unmoved_movables (loop)
1434 const struct loop *loop;
1435 {
1436 int num = 0;
1437 struct movable *m;
1438
1439 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1440 if (!m->done)
1441 ++num;
1442
1443 return num;
1444 }
1445
1446 \f
1447 /* Return 1 if regs X and Y will become the same if moved. */
1448
1449 static int
1450 regs_match_p (x, y, movables)
1451 rtx x, y;
1452 struct loop_movables *movables;
1453 {
1454 unsigned int xn = REGNO (x);
1455 unsigned int yn = REGNO (y);
1456 struct movable *mx, *my;
1457
1458 for (mx = movables->head; mx; mx = mx->next)
1459 if (mx->regno == xn)
1460 break;
1461
1462 for (my = movables->head; my; my = my->next)
1463 if (my->regno == yn)
1464 break;
1465
1466 return (mx && my
1467 && ((mx->match == my->match && mx->match != 0)
1468 || mx->match == my
1469 || mx == my->match));
1470 }
1471
1472 /* Return 1 if X and Y are identical-looking rtx's.
1473 This is the Lisp function EQUAL for rtx arguments.
1474
1475 If two registers are matching movables or a movable register and an
1476 equivalent constant, consider them equal. */
1477
1478 static int
1479 rtx_equal_for_loop_p (x, y, movables, regs)
1480 rtx x, y;
1481 struct loop_movables *movables;
1482 struct loop_regs *regs;
1483 {
1484 register int i;
1485 register int j;
1486 register struct movable *m;
1487 register enum rtx_code code;
1488 register const char *fmt;
1489
1490 if (x == y)
1491 return 1;
1492 if (x == 0 || y == 0)
1493 return 0;
1494
1495 code = GET_CODE (x);
1496
1497 /* If we have a register and a constant, they may sometimes be
1498 equal. */
1499 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1500 && CONSTANT_P (y))
1501 {
1502 for (m = movables->head; m; m = m->next)
1503 if (m->move_insn && m->regno == REGNO (x)
1504 && rtx_equal_p (m->set_src, y))
1505 return 1;
1506 }
1507 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1508 && CONSTANT_P (x))
1509 {
1510 for (m = movables->head; m; m = m->next)
1511 if (m->move_insn && m->regno == REGNO (y)
1512 && rtx_equal_p (m->set_src, x))
1513 return 1;
1514 }
1515
1516 /* Otherwise, rtx's of different codes cannot be equal. */
1517 if (code != GET_CODE (y))
1518 return 0;
1519
1520 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1521 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1522
1523 if (GET_MODE (x) != GET_MODE (y))
1524 return 0;
1525
1526 /* These three types of rtx's can be compared nonrecursively. */
1527 if (code == REG)
1528 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1529
1530 if (code == LABEL_REF)
1531 return XEXP (x, 0) == XEXP (y, 0);
1532 if (code == SYMBOL_REF)
1533 return XSTR (x, 0) == XSTR (y, 0);
1534
  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */
1537
1538 fmt = GET_RTX_FORMAT (code);
1539 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1540 {
1541 switch (fmt[i])
1542 {
1543 case 'w':
1544 if (XWINT (x, i) != XWINT (y, i))
1545 return 0;
1546 break;
1547
1548 case 'i':
1549 if (XINT (x, i) != XINT (y, i))
1550 return 0;
1551 break;
1552
1553 case 'E':
1554 /* Two vectors must have the same length. */
1555 if (XVECLEN (x, i) != XVECLEN (y, i))
1556 return 0;
1557
1558 /* And the corresponding elements must match. */
1559 for (j = 0; j < XVECLEN (x, i); j++)
1560 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1561 movables, regs) == 0)
1562 return 0;
1563 break;
1564
1565 case 'e':
1566 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1567 == 0)
1568 return 0;
1569 break;
1570
1571 case 's':
1572 if (strcmp (XSTR (x, i), XSTR (y, i)))
1573 return 0;
1574 break;
1575
1576 case 'u':
1577 /* These are just backpointers, so they don't matter. */
1578 break;
1579
1580 case '0':
1581 break;
1582
1583 /* It is believed that rtx's at this level will never
1584 contain anything but integers and other rtx's,
1585 except for within LABEL_REFs and SYMBOL_REFs. */
1586 default:
1587 abort ();
1588 }
1589 }
1590 return 1;
1591 }
1592 \f
1593 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1594 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1595 references is incremented once for each added note. */
1596
1597 static void
1598 add_label_notes (x, insns)
1599 rtx x;
1600 rtx insns;
1601 {
1602 enum rtx_code code = GET_CODE (x);
1603 int i, j;
1604 const char *fmt;
1605 rtx insn;
1606
1607 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1608 {
1609 /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.
1611
1612 We no longer ignore such label references (see LABEL_REF handling in
1613 mark_jump_label for additional information). */
1614 for (insn = insns; insn; insn = NEXT_INSN (insn))
1615 if (reg_mentioned_p (XEXP (x, 0), insn))
1616 {
1617 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1618 REG_NOTES (insn));
1619 if (LABEL_P (XEXP (x, 0)))
1620 LABEL_NUSES (XEXP (x, 0))++;
1621 }
1622 }
1623
1624 fmt = GET_RTX_FORMAT (code);
1625 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1626 {
1627 if (fmt[i] == 'e')
1628 add_label_notes (XEXP (x, i), insns);
1629 else if (fmt[i] == 'E')
1630 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1631 add_label_notes (XVECEXP (x, i, j), insns);
1632 }
1633 }
1634 \f
1635 /* Scan MOVABLES, and move the insns that deserve to be moved.
1636 If two matching movables are combined, replace one reg with the
1637 other throughout. */
1638
1639 static void
1640 move_movables (loop, movables, threshold, insn_count)
1641 struct loop *loop;
1642 struct loop_movables *movables;
1643 int threshold;
1644 int insn_count;
1645 {
1646 struct loop_regs *regs = LOOP_REGS (loop);
1647 int nregs = regs->num;
1648 rtx new_start = 0;
1649 register struct movable *m;
1650 register rtx p;
1651 rtx loop_start = loop->start;
1652 rtx loop_end = loop->end;
1653 /* Map of pseudo-register replacements to handle combining
1654 when we move several insns that load the same value
1655 into different pseudo-registers. */
1656 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1657 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1658
1659 for (m = movables->head; m; m = m->next)
1660 {
1661 /* Describe this movable insn. */
1662
1663 if (loop_dump_stream)
1664 {
1665 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1666 INSN_UID (m->insn), m->regno, m->lifetime);
1667 if (m->consec > 0)
1668 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1669 if (m->cond)
1670 fprintf (loop_dump_stream, "cond ");
1671 if (m->force)
1672 fprintf (loop_dump_stream, "force ");
1673 if (m->global)
1674 fprintf (loop_dump_stream, "global ");
1675 if (m->done)
1676 fprintf (loop_dump_stream, "done ");
1677 if (m->move_insn)
1678 fprintf (loop_dump_stream, "move-insn ");
1679 if (m->match)
1680 fprintf (loop_dump_stream, "matches %d ",
1681 INSN_UID (m->match->insn));
1682 if (m->forces)
1683 fprintf (loop_dump_stream, "forces %d ",
1684 INSN_UID (m->forces->insn));
1685 }
1686
1687 /* Ignore the insn if it's already done (it matched something else).
1688 Otherwise, see if it is now safe to move. */
1689
1690 if (!m->done
1691 && (! m->cond
1692 || (1 == loop_invariant_p (loop, m->set_src)
1693 && (m->dependencies == 0
1694 || 1 == loop_invariant_p (loop, m->dependencies))
1695 && (m->consec == 0
1696 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1697 m->consec + 1,
1698 m->insn))))
1699 && (! m->forces || m->forces->done))
1700 {
1701 register int regno;
1702 register rtx p;
1703 int savings = m->savings;
1704
1705 /* We have an insn that is safe to move.
1706 Compute its desirability. */
1707
1708 p = m->insn;
1709 regno = m->regno;
1710
1711 if (loop_dump_stream)
1712 fprintf (loop_dump_stream, "savings %d ", savings);
1713
1714 if (regs->array[regno].moved_once && loop_dump_stream)
1715 fprintf (loop_dump_stream, "halved since already moved ");
1716
1717 /* An insn MUST be moved if we already moved something else
1718 which is safe only if this one is moved too: that is,
1719 if already_moved[REGNO] is nonzero. */
1720
1721 /* An insn is desirable to move if the new lifetime of the
1722 register is no more than THRESHOLD times the old lifetime.
1723 If it's not desirable, it means the loop is so big
1724 that moving won't speed things up much,
1725 and it is liable to make register usage worse. */
1726
1727 /* It is also desirable to move if it can be moved at no
1728 extra cost because something else was already moved. */
1729
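/* Worked example with illustrative numbers: with THRESHOLD 6,
   savings 2 and lifetime 10, the score 6 * 2 * 10 = 120 meets an
   insn_count of 100, so the insn is moved; if this register had
   already been moved out of another loop, the bar doubles to 200
   and this clause alone would not justify the move.  */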
1730 if (already_moved[regno]
1731 || flag_move_all_movables
1732 || (threshold * savings * m->lifetime) >=
1733 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1734 || (m->forces && m->forces->done
1735 && regs->array[m->forces->regno].n_times_set == 1))
1736 {
1737 int count;
1738 register struct movable *m1;
1739 rtx first = NULL_RTX;
1740
1741 /* Now move the insns that set the reg. */
1742
1743 if (m->partial && m->match)
1744 {
1745 rtx newpat, i1;
1746 rtx r1, r2;
1747 /* Find the end of this chain of matching regs.
1748 Thus, we load each reg in the chain from that one reg.
1749 And that reg is loaded with 0 directly,
1750 since it has ->match == 0. */
1751 for (m1 = m; m1->match; m1 = m1->match);
1752 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1753 SET_DEST (PATTERN (m1->insn)));
1754 i1 = loop_insn_hoist (loop, newpat);
1755
1756 /* Mark the moved, invariant reg as being allowed to
1757 share a hard reg with the other matching invariant. */
1758 REG_NOTES (i1) = REG_NOTES (m->insn);
1759 r1 = SET_DEST (PATTERN (m->insn));
1760 r2 = SET_DEST (PATTERN (m1->insn));
1761 regs_may_share
1762 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1763 gen_rtx_EXPR_LIST (VOIDmode, r2,
1764 regs_may_share));
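/* The chain built here has the nested form
   (expr_list r1 (expr_list r2 <previous regs_may_share>)), recording
   that the two invariant pseudos may occupy the same hard register.  */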
1765 delete_insn (m->insn);
1766
1767 if (new_start == 0)
1768 new_start = i1;
1769
1770 if (loop_dump_stream)
1771 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1772 }
1773 /* If we are to re-generate the item being moved with a
1774 new move insn, first delete what we have and then emit
1775 the move insn before the loop. */
1776 else if (m->move_insn)
1777 {
1778 rtx i1, temp, seq;
1779
1780 for (count = m->consec; count >= 0; count--)
1781 {
1782 /* If this is the first insn of a library call sequence,
1783 skip to the end. */
1784 if (GET_CODE (p) != NOTE
1785 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1786 p = XEXP (temp, 0);
1787
1788 /* If this is the last insn of a libcall sequence, then
1789 delete every insn in the sequence except the last.
1790 The last insn is handled in the normal manner. */
1791 if (GET_CODE (p) != NOTE
1792 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1793 {
1794 temp = XEXP (temp, 0);
1795 while (temp != p)
1796 temp = delete_insn (temp);
1797 }
1798
1799 temp = p;
1800 p = delete_insn (p);
1801
1802 /* simplify_giv_expr expects that it can walk the insns
1803 at m->insn forwards and see this old sequence we are
1804 tossing here. delete_insn does preserve the next
1805 pointers, but when we skip over a NOTE we must fix
1806 it up. Otherwise that code walks into the non-deleted
1807 insn stream. */
1808 while (p && GET_CODE (p) == NOTE)
1809 p = NEXT_INSN (temp) = NEXT_INSN (p);
1810 }
1811
1812 start_sequence ();
1813 emit_move_insn (m->set_dest, m->set_src);
1814 temp = get_insns ();
1815 seq = gen_sequence ();
1816 end_sequence ();
1817
1818 add_label_notes (m->set_src, temp);
1819
1820 i1 = loop_insn_hoist (loop, seq);
1821 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1822 REG_NOTES (i1)
1823 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1824 m->set_src, REG_NOTES (i1));
1825
1826 if (loop_dump_stream)
1827 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1828
1829 /* The more regs we move, the less we like moving them. */
1830 threshold -= 3;
1831 }
1832 else
1833 {
1834 for (count = m->consec; count >= 0; count--)
1835 {
1836 rtx i1, temp;
1837
1838 /* If first insn of libcall sequence, skip to end. */
1839 /* Do this at start of loop, since p is guaranteed to
1840 be an insn here. */
1841 if (GET_CODE (p) != NOTE
1842 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1843 p = XEXP (temp, 0);
1844
1845 /* If last insn of libcall sequence, move all
1846 insns except the last before the loop. The last
1847 insn is handled in the normal manner. */
1848 if (GET_CODE (p) != NOTE
1849 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1850 {
1851 rtx fn_address = 0;
1852 rtx fn_reg = 0;
1853 rtx fn_address_insn = 0;
1854
1855 first = 0;
1856 for (temp = XEXP (temp, 0); temp != p;
1857 temp = NEXT_INSN (temp))
1858 {
1859 rtx body;
1860 rtx n;
1861 rtx next;
1862
1863 if (GET_CODE (temp) == NOTE)
1864 continue;
1865
1866 body = PATTERN (temp);
1867
1868 /* Find the next insn after TEMP,
1869 not counting USE or NOTE insns. */
1870 for (next = NEXT_INSN (temp); next != p;
1871 next = NEXT_INSN (next))
1872 if (! (GET_CODE (next) == INSN
1873 && GET_CODE (PATTERN (next)) == USE)
1874 && GET_CODE (next) != NOTE)
1875 break;
1876
1877 /* If that is the call, this may be the insn
1878 that loads the function address.
1879
1880 Extract the function address from the insn
1881 that loads it into a register.
1882 If this insn was cse'd, we get incorrect code.
1883
1884 So emit a new move insn that copies the
1885 function address into the register that the
1886 call insn will use. flow.c will delete any
1887 redundant stores that we have created. */
1888 if (GET_CODE (next) == CALL_INSN
1889 && GET_CODE (body) == SET
1890 && GET_CODE (SET_DEST (body)) == REG
1891 && (n = find_reg_note (temp, REG_EQUAL,
1892 NULL_RTX)))
1893 {
1894 fn_reg = SET_SRC (body);
1895 if (GET_CODE (fn_reg) != REG)
1896 fn_reg = SET_DEST (body);
1897 fn_address = XEXP (n, 0);
1898 fn_address_insn = temp;
1899 }
1900 /* We have the call insn.
1901 If it uses the register we suspect it might,
1902 load it with the correct address directly. */
1903 if (GET_CODE (temp) == CALL_INSN
1904 && fn_address != 0
1905 && reg_referenced_p (fn_reg, body))
1906 loop_insn_emit_after (loop, 0, fn_address_insn,
1907 gen_move_insn
1908 (fn_reg, fn_address));
1909
1910 if (GET_CODE (temp) == CALL_INSN)
1911 {
1912 i1 = loop_call_insn_hoist (loop, body);
1913 /* Because the USAGE information potentially
1914 contains objects other than hard registers
1915 we need to copy it. */
1916 if (CALL_INSN_FUNCTION_USAGE (temp))
1917 CALL_INSN_FUNCTION_USAGE (i1)
1918 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1919 }
1920 else
1921 i1 = loop_insn_hoist (loop, body);
1922 if (first == 0)
1923 first = i1;
1924 if (temp == fn_address_insn)
1925 fn_address_insn = i1;
1926 REG_NOTES (i1) = REG_NOTES (temp);
1927 delete_insn (temp);
1928 }
1929 if (new_start == 0)
1930 new_start = first;
1931 }
1932 if (m->savemode != VOIDmode)
1933 {
1934 /* P sets REG to zero; but we should clear only
1935 the bits that are not covered by the mode
1936 m->savemode. */
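/* For instance (illustrative), if m->savemode is QImode the mask below
   is (1 << 8) - 1 = 0xff, so the hoisted insn becomes reg = reg & 0xff:
   the upper bits are cleared once before the loop while the low byte,
   which the loop body still stores into, is left untouched.  */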
1937 rtx reg = m->set_dest;
1938 rtx sequence;
1939 rtx tem;
1940
1941 start_sequence ();
1942 tem = expand_binop
1943 (GET_MODE (reg), and_optab, reg,
1944 GEN_INT ((((HOST_WIDE_INT) 1
1945 << GET_MODE_BITSIZE (m->savemode)))
1946 - 1),
1947 reg, 1, OPTAB_LIB_WIDEN);
1948 if (tem == 0)
1949 abort ();
1950 if (tem != reg)
1951 emit_move_insn (reg, tem);
1952 sequence = gen_sequence ();
1953 end_sequence ();
1954 i1 = loop_insn_hoist (loop, sequence);
1955 }
1956 else if (GET_CODE (p) == CALL_INSN)
1957 {
1958 i1 = loop_call_insn_hoist (loop, PATTERN (p));
1959 /* Because the USAGE information potentially
1960 contains objects other than hard registers
1961 we need to copy it. */
1962 if (CALL_INSN_FUNCTION_USAGE (p))
1963 CALL_INSN_FUNCTION_USAGE (i1)
1964 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1965 }
1966 else if (count == m->consec && m->move_insn_first)
1967 {
1968 rtx seq;
1969 /* The SET_SRC might not be invariant, so we must
1970 use the REG_EQUAL note. */
1971 start_sequence ();
1972 emit_move_insn (m->set_dest, m->set_src);
1973 temp = get_insns ();
1974 seq = gen_sequence ();
1975 end_sequence ();
1976
1977 add_label_notes (m->set_src, temp);
1978
1979 i1 = loop_insn_hoist (loop, seq);
1980 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1981 REG_NOTES (i1)
1982 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1983 : REG_EQUAL),
1984 m->set_src, REG_NOTES (i1));
1985 }
1986 else
1987 i1 = loop_insn_hoist (loop, PATTERN (p));
1988
1989 if (REG_NOTES (i1) == 0)
1990 {
1991 REG_NOTES (i1) = REG_NOTES (p);
1992
1993 /* If there is a REG_EQUAL note present whose value
1994 is not loop invariant, then delete it, since it
1995 may cause problems with later optimization passes.
1996 It is possible for cse to create such notes
1997 like this as a result of record_jump_cond. */
1998
1999 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2000 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2001 remove_note (i1, temp);
2002 }
2003
2004 if (new_start == 0)
2005 new_start = i1;
2006
2007 if (loop_dump_stream)
2008 fprintf (loop_dump_stream, " moved to %d",
2009 INSN_UID (i1));
2010
2011 /* If library call, now fix the REG_NOTES that contain
2012 insn pointers, namely REG_LIBCALL on FIRST
2013 and REG_RETVAL on I1. */
2014 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2015 {
2016 XEXP (temp, 0) = first;
2017 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2018 XEXP (temp, 0) = i1;
2019 }
2020
2021 temp = p;
2022 delete_insn (p);
2023 p = NEXT_INSN (p);
2024
2025 /* simplify_giv_expr expects that it can walk the insns
2026 at m->insn forwards and see this old sequence we are
2027 tossing here. delete_insn does preserve the next
2028 pointers, but when we skip over a NOTE we must fix
2029 it up. Otherwise that code walks into the non-deleted
2030 insn stream. */
2031 while (p && GET_CODE (p) == NOTE)
2032 p = NEXT_INSN (temp) = NEXT_INSN (p);
2033 }
2034
2035 /* The more regs we move, the less we like moving them. */
2036 threshold -= 3;
2037 }
2038
2039 /* Any other movable that loads the same register
2040 MUST be moved. */
2041 already_moved[regno] = 1;
2042
2043 /* This reg has been moved out of one loop. */
2044 regs->array[regno].moved_once = 1;
2045
2046 /* The reg set here is now invariant. */
2047 if (! m->partial)
2048 regs->array[regno].set_in_loop = 0;
2049
2050 m->done = 1;
2051
2052 /* Change the length-of-life info for the register
2053 to say it lives at least the full length of this loop.
2054 This will help guide optimizations in outer loops. */
2055
2056 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2057 /* This is the old insn before all the moved insns.
2058 We can't use the moved insn because it is out of range
2059 in uid_luid. Only the old insns have luids. */
2060 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2061 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2062 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2063
2064 /* Combine with this moved insn any other matching movables. */
2065
2066 if (! m->partial)
2067 for (m1 = movables->head; m1; m1 = m1->next)
2068 if (m1->match == m)
2069 {
2070 rtx temp;
2071
2072 /* Schedule the reg loaded by M1
2073 for replacement so that it shares the reg of M.
2074 If the modes differ (only possible in restricted
2075 circumstances), make a SUBREG.
2076
2077 Note this assumes that the target dependent files
2078 treat REG and SUBREG equally, including within
2079 GO_IF_LEGITIMATE_ADDRESS and in all the
2080 predicates since we never verify that replacing the
2081 original register with a SUBREG results in a
2082 recognizable insn. */
2083 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2084 reg_map[m1->regno] = m->set_dest;
2085 else
2086 reg_map[m1->regno]
2087 = gen_lowpart_common (GET_MODE (m1->set_dest),
2088 m->set_dest);
2089
2090 /* Get rid of the matching insn
2091 and prevent further processing of it. */
2092 m1->done = 1;
2093
2094 /* If library call, delete all insns except the last,
2095 which is deleted below. */
2096 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2097 NULL_RTX)))
2098 {
2099 for (temp = XEXP (temp, 0); temp != m1->insn;
2100 temp = NEXT_INSN (temp))
2101 delete_insn (temp);
2102 }
2103 delete_insn (m1->insn);
2104
2105 /* Any other movable that loads the same register
2106 MUST be moved. */
2107 already_moved[m1->regno] = 1;
2108
2109 /* The reg merged here is now invariant,
2110 if the reg it matches is invariant. */
2111 if (! m->partial)
2112 regs->array[m1->regno].set_in_loop = 0;
2113 }
2114 }
2115 else if (loop_dump_stream)
2116 fprintf (loop_dump_stream, "not desirable");
2117 }
2118 else if (loop_dump_stream && !m->match)
2119 fprintf (loop_dump_stream, "not safe");
2120
2121 if (loop_dump_stream)
2122 fprintf (loop_dump_stream, "\n");
2123 }
2124
2125 if (new_start == 0)
2126 new_start = loop_start;
2127
2128 /* Go through all the instructions in the loop, making
2129 all the register substitutions scheduled in REG_MAP. */
2130 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2131 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2132 || GET_CODE (p) == CALL_INSN)
2133 {
2134 replace_regs (PATTERN (p), reg_map, nregs, 0);
2135 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2136 INSN_CODE (p) = -1;
2137 }
2138
2139 /* Clean up. */
2140 free (reg_map);
2141 free (already_moved);
2142 }
2143
2144
2145 static void
2146 loop_movables_add (movables, m)
2147 struct loop_movables *movables;
2148 struct movable *m;
2149 {
2150 if (movables->head == 0)
2151 movables->head = m;
2152 else
2153 movables->last->next = m;
2154 movables->last = m;
2155 }
2156
2157
2158 static void
2159 loop_movables_free (movables)
2160 struct loop_movables *movables;
2161 {
2162 struct movable *m;
2163 struct movable *m_next;
2164
2165 for (m = movables->head; m; m = m_next)
2166 {
2167 m_next = m->next;
2168 free (m);
2169 }
2170 }
2171 \f
2172 #if 0
2173 /* Scan X and replace the address of any MEM in it with ADDR.
2174 REG is the address that MEM should have before the replacement. */
2175
2176 static void
2177 replace_call_address (x, reg, addr)
2178 rtx x, reg, addr;
2179 {
2180 register enum rtx_code code;
2181 register int i;
2182 register const char *fmt;
2183
2184 if (x == 0)
2185 return;
2186 code = GET_CODE (x);
2187 switch (code)
2188 {
2189 case PC:
2190 case CC0:
2191 case CONST_INT:
2192 case CONST_DOUBLE:
2193 case CONST:
2194 case SYMBOL_REF:
2195 case LABEL_REF:
2196 case REG:
2197 return;
2198
2199 case SET:
2200 /* Short cut for very common case. */
2201 replace_call_address (XEXP (x, 1), reg, addr);
2202 return;
2203
2204 case CALL:
2205 /* Short cut for very common case. */
2206 replace_call_address (XEXP (x, 0), reg, addr);
2207 return;
2208
2209 case MEM:
2210 /* If this MEM uses a reg other than the one we expected,
2211 something is wrong. */
2212 if (XEXP (x, 0) != reg)
2213 abort ();
2214 XEXP (x, 0) = addr;
2215 return;
2216
2217 default:
2218 break;
2219 }
2220
2221 fmt = GET_RTX_FORMAT (code);
2222 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2223 {
2224 if (fmt[i] == 'e')
2225 replace_call_address (XEXP (x, i), reg, addr);
2226 else if (fmt[i] == 'E')
2227 {
2228 register int j;
2229 for (j = 0; j < XVECLEN (x, i); j++)
2230 replace_call_address (XVECEXP (x, i, j), reg, addr);
2231 }
2232 }
2233 }
2234 #endif
2235 \f
2236 /* Return the number of memory refs to addresses that vary
2237 in the rtx X. */
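/* For example (pseudo register number illustrative),
   (mem:SI (reg 65)) counts as one nonfixed read when its address is
   not known to be loop invariant and as zero when it is; reads nested
   inside the address of another MEM accumulate recursively.  */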
2238
2239 static int
2240 count_nonfixed_reads (loop, x)
2241 const struct loop *loop;
2242 rtx x;
2243 {
2244 register enum rtx_code code;
2245 register int i;
2246 register const char *fmt;
2247 int value;
2248
2249 if (x == 0)
2250 return 0;
2251
2252 code = GET_CODE (x);
2253 switch (code)
2254 {
2255 case PC:
2256 case CC0:
2257 case CONST_INT:
2258 case CONST_DOUBLE:
2259 case CONST:
2260 case SYMBOL_REF:
2261 case LABEL_REF:
2262 case REG:
2263 return 0;
2264
2265 case MEM:
2266 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2267 + count_nonfixed_reads (loop, XEXP (x, 0)));
2268
2269 default:
2270 break;
2271 }
2272
2273 value = 0;
2274 fmt = GET_RTX_FORMAT (code);
2275 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2276 {
2277 if (fmt[i] == 'e')
2278 value += count_nonfixed_reads (loop, XEXP (x, i));
2279 if (fmt[i] == 'E')
2280 {
2281 register int j;
2282 for (j = 0; j < XVECLEN (x, i); j++)
2283 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2284 }
2285 }
2286 return value;
2287 }
2288 \f
2289 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2290 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2291 `unknown_address_altered', `unknown_constant_address_altered', and
2292 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2293 list `store_mems' in LOOP. */
2294
2295 static void
2296 prescan_loop (loop)
2297 struct loop *loop;
2298 {
2299 register int level = 1;
2300 rtx insn;
2301 struct loop_info *loop_info = LOOP_INFO (loop);
2302 rtx start = loop->start;
2303 rtx end = loop->end;
2304 /* The label after END. Jumping here is just like falling off the
2305 end of the loop. We use next_nonnote_insn instead of next_label
2306 as a hedge against the (pathological) case where some actual insn
2307 might end up between the two. */
2308 rtx exit_target = next_nonnote_insn (end);
2309
2310 loop_info->has_indirect_jump = indirect_jump_in_function;
2311 loop_info->pre_header_has_call = 0;
2312 loop_info->has_call = 0;
2313 loop_info->has_nonconst_call = 0;
2314 loop_info->has_volatile = 0;
2315 loop_info->has_tablejump = 0;
2316 loop_info->has_multiple_exit_targets = 0;
2317 loop->level = 1;
2318
2319 loop_info->unknown_address_altered = 0;
2320 loop_info->unknown_constant_address_altered = 0;
2321 loop_info->store_mems = NULL_RTX;
2322 loop_info->first_loop_store_insn = NULL_RTX;
2323 loop_info->mems_idx = 0;
2324 loop_info->num_mem_sets = 0;
2325
2326
2327 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2328 insn = PREV_INSN (insn))
2329 {
2330 if (GET_CODE (insn) == CALL_INSN)
2331 {
2332 loop_info->pre_header_has_call = 1;
2333 break;
2334 }
2335 }
2336
2337 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2338 insn = NEXT_INSN (insn))
2339 {
2340 if (GET_CODE (insn) == NOTE)
2341 {
2342 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2343 {
2344 ++level;
2345 /* Count number of loops contained in this one. */
2346 loop->level++;
2347 }
2348 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2349 {
2350 --level;
2351 }
2352 }
2353 else if (GET_CODE (insn) == CALL_INSN)
2354 {
2355 if (! CONST_CALL_P (insn))
2356 {
2357 loop_info->unknown_address_altered = 1;
2358 loop_info->has_nonconst_call = 1;
2359 }
2360 loop_info->has_call = 1;
2361 }
2362 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2363 {
2364 rtx label1 = NULL_RTX;
2365 rtx label2 = NULL_RTX;
2366
2367 if (volatile_refs_p (PATTERN (insn)))
2368 loop_info->has_volatile = 1;
2369
2370 if (GET_CODE (insn) == JUMP_INSN
2371 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2372 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2373 loop_info->has_tablejump = 1;
2374
2375 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2376 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2377 loop_info->first_loop_store_insn = insn;
2378
2379 if (! loop_info->has_multiple_exit_targets
2380 && GET_CODE (insn) == JUMP_INSN
2381 && GET_CODE (PATTERN (insn)) == SET
2382 && SET_DEST (PATTERN (insn)) == pc_rtx)
2383 {
2384 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2385 {
2386 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2387 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2388 }
2389 else
2390 {
2391 label1 = SET_SRC (PATTERN (insn));
2392 }
2393
2394 do
2395 {
2396 if (label1 && label1 != pc_rtx)
2397 {
2398 if (GET_CODE (label1) != LABEL_REF)
2399 {
2400 /* Something tricky. */
2401 loop_info->has_multiple_exit_targets = 1;
2402 break;
2403 }
2404 else if (XEXP (label1, 0) != exit_target
2405 && LABEL_OUTSIDE_LOOP_P (label1))
2406 {
2407 /* A jump outside the current loop. */
2408 loop_info->has_multiple_exit_targets = 1;
2409 break;
2410 }
2411 }
2412
2413 label1 = label2;
2414 label2 = NULL_RTX;
2415 }
2416 while (label1);
2417 }
2418 }
2419 else if (GET_CODE (insn) == RETURN)
2420 loop_info->has_multiple_exit_targets = 1;
2421 }
2422
2423 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2424 if (/* An exception thrown by a called function might land us
2425 anywhere. */
2426 ! loop_info->has_nonconst_call
2427 /* We don't want loads for MEMs moved to a location before the
2428 one at which their stack memory becomes allocated. (Note
2429 that this is not a problem for malloc, etc., since those
2430 require actual function calls.) */
2431 && ! current_function_calls_alloca
2432 /* There are ways to leave the loop other than falling off the
2433 end. */
2434 && ! loop_info->has_multiple_exit_targets)
2435 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2436 insn = NEXT_INSN (insn))
2437 for_each_rtx (&insn, insert_loop_mem, loop_info);
2438
2439 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2440 that loop_invariant_p and load_mems can use true_dependence
2441 to determine what is really clobbered. */
2442 if (loop_info->unknown_address_altered)
2443 {
2444 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2445
2446 loop_info->store_mems
2447 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2448 }
2449 if (loop_info->unknown_constant_address_altered)
2450 {
2451 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2452
2453 RTX_UNCHANGING_P (mem) = 1;
2454 loop_info->store_mems
2455 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2456 }
2457 }
2458 \f
2459 /* Scan the function looking for loops. Record the start and end of each loop.
2460 Also mark as invalid loops any loops that contain a setjmp or are branched
2461 to from outside the loop. */
2462
2463 static void
2464 find_and_verify_loops (f, loops)
2465 rtx f;
2466 struct loops *loops;
2467 {
2468 rtx insn;
2469 rtx label;
2470 int num_loops;
2471 struct loop *current_loop;
2472 struct loop *next_loop;
2473 struct loop *loop;
2474
2475 num_loops = loops->num;
2476
2477 compute_luids (f, NULL_RTX, 0);
2478
2479 /* If there are jumps to undefined labels,
2480 treat them as jumps out of any/all loops.
2481 This also avoids writing past end of tables when there are no loops. */
2482 uid_loop[0] = NULL;
2483
2484 /* Find boundaries of loops, mark which loops are contained within
2485 loops, and invalidate loops that have setjmp. */
2486
2487 num_loops = 0;
2488 current_loop = NULL;
2489 for (insn = f; insn; insn = NEXT_INSN (insn))
2490 {
2491 if (GET_CODE (insn) == NOTE)
2492 switch (NOTE_LINE_NUMBER (insn))
2493 {
2494 case NOTE_INSN_LOOP_BEG:
2495 next_loop = loops->array + num_loops;
2496 next_loop->num = num_loops;
2497 num_loops++;
2498 next_loop->start = insn;
2499 next_loop->outer = current_loop;
2500 current_loop = next_loop;
2501 break;
2502
2503 case NOTE_INSN_SETJMP:
2504 /* In this case, we must invalidate our current loop and any
2505 enclosing loop. */
2506 for (loop = current_loop; loop; loop = loop->outer)
2507 {
2508 loop->invalid = 1;
2509 if (loop_dump_stream)
2510 fprintf (loop_dump_stream,
2511 "\nLoop at %d ignored due to setjmp.\n",
2512 INSN_UID (loop->start));
2513 }
2514 break;
2515
2516 case NOTE_INSN_LOOP_CONT:
2517 current_loop->cont = insn;
2518 break;
2519
2520 case NOTE_INSN_LOOP_VTOP:
2521 current_loop->vtop = insn;
2522 break;
2523
2524 case NOTE_INSN_LOOP_END:
2525 if (! current_loop)
2526 abort ();
2527
2528 current_loop->end = insn;
2529 current_loop = current_loop->outer;
2530 break;
2531
2532 default:
2533 break;
2534 }
2535
2536 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2537 enclosing loop, but this doesn't matter. */
2538 uid_loop[INSN_UID (insn)] = current_loop;
2539 }
2540
2541 /* Any loop containing a label used in an initializer must be invalidated,
2542 because it can be jumped into from anywhere. */
2543
2544 for (label = forced_labels; label; label = XEXP (label, 1))
2545 {
2546 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2547 loop; loop = loop->outer)
2548 loop->invalid = 1;
2549 }
2550
2551 /* Any loop containing a label used for an exception handler must be
2552 invalidated, because it can be jumped into from anywhere. */
2553
2554 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2555 {
2556 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2557 loop; loop = loop->outer)
2558 loop->invalid = 1;
2559 }
2560
2561 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2562 loop that it is not contained within, that loop is marked invalid.
2563 If any INSN or CALL_INSN uses a label's address, then the loop containing
2564 that label is marked invalid, because it could be jumped into from
2565 anywhere.
2566
2567 Also look for blocks of code ending in an unconditional branch that
2568 exits the loop. If such a block is surrounded by a conditional
2569 branch around the block, move the block elsewhere (see below) and
2570 invert the jump to point to the code block. This may eliminate a
2571 label in our loop and will simplify processing by both us and a
2572 possible second cse pass. */
2573
2574 for (insn = f; insn; insn = NEXT_INSN (insn))
2575 if (INSN_P (insn))
2576 {
2577 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2578
2579 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2580 {
2581 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2582 if (note)
2583 {
2584 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2585 loop; loop = loop->outer)
2586 loop->invalid = 1;
2587 }
2588 }
2589
2590 if (GET_CODE (insn) != JUMP_INSN)
2591 continue;
2592
2593 mark_loop_jump (PATTERN (insn), this_loop);
2594
2595 /* See if this is an unconditional branch outside the loop. */
2596 if (this_loop
2597 && (GET_CODE (PATTERN (insn)) == RETURN
2598 || (any_uncondjump_p (insn)
2599 && onlyjump_p (insn)
2600 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2601 != this_loop)))
2602 && get_max_uid () < max_uid_for_loop)
2603 {
2604 rtx p;
2605 rtx our_next = next_real_insn (insn);
2606 rtx last_insn_to_move = NEXT_INSN (insn);
2607 struct loop *dest_loop;
2608 struct loop *outer_loop = NULL;
2609
2610 /* Go backwards until we reach the start of the loop, a label,
2611 or a JUMP_INSN. */
2612 for (p = PREV_INSN (insn);
2613 GET_CODE (p) != CODE_LABEL
2614 && ! (GET_CODE (p) == NOTE
2615 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2616 && GET_CODE (p) != JUMP_INSN;
2617 p = PREV_INSN (p))
2618 ;
2619
2620 /* Check for the case where we have a jump to an inner nested
2621 loop, and do not perform the optimization in that case. */
2622
2623 if (JUMP_LABEL (insn))
2624 {
2625 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2626 if (dest_loop)
2627 {
2628 for (outer_loop = dest_loop; outer_loop;
2629 outer_loop = outer_loop->outer)
2630 if (outer_loop == this_loop)
2631 break;
2632 }
2633 }
2634
2635 /* Make sure that the target of P is within the current loop. */
2636
2637 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2638 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2639 outer_loop = this_loop;
2640
2641 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2642 we have a block of code to try to move.
2643
2644 We look backward and then forward from the target of INSN
2645 to find a BARRIER at the same loop depth as the target.
2646 If we find such a BARRIER, we make a new label for the start
2647 of the block, invert the jump in P and point it to that label,
2648 and move the block of code to the spot we found. */
2649
2650 if (! outer_loop
2651 && GET_CODE (p) == JUMP_INSN
2652 && JUMP_LABEL (p) != 0
2653 /* Just ignore jumps to labels that were never emitted.
2654 These always indicate compilation errors. */
2655 && INSN_UID (JUMP_LABEL (p)) != 0
2656 && any_condjump_p (p) && onlyjump_p (p)
2657 && next_real_insn (JUMP_LABEL (p)) == our_next
2658 /* If it's not safe to move the sequence, then we
2659 mustn't try. */
2660 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2661 &last_insn_to_move))
2662 {
2663 rtx target
2664 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2665 struct loop *target_loop = uid_loop[INSN_UID (target)];
2666 rtx loc, loc2;
2667
2668 for (loc = target; loc; loc = PREV_INSN (loc))
2669 if (GET_CODE (loc) == BARRIER
2670 /* Don't move things inside a tablejump. */
2671 && ((loc2 = next_nonnote_insn (loc)) == 0
2672 || GET_CODE (loc2) != CODE_LABEL
2673 || (loc2 = next_nonnote_insn (loc2)) == 0
2674 || GET_CODE (loc2) != JUMP_INSN
2675 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2676 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2677 && uid_loop[INSN_UID (loc)] == target_loop)
2678 break;
2679
2680 if (loc == 0)
2681 for (loc = target; loc; loc = NEXT_INSN (loc))
2682 if (GET_CODE (loc) == BARRIER
2683 /* Don't move things inside a tablejump. */
2684 && ((loc2 = next_nonnote_insn (loc)) == 0
2685 || GET_CODE (loc2) != CODE_LABEL
2686 || (loc2 = next_nonnote_insn (loc2)) == 0
2687 || GET_CODE (loc2) != JUMP_INSN
2688 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2689 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2690 && uid_loop[INSN_UID (loc)] == target_loop)
2691 break;
2692
2693 if (loc)
2694 {
2695 rtx cond_label = JUMP_LABEL (p);
2696 rtx new_label = get_label_after (p);
2697
2698 /* Ensure our label doesn't go away. */
2699 LABEL_NUSES (cond_label)++;
2700
2701 /* Verify that uid_loop is large enough and that
2702 we can invert P. */
2703 if (invert_jump (p, new_label, 1))
2704 {
2705 rtx q, r;
2706
2707 /* If no suitable BARRIER was found, create a suitable
2708 one before TARGET. Since TARGET is a fall through
2709 path, we'll need to insert a jump around our block
2710 and add a BARRIER before TARGET.
2711
2712 This creates an extra unconditional jump outside
2713 the loop. However, the benefits of removing rarely
2714 executed instructions from inside the loop usually
2715 outweigh the cost of the extra unconditional jump
2716 outside the loop. */
2717 if (loc == 0)
2718 {
2719 rtx temp;
2720
2721 temp = gen_jump (JUMP_LABEL (insn));
2722 temp = emit_jump_insn_before (temp, target);
2723 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2724 LABEL_NUSES (JUMP_LABEL (insn))++;
2725 loc = emit_barrier_before (target);
2726 }
2727
2728 /* Include the BARRIER after INSN and copy the
2729 block after LOC. */
2730 new_label = squeeze_notes (new_label,
2731 last_insn_to_move);
2732 reorder_insns (new_label, last_insn_to_move, loc);
2733
2734 /* All those insns are now in TARGET_LOOP. */
2735 for (q = new_label;
2736 q != NEXT_INSN (last_insn_to_move);
2737 q = NEXT_INSN (q))
2738 uid_loop[INSN_UID (q)] = target_loop;
2739
2740 /* The label jumped to by INSN is no longer a loop
2741 exit. Unless INSN does not have a label (e.g.,
2742 it is a RETURN insn), search loop->exit_labels
2743 to find its label_ref, and remove it. Also turn
2744 off LABEL_OUTSIDE_LOOP_P bit. */
2745 if (JUMP_LABEL (insn))
2746 {
2747 for (q = 0, r = this_loop->exit_labels;
2748 r;
2749 q = r, r = LABEL_NEXTREF (r))
2750 if (XEXP (r, 0) == JUMP_LABEL (insn))
2751 {
2752 LABEL_OUTSIDE_LOOP_P (r) = 0;
2753 if (q)
2754 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2755 else
2756 this_loop->exit_labels = LABEL_NEXTREF (r);
2757 break;
2758 }
2759
2760 for (loop = this_loop; loop && loop != target_loop;
2761 loop = loop->outer)
2762 loop->exit_count--;
2763
2764 /* If we didn't find it, then something is
2765 wrong. */
2766 if (! r)
2767 abort ();
2768 }
2769
2770 /* P is now a jump outside the loop, so it must be put
2771 in loop->exit_labels, and marked as such.
2772 The easiest way to do this is to just call
2773 mark_loop_jump again for P. */
2774 mark_loop_jump (PATTERN (p), this_loop);
2775
2776 /* If INSN now jumps to the insn after it,
2777 delete INSN. */
2778 if (JUMP_LABEL (insn) != 0
2779 && (next_real_insn (JUMP_LABEL (insn))
2780 == next_real_insn (insn)))
2781 delete_insn (insn);
2782 }
2783
2784 /* Continue the loop after where the conditional
2785 branch used to jump, since the only branch insn
2786 in the block (if it still remains) is an inter-loop
2787 branch and hence needs no processing. */
2788 insn = NEXT_INSN (cond_label);
2789
2790 if (--LABEL_NUSES (cond_label) == 0)
2791 delete_insn (cond_label);
2792
2793 /* This loop will be continued with NEXT_INSN (insn). */
2794 insn = PREV_INSN (insn);
2795 }
2796 }
2797 }
2798 }
2799 }
2800
2801 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2802 loops it is contained in, mark the target loop invalid.
2803
2804 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2805
2806 static void
2807 mark_loop_jump (x, loop)
2808 rtx x;
2809 struct loop *loop;
2810 {
2811 struct loop *dest_loop;
2812 struct loop *outer_loop;
2813 int i;
2814
2815 switch (GET_CODE (x))
2816 {
2817 case PC:
2818 case USE:
2819 case CLOBBER:
2820 case REG:
2821 case MEM:
2822 case CONST_INT:
2823 case CONST_DOUBLE:
2824 case RETURN:
2825 return;
2826
2827 case CONST:
2828 /* There could be a label reference in here. */
2829 mark_loop_jump (XEXP (x, 0), loop);
2830 return;
2831
2832 case PLUS:
2833 case MINUS:
2834 case MULT:
2835 mark_loop_jump (XEXP (x, 0), loop);
2836 mark_loop_jump (XEXP (x, 1), loop);
2837 return;
2838
2839 case LO_SUM:
2840 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2841 mark_loop_jump (XEXP (x, 1), loop);
2842 return;
2843
2844 case SIGN_EXTEND:
2845 case ZERO_EXTEND:
2846 mark_loop_jump (XEXP (x, 0), loop);
2847 return;
2848
2849 case LABEL_REF:
2850 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2851
2852 /* Link together all labels that branch outside the loop. This
2853 is used by final_[bg]iv_value and the loop unrolling code. Also
2854 mark this LABEL_REF so we know that this branch should predict
2855 false. */
2856
2857 /* A check to make sure the label is not in an inner nested loop,
2858 since this does not count as a loop exit. */
2859 if (dest_loop)
2860 {
2861 for (outer_loop = dest_loop; outer_loop;
2862 outer_loop = outer_loop->outer)
2863 if (outer_loop == loop)
2864 break;
2865 }
2866 else
2867 outer_loop = NULL;
2868
2869 if (loop && ! outer_loop)
2870 {
2871 LABEL_OUTSIDE_LOOP_P (x) = 1;
2872 LABEL_NEXTREF (x) = loop->exit_labels;
2873 loop->exit_labels = x;
2874
2875 for (outer_loop = loop;
2876 outer_loop && outer_loop != dest_loop;
2877 outer_loop = outer_loop->outer)
2878 outer_loop->exit_count++;
2879 }
2880
2881 /* If this is inside a loop, but not in the current loop or one enclosed
2882 by it, it invalidates at least one loop. */
2883
2884 if (! dest_loop)
2885 return;
2886
2887 /* We must invalidate every nested loop containing the target of this
2888 label, except those that also contain the jump insn. */
2889
2890 for (; dest_loop; dest_loop = dest_loop->outer)
2891 {
2892 /* Stop when we reach a loop that also contains the jump insn. */
2893 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2894 if (dest_loop == outer_loop)
2895 return;
2896
2897 /* If we get here, we know we need to invalidate a loop. */
2898 if (loop_dump_stream && ! dest_loop->invalid)
2899 fprintf (loop_dump_stream,
2900 "\nLoop at %d ignored due to multiple entry points.\n",
2901 INSN_UID (dest_loop->start));
2902
2903 dest_loop->invalid = 1;
2904 }
2905 return;
2906
2907 case SET:
2908 /* If this is not setting pc, ignore. */
2909 if (SET_DEST (x) == pc_rtx)
2910 mark_loop_jump (SET_SRC (x), loop);
2911 return;
2912
2913 case IF_THEN_ELSE:
2914 mark_loop_jump (XEXP (x, 1), loop);
2915 mark_loop_jump (XEXP (x, 2), loop);
2916 return;
2917
2918 case PARALLEL:
2919 case ADDR_VEC:
2920 for (i = 0; i < XVECLEN (x, 0); i++)
2921 mark_loop_jump (XVECEXP (x, 0, i), loop);
2922 return;
2923
2924 case ADDR_DIFF_VEC:
2925 for (i = 0; i < XVECLEN (x, 1); i++)
2926 mark_loop_jump (XVECEXP (x, 1, i), loop);
2927 return;
2928
2929 default:
2930 /* Strictly speaking this is not a jump into the loop, only a possible
2931 jump out of the loop. However, we have no way to link the destination
2932 of this jump onto the list of exit labels. To be safe we mark this
2933 loop and any containing loops as invalid. */
2934 if (loop)
2935 {
2936 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2937 {
2938 if (loop_dump_stream && ! outer_loop->invalid)
2939 fprintf (loop_dump_stream,
2940 "\nLoop at %d ignored due to unknown exit jump.\n",
2941 INSN_UID (outer_loop->start));
2942 outer_loop->invalid = 1;
2943 }
2944 }
2945 return;
2946 }
2947 }
2948 \f
2949 /* Return nonzero if there is a label in the range from
2950 insn INSN to and including the insn whose luid is END.
2951 INSN must have an assigned luid (i.e., it must not have
2952 been previously created by loop.c). */
2953
2954 static int
2955 labels_in_range_p (insn, end)
2956 rtx insn;
2957 int end;
2958 {
2959 while (insn && INSN_LUID (insn) <= end)
2960 {
2961 if (GET_CODE (insn) == CODE_LABEL)
2962 return 1;
2963 insn = NEXT_INSN (insn);
2964 }
2965
2966 return 0;
2967 }
2968
2969 /* Record that a memory reference X is being set. */
2970
2971 static void
2972 note_addr_stored (x, y, data)
2973 rtx x;
2974 rtx y ATTRIBUTE_UNUSED;
2975 void *data ATTRIBUTE_UNUSED;
2976 {
2977 struct loop_info *loop_info = data;
2978
2979 if (x == 0 || GET_CODE (x) != MEM)
2980 return;
2981
2982 /* Count number of memory writes.
2983 This affects heuristics in strength_reduce. */
2984 loop_info->num_mem_sets++;
2985
2986 /* BLKmode MEM means all memory is clobbered. */
2987 if (GET_MODE (x) == BLKmode)
2988 {
2989 if (RTX_UNCHANGING_P (x))
2990 loop_info->unknown_constant_address_altered = 1;
2991 else
2992 loop_info->unknown_address_altered = 1;
2993
2994 return;
2995 }
2996
2997 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
2998 loop_info->store_mems);
2999 }
3000
3001 /* X is a value modified by an INSN that references a biv inside a loop
3002 exit test (i.e., X is somehow related to the value of the biv). If X
3003 is a pseudo that is used more than once, then the biv is (effectively)
3004 used more than once. DATA is a pointer to a loop_regs structure. */
3005
3006 static void
3007 note_set_pseudo_multiple_uses (x, y, data)
3008 rtx x;
3009 rtx y ATTRIBUTE_UNUSED;
3010 void *data;
3011 {
3012 struct loop_regs *regs = (struct loop_regs *) data;
3013
3014 if (x == 0)
3015 return;
3016
3017 while (GET_CODE (x) == STRICT_LOW_PART
3018 || GET_CODE (x) == SIGN_EXTRACT
3019 || GET_CODE (x) == ZERO_EXTRACT
3020 || GET_CODE (x) == SUBREG)
3021 x = XEXP (x, 0);
3022
3023 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3024 return;
3025
3026 /* If we do not have usage information, or if we know the register
3027 is used more than once, note that fact for check_dbra_loop. */
3028 if (REGNO (x) >= max_reg_before_loop
3029 || ! regs->array[REGNO (x)].single_usage
3030 || regs->array[REGNO (x)].single_usage == const0_rtx)
3031 regs->multiple_uses = 1;
3032 }
3033 \f
3034 /* Return nonzero if the rtx X is invariant over the current loop.
3035
3036 The value is 2 if we refer to something only conditionally invariant.
3037
3038 A memory ref is invariant if it is not volatile and does not conflict
3039 with anything stored in `loop_info->store_mems'. */
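/* Illustrative returns: (const_int 4) or (symbol_ref "x") yield 1;
   a pseudo register whose set_in_loop count is negative yields 2
   (only conditionally invariant); a MEM that may conflict with an
   entry in loop_info->store_mems yields 0.  */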
3040
3041 int
3042 loop_invariant_p (loop, x)
3043 const struct loop *loop;
3044 register rtx x;
3045 {
3046 struct loop_info *loop_info = LOOP_INFO (loop);
3047 struct loop_regs *regs = LOOP_REGS (loop);
3048 register int i;
3049 register enum rtx_code code;
3050 register const char *fmt;
3051 int conditional = 0;
3052 rtx mem_list_entry;
3053
3054 if (x == 0)
3055 return 1;
3056 code = GET_CODE (x);
3057 switch (code)
3058 {
3059 case CONST_INT:
3060 case CONST_DOUBLE:
3061 case SYMBOL_REF:
3062 case CONST:
3063 return 1;
3064
3065 case LABEL_REF:
3066 /* A LABEL_REF is normally invariant, however, if we are unrolling
3067 loops, and this label is inside the loop, then it isn't invariant.
3068 This is because each unrolled copy of the loop body will have
3069 a copy of this label. If this was invariant, then an insn loading
3070 the address of this label into a register might get moved outside
3071 the loop, and then each loop body would end up using the same label.
3072
3073 We don't know the loop bounds here though, so just fail for all
3074 labels. */
3075 if (flag_unroll_loops)
3076 return 0;
3077 else
3078 return 1;
3079
3080 case PC:
3081 case CC0:
3082 case UNSPEC_VOLATILE:
3083 return 0;
3084
3085 case REG:
3086 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3087 since the reg might be set by initialization within the loop. */
3088
3089 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3090 || x == arg_pointer_rtx)
3091 && ! current_function_has_nonlocal_goto)
3092 return 1;
3093
3094 if (LOOP_INFO (loop)->has_call
3095 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3096 return 0;
3097
3098 if (regs->array[REGNO (x)].set_in_loop < 0)
3099 return 2;
3100
3101 return regs->array[REGNO (x)].set_in_loop == 0;
3102
3103 case MEM:
3104 /* Volatile memory references must be rejected. Do this before
3105 checking for read-only items, so that volatile read-only items
3106 will be rejected also. */
3107 if (MEM_VOLATILE_P (x))
3108 return 0;
3109
3110 /* See if there is any dependence between a store and this load. */
3111 mem_list_entry = loop_info->store_mems;
3112 while (mem_list_entry)
3113 {
3114 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3115 x, rtx_varies_p))
3116 return 0;
3117
3118 mem_list_entry = XEXP (mem_list_entry, 1);
3119 }
3120
3121 /* It's not invalidated by a store in memory
3122 but we must still verify the address is invariant. */
3123 break;
3124
3125 case ASM_OPERANDS:
3126 /* Don't mess with insns declared volatile. */
3127 if (MEM_VOLATILE_P (x))
3128 return 0;
3129 break;
3130
3131 default:
3132 break;
3133 }
3134
3135 fmt = GET_RTX_FORMAT (code);
3136 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3137 {
3138 if (fmt[i] == 'e')
3139 {
3140 int tem = loop_invariant_p (loop, XEXP (x, i));
3141 if (tem == 0)
3142 return 0;
3143 if (tem == 2)
3144 conditional = 1;
3145 }
3146 else if (fmt[i] == 'E')
3147 {
3148 register int j;
3149 for (j = 0; j < XVECLEN (x, i); j++)
3150 {
3151 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3152 if (tem == 0)
3153 return 0;
3154 if (tem == 2)
3155 conditional = 1;
3156 }
3157
3158 }
3159 }
3160
3161 return 1 + conditional;
3162 }
3163 \f
3164 /* Return nonzero if all the insns in the loop that set REG
3165 are INSN and the immediately following insns,
3166 and if each of those insns sets REG in an invariant way
3167 (not counting uses of REG in them).
3168
3169 The value is 2 if some of these insns are only conditionally invariant.
3170
3171 We assume that INSN itself is the first set of REG
3172 and that its source is invariant. */
3173
3174 static int
3175 consec_sets_invariant_p (loop, reg, n_sets, insn)
3176 const struct loop *loop;
3177 int n_sets;
3178 rtx reg, insn;
3179 {
3180 struct loop_regs *regs = LOOP_REGS (loop);
3181 rtx p = insn;
3182 unsigned int regno = REGNO (reg);
3183 rtx temp;
3184 /* Number of sets we have to insist on finding after INSN. */
3185 int count = n_sets - 1;
3186 int old = regs->array[regno].set_in_loop;
3187 int value = 0;
3188 int this;
3189
3190 /* If N_SETS hit the limit, we can't rely on its value. */
3191 if (n_sets == 127)
3192 return 0;
3193
3194 regs->array[regno].set_in_loop = 0;
3195
3196 while (count > 0)
3197 {
3198 register enum rtx_code code;
3199 rtx set;
3200
3201 p = NEXT_INSN (p);
3202 code = GET_CODE (p);
3203
3204 /* If library call, skip to end of it. */
3205 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3206 p = XEXP (temp, 0);
3207
3208 this = 0;
3209 if (code == INSN
3210 && (set = single_set (p))
3211 && GET_CODE (SET_DEST (set)) == REG
3212 && REGNO (SET_DEST (set)) == regno)
3213 {
3214 this = loop_invariant_p (loop, SET_SRC (set));
3215 if (this != 0)
3216 value |= this;
3217 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3218 {
3219 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3220 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3221 notes are OK. */
3222 this = (CONSTANT_P (XEXP (temp, 0))
3223 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3224 && loop_invariant_p (loop, XEXP (temp, 0))));
3225 if (this != 0)
3226 value |= this;
3227 }
3228 }
3229 if (this != 0)
3230 count--;
3231 else if (code != NOTE)
3232 {
3233 regs->array[regno].set_in_loop = old;
3234 return 0;
3235 }
3236 }
3237
3238 regs->array[regno].set_in_loop = old;
3239 /* If loop_invariant_p ever returned 2, we return 2. */
3240 return 1 + (value & 2);
3241 }
3242
3243 #if 0
3244 /* I don't think this condition is sufficient to allow INSN
3245 to be moved, so we no longer test it. */
3246
3247 /* Return 1 if all insns in the basic block of INSN and following INSN
3248 that set REG are invariant according to TABLE. */
3249
3250 static int
3251 all_sets_invariant_p (reg, insn, table)
3252 rtx reg, insn;
3253 short *table;
3254 {
3255 register rtx p = insn;
3256 register int regno = REGNO (reg);
3257
3258 while (1)
3259 {
3260 register enum rtx_code code;
3261 p = NEXT_INSN (p);
3262 code = GET_CODE (p);
3263 if (code == CODE_LABEL || code == JUMP_INSN)
3264 return 1;
3265 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3266 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3267 && REGNO (SET_DEST (PATTERN (p))) == regno)
3268 {
3269 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3270 return 0;
3271 }
3272 }
3273 }
3274 #endif /* 0 */
3275 \f
3276 /* Look at all uses (not sets) of registers in X. For each, if it is
3277 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3278 a different insn, set USAGE[REGNO] to const0_rtx. */
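/* After the scan, regs->array[R].single_usage is therefore one of
   three values: 0 if R is never used, the unique insn that uses R,
   or const0_rtx if R is used in more than one insn.  */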
3279
3280 static void
3281 find_single_use_in_loop (regs, insn, x)
3282 struct loop_regs *regs;
3283 rtx insn;
3284 rtx x;
3285 {
3286 enum rtx_code code = GET_CODE (x);
3287 const char *fmt = GET_RTX_FORMAT (code);
3288 int i, j;
3289
3290 if (code == REG)
3291 regs->array[REGNO (x)].single_usage
3292 = (regs->array[REGNO (x)].single_usage != 0
3293 && regs->array[REGNO (x)].single_usage != insn)
3294 ? const0_rtx : insn;
3295
3296 else if (code == SET)
3297 {
3298 /* Don't count SET_DEST if it is a REG; otherwise count things
3299 in SET_DEST because if a register is partially modified, it won't
3300 show up as a potential movable so we don't care how USAGE is set
3301 for it. */
3302 if (GET_CODE (SET_DEST (x)) != REG)
3303 find_single_use_in_loop (regs, insn, SET_DEST (x));
3304 find_single_use_in_loop (regs, insn, SET_SRC (x));
3305 }
3306 else
3307 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3308 {
3309 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3310 find_single_use_in_loop (regs, insn, XEXP (x, i));
3311 else if (fmt[i] == 'E')
3312 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3313 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3314 }
3315 }
3316 \f
3317 /* Count and record any set in X which is contained in INSN. Update
3318 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3319 in X. */
3320
3321 static void
3322 count_one_set (regs, insn, x, last_set)
3323 struct loop_regs *regs;
3324 rtx insn, x;
3325 rtx *last_set;
3326 {
3327 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3328 /* Don't move a reg that has an explicit clobber.
3329 It's not worth the pain to try to do it correctly. */
3330 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3331
3332 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3333 {
3334 rtx dest = SET_DEST (x);
3335 while (GET_CODE (dest) == SUBREG
3336 || GET_CODE (dest) == ZERO_EXTRACT
3337 || GET_CODE (dest) == SIGN_EXTRACT
3338 || GET_CODE (dest) == STRICT_LOW_PART)
3339 dest = XEXP (dest, 0);
3340 if (GET_CODE (dest) == REG)
3341 {
3342 register int regno = REGNO (dest);
3343 /* If this is the first setting of this reg
3344 in current basic block, and it was set before,
3345 it must be set in two basic blocks, so it cannot
3346 be moved out of the loop. */
3347 if (regs->array[regno].set_in_loop > 0
3348 && last_set[regno] == 0)
3349 regs->array[regno].may_not_optimize = 1;
3350 /* If this is not first setting in current basic block,
3351 see if reg was used in between previous one and this.
3352 If so, neither one can be moved. */
3353 if (last_set[regno] != 0
3354 && reg_used_between_p (dest, last_set[regno], insn))
3355 regs->array[regno].may_not_optimize = 1;
3356 if (regs->array[regno].set_in_loop < 127)
3357 ++regs->array[regno].set_in_loop;
3358 last_set[regno] = insn;
3359 }
3360 }
3361 }
3362 \f
3363 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3364 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3365 contained in insn INSN is used by any insn that precedes INSN in
3366 cyclic order starting from the loop entry point.
3367
3368 We don't want to use INSN_LUID here because if we restrict INSN to those
3369 that have a valid INSN_LUID, it means we cannot move an invariant out
3370 from an inner loop past two loops. */
3371
3372 static int
3373 loop_reg_used_before_p (loop, set, insn)
3374 const struct loop *loop;
3375 rtx set, insn;
3376 {
3377 rtx reg = SET_DEST (set);
3378 rtx p;
3379
3380 /* Scan forward checking for register usage. If we hit INSN, we
3381 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3382 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3383 {
3384 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3385 return 1;
3386
3387 if (p == loop->end)
3388 p = loop->start;
3389 }
3390
3391 return 0;
3392 }
3393 \f
3394 /* A "basic induction variable" or biv is a pseudo reg that is set
3395 (within this loop) only by incrementing or decrementing it. */
3396 /* A "general induction variable" or giv is a pseudo reg whose
3397 value is a linear function of a biv. */
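/* Illustrative example (element size target dependent): in a loop like
   "for (i = 0; i < n; i++) a[i] = 0;" the counter i, incremented by a
   constant each iteration, is a biv, while the address of a[i], which
   evolves as base + 4 * i for 4-byte elements, is a giv with a linear
   dependence on that biv.  */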
3398
3399 /* Bivs are recognized by `basic_induction_var';
3400 Givs by `general_induction_var'. */
3401
3402 /* Communication with routines called via `note_stores'. */
3403
3404 static rtx note_insn;
3405
3406 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3407
3408 static rtx addr_placeholder;
3409
3410 /* ??? Unfinished optimizations, and possible future optimizations,
3411 for the strength reduction code. */
3412
3413 /* ??? The interaction of biv elimination, and recognition of 'constant'
3414 bivs, may cause problems. */
3415
3416 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3417 performance problems.
3418
3419 Perhaps don't eliminate things that can be combined with an addressing
3420 mode. Find all givs that have the same biv, mult_val, and add_val;
3421 then for each giv, check to see if its only use dies in a following
3422 memory address. If so, generate a new memory address and check to see
3423 if it is valid. If it is valid, then store the modified memory address,
3424 otherwise, mark the giv as not done so that it will get its own iv. */
3425
3426 /* ??? Could try to optimize branches when it is known that a biv is always
3427 positive. */
3428
3429 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3430 giv so that an optimized branch can still be recognized by the combiner,
3431 e.g. the VAX acb insn. */
3432
3433 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3434 was rerun in loop_optimize whenever a register was added or moved.
3435 Also, some of the optimizations could be a little less conservative. */
3436 \f
3437 /* Scan the loop body and call FNCALL for each insn. In addition to the
3438 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3439 callback.
3440 
3441 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
3442 for every loop iteration except for the last one.
3443 
3444 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
3445 every loop iteration.
3446 */
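/* Typical use, as in loop_bivs_find below:
   for_each_insn_in_loop (loop, check_insn_for_bivs);  */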
3447 void
3448 for_each_insn_in_loop (loop, fncall)
3449 struct loop *loop;
3450 loop_insn_callback fncall;
3451 {
3452 /* This is 1 if current insn is not executed at least once for every loop
3453 iteration. */
3454 int not_every_iteration = 0;
3455 int maybe_multiple = 0;
3456 int past_loop_latch = 0;
3457 int loop_depth = 0;
3458 rtx p;
3459
3460 /* If loop_scan_start points to the loop exit test, we have to be wary of
3461 subversive use of gotos inside expression statements. */
3462 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3463 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3464
3465 /* Scan through loop to find all possible bivs. */
3466
3467 for (p = next_insn_in_loop (loop, loop->scan_start);
3468 p != NULL_RTX;
3469 p = next_insn_in_loop (loop, p))
3470 {
3471 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3472
3473 /* Past CODE_LABEL, we get to insns that may be executed multiple
3474 times. The only way we can be sure that they can't is if every
3475 jump insn between here and the end of the loop either
3476 returns, exits the loop, is a jump to a location that is still
3477 behind the label, or is a jump to the loop start. */
3478
3479 if (GET_CODE (p) == CODE_LABEL)
3480 {
3481 rtx insn = p;
3482
3483 maybe_multiple = 0;
3484
3485 while (1)
3486 {
3487 insn = NEXT_INSN (insn);
3488 if (insn == loop->scan_start)
3489 break;
3490 if (insn == loop->end)
3491 {
3492 if (loop->top != 0)
3493 insn = loop->top;
3494 else
3495 break;
3496 if (insn == loop->scan_start)
3497 break;
3498 }
3499
3500 if (GET_CODE (insn) == JUMP_INSN
3501 && GET_CODE (PATTERN (insn)) != RETURN
3502 && (!any_condjump_p (insn)
3503 || (JUMP_LABEL (insn) != 0
3504 && JUMP_LABEL (insn) != loop->scan_start
3505 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3506 {
3507 maybe_multiple = 1;
3508 break;
3509 }
3510 }
3511 }
3512
3513 /* Past a jump, we get to insns for which we can't count
3514 on whether they will be executed during each iteration. */
3515 /* This code appears twice in strength_reduce. There is also similar
3516 code in scan_loop. */
3517 if (GET_CODE (p) == JUMP_INSN
3518 /* If we enter the loop in the middle, and scan around to the
3519 beginning, don't set not_every_iteration for that.
3520 This can be any kind of jump, since we want to know if insns
3521 will be executed if the loop is executed. */
3522 && !(JUMP_LABEL (p) == loop->top
3523 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3524 && any_uncondjump_p (p))
3525 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3526 {
3527 rtx label = 0;
3528
3529 /* If this is a jump outside the loop, then it also doesn't
3530 matter. Check to see if the target of this branch is on the
3531 loop->exit_labels list. */
3532
3533 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3534 if (XEXP (label, 0) == JUMP_LABEL (p))
3535 break;
3536
3537 if (!label)
3538 not_every_iteration = 1;
3539 }
3540
3541 else if (GET_CODE (p) == NOTE)
3542 {
3543 /* At the virtual top of a converted loop, insns are again known to
3544 be executed each iteration: logically, the loop begins here
3545 even though the exit code has been duplicated.
3546
3547 Insns are also again known to be executed each iteration at
3548 the LOOP_CONT note. */
3549 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3550 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3551 && loop_depth == 0)
3552 not_every_iteration = 0;
3553 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3554 loop_depth++;
3555 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3556 loop_depth--;
3557 }
3558
3559 /* Note if we pass a loop latch. If we do, then we can not clear
3560 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3561 a loop since a jump before the last CODE_LABEL may have started
3562 a new loop iteration.
3563
3564 Note that LOOP_TOP is only set for rotated loops and we need
3565 this check for all loops, so compare against the CODE_LABEL
3566 which immediately follows LOOP_START. */
3567 if (GET_CODE (p) == JUMP_INSN
3568 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3569 past_loop_latch = 1;
3570
3571 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3572 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3573 or not an insn is known to be executed each iteration of the
3574 loop, whether or not any iterations are known to occur.
3575
3576 Therefore, if we have just passed a label and have no more labels
3577 between here and the test insn of the loop, and we have not passed
3578 a jump to the top of the loop, then we know these insns will be
3579 executed each iteration. */
3580
3581 if (not_every_iteration
3582 && !past_loop_latch
3583 && GET_CODE (p) == CODE_LABEL
3584 && no_labels_between_p (p, loop->end)
3585 && loop_insn_first_p (p, loop->cont))
3586 not_every_iteration = 0;
3587 }
3588 }
3589 \f
3590 static void
3591 loop_bivs_find (loop)
3592 struct loop *loop;
3593 {
3594 struct loop_regs *regs = LOOP_REGS (loop);
3595 struct loop_ivs *ivs = LOOP_IVS (loop);
3596 /* Temporary list pointers for traversing ivs->list. */
3597 struct iv_class *bl, **backbl;
3598
3599 ivs->list = 0;
3600
3601 for_each_insn_in_loop (loop, check_insn_for_bivs);
3602
3603 /* Scan ivs->list to remove all regs that proved not to be bivs.
3604 Make a sanity check against regs->n_times_set. */
3605 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
3606 {
3607 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3608 /* Above happens if register modified by subreg, etc. */
3609 /* Make sure it is not recognized as a basic induction var: */
3610 || regs->array[bl->regno].n_times_set != bl->biv_count
3611 /* If never incremented, it is invariant that we decided not to
3612 move. So leave it alone. */
3613 || ! bl->incremented)
3614 {
3615 if (loop_dump_stream)
3616 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
3617 bl->regno,
3618 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3619 ? "not induction variable"
3620 : (! bl->incremented ? "never incremented"
3621 : "count error")));
3622
3623 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
3624 *backbl = bl->next;
3625 }
3626 else
3627 {
3628 backbl = &bl->next;
3629
3630 if (loop_dump_stream)
3631 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
3632 }
3633 }
3634 }
3635
3636
3637 /* Determine how BIVS are initialised by looking through pre-header
3638 extended basic block. */
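/* For example (a sketch with hypothetical names): given a pre-header like

	i = 0;
	...
   loop_start:

   scanning backwards from the loop start finds the SET of `i' and records
   `0' as the biv's initial value.  When no such SET is found, a jump that
   branches around the loop, such as `if (i != 0) goto past_loop;', still
   tells us that `i' is 0 whenever the loop is entered, which is why an NE
   test below supplies an init_set.  */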
3639 static void
3640 loop_bivs_init_find (loop)
3641 struct loop *loop;
3642 {
3643 struct loop_ivs *ivs = LOOP_IVS (loop);
3644 /* Temporary list pointers for traversing ivs->list. */
3645 struct iv_class *bl;
3646 int call_seen;
3647 rtx p;
3648
3649 /* Find initial value for each biv by searching backwards from loop_start,
3650 halting at first label. Also record any test condition. */
3651
3652 call_seen = 0;
3653 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3654 {
3655 rtx test;
3656
3657 note_insn = p;
3658
3659 if (GET_CODE (p) == CALL_INSN)
3660 call_seen = 1;
3661
3662 if (INSN_P (p))
3663 note_stores (PATTERN (p), record_initial, ivs);
3664
3665 /* Record any test of a biv that branches around the loop if no store
3666 between it and the start of loop. We only care about tests with
3667 constants and registers and only certain of those. */
3668 if (GET_CODE (p) == JUMP_INSN
3669 && JUMP_LABEL (p) != 0
3670 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
3671 && (test = get_condition_for_loop (loop, p)) != 0
3672 && GET_CODE (XEXP (test, 0)) == REG
3673 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3674 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
3675 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
3676 && bl->init_insn == 0)
3677 {
3678 /* If an NE test, we have an initial value! */
3679 if (GET_CODE (test) == NE)
3680 {
3681 bl->init_insn = p;
3682 bl->init_set = gen_rtx_SET (VOIDmode,
3683 XEXP (test, 0), XEXP (test, 1));
3684 }
3685 else
3686 bl->initial_test = test;
3687 }
3688 }
3689 }
3690
3691
3692 /* Look at each biv and see if we can say anything better about its
3693 initial value from any initializing insns set up above. (This is done
3694 in two passes to avoid missing SETs in a PARALLEL.) */
3695 static void
3696 loop_bivs_check (loop)
3697 struct loop *loop;
3698 {
3699 struct loop_ivs *ivs = LOOP_IVS (loop);
3700 /* Temporary list pointers for traversing ivs->list. */
3701 struct iv_class *bl;
3702 struct iv_class **backbl;
3703
3704 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
3705 {
3706 rtx src;
3707 rtx note;
3708
3709 if (! bl->init_insn)
3710 continue;
3711
3712 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3713 is a constant, use the value of that. */
3714 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3715 && CONSTANT_P (XEXP (note, 0)))
3716 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3717 && CONSTANT_P (XEXP (note, 0))))
3718 src = XEXP (note, 0);
3719 else
3720 src = SET_SRC (bl->init_set);
3721
3722 if (loop_dump_stream)
3723 fprintf (loop_dump_stream,
3724 "Biv %d: initialized at insn %d: initial value ",
3725 bl->regno, INSN_UID (bl->init_insn));
3726
3727 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3728 || GET_MODE (src) == VOIDmode)
3729 && valid_initial_value_p (src, bl->init_insn,
3730 LOOP_INFO (loop)->pre_header_has_call,
3731 loop->start))
3732 {
3733 bl->initial_value = src;
3734
3735 if (loop_dump_stream)
3736 {
3737 print_simple_rtl (loop_dump_stream, src);
3738 fputc ('\n', loop_dump_stream);
3739 }
3740 }
3741 /* If we can't use the value as an initial value,
3742 let the biv keep its initial value of "itself". */
3743 else if (loop_dump_stream)
3744 fprintf (loop_dump_stream, "is complex\n");
3745 }
3746 }
3747
3748
3749 /* Search the loop for general induction variables. */
3750
3751 static void
3752 loop_givs_find (loop)
3753 struct loop* loop;
3754 {
3755 for_each_insn_in_loop (loop, check_insn_for_givs);
3756 }
3757
3758
3759 /* For each giv for which we still don't know whether or not it is
3760 replaceable, check to see if it is replaceable because its final value
3761 can be calculated. */
3762
3763 static void
3764 loop_givs_check (loop)
3765 struct loop *loop;
3766 {
3767 struct loop_ivs *ivs = LOOP_IVS (loop);
3768 struct iv_class *bl;
3769
3770 for (bl = ivs->list; bl; bl = bl->next)
3771 {
3772 struct induction *v;
3773
3774 for (v = bl->giv; v; v = v->next_iv)
3775 if (! v->replaceable && ! v->not_replaceable)
3776 check_final_value (loop, v);
3777 }
3778 }
3779
3780
3781 /* Return non-zero if it is possible to eliminate the biv BL provided
3782 all givs are reduced. This is possible if either the reg is not
3783 used outside the loop, or we can compute what its final value will
3784 be. */
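/* For instance (a hypothetical sketch): in

	for (i = 0; i < n; i++)
	  sum += b[i];

   once the address giv for `b[i]' is reduced, the only remaining uses of
   the biv `i' are its own increment and the exit test.  If `i' is dead
   after the loop, or its final value can be computed, maybe_eliminate_biv
   can rewrite the exit test in terms of the reduced giv and the biv goes
   away.  */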
3785
3786 static int
3787 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
3788 struct loop *loop;
3789 struct iv_class *bl;
3790 int threshold;
3791 int insn_count;
3792 {
3793 /* For architectures with a decrement_and_branch_until_zero insn,
3794 don't do this if we put a REG_NONNEG note on the endtest for this
3795 biv. */
3796
3797 #ifdef HAVE_decrement_and_branch_until_zero
3798 if (bl->nonneg)
3799 {
3800 if (loop_dump_stream)
3801 fprintf (loop_dump_stream,
3802 "Cannot eliminate nonneg biv %d.\n", bl->regno);
3803 return 0;
3804 }
3805 #endif
3806
3807 /* Check that biv is used outside loop or if it has a final value.
3808 Compare against bl->init_insn rather than loop->start. We aren't
3809 concerned with any uses of the biv between init_insn and
3810 loop->start since these won't be affected by the value of the biv
3811 elsewhere in the function, so long as init_insn doesn't use the
3812 biv itself. */
3813
3814 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
3815 && bl->init_insn
3816 && INSN_UID (bl->init_insn) < max_uid_for_loop
3817 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
3818 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3819 || (bl->final_value = final_biv_value (loop, bl)))
3820 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
3821
3822 if (loop_dump_stream)
3823 {
3824 fprintf (loop_dump_stream,
3825 "Cannot eliminate biv %d.\n",
3826 bl->regno);
3827 fprintf (loop_dump_stream,
3828 "First use: insn %d, last use: insn %d.\n",
3829 REGNO_FIRST_UID (bl->regno),
3830 REGNO_LAST_UID (bl->regno));
3831 }
3832 return 0;
3833 }
3834
3835
3836 /* Reduce each giv of BL that we have decided to reduce. */
3837
3838 static void
3839 loop_givs_reduce (loop, bl)
3840 struct loop *loop;
3841 struct iv_class *bl;
3842 {
3843 struct induction *v;
3844
3845 for (v = bl->giv; v; v = v->next_iv)
3846 {
3847 struct induction *tv;
3848 if (! v->ignore && v->same == 0)
3849 {
3850 int auto_inc_opt = 0;
3851
3852 /* If the code for derived givs immediately below has already
3853 allocated a new_reg, we must keep it. */
3854 if (! v->new_reg)
3855 v->new_reg = gen_reg_rtx (v->mode);
3856
3857 #ifdef AUTO_INC_DEC
3858 /* If the target has auto-increment addressing modes, and
3859 this is an address giv, then try to put the increment
3860 immediately after its use, so that flow can create an
3861 auto-increment addressing mode. */
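/* E.g. (a sketch): if the address giv's use is `MEM (reg r)' and the only
   increment of the reduced register, `r = r + 4', can be emitted
   immediately after that use, flow can later fuse the pair into
   `MEM (post_inc (r))' on a target with post-increment addressing.  */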
3862 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
3863 && bl->biv->always_executed && ! bl->biv->maybe_multiple
3864 /* We don't handle reversed biv's because bl->biv->insn
3865 does not have a valid INSN_LUID. */
3866 && ! bl->reversed
3867 && v->always_executed && ! v->maybe_multiple
3868 && INSN_UID (v->insn) < max_uid_for_loop)
3869 {
3870 /* If other giv's have been combined with this one, then
3871 this will work only if all uses of the other giv's occur
3872 before this giv's insn. This is difficult to check.
3873
3874 We simplify this by looking for the common case where
3875 there is one DEST_REG giv, and this giv's insn is the
3876 last use of the dest_reg of that DEST_REG giv. If the
3877 increment occurs after the address giv, then we can
3878 perform the optimization. (Otherwise, the increment
3879 would have to go before other_giv, and we would not be
3880 able to combine it with the address giv to get an
3881 auto-inc address.) */
3882 if (v->combined_with)
3883 {
3884 struct induction *other_giv = 0;
3885
3886 for (tv = bl->giv; tv; tv = tv->next_iv)
3887 if (tv->same == v)
3888 {
3889 if (other_giv)
3890 break;
3891 else
3892 other_giv = tv;
3893 }
3894 if (! tv && other_giv
3895 && REGNO (other_giv->dest_reg) < max_reg_before_loop
3896 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
3897 == INSN_UID (v->insn))
3898 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
3899 auto_inc_opt = 1;
3900 }
3901 /* Check for case where increment is before the address
3902 giv. Do this test in "loop order". */
3903 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
3904 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3905 || (INSN_LUID (bl->biv->insn)
3906 > INSN_LUID (loop->scan_start))))
3907 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3908 && (INSN_LUID (loop->scan_start)
3909 < INSN_LUID (bl->biv->insn))))
3910 auto_inc_opt = -1;
3911 else
3912 auto_inc_opt = 1;
3913
3914 #ifdef HAVE_cc0
3915 {
3916 rtx prev;
3917
3918 /* We can't put an insn immediately after one setting
3919 cc0, or immediately before one using cc0. */
3920 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
3921 || (auto_inc_opt == -1
3922 && (prev = prev_nonnote_insn (v->insn)) != 0
3923 && INSN_P (prev)
3924 && sets_cc0_p (PATTERN (prev))))
3925 auto_inc_opt = 0;
3926 }
3927 #endif
3928
3929 if (auto_inc_opt)
3930 v->auto_inc_opt = 1;
3931 }
3932 #endif
3933
3934 /* For each place where the biv is incremented, add an insn
3935 to increment the new, reduced reg for the giv. */
3936 for (tv = bl->biv; tv; tv = tv->next_iv)
3937 {
3938 rtx insert_before;
3939
3940 if (! auto_inc_opt)
3941 insert_before = tv->insn;
3942 else if (auto_inc_opt == 1)
3943 insert_before = NEXT_INSN (v->insn);
3944 else
3945 insert_before = v->insn;
3946
3947 if (tv->mult_val == const1_rtx)
3948 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3949 v->new_reg, v->new_reg,
3950 0, insert_before);
3951 else /* tv->mult_val == const0_rtx */
3952 /* A multiply is acceptable here
3953 since this is presumed to be seldom executed. */
3954 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3955 v->add_val, v->new_reg,
3956 0, insert_before);
3957 }
3958
3959 /* Add code at loop start to initialize giv's reduced reg. */
3960
3961 loop_iv_add_mult_hoist (loop,
3962 extend_value_for_giv (v, bl->initial_value),
3963 v->mult_val, v->add_val, v->new_reg);
3964 }
3965 }
3966 }
3967
3968
3969 /* Check for givs whose first use is their definition and whose
3970 last use is the definition of another giv. If so, it is likely
3971 dead and should not be used to derive another giv nor to
3972 eliminate a biv. */
3973
3974 static void
3975 loop_givs_dead_check (loop, bl)
3976 struct loop *loop ATTRIBUTE_UNUSED;
3977 struct iv_class *bl;
3978 {
3979 struct induction *v;
3980
3981 for (v = bl->giv; v; v = v->next_iv)
3982 {
3983 if (v->ignore
3984 || (v->same && v->same->ignore))
3985 continue;
3986
3987 if (v->giv_type == DEST_REG
3988 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
3989 {
3990 struct induction *v1;
3991
3992 for (v1 = bl->giv; v1; v1 = v1->next_iv)
3993 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
3994 v->maybe_dead = 1;
3995 }
3996 }
3997 }
3998
3999
4000 static void
4001 loop_givs_rescan (loop, bl, reg_map)
4002 struct loop *loop;
4003 struct iv_class *bl;
4004 rtx *reg_map;
4005 {
4006 struct induction *v;
4007
4008 for (v = bl->giv; v; v = v->next_iv)
4009 {
4010 if (v->same && v->same->ignore)
4011 v->ignore = 1;
4012
4013 if (v->ignore)
4014 continue;
4015
4016 /* Update expression if this was combined, in case other giv was
4017 replaced. */
4018 if (v->same)
4019 v->new_reg = replace_rtx (v->new_reg,
4020 v->same->dest_reg, v->same->new_reg);
4021
4022 /* See if this register is known to be a pointer to something. If
4023 so, see if we can find the alignment. First see if there is a
4024 destination register that is a pointer. If so, this shares the
4025 alignment too. Next see if we can deduce anything from the
4026 computational information. If not, and this is a DEST_ADDR
4027 giv, at least we know that it's a pointer, though we don't know
4028 the alignment. */
4029 if (GET_CODE (v->new_reg) == REG
4030 && v->giv_type == DEST_REG
4031 && REG_POINTER (v->dest_reg))
4032 mark_reg_pointer (v->new_reg,
4033 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4034 else if (GET_CODE (v->new_reg) == REG
4035 && REG_POINTER (v->src_reg))
4036 {
4037 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4038
4039 if (align == 0
4040 || GET_CODE (v->add_val) != CONST_INT
4041 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4042 align = 0;
4043
4044 mark_reg_pointer (v->new_reg, align);
4045 }
4046 else if (GET_CODE (v->new_reg) == REG
4047 && GET_CODE (v->add_val) == REG
4048 && REG_POINTER (v->add_val))
4049 {
4050 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4051
4052 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4053 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4054 align = 0;
4055
4056 mark_reg_pointer (v->new_reg, align);
4057 }
4058 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4059 mark_reg_pointer (v->new_reg, 0);
4060
4061 if (v->giv_type == DEST_ADDR)
4062 /* Store reduced reg as the address in the memref where we found
4063 this giv. */
4064 validate_change (v->insn, v->location, v->new_reg, 0);
4065 else if (v->replaceable)
4066 {
4067 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4068 }
4069 else
4070 {
4071 /* Not replaceable; emit an insn to set the original giv reg from
4072 the reduced giv, same as above. */
4073 loop_insn_emit_after (loop, 0, v->insn,
4074 gen_move_insn (v->dest_reg, v->new_reg));
4075 }
4076
4077 /* When a loop is reversed, givs which depend on the reversed
4078 biv, and which are live outside the loop, must be set to their
4079 correct final value. This insn is only needed if the giv is
4080 not replaceable. The correct final value is the same as the
4081 value that the giv starts the reversed loop with. */
4082 if (bl->reversed && ! v->replaceable)
4083 loop_iv_add_mult_sink (loop,
4084 extend_value_for_giv (v, bl->initial_value),
4085 v->mult_val, v->add_val, v->dest_reg);
4086 else if (v->final_value)
4087 loop_insn_sink_or_swim (loop,
4088 gen_move_insn (v->dest_reg, v->final_value));
4089
4090 if (loop_dump_stream)
4091 {
4092 fprintf (loop_dump_stream, "giv at %d reduced to ",
4093 INSN_UID (v->insn));
4094 print_simple_rtl (loop_dump_stream, v->new_reg);
4095 fprintf (loop_dump_stream, "\n");
4096 }
4097 }
4098 }
4099
4100
4101 static int
4102 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4103 struct loop *loop ATTRIBUTE_UNUSED;
4104 struct iv_class *bl;
4105 struct induction *v;
4106 rtx test_reg;
4107 {
4108 int add_cost;
4109 int benefit;
4110
4111 benefit = v->benefit;
4112 PUT_MODE (test_reg, v->mode);
4113 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4114 test_reg, test_reg);
4115
4116 /* Reduce benefit if not replaceable, since we will insert a
4117 move-insn to replace the insn that calculates this giv. Don't do
4118 this unless the giv is a user variable, since it will often be
4119 marked non-replaceable because of the duplication of the exit
4120 code outside the loop. In such a case, the copies we insert are
4121 dead and will be deleted. So they don't have a cost. Similar
4122 situations exist. */
4123 /* ??? The new final_[bg]iv_value code does a much better job of
4124 finding replaceable giv's, and hence this code may no longer be
4125 necessary. */
4126 if (! v->replaceable && ! bl->eliminable
4127 && REG_USERVAR_P (v->dest_reg))
4128 benefit -= copy_cost;
4129
4130 /* Decrease the benefit to count the add-insns that we will insert
4131 to increment the reduced reg for the giv. ??? This can
4132 overestimate the run-time cost of the additional insns, e.g. if
4133 there are multiple basic blocks that increment the biv, but only
4134 one of these blocks is executed during each iteration. There is
4135 no good way to detect cases like this with the current structure
4136 of the loop optimizer. This code is more accurate for
4137 determining code size than run-time benefits. */
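/* A worked example (numbers purely illustrative): with v->benefit = 6,
   copy_cost = 4, add_cost = 2 and two biv increments in the loop, a
   non-replaceable user-variable giv ends up with 6 - 4 - 2*2 = -2;
   the caller then compares the scaled benefit against the loop's insn
   count and will normally leave such a giv alone unless
   flag_reduce_all_givs is set.  */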
4138 benefit -= add_cost * bl->biv_count;
4139
4140 /* Decide whether to strength-reduce this giv or to leave the code
4141 unchanged (recompute it from the biv each time it is used). This
4142 decision can be made independently for each giv. */
4143
4144 #ifdef AUTO_INC_DEC
4145 /* Attempt to guess whether autoincrement will handle some of the
4146 new add insns; if so, increase BENEFIT (undo the subtraction of
4147 add_cost that was done above). */
4148 if (v->giv_type == DEST_ADDR
4149 /* Increasing the benefit is risky, since this is only a guess.
4150 Avoid increasing register pressure in cases where there would
4151 be no other benefit from reducing this giv. */
4152 && benefit > 0
4153 && GET_CODE (v->mult_val) == CONST_INT)
4154 {
4155 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4156
4157 if (HAVE_POST_INCREMENT
4158 && INTVAL (v->mult_val) == size)
4159 benefit += add_cost * bl->biv_count;
4160 else if (HAVE_PRE_INCREMENT
4161 && INTVAL (v->mult_val) == size)
4162 benefit += add_cost * bl->biv_count;
4163 else if (HAVE_POST_DECREMENT
4164 && -INTVAL (v->mult_val) == size)
4165 benefit += add_cost * bl->biv_count;
4166 else if (HAVE_PRE_DECREMENT
4167 && -INTVAL (v->mult_val) == size)
4168 benefit += add_cost * bl->biv_count;
4169 }
4170 #endif
4171
4172 return benefit;
4173 }
4174
4175
4176 /* Free IV structures for LOOP. */
4177
4178 static void
4179 loop_ivs_free (loop)
4180 struct loop *loop;
4181 {
4182 struct loop_ivs *ivs = LOOP_IVS (loop);
4183 struct iv_class *iv = ivs->list;
4184
4185 free (ivs->regs);
4186
4187 while (iv)
4188 {
4189 struct iv_class *next = iv->next;
4190 struct induction *induction;
4191 struct induction *next_induction;
4192
4193 for (induction = iv->biv; induction; induction = next_induction)
4194 {
4195 next_induction = induction->next_iv;
4196 free (induction);
4197 }
4198 for (induction = iv->giv; induction; induction = next_induction)
4199 {
4200 next_induction = induction->next_iv;
4201 free (induction);
4202 }
4203
4204 free (iv);
4205 iv = next;
4206 }
4207 }
4208
4209
4210 /* Perform strength reduction and induction variable elimination.
4211
4212 Pseudo registers created during this function will be beyond the
4213 last valid index in several tables including
4214 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4215 problem here, because the added registers cannot be givs outside of
4216 their loop, and hence will never be reconsidered. But scan_loop
4217 must check regnos to make sure they are in bounds. */
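/* As a concrete illustration (hypothetical source; element size of 4
   bytes assumed): the loop

	for (i = 0; i < n; i++)
	  a[i] = 0;

   computes the address `a + i*4' every iteration.  Strength reduction
   introduces a new register initialized to `a' before the loop and
   incremented by 4 wherever `i' is incremented, removing the multiply;
   if `i' then has no other use, biv elimination rewrites the exit test
   in terms of the new register and `i' disappears entirely.  */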
4218
4219 static void
4220 strength_reduce (loop, flags)
4221 struct loop *loop;
4222 int flags;
4223 {
4224 struct loop_info *loop_info = LOOP_INFO (loop);
4225 struct loop_regs *regs = LOOP_REGS (loop);
4226 struct loop_ivs *ivs = LOOP_IVS (loop);
4227 rtx p;
4228 /* Temporary list pointer for traversing ivs->list. */
4229 struct iv_class *bl;
4230 /* Ratio of extra register life span we can justify
4231 for saving an instruction. More if loop doesn't call subroutines
4232 since in that case saving an insn makes more difference
4233 and more registers are available. */
4234 /* ??? could set this to last value of threshold in move_movables */
4235 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4236 /* Map of pseudo-register replacements. */
4237 rtx *reg_map = NULL;
4238 int reg_map_size;
4239 int unrolled_insn_copies = 0;
4240 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4241 int insn_count = count_insns_in_loop (loop);
4242
4243 addr_placeholder = gen_reg_rtx (Pmode);
4244
4245 ivs->n_regs = max_reg_before_loop;
4246 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4247
4248 /* Find all BIVs in loop. */
4249 loop_bivs_find (loop);
4250
4251 /* Exit if there are no bivs. */
4252 if (! ivs->list)
4253 {
4254 /* We can still unroll the loop anyway, but indicate that there is no
4255 strength reduction info available. */
4256 if (flags & LOOP_UNROLL)
4257 unroll_loop (loop, insn_count, 0);
4258
4259 loop_ivs_free (loop);
4260 return;
4261 }
4262
4263 /* Determine how BIVS are initialised by looking through pre-header
4264 extended basic block. */
4265 loop_bivs_init_find (loop);
4266
4267 /* Look at each biv and see if we can say anything better about its
4268 initial value from any initializing insns set up above. */
4269 loop_bivs_check (loop);
4270
4271 /* Search the loop for general induction variables. */
4272 loop_givs_find (loop);
4273
4274 /* Try to calculate and save the number of loop iterations. This is
4275 set to zero if the actual number can not be calculated. This must
4276 be called after all giv's have been identified, since otherwise it may
4277 fail if the iteration variable is a giv. */
4278 loop_iterations (loop);
4279
4280 /* Now for each giv for which we still don't know whether or not it is
4281 replaceable, check to see if it is replaceable because its final value
4282 can be calculated. This must be done after loop_iterations is called,
4283 so that final_giv_value will work correctly. */
4284 loop_givs_check (loop);
4285
4286 /* Try to prove that the loop counter variable (if any) is always
4287 nonnegative; if so, record that fact with a REG_NONNEG note
4288 so that "decrement and branch until zero" insn can be used. */
4289 check_dbra_loop (loop, insn_count);
4290
4291 /* Create reg_map to hold substitutions for replaceable giv regs.
4292 Some givs might have been made from biv increments, so look at
4293 ivs->reg_iv_type for a suitable size. */
4294 reg_map_size = ivs->n_regs;
4295 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4296
4297 /* Examine each iv class for feasibility of strength reduction/induction
4298 variable elimination. */
4299
4300 for (bl = ivs->list; bl; bl = bl->next)
4301 {
4302 struct induction *v;
4303 int benefit;
4304
4305 /* Test whether it will be possible to eliminate this biv
4306 provided all givs are reduced. */
4307 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4308
4309 /* This will be true at the end, if all givs which depend on this
4310 biv have been strength reduced.
4311 We can't (currently) eliminate the biv unless this is so. */
4312 bl->all_reduced = 1;
4313
4314 /* Check each extension dependent giv in this class to see if its
4315 root biv is safe from wrapping in the interior mode. */
4316 check_ext_dependant_givs (bl, loop_info);
4317
4318 /* Combine all giv's for this iv_class. */
4319 combine_givs (regs, bl);
4320
4321 for (v = bl->giv; v; v = v->next_iv)
4322 {
4323 struct induction *tv;
4324
4325 if (v->ignore || v->same)
4326 continue;
4327
4328 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
4329
4330 /* If an insn is not to be strength reduced, then set its ignore
4331 flag, and clear bl->all_reduced. */
4332
4333 /* A giv that depends on a reversed biv must be reduced if it is
4334 used after the loop exit, otherwise, it would have the wrong
4335 value after the loop exit. To make it simple, just reduce all
4336 of such giv's whether or not we know they are used after the loop
4337 exit. */
4338
4339 if (! flag_reduce_all_givs
4340 && v->lifetime * threshold * benefit < insn_count
4341 && ! bl->reversed)
4342 {
4343 if (loop_dump_stream)
4344 fprintf (loop_dump_stream,
4345 "giv of insn %d not worth while, %d vs %d.\n",
4346 INSN_UID (v->insn),
4347 v->lifetime * threshold * benefit, insn_count);
4348 v->ignore = 1;
4349 bl->all_reduced = 0;
4350 }
4351 else
4352 {
4353 /* Check that we can increment the reduced giv without a
4354 multiply insn. If not, reject it. */
4355
4356 for (tv = bl->biv; tv; tv = tv->next_iv)
4357 if (tv->mult_val == const1_rtx
4358 && ! product_cheap_p (tv->add_val, v->mult_val))
4359 {
4360 if (loop_dump_stream)
4361 fprintf (loop_dump_stream,
4362 "giv of insn %d: would need a multiply.\n",
4363 INSN_UID (v->insn));
4364 v->ignore = 1;
4365 bl->all_reduced = 0;
4366 break;
4367 }
4368 }
4369 }
4370
4371 /* Check for givs whose first use is their definition and whose
4372 last use is the definition of another giv. If so, it is likely
4373 dead and should not be used to derive another giv nor to
4374 eliminate a biv. */
4375 loop_givs_dead_check (loop, bl);
4376
4377 /* Reduce each giv that we decided to reduce. */
4378 loop_givs_reduce (loop, bl);
4379
4380 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4381 as not reduced.
4382
4383 For each giv register that can be reduced now: if replaceable,
4384 substitute reduced reg wherever the old giv occurs;
4385 else add new move insn "giv_reg = reduced_reg". */
4386 loop_givs_rescan (loop, bl, reg_map);
4387
4388 /* All the givs based on the biv bl have been reduced if they
4389 merit it. */
4390
4391 /* For each giv not marked as maybe dead that has been combined with a
4392 second giv, clear any "maybe dead" mark on that second giv.
4393 v->new_reg will either be or refer to the register of the giv it
4394 combined with.
4395
4396 Doing this clearing avoids problems in biv elimination where
4397 a giv's new_reg is a complex value that can't be put in the
4398 insn but the giv combined with (with a reg as new_reg) is
4399 marked maybe_dead. Since the register will be used in either
4400 case, we'd prefer it be used from the simpler giv. */
4401
4402 for (v = bl->giv; v; v = v->next_iv)
4403 if (! v->maybe_dead && v->same)
4404 v->same->maybe_dead = 0;
4405
4406 /* Try to eliminate the biv, if it is a candidate.
4407 This won't work if ! bl->all_reduced,
4408 since the givs we planned to use might not have been reduced.
4409
4410 We have to be careful that we didn't initially think we could
4411 eliminate this biv because of a giv that we now think may be
4412 dead and shouldn't be used as a biv replacement.
4413
4414 Also, there is the possibility that we may have a giv that looks
4415 like it can be used to eliminate a biv, but the resulting insn
4416 isn't valid. This can happen, for example, on the 88k, where a
4417 JUMP_INSN can compare a register only with zero. Attempts to
4418 replace it with a compare with a constant will fail.
4419
4420 Note that in cases where this call fails, we may have replaced some
4421 of the occurrences of the biv with a giv, but no harm was done in
4422 doing so in the rare cases where it can occur. */
4423
4424 if (bl->all_reduced == 1 && bl->eliminable
4425 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4426 {
4427 /* ?? If we created a new test to bypass the loop entirely,
4428 or otherwise drop straight in, based on this test, then
4429 we might want to rewrite it also. This way some later
4430 pass has more hope of removing the initialization of this
4431 biv entirely. */
4432
4433 /* If final_value != 0, then the biv may be used after loop end
4434 and we must emit an insn to set it just in case.
4435
4436 Reversed bivs already have an insn after the loop setting their
4437 value, so we don't need another one. We can't calculate the
4438 proper final value for such a biv here anyway. */
4439 if (bl->final_value && ! bl->reversed)
4440 loop_insn_sink_or_swim (loop, gen_move_insn
4441 (bl->biv->dest_reg, bl->final_value));
4442
4443 if (loop_dump_stream)
4444 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4445 bl->regno);
4446 }
4447 }
4448
4449 /* Go through all the instructions in the loop, making all the
4450 register substitutions scheduled in REG_MAP. */
4451
4452 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
4453 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4454 || GET_CODE (p) == CALL_INSN)
4455 {
4456 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4457 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4458 INSN_CODE (p) = -1;
4459 }
4460
4461 if (loop_info->n_iterations > 0)
4462 {
4463 /* When we completely unroll a loop we will likely not need the increment
4464 of the loop BIV and we will not need the conditional branch at the
4465 end of the loop. */
4466 unrolled_insn_copies = insn_count - 2;
4467
4468 #ifdef HAVE_cc0
4469 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4470 need the comparison before the conditional branch at the end of the
4471 loop. */
4472 unrolled_insn_copies -= 1;
4473 #endif
4474
4475 /* We'll need one copy for each loop iteration. */
4476 unrolled_insn_copies *= loop_info->n_iterations;
4477
4478 /* A little slop to account for the ability to remove initialization
4479 code, better CSE, and other secondary benefits of completely
4480 unrolling some loops. */
4481 unrolled_insn_copies -= 1;
4482
4483 /* Clamp the value. */
4484 if (unrolled_insn_copies < 0)
4485 unrolled_insn_copies = 0;
4486 }
4487
4488 /* Unroll loops from within strength reduction so that we can use the
4489 induction variable information that strength_reduce has already
4490 collected. Always unroll loops that would be as small or smaller
4491 unrolled than when rolled. */
4492 if ((flags & LOOP_UNROLL)
4493 || (loop_info->n_iterations > 0
4494 && unrolled_insn_copies <= insn_count))
4495 unroll_loop (loop, insn_count, 1);
4496
4497 #ifdef HAVE_doloop_end
4498 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4499 doloop_optimize (loop);
4500 #endif /* HAVE_doloop_end */
4501
4502 /* If the number of iterations is known, emit a branch prediction note
4503 on the branch. Do that only in the second loop pass, since loop unrolling
4504 may change the number of iterations performed. */
4505 if ((flags & LOOP_BCT)
4506 && loop_info->n_iterations / loop_info->unroll_number > 1)
4507 {
4508 int n = loop_info->n_iterations / loop_info->unroll_number;
4509 predict_insn (PREV_INSN (loop->end),
4510 PRED_LOOP_ITERATIONS,
4511 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
4512 }
4513
4514 if (loop_dump_stream)
4515 fprintf (loop_dump_stream, "\n");
4516
4517 loop_ivs_free (loop);
4518 if (reg_map)
4519 free (reg_map);
4520 }
4521 \f
4522 /* Record all basic induction variables calculated in the insn. */
4523 static rtx
4524 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4525 struct loop *loop;
4526 rtx p;
4527 int not_every_iteration;
4528 int maybe_multiple;
4529 {
4530 struct loop_ivs *ivs = LOOP_IVS (loop);
4531 rtx set;
4532 rtx dest_reg;
4533 rtx inc_val;
4534 rtx mult_val;
4535 rtx *location;
4536
4537 if (GET_CODE (p) == INSN
4538 && (set = single_set (p))
4539 && GET_CODE (SET_DEST (set)) == REG)
4540 {
4541 dest_reg = SET_DEST (set);
4542 if (REGNO (dest_reg) < max_reg_before_loop
4543 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
4544 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
4545 {
4546 if (basic_induction_var (loop, SET_SRC (set),
4547 GET_MODE (SET_SRC (set)),
4548 dest_reg, p, &inc_val, &mult_val,
4549 &location))
4550 {
4551 /* It is a possible basic induction variable.
4552 Create and initialize an induction structure for it. */
4553
4554 struct induction *v
4555 = (struct induction *) xmalloc (sizeof (struct induction));
4556
4557 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
4558 not_every_iteration, maybe_multiple);
4559 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
4560 }
4561 else if (REGNO (dest_reg) < ivs->n_regs)
4562 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
4563 }
4564 }
4565 return p;
4566 }
4567 \f
4568 /* Record all givs calculated in the insn.
4569 A register is a giv if: it is only set once, it is a function of a
4570 biv and a constant (or invariant), and it is not a biv. */
4571 static rtx
4572 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
4573 struct loop *loop;
4574 rtx p;
4575 int not_every_iteration;
4576 int maybe_multiple;
4577 {
4578 struct loop_regs *regs = LOOP_REGS (loop);
4579
4580 rtx set;
4581 /* Look for a general induction variable in a register. */
4582 if (GET_CODE (p) == INSN
4583 && (set = single_set (p))
4584 && GET_CODE (SET_DEST (set)) == REG
4585 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
4586 {
4587 rtx src_reg;
4588 rtx dest_reg;
4589 rtx add_val;
4590 rtx mult_val;
4591 rtx ext_val;
4592 int benefit;
4593 rtx regnote = 0;
4594 rtx last_consec_insn;
4595
4596 dest_reg = SET_DEST (set);
4597 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4598 return p;
4599
4600 if (/* SET_SRC is a giv. */
4601 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
4602 &mult_val, &ext_val, 0, &benefit, VOIDmode)
4603 /* Equivalent expression is a giv. */
4604 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4605 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
4606 &add_val, &mult_val, &ext_val, 0,
4607 &benefit, VOIDmode)))
4608 /* Don't try to handle any regs made by loop optimization.
4609 We have nothing on them in regno_first_uid, etc. */
4610 && REGNO (dest_reg) < max_reg_before_loop
4611 /* Don't recognize a BASIC_INDUCT_VAR here. */
4612 && dest_reg != src_reg
4613 /* This must be the only place where the register is set. */
4614 && (regs->array[REGNO (dest_reg)].n_times_set == 1
4615 /* or all sets must be consecutive and make a giv. */
4616 || (benefit = consec_sets_giv (loop, benefit, p,
4617 src_reg, dest_reg,
4618 &add_val, &mult_val, &ext_val,
4619 &last_consec_insn))))
4620 {
4621 struct induction *v
4622 = (struct induction *) xmalloc (sizeof (struct induction));
4623
4624 /* If this is a library call, increase benefit. */
4625 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4626 benefit += libcall_benefit (p);
4627
4628 /* Skip the consecutive insns, if there are any. */
4629 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
4630 p = last_consec_insn;
4631
4632 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
4633 ext_val, benefit, DEST_REG, not_every_iteration,
4634 maybe_multiple, (rtx*)0);
4635
4636 }
4637 }
4638
4639 #ifndef DONT_REDUCE_ADDR
4640 /* Look for givs which are memory addresses. */
4641 /* This resulted in worse code on a VAX 8600. I wonder if it
4642 still does. */
4643 if (GET_CODE (p) == INSN)
4644 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
4645 maybe_multiple);
4646 #endif
4647
4648 /* Update the status of whether giv can derive other givs. This can
4649 change when we pass a label or an insn that updates a biv. */
4650 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4651 || GET_CODE (p) == CODE_LABEL)
4652 update_giv_derive (loop, p);
4653 return p;
4654 }
4655 \f
4656 /* Return 1 if X is a valid source for an initial value (or as value being
4657 compared against in an initial test).
4658
4659 X must be either a register or constant and must not be clobbered between
4660 the current insn and the start of the loop.
4661
4662 INSN is the insn containing X. */
4663
4664 static int
4665 valid_initial_value_p (x, insn, call_seen, loop_start)
4666 rtx x;
4667 rtx insn;
4668 int call_seen;
4669 rtx loop_start;
4670 {
4671 if (CONSTANT_P (x))
4672 return 1;
4673
4674 /* Only consider pseudos we know about initialized in insns whose luids
4675 we know. */
4676 if (GET_CODE (x) != REG
4677 || REGNO (x) >= max_reg_before_loop)
4678 return 0;
4679
4680 /* Don't use call-clobbered registers across a call which clobbers it. On
4681 some machines, don't use any hard registers at all. */
4682 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4683 && (SMALL_REGISTER_CLASSES
4684 || (call_used_regs[REGNO (x)] && call_seen)))
4685 return 0;
4686
4687 /* Don't use registers that have been clobbered before the start of the
4688 loop. */
4689 if (reg_set_between_p (x, insn, loop_start))
4690 return 0;
4691
4692 return 1;
4693 }
4694 \f
4695 /* Scan X for memory refs and check each memory address
4696 as a possible giv. INSN is the insn whose pattern X comes from.
4697 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4698 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
4699 more than once in each loop iteration. */
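/* Sketch of what is recognized here: for an insn whose pattern contains
   `(mem:SI (plus:SI (reg i) (reg a)))' where `i' is a biv and `a' is loop
   invariant, the address operand is a candidate DEST_ADDR giv; its
   "destination" is the dummy addr_placeholder register, and LOCATION
   points at the address inside the MEM so the reduced register can be
   substituted there later.  */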
4700
4701 static void
4702 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
4703 const struct loop *loop;
4704 rtx x;
4705 rtx insn;
4706 int not_every_iteration, maybe_multiple;
4707 {
4708 register int i, j;
4709 register enum rtx_code code;
4710 register const char *fmt;
4711
4712 if (x == 0)
4713 return;
4714
4715 code = GET_CODE (x);
4716 switch (code)
4717 {
4718 case REG:
4719 case CONST_INT:
4720 case CONST:
4721 case CONST_DOUBLE:
4722 case SYMBOL_REF:
4723 case LABEL_REF:
4724 case PC:
4725 case CC0:
4726 case ADDR_VEC:
4727 case ADDR_DIFF_VEC:
4728 case USE:
4729 case CLOBBER:
4730 return;
4731
4732 case MEM:
4733 {
4734 rtx src_reg;
4735 rtx add_val;
4736 rtx mult_val;
4737 rtx ext_val;
4738 int benefit;
4739
4740 /* This code used to disable creating GIVs with mult_val == 1 and
4741 add_val == 0. However, this leads to lost optimizations when
4742 it comes time to combine a set of related DEST_ADDR GIVs, since
4743 this one would not be seen. */
4744
4745 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
4746 &mult_val, &ext_val, 1, &benefit,
4747 GET_MODE (x)))
4748 {
4749 /* Found one; record it. */
4750 struct induction *v
4751 = (struct induction *) xmalloc (sizeof (struct induction));
4752
4753 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
4754 add_val, ext_val, benefit, DEST_ADDR,
4755 not_every_iteration, maybe_multiple, &XEXP (x, 0));
4756
4757 v->mem = x;
4758 }
4759 }
4760 return;
4761
4762 default:
4763 break;
4764 }
4765
4766 /* Recursively scan the subexpressions for other mem refs. */
4767
4768 fmt = GET_RTX_FORMAT (code);
4769 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4770 if (fmt[i] == 'e')
4771 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
4772 maybe_multiple);
4773 else if (fmt[i] == 'E')
4774 for (j = 0; j < XVECLEN (x, i); j++)
4775 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
4776 maybe_multiple);
4777 }
4778 \f
4779 /* Fill in the data about one biv update.
4780 V is the `struct induction' in which we record the biv. (It is
4781 allocated by the caller, with xmalloc.)
4782 INSN is the insn that sets it.
4783 DEST_REG is the biv's reg.
4784
4785 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4786 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4787 being set to INC_VAL.
4788
4789 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4790 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4791 can be executed more than once per iteration. If MAYBE_MULTIPLE
4792 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4793 executed exactly once per iteration. */
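/* For example (a sketch): the update `i = i + 4' arrives here with
   MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas a plain
   copy `i = j' inside the loop arrives with MULT_VAL == const0_rtx and
   INC_VAL == (reg j); only the former marks the class as incremented.  */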
4794
4795 static void
4796 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
4797 not_every_iteration, maybe_multiple)
4798 struct loop *loop;
4799 struct induction *v;
4800 rtx insn;
4801 rtx dest_reg;
4802 rtx inc_val;
4803 rtx mult_val;
4804 rtx *location;
4805 int not_every_iteration;
4806 int maybe_multiple;
4807 {
4808 struct loop_ivs *ivs = LOOP_IVS (loop);
4809 struct iv_class *bl;
4810
4811 v->insn = insn;
4812 v->src_reg = dest_reg;
4813 v->dest_reg = dest_reg;
4814 v->mult_val = mult_val;
4815 v->add_val = inc_val;
4816 v->ext_dependant = NULL_RTX;
4817 v->location = location;
4818 v->mode = GET_MODE (dest_reg);
4819 v->always_computable = ! not_every_iteration;
4820 v->always_executed = ! not_every_iteration;
4821 v->maybe_multiple = maybe_multiple;
4822
4823 /* Add this to the reg's iv_class, creating a class
4824 if this is the first incrementation of the reg. */
4825
4826 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
4827 if (bl == 0)
4828 {
4829 /* Create and initialize new iv_class. */
4830
4831 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
4832
4833 bl->regno = REGNO (dest_reg);
4834 bl->biv = 0;
4835 bl->giv = 0;
4836 bl->biv_count = 0;
4837 bl->giv_count = 0;
4838
4839 /* Set initial value to the reg itself. */
4840 bl->initial_value = dest_reg;
4841 bl->final_value = 0;
4842 /* We haven't seen the initializing insn yet */
4843 bl->init_insn = 0;
4844 bl->init_set = 0;
4845 bl->initial_test = 0;
4846 bl->incremented = 0;
4847 bl->eliminable = 0;
4848 bl->nonneg = 0;
4849 bl->reversed = 0;
4850 bl->total_benefit = 0;
4851
4852 /* Add this class to ivs->list. */
4853 bl->next = ivs->list;
4854 ivs->list = bl;
4855
4856 /* Put it in the array of biv register classes. */
4857 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
4858 }
4859
4860 /* Update IV_CLASS entry for this biv. */
4861 v->next_iv = bl->biv;
4862 bl->biv = v;
4863 bl->biv_count++;
4864 if (mult_val == const1_rtx)
4865 bl->incremented = 1;
4866
4867 if (loop_dump_stream)
4868 loop_biv_dump (v, loop_dump_stream, 0);
4869 }
4870 \f
4871 /* Fill in the data about one giv.
4872 V is the `struct induction' in which we record the giv. (It is
4873 allocated by the caller, with xmalloc.)
4874 INSN is the insn that sets it.
4875 BENEFIT estimates the savings from deleting this insn.
4876 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4877 into a register or is used as a memory address.
4878
4879 SRC_REG is the biv reg which the giv is computed from.
4880 DEST_REG is the giv's reg (if the giv is stored in a reg).
4881 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4882 LOCATION points to the place where this giv's value appears in INSN. */
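/* A giv always has the value SRC_REG * MULT_VAL + ADD_VAL.  For example
   (hypothetical source): `j = i * 4 + 16' computed from biv `i' gives
   SRC_REG = (reg i), MULT_VAL = (const_int 4), ADD_VAL = (const_int 16)
   and TYPE = DEST_REG, while the address of `a[i]' gives MULT_VAL =
   (const_int 4), ADD_VAL = (reg a), TYPE = DEST_ADDR and DEST_REG =
   addr_placeholder.  */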
4883
4884 static void
4885 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
4886 benefit, type, not_every_iteration, maybe_multiple, location)
4887 const struct loop *loop;
4888 struct induction *v;
4889 rtx insn;
4890 rtx src_reg;
4891 rtx dest_reg;
4892 rtx mult_val, add_val, ext_val;
4893 int benefit;
4894 enum g_types type;
4895 int not_every_iteration, maybe_multiple;
4896 rtx *location;
4897 {
4898 struct loop_ivs *ivs = LOOP_IVS (loop);
4899 struct induction *b;
4900 struct iv_class *bl;
4901 rtx set = single_set (insn);
4902 rtx temp;
4903
4904 /* Attempt to prove constantness of the values. */
4905 temp = simplify_rtx (add_val);
4906 if (temp)
4907 add_val = temp;
4908
4909 v->insn = insn;
4910 v->src_reg = src_reg;
4911 v->giv_type = type;
4912 v->dest_reg = dest_reg;
4913 v->mult_val = mult_val;
4914 v->add_val = add_val;
4915 v->ext_dependant = ext_val;
4916 v->benefit = benefit;
4917 v->location = location;
4918 v->cant_derive = 0;
4919 v->combined_with = 0;
4920 v->maybe_multiple = maybe_multiple;
4921 v->maybe_dead = 0;
4922 v->derive_adjustment = 0;
4923 v->same = 0;
4924 v->ignore = 0;
4925 v->new_reg = 0;
4926 v->final_value = 0;
4927 v->same_insn = 0;
4928 v->auto_inc_opt = 0;
4929 v->unrolled = 0;
4930 v->shared = 0;
4931
4932 /* The v->always_computable field is used in update_giv_derive, to
4933 determine whether a giv can be used to derive another giv. For a
4934 DEST_REG giv, INSN computes a new value for the giv, so its value
4935 isn't computable if INSN isn't executed every iteration.
4936 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4937 it does not compute a new value. Hence the value is always computable
4938 regardless of whether INSN is executed each iteration. */
4939
4940 if (type == DEST_ADDR)
4941 v->always_computable = 1;
4942 else
4943 v->always_computable = ! not_every_iteration;
4944
4945 v->always_executed = ! not_every_iteration;
4946
4947 if (type == DEST_ADDR)
4948 {
4949 v->mode = GET_MODE (*location);
4950 v->lifetime = 1;
4951 }
4952 else /* type == DEST_REG */
4953 {
4954 v->mode = GET_MODE (SET_DEST (set));
4955
4956 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
4957
4958 /* If the lifetime is zero, it means that this register is
4959 really a dead store. So mark this as a giv that can be
4960 ignored. This will not prevent the biv from being eliminated. */
4961 if (v->lifetime == 0)
4962 v->ignore = 1;
4963
4964 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
4965 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
4966 }
4967
4968 /* Add the giv to the class of givs computed from one biv. */
4969
4970 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
4971 if (bl)
4972 {
4973 v->next_iv = bl->giv;
4974 bl->giv = v;
4975 /* Don't count DEST_ADDR. This is supposed to count the number of
4976 insns that calculate givs. */
4977 if (type == DEST_REG)
4978 bl->giv_count++;
4979 bl->total_benefit += benefit;
4980 }
4981 else
4982 /* Fatal error, biv missing for this giv? */
4983 abort ();
4984
4985 if (type == DEST_ADDR)
4986 v->replaceable = 1;
4987 else
4988 {
4989 /* The giv can be replaced outright by the reduced register only if all
4990 of the following conditions are true:
4991 - the insn that sets the giv is always executed on any iteration
4992 on which the giv is used at all
4993 (there are two ways to deduce this:
4994 either the insn is executed on every iteration,
4995 or all uses follow that insn in the same basic block),
4996 - the giv is not used outside the loop
4997 - no assignments to the biv occur during the giv's lifetime. */
4998
4999 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5000 /* Previous line always fails if INSN was moved by loop opt. */
5001 && REGNO_LAST_LUID (REGNO (dest_reg))
5002 < INSN_LUID (loop->end)
5003 && (! not_every_iteration
5004 || last_use_this_basic_block (dest_reg, insn)))
5005 {
5006 /* Now check that there are no assignments to the biv within the
5007 giv's lifetime. This requires two separate checks. */
5008
5009 /* Check each biv update, and fail if any are between the first
5010 and last use of the giv.
5011
5012 If this loop contains an inner loop that was unrolled, then
5013 the insn modifying the biv may have been emitted by the loop
5014 unrolling code, and hence does not have a valid luid. Just
5015 mark the giv as not replaceable in this case. The biv is not very
5016 useful as a biv, because it is used in two different loops.
5017 It is very unlikely that we would be able to optimize the giv
5018 using this biv anyways. */
5019
5020 v->replaceable = 1;
5021 for (b = bl->biv; b; b = b->next_iv)
5022 {
5023 if (INSN_UID (b->insn) >= max_uid_for_loop
5024 || ((INSN_LUID (b->insn)
5025 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5026 && (INSN_LUID (b->insn)
5027 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5028 {
5029 v->replaceable = 0;
5030 v->not_replaceable = 1;
5031 break;
5032 }
5033 }
5034
5035 /* If there are any backwards branches that go from after the
5036 biv update to before it, then this giv is not replaceable. */
5037 if (v->replaceable)
5038 for (b = bl->biv; b; b = b->next_iv)
5039 if (back_branch_in_range_p (loop, b->insn))
5040 {
5041 v->replaceable = 0;
5042 v->not_replaceable = 1;
5043 break;
5044 }
5045 }
5046 else
5047 {
5048 /* May still be replaceable; we don't have enough info here to
5049 decide. */
5050 v->replaceable = 0;
5051 v->not_replaceable = 0;
5052 }
5053 }
5054
5055 /* Record whether the add_val contains a const_int, for later use by
5056 combine_givs. */
5057 {
5058 rtx tem = add_val;
5059
5060 v->no_const_addval = 1;
5061 if (tem == const0_rtx)
5062 ;
5063 else if (CONSTANT_P (add_val))
5064 v->no_const_addval = 0;
5065 if (GET_CODE (tem) == PLUS)
5066 {
5067 while (1)
5068 {
5069 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5070 tem = XEXP (tem, 0);
5071 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5072 tem = XEXP (tem, 1);
5073 else
5074 break;
5075 }
5076 if (CONSTANT_P (XEXP (tem, 1)))
5077 v->no_const_addval = 0;
5078 }
5079 }
5080
5081 if (loop_dump_stream)
5082 loop_giv_dump (v, loop_dump_stream, 0);
5083 }
5084
5085 /* All this does is determine whether a giv can be made replaceable because
5086 its final value can be calculated. This code can not be part of record_giv
5087 above, because final_giv_value requires that the number of loop iterations
5088 be known, and that can not be accurately calculated until after all givs
5089 have been identified. */
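/* For instance (a sketch): if the loop is known to run exactly N times
   and the biv `i' steps 0, 1, ..., then a giv `j = 4*i + c' that was not
   provably replaceable in record_giv can become replaceable here, since
   its value on exit can be computed from the biv's final value and
   emitted outside the loop.  */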
5090
5091 static void
5092 check_final_value (loop, v)
5093 const struct loop *loop;
5094 struct induction *v;
5095 {
5096 struct loop_ivs *ivs = LOOP_IVS (loop);
5097 struct iv_class *bl;
5098 rtx final_value = 0;
5099
5100 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5101
5102 /* DEST_ADDR givs will never reach here, because they are always marked
5103 replaceable above in record_giv. */
5104
5105 /* The giv can be replaced outright by the reduced register only if all
5106 of the following conditions are true:
5107 - the insn that sets the giv is always executed on any iteration
5108 on which the giv is used at all
5109 (there are two ways to deduce this:
5110 either the insn is executed on every iteration,
5111 or all uses follow that insn in the same basic block),
5112 - its final value can be calculated (this condition is different
5113 from the one above in record_giv)
5114 - it's not used before it's set
5115 - no assignments to the biv occur during the giv's lifetime. */
5116
5117 #if 0
5118 /* This is only called now when replaceable is known to be false. */
5119 /* Clear replaceable, so that it won't confuse final_giv_value. */
5120 v->replaceable = 0;
5121 #endif
5122
5123 if ((final_value = final_giv_value (loop, v))
5124 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5125 {
5126 int biv_increment_seen = 0, before_giv_insn = 0;
5127 rtx p = v->insn;
5128 rtx last_giv_use;
5129
5130 v->replaceable = 1;
5131
5132 /* When trying to determine whether or not a biv increment occurs
5133 during the lifetime of the giv, we can ignore uses of the variable
5134 outside the loop because final_value is true. Hence we can not
5135 use regno_last_uid and regno_first_uid as above in record_giv. */
5136
5137 /* Search the loop to determine whether any assignments to the
5138 biv occur during the giv's lifetime. Start with the insn
5139 that sets the giv, and search around the loop until we come
5140 back to that insn again.
5141
5142 Also fail if there is a jump within the giv's lifetime that jumps
5143 to somewhere outside the lifetime but still within the loop. This
5144 catches spaghetti code where the execution order is not linear, and
5145 hence the above test fails. Here we assume that the giv lifetime
5146 does not extend from one iteration of the loop to the next, so as
5147 to make the test easier. Since the lifetime isn't known yet,
5148 this requires two loops. See also record_giv above. */
5149
5150 last_giv_use = v->insn;
5151
5152 while (1)
5153 {
5154 p = NEXT_INSN (p);
5155 if (p == loop->end)
5156 {
5157 before_giv_insn = 1;
5158 p = NEXT_INSN (loop->start);
5159 }
5160 if (p == v->insn)
5161 break;
5162
5163 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5164 || GET_CODE (p) == CALL_INSN)
5165 {
5166 /* It is possible for the BIV increment to use the GIV if we
5167 have a cycle. Thus we must be sure to check each insn for
5168 both BIV and GIV uses, and we must check for BIV uses
5169 first. */
5170
5171 if (! biv_increment_seen
5172 && reg_set_p (v->src_reg, PATTERN (p)))
5173 biv_increment_seen = 1;
5174
5175 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5176 {
5177 if (biv_increment_seen || before_giv_insn)
5178 {
5179 v->replaceable = 0;
5180 v->not_replaceable = 1;
5181 break;
5182 }
5183 last_giv_use = p;
5184 }
5185 }
5186 }
5187
5188 /* Now that the lifetime of the giv is known, check for branches
5189 from within the lifetime to outside the lifetime if it is still
5190 replaceable. */
5191
5192 if (v->replaceable)
5193 {
5194 p = v->insn;
5195 while (1)
5196 {
5197 p = NEXT_INSN (p);
5198 if (p == loop->end)
5199 p = NEXT_INSN (loop->start);
5200 if (p == last_giv_use)
5201 break;
5202
5203 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5204 && LABEL_NAME (JUMP_LABEL (p))
5205 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5206 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5207 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5208 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5209 {
5210 v->replaceable = 0;
5211 v->not_replaceable = 1;
5212
5213 if (loop_dump_stream)
5214 fprintf (loop_dump_stream,
5215 "Found branch outside giv lifetime.\n");
5216
5217 break;
5218 }
5219 }
5220 }
5221
5222 /* If it is replaceable, then save the final value. */
5223 if (v->replaceable)
5224 v->final_value = final_value;
5225 }
5226
5227 if (loop_dump_stream && v->replaceable)
5228 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5229 INSN_UID (v->insn), REGNO (v->dest_reg));
5230 }
5231 \f
5232 /* Update the status of whether a giv can derive other givs.
5233
5234 We need to do something special if there is or may be an update to the biv
5235 between the time the giv is defined and the time it is used to derive
5236 another giv.
5237
5238 In addition, a giv that is only conditionally set is not allowed to
5239 derive another giv once a label has been passed.
5240
5241 The cases we look at are when a label or an update to a biv is passed. */
5242
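/* A hypothetical example of the adjustment recorded here: suppose a giv
   G is defined as G = 3 * B + 10 and the biv B is then incremented by 2
   before G is used to derive another giv.  At the point of use the old
   value of G equals 3 * (B - 2) + 10, i.e. 3 * B + 4, so the product
   biv->add_val * giv->mult_val = 6 is accumulated in derive_adjustment
   and subtracted when the derived giv is simplified.  When that product
   cannot be formed, or the update is conditional, cant_derive is set.  */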
5243 static void
5244 update_giv_derive (loop, p)
5245 const struct loop *loop;
5246 rtx p;
5247 {
5248 struct loop_ivs *ivs = LOOP_IVS (loop);
5249 struct iv_class *bl;
5250 struct induction *biv, *giv;
5251 rtx tem;
5252 int dummy;
5253
5254 /* Search all IV classes, then all bivs, and finally all givs.
5255
5256 There are three cases we are concerned with. First we have the situation
5257 of a giv that is only updated conditionally. In that case, it may not
5258 derive any givs after a label is passed.
5259
5260 The second case is when a biv update occurs, or may occur, after the
5261 definition of a giv. For certain biv updates (see below) that are
5262 known to occur between the giv definition and use, we can adjust the
5263 giv definition. For others, or when the biv update is conditional,
5264 we must prevent the giv from deriving any other givs. There are two
5265 sub-cases within this case.
5266
5267 If this is a label, we are concerned with any biv update that is done
5268 conditionally, since it may be done after the giv is defined followed by
5269 a branch here (actually, we need to pass both a jump and a label, but
5270 this extra tracking doesn't seem worth it).
5271
5272 If this is a jump, we are concerned about any biv update that may be
5273 executed multiple times. We are actually only concerned about
5274 backward jumps, but it is probably not worth performing the test
5275 on the jump again here.
5276
5277 If this is a biv update, we must adjust the giv status to show that a
5278 subsequent biv update was performed. If this adjustment cannot be done,
5279 the giv cannot derive further givs. */
5280
5281 for (bl = ivs->list; bl; bl = bl->next)
5282 for (biv = bl->biv; biv; biv = biv->next_iv)
5283 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5284 || biv->insn == p)
5285 {
5286 for (giv = bl->giv; giv; giv = giv->next_iv)
5287 {
5288 /* If cant_derive is already true, there is no point in
5289 checking all of these conditions again. */
5290 if (giv->cant_derive)
5291 continue;
5292
5293 /* If this giv is conditionally set and we have passed a label,
5294 it cannot derive anything. */
5295 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5296 giv->cant_derive = 1;
5297
5298 /* Skip givs that have mult_val == 0, since
5299 they are really invariants. Also skip those that are
5300 replaceable, since we know their lifetime doesn't contain
5301 any biv update. */
5302 else if (giv->mult_val == const0_rtx || giv->replaceable)
5303 continue;
5304
5305 /* The only way we can allow this giv to derive another
5306 is if this is a biv increment and we can form the product
5307 of biv->add_val and giv->mult_val. In this case, we will
5308 be able to compute a compensation. */
5309 else if (biv->insn == p)
5310 {
5311 rtx ext_val_dummy;
5312
5313 tem = 0;
5314 if (biv->mult_val == const1_rtx)
5315 tem = simplify_giv_expr (loop,
5316 gen_rtx_MULT (giv->mode,
5317 biv->add_val,
5318 giv->mult_val),
5319 &ext_val_dummy, &dummy);
5320
5321 if (tem && giv->derive_adjustment)
5322 tem = simplify_giv_expr
5323 (loop,
5324 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5325 &ext_val_dummy, &dummy);
5326
5327 if (tem)
5328 giv->derive_adjustment = tem;
5329 else
5330 giv->cant_derive = 1;
5331 }
5332 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5333 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5334 giv->cant_derive = 1;
5335 }
5336 }
5337 }
5338 \f
5339 /* Check whether an insn is an increment legitimate for a basic induction var.
5340 X is the source of insn P, or a part of it.
5341 MODE is the mode in which X should be interpreted.
5342
5343 DEST_REG is the putative biv, also the destination of the insn.
5344 We accept patterns of these forms:
5345 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5346 REG = INVARIANT + REG
5347
5348 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5349 store the additive term into *INC_VAL, and store the place where
5350 we found the additive term into *LOCATION.
5351
5352 If X is an assignment of an invariant into DEST_REG, we set
5353 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5354
5355 We also want to detect a BIV when it corresponds to a variable
5356 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5357 of the variable may be a PLUS that adds a SUBREG of that variable to
5358 an invariant and then sign- or zero-extends the result of the PLUS
5359 into the variable.
5360
5361 Most GIVs in such cases will be in the promoted mode, since that is
5362 probably the natural computation mode (and almost certainly the mode
5363 used for addresses) on the machine. So we view the pseudo-reg containing
5364 the variable as the BIV, as if it were simply incremented.
5365
5366 Note that treating the entire pseudo as a BIV will result in making
5367 simple increments to any GIVs based on it. However, if the variable
5368 overflows in its declared mode but not its promoted mode, the result will
5369 be incorrect. This is acceptable if the variable is signed, since
5370 overflows in such cases are undefined, but not if it is unsigned, since
5371 those overflows are defined. So we only check for SIGN_EXTEND and
5372 not ZERO_EXTEND.
5373
5374 If we cannot find a biv, we return 0. */
5375
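/* As a hypothetical illustration of the patterns above, a source loop

	for (i = start; i < n; i += 4)
	  ...

   steps its counter with an insn whose source is roughly

	(plus (reg i) (const_int 4))

   for which this function returns 1 with *MULT_VAL = const1_rtx and
   *INC_VAL = (const_int 4), while an assignment of a loop-invariant
   value such as (set (reg i) (const_int 0)) in the innermost loop
   yields *MULT_VAL = const0_rtx and *INC_VAL = (const_int 0).  */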
5376 static int
5377 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5378 const struct loop *loop;
5379 register rtx x;
5380 enum machine_mode mode;
5381 rtx dest_reg;
5382 rtx p;
5383 rtx *inc_val;
5384 rtx *mult_val;
5385 rtx **location;
5386 {
5387 register enum rtx_code code;
5388 rtx *argp, arg;
5389 rtx insn, set = 0;
5390
5391 code = GET_CODE (x);
5392 *location = NULL;
5393 switch (code)
5394 {
5395 case PLUS:
5396 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5397 || (GET_CODE (XEXP (x, 0)) == SUBREG
5398 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5399 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5400 {
5401 argp = &XEXP (x, 1);
5402 }
5403 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5404 || (GET_CODE (XEXP (x, 1)) == SUBREG
5405 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5406 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5407 {
5408 argp = &XEXP (x, 0);
5409 }
5410 else
5411 return 0;
5412
5413 arg = *argp;
5414 if (loop_invariant_p (loop, arg) != 1)
5415 return 0;
5416
5417 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5418 *mult_val = const1_rtx;
5419 *location = argp;
5420 return 1;
5421
5422 case SUBREG:
5423 /* If this is a SUBREG for a promoted variable, check the inner
5424 value. */
5425 if (SUBREG_PROMOTED_VAR_P (x))
5426 return basic_induction_var (loop, SUBREG_REG (x),
5427 GET_MODE (SUBREG_REG (x)),
5428 dest_reg, p, inc_val, mult_val, location);
5429 return 0;
5430
5431 case REG:
5432 /* If this register is assigned in a previous insn, look at its
5433 source, but don't go outside the loop or past a label. */
5434
5435 /* If this sets a register to itself, we would repeat any previous
5436 biv increment if we applied this strategy blindly. */
5437 if (rtx_equal_p (dest_reg, x))
5438 return 0;
5439
5440 insn = p;
5441 while (1)
5442 {
5443 rtx dest;
5444 do
5445 {
5446 insn = PREV_INSN (insn);
5447 }
5448 while (insn && GET_CODE (insn) == NOTE
5449 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5450
5451 if (!insn)
5452 break;
5453 set = single_set (insn);
5454 if (set == 0)
5455 break;
5456 dest = SET_DEST (set);
5457 if (dest == x
5458 || (GET_CODE (dest) == SUBREG
5459 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
5460 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
5461 && SUBREG_REG (dest) == x))
5462 return basic_induction_var (loop, SET_SRC (set),
5463 (GET_MODE (SET_SRC (set)) == VOIDmode
5464 ? GET_MODE (x)
5465 : GET_MODE (SET_SRC (set))),
5466 dest_reg, insn,
5467 inc_val, mult_val, location);
5468
5469 while (GET_CODE (dest) == SIGN_EXTRACT
5470 || GET_CODE (dest) == ZERO_EXTRACT
5471 || GET_CODE (dest) == SUBREG
5472 || GET_CODE (dest) == STRICT_LOW_PART)
5473 dest = XEXP (dest, 0);
5474 if (dest == x)
5475 break;
5476 }
5477 /* Fall through. */
5478
5479 /* Can accept constant setting of biv only when inside the innermost loop.
5480 Otherwise, a biv of an inner loop may be incorrectly recognized
5481 as a biv of the outer loop,
5482 causing code to be moved INTO the inner loop. */
5483 case MEM:
5484 if (loop_invariant_p (loop, x) != 1)
5485 return 0;
5486 case CONST_INT:
5487 case SYMBOL_REF:
5488 case CONST:
5489 /* convert_modes aborts if we try to convert to or from CCmode, so just
5490 exclude that case. It is very unlikely that a condition code value
5491 would be a useful iterator anyway. */
5492 if (loop->level == 1
5493 && GET_MODE_CLASS (mode) != MODE_CC
5494 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5495 {
5496 /* Possible bug here? Perhaps we don't know the mode of X. */
5497 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5498 *mult_val = const0_rtx;
5499 return 1;
5500 }
5501 else
5502 return 0;
5503
5504 case SIGN_EXTEND:
5505 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5506 dest_reg, p, inc_val, mult_val, location);
5507
5508 case ASHIFTRT:
5509 /* Similar, since this can be a sign extension. */
5510 for (insn = PREV_INSN (p);
5511 (insn && GET_CODE (insn) == NOTE
5512 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5513 insn = PREV_INSN (insn))
5514 ;
5515
5516 if (insn)
5517 set = single_set (insn);
5518
5519 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
5520 && set && SET_DEST (set) == XEXP (x, 0)
5521 && GET_CODE (XEXP (x, 1)) == CONST_INT
5522 && INTVAL (XEXP (x, 1)) >= 0
5523 && GET_CODE (SET_SRC (set)) == ASHIFT
5524 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5525 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
5526 GET_MODE (XEXP (x, 0)),
5527 dest_reg, insn, inc_val, mult_val,
5528 location);
5529 return 0;
5530
5531 default:
5532 return 0;
5533 }
5534 }
5535 \f
5536 /* A general induction variable (giv) is any quantity that is a linear
5537 function of a basic induction variable,
5538 i.e. giv = biv * mult_val + add_val.
5539 The coefficients can be any loop invariant quantity.
5540 A giv need not be computed directly from the biv;
5541 it can be computed by way of other givs. */
5542
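/* A small, hypothetical illustration: if I is a biv stepped by 1, the
   address of a[i + 2] in

	int a[100];
	for (i = 0; i < n; i++)
	  a[i + 2] = 0;

   is &a[0] + 4*i + 8 (assuming 4-byte ints), a giv with mult_val == 4
   and add_val == &a[0] + 8 relative to the biv I.  */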
5543 /* Determine whether X computes a giv.
5544 If it does, return a nonzero value
5545 which is the benefit from eliminating the computation of X;
5546 set *SRC_REG to the register of the biv that it is computed from;
5547 set *ADD_VAL and *MULT_VAL to the coefficients,
5548 such that the value of X is biv * mult + add; */
5549
5550 static int
5551 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
5552 is_addr, pbenefit, addr_mode)
5553 const struct loop *loop;
5554 rtx x;
5555 rtx *src_reg;
5556 rtx *add_val;
5557 rtx *mult_val;
5558 rtx *ext_val;
5559 int is_addr;
5560 int *pbenefit;
5561 enum machine_mode addr_mode;
5562 {
5563 struct loop_ivs *ivs = LOOP_IVS (loop);
5564 rtx orig_x = x;
5565
5566 /* If this is an invariant, forget it, it isn't a giv. */
5567 if (loop_invariant_p (loop, x) == 1)
5568 return 0;
5569
5570 *pbenefit = 0;
5571 *ext_val = NULL_RTX;
5572 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
5573 if (x == 0)
5574 return 0;
5575
5576 switch (GET_CODE (x))
5577 {
5578 case USE:
5579 case CONST_INT:
5580 /* Since this is now an invariant and wasn't before, it must be a giv
5581 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5582 with. */
5583 *src_reg = ivs->list->biv->dest_reg;
5584 *mult_val = const0_rtx;
5585 *add_val = x;
5586 break;
5587
5588 case REG:
5589 /* This is equivalent to a BIV. */
5590 *src_reg = x;
5591 *mult_val = const1_rtx;
5592 *add_val = const0_rtx;
5593 break;
5594
5595 case PLUS:
5596 /* Either (plus (biv) (invar)) or
5597 (plus (mult (biv) (invar_1)) (invar_2)). */
5598 if (GET_CODE (XEXP (x, 0)) == MULT)
5599 {
5600 *src_reg = XEXP (XEXP (x, 0), 0);
5601 *mult_val = XEXP (XEXP (x, 0), 1);
5602 }
5603 else
5604 {
5605 *src_reg = XEXP (x, 0);
5606 *mult_val = const1_rtx;
5607 }
5608 *add_val = XEXP (x, 1);
5609 break;
5610
5611 case MULT:
5612 /* ADD_VAL is zero. */
5613 *src_reg = XEXP (x, 0);
5614 *mult_val = XEXP (x, 1);
5615 *add_val = const0_rtx;
5616 break;
5617
5618 default:
5619 abort ();
5620 }
5621
5622 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
5623 unless they are CONST_INT). */
5624 if (GET_CODE (*add_val) == USE)
5625 *add_val = XEXP (*add_val, 0);
5626 if (GET_CODE (*mult_val) == USE)
5627 *mult_val = XEXP (*mult_val, 0);
5628
5629 if (is_addr)
5630 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
5631 else
5632 *pbenefit += rtx_cost (orig_x, SET);
5633
5634 /* Always return true if this is a giv so it will be detected as such,
5635 even if the benefit is zero or negative. This allows elimination
5636 of bivs that might otherwise not be eliminated. */
5637 return 1;
5638 }
5639 \f
5640 /* Given an expression, X, try to form it as a linear function of a biv.
5641 We will canonicalize it to be of the form
5642 (plus (mult (BIV) (invar_1))
5643 (invar_2))
5644 with possible degeneracies.
5645
5646 The invariant expressions must each be of a form that can be used as a
5647 machine operand. We surround them with a USE rtx (a hack, but localized
5648 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5649 routine; it is the caller's responsibility to strip them.
5650
5651 If no such canonicalization is possible (i.e., two biv's are used or an
5652 expression that is neither invariant nor a biv or giv), this routine
5653 returns 0.
5654
5655 For a non-zero return, the result will have a code of CONST_INT, USE,
5656 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5657
5658 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5659
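/* A worked example of the canonical form (the names are hypothetical):
   if I is a biv and K is a loop-invariant register, the expression
   (i + k) * 4 is canonicalized to

	(plus (mult (reg i) (const_int 4))
	      (use (mult (reg k) (const_int 4))))

   that is, invar_1 is 4 and invar_2 is k*4, with the non-constant
   invariant wrapped in a USE as described above.  */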
5660 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
5661 static rtx sge_plus_constant PARAMS ((rtx, rtx));
5662
5663 static rtx
5664 simplify_giv_expr (loop, x, ext_val, benefit)
5665 const struct loop *loop;
5666 rtx x;
5667 rtx *ext_val;
5668 int *benefit;
5669 {
5670 struct loop_ivs *ivs = LOOP_IVS (loop);
5671 struct loop_regs *regs = LOOP_REGS (loop);
5672 enum machine_mode mode = GET_MODE (x);
5673 rtx arg0, arg1;
5674 rtx tem;
5675
5676 /* If this is not an integer mode, or if we cannot do arithmetic in this
5677 mode, this can't be a giv. */
5678 if (mode != VOIDmode
5679 && (GET_MODE_CLASS (mode) != MODE_INT
5680 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5681 return NULL_RTX;
5682
5683 switch (GET_CODE (x))
5684 {
5685 case PLUS:
5686 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5687 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5688 if (arg0 == 0 || arg1 == 0)
5689 return NULL_RTX;
5690
5691 /* Put constant last, CONST_INT last if both constant. */
5692 if ((GET_CODE (arg0) == USE
5693 || GET_CODE (arg0) == CONST_INT)
5694 && ! ((GET_CODE (arg0) == USE
5695 && GET_CODE (arg1) == USE)
5696 || GET_CODE (arg1) == CONST_INT))
5697 tem = arg0, arg0 = arg1, arg1 = tem;
5698
5699 /* Handle addition of zero, then addition of an invariant. */
5700 if (arg1 == const0_rtx)
5701 return arg0;
5702 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5703 switch (GET_CODE (arg0))
5704 {
5705 case CONST_INT:
5706 case USE:
5707 /* Adding two invariants must result in an invariant, so enclose
5708 addition operation inside a USE and return it. */
5709 if (GET_CODE (arg0) == USE)
5710 arg0 = XEXP (arg0, 0);
5711 if (GET_CODE (arg1) == USE)
5712 arg1 = XEXP (arg1, 0);
5713
5714 if (GET_CODE (arg0) == CONST_INT)
5715 tem = arg0, arg0 = arg1, arg1 = tem;
5716 if (GET_CODE (arg1) == CONST_INT)
5717 tem = sge_plus_constant (arg0, arg1);
5718 else
5719 tem = sge_plus (mode, arg0, arg1);
5720
5721 if (GET_CODE (tem) != CONST_INT)
5722 tem = gen_rtx_USE (mode, tem);
5723 return tem;
5724
5725 case REG:
5726 case MULT:
5727 /* biv + invar or mult + invar. Return sum. */
5728 return gen_rtx_PLUS (mode, arg0, arg1);
5729
5730 case PLUS:
5731 /* (a + invar_1) + invar_2. Associate. */
5732 return
5733 simplify_giv_expr (loop,
5734 gen_rtx_PLUS (mode,
5735 XEXP (arg0, 0),
5736 gen_rtx_PLUS (mode,
5737 XEXP (arg0, 1),
5738 arg1)),
5739 ext_val, benefit);
5740
5741 default:
5742 abort ();
5743 }
5744
5745 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5746 MULT to reduce cases. */
5747 if (GET_CODE (arg0) == REG)
5748 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5749 if (GET_CODE (arg1) == REG)
5750 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5751
5752 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5753 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5754 Recurse to associate the second PLUS. */
5755 if (GET_CODE (arg1) == MULT)
5756 tem = arg0, arg0 = arg1, arg1 = tem;
5757
5758 if (GET_CODE (arg1) == PLUS)
5759 return
5760 simplify_giv_expr (loop,
5761 gen_rtx_PLUS (mode,
5762 gen_rtx_PLUS (mode, arg0,
5763 XEXP (arg1, 0)),
5764 XEXP (arg1, 1)),
5765 ext_val, benefit);
5766
5767 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5768 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5769 return NULL_RTX;
5770
5771 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5772 return NULL_RTX;
5773
5774 return simplify_giv_expr (loop,
5775 gen_rtx_MULT (mode,
5776 XEXP (arg0, 0),
5777 gen_rtx_PLUS (mode,
5778 XEXP (arg0, 1),
5779 XEXP (arg1, 1))),
5780 ext_val, benefit);
5781
5782 case MINUS:
5783 /* Handle "a - b" as "a + b * (-1)". */
5784 return simplify_giv_expr (loop,
5785 gen_rtx_PLUS (mode,
5786 XEXP (x, 0),
5787 gen_rtx_MULT (mode,
5788 XEXP (x, 1),
5789 constm1_rtx)),
5790 ext_val, benefit);
5791
5792 case MULT:
5793 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5794 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5795 if (arg0 == 0 || arg1 == 0)
5796 return NULL_RTX;
5797
5798 /* Put constant last, CONST_INT last if both constant. */
5799 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5800 && GET_CODE (arg1) != CONST_INT)
5801 tem = arg0, arg0 = arg1, arg1 = tem;
5802
5803 /* If second argument is not now constant, not giv. */
5804 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5805 return NULL_RTX;
5806
5807 /* Handle multiply by 0 or 1. */
5808 if (arg1 == const0_rtx)
5809 return const0_rtx;
5810
5811 else if (arg1 == const1_rtx)
5812 return arg0;
5813
5814 switch (GET_CODE (arg0))
5815 {
5816 case REG:
5817 /* biv * invar. Done. */
5818 return gen_rtx_MULT (mode, arg0, arg1);
5819
5820 case CONST_INT:
5821 /* Product of two constants. */
5822 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5823
5824 case USE:
5825 /* invar * invar is a giv, but attempt to simplify it somehow. */
5826 if (GET_CODE (arg1) != CONST_INT)
5827 return NULL_RTX;
5828
5829 arg0 = XEXP (arg0, 0);
5830 if (GET_CODE (arg0) == MULT)
5831 {
5832 /* (invar_0 * invar_1) * invar_2. Associate. */
5833 return simplify_giv_expr (loop,
5834 gen_rtx_MULT (mode,
5835 XEXP (arg0, 0),
5836 gen_rtx_MULT (mode,
5837 XEXP (arg0,
5838 1),
5839 arg1)),
5840 ext_val, benefit);
5841 }
5842 /* Propagate the MULT expressions to the innermost nodes. */
5843 else if (GET_CODE (arg0) == PLUS)
5844 {
5845 /* (invar_0 + invar_1) * invar_2. Distribute. */
5846 return simplify_giv_expr (loop,
5847 gen_rtx_PLUS (mode,
5848 gen_rtx_MULT (mode,
5849 XEXP (arg0,
5850 0),
5851 arg1),
5852 gen_rtx_MULT (mode,
5853 XEXP (arg0,
5854 1),
5855 arg1)),
5856 ext_val, benefit);
5857 }
5858 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
5859
5860 case MULT:
5861 /* (a * invar_1) * invar_2. Associate. */
5862 return simplify_giv_expr (loop,
5863 gen_rtx_MULT (mode,
5864 XEXP (arg0, 0),
5865 gen_rtx_MULT (mode,
5866 XEXP (arg0, 1),
5867 arg1)),
5868 ext_val, benefit);
5869
5870 case PLUS:
5871 /* (a + invar_1) * invar_2. Distribute. */
5872 return simplify_giv_expr (loop,
5873 gen_rtx_PLUS (mode,
5874 gen_rtx_MULT (mode,
5875 XEXP (arg0, 0),
5876 arg1),
5877 gen_rtx_MULT (mode,
5878 XEXP (arg0, 1),
5879 arg1)),
5880 ext_val, benefit);
5881
5882 default:
5883 abort ();
5884 }
5885
5886 case ASHIFT:
5887 /* Shift by constant is multiply by power of two. */
5888 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5889 return 0;
5890
5891 return
5892 simplify_giv_expr (loop,
5893 gen_rtx_MULT (mode,
5894 XEXP (x, 0),
5895 GEN_INT ((HOST_WIDE_INT) 1
5896 << INTVAL (XEXP (x, 1)))),
5897 ext_val, benefit);
5898
5899 case NEG:
5900 /* "-a" is "a * (-1)" */
5901 return simplify_giv_expr (loop,
5902 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5903 ext_val, benefit);
5904
5905 case NOT:
5906 /* "~a" is "-a - 1". Silly, but easy. */
5907 return simplify_giv_expr (loop,
5908 gen_rtx_MINUS (mode,
5909 gen_rtx_NEG (mode, XEXP (x, 0)),
5910 const1_rtx),
5911 ext_val, benefit);
5912
5913 case USE:
5914 /* Already in proper form for invariant. */
5915 return x;
5916
5917 case SIGN_EXTEND:
5918 case ZERO_EXTEND:
5919 case TRUNCATE:
5920 /* Conditionally recognize extensions of simple IVs. After we've
5921 computed loop traversal counts and verified the range of the
5922 source IV, we'll reevaluate this as a GIV. */
5923 if (*ext_val == NULL_RTX)
5924 {
5925 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5926 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
5927 {
5928 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
5929 return arg0;
5930 }
5931 }
5932 goto do_default;
5933
5934 case REG:
5935 /* If this is a new register, we can't deal with it. */
5936 if (REGNO (x) >= max_reg_before_loop)
5937 return 0;
5938
5939 /* Check for biv or giv. */
5940 switch (REG_IV_TYPE (ivs, REGNO (x)))
5941 {
5942 case BASIC_INDUCT:
5943 return x;
5944 case GENERAL_INDUCT:
5945 {
5946 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
5947
5948 /* Form expression from giv and add benefit. Ensure this giv
5949 can derive another and subtract any needed adjustment if so. */
5950
5951 /* Increasing the benefit here is risky. The only case in which it
5952 is arguably correct is if this is the only use of V. In other
5953 cases, this will artificially inflate the benefit of the current
5954 giv, and lead to suboptimal code. Thus, it is disabled, since
5955 potentially not reducing an only marginally beneficial giv is
5956 less harmful than reducing many givs that are not really
5957 beneficial. */
5958 {
5959 rtx single_use = regs->array[REGNO (x)].single_usage;
5960 if (single_use && single_use != const0_rtx)
5961 *benefit += v->benefit;
5962 }
5963
5964 if (v->cant_derive)
5965 return 0;
5966
5967 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
5968 v->src_reg, v->mult_val),
5969 v->add_val);
5970
5971 if (v->derive_adjustment)
5972 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5973 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
5974 if (*ext_val)
5975 {
5976 if (!v->ext_dependant)
5977 return arg0;
5978 }
5979 else
5980 {
5981 *ext_val = v->ext_dependant;
5982 return arg0;
5983 }
5984 return 0;
5985 }
5986
5987 default:
5988 do_default:
5989 /* If it isn't an induction variable, and it is invariant, we
5990 may be able to simplify things further by looking through
5991 the bits we just moved outside the loop. */
5992 if (loop_invariant_p (loop, x) == 1)
5993 {
5994 struct movable *m;
5995 struct loop_movables *movables = LOOP_MOVABLES (loop);
5996
5997 for (m = movables->head; m; m = m->next)
5998 if (rtx_equal_p (x, m->set_dest))
5999 {
6000 /* Ok, we found a match. Substitute and simplify. */
6001
6002 /* If we match another movable, we must use that, as
6003 this one is going away. */
6004 if (m->match)
6005 return simplify_giv_expr (loop, m->match->set_dest,
6006 ext_val, benefit);
6007
6008 /* If consec is non-zero, this is a member of a group of
6009 instructions that were moved together. We handle this
6010 case only to the point of seeking to the last insn and
6011 looking for a REG_EQUAL. Fail if we don't find one. */
6012 if (m->consec != 0)
6013 {
6014 int i = m->consec;
6015 tem = m->insn;
6016 do
6017 {
6018 tem = NEXT_INSN (tem);
6019 }
6020 while (--i > 0);
6021
6022 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6023 if (tem)
6024 tem = XEXP (tem, 0);
6025 }
6026 else
6027 {
6028 tem = single_set (m->insn);
6029 if (tem)
6030 tem = SET_SRC (tem);
6031 }
6032
6033 if (tem)
6034 {
6035 /* What we are most interested in is pointer
6036 arithmetic on invariants -- only take
6037 patterns we may be able to do something with. */
6038 if (GET_CODE (tem) == PLUS
6039 || GET_CODE (tem) == MULT
6040 || GET_CODE (tem) == ASHIFT
6041 || GET_CODE (tem) == CONST_INT
6042 || GET_CODE (tem) == SYMBOL_REF)
6043 {
6044 tem = simplify_giv_expr (loop, tem, ext_val,
6045 benefit);
6046 if (tem)
6047 return tem;
6048 }
6049 else if (GET_CODE (tem) == CONST
6050 && GET_CODE (XEXP (tem, 0)) == PLUS
6051 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6052 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6053 {
6054 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6055 ext_val, benefit);
6056 if (tem)
6057 return tem;
6058 }
6059 }
6060 break;
6061 }
6062 }
6063 break;
6064 }
6065
6066 /* Fall through to general case. */
6067 default:
6068 /* If invariant, return as USE (unless CONST_INT).
6069 Otherwise, not giv. */
6070 if (GET_CODE (x) == USE)
6071 x = XEXP (x, 0);
6072
6073 if (loop_invariant_p (loop, x) == 1)
6074 {
6075 if (GET_CODE (x) == CONST_INT)
6076 return x;
6077 if (GET_CODE (x) == CONST
6078 && GET_CODE (XEXP (x, 0)) == PLUS
6079 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6080 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6081 x = XEXP (x, 0);
6082 return gen_rtx_USE (mode, x);
6083 }
6084 else
6085 return 0;
6086 }
6087 }
6088
6089 /* This routine folds invariants such that there is only ever one
6090 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6091
6092 static rtx
6093 sge_plus_constant (x, c)
6094 rtx x, c;
6095 {
6096 if (GET_CODE (x) == CONST_INT)
6097 return GEN_INT (INTVAL (x) + INTVAL (c));
6098 else if (GET_CODE (x) != PLUS)
6099 return gen_rtx_PLUS (GET_MODE (x), x, c);
6100 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6101 {
6102 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6103 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6104 }
6105 else if (GET_CODE (XEXP (x, 0)) == PLUS
6106 || GET_CODE (XEXP (x, 1)) != PLUS)
6107 {
6108 return gen_rtx_PLUS (GET_MODE (x),
6109 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6110 }
6111 else
6112 {
6113 return gen_rtx_PLUS (GET_MODE (x),
6114 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6115 }
6116 }
6117
6118 static rtx
6119 sge_plus (mode, x, y)
6120 enum machine_mode mode;
6121 rtx x, y;
6122 {
6123 while (GET_CODE (y) == PLUS)
6124 {
6125 rtx a = XEXP (y, 0);
6126 if (GET_CODE (a) == CONST_INT)
6127 x = sge_plus_constant (x, a);
6128 else
6129 x = gen_rtx_PLUS (mode, x, a);
6130 y = XEXP (y, 1);
6131 }
6132 if (GET_CODE (y) == CONST_INT)
6133 x = sge_plus_constant (x, y);
6134 else
6135 x = gen_rtx_PLUS (mode, x, y);
6136 return x;
6137 }
6138 \f
6139 /* Help detect a giv that is calculated by several consecutive insns;
6140 for example,
6141 giv = biv * M
6142 giv = giv + A
6143 The caller has already identified the first insn P as having a giv as dest;
6144 we check that all other insns that set the same register follow
6145 immediately after P, that they alter nothing else,
6146 and that the result of the last is still a giv.
6147
6148 The value is 0 if the reg set in P is not really a giv.
6149 Otherwise, the value is the amount gained by eliminating
6150 all the consecutive insns that compute the value.
6151
6152 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6153 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6154
6155 The coefficients of the ultimate giv value are stored in
6156 *MULT_VAL and *ADD_VAL. */
6157
6158 static int
6159 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6160 add_val, mult_val, ext_val, last_consec_insn)
6161 const struct loop *loop;
6162 int first_benefit;
6163 rtx p;
6164 rtx src_reg;
6165 rtx dest_reg;
6166 rtx *add_val;
6167 rtx *mult_val;
6168 rtx *ext_val;
6169 rtx *last_consec_insn;
6170 {
6171 struct loop_ivs *ivs = LOOP_IVS (loop);
6172 struct loop_regs *regs = LOOP_REGS (loop);
6173 int count;
6174 enum rtx_code code;
6175 int benefit;
6176 rtx temp;
6177 rtx set;
6178
6179 /* Indicate that this is a giv so that we can update the value produced in
6180 each insn of the multi-insn sequence.
6181
6182 This induction structure will be used only by the call to
6183 general_induction_var below, so we can allocate it on our stack.
6184 If this is a giv, our caller will replace the induct var entry with
6185 a new induction structure. */
6186 struct induction *v;
6187
6188 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6189 return 0;
6190
6191 v = (struct induction *) alloca (sizeof (struct induction));
6192 v->src_reg = src_reg;
6193 v->mult_val = *mult_val;
6194 v->add_val = *add_val;
6195 v->benefit = first_benefit;
6196 v->cant_derive = 0;
6197 v->derive_adjustment = 0;
6198 v->ext_dependant = NULL_RTX;
6199
6200 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6201 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6202
6203 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6204
6205 while (count > 0)
6206 {
6207 p = NEXT_INSN (p);
6208 code = GET_CODE (p);
6209
6210 /* If libcall, skip to end of call sequence. */
6211 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6212 p = XEXP (temp, 0);
6213
6214 if (code == INSN
6215 && (set = single_set (p))
6216 && GET_CODE (SET_DEST (set)) == REG
6217 && SET_DEST (set) == dest_reg
6218 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6219 add_val, mult_val, ext_val, 0,
6220 &benefit, VOIDmode)
6221 /* Giv created by equivalent expression. */
6222 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6223 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6224 add_val, mult_val, ext_val, 0,
6225 &benefit, VOIDmode)))
6226 && src_reg == v->src_reg)
6227 {
6228 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6229 benefit += libcall_benefit (p);
6230
6231 count--;
6232 v->mult_val = *mult_val;
6233 v->add_val = *add_val;
6234 v->benefit += benefit;
6235 }
6236 else if (code != NOTE)
6237 {
6238 /* Allow insns that set something other than this giv to a
6239 constant. Such insns are needed on machines which cannot
6240 include long constants and should not disqualify a giv. */
6241 if (code == INSN
6242 && (set = single_set (p))
6243 && SET_DEST (set) != dest_reg
6244 && CONSTANT_P (SET_SRC (set)))
6245 continue;
6246
6247 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6248 return 0;
6249 }
6250 }
6251
6252 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6253 *last_consec_insn = p;
6254 return v->benefit;
6255 }
6256 \f
6257 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6258 represented by G1. If no such expression can be found, or it is clear that
6259 it cannot possibly be a valid address, 0 is returned.
6260
6261 To perform the computation, we note that
6262 G1 = x * v + a and
6263 G2 = y * v + b
6264 where `v' is the biv.
6265
6266 So G2 = (y/x) * G1 + (b - a*y/x).
6267
6268 Note that MULT = y/x.
6269
6270 Update: A and B are now allowed to be additive expressions such that
6271 B contains all variables in A. That is, computing B-A will not require
6272 subtracting variables. */
6273
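/* A worked numeric instance of the identity above (values hypothetical):
   with G1 = 4*v + 8 and G2 = 12*v + 32, MULT = y/x = 3 and
   B - A*MULT = 32 - 8*3 = 8, so G2 = 3*G1 + 8; express_from below
   assembles exactly that expression from these helpers.  */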
6274 static rtx
6275 express_from_1 (a, b, mult)
6276 rtx a, b, mult;
6277 {
6278 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6279
6280 if (mult == const0_rtx)
6281 return b;
6282
6283 /* If MULT is not 1, we cannot handle A with non-constants, since we
6284 would then be required to subtract multiples of the registers in A.
6285 This is theoretically possible, and may even apply to some Fortran
6286 constructs, but it is a lot of work and we do not attempt it here. */
6287
6288 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6289 return NULL_RTX;
6290
6291 /* In general these structures are sorted top to bottom (down the PLUS
6292 chain), but not left to right across the PLUS. If B is a higher
6293 order giv than A, we can strip one level and recurse. If A is higher
6294 order, we'll eventually bail out, but won't know that until the end.
6295 If they are the same, we'll strip one level around this loop. */
6296
6297 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6298 {
6299 rtx ra, rb, oa, ob, tmp;
6300
6301 ra = XEXP (a, 0), oa = XEXP (a, 1);
6302 if (GET_CODE (ra) == PLUS)
6303 tmp = ra, ra = oa, oa = tmp;
6304
6305 rb = XEXP (b, 0), ob = XEXP (b, 1);
6306 if (GET_CODE (rb) == PLUS)
6307 tmp = rb, rb = ob, ob = tmp;
6308
6309 if (rtx_equal_p (ra, rb))
6310 /* We matched: remove one reg completely. */
6311 a = oa, b = ob;
6312 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6313 /* An alternate match. */
6314 a = oa, b = rb;
6315 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6316 /* An alternate match. */
6317 a = ra, b = ob;
6318 else
6319 {
6320 /* Indicates an extra register in B. Strip one level from B and
6321 recurse, hoping B was the higher order expression. */
6322 ob = express_from_1 (a, ob, mult);
6323 if (ob == NULL_RTX)
6324 return NULL_RTX;
6325 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6326 }
6327 }
6328
6329 /* Here we are at the last level of A, go through the cases hoping to
6330 get rid of everything but a constant. */
6331
6332 if (GET_CODE (a) == PLUS)
6333 {
6334 rtx ra, oa;
6335
6336 ra = XEXP (a, 0), oa = XEXP (a, 1);
6337 if (rtx_equal_p (oa, b))
6338 oa = ra;
6339 else if (!rtx_equal_p (ra, b))
6340 return NULL_RTX;
6341
6342 if (GET_CODE (oa) != CONST_INT)
6343 return NULL_RTX;
6344
6345 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6346 }
6347 else if (GET_CODE (a) == CONST_INT)
6348 {
6349 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6350 }
6351 else if (CONSTANT_P (a))
6352 {
6353 return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), const0_rtx, a);
6354 }
6355 else if (GET_CODE (b) == PLUS)
6356 {
6357 if (rtx_equal_p (a, XEXP (b, 0)))
6358 return XEXP (b, 1);
6359 else if (rtx_equal_p (a, XEXP (b, 1)))
6360 return XEXP (b, 0);
6361 else
6362 return NULL_RTX;
6363 }
6364 else if (rtx_equal_p (a, b))
6365 return const0_rtx;
6366
6367 return NULL_RTX;
6368 }
6369
6370 rtx
6371 express_from (g1, g2)
6372 struct induction *g1, *g2;
6373 {
6374 rtx mult, add;
6375
6376 /* The value that G1 will be multiplied by must be a constant integer. Also,
6377 the only chance we have of getting a valid address is if y/x (see above
6378 for notation) is also an integer. */
6379 if (GET_CODE (g1->mult_val) == CONST_INT
6380 && GET_CODE (g2->mult_val) == CONST_INT)
6381 {
6382 if (g1->mult_val == const0_rtx
6383 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6384 return NULL_RTX;
6385 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6386 }
6387 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6388 mult = const1_rtx;
6389 else
6390 {
6391 /* ??? Find out if one is a multiple of the other? */
6392 return NULL_RTX;
6393 }
6394
6395 add = express_from_1 (g1->add_val, g2->add_val, mult);
6396 if (add == NULL_RTX)
6397 {
6398 /* Failed. If we've got a multiplication factor between G1 and G2,
6399 scale G1's addend and try again. */
6400 if (INTVAL (mult) > 1)
6401 {
6402 rtx g1_add_val = g1->add_val;
6403 if (GET_CODE (g1_add_val) == MULT
6404 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6405 {
6406 HOST_WIDE_INT m;
6407 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6408 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6409 XEXP (g1_add_val, 0), GEN_INT (m));
6410 }
6411 else
6412 {
6413 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6414 mult);
6415 }
6416
6417 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6418 }
6419 }
6420 if (add == NULL_RTX)
6421 return NULL_RTX;
6422
6423 /* Form simplified final result. */
6424 if (mult == const0_rtx)
6425 return add;
6426 else if (mult == const1_rtx)
6427 mult = g1->dest_reg;
6428 else
6429 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6430
6431 if (add == const0_rtx)
6432 return mult;
6433 else
6434 {
6435 if (GET_CODE (add) == PLUS
6436 && CONSTANT_P (XEXP (add, 1)))
6437 {
6438 rtx tem = XEXP (add, 1);
6439 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6440 add = tem;
6441 }
6442
6443 return gen_rtx_PLUS (g2->mode, mult, add);
6444 }
6445 }
6446 \f
6447 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6448 represented by G1. This indicates that G2 should be combined with G1 and
6449 that G2 can use (either directly or via an address expression) a register
6450 used to represent G1. */
6451
6452 static rtx
6453 combine_givs_p (g1, g2)
6454 struct induction *g1, *g2;
6455 {
6456 rtx comb, ret;
6457
6458 /* With the introduction of ext dependant givs, we must be careful about modes.
6459 G2 must not use a wider mode than G1. */
6460 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
6461 return NULL_RTX;
6462
6463 ret = comb = express_from (g1, g2);
6464 if (comb == NULL_RTX)
6465 return NULL_RTX;
6466 if (g1->mode != g2->mode)
6467 ret = gen_lowpart (g2->mode, comb);
6468
6469 /* If these givs are identical, they can be combined. We use the results
6470 of express_from because the addends are not in a canonical form, so
6471 rtx_equal_p is a weaker test. */
6472 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6473 combination to be the other way round. */
6474 if (comb == g1->dest_reg
6475 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6476 {
6477 return ret;
6478 }
6479
6480 /* If G2 can be expressed as a function of G1 and that function is valid
6481 as an address and no more expensive than using a register for G2,
6482 the expression of G2 in terms of G1 can be used. */
6483 if (ret != NULL_RTX
6484 && g2->giv_type == DEST_ADDR
6485 && memory_address_p (GET_MODE (g2->mem), ret)
6486 /* ??? Loses, especially with -fforce-addr, where *g2->location
6487 will always be a register, and so anything more complicated
6488 gets discarded. */
6489 #if 0
6490 #ifdef ADDRESS_COST
6491 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6492 #else
6493 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6494 #endif
6495 #endif
6496 )
6497 {
6498 return ret;
6499 }
6500
6501 return NULL_RTX;
6502 }
6503 \f
6504 /* Check each extension dependant giv in this class to see if its
6505 root biv is safe from wrapping in the interior mode, which would
6506 make the giv illegal. */
6507
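/* A hypothetical instance of the problem being checked: an HImode biv
   that starts at 0 and is incremented by 1 for 70000 iterations wraps
   past 65535 in HImode, so a giv formed by zero-extending it to SImode
   would not advance linearly and must be rejected; with only 1000
   iterations both the zero- and sign-extension tests below succeed.  */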
6508 static void
6509 check_ext_dependant_givs (bl, loop_info)
6510 struct iv_class *bl;
6511 struct loop_info *loop_info;
6512 {
6513 int ze_ok = 0, se_ok = 0, info_ok = 0;
6514 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
6515 HOST_WIDE_INT start_val;
6516 unsigned HOST_WIDE_INT u_end_val = 0;
6517 unsigned HOST_WIDE_INT u_start_val = 0;
6518 rtx incr = pc_rtx;
6519 struct induction *v;
6520
6521 /* Make sure the iteration data is available. We must have
6522 constants in order to be certain of no overflow. */
6523 /* ??? An unknown iteration count with an increment of +-1
6524 combined with friendly exit tests against an invariant
6525 value is also amenable to optimization. Not implemented. */
6526 if (loop_info->n_iterations > 0
6527 && bl->initial_value
6528 && GET_CODE (bl->initial_value) == CONST_INT
6529 && (incr = biv_total_increment (bl))
6530 && GET_CODE (incr) == CONST_INT
6531 /* Make sure the host can represent the arithmetic. */
6532 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
6533 {
6534 unsigned HOST_WIDE_INT abs_incr, total_incr;
6535 HOST_WIDE_INT s_end_val;
6536 int neg_incr;
6537
6538 info_ok = 1;
6539 start_val = INTVAL (bl->initial_value);
6540 u_start_val = start_val;
6541
6542 neg_incr = 0, abs_incr = INTVAL (incr);
6543 if (INTVAL (incr) < 0)
6544 neg_incr = 1, abs_incr = -abs_incr;
6545 total_incr = abs_incr * loop_info->n_iterations;
6546
6547 /* Check for host arithmetic overflow. */
6548 if (total_incr / loop_info->n_iterations == abs_incr)
6549 {
6550 unsigned HOST_WIDE_INT u_max;
6551 HOST_WIDE_INT s_max;
6552
6553 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
6554 s_end_val = u_end_val;
6555 u_max = GET_MODE_MASK (biv_mode);
6556 s_max = u_max >> 1;
6557
6558 /* Check zero extension of biv ok. */
6559 if (start_val >= 0
6560 /* Check for host arithmetic overflow. */
6561 && (neg_incr
6562 ? u_end_val < u_start_val
6563 : u_end_val > u_start_val)
6564 /* Check for target arithmetic overflow. */
6565 && (neg_incr
6566 ? 1 /* taken care of with host overflow */
6567 : u_end_val <= u_max))
6568 {
6569 ze_ok = 1;
6570 }
6571
6572 /* Check sign extension of biv ok. */
6573 /* ??? While it is true that overflow with signed and pointer
6574 arithmetic is undefined, I fear too many programmers don't
6575 keep this fact in mind -- myself included on occasion.
6576 So we do not take advantage of signed overflow being undefined here. */
6577 if (start_val >= -s_max - 1
6578 /* Check for host arithmetic overflow. */
6579 && (neg_incr
6580 ? s_end_val < start_val
6581 : s_end_val > start_val)
6582 /* Check for target arithmetic overflow. */
6583 && (neg_incr
6584 ? s_end_val >= -s_max - 1
6585 : s_end_val <= s_max))
6586 {
6587 se_ok = 1;
6588 }
6589 }
6590 }
6591
6592 /* Invalidate givs that fail the tests. */
6593 for (v = bl->giv; v; v = v->next_iv)
6594 if (v->ext_dependant)
6595 {
6596 enum rtx_code code = GET_CODE (v->ext_dependant);
6597 int ok = 0;
6598
6599 switch (code)
6600 {
6601 case SIGN_EXTEND:
6602 ok = se_ok;
6603 break;
6604 case ZERO_EXTEND:
6605 ok = ze_ok;
6606 break;
6607
6608 case TRUNCATE:
6609 /* We don't know whether this value is being used as either
6610 signed or unsigned, so to safely truncate we must satisfy
6611 both. The initial check here verifies the BIV itself;
6612 once that is successful we may check its range wrt the
6613 derived GIV. */
6614 if (se_ok && ze_ok)
6615 {
6616 enum machine_mode outer_mode = GET_MODE (v->ext_dependant);
6617 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
6618
6619 /* We know from the above that both endpoints are nonnegative,
6620 and that there is no wrapping. Verify that both endpoints
6621 are within the (signed) range of the outer mode. */
6622 if (u_start_val <= max && u_end_val <= max)
6623 ok = 1;
6624 }
6625 break;
6626
6627 default:
6628 abort ();
6629 }
6630
6631 if (ok)
6632 {
6633 if (loop_dump_stream)
6634 {
6635 fprintf (loop_dump_stream,
6636 "Verified ext dependant giv at %d of reg %d\n",
6637 INSN_UID (v->insn), bl->regno);
6638 }
6639 }
6640 else
6641 {
6642 if (loop_dump_stream)
6643 {
6644 const char *why;
6645
6646 if (info_ok)
6647 why = "biv iteration values overflowed";
6648 else
6649 {
6650 if (incr == pc_rtx)
6651 incr = biv_total_increment (bl);
6652 if (incr == const1_rtx)
6653 why = "biv iteration info incomplete; incr by 1";
6654 else
6655 why = "biv iteration info incomplete";
6656 }
6657
6658 fprintf (loop_dump_stream,
6659 "Failed ext dependant giv at %d, %s\n",
6660 INSN_UID (v->insn), why);
6661 }
6662 v->ignore = 1;
6663 bl->all_reduced = 0;
6664 }
6665 }
6666 }
6667
6668 /* Generate a version of VALUE in a mode appropriate for initializing V. */
6669
6670 rtx
6671 extend_value_for_giv (v, value)
6672 struct induction *v;
6673 rtx value;
6674 {
6675 rtx ext_dep = v->ext_dependant;
6676
6677 if (! ext_dep)
6678 return value;
6679
6680 /* Recall that check_ext_dependant_givs verified that the known bounds
6681 of a biv did not overflow or wrap with respect to the extension for
6682 the giv. Therefore, constants need no additional adjustment. */
6683 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
6684 return value;
6685
6686 /* Otherwise, we must adjust the value to compensate for the
6687 differing modes of the biv and the giv. */
6688 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
6689 }
6690 \f
6691 struct combine_givs_stats
6692 {
6693 int giv_number;
6694 int total_benefit;
6695 };
6696
6697 static int
6698 cmp_combine_givs_stats (xp, yp)
6699 const PTR xp;
6700 const PTR yp;
6701 {
6702 const struct combine_givs_stats * const x =
6703 (const struct combine_givs_stats *) xp;
6704 const struct combine_givs_stats * const y =
6705 (const struct combine_givs_stats *) yp;
6706 int d;
6707 d = y->total_benefit - x->total_benefit;
6708 /* Stabilize the sort. */
6709 if (!d)
6710 d = x->giv_number - y->giv_number;
6711 return d;
6712 }
6713
6714 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6715 any other. If so, point SAME to the giv combined with and set NEW_REG to
6716 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6717 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
6718
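/* A hypothetical combination found by this pass: if g1 = 4*i and
   g2 = 4*i + 32 for the same biv I, combine_givs_p expresses g2 as
   g1 + 32, so g2->new_reg becomes (plus (reg g1) (const_int 32)),
   only g1 is strength reduced, and g2's uses are rewritten in terms
   of g1's register.  */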
6719 static void
6720 combine_givs (regs, bl)
6721 struct loop_regs *regs;
6722 struct iv_class *bl;
6723 {
6724 /* Additional benefit to add for being combined multiple times. */
6725 const int extra_benefit = 3;
6726
6727 struct induction *g1, *g2, **giv_array;
6728 int i, j, k, giv_count;
6729 struct combine_givs_stats *stats;
6730 rtx *can_combine;
6731
6732 /* Count givs, because bl->giv_count is incorrect here. */
6733 giv_count = 0;
6734 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6735 if (!g1->ignore)
6736 giv_count++;
6737
6738 giv_array
6739 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6740 i = 0;
6741 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6742 if (!g1->ignore)
6743 giv_array[i++] = g1;
6744
6745 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6746 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
6747
6748 for (i = 0; i < giv_count; i++)
6749 {
6750 int this_benefit;
6751 rtx single_use;
6752
6753 g1 = giv_array[i];
6754 stats[i].giv_number = i;
6755
6756 /* If a DEST_REG GIV is used only once, do not allow it to combine
6757 with anything, for in doing so we will gain nothing that cannot
6758 be had by simply letting the GIV with which we would have combined
6759 be reduced on its own. The lossage shows up in particular with
6760 DEST_ADDR targets on hosts with reg+reg addressing, though it can
6761 be seen elsewhere as well. */
6762 if (g1->giv_type == DEST_REG
6763 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
6764 && single_use != const0_rtx)
6765 continue;
6766
6767 this_benefit = g1->benefit;
6768 /* Add an additional weight for zero addends. */
6769 if (g1->no_const_addval)
6770 this_benefit += 1;
6771
6772 for (j = 0; j < giv_count; j++)
6773 {
6774 rtx this_combine;
6775
6776 g2 = giv_array[j];
6777 if (g1 != g2
6778 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6779 {
6780 can_combine[i * giv_count + j] = this_combine;
6781 this_benefit += g2->benefit + extra_benefit;
6782 }
6783 }
6784 stats[i].total_benefit = this_benefit;
6785 }
6786
6787 /* Iterate, combining until we can't. */
6788 restart:
6789 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6790
6791 if (loop_dump_stream)
6792 {
6793 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6794 for (k = 0; k < giv_count; k++)
6795 {
6796 g1 = giv_array[stats[k].giv_number];
6797 if (!g1->combined_with && !g1->same)
6798 fprintf (loop_dump_stream, " {%d, %d}",
6799 INSN_UID (giv_array[stats[k].giv_number]->insn),
6800 stats[k].total_benefit);
6801 }
6802 putc ('\n', loop_dump_stream);
6803 }
6804
6805 for (k = 0; k < giv_count; k++)
6806 {
6807 int g1_add_benefit = 0;
6808
6809 i = stats[k].giv_number;
6810 g1 = giv_array[i];
6811
6812 /* If it has already been combined, skip. */
6813 if (g1->combined_with || g1->same)
6814 continue;
6815
6816 for (j = 0; j < giv_count; j++)
6817 {
6818 g2 = giv_array[j];
6819 if (g1 != g2 && can_combine[i * giv_count + j]
6820 /* If it has already been combined, skip. */
6821 && ! g2->same && ! g2->combined_with)
6822 {
6823 int l;
6824
6825 g2->new_reg = can_combine[i * giv_count + j];
6826 g2->same = g1;
6827 g1->combined_with++;
6828 g1->lifetime += g2->lifetime;
6829
6830 g1_add_benefit += g2->benefit;
6831
6832 /* ??? The new final_[bg]iv_value code does a much better job
6833 of finding replaceable giv's, and hence this code may no
6834 longer be necessary. */
6835 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6836 g1_add_benefit -= copy_cost;
6837
6838 /* To help optimize the next set of combinations, remove
6839 this giv from the benefits of other potential mates. */
6840 for (l = 0; l < giv_count; ++l)
6841 {
6842 int m = stats[l].giv_number;
6843 if (can_combine[m * giv_count + j])
6844 stats[l].total_benefit -= g2->benefit + extra_benefit;
6845 }
6846
6847 if (loop_dump_stream)
6848 fprintf (loop_dump_stream,
6849 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
6850 INSN_UID (g2->insn), INSN_UID (g1->insn),
6851 g1->benefit, g1_add_benefit, g1->lifetime);
6852 }
6853 }
6854
6855 /* To help optimize the next set of combinations, remove
6856 this giv from the benefits of other potential mates. */
6857 if (g1->combined_with)
6858 {
6859 for (j = 0; j < giv_count; ++j)
6860 {
6861 int m = stats[j].giv_number;
6862 if (can_combine[m * giv_count + i])
6863 stats[j].total_benefit -= g1->benefit + extra_benefit;
6864 }
6865
6866 g1->benefit += g1_add_benefit;
6867
6868 /* We've finished with this giv, and everything it touched.
6869 Restart the combination so that the weights for the
6870 rest of the givs are properly taken into account. */
6871 /* ??? Ideally we would compact the arrays at this point, so
6872 as to not cover old ground. But sanely compacting
6873 can_combine is tricky. */
6874 goto restart;
6875 }
6876 }
6877
6878 /* Clean up. */
6879 free (stats);
6880 free (can_combine);
6881 }
6882 \f
6883 /* Generate sequence for REG = B * M + A. */
6884
6885 static rtx
6886 gen_add_mult (b, m, a, reg)
6887 rtx b; /* initial value of basic induction variable */
6888 rtx m; /* multiplicative constant */
6889 rtx a; /* additive constant */
6890 rtx reg; /* destination register */
6891 {
6892 rtx seq;
6893 rtx result;
6894
6895 start_sequence ();
6896 /* Use unsigned arithmetic. */
6897 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
6898 if (reg != result)
6899 emit_move_insn (reg, result);
6900 seq = gen_sequence ();
6901 end_sequence ();
6902
6903 return seq;
6904 }
6905
6906
6907 /* Update registers created in insn sequence SEQ. */
6908
6909 static void
6910 loop_regs_update (loop, seq)
6911 const struct loop *loop ATTRIBUTE_UNUSED;
6912 rtx seq;
6913 {
6914 /* Update register info for alias analysis. */
6915
6916 if (GET_CODE (seq) == SEQUENCE)
6917 {
6918 int i;
6919 for (i = 0; i < XVECLEN (seq, 0); ++i)
6920 {
6921 rtx set = single_set (XVECEXP (seq, 0, i));
6922 if (set && GET_CODE (SET_DEST (set)) == REG)
6923 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6924 }
6925 }
6926 else
6927 {
6928 rtx set = single_set (seq);
6929 if (set && GET_CODE (SET_DEST (set)) == REG)
6930 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6931 }
6932 }
6933
6934
6935 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
6936
6937 void
6938 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
6939 const struct loop *loop;
6940 rtx b; /* initial value of basic induction variable */
6941 rtx m; /* multiplicative constant */
6942 rtx a; /* additive constant */
6943 rtx reg; /* destination register */
6944 basic_block before_bb;
6945 rtx before_insn;
6946 {
6947 rtx seq;
6948
6949 if (! before_insn)
6950 {
6951 loop_iv_add_mult_hoist (loop, b, m, a, reg);
6952 return;
6953 }
6954
6955 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6956 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6957
6958 /* Increase the lifetime of any invariants moved further in code. */
6959 update_reg_last_use (a, before_insn);
6960 update_reg_last_use (b, before_insn);
6961 update_reg_last_use (m, before_insn);
6962
6963 loop_insn_emit_before (loop, before_bb, before_insn, seq);
6964
6965 /* It is possible that the expansion created lots of new registers.
6966 Iterate over the sequence we just created and record them all. */
6967 loop_regs_update (loop, seq);
6968 }
6969
6970
6971 /* Emit insns in loop pre-header to set REG = B * M + A. */
6972
6973 void
6974 loop_iv_add_mult_sink (loop, b, m, a, reg)
6975 const struct loop *loop;
6976 rtx b; /* initial value of basic induction variable */
6977 rtx m; /* multiplicative constant */
6978 rtx a; /* additive constant */
6979 rtx reg; /* destination register */
6980 {
6981 rtx seq;
6982
6983 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6984 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6985
6986 /* Increase the lifetime of any invariants moved further in code.
6987 ???? Is this really necessary? */
6988 update_reg_last_use (a, loop->sink);
6989 update_reg_last_use (b, loop->sink);
6990 update_reg_last_use (m, loop->sink);
6991
6992 loop_insn_sink (loop, seq);
6993
6994 /* It is possible that the expansion created lots of new registers.
6995 Iterate over the sequence we just created and record them all. */
6996 loop_regs_update (loop, seq);
6997 }
6998
6999
7000 /* Emit insns after loop to set REG = B * M + A. */
7001
7002 void
7003 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7004 const struct loop *loop;
7005 rtx b; /* initial value of basic induction variable */
7006 rtx m; /* multiplicative constant */
7007 rtx a; /* additive constant */
7008 rtx reg; /* destination register */
7009 {
7010 rtx seq;
7011
7012 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7013 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7014
7015 loop_insn_hoist (loop, seq);
7016
7017 /* It is possible that the expansion created lots of new registers.
7018 Iterate over the sequence we just created and record them all. */
7019 loop_regs_update (loop, seq);
7020 }
7021
7022
7023
7024 /* Similar to gen_add_mult, but compute cost rather than generating
7025 sequence. */
7026
7027 static int
7028 iv_add_mult_cost (b, m, a, reg)
7029 rtx b; /* initial value of basic induction variable */
7030 rtx m; /* multiplicative constant */
7031 rtx a; /* additive constant */
7032 rtx reg; /* destination register */
7033 {
7034 int cost = 0;
7035 rtx last, result;
7036
7037 start_sequence ();
7038 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7039 if (reg != result)
7040 emit_move_insn (reg, result);
7041 last = get_last_insn ();
7042 while (last)
7043 {
7044 rtx t = single_set (last);
7045 if (t)
7046 cost += rtx_cost (SET_SRC (t), SET);
7047 last = PREV_INSN (last);
7048 }
7049 end_sequence ();
7050 return cost;
7051 }
7052 \f
7053 /* Test whether A * B can be computed without
7054 an actual multiply insn. Value is 1 if so. */
7055
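/* For instance (a hypothetical case), multiplying a register by the
   constant 5 typically expands to a shift and an add,

	t = a << 2;
	t = t + a;

   a short sequence with no MULT, so the product is considered cheap,
   whereas a constant needing a real multiply insn or a libcall makes
   this return 0.  */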
7056 static int
7057 product_cheap_p (a, b)
7058 rtx a;
7059 rtx b;
7060 {
7061 int i;
7062 rtx tmp;
7063 int win = 1;
7064
7065 /* If only one is constant, make it B. */
7066 if (GET_CODE (a) == CONST_INT)
7067 tmp = a, a = b, b = tmp;
7068
7069 /* If first constant, both constant, so don't need multiply. */
7070 if (GET_CODE (a) == CONST_INT)
7071 return 1;
7072
7073 /* If second not constant, neither is constant, so would need multiply. */
7074 if (GET_CODE (b) != CONST_INT)
7075 return 0;
7076
7077 /* One operand is constant, so might not need multiply insn. Generate the
7078 code for the multiply and see if a call or multiply, or long sequence
7079 of insns is generated. */
7080
7081 start_sequence ();
7082 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7083 tmp = gen_sequence ();
7084 end_sequence ();
7085
7086 if (GET_CODE (tmp) == SEQUENCE)
7087 {
7088 if (XVEC (tmp, 0) == 0)
7089 win = 1;
7090 else if (XVECLEN (tmp, 0) > 3)
7091 win = 0;
7092 else
7093 for (i = 0; i < XVECLEN (tmp, 0); i++)
7094 {
7095 rtx insn = XVECEXP (tmp, 0, i);
7096
7097 if (GET_CODE (insn) != INSN
7098 || (GET_CODE (PATTERN (insn)) == SET
7099 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7100 || (GET_CODE (PATTERN (insn)) == PARALLEL
7101 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7102 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7103 {
7104 win = 0;
7105 break;
7106 }
7107 }
7108 }
7109 else if (GET_CODE (tmp) == SET
7110 && GET_CODE (SET_SRC (tmp)) == MULT)
7111 win = 0;
7112 else if (GET_CODE (tmp) == PARALLEL
7113 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7114 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7115 win = 0;
7116
7117 return win;
7118 }
7119 \f
7120 /* Check to see if loop can be terminated by a "decrement and branch until
7121 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7122 Also try reversing an increment loop to a decrement loop
7123 to see if the optimization can be performed.
7124 Value is nonzero if optimization was performed. */
7125
7126 /* This is useful even if the architecture doesn't have such an insn,
7127    because it might change a loop which increments from 0 to n into a loop
7128 which decrements from n to 0. A loop that decrements to zero is usually
7129 faster than one that increments from zero. */
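/* As a source-level illustration (assumed, not part of the original
   comments), a loop whose counter is used only for counting, such as

	for (i = 0; i < n; i++)
	  do_something ();

   can be rewritten to run the counter from n down to 0, so that the exit
   test becomes a comparison against zero and, on targets like the m68k,
   a single decrement-and-branch instruction suffices.  */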
7130
7131 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7132 such as approx_final_value, biv_total_increment, loop_iterations, and
7133 final_[bg]iv_value. */
7134
7135 static int
7136 check_dbra_loop (loop, insn_count)
7137 struct loop *loop;
7138 int insn_count;
7139 {
7140 struct loop_info *loop_info = LOOP_INFO (loop);
7141 struct loop_regs *regs = LOOP_REGS (loop);
7142 struct loop_ivs *ivs = LOOP_IVS (loop);
7143 struct iv_class *bl;
7144 rtx reg;
7145 rtx jump_label;
7146 rtx final_value;
7147 rtx start_value;
7148 rtx new_add_val;
7149 rtx comparison;
7150 rtx before_comparison;
7151 rtx p;
7152 rtx jump;
7153 rtx first_compare;
7154 int compare_and_branch;
7155 rtx loop_start = loop->start;
7156 rtx loop_end = loop->end;
7157
7158 /* If last insn is a conditional branch, and the insn before tests a
7159 register value, try to optimize it. Otherwise, we can't do anything. */
7160
7161 jump = PREV_INSN (loop_end);
7162 comparison = get_condition_for_loop (loop, jump);
7163 if (comparison == 0)
7164 return 0;
7165 if (!onlyjump_p (jump))
7166 return 0;
7167
7168 /* Try to compute whether the compare/branch at the loop end is one or
7169 two instructions. */
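  /* E.g. (illustrative) on a cc0 target the loop usually ends with two insns,

	(set (cc0) (compare (reg) (reg)))
	(set (pc) (if_then_else (lt (cc0) (const_int 0)) (label_ref ...) (pc)))

     giving compare_and_branch == 2, whereas a target whose branch pattern
     contains the comparison directly needs only one insn, giving 1.  */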
7170 get_condition (jump, &first_compare);
7171 if (first_compare == jump)
7172 compare_and_branch = 1;
7173 else if (first_compare == prev_nonnote_insn (jump))
7174 compare_and_branch = 2;
7175 else
7176 return 0;
7177
7178 {
7179 /* If more than one condition is present to control the loop, then
7180 do not proceed, as this function does not know how to rewrite
7181 loop tests with more than one condition.
7182
7183 Look backwards from the first insn in the last comparison
7184 sequence and see if we've got another comparison sequence. */
7185
7186 rtx jump1;
7187 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7188 if (GET_CODE (jump1) == JUMP_INSN)
7189 return 0;
7190 }
7191
7192 /* Check all of the bivs to see if the compare uses one of them.
7193    Skip bivs set more than once because we can't guarantee that
7194 it will be zero on the last iteration. Also skip if the biv is
7195 used between its update and the test insn. */
7196
7197 for (bl = ivs->list; bl; bl = bl->next)
7198 {
7199 if (bl->biv_count == 1
7200 && ! bl->biv->maybe_multiple
7201 && bl->biv->dest_reg == XEXP (comparison, 0)
7202 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7203 first_compare))
7204 break;
7205 }
7206
7207 if (! bl)
7208 return 0;
7209
7210 /* Look for the case where the basic induction variable is always
7211 nonnegative, and equals zero on the last iteration.
7212 In this case, add a reg_note REG_NONNEG, which allows the
7213 m68k DBRA instruction to be used. */
7214
7215 if (((GET_CODE (comparison) == GT
7216 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7217 && INTVAL (XEXP (comparison, 1)) == -1)
7218 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7219 && GET_CODE (bl->biv->add_val) == CONST_INT
7220 && INTVAL (bl->biv->add_val) < 0)
7221 {
7222 /* Initial value must be greater than 0,
7223	 and init_val % -dec_value == 0, to ensure that it equals zero on
7224	 the last iteration.  */
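      /* Illustrative example: with an initial value of 12 and add_val == -4
	 the biv takes the values 12, 8, 4, 0, so it stays nonnegative and
	 reaches exactly zero; an initial value of 10 would fail, since
	 10 % 4 != 0.  */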
7225
7226 if (GET_CODE (bl->initial_value) == CONST_INT
7227 && INTVAL (bl->initial_value) > 0
7228 && (INTVAL (bl->initial_value)
7229 % (-INTVAL (bl->biv->add_val))) == 0)
7230 {
7231 /* register always nonnegative, add REG_NOTE to branch */
7232 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7233 REG_NOTES (jump)
7234 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7235 REG_NOTES (jump));
7236 bl->nonneg = 1;
7237
7238 return 1;
7239 }
7240
7241 /* If the decrement is 1 and the value was tested as >= 0 before
7242 the loop, then we can safely optimize. */
7243 for (p = loop_start; p; p = PREV_INSN (p))
7244 {
7245 if (GET_CODE (p) == CODE_LABEL)
7246 break;
7247 if (GET_CODE (p) != JUMP_INSN)
7248 continue;
7249
7250 before_comparison = get_condition_for_loop (loop, p);
7251 if (before_comparison
7252 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7253 && GET_CODE (before_comparison) == LT
7254 && XEXP (before_comparison, 1) == const0_rtx
7255 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7256 && INTVAL (bl->biv->add_val) == -1)
7257 {
7258 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7259 REG_NOTES (jump)
7260 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7261 REG_NOTES (jump));
7262 bl->nonneg = 1;
7263
7264 return 1;
7265 }
7266 }
7267 }
7268 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7269 && INTVAL (bl->biv->add_val) > 0)
7270 {
7271 /* Try to change inc to dec, so can apply above optimization. */
7272 /* Can do this if:
7273 all registers modified are induction variables or invariant,
7274 all memory references have non-overlapping addresses
7275 (obviously true if only one write)
7276 allow 2 insns for the compare/jump at the end of the loop. */
7277 /* Also, we must avoid any instructions which use both the reversed
7278 biv and another biv. Such instructions will fail if the loop is
7279 reversed. We meet this condition by requiring that either
7280 no_use_except_counting is true, or else that there is only
7281 one biv. */
7282 int num_nonfixed_reads = 0;
7283 /* 1 if the iteration var is used only to count iterations. */
7284 int no_use_except_counting = 0;
7285 /* 1 if the loop has no memory store, or it has a single memory store
7286 which is reversible. */
7287 int reversible_mem_store = 1;
7288
7289 if (bl->giv_count == 0 && ! loop->exit_count)
7290 {
7291 rtx bivreg = regno_reg_rtx[bl->regno];
7292 struct iv_class *blt;
7293
7294 /* If there are no givs for this biv, and the only exit is the
7295 fall through at the end of the loop, then
7296 see if perhaps there are no uses except to count. */
7297 no_use_except_counting = 1;
7298 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7299 if (INSN_P (p))
7300 {
7301 rtx set = single_set (p);
7302
7303 if (set && GET_CODE (SET_DEST (set)) == REG
7304 && REGNO (SET_DEST (set)) == bl->regno)
7305 /* An insn that sets the biv is okay. */
7306 ;
7307 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7308 || p == prev_nonnote_insn (loop_end))
7309 && reg_mentioned_p (bivreg, PATTERN (p)))
7310 {
7311 /* If either of these insns uses the biv and sets a pseudo
7312 that has more than one usage, then the biv has uses
7313 other than counting since it's used to derive a value
7314 that is used more than one time. */
7315 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7316 regs);
7317 if (regs->multiple_uses)
7318 {
7319 no_use_except_counting = 0;
7320 break;
7321 }
7322 }
7323 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7324 {
7325 no_use_except_counting = 0;
7326 break;
7327 }
7328 }
7329
7330 /* A biv has uses besides counting if it is used to set another biv. */
7331 for (blt = ivs->list; blt; blt = blt->next)
7332 if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7333 {
7334 no_use_except_counting = 0;
7335 break;
7336 }
7337 }
7338
7339 if (no_use_except_counting)
7340 /* No need to worry about MEMs. */
7341 ;
7342 else if (loop_info->num_mem_sets <= 1)
7343 {
7344 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7345 if (INSN_P (p))
7346 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7347
7348 /* If the loop has a single store, and the destination address is
7349 invariant, then we can't reverse the loop, because this address
7350 might then have the wrong value at loop exit.
7351 This would work if the source was invariant also, however, in that
7352 case, the insn should have been moved out of the loop. */
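	  /* Concrete illustration (assumed, not from the original comments): in

		for (i = 0; i < n; i++)
		  *p = i;

	     the store address is invariant and *p holds n - 1 after the loop;
	     the reversed loop would leave 0 there instead, so the store is
	     not reversible.  */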
7353
7354 if (loop_info->num_mem_sets == 1)
7355 {
7356 struct induction *v;
7357
7358 /* If we could prove that each of the memory locations
7359 written to was different, then we could reverse the
7360 store -- but we don't presently have any way of
7361 knowing that. */
7362 reversible_mem_store = 0;
7363
7364 /* If the store depends on a register that is set after the
7365 store, it depends on the initial value, and is thus not
7366 reversible. */
7367 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7368 {
7369 if (v->giv_type == DEST_REG
7370 && reg_mentioned_p (v->dest_reg,
7371 PATTERN (loop_info->first_loop_store_insn))
7372 && loop_insn_first_p (loop_info->first_loop_store_insn,
7373 v->insn))
7374 reversible_mem_store = 0;
7375 }
7376 }
7377 }
7378 else
7379 return 0;
7380
7381 /* This code only acts for innermost loops. Also it simplifies
7382 the memory address check by only reversing loops with
7383 zero or one memory access.
7384 Two memory accesses could involve parts of the same array,
7385 and that can't be reversed.
7386 	 If the biv is used only for counting, then we don't need to worry
7387 about all these things. */
7388
7389 if ((num_nonfixed_reads <= 1
7390 && ! loop_info->has_nonconst_call
7391 && ! loop_info->has_volatile
7392 && reversible_mem_store
7393 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
7394 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
7395 && (bl == ivs->list && bl->next == 0))
7396 || no_use_except_counting)
7397 {
7398 rtx tem;
7399
7400 /* Loop can be reversed. */
7401 if (loop_dump_stream)
7402 fprintf (loop_dump_stream, "Can reverse loop\n");
7403
7404 /* Now check other conditions:
7405
7406 The increment must be a constant, as must the initial value,
7407 and the comparison code must be LT.
7408
7409 This test can probably be improved since +/- 1 in the constant
7410 can be obtained by changing LT to LE and vice versa; this is
7411 confusing. */
7412
7413 if (comparison
7414 /* for constants, LE gets turned into LT */
7415 && (GET_CODE (comparison) == LT
7416 || (GET_CODE (comparison) == LE
7417 && no_use_except_counting)))
7418 {
7419 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7420 rtx initial_value, comparison_value;
7421 int nonneg = 0;
7422 enum rtx_code cmp_code;
7423 int comparison_const_width;
7424 unsigned HOST_WIDE_INT comparison_sign_mask;
7425
7426 add_val = INTVAL (bl->biv->add_val);
7427 comparison_value = XEXP (comparison, 1);
7428 if (GET_MODE (comparison_value) == VOIDmode)
7429 comparison_const_width
7430 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7431 else
7432 comparison_const_width
7433 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7434 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7435 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7436 comparison_sign_mask
7437 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
7438
7439 /* If the comparison value is not a loop invariant, then we
7440 can not reverse this loop.
7441
7442 ??? If the insns which initialize the comparison value as
7443 a whole compute an invariant result, then we could move
7444 them out of the loop and proceed with loop reversal. */
7445 if (! loop_invariant_p (loop, comparison_value))
7446 return 0;
7447
7448 if (GET_CODE (comparison_value) == CONST_INT)
7449 comparison_val = INTVAL (comparison_value);
7450 initial_value = bl->initial_value;
7451
7452 /* Normalize the initial value if it is an integer and
7453 has no other use except as a counter. This will allow
7454 a few more loops to be reversed. */
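	      /* Worked example (illustrative): for a loop equivalent to
		 "for (i = 4; i < 10; i += 3)" we get comparison_val
		 = 10 - 4 = 6, which is rounded up to a multiple of add_val:
		 6 + 3 - 1 = 8, then 8 - (8 % 3) = 6; initial_value becomes 0,
		 i.e. the loop is treated as "for (i = 0; i < 6; i += 3)",
		 which runs the same two iterations.  */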
7455 if (no_use_except_counting
7456 && GET_CODE (comparison_value) == CONST_INT
7457 && GET_CODE (initial_value) == CONST_INT)
7458 {
7459 comparison_val = comparison_val - INTVAL (bl->initial_value);
7460 /* The code below requires comparison_val to be a multiple
7461 of add_val in order to do the loop reversal, so
7462 round up comparison_val to a multiple of add_val.
7463 Since comparison_value is constant, we know that the
7464 current comparison code is LT. */
7465 comparison_val = comparison_val + add_val - 1;
7466 comparison_val
7467 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
7468 /* We postpone overflow checks for COMPARISON_VAL here;
7469 even if there is an overflow, we might still be able to
7470 reverse the loop, if converting the loop exit test to
7471 NE is possible. */
7472 initial_value = const0_rtx;
7473 }
7474
7475 /* First check if we can do a vanilla loop reversal. */
7476 if (initial_value == const0_rtx
7477 /* If we have a decrement_and_branch_on_count,
7478 prefer the NE test, since this will allow that
7479 instruction to be generated. Note that we must
7480 use a vanilla loop reversal if the biv is used to
7481 calculate a giv or has a non-counting use. */
7482 #if ! defined (HAVE_decrement_and_branch_until_zero) \
7483 && defined (HAVE_decrement_and_branch_on_count)
7484 && (! (add_val == 1 && loop->vtop
7485 && (bl->biv_count == 0
7486 || no_use_except_counting)))
7487 #endif
7488 && GET_CODE (comparison_value) == CONST_INT
7489 /* Now do postponed overflow checks on COMPARISON_VAL. */
7490 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7491 & comparison_sign_mask))
7492 {
7493 /* Register will always be nonnegative, with value
7494 0 on last iteration */
7495 add_adjust = add_val;
7496 nonneg = 1;
7497 cmp_code = GE;
7498 }
7499 else if (add_val == 1 && loop->vtop
7500 && (bl->biv_count == 0
7501 || no_use_except_counting))
7502 {
7503 add_adjust = 0;
7504 cmp_code = NE;
7505 }
7506 else
7507 return 0;
7508
7509 if (GET_CODE (comparison) == LE)
7510 add_adjust -= add_val;
7511
7512 /* If the initial value is not zero, or if the comparison
7513 value is not an exact multiple of the increment, then we
7514 can not reverse this loop. */
7515 if (initial_value == const0_rtx
7516 && GET_CODE (comparison_value) == CONST_INT)
7517 {
7518 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7519 return 0;
7520 }
7521 else
7522 {
7523 if (! no_use_except_counting || add_val != 1)
7524 return 0;
7525 }
7526
7527 final_value = comparison_value;
7528
7529 /* Reset these in case we normalized the initial value
7530 and comparison value above. */
7531 if (GET_CODE (comparison_value) == CONST_INT
7532 && GET_CODE (initial_value) == CONST_INT)
7533 {
7534 comparison_value = GEN_INT (comparison_val);
7535 final_value
7536 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7537 }
7538 bl->initial_value = initial_value;
7539
7540 /* Save some info needed to produce the new insns. */
7541 reg = bl->biv->dest_reg;
7542 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7543 if (jump_label == pc_rtx)
7544 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7545 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
7546
7547 /* Set start_value; if this is not a CONST_INT, we need
7548 to generate a SUB.
7549 Initialize biv to start_value before loop start.
7550 The old initializing insn will be deleted as a
7551 dead store by flow.c. */
7552 if (initial_value == const0_rtx
7553 && GET_CODE (comparison_value) == CONST_INT)
7554 {
7555 start_value = GEN_INT (comparison_val - add_adjust);
7556 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
7557 }
7558 else if (GET_CODE (initial_value) == CONST_INT)
7559 {
7560 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7561 enum machine_mode mode = GET_MODE (reg);
7562 enum insn_code icode
7563 = add_optab->handlers[(int) mode].insn_code;
7564
7565 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7566 || ! ((*insn_data[icode].operand[1].predicate)
7567 (comparison_value, mode))
7568 || ! ((*insn_data[icode].operand[2].predicate)
7569 (offset, mode)))
7570 return 0;
7571 start_value
7572 = gen_rtx_PLUS (mode, comparison_value, offset);
7573 loop_insn_hoist (loop, (GEN_FCN (icode)
7574 (reg, comparison_value, offset)));
7575 if (GET_CODE (comparison) == LE)
7576 final_value = gen_rtx_PLUS (mode, comparison_value,
7577 GEN_INT (add_val));
7578 }
7579 else if (! add_adjust)
7580 {
7581 enum machine_mode mode = GET_MODE (reg);
7582 enum insn_code icode
7583 = sub_optab->handlers[(int) mode].insn_code;
7584 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7585 || ! ((*insn_data[icode].operand[1].predicate)
7586 (comparison_value, mode))
7587 || ! ((*insn_data[icode].operand[2].predicate)
7588 (initial_value, mode)))
7589 return 0;
7590 start_value
7591 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7592 loop_insn_hoist (loop, (GEN_FCN (icode)
7593 (reg, comparison_value,
7594 initial_value)));
7595 }
7596 else
7597 /* We could handle the other cases too, but it'll be
7598 better to have a testcase first. */
7599 return 0;
7600
7601 /* We may not have a single insn which can increment a reg, so
7602 create a sequence to hold all the insns from expand_inc. */
7603 start_sequence ();
7604 expand_inc (reg, new_add_val);
7605 tem = gen_sequence ();
7606 end_sequence ();
7607
7608 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
7609 delete_insn (bl->biv->insn);
7610
7611 /* Update biv info to reflect its new status. */
7612 bl->biv->insn = p;
7613 bl->initial_value = start_value;
7614 bl->biv->add_val = new_add_val;
7615
7616 /* Update loop info. */
7617 loop_info->initial_value = reg;
7618 loop_info->initial_equiv_value = reg;
7619 loop_info->final_value = const0_rtx;
7620 loop_info->final_equiv_value = const0_rtx;
7621 loop_info->comparison_value = const0_rtx;
7622 loop_info->comparison_code = cmp_code;
7623 loop_info->increment = new_add_val;
7624
7625 /* Inc LABEL_NUSES so that delete_insn will
7626 not delete the label. */
7627 LABEL_NUSES (XEXP (jump_label, 0))++;
7628
7629 /* Emit an insn after the end of the loop to set the biv's
7630 proper exit value if it is used anywhere outside the loop. */
7631 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7632 || ! bl->init_insn
7633 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7634 loop_insn_sink (loop, gen_move_insn (reg, final_value));
7635
7636 /* Delete compare/branch at end of loop. */
7637 delete_insn (PREV_INSN (loop_end));
7638 if (compare_and_branch == 2)
7639 delete_insn (first_compare);
7640
7641 /* Add new compare/branch insn at end of loop. */
7642 start_sequence ();
7643 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
7644 GET_MODE (reg), 0, 0,
7645 XEXP (jump_label, 0));
7646 tem = gen_sequence ();
7647 end_sequence ();
7648 emit_jump_insn_before (tem, loop_end);
7649
7650 for (tem = PREV_INSN (loop_end);
7651 tem && GET_CODE (tem) != JUMP_INSN;
7652 tem = PREV_INSN (tem))
7653 ;
7654
7655 if (tem)
7656 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7657
7658 if (nonneg)
7659 {
7660 if (tem)
7661 {
7662 /* Increment of LABEL_NUSES done above. */
7663 /* Register is now always nonnegative,
7664 so add REG_NONNEG note to the branch. */
7665 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
7666 REG_NOTES (tem));
7667 }
7668 bl->nonneg = 1;
7669 }
7670
7671 /* No insn may reference both the reversed and another biv or it
7672 will fail (see comment near the top of the loop reversal
7673 code).
7674 Earlier on, we have verified that the biv has no use except
7675 counting, or it is the only biv in this function.
7676 However, the code that computes no_use_except_counting does
7677 not verify reg notes. It's possible to have an insn that
7678 references another biv, and has a REG_EQUAL note with an
7679 expression based on the reversed biv. To avoid this case,
7680 remove all REG_EQUAL notes based on the reversed biv
7681 here. */
7682 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7683 if (INSN_P (p))
7684 {
7685 rtx *pnote;
7686 rtx set = single_set (p);
7687 /* If this is a set of a GIV based on the reversed biv, any
7688 REG_EQUAL notes should still be correct. */
7689 if (! set
7690 || GET_CODE (SET_DEST (set)) != REG
7691 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
7692 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
7693 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
7694 for (pnote = &REG_NOTES (p); *pnote;)
7695 {
7696 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
7697 && reg_mentioned_p (regno_reg_rtx[bl->regno],
7698 XEXP (*pnote, 0)))
7699 *pnote = XEXP (*pnote, 1);
7700 else
7701 pnote = &XEXP (*pnote, 1);
7702 }
7703 }
7704
7705 /* Mark that this biv has been reversed. Each giv which depends
7706 on this biv, and which is also live past the end of the loop
7707 will have to be fixed up. */
7708
7709 bl->reversed = 1;
7710
7711 if (loop_dump_stream)
7712 {
7713 fprintf (loop_dump_stream, "Reversed loop");
7714 if (bl->nonneg)
7715 fprintf (loop_dump_stream, " and added reg_nonneg\n");
7716 else
7717 fprintf (loop_dump_stream, "\n");
7718 }
7719
7720 return 1;
7721 }
7722 }
7723 }
7724
7725 return 0;
7726 }
7727 \f
7728 /* Verify whether the biv BL appears to be eliminable,
7729 based on the insns in the loop that refer to it.
7730
7731 If ELIMINATE_P is non-zero, actually do the elimination.
7732
7733 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7734 determine whether invariant insns should be placed inside or at the
7735 start of the loop. */
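/* Source-level illustration (assumed, not taken from the sources): biv
   elimination is the step that, once the giv q = p + i has been strength
   reduced, turns

	for (i = 0; i < n; i++)
	  p[i] = 0;

   into roughly

	for (q = p; q < p + n; q++)
	  *q = 0;

   so the original counter I is no longer needed inside the loop.  */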
7736
7737 static int
7738 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
7739 const struct loop *loop;
7740 struct iv_class *bl;
7741 int eliminate_p;
7742 int threshold, insn_count;
7743 {
7744 struct loop_ivs *ivs = LOOP_IVS (loop);
7745 rtx reg = bl->biv->dest_reg;
7746 rtx p;
7747
7748 /* Scan all insns in the loop, stopping if we find one that uses the
7749 biv in a way that we cannot eliminate. */
7750
7751 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
7752 {
7753 enum rtx_code code = GET_CODE (p);
7754 basic_block where_bb = 0;
7755 rtx where_insn = threshold >= insn_count ? 0 : p;
7756
7757 /* If this is a libcall that sets a giv, skip ahead to its end. */
7758 if (GET_RTX_CLASS (code) == 'i')
7759 {
7760 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
7761
7762 if (note)
7763 {
7764 rtx last = XEXP (note, 0);
7765 rtx set = single_set (last);
7766
7767 if (set && GET_CODE (SET_DEST (set)) == REG)
7768 {
7769 unsigned int regno = REGNO (SET_DEST (set));
7770
7771 if (regno < ivs->n_regs
7772 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
7773 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
7774 p = last;
7775 }
7776 }
7777 }
7778 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7779 && reg_mentioned_p (reg, PATTERN (p))
7780 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
7781 eliminate_p, where_bb, where_insn))
7782 {
7783 if (loop_dump_stream)
7784 fprintf (loop_dump_stream,
7785 "Cannot eliminate biv %d: biv used in insn %d.\n",
7786 bl->regno, INSN_UID (p));
7787 break;
7788 }
7789 }
7790
7791 if (p == loop->end)
7792 {
7793 if (loop_dump_stream)
7794 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7795 bl->regno, eliminate_p ? "was" : "can be");
7796 return 1;
7797 }
7798
7799 return 0;
7800 }
7801 \f
7802 /* INSN and REFERENCE are instructions in the same insn chain.
7803 Return non-zero if INSN is first. */
7804
7805 int
7806 loop_insn_first_p (insn, reference)
7807 rtx insn, reference;
7808 {
7809 rtx p, q;
7810
7811 for (p = insn, q = reference;;)
7812 {
7813 /* Start with test for not first so that INSN == REFERENCE yields not
7814 first. */
7815 if (q == insn || ! p)
7816 return 0;
7817 if (p == reference || ! q)
7818 return 1;
7819
7820 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
7821 previous insn, hence the <= comparison below does not work if
7822 P is a note. */
7823 if (INSN_UID (p) < max_uid_for_loop
7824 && INSN_UID (q) < max_uid_for_loop
7825 && GET_CODE (p) != NOTE)
7826 return INSN_LUID (p) <= INSN_LUID (q);
7827
7828 if (INSN_UID (p) >= max_uid_for_loop
7829 || GET_CODE (p) == NOTE)
7830 p = NEXT_INSN (p);
7831 if (INSN_UID (q) >= max_uid_for_loop)
7832 q = NEXT_INSN (q);
7833 }
7834 }
7835
7836 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
7837 the offset that we have to take into account due to auto-increment /
7838 div derivation is zero. */
7839 static int
7840 biv_elimination_giv_has_0_offset (biv, giv, insn)
7841 struct induction *biv, *giv;
7842 rtx insn;
7843 {
7844 /* If the giv V had the auto-inc address optimization applied
7845 to it, and INSN occurs between the giv insn and the biv
7846 insn, then we'd have to adjust the value used here.
7847 This is rare, so we don't bother to make this possible. */
7848 if (giv->auto_inc_opt
7849 && ((loop_insn_first_p (giv->insn, insn)
7850 && loop_insn_first_p (insn, biv->insn))
7851 || (loop_insn_first_p (biv->insn, insn)
7852 && loop_insn_first_p (insn, giv->insn))))
7853 return 0;
7854
7855 return 1;
7856 }
7857
7858 /* If BL appears in X (part of the pattern of INSN), see if we can
7859 eliminate its use. If so, return 1. If not, return 0.
7860
7861 If BIV does not appear in X, return 1.
7862
7863 If ELIMINATE_P is non-zero, actually do the elimination.
7864 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
7865 Depending on how many items have been moved out of the loop, it
7866 will either be before INSN (when WHERE_INSN is non-zero) or at the
7867 start of the loop (when WHERE_INSN is zero). */
7868
7869 static int
7870 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
7871 const struct loop *loop;
7872 rtx x, insn;
7873 struct iv_class *bl;
7874 int eliminate_p;
7875 basic_block where_bb;
7876 rtx where_insn;
7877 {
7878 enum rtx_code code = GET_CODE (x);
7879 rtx reg = bl->biv->dest_reg;
7880 enum machine_mode mode = GET_MODE (reg);
7881 struct induction *v;
7882 rtx arg, tem;
7883 #ifdef HAVE_cc0
7884 rtx new;
7885 #endif
7886 int arg_operand;
7887 const char *fmt;
7888 int i, j;
7889
7890 switch (code)
7891 {
7892 case REG:
7893 /* If we haven't already been able to do something with this BIV,
7894 we can't eliminate it. */
7895 if (x == reg)
7896 return 0;
7897 return 1;
7898
7899 case SET:
7900 /* If this sets the BIV, it is not a problem. */
7901 if (SET_DEST (x) == reg)
7902 return 1;
7903
7904 /* If this is an insn that defines a giv, it is also ok because
7905 it will go away when the giv is reduced. */
7906 for (v = bl->giv; v; v = v->next_iv)
7907 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7908 return 1;
7909
7910 #ifdef HAVE_cc0
7911 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7912 {
7913 /* Can replace with any giv that was reduced and
7914 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7915 Require a constant for MULT_VAL, so we know it's nonzero.
7916 ??? We disable this optimization to avoid potential
7917 overflows. */
7918
7919 for (v = bl->giv; v; v = v->next_iv)
7920 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
7921 && v->add_val == const0_rtx
7922 && ! v->ignore && ! v->maybe_dead && v->always_computable
7923 && v->mode == mode
7924 && 0)
7925 {
7926 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7927 continue;
7928
7929 if (! eliminate_p)
7930 return 1;
7931
7932 /* If the giv has the opposite direction of change,
7933 then reverse the comparison. */
7934 if (INTVAL (v->mult_val) < 0)
7935 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7936 const0_rtx, v->new_reg);
7937 else
7938 new = v->new_reg;
7939
7940 /* We can probably test that giv's reduced reg. */
7941 if (validate_change (insn, &SET_SRC (x), new, 0))
7942 return 1;
7943 }
7944
7945 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7946 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7947 Require a constant for MULT_VAL, so we know it's nonzero.
7948 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7949 overflow problem. */
7950
7951 for (v = bl->giv; v; v = v->next_iv)
7952 if (GET_CODE (v->mult_val) == CONST_INT
7953 && v->mult_val != const0_rtx
7954 && ! v->ignore && ! v->maybe_dead && v->always_computable
7955 && v->mode == mode
7956 && (GET_CODE (v->add_val) == SYMBOL_REF
7957 || GET_CODE (v->add_val) == LABEL_REF
7958 || GET_CODE (v->add_val) == CONST
7959 || (GET_CODE (v->add_val) == REG
7960 && REG_POINTER (v->add_val))))
7961 {
7962 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7963 continue;
7964
7965 if (! eliminate_p)
7966 return 1;
7967
7968 /* If the giv has the opposite direction of change,
7969 then reverse the comparison. */
7970 if (INTVAL (v->mult_val) < 0)
7971 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7972 v->new_reg);
7973 else
7974 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7975 copy_rtx (v->add_val));
7976
7977 /* Replace biv with the giv's reduced register. */
7978 update_reg_last_use (v->add_val, insn);
7979 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7980 return 1;
7981
7982 /* Insn doesn't support that constant or invariant. Copy it
7983 into a register (it will be a loop invariant.) */
7984 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7985
7986 loop_insn_emit_before (loop, 0, where_insn,
7987 gen_move_insn (tem,
7988 copy_rtx (v->add_val)));
7989
7990 /* Substitute the new register for its invariant value in
7991 the compare expression. */
7992 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7993 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7994 return 1;
7995 }
7996 }
7997 #endif
7998 break;
7999
8000 case COMPARE:
8001 case EQ: case NE:
8002 case GT: case GE: case GTU: case GEU:
8003 case LT: case LE: case LTU: case LEU:
8004 /* See if either argument is the biv. */
8005 if (XEXP (x, 0) == reg)
8006 arg = XEXP (x, 1), arg_operand = 1;
8007 else if (XEXP (x, 1) == reg)
8008 arg = XEXP (x, 0), arg_operand = 0;
8009 else
8010 break;
8011
8012 if (CONSTANT_P (arg))
8013 {
8014 /* First try to replace with any giv that has constant positive
8015 mult_val and constant add_val. We might be able to support
8016 negative mult_val, but it seems complex to do it in general. */
8017
8018 for (v = bl->giv; v; v = v->next_iv)
8019 if (GET_CODE (v->mult_val) == CONST_INT
8020 && INTVAL (v->mult_val) > 0
8021 && (GET_CODE (v->add_val) == SYMBOL_REF
8022 || GET_CODE (v->add_val) == LABEL_REF
8023 || GET_CODE (v->add_val) == CONST
8024 || (GET_CODE (v->add_val) == REG
8025 && REG_POINTER (v->add_val)))
8026 && ! v->ignore && ! v->maybe_dead && v->always_computable
8027 && v->mode == mode)
8028 {
8029 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8030 continue;
8031
8032 if (! eliminate_p)
8033 return 1;
8034
8035 /* Replace biv with the giv's reduced reg. */
8036 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8037
8038 /* If all constants are actually constant integers and
8039 the derived constant can be directly placed in the COMPARE,
8040 do so. */
8041 if (GET_CODE (arg) == CONST_INT
8042 && GET_CODE (v->mult_val) == CONST_INT
8043 && GET_CODE (v->add_val) == CONST_INT)
8044 {
8045 validate_change (insn, &XEXP (x, arg_operand),
8046 GEN_INT (INTVAL (arg)
8047 * INTVAL (v->mult_val)
8048 + INTVAL (v->add_val)), 1);
8049 }
8050 else
8051 {
8052 /* Otherwise, load it into a register. */
8053 tem = gen_reg_rtx (mode);
8054 loop_iv_add_mult_emit_before (loop, arg,
8055 v->mult_val, v->add_val,
8056 tem, where_bb, where_insn);
8057 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8058 }
8059 if (apply_change_group ())
8060 return 1;
8061 }
8062
8063 /* Look for giv with positive constant mult_val and nonconst add_val.
8064 Insert insns to calculate new compare value.
8065 ??? Turn this off due to possible overflow. */
8066
8067 for (v = bl->giv; v; v = v->next_iv)
8068 if (GET_CODE (v->mult_val) == CONST_INT
8069 && INTVAL (v->mult_val) > 0
8070 && ! v->ignore && ! v->maybe_dead && v->always_computable
8071 && v->mode == mode
8072 && 0)
8073 {
8074 rtx tem;
8075
8076 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8077 continue;
8078
8079 if (! eliminate_p)
8080 return 1;
8081
8082 tem = gen_reg_rtx (mode);
8083
8084 /* Replace biv with giv's reduced register. */
8085 validate_change (insn, &XEXP (x, 1 - arg_operand),
8086 v->new_reg, 1);
8087
8088 /* Compute value to compare against. */
8089 loop_iv_add_mult_emit_before (loop, arg,
8090 v->mult_val, v->add_val,
8091 tem, where_bb, where_insn);
8092 /* Use it in this insn. */
8093 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8094 if (apply_change_group ())
8095 return 1;
8096 }
8097 }
8098 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8099 {
8100 if (loop_invariant_p (loop, arg) == 1)
8101 {
8102 /* Look for giv with constant positive mult_val and nonconst
8103 add_val. Insert insns to compute new compare value.
8104 ??? Turn this off due to possible overflow. */
8105
8106 for (v = bl->giv; v; v = v->next_iv)
8107 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8108 && ! v->ignore && ! v->maybe_dead && v->always_computable
8109 && v->mode == mode
8110 && 0)
8111 {
8112 rtx tem;
8113
8114 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8115 continue;
8116
8117 if (! eliminate_p)
8118 return 1;
8119
8120 tem = gen_reg_rtx (mode);
8121
8122 /* Replace biv with giv's reduced register. */
8123 validate_change (insn, &XEXP (x, 1 - arg_operand),
8124 v->new_reg, 1);
8125
8126 /* Compute value to compare against. */
8127 loop_iv_add_mult_emit_before (loop, arg,
8128 v->mult_val, v->add_val,
8129 tem, where_bb, where_insn);
8130 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8131 if (apply_change_group ())
8132 return 1;
8133 }
8134 }
8135
8136 	  /* This code has problems.  Basically, when deciding whether we will
8137 	     eliminate BL, you can't know whether a particular giv of ARG will
8138 	     be reduced.  If it isn't going to be reduced,
8139 we can't eliminate BL. We can try forcing it to be reduced,
8140 but that can generate poor code.
8141
8142 	     The problem is that the benefit of reducing TV, below, should be
8143 be increased if BL can actually be eliminated, but this means
8144 we might have to do a topological sort of the order in which
8145 	     we try to process bivs.  It doesn't seem worthwhile to do
8146 this sort of thing now. */
8147
8148 #if 0
8149 /* Otherwise the reg compared with had better be a biv. */
8150 if (GET_CODE (arg) != REG
8151 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8152 return 0;
8153
8154 /* Look for a pair of givs, one for each biv,
8155 with identical coefficients. */
8156 for (v = bl->giv; v; v = v->next_iv)
8157 {
8158 struct induction *tv;
8159
8160 if (v->ignore || v->maybe_dead || v->mode != mode)
8161 continue;
8162
8163 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8164 tv = tv->next_iv)
8165 if (! tv->ignore && ! tv->maybe_dead
8166 && rtx_equal_p (tv->mult_val, v->mult_val)
8167 && rtx_equal_p (tv->add_val, v->add_val)
8168 && tv->mode == mode)
8169 {
8170 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8171 continue;
8172
8173 if (! eliminate_p)
8174 return 1;
8175
8176 /* Replace biv with its giv's reduced reg. */
8177 XEXP (x, 1 - arg_operand) = v->new_reg;
8178 /* Replace other operand with the other giv's
8179 reduced reg. */
8180 XEXP (x, arg_operand) = tv->new_reg;
8181 return 1;
8182 }
8183 }
8184 #endif
8185 }
8186
8187 /* If we get here, the biv can't be eliminated. */
8188 return 0;
8189
8190 case MEM:
8191 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8192 biv is used in it, since it will be replaced. */
8193 for (v = bl->giv; v; v = v->next_iv)
8194 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8195 return 1;
8196 break;
8197
8198 default:
8199 break;
8200 }
8201
8202 /* See if any subexpression fails elimination. */
8203 fmt = GET_RTX_FORMAT (code);
8204 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8205 {
8206 switch (fmt[i])
8207 {
8208 case 'e':
8209 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8210 eliminate_p, where_bb, where_insn))
8211 return 0;
8212 break;
8213
8214 case 'E':
8215 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8216 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8217 eliminate_p, where_bb, where_insn))
8218 return 0;
8219 break;
8220 }
8221 }
8222
8223 return 1;
8224 }
8225 \f
8226 /* Return nonzero if the last use of REG
8227 is in an insn following INSN in the same basic block. */
8228
8229 static int
8230 last_use_this_basic_block (reg, insn)
8231 rtx reg;
8232 rtx insn;
8233 {
8234 rtx n;
8235 for (n = insn;
8236 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8237 n = NEXT_INSN (n))
8238 {
8239 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8240 return 1;
8241 }
8242 return 0;
8243 }
8244 \f
8245 /* Called via `note_stores' to record the initial value of a biv. Here we
8246 just record the location of the set and process it later. */
8247
8248 static void
8249 record_initial (dest, set, data)
8250 rtx dest;
8251 rtx set;
8252 void *data ATTRIBUTE_UNUSED;
8253 {
8254 struct loop_ivs *ivs = (struct loop_ivs *) data;
8255 struct iv_class *bl;
8256
8257 if (GET_CODE (dest) != REG
8258 || REGNO (dest) >= ivs->n_regs
8259 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8260 return;
8261
8262 bl = REG_IV_CLASS (ivs, REGNO (dest));
8263
8264 /* If this is the first set found, record it. */
8265 if (bl->init_insn == 0)
8266 {
8267 bl->init_insn = note_insn;
8268 bl->init_set = set;
8269 }
8270 }
8271 \f
8272 /* If any of the registers in X are "old" and currently have a last use earlier
8273 than INSN, update them to have a last use of INSN. Their actual last use
8274 will be the previous insn but it will not have a valid uid_luid so we can't
8275 use it. X must be a source expression only. */
8276
8277 static void
8278 update_reg_last_use (x, insn)
8279 rtx x;
8280 rtx insn;
8281 {
8282 /* Check for the case where INSN does not have a valid luid. In this case,
8283 there is no need to modify the regno_last_uid, as this can only happen
8284 when code is inserted after the loop_end to set a pseudo's final value,
8285 and hence this insn will never be the last use of x.
8286 ???? This comment is not correct. See for example loop_givs_reduce.
8287 This may insert an insn before another new insn. */
8288 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8289 && INSN_UID (insn) < max_uid_for_loop
8290 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8291 {
8292 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8293 }
8294 else
8295 {
8296 register int i, j;
8297 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8298 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8299 {
8300 if (fmt[i] == 'e')
8301 update_reg_last_use (XEXP (x, i), insn);
8302 else if (fmt[i] == 'E')
8303 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8304 update_reg_last_use (XVECEXP (x, i, j), insn);
8305 }
8306 }
8307 }
8308 \f
8309 /* Given an insn INSN and condition COND, return the condition in a
8310 canonical form to simplify testing by callers. Specifically:
8311
8312 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8313 (2) Both operands will be machine operands; (cc0) will have been replaced.
8314 (3) If an operand is a constant, it will be the second operand.
8315 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8316 for GE, GEU, and LEU.
8317
8318 If the condition cannot be understood, or is an inequality floating-point
8319 comparison which needs to be reversed, 0 will be returned.
8320
8321    If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
8322
8323 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8324 insn used in locating the condition was found. If a replacement test
8325 of the condition is desired, it should be placed in front of that
8326 insn and we will be sure that the inputs are still valid.
8327
8328 If WANT_REG is non-zero, we wish the condition to be relative to that
8329 register, if possible. Therefore, do not canonicalize the condition
8330 further. */
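/* Illustrative examples of the canonical form: (le (reg) (const_int 4))
   is returned as (lt (reg) (const_int 5)), (geu (reg) (const_int 4)) as
   (gtu (reg) (const_int 3)), and a comparison written with the constant
   first is swapped so that the constant ends up as the second operand.  */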
8331
8332 rtx
8333 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8334 rtx insn;
8335 rtx cond;
8336 int reverse;
8337 rtx *earliest;
8338 rtx want_reg;
8339 {
8340 enum rtx_code code;
8341 rtx prev = insn;
8342 rtx set;
8343 rtx tem;
8344 rtx op0, op1;
8345 int reverse_code = 0;
8346 enum machine_mode mode;
8347
8348 code = GET_CODE (cond);
8349 mode = GET_MODE (cond);
8350 op0 = XEXP (cond, 0);
8351 op1 = XEXP (cond, 1);
8352
8353 if (reverse)
8354 code = reversed_comparison_code (cond, insn);
8355 if (code == UNKNOWN)
8356 return 0;
8357
8358 if (earliest)
8359 *earliest = insn;
8360
8361 /* If we are comparing a register with zero, see if the register is set
8362 in the previous insn to a COMPARE or a comparison operation. Perform
8363 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8364 in cse.c */
8365
8366 while (GET_RTX_CLASS (code) == '<'
8367 && op1 == CONST0_RTX (GET_MODE (op0))
8368 && op0 != want_reg)
8369 {
8370 /* Set non-zero when we find something of interest. */
8371 rtx x = 0;
8372
8373 #ifdef HAVE_cc0
8374 /* If comparison with cc0, import actual comparison from compare
8375 insn. */
8376 if (op0 == cc0_rtx)
8377 {
8378 if ((prev = prev_nonnote_insn (prev)) == 0
8379 || GET_CODE (prev) != INSN
8380 || (set = single_set (prev)) == 0
8381 || SET_DEST (set) != cc0_rtx)
8382 return 0;
8383
8384 op0 = SET_SRC (set);
8385 op1 = CONST0_RTX (GET_MODE (op0));
8386 if (earliest)
8387 *earliest = prev;
8388 }
8389 #endif
8390
8391 /* If this is a COMPARE, pick up the two things being compared. */
8392 if (GET_CODE (op0) == COMPARE)
8393 {
8394 op1 = XEXP (op0, 1);
8395 op0 = XEXP (op0, 0);
8396 continue;
8397 }
8398 else if (GET_CODE (op0) != REG)
8399 break;
8400
8401 /* Go back to the previous insn. Stop if it is not an INSN. We also
8402 stop if it isn't a single set or if it has a REG_INC note because
8403 we don't want to bother dealing with it. */
8404
8405 if ((prev = prev_nonnote_insn (prev)) == 0
8406 || GET_CODE (prev) != INSN
8407 || FIND_REG_INC_NOTE (prev, 0))
8408 break;
8409
8410 set = set_of (op0, prev);
8411
8412 if (set
8413 && (GET_CODE (set) != SET
8414 || !rtx_equal_p (SET_DEST (set), op0)))
8415 break;
8416
8417 /* If this is setting OP0, get what it sets it to if it looks
8418 relevant. */
8419 if (set)
8420 {
8421 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8422
8423 /* ??? We may not combine comparisons done in a CCmode with
8424 comparisons not done in a CCmode. This is to aid targets
8425 like Alpha that have an IEEE compliant EQ instruction, and
8426 a non-IEEE compliant BEQ instruction. The use of CCmode is
8427 actually artificial, simply to prevent the combination, but
8428 should not affect other platforms.
8429
8430 However, we must allow VOIDmode comparisons to match either
8431 CCmode or non-CCmode comparison, because some ports have
8432 modeless comparisons inside branch patterns.
8433
8434 ??? This mode check should perhaps look more like the mode check
8435 in simplify_comparison in combine. */
8436
8437 if ((GET_CODE (SET_SRC (set)) == COMPARE
8438 || (((code == NE
8439 || (code == LT
8440 && GET_MODE_CLASS (inner_mode) == MODE_INT
8441 && (GET_MODE_BITSIZE (inner_mode)
8442 <= HOST_BITS_PER_WIDE_INT)
8443 && (STORE_FLAG_VALUE
8444 & ((HOST_WIDE_INT) 1
8445 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8446 #ifdef FLOAT_STORE_FLAG_VALUE
8447 || (code == LT
8448 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8449 && (REAL_VALUE_NEGATIVE
8450 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8451 #endif
8452 ))
8453 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
8454 && (((GET_MODE_CLASS (mode) == MODE_CC)
8455 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8456 || mode == VOIDmode || inner_mode == VOIDmode))
8457 x = SET_SRC (set);
8458 else if (((code == EQ
8459 || (code == GE
8460 && (GET_MODE_BITSIZE (inner_mode)
8461 <= HOST_BITS_PER_WIDE_INT)
8462 && GET_MODE_CLASS (inner_mode) == MODE_INT
8463 && (STORE_FLAG_VALUE
8464 & ((HOST_WIDE_INT) 1
8465 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8466 #ifdef FLOAT_STORE_FLAG_VALUE
8467 || (code == GE
8468 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8469 && (REAL_VALUE_NEGATIVE
8470 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8471 #endif
8472 ))
8473 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
8474 && (((GET_MODE_CLASS (mode) == MODE_CC)
8475 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8476 || mode == VOIDmode || inner_mode == VOIDmode))
8477
8478 {
8479 reverse_code = 1;
8480 x = SET_SRC (set);
8481 }
8482 else
8483 break;
8484 }
8485
8486 else if (reg_set_p (op0, prev))
8487 /* If this sets OP0, but not directly, we have to give up. */
8488 break;
8489
8490 if (x)
8491 {
8492 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
8493 code = GET_CODE (x);
8494 if (reverse_code)
8495 {
8496 code = reversed_comparison_code (x, prev);
8497 if (code == UNKNOWN)
8498 return 0;
8499 reverse_code = 0;
8500 }
8501
8502 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
8503 if (earliest)
8504 *earliest = prev;
8505 }
8506 }
8507
8508 /* If constant is first, put it last. */
8509 if (CONSTANT_P (op0))
8510 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
8511
8512 /* If OP0 is the result of a comparison, we weren't able to find what
8513 was really being compared, so fail. */
8514 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8515 return 0;
8516
8517 /* Canonicalize any ordered comparison with integers involving equality
8518 if we can do computations in the relevant mode and we do not
8519 overflow. */
8520
8521 if (GET_CODE (op1) == CONST_INT
8522 && GET_MODE (op0) != VOIDmode
8523 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
8524 {
8525 HOST_WIDE_INT const_val = INTVAL (op1);
8526 unsigned HOST_WIDE_INT uconst_val = const_val;
8527 unsigned HOST_WIDE_INT max_val
8528 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
8529
8530 switch (code)
8531 {
8532 case LE:
8533 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
8534 code = LT, op1 = GEN_INT (const_val + 1);
8535 break;
8536
8537 /* When cross-compiling, const_val might be sign-extended from
8538 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
8539 case GE:
8540 if ((HOST_WIDE_INT) (const_val & max_val)
8541 != (((HOST_WIDE_INT) 1
8542 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
8543 code = GT, op1 = GEN_INT (const_val - 1);
8544 break;
8545
8546 case LEU:
8547 if (uconst_val < max_val)
8548 code = LTU, op1 = GEN_INT (uconst_val + 1);
8549 break;
8550
8551 case GEU:
8552 if (uconst_val != 0)
8553 code = GTU, op1 = GEN_INT (uconst_val - 1);
8554 break;
8555
8556 default:
8557 break;
8558 }
8559 }
8560
8561 #ifdef HAVE_cc0
8562 /* Never return CC0; return zero instead. */
8563 if (op0 == cc0_rtx)
8564 return 0;
8565 #endif
8566
8567 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
8568 }
8569
8570 /* Given a jump insn JUMP, return the condition that will cause it to branch
8571 to its JUMP_LABEL. If the condition cannot be understood, or is an
8572 inequality floating-point comparison which needs to be reversed, 0 will
8573 be returned.
8574
8575 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8576 insn used in locating the condition was found. If a replacement test
8577 of the condition is desired, it should be placed in front of that
8578 insn and we will be sure that the inputs are still valid. */
8579
8580 rtx
8581 get_condition (jump, earliest)
8582 rtx jump;
8583 rtx *earliest;
8584 {
8585 rtx cond;
8586 int reverse;
8587 rtx set;
8588
8589 /* If this is not a standard conditional jump, we can't parse it. */
8590 if (GET_CODE (jump) != JUMP_INSN
8591 || ! any_condjump_p (jump))
8592 return 0;
8593 set = pc_set (jump);
8594
8595 cond = XEXP (SET_SRC (set), 0);
8596
8597 /* If this branches to JUMP_LABEL when the condition is false, reverse
8598 the condition. */
8599 reverse
8600 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
8601 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
8602
8603 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
8604 }
8605
8606 /* Similar to above routine, except that we also put an invariant last
8607 unless both operands are invariants. */
8608
8609 rtx
8610 get_condition_for_loop (loop, x)
8611 const struct loop *loop;
8612 rtx x;
8613 {
8614 rtx comparison = get_condition (x, (rtx*)0);
8615
8616 if (comparison == 0
8617 || ! loop_invariant_p (loop, XEXP (comparison, 0))
8618 || loop_invariant_p (loop, XEXP (comparison, 1)))
8619 return comparison;
8620
8621 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
8622 XEXP (comparison, 1), XEXP (comparison, 0));
8623 }
8624
8625 /* Scan the function and determine whether it has indirect (computed) jumps.
8626
8627 This is taken mostly from flow.c; similar code exists elsewhere
8628 in the compiler. It may be useful to put this into rtlanal.c. */
8629 static int
8630 indirect_jump_in_function_p (start)
8631 rtx start;
8632 {
8633 rtx insn;
8634
8635 for (insn = start; insn; insn = NEXT_INSN (insn))
8636 if (computed_jump_p (insn))
8637 return 1;
8638
8639 return 0;
8640 }
8641
8642 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8643 documentation for LOOP_MEMS for the definition of `appropriate'.
8644 This function is called from prescan_loop via for_each_rtx. */
8645
8646 static int
8647 insert_loop_mem (mem, data)
8648 rtx *mem;
8649 void *data ATTRIBUTE_UNUSED;
8650 {
8651 struct loop_info *loop_info = data;
8652 int i;
8653 rtx m = *mem;
8654
8655 if (m == NULL_RTX)
8656 return 0;
8657
8658 switch (GET_CODE (m))
8659 {
8660 case MEM:
8661 break;
8662
8663 case CLOBBER:
8664 /* We're not interested in MEMs that are only clobbered. */
8665 return -1;
8666
8667 case CONST_DOUBLE:
8668 /* We're not interested in the MEM associated with a
8669 CONST_DOUBLE, so there's no need to traverse into this. */
8670 return -1;
8671
8672 case EXPR_LIST:
8673 /* We're not interested in any MEMs that only appear in notes. */
8674 return -1;
8675
8676 default:
8677 /* This is not a MEM. */
8678 return 0;
8679 }
8680
8681 /* See if we've already seen this MEM. */
8682 for (i = 0; i < loop_info->mems_idx; ++i)
8683 if (rtx_equal_p (m, loop_info->mems[i].mem))
8684 {
8685 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
8686 /* The modes of the two memory accesses are different. If
8687 this happens, something tricky is going on, and we just
8688 don't optimize accesses to this MEM. */
8689 loop_info->mems[i].optimize = 0;
8690
8691 return 0;
8692 }
8693
8694 /* Resize the array, if necessary. */
8695 if (loop_info->mems_idx == loop_info->mems_allocated)
8696 {
8697 if (loop_info->mems_allocated != 0)
8698 loop_info->mems_allocated *= 2;
8699 else
8700 loop_info->mems_allocated = 32;
8701
8702 loop_info->mems = (loop_mem_info *)
8703 xrealloc (loop_info->mems,
8704 loop_info->mems_allocated * sizeof (loop_mem_info));
8705 }
8706
8707 /* Actually insert the MEM. */
8708 loop_info->mems[loop_info->mems_idx].mem = m;
8709 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8710 because we can't put it in a register. We still store it in the
8711 table, though, so that if we see the same address later, but in a
8712 non-BLK mode, we'll not think we can optimize it at that point. */
8713 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
8714 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
8715 ++loop_info->mems_idx;
8716
8717 return 0;
8718 }
8719
8720
8721 /* Allocate REGS->ARRAY or reallocate it if it is too small.
8722
8723 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
8724 register that is modified by an insn between FROM and TO. If the
8725 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
8726 more, stop incrementing it, to avoid overflow.
8727
8728 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
8729 register I is used, if it is only used once. Otherwise, it is set
8730 to 0 (for no uses) or const0_rtx for more than one use. This
8731 parameter may be zero, in which case this processing is not done.
8732
8733 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
8734 optimize register I. */
8735
8736 static void
8737 loop_regs_scan (loop, extra_size)
8738 const struct loop *loop;
8739 int extra_size;
8740 {
8741 struct loop_regs *regs = LOOP_REGS (loop);
8742 int old_nregs;
8743 /* last_set[n] is nonzero iff reg n has been set in the current
8744 basic block. In that case, it is the insn that last set reg n. */
8745 rtx *last_set;
8746 rtx insn;
8747 int i;
8748
8749 old_nregs = regs->num;
8750 regs->num = max_reg_num ();
8751
8752 /* Grow the regs array if not allocated or too small. */
8753 if (regs->num >= regs->size)
8754 {
8755 regs->size = regs->num + extra_size;
8756
8757 regs->array = (struct loop_reg *)
8758 xrealloc (regs->array, regs->size * sizeof (*regs->array));
8759
8760 /* Zero the new elements. */
8761 memset (regs->array + old_nregs, 0,
8762 (regs->size - old_nregs) * sizeof (*regs->array));
8763 }
8764
8765 /* Clear previously scanned fields but do not clear n_times_set. */
8766 for (i = 0; i < old_nregs; i++)
8767 {
8768 regs->array[i].set_in_loop = 0;
8769 regs->array[i].may_not_optimize = 0;
8770 regs->array[i].single_usage = NULL_RTX;
8771 }
8772
8773 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
8774
8775 /* Scan the loop, recording register usage. */
8776 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8777 insn = NEXT_INSN (insn))
8778 {
8779 if (INSN_P (insn))
8780 {
8781 /* Record registers that have exactly one use. */
8782 find_single_use_in_loop (regs, insn, PATTERN (insn));
8783
8784 /* Include uses in REG_EQUAL notes. */
8785 if (REG_NOTES (insn))
8786 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
8787
8788 if (GET_CODE (PATTERN (insn)) == SET
8789 || GET_CODE (PATTERN (insn)) == CLOBBER)
8790 count_one_set (regs, insn, PATTERN (insn), last_set);
8791 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8792 {
8793 register int i;
8794 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8795 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
8796 last_set);
8797 }
8798 }
8799
8800 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
8801 memset (last_set, 0, regs->num * sizeof (rtx));
8802 }
8803
8804 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8805 {
8806 regs->array[i].may_not_optimize = 1;
8807 regs->array[i].set_in_loop = 1;
8808 }
8809
8810 #ifdef AVOID_CCMODE_COPIES
8811 /* Don't try to move insns which set CC registers if we should not
8812 create CCmode register copies. */
8813 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
8814 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
8815 regs->array[i].may_not_optimize = 1;
8816 #endif
8817
8818 /* Set regs->array[I].n_times_set for the new registers. */
8819 for (i = old_nregs; i < regs->num; i++)
8820 regs->array[i].n_times_set = regs->array[i].set_in_loop;
8821
8822 free (last_set);
8823 }
8824
8825 /* Returns the number of real INSNs in the LOOP. */
8826
8827 static int
8828 count_insns_in_loop (loop)
8829 const struct loop *loop;
8830 {
8831 int count = 0;
8832 rtx insn;
8833
8834 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8835 insn = NEXT_INSN (insn))
8836 if (INSN_P (insn))
8837 ++count;
8838
8839 return count;
8840 }
8841
8842 /* Move MEMs into registers for the duration of the loop. */
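/* Source-level illustration (assumed, not from the original comments): if a
   loop repeatedly references *p and P is loop invariant, e.g.

	for (i = 0; i < n; i++)
	  *p += i;

   the MEM is loaded into a pseudo before the loop, the loop body operates
   on that pseudo, and the value is copied back to *p after the loop (and
   on any label through which the loop can be left early).  */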
8843
8844 static void
8845 load_mems (loop)
8846 const struct loop *loop;
8847 {
8848 struct loop_info *loop_info = LOOP_INFO (loop);
8849 struct loop_regs *regs = LOOP_REGS (loop);
8850 int maybe_never = 0;
8851 int i;
8852 rtx p, prev_ebb_head;
8853 rtx label = NULL_RTX;
8854 rtx end_label;
8855 /* Nonzero if the next instruction may never be executed. */
8856 int next_maybe_never = 0;
8857 unsigned int last_max_reg = max_reg_num ();
8858
8859 if (loop_info->mems_idx == 0)
8860 return;
8861
8862 /* We cannot use next_label here because it skips over normal insns. */
8863 end_label = next_nonnote_insn (loop->end);
8864 if (end_label && GET_CODE (end_label) != CODE_LABEL)
8865 end_label = NULL_RTX;
8866
8867 /* Check to see if it's possible that some instructions in the loop are
8868 never executed. Also check if there is a goto out of the loop other
8869 than right after the end of the loop. */
8870 for (p = next_insn_in_loop (loop, loop->scan_start);
8871 p != NULL_RTX;
8872 p = next_insn_in_loop (loop, p))
8873 {
8874 if (GET_CODE (p) == CODE_LABEL)
8875 maybe_never = 1;
8876 else if (GET_CODE (p) == JUMP_INSN
8877 /* If we enter the loop in the middle, and scan
8878 around to the beginning, don't set maybe_never
8879 for that. This must be an unconditional jump,
8880 otherwise the code at the top of the loop might
8881 never be executed. Unconditional jumps are
8882 followed by a barrier then the loop end. */
8883 && ! (GET_CODE (p) == JUMP_INSN
8884 && JUMP_LABEL (p) == loop->top
8885 && NEXT_INSN (NEXT_INSN (p)) == loop->end
8886 && any_uncondjump_p (p)))
8887 {
8888 /* If this is a jump outside of the loop but not right
8889 after the end of the loop, we would have to emit new fixup
8890 sequences for each such label. */
8891 if (/* If we can't tell where control might go when this
8892 JUMP_INSN is executed, we must be conservative. */
8893 !JUMP_LABEL (p)
8894 || (JUMP_LABEL (p) != end_label
8895 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
8896 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
8897 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
8898 return;
8899
8900 if (!any_condjump_p (p))
8901 /* Something complicated. */
8902 maybe_never = 1;
8903 else
8904 /* If there are any more instructions in the loop, they
8905 might not be reached. */
8906 next_maybe_never = 1;
8907 }
8908 else if (next_maybe_never)
8909 maybe_never = 1;
8910 }
8911
8912 /* Find start of the extended basic block that enters the loop. */
8913 for (p = loop->start;
8914 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
8915 p = PREV_INSN (p))
8916 ;
8917 prev_ebb_head = p;
8918
8919 cselib_init ();
8920
8921 /* Build table of mems that get set to constant values before the
8922 loop. */
8923 for (; p != loop->start; p = NEXT_INSN (p))
8924 cselib_process_insn (p);
8925
8926 /* Actually move the MEMs. */
8927 for (i = 0; i < loop_info->mems_idx; ++i)
8928 {
8929 regset_head load_copies;
8930 regset_head store_copies;
8931 int written = 0;
8932 rtx reg;
8933 rtx mem = loop_info->mems[i].mem;
8934 rtx mem_list_entry;
8935
8936 if (MEM_VOLATILE_P (mem)
8937 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8938 /* There's no telling whether or not MEM is modified. */
8939 loop_info->mems[i].optimize = 0;
8940
8941 /* Go through the MEMs written to in the loop to see if this
8942 one is aliased by one of them. */
8943 mem_list_entry = loop_info->store_mems;
8944 while (mem_list_entry)
8945 {
8946 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
8947 written = 1;
8948 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
8949 mem, rtx_varies_p))
8950 {
8951 /* MEM is indeed aliased by this store. */
8952 loop_info->mems[i].optimize = 0;
8953 break;
8954 }
8955 mem_list_entry = XEXP (mem_list_entry, 1);
8956 }
8957
8958 if (flag_float_store && written
8959 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
8960 loop_info->mems[i].optimize = 0;
8961
8962 /* If this MEM is written to, we must be sure that there
8963 are no reads from another MEM that aliases this one. */
8964 if (loop_info->mems[i].optimize && written)
8965 {
8966 int j;
8967
8968 for (j = 0; j < loop_info->mems_idx; ++j)
8969 {
8970 if (j == i)
8971 continue;
8972 else if (true_dependence (mem,
8973 VOIDmode,
8974 loop_info->mems[j].mem,
8975 rtx_varies_p))
8976 {
8977 /* It's not safe to hoist loop_info->mems[i] out of
8978 the loop because writes to it might not be
8979 seen by reads from loop_info->mems[j]. */
8980 loop_info->mems[i].optimize = 0;
8981 break;
8982 }
8983 }
8984 }
8985
8986 if (maybe_never && may_trap_p (mem))
8987 /* We can't access the MEM outside the loop; it might
8988 cause a trap that wouldn't have happened otherwise. */
8989 loop_info->mems[i].optimize = 0;
8990
8991 if (!loop_info->mems[i].optimize)
8992 /* We thought we were going to lift this MEM out of the
8993 loop, but later discovered that we could not. */
8994 continue;
8995
8996 INIT_REG_SET (&load_copies);
8997 INIT_REG_SET (&store_copies);
8998
8999 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9000 order to keep scan_loop from moving stores to this MEM
9001 out of the loop just because this REG is neither a
9002 user-variable nor used in the loop test. */
9003 reg = gen_reg_rtx (GET_MODE (mem));
9004 REG_USERVAR_P (reg) = 1;
9005 loop_info->mems[i].reg = reg;
9006
9007 /* Now, replace all references to the MEM with the
9008 corresponding pseudos. */
9009 maybe_never = 0;
9010 for (p = next_insn_in_loop (loop, loop->scan_start);
9011 p != NULL_RTX;
9012 p = next_insn_in_loop (loop, p))
9013 {
9014 if (INSN_P (p))
9015 {
9016 rtx set;
9017
9018 set = single_set (p);
9019
9020 /* See if this copies the mem into a register that isn't
9021 modified afterwards. We'll try to do copy propagation
9022 a little further on. */
9023 if (set
9024 /* @@@ This test is _way_ too conservative. */
9025 && ! maybe_never
9026 && GET_CODE (SET_DEST (set)) == REG
9027 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9028 && REGNO (SET_DEST (set)) < last_max_reg
9029 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9030 && rtx_equal_p (SET_SRC (set), mem))
9031 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9032
9033 /* See if this copies the mem from a register that isn't
9034 modified afterwards. We'll try to remove the
9035 redundant copy later on by doing a little register
9036 renaming and copy propagation. This will help
9037 to untangle things for the BIV detection code. */
9038 if (set
9039 && ! maybe_never
9040 && GET_CODE (SET_SRC (set)) == REG
9041 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9042 && REGNO (SET_SRC (set)) < last_max_reg
9043 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9044 && rtx_equal_p (SET_DEST (set), mem))
9045 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9046
9047 /* Replace the memory reference with the shadow register. */
9048 replace_loop_mems (p, loop_info->mems[i].mem,
9049 loop_info->mems[i].reg);
9050 }
9051
9052 if (GET_CODE (p) == CODE_LABEL
9053 || GET_CODE (p) == JUMP_INSN)
9054 maybe_never = 1;
9055 }
9056
9057 if (! apply_change_group ())
9058 /* We couldn't replace all occurrences of the MEM. */
9059 loop_info->mems[i].optimize = 0;
9060 else
9061 {
9062 /* Load the memory immediately before LOOP->START, which is
9063 the NOTE_LOOP_BEG. */
9064 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9065 rtx set;
9066 rtx best = mem;
9067 int j;
9068 struct elt_loc_list *const_equiv = 0;
9069
9070 if (e)
9071 {
9072 struct elt_loc_list *equiv;
9073 struct elt_loc_list *best_equiv = 0;
9074 for (equiv = e->locs; equiv; equiv = equiv->next)
9075 {
9076 if (CONSTANT_P (equiv->loc))
9077 const_equiv = equiv;
9078 else if (GET_CODE (equiv->loc) == REG
9079 /* Extending hard register lifetimes causes a crash
9080 on SRC targets. Doing so on non-SRC targets is
9081 probably not a good idea either, since we most
9082 probably have a pseudoregister equivalence as
9083 well. */
9084 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9085 best_equiv = equiv;
9086 }
9087 /* Use the constant equivalence if that is cheap enough. */
9088 if (! best_equiv)
9089 best_equiv = const_equiv;
9090 else if (const_equiv
9091 && (rtx_cost (const_equiv->loc, SET)
9092 <= rtx_cost (best_equiv->loc, SET)))
9093 {
9094 best_equiv = const_equiv;
9095 const_equiv = 0;
9096 }
9097
9098 /* If best_equiv is nonzero, we know that MEM is set to a
9099 constant or register before the loop. We will use this
9100 knowledge to initialize the shadow register with that
9101 constant or reg rather than by loading from MEM. */
9102 if (best_equiv)
9103 best = copy_rtx (best_equiv->loc);
9104 }
9105
9106 set = gen_move_insn (reg, best);
9107 set = loop_insn_hoist (loop, set);
9108 if (REG_P (best))
9109 {
9110 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9111 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9112 {
9113 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9114 break;
9115 }
9116 }
9117
9118 if (const_equiv)
9119 REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
9120 copy_rtx (const_equiv->loc),
9121 REG_NOTES (set));
9122
9123 if (written)
9124 {
9125 if (label == NULL_RTX)
9126 {
9127 label = gen_label_rtx ();
9128 emit_label_after (label, loop->end);
9129 }
9130
9131 /* Store the memory immediately after END, which is
9132 the NOTE_LOOP_END. */
9133 set = gen_move_insn (copy_rtx (mem), reg);
9134 loop_insn_emit_after (loop, 0, label, set);
9135 }
9136
9137 if (loop_dump_stream)
9138 {
9139 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9140 REGNO (reg), (written ? "r/w" : "r/o"));
9141 print_rtl (loop_dump_stream, mem);
9142 fputc ('\n', loop_dump_stream);
9143 }
9144
9145 /* Attempt a bit of copy propagation. This helps untangle the
9146 data flow, and enables {basic,general}_induction_var to find
9147 more bivs/givs. */
9148 EXECUTE_IF_SET_IN_REG_SET
9149 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9150 {
9151 try_copy_prop (loop, reg, j);
9152 });
9153 CLEAR_REG_SET (&load_copies);
9154
9155 EXECUTE_IF_SET_IN_REG_SET
9156 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9157 {
9158 try_swap_copy_prop (loop, reg, j);
9159 });
9160 CLEAR_REG_SET (&store_copies);
9161 }
9162 }
9163
9164 if (label != NULL_RTX && end_label != NULL_RTX)
9165 {
9166 /* Now, we need to replace all references to the previous exit
9167 label with the new one. */
9168 rtx_pair rr;
9169 rr.r1 = end_label;
9170 rr.r2 = label;
9171
9172 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9173 {
9174 for_each_rtx (&p, replace_label, &rr);
9175
9176 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9177 field. This is not handled by for_each_rtx because it doesn't
9178 handle unprinted ('0') fields. We need to update JUMP_LABEL
9179 because the immediately following unroll pass will use it.
9180 replace_label would not work anyway, because it only handles
9181 LABEL_REFs. */
9182 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9183 JUMP_LABEL (p) = label;
9184 }
9185 }
9186
9187 cselib_finish ();
9188 }
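
/* An illustrative C-level sketch of the transformation performed by
   load_mems (hypothetical example, not the RTL actually emitted).
   Given a MEM *p whose address is loop invariant and that is not
   aliased by any store within the loop:

	loop:                              reg = *p;       <- hoisted load
	  ... = *p;                        loop:
	  *p = ...;            becomes       ... = reg;
	  if (cond) goto loop;               reg = ...;
	old_exit:                            if (cond) goto loop;
	                                   new_exit:
	                                     *p = reg;     <- store-back
	                                   old_exit:

   The store-back and the NEW_EXIT label are emitted only when the MEM
   is written inside the loop, and jumps that previously targeted
   OLD_EXIT are redirected to NEW_EXIT so that leaving the loop does
   not skip the store.  */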
9189
9190 /* For communication between note_reg_stored and its caller. */
9191 struct note_reg_stored_arg
9192 {
9193 int set_seen;
9194 rtx reg;
9195 };
9196
9197 /* Called via note_stores; if X, which is written, is equal to the register
9198 in ARG, record this in ARG's SET_SEEN field. */
9199 static void
9200 note_reg_stored (x, setter, arg)
9201 rtx x, setter ATTRIBUTE_UNUSED;
9202 void *arg;
9203 {
9204 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9205 if (t->reg == x)
9206 t->set_seen = 1;
9207 }
9208
9209 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9210 There must be exactly one insn that sets this pseudo; it will be
9211 deleted if all replacements succeed and we can prove that the register
9212 is not used after the loop. */
9213
9214 static void
9215 try_copy_prop (loop, replacement, regno)
9216 const struct loop *loop;
9217 rtx replacement;
9218 unsigned int regno;
9219 {
9220 /* This is the reg that we are copying from. */
9221 rtx reg_rtx = regno_reg_rtx[regno];
9222 rtx init_insn = 0;
9223 rtx insn;
9224 /* These help keep track of whether we replaced all uses of the reg. */
9225 int replaced_last = 0;
9226 int store_is_first = 0;
9227
9228 for (insn = next_insn_in_loop (loop, loop->scan_start);
9229 insn != NULL_RTX;
9230 insn = next_insn_in_loop (loop, insn))
9231 {
9232 rtx set;
9233
9234 /* Only substitute within one extended basic block from the initializing
9235 insn. */
9236 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9237 break;
9238
9239 if (! INSN_P (insn))
9240 continue;
9241
9242 /* Is this the initializing insn? */
9243 set = single_set (insn);
9244 if (set
9245 && GET_CODE (SET_DEST (set)) == REG
9246 && REGNO (SET_DEST (set)) == regno)
9247 {
9248 if (init_insn)
9249 abort ();
9250
9251 init_insn = insn;
9252 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9253 store_is_first = 1;
9254 }
9255
9256 /* Only substitute after seeing the initializing insn. */
9257 if (init_insn && insn != init_insn)
9258 {
9259 struct note_reg_stored_arg arg;
9260
9261 replace_loop_regs (insn, reg_rtx, replacement);
9262 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9263 replaced_last = 1;
9264
9265 /* Stop replacing when REPLACEMENT is modified. */
9266 arg.reg = replacement;
9267 arg.set_seen = 0;
9268 note_stores (PATTERN (insn), note_reg_stored, &arg);
9269 if (arg.set_seen)
9270 break;
9271 }
9272 }
9273 if (! init_insn)
9274 abort ();
9275 if (apply_change_group ())
9276 {
9277 if (loop_dump_stream)
9278 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9279 if (store_is_first && replaced_last)
9280 {
9281 rtx first;
9282 rtx retval_note;
9283
9284 /* Assume we're just deleting INIT_INSN. */
9285 first = init_insn;
9286 /* Look for REG_RETVAL note. If we're deleting the end of
9287 the libcall sequence, the whole sequence can go. */
9288 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9289 /* If we found a REG_RETVAL note, find the first instruction
9290 in the sequence. */
9291 if (retval_note)
9292 first = XEXP (retval_note, 0);
9293
9294 /* Delete the instructions. */
9295 loop_delete_insns (first, init_insn);
9296 }
9297 if (loop_dump_stream)
9298 fprintf (loop_dump_stream, ".\n");
9299 }
9300 }
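
/* A hypothetical illustration of the copy propagation attempted above
   (register numbers invented for the example).  Suppose the loop
   contains

	(set (reg 117) (reg 200))			<- init_insn
	(set (reg 118) (plus (reg 117) (const_int 4)))

   and try_copy_prop is called with REGNO 117 and REPLACEMENT
   (reg 200).  The use of (reg 117) in the second insn is replaced by
   (reg 200); if init_insn was also the first set of reg 117 and its
   last use has now been replaced, init_insn (or the whole libcall
   sequence ending in it, found through its REG_RETVAL note) is turned
   into NOTE_INSN_DELETED notes by loop_delete_insns.  */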
9301
9302 /* Replace all the instructions from FIRST up to and including LAST
9303 with NOTE_INSN_DELETED notes. */
9304
9305 static void
9306 loop_delete_insns (first, last)
9307 rtx first;
9308 rtx last;
9309 {
9310 while (1)
9311 {
9312 PUT_CODE (first, NOTE);
9313 NOTE_LINE_NUMBER (first) = NOTE_INSN_DELETED;
9314 if (loop_dump_stream)
9315 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
9316 INSN_UID (first));
9317
9318 /* If this was the LAST instruction we're supposed to delete,
9319 we're done. */
9320 if (first == last)
9321 break;
9322
9323 first = NEXT_INSN (first);
9324 }
9325 }
9326
9327 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
9328 loop LOOP if the order of the sets of these registers can be
9329 swapped. There must be exactly one insn within the loop that sets
9330 this pseudo, followed immediately by a move insn that copies
9331 REGNO into REPLACEMENT. */
9332 static void
9333 try_swap_copy_prop (loop, replacement, regno)
9334 const struct loop *loop;
9335 rtx replacement;
9336 unsigned int regno;
9337 {
9338 rtx insn;
9339 rtx set = NULL_RTX;
9340 unsigned int new_regno;
9341
9342 new_regno = REGNO (replacement);
9343
9344 for (insn = next_insn_in_loop (loop, loop->scan_start);
9345 insn != NULL_RTX;
9346 insn = next_insn_in_loop (loop, insn))
9347 {
9348 /* Search for the insn that copies REGNO to NEW_REGNO. */
9349 if (INSN_P (insn)
9350 && (set = single_set (insn))
9351 && GET_CODE (SET_DEST (set)) == REG
9352 && REGNO (SET_DEST (set)) == new_regno
9353 && GET_CODE (SET_SRC (set)) == REG
9354 && REGNO (SET_SRC (set)) == regno)
9355 break;
9356 }
9357
9358 if (insn != NULL_RTX)
9359 {
9360 rtx prev_insn;
9361 rtx prev_set;
9362
9363 /* Some DEF-USE info would come in handy here to make this
9364 function more general. For now, just check the previous insn
9365 which is the most likely candidate for setting REGNO. */
9366
9367 prev_insn = PREV_INSN (insn);
9368
9369 if (INSN_P (prev_insn)
9370 && (prev_set = single_set (prev_insn))
9371 && GET_CODE (SET_DEST (prev_set)) == REG
9372 && REGNO (SET_DEST (prev_set)) == regno)
9373 {
9374 /* We have:
9375 (set (reg regno) (expr))
9376 (set (reg new_regno) (reg regno))
9377
9378 so try converting this to:
9379 (set (reg new_regno) (expr))
9380 (set (reg regno) (reg new_regno))
9381
9382 The former construct is often generated when a global
9383 variable used for an induction variable is shadowed by a
9384 register (NEW_REGNO). The latter construct improves the
9385 chances of GIV replacement and BIV elimination. */
9386
9387 validate_change (prev_insn, &SET_DEST (prev_set),
9388 replacement, 1);
9389 validate_change (insn, &SET_DEST (set),
9390 SET_SRC (set), 1);
9391 validate_change (insn, &SET_SRC (set),
9392 replacement, 1);
9393
9394 if (apply_change_group ())
9395 {
9396 if (loop_dump_stream)
9397 fprintf (loop_dump_stream,
9398 " Swapped set of reg %d at %d with reg %d at %d.\n",
9399 regno, INSN_UID (insn),
9400 new_regno, INSN_UID (prev_insn));
9401
9402 /* Update first use of REGNO. */
9403 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
9404 REGNO_FIRST_UID (regno) = INSN_UID (insn);
9405
9406 /* Now perform copy propagation to hopefully
9407 remove all uses of REGNO within the loop. */
9408 try_copy_prop (loop, replacement, regno);
9409 }
9410 }
9411 }
9412 }
9413
9414 /* Replace MEM with its associated pseudo register. This function is
9415 called from load_mems via for_each_rtx. DATA is actually a pointer
9416 to a structure describing the instruction currently being scanned
9417 and the MEM we are currently replacing. */
9418
9419 static int
9420 replace_loop_mem (mem, data)
9421 rtx *mem;
9422 void *data;
9423 {
9424 loop_replace_args *args = (loop_replace_args *) data;
9425 rtx m = *mem;
9426
9427 if (m == NULL_RTX)
9428 return 0;
9429
9430 switch (GET_CODE (m))
9431 {
9432 case MEM:
9433 break;
9434
9435 case CONST_DOUBLE:
9436 /* We're not interested in the MEM associated with a
9437 CONST_DOUBLE, so there's no need to traverse into one. */
9438 return -1;
9439
9440 default:
9441 /* This is not a MEM. */
9442 return 0;
9443 }
9444
9445 if (!rtx_equal_p (args->match, m))
9446 /* This is not the MEM we are currently replacing. */
9447 return 0;
9448
9449 /* Actually replace the MEM. */
9450 validate_change (args->insn, mem, args->replacement, 1);
9451
9452 return 0;
9453 }
9454
9455 static void
9456 replace_loop_mems (insn, mem, reg)
9457 rtx insn;
9458 rtx mem;
9459 rtx reg;
9460 {
9461 loop_replace_args args;
9462
9463 args.insn = insn;
9464 args.match = mem;
9465 args.replacement = reg;
9466
9467 for_each_rtx (&insn, replace_loop_mem, &args);
9468 }
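
/* Both replace_loop_mems above and replace_loop_regs below queue their
   replacements with validate_change (..., 1), i.e. as members of a
   change group; the group only becomes final when the caller's
   apply_change_group succeeds, and is cancelled otherwise.  This is
   how load_mems and try_copy_prop back out when some occurrence
   cannot be replaced.  */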
9469
9470 /* Replace one register with another. Called through for_each_rtx; PX points
9471 to the rtx being scanned. DATA is actually a pointer to
9472 a structure of arguments. */
9473
9474 static int
9475 replace_loop_reg (px, data)
9476 rtx *px;
9477 void *data;
9478 {
9479 rtx x = *px;
9480 loop_replace_args *args = (loop_replace_args *) data;
9481
9482 if (x == NULL_RTX)
9483 return 0;
9484
9485 if (x == args->match)
9486 validate_change (args->insn, px, args->replacement, 1);
9487
9488 return 0;
9489 }
9490
9491 static void
9492 replace_loop_regs (insn, reg, replacement)
9493 rtx insn;
9494 rtx reg;
9495 rtx replacement;
9496 {
9497 loop_replace_args args;
9498
9499 args.insn = insn;
9500 args.match = reg;
9501 args.replacement = replacement;
9502
9503 for_each_rtx (&insn, replace_loop_reg, &args);
9504 }
9505
9506 /* Replace occurrences of the old exit label for the loop with the new
9507 one. DATA is an rtx_pair containing the old and new labels,
9508 respectively. */
9509
9510 static int
9511 replace_label (x, data)
9512 rtx *x;
9513 void *data;
9514 {
9515 rtx l = *x;
9516 rtx old_label = ((rtx_pair *) data)->r1;
9517 rtx new_label = ((rtx_pair *) data)->r2;
9518
9519 if (l == NULL_RTX)
9520 return 0;
9521
9522 if (GET_CODE (l) != LABEL_REF)
9523 return 0;
9524
9525 if (XEXP (l, 0) != old_label)
9526 return 0;
9527
9528 XEXP (l, 0) = new_label;
9529 ++LABEL_NUSES (new_label);
9530 --LABEL_NUSES (old_label);
9531
9532 return 0;
9533 }
9534 \f
9535 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
9536 (ignored in the interim). */
9537
9538 static rtx
9539 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
9540 const struct loop *loop ATTRIBUTE_UNUSED;
9541 basic_block where_bb ATTRIBUTE_UNUSED;
9542 rtx where_insn;
9543 rtx pattern;
9544 {
9545 return emit_insn_after (pattern, where_insn);
9546 }
9547
9548
9549 /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN
9550 in basic block WHERE_BB (ignored in the interim) within the loop;
9551 otherwise hoist PATTERN into the loop pre-header. */
9552
9553 rtx
9554 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
9555 const struct loop *loop;
9556 basic_block where_bb ATTRIBUTE_UNUSED;
9557 rtx where_insn;
9558 rtx pattern;
9559 {
9560 if (! where_insn)
9561 return loop_insn_hoist (loop, pattern);
9562 return emit_insn_before (pattern, where_insn);
9563 }
9564
9565
9566 /* Emit call insn for PATTERN before WHERE_INSN in basic block
9567 WHERE_BB (ignored in the interim) within the loop. */
9568
9569 static rtx
9570 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
9571 const struct loop *loop ATTRIBUTE_UNUSED;
9572 basic_block where_bb ATTRIBUTE_UNUSED;
9573 rtx where_insn;
9574 rtx pattern;
9575 {
9576 return emit_call_insn_before (pattern, where_insn);
9577 }
9578
9579
9580 /* Hoist insn for PATTERN into the loop pre-header. */
9581
9582 rtx
9583 loop_insn_hoist (loop, pattern)
9584 const struct loop *loop;
9585 rtx pattern;
9586 {
9587 return loop_insn_emit_before (loop, 0, loop->start, pattern);
9588 }
9589
9590
9591 /* Hoist call insn for PATTERN into the loop pre-header. */
9592
9593 static rtx
9594 loop_call_insn_hoist (loop, pattern)
9595 const struct loop *loop;
9596 rtx pattern;
9597 {
9598 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
9599 }
9600
9601
9602 /* Sink insn for PATTERN after the loop end. */
9603
9604 rtx
9605 loop_insn_sink (loop, pattern)
9606 const struct loop *loop;
9607 rtx pattern;
9608 {
9609 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
9610 }
9611
9612
9613 /* If the loop has multiple exits, emit insn for PATTERN before the
9614 loop to ensure that it will always be executed no matter how the
9615 loop exits. Otherwise, emit the insn for PATTERN after the loop,
9616 since this is slightly more efficient. */
9617
9618 static rtx
9619 loop_insn_sink_or_swim (loop, pattern)
9620 const struct loop *loop;
9621 rtx pattern;
9622 {
9623 if (loop->exit_count)
9624 return loop_insn_hoist (loop, pattern);
9625 else
9626 return loop_insn_sink (loop, pattern);
9627 }
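
/* Typical usage of the emit helpers above, as in load_mems: an
   initialization that must happen before the first iteration is
   hoisted into the pre-header along the lines of

	loop_insn_hoist (loop, gen_move_insn (reg, best));

   while code needed only once the loop has finished goes through
   loop_insn_sink, or loop_insn_sink_or_swim when the loop may also be
   left through side exits and the insn must still be executed.  */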
9628 \f
9629 static void
9630 loop_ivs_dump (loop, file, verbose)
9631 const struct loop *loop;
9632 FILE *file;
9633 int verbose;
9634 {
9635 struct iv_class *bl;
9636 int iv_num = 0;
9637
9638 if (! loop || ! file)
9639 return;
9640
9641 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9642 iv_num++;
9643
9644 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
9645
9646 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9647 {
9648 loop_iv_class_dump (bl, file, verbose);
9649 fputc ('\n', file);
9650 }
9651 }
9652
9653
9654 static void
9655 loop_iv_class_dump (bl, file, verbose)
9656 const struct iv_class *bl;
9657 FILE *file;
9658 int verbose ATTRIBUTE_UNUSED;
9659 {
9660 struct induction *v;
9661 rtx incr;
9662 int i;
9663
9664 if (! bl || ! file)
9665 return;
9666
9667 fprintf (file, "IV class for reg %d, benefit %d\n",
9668 bl->regno, bl->total_benefit);
9669
9670 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
9671 if (bl->initial_value)
9672 {
9673 fprintf (file, ", init val: ");
9674 print_simple_rtl (file, bl->initial_value);
9675 }
9676 if (bl->initial_test)
9677 {
9678 fprintf (file, ", init test: ");
9679 print_simple_rtl (file, bl->initial_test);
9680 }
9681 fputc ('\n', file);
9682
9683 if (bl->final_value)
9684 {
9685 fprintf (file, " Final val: ");
9686 print_simple_rtl (file, bl->final_value);
9687 fputc ('\n', file);
9688 }
9689
9690 if ((incr = biv_total_increment (bl)))
9691 {
9692 fprintf (file, " Total increment: ");
9693 print_simple_rtl (file, incr);
9694 fputc ('\n', file);
9695 }
9696
9697 /* List the increments. */
9698 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
9699 {
9700 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
9701 print_simple_rtl (file, v->add_val);
9702 fputc ('\n', file);
9703 }
9704
9705 /* List the givs. */
9706 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
9707 {
9708 fprintf (file, " Giv%d: insn %d, benefit %d, ",
9709 i, INSN_UID (v->insn), v->benefit);
9710 if (v->giv_type == DEST_ADDR)
9711 print_simple_rtl (file, v->mem);
9712 else
9713 print_simple_rtl (file, single_set (v->insn));
9714 fputc ('\n', file);
9715 }
9716 }
9717
9718
9719 static void
9720 loop_biv_dump (v, file, verbose)
9721 const struct induction *v;
9722 FILE *file;
9723 int verbose;
9724 {
9725 if (! v || ! file)
9726 return;
9727
9728 fprintf (file,
9729 "Biv %d: insn %d",
9730 REGNO (v->dest_reg), INSN_UID (v->insn));
9731 fprintf (file, " const ");
9732 print_simple_rtl (file, v->add_val);
9733
9734 if (verbose && v->final_value)
9735 {
9736 fputc ('\n', file);
9737 fprintf (file, " final ");
9738 print_simple_rtl (file, v->final_value);
9739 }
9740
9741 fputc ('\n', file);
9742 }
9743
9744
9745 static void
9746 loop_giv_dump (v, file, verbose)
9747 const struct induction *v;
9748 FILE *file;
9749 int verbose;
9750 {
9751 if (! v || ! file)
9752 return;
9753
9754 if (v->giv_type == DEST_REG)
9755 fprintf (file, "Giv %d: insn %d",
9756 REGNO (v->dest_reg), INSN_UID (v->insn));
9757 else
9758 fprintf (file, "Dest address: insn %d",
9759 INSN_UID (v->insn));
9760
9761 fprintf (file, " src reg %d benefit %d",
9762 REGNO (v->src_reg), v->benefit);
9763 fprintf (file, " lifetime %d",
9764 v->lifetime);
9765
9766 if (v->replaceable)
9767 fprintf (file, " replaceable");
9768
9769 if (v->no_const_addval)
9770 fprintf (file, " ncav");
9771
9772 if (v->ext_dependant)
9773 {
9774 switch (GET_CODE (v->ext_dependant))
9775 {
9776 case SIGN_EXTEND:
9777 fprintf (file, " ext se");
9778 break;
9779 case ZERO_EXTEND:
9780 fprintf (file, " ext ze");
9781 break;
9782 case TRUNCATE:
9783 fprintf (file, " ext tr");
9784 break;
9785 default:
9786 abort ();
9787 }
9788 }
9789
9790 fputc ('\n', file);
9791 fprintf (file, " mult ");
9792 print_simple_rtl (file, v->mult_val);
9793
9794 fputc ('\n', file);
9795 fprintf (file, " add ");
9796 print_simple_rtl (file, v->add_val);
9797
9798 if (verbose && v->final_value)
9799 {
9800 fputc ('\n', file);
9801 fprintf (file, " final ");
9802 print_simple_rtl (file, v->final_value);
9803 }
9804
9805 fputc ('\n', file);
9806 }
9807
9808
9809 void
9810 debug_ivs (loop)
9811 const struct loop *loop;
9812 {
9813 loop_ivs_dump (loop, stderr, 1);
9814 }
9815
9816
9817 void
9818 debug_iv_class (bl)
9819 const struct iv_class *bl;
9820 {
9821 loop_iv_class_dump (bl, stderr, 1);
9822 }
9823
9824
9825 void
9826 debug_biv (v)
9827 const struct induction *v;
9828 {
9829 loop_biv_dump (v, stderr, 1);
9830 }
9831
9832
9833 void
9834 debug_giv (v)
9835 const struct induction *v;
9836 {
9837 loop_giv_dump (v, stderr, 1);
9838 }
9839
9840
9841 #define LOOP_BLOCK_NUM_1(INSN) \
9842 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
9843
9844 /* The notes do not have an assigned block, so look at the next insn. */
9845 #define LOOP_BLOCK_NUM(INSN) \
9846 ((INSN) ? (GET_CODE (INSN) == NOTE \
9847 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
9848 : LOOP_BLOCK_NUM_1 (INSN)) \
9849 : -1)
9850
9851 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
9852
9853 static void
9854 loop_dump_aux (loop, file, verbose)
9855 const struct loop *loop;
9856 FILE *file;
9857 int verbose ATTRIBUTE_UNUSED;
9858 {
9859 rtx label;
9860
9861 if (! loop || ! file)
9862 return;
9863
9864 /* Print diagnostics to compare our concept of a loop with
9865 what the loop notes say. */
9866 if (! PREV_INSN (loop->first->head)
9867 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
9868 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
9869 != NOTE_INSN_LOOP_BEG)
9870 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
9871 INSN_UID (PREV_INSN (loop->first->head)));
9872 if (! NEXT_INSN (loop->last->end)
9873 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
9874 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
9875 != NOTE_INSN_LOOP_END)
9876 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
9877 INSN_UID (NEXT_INSN (loop->last->end)));
9878
9879 if (loop->start)
9880 {
9881 fprintf (file,
9882 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
9883 LOOP_BLOCK_NUM (loop->start),
9884 LOOP_INSN_UID (loop->start),
9885 LOOP_BLOCK_NUM (loop->cont),
9886 LOOP_INSN_UID (loop->cont),
9887 LOOP_BLOCK_NUM (loop->cont),
9888 LOOP_INSN_UID (loop->cont),
9889 LOOP_BLOCK_NUM (loop->vtop),
9890 LOOP_INSN_UID (loop->vtop),
9891 LOOP_BLOCK_NUM (loop->end),
9892 LOOP_INSN_UID (loop->end));
9893 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
9894 LOOP_BLOCK_NUM (loop->top),
9895 LOOP_INSN_UID (loop->top),
9896 LOOP_BLOCK_NUM (loop->scan_start),
9897 LOOP_INSN_UID (loop->scan_start));
9898 fprintf (file, ";; exit_count %d", loop->exit_count);
9899 if (loop->exit_count)
9900 {
9901 fputs (", labels:", file);
9902 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
9903 {
9904 fprintf (file, " %d ",
9905 LOOP_INSN_UID (XEXP (label, 0)));
9906 }
9907 }
9908 fputs ("\n", file);
9909
9910 /* This can happen when a marked loop appears as two nested loops,
9911 say from while (a || b) {}. The inner loop won't match
9912 the loop markers but the outer one will. */
9913 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
9914 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
9915 }
9916 }
9917
9918 /* Call this function from the debugger to dump LOOP. */
9919
9920 void
9921 debug_loop (loop)
9922 const struct loop *loop;
9923 {
9924 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
9925 }
9926
9927 /* Call this function from the debugger to dump LOOPS. */
9928
9929 void
9930 debug_loops (loops)
9931 const struct loops *loops;
9932 {
9933 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
9934 }