* config/alpha/x-vms (version): Change "." to "_".
[gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56
57 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
58 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
59
60 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
61 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
62 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
63
64
65 /* Vector mapping INSN_UIDs to luids.
66 The luids are like uids but increase monotonically always.
67 We use them to see whether a jump comes from outside a given loop. */
68
69 int *uid_luid;
70
71 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
72 number the insn is contained in. */
73
74 struct loop **uid_loop;
75
76 /* 1 + largest uid of any insn. */
77
78 int max_uid_for_loop;
79
80 /* 1 + luid of last insn. */
81
82 static int max_luid;
83
84 /* Number of loops detected in current function. Used as index to the
85 next few tables. */
86
87 static int max_loop_num;
88
89 /* Bound on pseudo register number before loop optimization.
90 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
91 unsigned int max_reg_before_loop;
92
93 /* The value to pass to the next call of reg_scan_update. */
94 static int loop_max_reg;
95
96 #define obstack_chunk_alloc xmalloc
97 #define obstack_chunk_free free
98 \f
99 /* During the analysis of a loop, a chain of `struct movable's
100 is made to record all the movable insns found.
101 Then the entire chain can be scanned to decide which to move. */
102
103 struct movable
104 {
105 rtx insn; /* A movable insn */
106 rtx set_src; /* The expression this reg is set from. */
107 rtx set_dest; /* The destination of this SET. */
108 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
109 of any registers used within the LIBCALL. */
110 int consec; /* Number of consecutive following insns
111 that must be moved with this one. */
112 unsigned int regno; /* The register it sets */
113 short lifetime; /* lifetime of that register;
114 may be adjusted when matching movables
115 that load the same value are found. */
116 short savings; /* Number of insns we can move for this reg,
117 including other movables that force this
118 or match this one. */
119 unsigned int cond : 1; /* 1 if only conditionally movable */
120 unsigned int force : 1; /* 1 means MUST move this insn */
121 unsigned int global : 1; /* 1 means reg is live outside this loop */
122 /* If PARTIAL is 1, GLOBAL means something different:
123 that the reg is live outside the range from where it is set
124 to the following label. */
125 unsigned int done : 1; /* 1 inhibits further processing of this */
126
127 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
128 In particular, moving it does not make it
129 invariant. */
130 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
131 load SRC, rather than copying INSN. */
132 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
133 first insn of a consecutive sets group. */
134 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
135 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
136 that we should avoid changing when clearing
137 the rest of the reg. */
138 struct movable *match; /* First entry for same value */
139 struct movable *forces; /* An insn that must be moved if this is */
140 struct movable *next;
141 };
142
143
144 FILE *loop_dump_stream;
145
146 /* Forward declarations. */
147
148 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
149 static void mark_loop_jump PARAMS ((rtx, struct loop *));
150 static void prescan_loop PARAMS ((struct loop *));
151 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
152 static int consec_sets_invariant_p PARAMS ((const struct loop *,
153 rtx, int, rtx));
154 static int labels_in_range_p PARAMS ((rtx, int));
155 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
156 static void note_addr_stored PARAMS ((rtx, rtx, void *));
157 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
158 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
159 static void scan_loop PARAMS ((struct loop*, int));
160 #if 0
161 static void replace_call_address PARAMS ((rtx, rtx, rtx));
162 #endif
163 static rtx skip_consec_insns PARAMS ((rtx, int));
164 static int libcall_benefit PARAMS ((rtx));
165 static void ignore_some_movables PARAMS ((struct loop_movables *));
166 static void force_movables PARAMS ((struct loop_movables *));
167 static void combine_movables PARAMS ((struct loop_movables *,
168 struct loop_regs *));
169 static int num_unmoved_movables PARAMS ((const struct loop *));
170 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
171 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
172 struct loop_regs *));
173 static void add_label_notes PARAMS ((rtx, rtx));
174 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
175 int, int));
176 static void loop_movables_add PARAMS((struct loop_movables *,
177 struct movable *));
178 static void loop_movables_free PARAMS((struct loop_movables *));
179 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
180 static void loop_bivs_find PARAMS((struct loop *));
181 static void loop_bivs_init_find PARAMS((struct loop *));
182 static void loop_bivs_check PARAMS((struct loop *));
183 static void loop_givs_find PARAMS((struct loop *));
184 static void loop_givs_check PARAMS((struct loop *));
185 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
186 int, int));
187 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
188 struct induction *, rtx));
189 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
190 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
191 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
192 rtx *));
193 static void loop_ivs_free PARAMS((struct loop *));
194 static void strength_reduce PARAMS ((struct loop *, int));
195 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
196 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
197 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
198 static void record_biv PARAMS ((struct loop *, struct induction *,
199 rtx, rtx, rtx, rtx, rtx *,
200 int, int));
201 static void check_final_value PARAMS ((const struct loop *,
202 struct induction *));
203 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
204 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
205 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
206 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
207 static void record_giv PARAMS ((const struct loop *, struct induction *,
208 rtx, rtx, rtx, rtx, rtx, rtx, int,
209 enum g_types, int, int, rtx *));
210 static void update_giv_derive PARAMS ((const struct loop *, rtx));
211 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
212 struct loop_info *));
213 static int basic_induction_var PARAMS ((const struct loop *, rtx,
214 enum machine_mode, rtx, rtx,
215 rtx *, rtx *, rtx **));
216 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
217 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
218 rtx *, rtx *, rtx *, int, int *,
219 enum machine_mode));
220 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
221 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
222 static int check_dbra_loop PARAMS ((struct loop *, int));
223 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
224 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
225 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
226 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
227 static int product_cheap_p PARAMS ((rtx, rtx));
228 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
229 int, int, int));
230 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
231 struct iv_class *, int,
232 basic_block, rtx));
233 static int last_use_this_basic_block PARAMS ((rtx, rtx));
234 static void record_initial PARAMS ((rtx, rtx, void *));
235 static void update_reg_last_use PARAMS ((rtx, rtx));
236 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
237 static void loop_regs_scan PARAMS ((const struct loop *, int));
238 static int count_insns_in_loop PARAMS ((const struct loop *));
239 static void load_mems PARAMS ((const struct loop *));
240 static int insert_loop_mem PARAMS ((rtx *, void *));
241 static int replace_loop_mem PARAMS ((rtx *, void *));
242 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
243 static int replace_loop_reg PARAMS ((rtx *, void *));
244 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
245 static void note_reg_stored PARAMS ((rtx, rtx, void *));
246 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
247 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
248 unsigned int));
249 static int replace_label PARAMS ((rtx *, void *));
250 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
251 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
252 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
253 static void loop_regs_update PARAMS ((const struct loop *, rtx));
254 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
255
256 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
257 rtx, rtx));
258 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
259 basic_block, rtx, rtx));
260 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
261 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
262
263 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
264 static void loop_delete_insns PARAMS ((rtx, rtx));
265 void debug_ivs PARAMS ((const struct loop *));
266 void debug_iv_class PARAMS ((const struct iv_class *));
267 void debug_biv PARAMS ((const struct induction *));
268 void debug_giv PARAMS ((const struct induction *));
269 void debug_loop PARAMS ((const struct loop *));
270 void debug_loops PARAMS ((const struct loops *));
271
272 typedef struct rtx_pair
273 {
274 rtx r1;
275 rtx r2;
276 } rtx_pair;
277
278 typedef struct loop_replace_args
279 {
280 rtx match;
281 rtx replacement;
282 rtx insn;
283 } loop_replace_args;
284
285 /* Nonzero iff INSN is between START and END, inclusive. */
286 #define INSN_IN_RANGE_P(INSN, START, END) \
287 (INSN_UID (INSN) < max_uid_for_loop \
288 && INSN_LUID (INSN) >= INSN_LUID (START) \
289 && INSN_LUID (INSN) <= INSN_LUID (END))
290
291 /* Indirect_jump_in_function is computed once per function. */
292 static int indirect_jump_in_function;
293 static int indirect_jump_in_function_p PARAMS ((rtx));
294
295 static int compute_luids PARAMS ((rtx, rtx, int));
296
297 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
298 struct induction *,
299 rtx));
300 \f
301 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
302 copy the value of the strength reduced giv to its original register. */
303 static int copy_cost;
304
305 /* Cost of using a register, to normalize the benefits of a giv. */
306 static int reg_address_cost;
307
308 void
309 init_loop ()
310 {
311 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
312
313 reg_address_cost = address_cost (reg, SImode);
314
315 copy_cost = COSTS_N_INSNS (1);
316 }
317 \f
318 /* Compute the mapping from uids to luids.
319 LUIDs are numbers assigned to insns, like uids,
320 except that luids increase monotonically through the code.
321 Start at insn START and stop just before END. Assign LUIDs
322 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
323 static int
324 compute_luids (start, end, prev_luid)
325 rtx start, end;
326 int prev_luid;
327 {
328 int i;
329 rtx insn;
330
331 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
332 {
333 if (INSN_UID (insn) >= max_uid_for_loop)
334 continue;
335 /* Don't assign luids to line-number NOTEs, so that the distance in
336 luids between two insns is not affected by -g. */
337 if (GET_CODE (insn) != NOTE
338 || NOTE_LINE_NUMBER (insn) <= 0)
339 uid_luid[INSN_UID (insn)] = ++i;
340 else
341 /* Give a line number note the same luid as preceding insn. */
342 uid_luid[INSN_UID (insn)] = i;
343 }
344 return i + 1;
345 }
346 \f
347 /* Entry point of this file. Perform loop optimization
348 on the current function. F is the first insn of the function
349 and DUMPFILE is a stream for output of a trace of actions taken
350 (or 0 if none should be output). */
351
352 void
353 loop_optimize (f, dumpfile, flags)
354 /* f is the first instruction of a chain of insns for one function */
355 rtx f;
356 FILE *dumpfile;
357 int flags;
358 {
359 rtx insn;
360 int i;
361 struct loops loops_data;
362 struct loops *loops = &loops_data;
363 struct loop_info *loops_info;
364
365 loop_dump_stream = dumpfile;
366
367 init_recog_no_volatile ();
368
369 max_reg_before_loop = max_reg_num ();
370 loop_max_reg = max_reg_before_loop;
371
372 regs_may_share = 0;
373
374 /* Count the number of loops. */
375
376 max_loop_num = 0;
377 for (insn = f; insn; insn = NEXT_INSN (insn))
378 {
379 if (GET_CODE (insn) == NOTE
380 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
381 max_loop_num++;
382 }
383
384 /* Don't waste time if no loops. */
385 if (max_loop_num == 0)
386 return;
387
388 loops->num = max_loop_num;
389
390 /* Get size to use for tables indexed by uids.
391 Leave some space for labels allocated by find_and_verify_loops. */
392 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
393
394 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
395 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
396 sizeof (struct loop *));
397
398 /* Allocate storage for array of loops. */
399 loops->array = (struct loop *)
400 xcalloc (loops->num, sizeof (struct loop));
401
402 /* Find and process each loop.
403 First, find them, and record them in order of their beginnings. */
404 find_and_verify_loops (f, loops);
405
406 /* Allocate and initialize auxiliary loop information. */
407 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
408 for (i = 0; i < loops->num; i++)
409 loops->array[i].aux = loops_info + i;
410
411 /* Now find all register lifetimes. This must be done after
412 find_and_verify_loops, because it might reorder the insns in the
413 function. */
414 reg_scan (f, max_reg_before_loop, 1);
415
416 /* This must occur after reg_scan so that registers created by gcse
417 will have entries in the register tables.
418
419 We could have added a call to reg_scan after gcse_main in toplev.c,
420 but moving this call to init_alias_analysis is more efficient. */
421 init_alias_analysis ();
422
423 /* See if we went too far. Note that get_max_uid already returns
424 one more that the maximum uid of all insn. */
425 if (get_max_uid () > max_uid_for_loop)
426 abort ();
427 /* Now reset it to the actual size we need. See above. */
428 max_uid_for_loop = get_max_uid ();
429
430 /* find_and_verify_loops has already called compute_luids, but it
431 might have rearranged code afterwards, so we need to recompute
432 the luids now. */
433 max_luid = compute_luids (f, NULL_RTX, 0);
434
435 /* Don't leave gaps in uid_luid for insns that have been
436 deleted. It is possible that the first or last insn
437 using some register has been deleted by cross-jumping.
438 Make sure that uid_luid for that former insn's uid
439 points to the general area where that insn used to be. */
440 for (i = 0; i < max_uid_for_loop; i++)
441 {
442 uid_luid[0] = uid_luid[i];
443 if (uid_luid[0] != 0)
444 break;
445 }
446 for (i = 0; i < max_uid_for_loop; i++)
447 if (uid_luid[i] == 0)
448 uid_luid[i] = uid_luid[i - 1];
449
450 /* Determine if the function has indirect jump. On some systems
451 this prevents low overhead loop instructions from being used. */
452 indirect_jump_in_function = indirect_jump_in_function_p (f);
453
454 /* Now scan the loops, last ones first, since this means inner ones are done
455 before outer ones. */
456 for (i = max_loop_num - 1; i >= 0; i--)
457 {
458 struct loop *loop = &loops->array[i];
459
460 if (! loop->invalid && loop->end)
461 scan_loop (loop, flags);
462 }
463
464 /* If there were lexical blocks inside the loop, they have been
465 replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
466 and NOTE_INSN_BLOCK_END for each such block. We must duplicate
467 the BLOCKs as well. */
468 if (write_symbols != NO_DEBUG)
469 reorder_blocks ();
470
471 end_alias_analysis ();
472
473 /* Clean up. */
474 free (uid_luid);
475 free (uid_loop);
476 free (loops_info);
477 free (loops->array);
478 }
479 \f
480 /* Returns the next insn, in execution order, after INSN. START and
481 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
482 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
483 insn-stream; it is used with loops that are entered near the
484 bottom. */
485
486 static rtx
487 next_insn_in_loop (loop, insn)
488 const struct loop *loop;
489 rtx insn;
490 {
491 insn = NEXT_INSN (insn);
492
493 if (insn == loop->end)
494 {
495 if (loop->top)
496 /* Go to the top of the loop, and continue there. */
497 insn = loop->top;
498 else
499 /* We're done. */
500 insn = NULL_RTX;
501 }
502
503 if (insn == loop->scan_start)
504 /* We're done. */
505 insn = NULL_RTX;
506
507 return insn;
508 }
509
510 /* Optimize one loop described by LOOP. */
511
512 /* ??? Could also move memory writes out of loops if the destination address
513 is invariant, the source is invariant, the memory write is not volatile,
514 and if we can prove that no read inside the loop can read this address
515 before the write occurs. If there is a read of this address after the
516 write, then we can also mark the memory read as invariant. */
517
518 static void
519 scan_loop (loop, flags)
520 struct loop *loop;
521 int flags;
522 {
523 struct loop_info *loop_info = LOOP_INFO (loop);
524 struct loop_regs *regs = LOOP_REGS (loop);
525 int i;
526 rtx loop_start = loop->start;
527 rtx loop_end = loop->end;
528 rtx p;
529 /* 1 if we are scanning insns that could be executed zero times. */
530 int maybe_never = 0;
531 /* 1 if we are scanning insns that might never be executed
532 due to a subroutine call which might exit before they are reached. */
533 int call_passed = 0;
534 /* Jump insn that enters the loop, or 0 if control drops in. */
535 rtx loop_entry_jump = 0;
536 /* Number of insns in the loop. */
537 int insn_count;
538 int tem;
539 rtx temp, update_start, update_end;
540 /* The SET from an insn, if it is the only SET in the insn. */
541 rtx set, set1;
542 /* Chain describing insns movable in current loop. */
543 struct loop_movables *movables = LOOP_MOVABLES (loop);
544 /* Ratio of extra register life span we can justify
545 for saving an instruction. More if loop doesn't call subroutines
546 since in that case saving an insn makes more difference
547 and more registers are available. */
548 int threshold;
549 /* Nonzero if we are scanning instructions in a sub-loop. */
550 int loop_depth = 0;
551
552 loop->top = 0;
553
554 movables->head = 0;
555 movables->last = 0;
556
557 /* Determine whether this loop starts with a jump down to a test at
558 the end. This will occur for a small number of loops with a test
559 that is too complex to duplicate in front of the loop.
560
561 We search for the first insn or label in the loop, skipping NOTEs.
562 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
563 (because we might have a loop executed only once that contains a
564 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
565 (in case we have a degenerate loop).
566
567 Note that if we mistakenly think that a loop is entered at the top
568 when, in fact, it is entered at the exit test, the only effect will be
569 slightly poorer optimization. Making the opposite error can generate
570 incorrect code. Since very few loops now start with a jump to the
571 exit test, the code here to detect that case is very conservative. */
572
573 for (p = NEXT_INSN (loop_start);
574 p != loop_end
575 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
576 && (GET_CODE (p) != NOTE
577 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
578 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
579 p = NEXT_INSN (p))
580 ;
581
582 loop->scan_start = p;
583
584 /* If loop end is the end of the current function, then emit a
585 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
586 note insn. This is the position we use when sinking insns out of
587 the loop. */
588 if (NEXT_INSN (loop->end) != 0)
589 loop->sink = NEXT_INSN (loop->end);
590 else
591 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
592
593 /* Set up variables describing this loop. */
594 prescan_loop (loop);
595 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
596
597 /* If loop has a jump before the first label,
598 the true entry is the target of that jump.
599 Start scan from there.
600 But record in LOOP->TOP the place where the end-test jumps
601 back to so we can scan that after the end of the loop. */
602 if (GET_CODE (p) == JUMP_INSN)
603 {
604 loop_entry_jump = p;
605
606 /* Loop entry must be unconditional jump (and not a RETURN) */
607 if (any_uncondjump_p (p)
608 && JUMP_LABEL (p) != 0
609 /* Check to see whether the jump actually
610 jumps out of the loop (meaning it's no loop).
611 This case can happen for things like
612 do {..} while (0). If this label was generated previously
613 by loop, we can't tell anything about it and have to reject
614 the loop. */
615 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
616 {
617 loop->top = next_label (loop->scan_start);
618 loop->scan_start = JUMP_LABEL (p);
619 }
620 }
621
622 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
623 as required by loop_reg_used_before_p. So skip such loops. (This
624 test may never be true, but it's best to play it safe.)
625
626 Also, skip loops where we do not start scanning at a label. This
627 test also rejects loops starting with a JUMP_INSN that failed the
628 test above. */
629
630 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
631 || GET_CODE (loop->scan_start) != CODE_LABEL)
632 {
633 if (loop_dump_stream)
634 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
635 INSN_UID (loop_start), INSN_UID (loop_end));
636 return;
637 }
638
639 /* Allocate extra space for REGs that might be created by load_mems.
640 We allocate a little extra slop as well, in the hopes that we
641 won't have to reallocate the regs array. */
642 loop_regs_scan (loop, loop_info->mems_idx + 16);
643 insn_count = count_insns_in_loop (loop);
644
645 if (loop_dump_stream)
646 {
647 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
648 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
649 if (loop->cont)
650 fprintf (loop_dump_stream, "Continue at insn %d.\n",
651 INSN_UID (loop->cont));
652 }
653
654 /* Scan through the loop finding insns that are safe to move.
655 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
656 this reg will be considered invariant for subsequent insns.
657 We consider whether subsequent insns use the reg
658 in deciding whether it is worth actually moving.
659
660 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
661 and therefore it is possible that the insns we are scanning
662 would never be executed. At such times, we must make sure
663 that it is safe to execute the insn once instead of zero times.
664 When MAYBE_NEVER is 0, all insns will be executed at least once
665 so that is not a problem. */
666
667 for (p = next_insn_in_loop (loop, loop->scan_start);
668 p != NULL_RTX;
669 p = next_insn_in_loop (loop, p))
670 {
671 if (GET_CODE (p) == INSN
672 && (set = single_set (p))
673 && GET_CODE (SET_DEST (set)) == REG
674 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
675 {
676 int tem1 = 0;
677 int tem2 = 0;
678 int move_insn = 0;
679 rtx src = SET_SRC (set);
680 rtx dependencies = 0;
681
682 /* Figure out what to use as a source of this insn. If a REG_EQUIV
683 note is given or if a REG_EQUAL note with a constant operand is
684 specified, use it as the source and mark that we should move
685 this insn by calling emit_move_insn rather that duplicating the
686 insn.
687
688 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
689 is present. */
690 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
691 if (temp)
692 src = XEXP (temp, 0), move_insn = 1;
693 else
694 {
695 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
696 if (temp && CONSTANT_P (XEXP (temp, 0)))
697 src = XEXP (temp, 0), move_insn = 1;
698 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
699 {
700 src = XEXP (temp, 0);
701 /* A libcall block can use regs that don't appear in
702 the equivalent expression. To move the libcall,
703 we must move those regs too. */
704 dependencies = libcall_other_reg (p, src);
705 }
706 }
707
708 /* For parallels, add any possible uses to the depencies, as we can't move
709 the insn without resolving them first. */
710 if (GET_CODE (PATTERN (p)) == PARALLEL)
711 {
712 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
713 {
714 rtx x = XVECEXP (PATTERN (p), 0, i);
715 if (GET_CODE (x) == USE)
716 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
717 }
718 }
719
720 /* Don't try to optimize a register that was made
721 by loop-optimization for an inner loop.
722 We don't know its life-span, so we can't compute the benefit. */
723 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
724 ;
725 else if (/* The register is used in basic blocks other
726 than the one where it is set (meaning that
727 something after this point in the loop might
728 depend on its value before the set). */
729 ! reg_in_basic_block_p (p, SET_DEST (set))
730 /* And the set is not guaranteed to be executed once
731 the loop starts, or the value before the set is
732 needed before the set occurs...
733
734 ??? Note we have quadratic behaviour here, mitigated
735 by the fact that the previous test will often fail for
736 large loops. Rather than re-scanning the entire loop
737 each time for register usage, we should build tables
738 of the register usage and use them here instead. */
739 && (maybe_never
740 || loop_reg_used_before_p (loop, set, p)))
741 /* It is unsafe to move the set.
742
743 This code used to consider it OK to move a set of a variable
744 which was not created by the user and not used in an exit test.
745 That behavior is incorrect and was removed. */
746 ;
747 else if ((tem = loop_invariant_p (loop, src))
748 && (dependencies == 0
749 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
750 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
751 || (tem1
752 = consec_sets_invariant_p
753 (loop, SET_DEST (set),
754 regs->array[REGNO (SET_DEST (set))].set_in_loop,
755 p)))
756 /* If the insn can cause a trap (such as divide by zero),
757 can't move it unless it's guaranteed to be executed
758 once loop is entered. Even a function call might
759 prevent the trap insn from being reached
760 (since it might exit!) */
761 && ! ((maybe_never || call_passed)
762 && may_trap_p (src)))
763 {
764 struct movable *m;
765 int regno = REGNO (SET_DEST (set));
766
767 /* A potential lossage is where we have a case where two insns
768 can be combined as long as they are both in the loop, but
769 we move one of them outside the loop. For large loops,
770 this can lose. The most common case of this is the address
771 of a function being called.
772
773 Therefore, if this register is marked as being used exactly
774 once if we are in a loop with calls (a "large loop"), see if
775 we can replace the usage of this register with the source
776 of this SET. If we can, delete this insn.
777
778 Don't do this if P has a REG_RETVAL note or if we have
779 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
780
781 if (loop_info->has_call
782 && regs->array[regno].single_usage != 0
783 && regs->array[regno].single_usage != const0_rtx
784 && REGNO_FIRST_UID (regno) == INSN_UID (p)
785 && (REGNO_LAST_UID (regno)
786 == INSN_UID (regs->array[regno].single_usage))
787 && regs->array[regno].set_in_loop == 1
788 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
789 && ! side_effects_p (SET_SRC (set))
790 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
791 && (! SMALL_REGISTER_CLASSES
792 || (! (GET_CODE (SET_SRC (set)) == REG
793 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
794 /* This test is not redundant; SET_SRC (set) might be
795 a call-clobbered register and the life of REGNO
796 might span a call. */
797 && ! modified_between_p (SET_SRC (set), p,
798 regs->array[regno].single_usage)
799 && no_labels_between_p (p, regs->array[regno].single_usage)
800 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
801 regs->array[regno].single_usage))
802 {
803 /* Replace any usage in a REG_EQUAL note. Must copy the
804 new source, so that we don't get rtx sharing between the
805 SET_SOURCE and REG_NOTES of insn p. */
806 REG_NOTES (regs->array[regno].single_usage)
807 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
808 SET_DEST (set), copy_rtx (SET_SRC (set)));
809
810 delete_insn (p);
811 regs->array[regno].set_in_loop = 0;
812 continue;
813 }
814
815 m = (struct movable *) xmalloc (sizeof (struct movable));
816 m->next = 0;
817 m->insn = p;
818 m->set_src = src;
819 m->dependencies = dependencies;
820 m->set_dest = SET_DEST (set);
821 m->force = 0;
822 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
823 m->done = 0;
824 m->forces = 0;
825 m->partial = 0;
826 m->move_insn = move_insn;
827 m->move_insn_first = 0;
828 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
829 m->savemode = VOIDmode;
830 m->regno = regno;
831 /* Set M->cond if either loop_invariant_p
832 or consec_sets_invariant_p returned 2
833 (only conditionally invariant). */
834 m->cond = ((tem | tem1 | tem2) > 1);
835 m->global = LOOP_REG_GLOBAL_P (loop, regno);
836 m->match = 0;
837 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
838 m->savings = regs->array[regno].n_times_set;
839 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
840 m->savings += libcall_benefit (p);
841 regs->array[regno].set_in_loop = move_insn ? -2 : -1;
842 /* Add M to the end of the chain MOVABLES. */
843 loop_movables_add (movables, m);
844
845 if (m->consec > 0)
846 {
847 /* It is possible for the first instruction to have a
848 REG_EQUAL note but a non-invariant SET_SRC, so we must
849 remember the status of the first instruction in case
850 the last instruction doesn't have a REG_EQUAL note. */
851 m->move_insn_first = m->move_insn;
852
853 /* Skip this insn, not checking REG_LIBCALL notes. */
854 p = next_nonnote_insn (p);
855 /* Skip the consecutive insns, if there are any. */
856 p = skip_consec_insns (p, m->consec);
857 /* Back up to the last insn of the consecutive group. */
858 p = prev_nonnote_insn (p);
859
860 /* We must now reset m->move_insn, m->is_equiv, and possibly
861 m->set_src to correspond to the effects of all the
862 insns. */
863 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
864 if (temp)
865 m->set_src = XEXP (temp, 0), m->move_insn = 1;
866 else
867 {
868 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
869 if (temp && CONSTANT_P (XEXP (temp, 0)))
870 m->set_src = XEXP (temp, 0), m->move_insn = 1;
871 else
872 m->move_insn = 0;
873
874 }
875 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
876 }
877 }
878 /* If this register is always set within a STRICT_LOW_PART
879 or set to zero, then its high bytes are constant.
880 So clear them outside the loop and within the loop
881 just load the low bytes.
882 We must check that the machine has an instruction to do so.
883 Also, if the value loaded into the register
884 depends on the same register, this cannot be done. */
885 else if (SET_SRC (set) == const0_rtx
886 && GET_CODE (NEXT_INSN (p)) == INSN
887 && (set1 = single_set (NEXT_INSN (p)))
888 && GET_CODE (set1) == SET
889 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
890 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
891 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
892 == SET_DEST (set))
893 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
894 {
895 int regno = REGNO (SET_DEST (set));
896 if (regs->array[regno].set_in_loop == 2)
897 {
898 struct movable *m;
899 m = (struct movable *) xmalloc (sizeof (struct movable));
900 m->next = 0;
901 m->insn = p;
902 m->set_dest = SET_DEST (set);
903 m->dependencies = 0;
904 m->force = 0;
905 m->consec = 0;
906 m->done = 0;
907 m->forces = 0;
908 m->move_insn = 0;
909 m->move_insn_first = 0;
910 m->partial = 1;
911 /* If the insn may not be executed on some cycles,
912 we can't clear the whole reg; clear just high part.
913 Not even if the reg is used only within this loop.
914 Consider this:
915 while (1)
916 while (s != t) {
917 if (foo ()) x = *s;
918 use (x);
919 }
920 Clearing x before the inner loop could clobber a value
921 being saved from the last time around the outer loop.
922 However, if the reg is not used outside this loop
923 and all uses of the register are in the same
924 basic block as the store, there is no problem.
925
926 If this insn was made by loop, we don't know its
927 INSN_LUID and hence must make a conservative
928 assumption. */
929 m->global = (INSN_UID (p) >= max_uid_for_loop
930 || LOOP_REG_GLOBAL_P (loop, regno)
931 || (labels_in_range_p
932 (p, REGNO_FIRST_LUID (regno))));
933 if (maybe_never && m->global)
934 m->savemode = GET_MODE (SET_SRC (set1));
935 else
936 m->savemode = VOIDmode;
937 m->regno = regno;
938 m->cond = 0;
939 m->match = 0;
940 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
941 m->savings = 1;
942 regs->array[regno].set_in_loop = -1;
943 /* Add M to the end of the chain MOVABLES. */
944 loop_movables_add (movables, m);
945 }
946 }
947 }
948 /* Past a call insn, we get to insns which might not be executed
949 because the call might exit. This matters for insns that trap.
950 Constant and pure call insns always return, so they don't count. */
951 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
952 call_passed = 1;
953 /* Past a label or a jump, we get to insns for which we
954 can't count on whether or how many times they will be
955 executed during each iteration. Therefore, we can
956 only move out sets of trivial variables
957 (those not used after the loop). */
958 /* Similar code appears twice in strength_reduce. */
959 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
960 /* If we enter the loop in the middle, and scan around to the
961 beginning, don't set maybe_never for that. This must be an
962 unconditional jump, otherwise the code at the top of the
963 loop might never be executed. Unconditional jumps are
964 followed by a barrier then the loop_end. */
965 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
966 && NEXT_INSN (NEXT_INSN (p)) == loop_end
967 && any_uncondjump_p (p)))
968 maybe_never = 1;
969 else if (GET_CODE (p) == NOTE)
970 {
971 /* At the virtual top of a converted loop, insns are again known to
972 be executed: logically, the loop begins here even though the exit
973 code has been duplicated. */
974 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
975 maybe_never = call_passed = 0;
976 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
977 loop_depth++;
978 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
979 loop_depth--;
980 }
981 }
982
983 /* If one movable subsumes another, ignore that other. */
984
985 ignore_some_movables (movables);
986
987 /* For each movable insn, see if the reg that it loads
988 leads when it dies right into another conditionally movable insn.
989 If so, record that the second insn "forces" the first one,
990 since the second can be moved only if the first is. */
991
992 force_movables (movables);
993
994 /* See if there are multiple movable insns that load the same value.
995 If there are, make all but the first point at the first one
996 through the `match' field, and add the priorities of them
997 all together as the priority of the first. */
998
999 combine_movables (movables, regs);
1000
1001 /* Now consider each movable insn to decide whether it is worth moving.
1002 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1003
1004 Generally this increases code size, so do not move moveables when
1005 optimizing for code size. */
1006
1007 if (! optimize_size)
1008 move_movables (loop, movables, threshold, insn_count);
1009
1010 /* Now candidates that still are negative are those not moved.
1011 Change regs->array[I].set_in_loop to indicate that those are not actually
1012 invariant. */
1013 for (i = 0; i < regs->num; i++)
1014 if (regs->array[i].set_in_loop < 0)
1015 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1016
1017 /* Now that we've moved some things out of the loop, we might be able to
1018 hoist even more memory references. */
1019 load_mems (loop);
1020
1021 /* Recalculate regs->array if load_mems has created new registers. */
1022 if (max_reg_num () > regs->num)
1023 loop_regs_scan (loop, 0);
1024
1025 for (update_start = loop_start;
1026 PREV_INSN (update_start)
1027 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1028 update_start = PREV_INSN (update_start))
1029 ;
1030 update_end = NEXT_INSN (loop_end);
1031
1032 reg_scan_update (update_start, update_end, loop_max_reg);
1033 loop_max_reg = max_reg_num ();
1034
1035 if (flag_strength_reduce)
1036 {
1037 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1038 /* Ensure our label doesn't go away. */
1039 LABEL_NUSES (update_end)++;
1040
1041 strength_reduce (loop, flags);
1042
1043 reg_scan_update (update_start, update_end, loop_max_reg);
1044 loop_max_reg = max_reg_num ();
1045
1046 if (update_end && GET_CODE (update_end) == CODE_LABEL
1047 && --LABEL_NUSES (update_end) == 0)
1048 delete_related_insns (update_end);
1049 }
1050
1051
1052 /* The movable information is required for strength reduction. */
1053 loop_movables_free (movables);
1054
1055 free (regs->array);
1056 regs->array = 0;
1057 regs->num = 0;
1058 }
1059 \f
1060 /* Add elements to *OUTPUT to record all the pseudo-regs
1061 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1062
1063 void
1064 record_excess_regs (in_this, not_in_this, output)
1065 rtx in_this, not_in_this;
1066 rtx *output;
1067 {
1068 enum rtx_code code;
1069 const char *fmt;
1070 int i;
1071
1072 code = GET_CODE (in_this);
1073
1074 switch (code)
1075 {
1076 case PC:
1077 case CC0:
1078 case CONST_INT:
1079 case CONST_DOUBLE:
1080 case CONST:
1081 case SYMBOL_REF:
1082 case LABEL_REF:
1083 return;
1084
1085 case REG:
1086 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1087 && ! reg_mentioned_p (in_this, not_in_this))
1088 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1089 return;
1090
1091 default:
1092 break;
1093 }
1094
1095 fmt = GET_RTX_FORMAT (code);
1096 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1097 {
1098 int j;
1099
1100 switch (fmt[i])
1101 {
1102 case 'E':
1103 for (j = 0; j < XVECLEN (in_this, i); j++)
1104 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1105 break;
1106
1107 case 'e':
1108 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1109 break;
1110 }
1111 }
1112 }
1113 \f
1114 /* Check what regs are referred to in the libcall block ending with INSN,
1115 aside from those mentioned in the equivalent value.
1116 If there are none, return 0.
1117 If there are one or more, return an EXPR_LIST containing all of them. */
1118
1119 rtx
1120 libcall_other_reg (insn, equiv)
1121 rtx insn, equiv;
1122 {
1123 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1124 rtx p = XEXP (note, 0);
1125 rtx output = 0;
1126
1127 /* First, find all the regs used in the libcall block
1128 that are not mentioned as inputs to the result. */
1129
1130 while (p != insn)
1131 {
1132 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1133 || GET_CODE (p) == CALL_INSN)
1134 record_excess_regs (PATTERN (p), equiv, &output);
1135 p = NEXT_INSN (p);
1136 }
1137
1138 return output;
1139 }
1140 \f
1141 /* Return 1 if all uses of REG
1142 are between INSN and the end of the basic block. */
1143
1144 static int
1145 reg_in_basic_block_p (insn, reg)
1146 rtx insn, reg;
1147 {
1148 int regno = REGNO (reg);
1149 rtx p;
1150
1151 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1152 return 0;
1153
1154 /* Search this basic block for the already recorded last use of the reg. */
1155 for (p = insn; p; p = NEXT_INSN (p))
1156 {
1157 switch (GET_CODE (p))
1158 {
1159 case NOTE:
1160 break;
1161
1162 case INSN:
1163 case CALL_INSN:
1164 /* Ordinary insn: if this is the last use, we win. */
1165 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1166 return 1;
1167 break;
1168
1169 case JUMP_INSN:
1170 /* Jump insn: if this is the last use, we win. */
1171 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1172 return 1;
1173 /* Otherwise, it's the end of the basic block, so we lose. */
1174 return 0;
1175
1176 case CODE_LABEL:
1177 case BARRIER:
1178 /* It's the end of the basic block, so we lose. */
1179 return 0;
1180
1181 default:
1182 break;
1183 }
1184 }
1185
1186 /* The "last use" that was recorded can't be found after the first
1187 use. This can happen when the last use was deleted while
1188 processing an inner loop, this inner loop was then completely
1189 unrolled, and the outer loop is always exited after the inner loop,
1190 so that everything after the first use becomes a single basic block. */
1191 return 1;
1192 }
1193 \f
1194 /* Compute the benefit of eliminating the insns in the block whose
1195 last insn is LAST. This may be a group of insns used to compute a
1196 value directly or can contain a library call. */
1197
1198 static int
1199 libcall_benefit (last)
1200 rtx last;
1201 {
1202 rtx insn;
1203 int benefit = 0;
1204
1205 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1206 insn != last; insn = NEXT_INSN (insn))
1207 {
1208 if (GET_CODE (insn) == CALL_INSN)
1209 benefit += 10; /* Assume at least this many insns in a library
1210 routine. */
1211 else if (GET_CODE (insn) == INSN
1212 && GET_CODE (PATTERN (insn)) != USE
1213 && GET_CODE (PATTERN (insn)) != CLOBBER)
1214 benefit++;
1215 }
1216
1217 return benefit;
1218 }
1219 \f
1220 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1221
1222 static rtx
1223 skip_consec_insns (insn, count)
1224 rtx insn;
1225 int count;
1226 {
1227 for (; count > 0; count--)
1228 {
1229 rtx temp;
1230
1231 /* If first insn of libcall sequence, skip to end. */
1232 /* Do this at start of loop, since INSN is guaranteed to
1233 be an insn here. */
1234 if (GET_CODE (insn) != NOTE
1235 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1236 insn = XEXP (temp, 0);
1237
1238 do
1239 insn = NEXT_INSN (insn);
1240 while (GET_CODE (insn) == NOTE);
1241 }
1242
1243 return insn;
1244 }
1245
1246 /* Ignore any movable whose insn falls within a libcall
1247 which is part of another movable.
1248 We make use of the fact that the movable for the libcall value
1249 was made later and so appears later on the chain. */
1250
1251 static void
1252 ignore_some_movables (movables)
1253 struct loop_movables *movables;
1254 {
1255 struct movable *m, *m1;
1256
1257 for (m = movables->head; m; m = m->next)
1258 {
1259 /* Is this a movable for the value of a libcall? */
1260 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1261 if (note)
1262 {
1263 rtx insn;
1264 /* Check for earlier movables inside that range,
1265 and mark them invalid. We cannot use LUIDs here because
1266 insns created by loop.c for prior loops don't have LUIDs.
1267 Rather than reject all such insns from movables, we just
1268 explicitly check each insn in the libcall (since invariant
1269 libcalls aren't that common). */
1270 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1271 for (m1 = movables->head; m1 != m; m1 = m1->next)
1272 if (m1->insn == insn)
1273 m1->done = 1;
1274 }
1275 }
1276 }
1277
1278 /* For each movable insn, see if the reg that it loads
1279 leads when it dies right into another conditionally movable insn.
1280 If so, record that the second insn "forces" the first one,
1281 since the second can be moved only if the first is. */
1282
1283 static void
1284 force_movables (movables)
1285 struct loop_movables *movables;
1286 {
1287 struct movable *m, *m1;
1288
1289 for (m1 = movables->head; m1; m1 = m1->next)
1290 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1291 if (!m1->partial && !m1->done)
1292 {
1293 int regno = m1->regno;
1294 for (m = m1->next; m; m = m->next)
1295 /* ??? Could this be a bug? What if CSE caused the
1296 register of M1 to be used after this insn?
1297 Since CSE does not update regno_last_uid,
1298 this insn M->insn might not be where it dies.
1299 But very likely this doesn't matter; what matters is
1300 that M's reg is computed from M1's reg. */
1301 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1302 && !m->done)
1303 break;
1304 if (m != 0 && m->set_src == m1->set_dest
1305 /* If m->consec, m->set_src isn't valid. */
1306 && m->consec == 0)
1307 m = 0;
1308
1309 /* Increase the priority of the moving the first insn
1310 since it permits the second to be moved as well. */
1311 if (m != 0)
1312 {
1313 m->forces = m1;
1314 m1->lifetime += m->lifetime;
1315 m1->savings += m->savings;
1316 }
1317 }
1318 }
1319 \f
1320 /* Find invariant expressions that are equal and can be combined into
1321 one register. */
1322
1323 static void
1324 combine_movables (movables, regs)
1325 struct loop_movables *movables;
1326 struct loop_regs *regs;
1327 {
1328 struct movable *m;
1329 char *matched_regs = (char *) xmalloc (regs->num);
1330 enum machine_mode mode;
1331
1332 /* Regs that are set more than once are not allowed to match
1333 or be matched. I'm no longer sure why not. */
1334 /* Perhaps testing m->consec_sets would be more appropriate here? */
1335
1336 for (m = movables->head; m; m = m->next)
1337 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1338 && !m->partial)
1339 {
1340 struct movable *m1;
1341 int regno = m->regno;
1342
1343 memset (matched_regs, 0, regs->num);
1344 matched_regs[regno] = 1;
1345
1346 /* We want later insns to match the first one. Don't make the first
1347 one match any later ones. So start this loop at m->next. */
1348 for (m1 = m->next; m1; m1 = m1->next)
1349 /* ??? HACK! move_movables does not verify that the replacement
1350 is valid, which can have disasterous effects with hard regs
1351 and match_dup. Turn combination off for now. */
1352 if (0 && m != m1 && m1->match == 0
1353 && regs->array[m1->regno].n_times_set == 1
1354 /* A reg used outside the loop mustn't be eliminated. */
1355 && !m1->global
1356 /* A reg used for zero-extending mustn't be eliminated. */
1357 && !m1->partial
1358 && (matched_regs[m1->regno]
1359 ||
1360 (
1361 /* Can combine regs with different modes loaded from the
1362 same constant only if the modes are the same or
1363 if both are integer modes with M wider or the same
1364 width as M1. The check for integer is redundant, but
1365 safe, since the only case of differing destination
1366 modes with equal sources is when both sources are
1367 VOIDmode, i.e., CONST_INT. */
1368 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1369 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1370 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1371 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1372 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1373 /* See if the source of M1 says it matches M. */
1374 && ((GET_CODE (m1->set_src) == REG
1375 && matched_regs[REGNO (m1->set_src)])
1376 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1377 movables, regs))))
1378 && ((m->dependencies == m1->dependencies)
1379 || rtx_equal_p (m->dependencies, m1->dependencies)))
1380 {
1381 m->lifetime += m1->lifetime;
1382 m->savings += m1->savings;
1383 m1->done = 1;
1384 m1->match = m;
1385 matched_regs[m1->regno] = 1;
1386 }
1387 }
1388
1389 /* Now combine the regs used for zero-extension.
1390 This can be done for those not marked `global'
1391 provided their lives don't overlap. */
1392
1393 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1394 mode = GET_MODE_WIDER_MODE (mode))
1395 {
1396 struct movable *m0 = 0;
1397
1398 /* Combine all the registers for extension from mode MODE.
1399 Don't combine any that are used outside this loop. */
1400 for (m = movables->head; m; m = m->next)
1401 if (m->partial && ! m->global
1402 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1403 {
1404 struct movable *m1;
1405
1406 int first = REGNO_FIRST_LUID (m->regno);
1407 int last = REGNO_LAST_LUID (m->regno);
1408
1409 if (m0 == 0)
1410 {
1411 /* First one: don't check for overlap, just record it. */
1412 m0 = m;
1413 continue;
1414 }
1415
1416 /* Make sure they extend to the same mode.
1417 (Almost always true.) */
1418 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1419 continue;
1420
1421 /* We already have one: check for overlap with those
1422 already combined together. */
1423 for (m1 = movables->head; m1 != m; m1 = m1->next)
1424 if (m1 == m0 || (m1->partial && m1->match == m0))
1425 if (! (REGNO_FIRST_LUID (m1->regno) > last
1426 || REGNO_LAST_LUID (m1->regno) < first))
1427 goto overlap;
1428
1429 /* No overlap: we can combine this with the others. */
1430 m0->lifetime += m->lifetime;
1431 m0->savings += m->savings;
1432 m->done = 1;
1433 m->match = m0;
1434
1435 overlap:
1436 ;
1437 }
1438 }
1439
1440 /* Clean up. */
1441 free (matched_regs);
1442 }
1443
1444 /* Returns the number of movable instructions in LOOP that were not
1445 moved outside the loop. */
1446
1447 static int
1448 num_unmoved_movables (loop)
1449 const struct loop *loop;
1450 {
1451 int num = 0;
1452 struct movable *m;
1453
1454 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1455 if (!m->done)
1456 ++num;
1457
1458 return num;
1459 }
1460
1461 \f
1462 /* Return 1 if regs X and Y will become the same if moved. */
1463
1464 static int
1465 regs_match_p (x, y, movables)
1466 rtx x, y;
1467 struct loop_movables *movables;
1468 {
1469 unsigned int xn = REGNO (x);
1470 unsigned int yn = REGNO (y);
1471 struct movable *mx, *my;
1472
1473 for (mx = movables->head; mx; mx = mx->next)
1474 if (mx->regno == xn)
1475 break;
1476
1477 for (my = movables->head; my; my = my->next)
1478 if (my->regno == yn)
1479 break;
1480
1481 return (mx && my
1482 && ((mx->match == my->match && mx->match != 0)
1483 || mx->match == my
1484 || mx == my->match));
1485 }
1486
1487 /* Return 1 if X and Y are identical-looking rtx's.
1488 This is the Lisp function EQUAL for rtx arguments.
1489
1490 If two registers are matching movables or a movable register and an
1491 equivalent constant, consider them equal. */
1492
1493 static int
1494 rtx_equal_for_loop_p (x, y, movables, regs)
1495 rtx x, y;
1496 struct loop_movables *movables;
1497 struct loop_regs *regs;
1498 {
1499 int i;
1500 int j;
1501 struct movable *m;
1502 enum rtx_code code;
1503 const char *fmt;
1504
1505 if (x == y)
1506 return 1;
1507 if (x == 0 || y == 0)
1508 return 0;
1509
1510 code = GET_CODE (x);
1511
1512 /* If we have a register and a constant, they may sometimes be
1513 equal. */
1514 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1515 && CONSTANT_P (y))
1516 {
1517 for (m = movables->head; m; m = m->next)
1518 if (m->move_insn && m->regno == REGNO (x)
1519 && rtx_equal_p (m->set_src, y))
1520 return 1;
1521 }
1522 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1523 && CONSTANT_P (x))
1524 {
1525 for (m = movables->head; m; m = m->next)
1526 if (m->move_insn && m->regno == REGNO (y)
1527 && rtx_equal_p (m->set_src, x))
1528 return 1;
1529 }
1530
1531 /* Otherwise, rtx's of different codes cannot be equal. */
1532 if (code != GET_CODE (y))
1533 return 0;
1534
1535 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1536 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1537
1538 if (GET_MODE (x) != GET_MODE (y))
1539 return 0;
1540
1541 /* These three types of rtx's can be compared nonrecursively. */
1542 if (code == REG)
1543 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1544
1545 if (code == LABEL_REF)
1546 return XEXP (x, 0) == XEXP (y, 0);
1547 if (code == SYMBOL_REF)
1548 return XSTR (x, 0) == XSTR (y, 0);
1549
1550 /* Compare the elements. If any pair of corresponding elements
1551 fail to match, return 0 for the whole things. */
1552
1553 fmt = GET_RTX_FORMAT (code);
1554 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1555 {
1556 switch (fmt[i])
1557 {
1558 case 'w':
1559 if (XWINT (x, i) != XWINT (y, i))
1560 return 0;
1561 break;
1562
1563 case 'i':
1564 if (XINT (x, i) != XINT (y, i))
1565 return 0;
1566 break;
1567
1568 case 'E':
1569 /* Two vectors must have the same length. */
1570 if (XVECLEN (x, i) != XVECLEN (y, i))
1571 return 0;
1572
1573 /* And the corresponding elements must match. */
1574 for (j = 0; j < XVECLEN (x, i); j++)
1575 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1576 movables, regs) == 0)
1577 return 0;
1578 break;
1579
1580 case 'e':
1581 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1582 == 0)
1583 return 0;
1584 break;
1585
1586 case 's':
1587 if (strcmp (XSTR (x, i), XSTR (y, i)))
1588 return 0;
1589 break;
1590
1591 case 'u':
1592 /* These are just backpointers, so they don't matter. */
1593 break;
1594
1595 case '0':
1596 break;
1597
1598 /* It is believed that rtx's at this level will never
1599 contain anything but integers and other rtx's,
1600 except for within LABEL_REFs and SYMBOL_REFs. */
1601 default:
1602 abort ();
1603 }
1604 }
1605 return 1;
1606 }
1607 \f
1608 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1609 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1610 references is incremented once for each added note. */
1611
1612 static void
1613 add_label_notes (x, insns)
1614 rtx x;
1615 rtx insns;
1616 {
1617 enum rtx_code code = GET_CODE (x);
1618 int i, j;
1619 const char *fmt;
1620 rtx insn;
1621
1622 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1623 {
1624 /* This code used to ignore labels that referred to dispatch tables to
1625 avoid flow generating (slighly) worse code.
1626
1627 We no longer ignore such label references (see LABEL_REF handling in
1628 mark_jump_label for additional information). */
1629 for (insn = insns; insn; insn = NEXT_INSN (insn))
1630 if (reg_mentioned_p (XEXP (x, 0), insn))
1631 {
1632 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1633 REG_NOTES (insn));
1634 if (LABEL_P (XEXP (x, 0)))
1635 LABEL_NUSES (XEXP (x, 0))++;
1636 }
1637 }
1638
1639 fmt = GET_RTX_FORMAT (code);
1640 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1641 {
1642 if (fmt[i] == 'e')
1643 add_label_notes (XEXP (x, i), insns);
1644 else if (fmt[i] == 'E')
1645 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1646 add_label_notes (XVECEXP (x, i, j), insns);
1647 }
1648 }
1649 \f
1650 /* Scan MOVABLES, and move the insns that deserve to be moved.
1651 If two matching movables are combined, replace one reg with the
1652 other throughout. */
1653
1654 static void
1655 move_movables (loop, movables, threshold, insn_count)
1656 struct loop *loop;
1657 struct loop_movables *movables;
1658 int threshold;
1659 int insn_count;
1660 {
1661 struct loop_regs *regs = LOOP_REGS (loop);
1662 int nregs = regs->num;
1663 rtx new_start = 0;
1664 struct movable *m;
1665 rtx p;
1666 rtx loop_start = loop->start;
1667 rtx loop_end = loop->end;
1668 /* Map of pseudo-register replacements to handle combining
1669 when we move several insns that load the same value
1670 into different pseudo-registers. */
1671 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1672 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1673
1674 for (m = movables->head; m; m = m->next)
1675 {
1676 /* Describe this movable insn. */
1677
1678 if (loop_dump_stream)
1679 {
1680 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1681 INSN_UID (m->insn), m->regno, m->lifetime);
1682 if (m->consec > 0)
1683 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1684 if (m->cond)
1685 fprintf (loop_dump_stream, "cond ");
1686 if (m->force)
1687 fprintf (loop_dump_stream, "force ");
1688 if (m->global)
1689 fprintf (loop_dump_stream, "global ");
1690 if (m->done)
1691 fprintf (loop_dump_stream, "done ");
1692 if (m->move_insn)
1693 fprintf (loop_dump_stream, "move-insn ");
1694 if (m->match)
1695 fprintf (loop_dump_stream, "matches %d ",
1696 INSN_UID (m->match->insn));
1697 if (m->forces)
1698 fprintf (loop_dump_stream, "forces %d ",
1699 INSN_UID (m->forces->insn));
1700 }
1701
1702 /* Ignore the insn if it's already done (it matched something else).
1703 Otherwise, see if it is now safe to move. */
1704
1705 if (!m->done
1706 && (! m->cond
1707 || (1 == loop_invariant_p (loop, m->set_src)
1708 && (m->dependencies == 0
1709 || 1 == loop_invariant_p (loop, m->dependencies))
1710 && (m->consec == 0
1711 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1712 m->consec + 1,
1713 m->insn))))
1714 && (! m->forces || m->forces->done))
1715 {
1716 int regno;
1717 rtx p;
1718 int savings = m->savings;
1719
1720 /* We have an insn that is safe to move.
1721 Compute its desirability. */
1722
1723 p = m->insn;
1724 regno = m->regno;
1725
1726 if (loop_dump_stream)
1727 fprintf (loop_dump_stream, "savings %d ", savings);
1728
1729 if (regs->array[regno].moved_once && loop_dump_stream)
1730 fprintf (loop_dump_stream, "halved since already moved ");
1731
1732 /* An insn MUST be moved if we already moved something else
1733 which is safe only if this one is moved too: that is,
1734 if already_moved[REGNO] is nonzero. */
1735
1736 /* An insn is desirable to move if the new lifetime of the
1737 register is no more than THRESHOLD times the old lifetime.
1738 If it's not desirable, it means the loop is so big
1739 that moving won't speed things up much,
1740 and it is liable to make register usage worse. */
1741
1742 /* It is also desirable to move if it can be moved at no
1743 extra cost because something else was already moved. */
1744
1745 if (already_moved[regno]
1746 || flag_move_all_movables
1747 || (threshold * savings * m->lifetime) >=
1748 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1749 || (m->forces && m->forces->done
1750 && regs->array[m->forces->regno].n_times_set == 1))
1751 {
1752 int count;
1753 struct movable *m1;
1754 rtx first = NULL_RTX;
1755
1756 /* Now move the insns that set the reg. */
1757
1758 if (m->partial && m->match)
1759 {
1760 rtx newpat, i1;
1761 rtx r1, r2;
1762 /* Find the end of this chain of matching regs.
1763 Thus, we load each reg in the chain from that one reg.
1764 And that reg is loaded with 0 directly,
1765 since it has ->match == 0. */
1766 for (m1 = m; m1->match; m1 = m1->match);
1767 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1768 SET_DEST (PATTERN (m1->insn)));
1769 i1 = loop_insn_hoist (loop, newpat);
1770
1771 /* Mark the moved, invariant reg as being allowed to
1772 share a hard reg with the other matching invariant. */
1773 REG_NOTES (i1) = REG_NOTES (m->insn);
1774 r1 = SET_DEST (PATTERN (m->insn));
1775 r2 = SET_DEST (PATTERN (m1->insn));
1776 regs_may_share
1777 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1778 gen_rtx_EXPR_LIST (VOIDmode, r2,
1779 regs_may_share));
1780 delete_insn (m->insn);
1781
1782 if (new_start == 0)
1783 new_start = i1;
1784
1785 if (loop_dump_stream)
1786 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1787 }
1788 /* If we are to re-generate the item being moved with a
1789 new move insn, first delete what we have and then emit
1790 the move insn before the loop. */
1791 else if (m->move_insn)
1792 {
1793 rtx i1, temp, seq;
1794
1795 for (count = m->consec; count >= 0; count--)
1796 {
1797 /* If this is the first insn of a library call sequence,
1798 skip to the end. */
1799 if (GET_CODE (p) != NOTE
1800 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1801 p = XEXP (temp, 0);
1802
1803 /* If this is the last insn of a libcall sequence, then
1804 delete every insn in the sequence except the last.
1805 The last insn is handled in the normal manner. */
1806 if (GET_CODE (p) != NOTE
1807 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1808 {
1809 temp = XEXP (temp, 0);
1810 while (temp != p)
1811 temp = delete_insn (temp);
1812 }
1813
1814 temp = p;
1815 p = delete_insn (p);
1816
1817 /* simplify_giv_expr expects that it can walk the insns
1818 at m->insn forwards and see this old sequence we are
1819 tossing here. delete_insn does preserve the next
1820 pointers, but when we skip over a NOTE we must fix
1821 it up. Otherwise that code walks into the non-deleted
1822 insn stream. */
1823 while (p && GET_CODE (p) == NOTE)
1824 p = NEXT_INSN (temp) = NEXT_INSN (p);
1825 }
1826
1827 start_sequence ();
1828 emit_move_insn (m->set_dest, m->set_src);
1829 temp = get_insns ();
1830 seq = gen_sequence ();
1831 end_sequence ();
1832
1833 add_label_notes (m->set_src, temp);
1834
1835 i1 = loop_insn_hoist (loop, seq);
1836 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1837 set_unique_reg_note (i1,
1838 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1839 m->set_src);
1840
1841 if (loop_dump_stream)
1842 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1843
1844 /* The more regs we move, the less we like moving them. */
1845 threshold -= 3;
1846 }
1847 else
1848 {
1849 for (count = m->consec; count >= 0; count--)
1850 {
1851 rtx i1, temp;
1852
1853 /* If first insn of libcall sequence, skip to end. */
1854 /* Do this at start of loop, since p is guaranteed to
1855 be an insn here. */
1856 if (GET_CODE (p) != NOTE
1857 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1858 p = XEXP (temp, 0);
1859
1860 /* If last insn of libcall sequence, move all
1861 insns except the last before the loop. The last
1862 insn is handled in the normal manner. */
1863 if (GET_CODE (p) != NOTE
1864 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1865 {
1866 rtx fn_address = 0;
1867 rtx fn_reg = 0;
1868 rtx fn_address_insn = 0;
1869
1870 first = 0;
1871 for (temp = XEXP (temp, 0); temp != p;
1872 temp = NEXT_INSN (temp))
1873 {
1874 rtx body;
1875 rtx n;
1876 rtx next;
1877
1878 if (GET_CODE (temp) == NOTE)
1879 continue;
1880
1881 body = PATTERN (temp);
1882
1883 /* Find the next insn after TEMP,
1884 not counting USE or NOTE insns. */
1885 for (next = NEXT_INSN (temp); next != p;
1886 next = NEXT_INSN (next))
1887 if (! (GET_CODE (next) == INSN
1888 && GET_CODE (PATTERN (next)) == USE)
1889 && GET_CODE (next) != NOTE)
1890 break;
1891
1892 /* If that is the call, this may be the insn
1893 that loads the function address.
1894
1895 Extract the function address from the insn
1896 that loads it into a register.
1897 If this insn was cse'd, we get incorrect code.
1898
1899 So emit a new move insn that copies the
1900 function address into the register that the
1901 call insn will use. flow.c will delete any
1902 redundant stores that we have created. */
1903 if (GET_CODE (next) == CALL_INSN
1904 && GET_CODE (body) == SET
1905 && GET_CODE (SET_DEST (body)) == REG
1906 && (n = find_reg_note (temp, REG_EQUAL,
1907 NULL_RTX)))
1908 {
1909 fn_reg = SET_SRC (body);
1910 if (GET_CODE (fn_reg) != REG)
1911 fn_reg = SET_DEST (body);
1912 fn_address = XEXP (n, 0);
1913 fn_address_insn = temp;
1914 }
1915 /* We have the call insn.
1916 If it uses the register we suspect it might,
1917 load it with the correct address directly. */
1918 if (GET_CODE (temp) == CALL_INSN
1919 && fn_address != 0
1920 && reg_referenced_p (fn_reg, body))
1921 loop_insn_emit_after (loop, 0, fn_address_insn,
1922 gen_move_insn
1923 (fn_reg, fn_address));
1924
1925 if (GET_CODE (temp) == CALL_INSN)
1926 {
1927 i1 = loop_call_insn_hoist (loop, body);
1928 /* Because the USAGE information potentially
1929 contains objects other than hard registers
1930 we need to copy it. */
1931 if (CALL_INSN_FUNCTION_USAGE (temp))
1932 CALL_INSN_FUNCTION_USAGE (i1)
1933 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1934 }
1935 else
1936 i1 = loop_insn_hoist (loop, body);
1937 if (first == 0)
1938 first = i1;
1939 if (temp == fn_address_insn)
1940 fn_address_insn = i1;
1941 REG_NOTES (i1) = REG_NOTES (temp);
1942 REG_NOTES (temp) = NULL;
1943 delete_insn (temp);
1944 }
1945 if (new_start == 0)
1946 new_start = first;
1947 }
1948 if (m->savemode != VOIDmode)
1949 {
1950 /* P sets REG to zero; but we should clear only
1951 the bits that are not covered by the mode
1952 m->savemode. */
1953 rtx reg = m->set_dest;
1954 rtx sequence;
1955 rtx tem;
1956
1957 start_sequence ();
1958 tem = expand_simple_binop
1959 (GET_MODE (reg), AND, reg,
1960 GEN_INT ((((HOST_WIDE_INT) 1
1961 << GET_MODE_BITSIZE (m->savemode)))
1962 - 1),
1963 reg, 1, OPTAB_LIB_WIDEN);
1964 if (tem == 0)
1965 abort ();
1966 if (tem != reg)
1967 emit_move_insn (reg, tem);
1968 sequence = gen_sequence ();
1969 end_sequence ();
1970 i1 = loop_insn_hoist (loop, sequence);
1971 }
1972 else if (GET_CODE (p) == CALL_INSN)
1973 {
1974 i1 = loop_call_insn_hoist (loop, PATTERN (p));
1975 /* Because the USAGE information potentially
1976 contains objects other than hard registers
1977 we need to copy it. */
1978 if (CALL_INSN_FUNCTION_USAGE (p))
1979 CALL_INSN_FUNCTION_USAGE (i1)
1980 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1981 }
1982 else if (count == m->consec && m->move_insn_first)
1983 {
1984 rtx seq;
1985 /* The SET_SRC might not be invariant, so we must
1986 use the REG_EQUAL note. */
1987 start_sequence ();
1988 emit_move_insn (m->set_dest, m->set_src);
1989 temp = get_insns ();
1990 seq = gen_sequence ();
1991 end_sequence ();
1992
1993 add_label_notes (m->set_src, temp);
1994
1995 i1 = loop_insn_hoist (loop, seq);
1996 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1997 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
1998 : REG_EQUAL, m->set_src);
1999 }
2000 else
2001 i1 = loop_insn_hoist (loop, PATTERN (p));
2002
2003 if (REG_NOTES (i1) == 0)
2004 {
2005 REG_NOTES (i1) = REG_NOTES (p);
2006 REG_NOTES (p) = NULL;
2007
2008 /* If there is a REG_EQUAL note present whose value
2009 is not loop invariant, then delete it, since it
2010 may cause problems with later optimization passes.
2011 It is possible for cse to create such notes
2012 like this as a result of record_jump_cond. */
2013
2014 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2015 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2016 remove_note (i1, temp);
2017 }
2018
2019 if (new_start == 0)
2020 new_start = i1;
2021
2022 if (loop_dump_stream)
2023 fprintf (loop_dump_stream, " moved to %d",
2024 INSN_UID (i1));
2025
2026 /* If library call, now fix the REG_NOTES that contain
2027 insn pointers, namely REG_LIBCALL on FIRST
2028 and REG_RETVAL on I1. */
2029 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2030 {
2031 XEXP (temp, 0) = first;
2032 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2033 XEXP (temp, 0) = i1;
2034 }
2035
2036 temp = p;
2037 delete_insn (p);
2038 p = NEXT_INSN (p);
2039
2040 /* simplify_giv_expr expects that it can walk the insns
2041 at m->insn forwards and see this old sequence we are
2042 tossing here. delete_insn does preserve the next
2043 pointers, but when we skip over a NOTE we must fix
2044 it up. Otherwise that code walks into the non-deleted
2045 insn stream. */
2046 while (p && GET_CODE (p) == NOTE)
2047 p = NEXT_INSN (temp) = NEXT_INSN (p);
2048 }
2049
2050 /* The more regs we move, the less we like moving them. */
2051 threshold -= 3;
2052 }
2053
2054 /* Any other movable that loads the same register
2055 MUST be moved. */
2056 already_moved[regno] = 1;
2057
2058 /* This reg has been moved out of one loop. */
2059 regs->array[regno].moved_once = 1;
2060
2061 /* The reg set here is now invariant. */
2062 if (! m->partial)
2063 regs->array[regno].set_in_loop = 0;
2064
2065 m->done = 1;
2066
2067 /* Change the length-of-life info for the register
2068 to say it lives at least the full length of this loop.
2069 This will help guide optimizations in outer loops. */
2070
2071 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2072 /* This is the old insn before all the moved insns.
2073 We can't use the moved insn because it is out of range
2074 in uid_luid. Only the old insns have luids. */
2075 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2076 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2077 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2078
2079 /* Combine with this moved insn any other matching movables. */
2080
2081 if (! m->partial)
2082 for (m1 = movables->head; m1; m1 = m1->next)
2083 if (m1->match == m)
2084 {
2085 rtx temp;
2086
2087 /* Schedule the reg loaded by M1
2088 for replacement so that shares the reg of M.
2089 If the modes differ (only possible in restricted
2090 circumstances, make a SUBREG.
2091
2092 Note this assumes that the target dependent files
2093 treat REG and SUBREG equally, including within
2094 GO_IF_LEGITIMATE_ADDRESS and in all the
2095 predicates since we never verify that replacing the
2096 original register with a SUBREG results in a
2097 recognizable insn. */
2098 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2099 reg_map[m1->regno] = m->set_dest;
2100 else
2101 reg_map[m1->regno]
2102 = gen_lowpart_common (GET_MODE (m1->set_dest),
2103 m->set_dest);
2104
2105 /* Get rid of the matching insn
2106 and prevent further processing of it. */
2107 m1->done = 1;
2108
2109 /* if library call, delete all insns. */
2110 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2111 NULL_RTX)))
2112 delete_insn_chain (XEXP (temp, 0), m1->insn);
2113 else
2114 delete_insn (m1->insn);
2115
2116 /* Any other movable that loads the same register
2117 MUST be moved. */
2118 already_moved[m1->regno] = 1;
2119
2120 /* The reg merged here is now invariant,
2121 if the reg it matches is invariant. */
2122 if (! m->partial)
2123 regs->array[m1->regno].set_in_loop = 0;
2124 }
2125 }
2126 else if (loop_dump_stream)
2127 fprintf (loop_dump_stream, "not desirable");
2128 }
2129 else if (loop_dump_stream && !m->match)
2130 fprintf (loop_dump_stream, "not safe");
2131
2132 if (loop_dump_stream)
2133 fprintf (loop_dump_stream, "\n");
2134 }
2135
2136 if (new_start == 0)
2137 new_start = loop_start;
2138
2139 /* Go through all the instructions in the loop, making
2140 all the register substitutions scheduled in REG_MAP. */
2141 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2142 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2143 || GET_CODE (p) == CALL_INSN)
2144 {
2145 replace_regs (PATTERN (p), reg_map, nregs, 0);
2146 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2147 INSN_CODE (p) = -1;
2148 }
2149
2150 /* Clean up. */
2151 free (reg_map);
2152 free (already_moved);
2153 }
2154
2155
2156 static void
2157 loop_movables_add (movables, m)
2158 struct loop_movables *movables;
2159 struct movable *m;
2160 {
2161 if (movables->head == 0)
2162 movables->head = m;
2163 else
2164 movables->last->next = m;
2165 movables->last = m;
2166 }
2167
2168
2169 static void
2170 loop_movables_free (movables)
2171 struct loop_movables *movables;
2172 {
2173 struct movable *m;
2174 struct movable *m_next;
2175
2176 for (m = movables->head; m; m = m_next)
2177 {
2178 m_next = m->next;
2179 free (m);
2180 }
2181 }
2182 \f
2183 #if 0
2184 /* Scan X and replace the address of any MEM in it with ADDR.
2185 REG is the address that MEM should have before the replacement. */
2186
2187 static void
2188 replace_call_address (x, reg, addr)
2189 rtx x, reg, addr;
2190 {
2191 enum rtx_code code;
2192 int i;
2193 const char *fmt;
2194
2195 if (x == 0)
2196 return;
2197 code = GET_CODE (x);
2198 switch (code)
2199 {
2200 case PC:
2201 case CC0:
2202 case CONST_INT:
2203 case CONST_DOUBLE:
2204 case CONST:
2205 case SYMBOL_REF:
2206 case LABEL_REF:
2207 case REG:
2208 return;
2209
2210 case SET:
2211 /* Short cut for very common case. */
2212 replace_call_address (XEXP (x, 1), reg, addr);
2213 return;
2214
2215 case CALL:
2216 /* Short cut for very common case. */
2217 replace_call_address (XEXP (x, 0), reg, addr);
2218 return;
2219
2220 case MEM:
2221 /* If this MEM uses a reg other than the one we expected,
2222 something is wrong. */
2223 if (XEXP (x, 0) != reg)
2224 abort ();
2225 XEXP (x, 0) = addr;
2226 return;
2227
2228 default:
2229 break;
2230 }
2231
2232 fmt = GET_RTX_FORMAT (code);
2233 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2234 {
2235 if (fmt[i] == 'e')
2236 replace_call_address (XEXP (x, i), reg, addr);
2237 else if (fmt[i] == 'E')
2238 {
2239 int j;
2240 for (j = 0; j < XVECLEN (x, i); j++)
2241 replace_call_address (XVECEXP (x, i, j), reg, addr);
2242 }
2243 }
2244 }
2245 #endif
2246 \f
2247 /* Return the number of memory refs to addresses that vary
2248 in the rtx X. */
2249
2250 static int
2251 count_nonfixed_reads (loop, x)
2252 const struct loop *loop;
2253 rtx x;
2254 {
2255 enum rtx_code code;
2256 int i;
2257 const char *fmt;
2258 int value;
2259
2260 if (x == 0)
2261 return 0;
2262
2263 code = GET_CODE (x);
2264 switch (code)
2265 {
2266 case PC:
2267 case CC0:
2268 case CONST_INT:
2269 case CONST_DOUBLE:
2270 case CONST:
2271 case SYMBOL_REF:
2272 case LABEL_REF:
2273 case REG:
2274 return 0;
2275
2276 case MEM:
2277 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2278 + count_nonfixed_reads (loop, XEXP (x, 0)));
2279
2280 default:
2281 break;
2282 }
2283
2284 value = 0;
2285 fmt = GET_RTX_FORMAT (code);
2286 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2287 {
2288 if (fmt[i] == 'e')
2289 value += count_nonfixed_reads (loop, XEXP (x, i));
2290 if (fmt[i] == 'E')
2291 {
2292 int j;
2293 for (j = 0; j < XVECLEN (x, i); j++)
2294 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2295 }
2296 }
2297 return value;
2298 }
2299 \f
2300 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2301 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2302 `unknown_address_altered', `unknown_constant_address_altered', and
2303 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2304 list `store_mems' in LOOP. */
2305
2306 static void
2307 prescan_loop (loop)
2308 struct loop *loop;
2309 {
2310 int level = 1;
2311 rtx insn;
2312 struct loop_info *loop_info = LOOP_INFO (loop);
2313 rtx start = loop->start;
2314 rtx end = loop->end;
2315 /* The label after END. Jumping here is just like falling off the
2316 end of the loop. We use next_nonnote_insn instead of next_label
2317 as a hedge against the (pathological) case where some actual insn
2318 might end up between the two. */
2319 rtx exit_target = next_nonnote_insn (end);
2320
2321 loop_info->has_indirect_jump = indirect_jump_in_function;
2322 loop_info->pre_header_has_call = 0;
2323 loop_info->has_call = 0;
2324 loop_info->has_nonconst_call = 0;
2325 loop_info->has_volatile = 0;
2326 loop_info->has_tablejump = 0;
2327 loop_info->has_multiple_exit_targets = 0;
2328 loop->level = 1;
2329
2330 loop_info->unknown_address_altered = 0;
2331 loop_info->unknown_constant_address_altered = 0;
2332 loop_info->store_mems = NULL_RTX;
2333 loop_info->first_loop_store_insn = NULL_RTX;
2334 loop_info->mems_idx = 0;
2335 loop_info->num_mem_sets = 0;
2336
2337
2338 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2339 insn = PREV_INSN (insn))
2340 {
2341 if (GET_CODE (insn) == CALL_INSN)
2342 {
2343 loop_info->pre_header_has_call = 1;
2344 break;
2345 }
2346 }
2347
2348 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2349 insn = NEXT_INSN (insn))
2350 {
2351 if (GET_CODE (insn) == NOTE)
2352 {
2353 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2354 {
2355 ++level;
2356 /* Count number of loops contained in this one. */
2357 loop->level++;
2358 }
2359 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2360 {
2361 --level;
2362 }
2363 }
2364 else if (GET_CODE (insn) == CALL_INSN)
2365 {
2366 if (! CONST_OR_PURE_CALL_P (insn))
2367 {
2368 loop_info->unknown_address_altered = 1;
2369 loop_info->has_nonconst_call = 1;
2370 }
2371 loop_info->has_call = 1;
2372 }
2373 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2374 {
2375 rtx label1 = NULL_RTX;
2376 rtx label2 = NULL_RTX;
2377
2378 if (volatile_refs_p (PATTERN (insn)))
2379 loop_info->has_volatile = 1;
2380
2381 if (GET_CODE (insn) == JUMP_INSN
2382 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2383 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2384 loop_info->has_tablejump = 1;
2385
2386 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2387 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2388 loop_info->first_loop_store_insn = insn;
2389
2390 if (! loop_info->has_multiple_exit_targets
2391 && GET_CODE (insn) == JUMP_INSN
2392 && GET_CODE (PATTERN (insn)) == SET
2393 && SET_DEST (PATTERN (insn)) == pc_rtx)
2394 {
2395 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2396 {
2397 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2398 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2399 }
2400 else
2401 {
2402 label1 = SET_SRC (PATTERN (insn));
2403 }
2404
2405 do
2406 {
2407 if (label1 && label1 != pc_rtx)
2408 {
2409 if (GET_CODE (label1) != LABEL_REF)
2410 {
2411 /* Something tricky. */
2412 loop_info->has_multiple_exit_targets = 1;
2413 break;
2414 }
2415 else if (XEXP (label1, 0) != exit_target
2416 && LABEL_OUTSIDE_LOOP_P (label1))
2417 {
2418 /* A jump outside the current loop. */
2419 loop_info->has_multiple_exit_targets = 1;
2420 break;
2421 }
2422 }
2423
2424 label1 = label2;
2425 label2 = NULL_RTX;
2426 }
2427 while (label1);
2428 }
2429 }
2430 else if (GET_CODE (insn) == RETURN)
2431 loop_info->has_multiple_exit_targets = 1;
2432 }
2433
2434 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2435 if (/* An exception thrown by a called function might land us
2436 anywhere. */
2437 ! loop_info->has_nonconst_call
2438 /* We don't want loads for MEMs moved to a location before the
2439 one at which their stack memory becomes allocated. (Note
2440 that this is not a problem for malloc, etc., since those
2441 require actual function calls. */
2442 && ! current_function_calls_alloca
2443 /* There are ways to leave the loop other than falling off the
2444 end. */
2445 && ! loop_info->has_multiple_exit_targets)
2446 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2447 insn = NEXT_INSN (insn))
2448 for_each_rtx (&insn, insert_loop_mem, loop_info);
2449
2450 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2451 that loop_invariant_p and load_mems can use true_dependence
2452 to determine what is really clobbered. */
2453 if (loop_info->unknown_address_altered)
2454 {
2455 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2456
2457 loop_info->store_mems
2458 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2459 }
2460 if (loop_info->unknown_constant_address_altered)
2461 {
2462 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2463
2464 RTX_UNCHANGING_P (mem) = 1;
2465 loop_info->store_mems
2466 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2467 }
2468 }
2469 \f
2470 /* Scan the function looking for loops. Record the start and end of each loop.
2471 Also mark as invalid loops any loops that contain a setjmp or are branched
2472 to from outside the loop. */
2473
2474 static void
2475 find_and_verify_loops (f, loops)
2476 rtx f;
2477 struct loops *loops;
2478 {
2479 rtx insn;
2480 rtx label;
2481 int num_loops;
2482 struct loop *current_loop;
2483 struct loop *next_loop;
2484 struct loop *loop;
2485
2486 num_loops = loops->num;
2487
2488 compute_luids (f, NULL_RTX, 0);
2489
2490 /* If there are jumps to undefined labels,
2491 treat them as jumps out of any/all loops.
2492 This also avoids writing past end of tables when there are no loops. */
2493 uid_loop[0] = NULL;
2494
2495 /* Find boundaries of loops, mark which loops are contained within
2496 loops, and invalidate loops that have setjmp. */
2497
2498 num_loops = 0;
2499 current_loop = NULL;
2500 for (insn = f; insn; insn = NEXT_INSN (insn))
2501 {
2502 if (GET_CODE (insn) == NOTE)
2503 switch (NOTE_LINE_NUMBER (insn))
2504 {
2505 case NOTE_INSN_LOOP_BEG:
2506 next_loop = loops->array + num_loops;
2507 next_loop->num = num_loops;
2508 num_loops++;
2509 next_loop->start = insn;
2510 next_loop->outer = current_loop;
2511 current_loop = next_loop;
2512 break;
2513
2514 case NOTE_INSN_LOOP_CONT:
2515 current_loop->cont = insn;
2516 break;
2517
2518 case NOTE_INSN_LOOP_VTOP:
2519 current_loop->vtop = insn;
2520 break;
2521
2522 case NOTE_INSN_LOOP_END:
2523 if (! current_loop)
2524 abort ();
2525
2526 current_loop->end = insn;
2527 current_loop = current_loop->outer;
2528 break;
2529
2530 default:
2531 break;
2532 }
2533
2534 if (GET_CODE (insn) == CALL_INSN
2535 && find_reg_note (insn, REG_SETJMP, NULL))
2536 {
2537 /* In this case, we must invalidate our current loop and any
2538 enclosing loop. */
2539 for (loop = current_loop; loop; loop = loop->outer)
2540 {
2541 loop->invalid = 1;
2542 if (loop_dump_stream)
2543 fprintf (loop_dump_stream,
2544 "\nLoop at %d ignored due to setjmp.\n",
2545 INSN_UID (loop->start));
2546 }
2547 }
2548
2549 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2550 enclosing loop, but this doesn't matter. */
2551 uid_loop[INSN_UID (insn)] = current_loop;
2552 }
2553
2554 /* Any loop containing a label used in an initializer must be invalidated,
2555 because it can be jumped into from anywhere. */
2556
2557 for (label = forced_labels; label; label = XEXP (label, 1))
2558 {
2559 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2560 loop; loop = loop->outer)
2561 loop->invalid = 1;
2562 }
2563
2564 /* Any loop containing a label used for an exception handler must be
2565 invalidated, because it can be jumped into from anywhere. */
2566
2567 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2568 {
2569 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2570 loop; loop = loop->outer)
2571 loop->invalid = 1;
2572 }
2573
2574 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2575 loop that it is not contained within, that loop is marked invalid.
2576 If any INSN or CALL_INSN uses a label's address, then the loop containing
2577 that label is marked invalid, because it could be jumped into from
2578 anywhere.
2579
2580 Also look for blocks of code ending in an unconditional branch that
2581 exits the loop. If such a block is surrounded by a conditional
2582 branch around the block, move the block elsewhere (see below) and
2583 invert the jump to point to the code block. This may eliminate a
2584 label in our loop and will simplify processing by both us and a
2585 possible second cse pass. */
2586
2587 for (insn = f; insn; insn = NEXT_INSN (insn))
2588 if (INSN_P (insn))
2589 {
2590 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2591
2592 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2593 {
2594 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2595 if (note)
2596 {
2597 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2598 loop; loop = loop->outer)
2599 loop->invalid = 1;
2600 }
2601 }
2602
2603 if (GET_CODE (insn) != JUMP_INSN)
2604 continue;
2605
2606 mark_loop_jump (PATTERN (insn), this_loop);
2607
2608 /* See if this is an unconditional branch outside the loop. */
2609 if (this_loop
2610 && (GET_CODE (PATTERN (insn)) == RETURN
2611 || (any_uncondjump_p (insn)
2612 && onlyjump_p (insn)
2613 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2614 != this_loop)))
2615 && get_max_uid () < max_uid_for_loop)
2616 {
2617 rtx p;
2618 rtx our_next = next_real_insn (insn);
2619 rtx last_insn_to_move = NEXT_INSN (insn);
2620 struct loop *dest_loop;
2621 struct loop *outer_loop = NULL;
2622
2623 /* Go backwards until we reach the start of the loop, a label,
2624 or a JUMP_INSN. */
2625 for (p = PREV_INSN (insn);
2626 GET_CODE (p) != CODE_LABEL
2627 && ! (GET_CODE (p) == NOTE
2628 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2629 && GET_CODE (p) != JUMP_INSN;
2630 p = PREV_INSN (p))
2631 ;
2632
2633 /* Check for the case where we have a jump to an inner nested
2634 loop, and do not perform the optimization in that case. */
2635
2636 if (JUMP_LABEL (insn))
2637 {
2638 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2639 if (dest_loop)
2640 {
2641 for (outer_loop = dest_loop; outer_loop;
2642 outer_loop = outer_loop->outer)
2643 if (outer_loop == this_loop)
2644 break;
2645 }
2646 }
2647
2648 /* Make sure that the target of P is within the current loop. */
2649
2650 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2651 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2652 outer_loop = this_loop;
2653
2654 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2655 we have a block of code to try to move.
2656
2657 We look backward and then forward from the target of INSN
2658 to find a BARRIER at the same loop depth as the target.
2659 If we find such a BARRIER, we make a new label for the start
2660 of the block, invert the jump in P and point it to that label,
2661 and move the block of code to the spot we found. */
2662
2663 if (! outer_loop
2664 && GET_CODE (p) == JUMP_INSN
2665 && JUMP_LABEL (p) != 0
2666 /* Just ignore jumps to labels that were never emitted.
2667 These always indicate compilation errors. */
2668 && INSN_UID (JUMP_LABEL (p)) != 0
2669 && any_condjump_p (p) && onlyjump_p (p)
2670 && next_real_insn (JUMP_LABEL (p)) == our_next
2671 /* If it's not safe to move the sequence, then we
2672 mustn't try. */
2673 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2674 &last_insn_to_move))
2675 {
2676 rtx target
2677 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2678 struct loop *target_loop = uid_loop[INSN_UID (target)];
2679 rtx loc, loc2;
2680 rtx tmp;
2681
2682 /* Search for possible garbage past the conditional jumps
2683 and look for the last barrier. */
2684 for (tmp = last_insn_to_move;
2685 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2686 if (GET_CODE (tmp) == BARRIER)
2687 last_insn_to_move = tmp;
2688
2689 for (loc = target; loc; loc = PREV_INSN (loc))
2690 if (GET_CODE (loc) == BARRIER
2691 /* Don't move things inside a tablejump. */
2692 && ((loc2 = next_nonnote_insn (loc)) == 0
2693 || GET_CODE (loc2) != CODE_LABEL
2694 || (loc2 = next_nonnote_insn (loc2)) == 0
2695 || GET_CODE (loc2) != JUMP_INSN
2696 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2697 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2698 && uid_loop[INSN_UID (loc)] == target_loop)
2699 break;
2700
2701 if (loc == 0)
2702 for (loc = target; loc; loc = NEXT_INSN (loc))
2703 if (GET_CODE (loc) == BARRIER
2704 /* Don't move things inside a tablejump. */
2705 && ((loc2 = next_nonnote_insn (loc)) == 0
2706 || GET_CODE (loc2) != CODE_LABEL
2707 || (loc2 = next_nonnote_insn (loc2)) == 0
2708 || GET_CODE (loc2) != JUMP_INSN
2709 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2710 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2711 && uid_loop[INSN_UID (loc)] == target_loop)
2712 break;
2713
2714 if (loc)
2715 {
2716 rtx cond_label = JUMP_LABEL (p);
2717 rtx new_label = get_label_after (p);
2718
2719 /* Ensure our label doesn't go away. */
2720 LABEL_NUSES (cond_label)++;
2721
2722 /* Verify that uid_loop is large enough and that
2723 we can invert P. */
2724 if (invert_jump (p, new_label, 1))
2725 {
2726 rtx q, r;
2727
2728 /* If no suitable BARRIER was found, create a suitable
2729 one before TARGET. Since TARGET is a fall through
2730 path, we'll need to insert an jump around our block
2731 and add a BARRIER before TARGET.
2732
2733 This creates an extra unconditional jump outside
2734 the loop. However, the benefits of removing rarely
2735 executed instructions from inside the loop usually
2736 outweighs the cost of the extra unconditional jump
2737 outside the loop. */
2738 if (loc == 0)
2739 {
2740 rtx temp;
2741
2742 temp = gen_jump (JUMP_LABEL (insn));
2743 temp = emit_jump_insn_before (temp, target);
2744 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2745 LABEL_NUSES (JUMP_LABEL (insn))++;
2746 loc = emit_barrier_before (target);
2747 }
2748
2749 /* Include the BARRIER after INSN and copy the
2750 block after LOC. */
2751 if (squeeze_notes (&new_label, &last_insn_to_move))
2752 abort ();
2753 reorder_insns (new_label, last_insn_to_move, loc);
2754
2755 /* All those insns are now in TARGET_LOOP. */
2756 for (q = new_label;
2757 q != NEXT_INSN (last_insn_to_move);
2758 q = NEXT_INSN (q))
2759 uid_loop[INSN_UID (q)] = target_loop;
2760
2761 /* The label jumped to by INSN is no longer a loop
2762 exit. Unless INSN does not have a label (e.g.,
2763 it is a RETURN insn), search loop->exit_labels
2764 to find its label_ref, and remove it. Also turn
2765 off LABEL_OUTSIDE_LOOP_P bit. */
2766 if (JUMP_LABEL (insn))
2767 {
2768 for (q = 0, r = this_loop->exit_labels;
2769 r;
2770 q = r, r = LABEL_NEXTREF (r))
2771 if (XEXP (r, 0) == JUMP_LABEL (insn))
2772 {
2773 LABEL_OUTSIDE_LOOP_P (r) = 0;
2774 if (q)
2775 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2776 else
2777 this_loop->exit_labels = LABEL_NEXTREF (r);
2778 break;
2779 }
2780
2781 for (loop = this_loop; loop && loop != target_loop;
2782 loop = loop->outer)
2783 loop->exit_count--;
2784
2785 /* If we didn't find it, then something is
2786 wrong. */
2787 if (! r)
2788 abort ();
2789 }
2790
2791 /* P is now a jump outside the loop, so it must be put
2792 in loop->exit_labels, and marked as such.
2793 The easiest way to do this is to just call
2794 mark_loop_jump again for P. */
2795 mark_loop_jump (PATTERN (p), this_loop);
2796
2797 /* If INSN now jumps to the insn after it,
2798 delete INSN. */
2799 if (JUMP_LABEL (insn) != 0
2800 && (next_real_insn (JUMP_LABEL (insn))
2801 == next_real_insn (insn)))
2802 delete_related_insns (insn);
2803 }
2804
2805 /* Continue the loop after where the conditional
2806 branch used to jump, since the only branch insn
2807 in the block (if it still remains) is an inter-loop
2808 branch and hence needs no processing. */
2809 insn = NEXT_INSN (cond_label);
2810
2811 if (--LABEL_NUSES (cond_label) == 0)
2812 delete_related_insns (cond_label);
2813
2814 /* This loop will be continued with NEXT_INSN (insn). */
2815 insn = PREV_INSN (insn);
2816 }
2817 }
2818 }
2819 }
2820 }
2821
2822 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2823 loops it is contained in, mark the target loop invalid.
2824
2825 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2826
2827 static void
2828 mark_loop_jump (x, loop)
2829 rtx x;
2830 struct loop *loop;
2831 {
2832 struct loop *dest_loop;
2833 struct loop *outer_loop;
2834 int i;
2835
2836 switch (GET_CODE (x))
2837 {
2838 case PC:
2839 case USE:
2840 case CLOBBER:
2841 case REG:
2842 case MEM:
2843 case CONST_INT:
2844 case CONST_DOUBLE:
2845 case RETURN:
2846 return;
2847
2848 case CONST:
2849 /* There could be a label reference in here. */
2850 mark_loop_jump (XEXP (x, 0), loop);
2851 return;
2852
2853 case PLUS:
2854 case MINUS:
2855 case MULT:
2856 mark_loop_jump (XEXP (x, 0), loop);
2857 mark_loop_jump (XEXP (x, 1), loop);
2858 return;
2859
2860 case LO_SUM:
2861 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2862 mark_loop_jump (XEXP (x, 1), loop);
2863 return;
2864
2865 case SIGN_EXTEND:
2866 case ZERO_EXTEND:
2867 mark_loop_jump (XEXP (x, 0), loop);
2868 return;
2869
2870 case LABEL_REF:
2871 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2872
2873 /* Link together all labels that branch outside the loop. This
2874 is used by final_[bg]iv_value and the loop unrolling code. Also
2875 mark this LABEL_REF so we know that this branch should predict
2876 false. */
2877
2878 /* A check to make sure the label is not in an inner nested loop,
2879 since this does not count as a loop exit. */
2880 if (dest_loop)
2881 {
2882 for (outer_loop = dest_loop; outer_loop;
2883 outer_loop = outer_loop->outer)
2884 if (outer_loop == loop)
2885 break;
2886 }
2887 else
2888 outer_loop = NULL;
2889
2890 if (loop && ! outer_loop)
2891 {
2892 LABEL_OUTSIDE_LOOP_P (x) = 1;
2893 LABEL_NEXTREF (x) = loop->exit_labels;
2894 loop->exit_labels = x;
2895
2896 for (outer_loop = loop;
2897 outer_loop && outer_loop != dest_loop;
2898 outer_loop = outer_loop->outer)
2899 outer_loop->exit_count++;
2900 }
2901
2902 /* If this is inside a loop, but not in the current loop or one enclosed
2903 by it, it invalidates at least one loop. */
2904
2905 if (! dest_loop)
2906 return;
2907
2908 /* We must invalidate every nested loop containing the target of this
2909 label, except those that also contain the jump insn. */
2910
2911 for (; dest_loop; dest_loop = dest_loop->outer)
2912 {
2913 /* Stop when we reach a loop that also contains the jump insn. */
2914 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2915 if (dest_loop == outer_loop)
2916 return;
2917
2918 /* If we get here, we know we need to invalidate a loop. */
2919 if (loop_dump_stream && ! dest_loop->invalid)
2920 fprintf (loop_dump_stream,
2921 "\nLoop at %d ignored due to multiple entry points.\n",
2922 INSN_UID (dest_loop->start));
2923
2924 dest_loop->invalid = 1;
2925 }
2926 return;
2927
2928 case SET:
2929 /* If this is not setting pc, ignore. */
2930 if (SET_DEST (x) == pc_rtx)
2931 mark_loop_jump (SET_SRC (x), loop);
2932 return;
2933
2934 case IF_THEN_ELSE:
2935 mark_loop_jump (XEXP (x, 1), loop);
2936 mark_loop_jump (XEXP (x, 2), loop);
2937 return;
2938
2939 case PARALLEL:
2940 case ADDR_VEC:
2941 for (i = 0; i < XVECLEN (x, 0); i++)
2942 mark_loop_jump (XVECEXP (x, 0, i), loop);
2943 return;
2944
2945 case ADDR_DIFF_VEC:
2946 for (i = 0; i < XVECLEN (x, 1); i++)
2947 mark_loop_jump (XVECEXP (x, 1, i), loop);
2948 return;
2949
2950 default:
2951 /* Strictly speaking this is not a jump into the loop, only a possible
2952 jump out of the loop. However, we have no way to link the destination
2953 of this jump onto the list of exit labels. To be safe we mark this
2954 loop and any containing loops as invalid. */
2955 if (loop)
2956 {
2957 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2958 {
2959 if (loop_dump_stream && ! outer_loop->invalid)
2960 fprintf (loop_dump_stream,
2961 "\nLoop at %d ignored due to unknown exit jump.\n",
2962 INSN_UID (outer_loop->start));
2963 outer_loop->invalid = 1;
2964 }
2965 }
2966 return;
2967 }
2968 }
2969 \f
2970 /* Return nonzero if there is a label in the range from
2971 insn INSN to and including the insn whose luid is END
2972 INSN must have an assigned luid (i.e., it must not have
2973 been previously created by loop.c). */
2974
2975 static int
2976 labels_in_range_p (insn, end)
2977 rtx insn;
2978 int end;
2979 {
2980 while (insn && INSN_LUID (insn) <= end)
2981 {
2982 if (GET_CODE (insn) == CODE_LABEL)
2983 return 1;
2984 insn = NEXT_INSN (insn);
2985 }
2986
2987 return 0;
2988 }
2989
2990 /* Record that a memory reference X is being set. */
2991
2992 static void
2993 note_addr_stored (x, y, data)
2994 rtx x;
2995 rtx y ATTRIBUTE_UNUSED;
2996 void *data ATTRIBUTE_UNUSED;
2997 {
2998 struct loop_info *loop_info = data;
2999
3000 if (x == 0 || GET_CODE (x) != MEM)
3001 return;
3002
3003 /* Count number of memory writes.
3004 This affects heuristics in strength_reduce. */
3005 loop_info->num_mem_sets++;
3006
3007 /* BLKmode MEM means all memory is clobbered. */
3008 if (GET_MODE (x) == BLKmode)
3009 {
3010 if (RTX_UNCHANGING_P (x))
3011 loop_info->unknown_constant_address_altered = 1;
3012 else
3013 loop_info->unknown_address_altered = 1;
3014
3015 return;
3016 }
3017
3018 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3019 loop_info->store_mems);
3020 }
3021
3022 /* X is a value modified by an INSN that references a biv inside a loop
3023 exit test (ie, X is somehow related to the value of the biv). If X
3024 is a pseudo that is used more than once, then the biv is (effectively)
3025 used more than once. DATA is a pointer to a loop_regs structure. */
3026
3027 static void
3028 note_set_pseudo_multiple_uses (x, y, data)
3029 rtx x;
3030 rtx y ATTRIBUTE_UNUSED;
3031 void *data;
3032 {
3033 struct loop_regs *regs = (struct loop_regs *) data;
3034
3035 if (x == 0)
3036 return;
3037
3038 while (GET_CODE (x) == STRICT_LOW_PART
3039 || GET_CODE (x) == SIGN_EXTRACT
3040 || GET_CODE (x) == ZERO_EXTRACT
3041 || GET_CODE (x) == SUBREG)
3042 x = XEXP (x, 0);
3043
3044 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3045 return;
3046
3047 /* If we do not have usage information, or if we know the register
3048 is used more than once, note that fact for check_dbra_loop. */
3049 if (REGNO (x) >= max_reg_before_loop
3050 || ! regs->array[REGNO (x)].single_usage
3051 || regs->array[REGNO (x)].single_usage == const0_rtx)
3052 regs->multiple_uses = 1;
3053 }
3054 \f
3055 /* Return nonzero if the rtx X is invariant over the current loop.
3056
3057 The value is 2 if we refer to something only conditionally invariant.
3058
3059 A memory ref is invariant if it is not volatile and does not conflict
3060 with anything stored in `loop_info->store_mems'. */
3061
3062 int
3063 loop_invariant_p (loop, x)
3064 const struct loop *loop;
3065 rtx x;
3066 {
3067 struct loop_info *loop_info = LOOP_INFO (loop);
3068 struct loop_regs *regs = LOOP_REGS (loop);
3069 int i;
3070 enum rtx_code code;
3071 const char *fmt;
3072 int conditional = 0;
3073 rtx mem_list_entry;
3074
3075 if (x == 0)
3076 return 1;
3077 code = GET_CODE (x);
3078 switch (code)
3079 {
3080 case CONST_INT:
3081 case CONST_DOUBLE:
3082 case SYMBOL_REF:
3083 case CONST:
3084 return 1;
3085
3086 case LABEL_REF:
3087 /* A LABEL_REF is normally invariant, however, if we are unrolling
3088 loops, and this label is inside the loop, then it isn't invariant.
3089 This is because each unrolled copy of the loop body will have
3090 a copy of this label. If this was invariant, then an insn loading
3091 the address of this label into a register might get moved outside
3092 the loop, and then each loop body would end up using the same label.
3093
3094 We don't know the loop bounds here though, so just fail for all
3095 labels. */
3096 if (flag_unroll_loops)
3097 return 0;
3098 else
3099 return 1;
3100
3101 case PC:
3102 case CC0:
3103 case UNSPEC_VOLATILE:
3104 return 0;
3105
3106 case REG:
3107 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3108 since the reg might be set by initialization within the loop. */
3109
3110 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3111 || x == arg_pointer_rtx)
3112 && ! current_function_has_nonlocal_goto)
3113 return 1;
3114
3115 if (LOOP_INFO (loop)->has_call
3116 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3117 return 0;
3118
3119 if (regs->array[REGNO (x)].set_in_loop < 0)
3120 return 2;
3121
3122 return regs->array[REGNO (x)].set_in_loop == 0;
3123
3124 case MEM:
3125 /* Volatile memory references must be rejected. Do this before
3126 checking for read-only items, so that volatile read-only items
3127 will be rejected also. */
3128 if (MEM_VOLATILE_P (x))
3129 return 0;
3130
3131 /* See if there is any dependence between a store and this load. */
3132 mem_list_entry = loop_info->store_mems;
3133 while (mem_list_entry)
3134 {
3135 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3136 x, rtx_varies_p))
3137 return 0;
3138
3139 mem_list_entry = XEXP (mem_list_entry, 1);
3140 }
3141
3142 /* It's not invalidated by a store in memory
3143 but we must still verify the address is invariant. */
3144 break;
3145
3146 case ASM_OPERANDS:
3147 /* Don't mess with insns declared volatile. */
3148 if (MEM_VOLATILE_P (x))
3149 return 0;
3150 break;
3151
3152 default:
3153 break;
3154 }
3155
3156 fmt = GET_RTX_FORMAT (code);
3157 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3158 {
3159 if (fmt[i] == 'e')
3160 {
3161 int tem = loop_invariant_p (loop, XEXP (x, i));
3162 if (tem == 0)
3163 return 0;
3164 if (tem == 2)
3165 conditional = 1;
3166 }
3167 else if (fmt[i] == 'E')
3168 {
3169 int j;
3170 for (j = 0; j < XVECLEN (x, i); j++)
3171 {
3172 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3173 if (tem == 0)
3174 return 0;
3175 if (tem == 2)
3176 conditional = 1;
3177 }
3178
3179 }
3180 }
3181
3182 return 1 + conditional;
3183 }
3184 \f
3185 /* Return nonzero if all the insns in the loop that set REG
3186 are INSN and the immediately following insns,
3187 and if each of those insns sets REG in an invariant way
3188 (not counting uses of REG in them).
3189
3190 The value is 2 if some of these insns are only conditionally invariant.
3191
3192 We assume that INSN itself is the first set of REG
3193 and that its source is invariant. */
3194
3195 static int
3196 consec_sets_invariant_p (loop, reg, n_sets, insn)
3197 const struct loop *loop;
3198 int n_sets;
3199 rtx reg, insn;
3200 {
3201 struct loop_regs *regs = LOOP_REGS (loop);
3202 rtx p = insn;
3203 unsigned int regno = REGNO (reg);
3204 rtx temp;
3205 /* Number of sets we have to insist on finding after INSN. */
3206 int count = n_sets - 1;
3207 int old = regs->array[regno].set_in_loop;
3208 int value = 0;
3209 int this;
3210
3211 /* If N_SETS hit the limit, we can't rely on its value. */
3212 if (n_sets == 127)
3213 return 0;
3214
3215 regs->array[regno].set_in_loop = 0;
3216
3217 while (count > 0)
3218 {
3219 enum rtx_code code;
3220 rtx set;
3221
3222 p = NEXT_INSN (p);
3223 code = GET_CODE (p);
3224
3225 /* If library call, skip to end of it. */
3226 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3227 p = XEXP (temp, 0);
3228
3229 this = 0;
3230 if (code == INSN
3231 && (set = single_set (p))
3232 && GET_CODE (SET_DEST (set)) == REG
3233 && REGNO (SET_DEST (set)) == regno)
3234 {
3235 this = loop_invariant_p (loop, SET_SRC (set));
3236 if (this != 0)
3237 value |= this;
3238 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3239 {
3240 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3241 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3242 notes are OK. */
3243 this = (CONSTANT_P (XEXP (temp, 0))
3244 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3245 && loop_invariant_p (loop, XEXP (temp, 0))));
3246 if (this != 0)
3247 value |= this;
3248 }
3249 }
3250 if (this != 0)
3251 count--;
3252 else if (code != NOTE)
3253 {
3254 regs->array[regno].set_in_loop = old;
3255 return 0;
3256 }
3257 }
3258
3259 regs->array[regno].set_in_loop = old;
3260 /* If loop_invariant_p ever returned 2, we return 2. */
3261 return 1 + (value & 2);
3262 }
3263
3264 #if 0
3265 /* I don't think this condition is sufficient to allow INSN
3266 to be moved, so we no longer test it. */
3267
3268 /* Return 1 if all insns in the basic block of INSN and following INSN
3269 that set REG are invariant according to TABLE. */
3270
3271 static int
3272 all_sets_invariant_p (reg, insn, table)
3273 rtx reg, insn;
3274 short *table;
3275 {
3276 rtx p = insn;
3277 int regno = REGNO (reg);
3278
3279 while (1)
3280 {
3281 enum rtx_code code;
3282 p = NEXT_INSN (p);
3283 code = GET_CODE (p);
3284 if (code == CODE_LABEL || code == JUMP_INSN)
3285 return 1;
3286 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3287 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3288 && REGNO (SET_DEST (PATTERN (p))) == regno)
3289 {
3290 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3291 return 0;
3292 }
3293 }
3294 }
3295 #endif /* 0 */
3296 \f
3297 /* Look at all uses (not sets) of registers in X. For each, if it is
3298 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3299 a different insn, set USAGE[REGNO] to const0_rtx. */
3300
3301 static void
3302 find_single_use_in_loop (regs, insn, x)
3303 struct loop_regs *regs;
3304 rtx insn;
3305 rtx x;
3306 {
3307 enum rtx_code code = GET_CODE (x);
3308 const char *fmt = GET_RTX_FORMAT (code);
3309 int i, j;
3310
3311 if (code == REG)
3312 regs->array[REGNO (x)].single_usage
3313 = (regs->array[REGNO (x)].single_usage != 0
3314 && regs->array[REGNO (x)].single_usage != insn)
3315 ? const0_rtx : insn;
3316
3317 else if (code == SET)
3318 {
3319 /* Don't count SET_DEST if it is a REG; otherwise count things
3320 in SET_DEST because if a register is partially modified, it won't
3321 show up as a potential movable so we don't care how USAGE is set
3322 for it. */
3323 if (GET_CODE (SET_DEST (x)) != REG)
3324 find_single_use_in_loop (regs, insn, SET_DEST (x));
3325 find_single_use_in_loop (regs, insn, SET_SRC (x));
3326 }
3327 else
3328 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3329 {
3330 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3331 find_single_use_in_loop (regs, insn, XEXP (x, i));
3332 else if (fmt[i] == 'E')
3333 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3334 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3335 }
3336 }
3337 \f
3338 /* Count and record any set in X which is contained in INSN. Update
3339 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3340 in X. */
3341
3342 static void
3343 count_one_set (regs, insn, x, last_set)
3344 struct loop_regs *regs;
3345 rtx insn, x;
3346 rtx *last_set;
3347 {
3348 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3349 /* Don't move a reg that has an explicit clobber.
3350 It's not worth the pain to try to do it correctly. */
3351 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3352
3353 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3354 {
3355 rtx dest = SET_DEST (x);
3356 while (GET_CODE (dest) == SUBREG
3357 || GET_CODE (dest) == ZERO_EXTRACT
3358 || GET_CODE (dest) == SIGN_EXTRACT
3359 || GET_CODE (dest) == STRICT_LOW_PART)
3360 dest = XEXP (dest, 0);
3361 if (GET_CODE (dest) == REG)
3362 {
3363 int regno = REGNO (dest);
3364 /* If this is the first setting of this reg
3365 in current basic block, and it was set before,
3366 it must be set in two basic blocks, so it cannot
3367 be moved out of the loop. */
3368 if (regs->array[regno].set_in_loop > 0
3369 && last_set == 0)
3370 regs->array[regno].may_not_optimize = 1;
3371 /* If this is not first setting in current basic block,
3372 see if reg was used in between previous one and this.
3373 If so, neither one can be moved. */
3374 if (last_set[regno] != 0
3375 && reg_used_between_p (dest, last_set[regno], insn))
3376 regs->array[regno].may_not_optimize = 1;
3377 if (regs->array[regno].set_in_loop < 127)
3378 ++regs->array[regno].set_in_loop;
3379 last_set[regno] = insn;
3380 }
3381 }
3382 }
3383 \f
3384 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3385 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3386 contained in insn INSN is used by any insn that precedes INSN in
3387 cyclic order starting from the loop entry point.
3388
3389 We don't want to use INSN_LUID here because if we restrict INSN to those
3390 that have a valid INSN_LUID, it means we cannot move an invariant out
3391 from an inner loop past two loops. */
3392
3393 static int
3394 loop_reg_used_before_p (loop, set, insn)
3395 const struct loop *loop;
3396 rtx set, insn;
3397 {
3398 rtx reg = SET_DEST (set);
3399 rtx p;
3400
3401 /* Scan forward checking for register usage. If we hit INSN, we
3402 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3403 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3404 {
3405 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3406 return 1;
3407
3408 if (p == loop->end)
3409 p = loop->start;
3410 }
3411
3412 return 0;
3413 }
3414 \f
3415 /* A "basic induction variable" or biv is a pseudo reg that is set
3416 (within this loop) only by incrementing or decrementing it. */
3417 /* A "general induction variable" or giv is a pseudo reg whose
3418 value is a linear function of a biv. */
3419
3420 /* Bivs are recognized by `basic_induction_var';
3421 Givs by `general_induction_var'. */
3422
3423 /* Communication with routines called via `note_stores'. */
3424
3425 static rtx note_insn;
3426
3427 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3428
3429 static rtx addr_placeholder;
3430
3431 /* ??? Unfinished optimizations, and possible future optimizations,
3432 for the strength reduction code. */
3433
3434 /* ??? The interaction of biv elimination, and recognition of 'constant'
3435 bivs, may cause problems. */
3436
3437 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3438 performance problems.
3439
3440 Perhaps don't eliminate things that can be combined with an addressing
3441 mode. Find all givs that have the same biv, mult_val, and add_val;
3442 then for each giv, check to see if its only use dies in a following
3443 memory address. If so, generate a new memory address and check to see
3444 if it is valid. If it is valid, then store the modified memory address,
3445 otherwise, mark the giv as not done so that it will get its own iv. */
3446
3447 /* ??? Could try to optimize branches when it is known that a biv is always
3448 positive. */
3449
3450 /* ??? When replace a biv in a compare insn, we should replace with closest
3451 giv so that an optimized branch can still be recognized by the combiner,
3452 e.g. the VAX acb insn. */
3453
3454 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3455 was rerun in loop_optimize whenever a register was added or moved.
3456 Also, some of the optimizations could be a little less conservative. */
3457 \f
3458 /* Scan the loop body and call FNCALL for each insn. In the addition to the
3459 LOOP and INSN parameters pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3460 callback.
3461
3462 NOT_EVERY_ITERATION if current insn is not executed at least once for every
3463 loop iteration except for the last one.
3464
3465 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
3466 loop iteration.
3467 */
3468 void
3469 for_each_insn_in_loop (loop, fncall)
3470 struct loop *loop;
3471 loop_insn_callback fncall;
3472 {
3473 /* This is 1 if current insn is not executed at least once for every loop
3474 iteration. */
3475 int not_every_iteration = 0;
3476 int maybe_multiple = 0;
3477 int past_loop_latch = 0;
3478 int loop_depth = 0;
3479 rtx p;
3480
3481 /* If loop_scan_start points to the loop exit test, we have to be wary of
3482 subversive use of gotos inside expression statements. */
3483 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3484 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3485
3486 /* Scan through loop to find all possible bivs. */
3487
3488 for (p = next_insn_in_loop (loop, loop->scan_start);
3489 p != NULL_RTX;
3490 p = next_insn_in_loop (loop, p))
3491 {
3492 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3493
3494 /* Past CODE_LABEL, we get to insns that may be executed multiple
3495 times. The only way we can be sure that they can't is if every
3496 jump insn between here and the end of the loop either
3497 returns, exits the loop, is a jump to a location that is still
3498 behind the label, or is a jump to the loop start. */
3499
3500 if (GET_CODE (p) == CODE_LABEL)
3501 {
3502 rtx insn = p;
3503
3504 maybe_multiple = 0;
3505
3506 while (1)
3507 {
3508 insn = NEXT_INSN (insn);
3509 if (insn == loop->scan_start)
3510 break;
3511 if (insn == loop->end)
3512 {
3513 if (loop->top != 0)
3514 insn = loop->top;
3515 else
3516 break;
3517 if (insn == loop->scan_start)
3518 break;
3519 }
3520
3521 if (GET_CODE (insn) == JUMP_INSN
3522 && GET_CODE (PATTERN (insn)) != RETURN
3523 && (!any_condjump_p (insn)
3524 || (JUMP_LABEL (insn) != 0
3525 && JUMP_LABEL (insn) != loop->scan_start
3526 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3527 {
3528 maybe_multiple = 1;
3529 break;
3530 }
3531 }
3532 }
3533
3534 /* Past a jump, we get to insns for which we can't count
3535 on whether they will be executed during each iteration. */
3536 /* This code appears twice in strength_reduce. There is also similar
3537 code in scan_loop. */
3538 if (GET_CODE (p) == JUMP_INSN
3539 /* If we enter the loop in the middle, and scan around to the
3540 beginning, don't set not_every_iteration for that.
3541 This can be any kind of jump, since we want to know if insns
3542 will be executed if the loop is executed. */
3543 && !(JUMP_LABEL (p) == loop->top
3544 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3545 && any_uncondjump_p (p))
3546 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3547 {
3548 rtx label = 0;
3549
3550 /* If this is a jump outside the loop, then it also doesn't
3551 matter. Check to see if the target of this branch is on the
3552 loop->exits_labels list. */
3553
3554 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3555 if (XEXP (label, 0) == JUMP_LABEL (p))
3556 break;
3557
3558 if (!label)
3559 not_every_iteration = 1;
3560 }
3561
3562 else if (GET_CODE (p) == NOTE)
3563 {
3564 /* At the virtual top of a converted loop, insns are again known to
3565 be executed each iteration: logically, the loop begins here
3566 even though the exit code has been duplicated.
3567
3568 Insns are also again known to be executed each iteration at
3569 the LOOP_CONT note. */
3570 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3571 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3572 && loop_depth == 0)
3573 not_every_iteration = 0;
3574 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3575 loop_depth++;
3576 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3577 loop_depth--;
3578 }
3579
3580 /* Note if we pass a loop latch. If we do, then we can not clear
3581 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3582 a loop since a jump before the last CODE_LABEL may have started
3583 a new loop iteration.
3584
3585 Note that LOOP_TOP is only set for rotated loops and we need
3586 this check for all loops, so compare against the CODE_LABEL
3587 which immediately follows LOOP_START. */
3588 if (GET_CODE (p) == JUMP_INSN
3589 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3590 past_loop_latch = 1;
3591
3592 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3593 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3594 or not an insn is known to be executed each iteration of the
3595 loop, whether or not any iterations are known to occur.
3596
3597 Therefore, if we have just passed a label and have no more labels
3598 between here and the test insn of the loop, and we have not passed
3599 a jump to the top of the loop, then we know these insns will be
3600 executed each iteration. */
3601
3602 if (not_every_iteration
3603 && !past_loop_latch
3604 && GET_CODE (p) == CODE_LABEL
3605 && no_labels_between_p (p, loop->end)
3606 && loop_insn_first_p (p, loop->cont))
3607 not_every_iteration = 0;
3608 }
3609 }
3610 \f
3611 static void
3612 loop_bivs_find (loop)
3613 struct loop *loop;
3614 {
3615 struct loop_regs *regs = LOOP_REGS (loop);
3616 struct loop_ivs *ivs = LOOP_IVS (loop);
3617 /* Temporary list pointers for traversing ivs->list. */
3618 struct iv_class *bl, **backbl;
3619
3620 ivs->list = 0;
3621
3622 for_each_insn_in_loop (loop, check_insn_for_bivs);
3623
3624 /* Scan ivs->list to remove all regs that proved not to be bivs.
3625 Make a sanity check against regs->n_times_set. */
3626 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
3627 {
3628 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3629 /* Above happens if register modified by subreg, etc. */
3630 /* Make sure it is not recognized as a basic induction var: */
3631 || regs->array[bl->regno].n_times_set != bl->biv_count
3632 /* If never incremented, it is invariant that we decided not to
3633 move. So leave it alone. */
3634 || ! bl->incremented)
3635 {
3636 if (loop_dump_stream)
3637 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
3638 bl->regno,
3639 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3640 ? "not induction variable"
3641 : (! bl->incremented ? "never incremented"
3642 : "count error")));
3643
3644 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
3645 *backbl = bl->next;
3646 }
3647 else
3648 {
3649 backbl = &bl->next;
3650
3651 if (loop_dump_stream)
3652 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
3653 }
3654 }
3655 }
3656
3657
3658 /* Determine how BIVS are initialised by looking through pre-header
3659 extended basic block. */
3660 static void
3661 loop_bivs_init_find (loop)
3662 struct loop *loop;
3663 {
3664 struct loop_ivs *ivs = LOOP_IVS (loop);
3665 /* Temporary list pointers for traversing ivs->list. */
3666 struct iv_class *bl;
3667 int call_seen;
3668 rtx p;
3669
3670 /* Find initial value for each biv by searching backwards from loop_start,
3671 halting at first label. Also record any test condition. */
3672
3673 call_seen = 0;
3674 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3675 {
3676 rtx test;
3677
3678 note_insn = p;
3679
3680 if (GET_CODE (p) == CALL_INSN)
3681 call_seen = 1;
3682
3683 if (INSN_P (p))
3684 note_stores (PATTERN (p), record_initial, ivs);
3685
3686 /* Record any test of a biv that branches around the loop if no store
3687 between it and the start of loop. We only care about tests with
3688 constants and registers and only certain of those. */
3689 if (GET_CODE (p) == JUMP_INSN
3690 && JUMP_LABEL (p) != 0
3691 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
3692 && (test = get_condition_for_loop (loop, p)) != 0
3693 && GET_CODE (XEXP (test, 0)) == REG
3694 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3695 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
3696 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
3697 && bl->init_insn == 0)
3698 {
3699 /* If an NE test, we have an initial value! */
3700 if (GET_CODE (test) == NE)
3701 {
3702 bl->init_insn = p;
3703 bl->init_set = gen_rtx_SET (VOIDmode,
3704 XEXP (test, 0), XEXP (test, 1));
3705 }
3706 else
3707 bl->initial_test = test;
3708 }
3709 }
3710 }
3711
3712
3713 /* Look at the each biv and see if we can say anything better about its
3714 initial value from any initializing insns set up above. (This is done
3715 in two passes to avoid missing SETs in a PARALLEL.) */
3716 static void
3717 loop_bivs_check (loop)
3718 struct loop *loop;
3719 {
3720 struct loop_ivs *ivs = LOOP_IVS (loop);
3721 /* Temporary list pointers for traversing ivs->list. */
3722 struct iv_class *bl;
3723 struct iv_class **backbl;
3724
3725 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
3726 {
3727 rtx src;
3728 rtx note;
3729
3730 if (! bl->init_insn)
3731 continue;
3732
3733 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3734 is a constant, use the value of that. */
3735 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3736 && CONSTANT_P (XEXP (note, 0)))
3737 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3738 && CONSTANT_P (XEXP (note, 0))))
3739 src = XEXP (note, 0);
3740 else
3741 src = SET_SRC (bl->init_set);
3742
3743 if (loop_dump_stream)
3744 fprintf (loop_dump_stream,
3745 "Biv %d: initialized at insn %d: initial value ",
3746 bl->regno, INSN_UID (bl->init_insn));
3747
3748 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3749 || GET_MODE (src) == VOIDmode)
3750 && valid_initial_value_p (src, bl->init_insn,
3751 LOOP_INFO (loop)->pre_header_has_call,
3752 loop->start))
3753 {
3754 bl->initial_value = src;
3755
3756 if (loop_dump_stream)
3757 {
3758 print_simple_rtl (loop_dump_stream, src);
3759 fputc ('\n', loop_dump_stream);
3760 }
3761 }
3762 /* If we can't make it a giv,
3763 let biv keep initial value of "itself". */
3764 else if (loop_dump_stream)
3765 fprintf (loop_dump_stream, "is complex\n");
3766 }
3767 }
3768
3769
3770 /* Search the loop for general induction variables. */
3771
3772 static void
3773 loop_givs_find (loop)
3774 struct loop* loop;
3775 {
3776 for_each_insn_in_loop (loop, check_insn_for_givs);
3777 }
3778
3779
3780 /* For each giv for which we still don't know whether or not it is
3781 replaceable, check to see if it is replaceable because its final value
3782 can be calculated. */
3783
3784 static void
3785 loop_givs_check (loop)
3786 struct loop *loop;
3787 {
3788 struct loop_ivs *ivs = LOOP_IVS (loop);
3789 struct iv_class *bl;
3790
3791 for (bl = ivs->list; bl; bl = bl->next)
3792 {
3793 struct induction *v;
3794
3795 for (v = bl->giv; v; v = v->next_iv)
3796 if (! v->replaceable && ! v->not_replaceable)
3797 check_final_value (loop, v);
3798 }
3799 }
3800
3801
3802 /* Return non-zero if it is possible to eliminate the biv BL provided
3803 all givs are reduced. This is possible if either the reg is not
3804 used outside the loop, or we can compute what its final value will
3805 be. */
3806
3807 static int
3808 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
3809 struct loop *loop;
3810 struct iv_class *bl;
3811 int threshold;
3812 int insn_count;
3813 {
3814 /* For architectures with a decrement_and_branch_until_zero insn,
3815 don't do this if we put a REG_NONNEG note on the endtest for this
3816 biv. */
3817
3818 #ifdef HAVE_decrement_and_branch_until_zero
3819 if (bl->nonneg)
3820 {
3821 if (loop_dump_stream)
3822 fprintf (loop_dump_stream,
3823 "Cannot eliminate nonneg biv %d.\n", bl->regno);
3824 return 0;
3825 }
3826 #endif
3827
3828 /* Check that biv is used outside loop or if it has a final value.
3829 Compare against bl->init_insn rather than loop->start. We aren't
3830 concerned with any uses of the biv between init_insn and
3831 loop->start since these won't be affected by the value of the biv
3832 elsewhere in the function, so long as init_insn doesn't use the
3833 biv itself. */
3834
3835 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
3836 && bl->init_insn
3837 && INSN_UID (bl->init_insn) < max_uid_for_loop
3838 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
3839 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3840 || (bl->final_value = final_biv_value (loop, bl)))
3841 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
3842
3843 if (loop_dump_stream)
3844 {
3845 fprintf (loop_dump_stream,
3846 "Cannot eliminate biv %d.\n",
3847 bl->regno);
3848 fprintf (loop_dump_stream,
3849 "First use: insn %d, last use: insn %d.\n",
3850 REGNO_FIRST_UID (bl->regno),
3851 REGNO_LAST_UID (bl->regno));
3852 }
3853 return 0;
3854 }
3855
3856
3857 /* Reduce each giv of BL that we have decided to reduce. */
3858
3859 static void
3860 loop_givs_reduce (loop, bl)
3861 struct loop *loop;
3862 struct iv_class *bl;
3863 {
3864 struct induction *v;
3865
3866 for (v = bl->giv; v; v = v->next_iv)
3867 {
3868 struct induction *tv;
3869 if (! v->ignore && v->same == 0)
3870 {
3871 int auto_inc_opt = 0;
3872
3873 /* If the code for derived givs immediately below has already
3874 allocated a new_reg, we must keep it. */
3875 if (! v->new_reg)
3876 v->new_reg = gen_reg_rtx (v->mode);
3877
3878 #ifdef AUTO_INC_DEC
3879 /* If the target has auto-increment addressing modes, and
3880 this is an address giv, then try to put the increment
3881 immediately after its use, so that flow can create an
3882 auto-increment addressing mode. */
3883 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
3884 && bl->biv->always_executed && ! bl->biv->maybe_multiple
3885 /* We don't handle reversed biv's because bl->biv->insn
3886 does not have a valid INSN_LUID. */
3887 && ! bl->reversed
3888 && v->always_executed && ! v->maybe_multiple
3889 && INSN_UID (v->insn) < max_uid_for_loop)
3890 {
3891 /* If other giv's have been combined with this one, then
3892 this will work only if all uses of the other giv's occur
3893 before this giv's insn. This is difficult to check.
3894
3895 We simplify this by looking for the common case where
3896 there is one DEST_REG giv, and this giv's insn is the
3897 last use of the dest_reg of that DEST_REG giv. If the
3898 increment occurs after the address giv, then we can
3899 perform the optimization. (Otherwise, the increment
3900 would have to go before other_giv, and we would not be
3901 able to combine it with the address giv to get an
3902 auto-inc address.) */
3903 if (v->combined_with)
3904 {
3905 struct induction *other_giv = 0;
3906
3907 for (tv = bl->giv; tv; tv = tv->next_iv)
3908 if (tv->same == v)
3909 {
3910 if (other_giv)
3911 break;
3912 else
3913 other_giv = tv;
3914 }
3915 if (! tv && other_giv
3916 && REGNO (other_giv->dest_reg) < max_reg_before_loop
3917 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
3918 == INSN_UID (v->insn))
3919 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
3920 auto_inc_opt = 1;
3921 }
3922 /* Check for case where increment is before the address
3923 giv. Do this test in "loop order". */
3924 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
3925 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3926 || (INSN_LUID (bl->biv->insn)
3927 > INSN_LUID (loop->scan_start))))
3928 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3929 && (INSN_LUID (loop->scan_start)
3930 < INSN_LUID (bl->biv->insn))))
3931 auto_inc_opt = -1;
3932 else
3933 auto_inc_opt = 1;
3934
3935 #ifdef HAVE_cc0
3936 {
3937 rtx prev;
3938
3939 /* We can't put an insn immediately after one setting
3940 cc0, or immediately before one using cc0. */
3941 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
3942 || (auto_inc_opt == -1
3943 && (prev = prev_nonnote_insn (v->insn)) != 0
3944 && INSN_P (prev)
3945 && sets_cc0_p (PATTERN (prev))))
3946 auto_inc_opt = 0;
3947 }
3948 #endif
3949
3950 if (auto_inc_opt)
3951 v->auto_inc_opt = 1;
3952 }
3953 #endif
3954
3955 /* For each place where the biv is incremented, add an insn
3956 to increment the new, reduced reg for the giv. */
3957 for (tv = bl->biv; tv; tv = tv->next_iv)
3958 {
3959 rtx insert_before;
3960
3961 if (! auto_inc_opt)
3962 insert_before = tv->insn;
3963 else if (auto_inc_opt == 1)
3964 insert_before = NEXT_INSN (v->insn);
3965 else
3966 insert_before = v->insn;
3967
3968 if (tv->mult_val == const1_rtx)
3969 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3970 v->new_reg, v->new_reg,
3971 0, insert_before);
3972 else /* tv->mult_val == const0_rtx */
3973 /* A multiply is acceptable here
3974 since this is presumed to be seldom executed. */
3975 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3976 v->add_val, v->new_reg,
3977 0, insert_before);
3978 }
3979
3980 /* Add code at loop start to initialize giv's reduced reg. */
3981
3982 loop_iv_add_mult_hoist (loop,
3983 extend_value_for_giv (v, bl->initial_value),
3984 v->mult_val, v->add_val, v->new_reg);
3985 }
3986 }
3987 }
3988
3989
3990 /* Check for givs whose first use is their definition and whose
3991 last use is the definition of another giv. If so, it is likely
3992 dead and should not be used to derive another giv nor to
3993 eliminate a biv. */
3994
3995 static void
3996 loop_givs_dead_check (loop, bl)
3997 struct loop *loop ATTRIBUTE_UNUSED;
3998 struct iv_class *bl;
3999 {
4000 struct induction *v;
4001
4002 for (v = bl->giv; v; v = v->next_iv)
4003 {
4004 if (v->ignore
4005 || (v->same && v->same->ignore))
4006 continue;
4007
4008 if (v->giv_type == DEST_REG
4009 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4010 {
4011 struct induction *v1;
4012
4013 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4014 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4015 v->maybe_dead = 1;
4016 }
4017 }
4018 }
4019
4020
4021 static void
4022 loop_givs_rescan (loop, bl, reg_map)
4023 struct loop *loop;
4024 struct iv_class *bl;
4025 rtx *reg_map;
4026 {
4027 struct induction *v;
4028
4029 for (v = bl->giv; v; v = v->next_iv)
4030 {
4031 if (v->same && v->same->ignore)
4032 v->ignore = 1;
4033
4034 if (v->ignore)
4035 continue;
4036
4037 /* Update expression if this was combined, in case other giv was
4038 replaced. */
4039 if (v->same)
4040 v->new_reg = replace_rtx (v->new_reg,
4041 v->same->dest_reg, v->same->new_reg);
4042
4043 /* See if this register is known to be a pointer to something. If
4044 so, see if we can find the alignment. First see if there is a
4045 destination register that is a pointer. If so, this shares the
4046 alignment too. Next see if we can deduce anything from the
4047 computational information. If not, and this is a DEST_ADDR
4048 giv, at least we know that it's a pointer, though we don't know
4049 the alignment. */
4050 if (GET_CODE (v->new_reg) == REG
4051 && v->giv_type == DEST_REG
4052 && REG_POINTER (v->dest_reg))
4053 mark_reg_pointer (v->new_reg,
4054 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4055 else if (GET_CODE (v->new_reg) == REG
4056 && REG_POINTER (v->src_reg))
4057 {
4058 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4059
4060 if (align == 0
4061 || GET_CODE (v->add_val) != CONST_INT
4062 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4063 align = 0;
4064
4065 mark_reg_pointer (v->new_reg, align);
4066 }
4067 else if (GET_CODE (v->new_reg) == REG
4068 && GET_CODE (v->add_val) == REG
4069 && REG_POINTER (v->add_val))
4070 {
4071 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4072
4073 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4074 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4075 align = 0;
4076
4077 mark_reg_pointer (v->new_reg, align);
4078 }
4079 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4080 mark_reg_pointer (v->new_reg, 0);
4081
4082 if (v->giv_type == DEST_ADDR)
4083 /* Store reduced reg as the address in the memref where we found
4084 this giv. */
4085 validate_change (v->insn, v->location, v->new_reg, 0);
4086 else if (v->replaceable)
4087 {
4088 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4089 }
4090 else
4091 {
4092 /* Not replaceable; emit an insn to set the original giv reg from
4093 the reduced giv, same as above. */
4094 loop_insn_emit_after (loop, 0, v->insn,
4095 gen_move_insn (v->dest_reg, v->new_reg));
4096 }
4097
4098 /* When a loop is reversed, givs which depend on the reversed
4099 biv, and which are live outside the loop, must be set to their
4100 correct final value. This insn is only needed if the giv is
4101 not replaceable. The correct final value is the same as the
4102 value that the giv starts the reversed loop with. */
4103 if (bl->reversed && ! v->replaceable)
4104 loop_iv_add_mult_sink (loop,
4105 extend_value_for_giv (v, bl->initial_value),
4106 v->mult_val, v->add_val, v->dest_reg);
4107 else if (v->final_value)
4108 loop_insn_sink_or_swim (loop,
4109 gen_move_insn (v->dest_reg, v->final_value));
4110
4111 if (loop_dump_stream)
4112 {
4113 fprintf (loop_dump_stream, "giv at %d reduced to ",
4114 INSN_UID (v->insn));
4115 print_simple_rtl (loop_dump_stream, v->new_reg);
4116 fprintf (loop_dump_stream, "\n");
4117 }
4118 }
4119 }
4120
4121
4122 static int
4123 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4124 struct loop *loop ATTRIBUTE_UNUSED;
4125 struct iv_class *bl;
4126 struct induction *v;
4127 rtx test_reg;
4128 {
4129 int add_cost;
4130 int benefit;
4131
4132 benefit = v->benefit;
4133 PUT_MODE (test_reg, v->mode);
4134 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4135 test_reg, test_reg);
4136
4137 /* Reduce benefit if not replaceable, since we will insert a
4138 move-insn to replace the insn that calculates this giv. Don't do
4139 this unless the giv is a user variable, since it will often be
4140 marked non-replaceable because of the duplication of the exit
4141 code outside the loop. In such a case, the copies we insert are
4142 dead and will be deleted. So they don't have a cost. Similar
4143 situations exist. */
4144 /* ??? The new final_[bg]iv_value code does a much better job of
4145 finding replaceable giv's, and hence this code may no longer be
4146 necessary. */
4147 if (! v->replaceable && ! bl->eliminable
4148 && REG_USERVAR_P (v->dest_reg))
4149 benefit -= copy_cost;
4150
4151 /* Decrease the benefit to count the add-insns that we will insert
4152 to increment the reduced reg for the giv. ??? This can
4153 overestimate the run-time cost of the additional insns, e.g. if
4154 there are multiple basic blocks that increment the biv, but only
4155 one of these blocks is executed during each iteration. There is
4156 no good way to detect cases like this with the current structure
4157 of the loop optimizer. This code is more accurate for
4158 determining code size than run-time benefits. */
4159 benefit -= add_cost * bl->biv_count;
4160
4161 /* Decide whether to strength-reduce this giv or to leave the code
4162 unchanged (recompute it from the biv each time it is used). This
4163 decision can be made independently for each giv. */
4164
4165 #ifdef AUTO_INC_DEC
4166 /* Attempt to guess whether autoincrement will handle some of the
4167 new add insns; if so, increase BENEFIT (undo the subtraction of
4168 add_cost that was done above). */
4169 if (v->giv_type == DEST_ADDR
4170 /* Increasing the benefit is risky, since this is only a guess.
4171 Avoid increasing register pressure in cases where there would
4172 be no other benefit from reducing this giv. */
4173 && benefit > 0
4174 && GET_CODE (v->mult_val) == CONST_INT)
4175 {
4176 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4177
4178 if (HAVE_POST_INCREMENT
4179 && INTVAL (v->mult_val) == size)
4180 benefit += add_cost * bl->biv_count;
4181 else if (HAVE_PRE_INCREMENT
4182 && INTVAL (v->mult_val) == size)
4183 benefit += add_cost * bl->biv_count;
4184 else if (HAVE_POST_DECREMENT
4185 && -INTVAL (v->mult_val) == size)
4186 benefit += add_cost * bl->biv_count;
4187 else if (HAVE_PRE_DECREMENT
4188 && -INTVAL (v->mult_val) == size)
4189 benefit += add_cost * bl->biv_count;
4190 }
4191 #endif
4192
4193 return benefit;
4194 }
4195
4196
4197 /* Free IV structures for LOOP. */
4198
4199 static void
4200 loop_ivs_free (loop)
4201 struct loop *loop;
4202 {
4203 struct loop_ivs *ivs = LOOP_IVS (loop);
4204 struct iv_class *iv = ivs->list;
4205
4206 free (ivs->regs);
4207
4208 while (iv)
4209 {
4210 struct iv_class *next = iv->next;
4211 struct induction *induction;
4212 struct induction *next_induction;
4213
4214 for (induction = iv->biv; induction; induction = next_induction)
4215 {
4216 next_induction = induction->next_iv;
4217 free (induction);
4218 }
4219 for (induction = iv->giv; induction; induction = next_induction)
4220 {
4221 next_induction = induction->next_iv;
4222 free (induction);
4223 }
4224
4225 free (iv);
4226 iv = next;
4227 }
4228 }
4229
4230
4231 /* Perform strength reduction and induction variable elimination.
4232
4233 Pseudo registers created during this function will be beyond the
4234 last valid index in several tables including
4235 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4236 problem here, because the added registers cannot be givs outside of
4237 their loop, and hence will never be reconsidered. But scan_loop
4238 must check regnos to make sure they are in bounds. */
4239
4240 static void
4241 strength_reduce (loop, flags)
4242 struct loop *loop;
4243 int flags;
4244 {
4245 struct loop_info *loop_info = LOOP_INFO (loop);
4246 struct loop_regs *regs = LOOP_REGS (loop);
4247 struct loop_ivs *ivs = LOOP_IVS (loop);
4248 rtx p;
4249 /* Temporary list pointer for traversing ivs->list. */
4250 struct iv_class *bl;
4251 /* Ratio of extra register life span we can justify
4252 for saving an instruction. More if loop doesn't call subroutines
4253 since in that case saving an insn makes more difference
4254 and more registers are available. */
4255 /* ??? could set this to last value of threshold in move_movables */
4256 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4257 /* Map of pseudo-register replacements. */
4258 rtx *reg_map = NULL;
4259 int reg_map_size;
4260 int unrolled_insn_copies = 0;
4261 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4262 int insn_count = count_insns_in_loop (loop);
4263
4264 addr_placeholder = gen_reg_rtx (Pmode);
4265
4266 ivs->n_regs = max_reg_before_loop;
4267 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4268
4269 /* Find all BIVs in loop. */
4270 loop_bivs_find (loop);
4271
4272 /* Exit if there are no bivs. */
4273 if (! ivs->list)
4274 {
4275 /* Can still unroll the loop anyways, but indicate that there is no
4276 strength reduction info available. */
4277 if (flags & LOOP_UNROLL)
4278 unroll_loop (loop, insn_count, 0);
4279
4280 loop_ivs_free (loop);
4281 return;
4282 }
4283
4284 /* Determine how BIVS are initialised by looking through pre-header
4285 extended basic block. */
4286 loop_bivs_init_find (loop);
4287
4288 /* Look at the each biv and see if we can say anything better about its
4289 initial value from any initializing insns set up above. */
4290 loop_bivs_check (loop);
4291
4292 /* Search the loop for general induction variables. */
4293 loop_givs_find (loop);
4294
4295 /* Try to calculate and save the number of loop iterations. This is
4296 set to zero if the actual number can not be calculated. This must
4297 be called after all giv's have been identified, since otherwise it may
4298 fail if the iteration variable is a giv. */
4299 loop_iterations (loop);
4300
4301 /* Now for each giv for which we still don't know whether or not it is
4302 replaceable, check to see if it is replaceable because its final value
4303 can be calculated. This must be done after loop_iterations is called,
4304 so that final_giv_value will work correctly. */
4305 loop_givs_check (loop);
4306
4307 /* Try to prove that the loop counter variable (if any) is always
4308 nonnegative; if so, record that fact with a REG_NONNEG note
4309 so that "decrement and branch until zero" insn can be used. */
4310 check_dbra_loop (loop, insn_count);
4311
4312 /* Create reg_map to hold substitutions for replaceable giv regs.
4313 Some givs might have been made from biv increments, so look at
4314 ivs->reg_iv_type for a suitable size. */
4315 reg_map_size = ivs->n_regs;
4316 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4317
4318 /* Examine each iv class for feasibility of strength reduction/induction
4319 variable elimination. */
4320
4321 for (bl = ivs->list; bl; bl = bl->next)
4322 {
4323 struct induction *v;
4324 int benefit;
4325
4326 /* Test whether it will be possible to eliminate this biv
4327 provided all givs are reduced. */
4328 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4329
4330 /* This will be true at the end, if all givs which depend on this
4331 biv have been strength reduced.
4332 We can't (currently) eliminate the biv unless this is so. */
4333 bl->all_reduced = 1;
4334
4335 /* Check each extension dependent giv in this class to see if its
4336 root biv is safe from wrapping in the interior mode. */
4337 check_ext_dependent_givs (bl, loop_info);
4338
4339 /* Combine all giv's for this iv_class. */
4340 combine_givs (regs, bl);
4341
4342 for (v = bl->giv; v; v = v->next_iv)
4343 {
4344 struct induction *tv;
4345
4346 if (v->ignore || v->same)
4347 continue;
4348
4349 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
4350
4351 /* If an insn is not to be strength reduced, then set its ignore
4352 flag, and clear bl->all_reduced. */
4353
4354 /* A giv that depends on a reversed biv must be reduced if it is
4355 used after the loop exit, otherwise, it would have the wrong
4356 value after the loop exit. To make it simple, just reduce all
4357 of such giv's whether or not we know they are used after the loop
4358 exit. */
4359
4360 if (! flag_reduce_all_givs
4361 && v->lifetime * threshold * benefit < insn_count
4362 && ! bl->reversed)
4363 {
4364 if (loop_dump_stream)
4365 fprintf (loop_dump_stream,
4366 "giv of insn %d not worth while, %d vs %d.\n",
4367 INSN_UID (v->insn),
4368 v->lifetime * threshold * benefit, insn_count);
4369 v->ignore = 1;
4370 bl->all_reduced = 0;
4371 }
4372 else
4373 {
4374 /* Check that we can increment the reduced giv without a
4375 multiply insn. If not, reject it. */
4376
4377 for (tv = bl->biv; tv; tv = tv->next_iv)
4378 if (tv->mult_val == const1_rtx
4379 && ! product_cheap_p (tv->add_val, v->mult_val))
4380 {
4381 if (loop_dump_stream)
4382 fprintf (loop_dump_stream,
4383 "giv of insn %d: would need a multiply.\n",
4384 INSN_UID (v->insn));
4385 v->ignore = 1;
4386 bl->all_reduced = 0;
4387 break;
4388 }
4389 }
4390 }
4391
4392 /* Check for givs whose first use is their definition and whose
4393 last use is the definition of another giv. If so, it is likely
4394 dead and should not be used to derive another giv nor to
4395 eliminate a biv. */
4396 loop_givs_dead_check (loop, bl);
4397
4398 /* Reduce each giv that we decided to reduce. */
4399 loop_givs_reduce (loop, bl);
4400
4401 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4402 as not reduced.
4403
4404 For each giv register that can be reduced now: if replaceable,
4405 substitute reduced reg wherever the old giv occurs;
4406 else add new move insn "giv_reg = reduced_reg". */
4407 loop_givs_rescan (loop, bl, reg_map);
4408
4409 /* All the givs based on the biv bl have been reduced if they
4410 merit it. */
4411
4412 /* For each giv not marked as maybe dead that has been combined with a
4413 second giv, clear any "maybe dead" mark on that second giv.
4414 v->new_reg will either be or refer to the register of the giv it
4415 combined with.
4416
4417 Doing this clearing avoids problems in biv elimination where
4418 a giv's new_reg is a complex value that can't be put in the
4419 insn but the giv combined with (with a reg as new_reg) is
4420 marked maybe_dead. Since the register will be used in either
4421 case, we'd prefer it be used from the simpler giv. */
4422
4423 for (v = bl->giv; v; v = v->next_iv)
4424 if (! v->maybe_dead && v->same)
4425 v->same->maybe_dead = 0;
4426
4427 /* Try to eliminate the biv, if it is a candidate.
4428 This won't work if ! bl->all_reduced,
4429 since the givs we planned to use might not have been reduced.
4430
4431 We have to be careful that we didn't initially think we could
4432 eliminate this biv because of a giv that we now think may be
4433 dead and shouldn't be used as a biv replacement.
4434
4435 Also, there is the possibility that we may have a giv that looks
4436 like it can be used to eliminate a biv, but the resulting insn
4437 isn't valid. This can happen, for example, on the 88k, where a
4438 JUMP_INSN can compare a register only with zero. Attempts to
4439 replace it with a compare with a constant will fail.
4440
4441 Note that in cases where this call fails, we may have replaced some
4442 of the occurrences of the biv with a giv, but no harm was done in
4443 doing so in the rare cases where it can occur. */
4444
4445 if (bl->all_reduced == 1 && bl->eliminable
4446 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4447 {
4448 /* ?? If we created a new test to bypass the loop entirely,
4449 or otherwise drop straight in, based on this test, then
4450 we might want to rewrite it also. This way some later
4451 pass has more hope of removing the initialization of this
4452 biv entirely. */
4453
4454 /* If final_value != 0, then the biv may be used after loop end
4455 and we must emit an insn to set it just in case.
4456
4457 Reversed bivs already have an insn after the loop setting their
4458 value, so we don't need another one. We can't calculate the
4459 proper final value for such a biv here anyways. */
4460 if (bl->final_value && ! bl->reversed)
4461 loop_insn_sink_or_swim (loop, gen_move_insn
4462 (bl->biv->dest_reg, bl->final_value));
4463
4464 if (loop_dump_stream)
4465 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4466 bl->regno);
4467 }
4468 }
4469
4470 /* Go through all the instructions in the loop, making all the
4471 register substitutions scheduled in REG_MAP. */
4472
4473 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
4474 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4475 || GET_CODE (p) == CALL_INSN)
4476 {
4477 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4478 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4479 INSN_CODE (p) = -1;
4480 }
4481
4482 if (loop_info->n_iterations > 0)
4483 {
4484 /* When we completely unroll a loop we will likely not need the increment
4485 of the loop BIV and we will not need the conditional branch at the
4486 end of the loop. */
4487 unrolled_insn_copies = insn_count - 2;
4488
4489 #ifdef HAVE_cc0
4490 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4491 need the comparison before the conditional branch at the end of the
4492 loop. */
4493 unrolled_insn_copies -= 1;
4494 #endif
4495
4496 /* We'll need one copy for each loop iteration. */
4497 unrolled_insn_copies *= loop_info->n_iterations;
4498
4499 /* A little slop to account for the ability to remove initialization
4500 code, better CSE, and other secondary benefits of completely
4501 unrolling some loops. */
4502 unrolled_insn_copies -= 1;
4503
4504 /* Clamp the value. */
4505 if (unrolled_insn_copies < 0)
4506 unrolled_insn_copies = 0;
4507 }
4508
4509 /* Unroll loops from within strength reduction so that we can use the
4510 induction variable information that strength_reduce has already
4511 collected. Always unroll loops that would be as small or smaller
4512 unrolled than when rolled. */
4513 if ((flags & LOOP_UNROLL)
4514 || (loop_info->n_iterations > 0
4515 && unrolled_insn_copies <= insn_count))
4516 unroll_loop (loop, insn_count, 1);
4517
4518 #ifdef HAVE_doloop_end
4519 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4520 doloop_optimize (loop);
4521 #endif /* HAVE_doloop_end */
4522
4523 /* In case number of iterations is known, drop branch prediction note
4524 in the branch. Do that only in second loop pass, as loop unrolling
4525 may change the number of iterations performed. */
4526 if ((flags & LOOP_BCT)
4527 && loop_info->n_iterations / loop_info->unroll_number > 1)
4528 {
4529 int n = loop_info->n_iterations / loop_info->unroll_number;
4530 predict_insn (PREV_INSN (loop->end),
4531 PRED_LOOP_ITERATIONS,
4532 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
4533 }
4534
4535 if (loop_dump_stream)
4536 fprintf (loop_dump_stream, "\n");
4537
4538 loop_ivs_free (loop);
4539 if (reg_map)
4540 free (reg_map);
4541 }
4542 \f
4543 /*Record all basic induction variables calculated in the insn. */
4544 static rtx
4545 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4546 struct loop *loop;
4547 rtx p;
4548 int not_every_iteration;
4549 int maybe_multiple;
4550 {
4551 struct loop_ivs *ivs = LOOP_IVS (loop);
4552 rtx set;
4553 rtx dest_reg;
4554 rtx inc_val;
4555 rtx mult_val;
4556 rtx *location;
4557
4558 if (GET_CODE (p) == INSN
4559 && (set = single_set (p))
4560 && GET_CODE (SET_DEST (set)) == REG)
4561 {
4562 dest_reg = SET_DEST (set);
4563 if (REGNO (dest_reg) < max_reg_before_loop
4564 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
4565 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
4566 {
4567 if (basic_induction_var (loop, SET_SRC (set),
4568 GET_MODE (SET_SRC (set)),
4569 dest_reg, p, &inc_val, &mult_val,
4570 &location))
4571 {
4572 /* It is a possible basic induction variable.
4573 Create and initialize an induction structure for it. */
4574
4575 struct induction *v
4576 = (struct induction *) xmalloc (sizeof (struct induction));
4577
4578 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
4579 not_every_iteration, maybe_multiple);
4580 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
4581 }
4582 else if (REGNO (dest_reg) < ivs->n_regs)
4583 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
4584 }
4585 }
4586 return p;
4587 }
4588 \f
4589 /* Record all givs calculated in the insn.
4590 A register is a giv if: it is only set once, it is a function of a
4591 biv and a constant (or invariant), and it is not a biv. */
4592 static rtx
4593 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
4594 struct loop *loop;
4595 rtx p;
4596 int not_every_iteration;
4597 int maybe_multiple;
4598 {
4599 struct loop_regs *regs = LOOP_REGS (loop);
4600
4601 rtx set;
4602 /* Look for a general induction variable in a register. */
4603 if (GET_CODE (p) == INSN
4604 && (set = single_set (p))
4605 && GET_CODE (SET_DEST (set)) == REG
4606 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
4607 {
4608 rtx src_reg;
4609 rtx dest_reg;
4610 rtx add_val;
4611 rtx mult_val;
4612 rtx ext_val;
4613 int benefit;
4614 rtx regnote = 0;
4615 rtx last_consec_insn;
4616
4617 dest_reg = SET_DEST (set);
4618 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4619 return p;
4620
4621 if (/* SET_SRC is a giv. */
4622 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
4623 &mult_val, &ext_val, 0, &benefit, VOIDmode)
4624 /* Equivalent expression is a giv. */
4625 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4626 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
4627 &add_val, &mult_val, &ext_val, 0,
4628 &benefit, VOIDmode)))
4629 /* Don't try to handle any regs made by loop optimization.
4630 We have nothing on them in regno_first_uid, etc. */
4631 && REGNO (dest_reg) < max_reg_before_loop
4632 /* Don't recognize a BASIC_INDUCT_VAR here. */
4633 && dest_reg != src_reg
4634 /* This must be the only place where the register is set. */
4635 && (regs->array[REGNO (dest_reg)].n_times_set == 1
4636 /* or all sets must be consecutive and make a giv. */
4637 || (benefit = consec_sets_giv (loop, benefit, p,
4638 src_reg, dest_reg,
4639 &add_val, &mult_val, &ext_val,
4640 &last_consec_insn))))
4641 {
4642 struct induction *v
4643 = (struct induction *) xmalloc (sizeof (struct induction));
4644
4645 /* If this is a library call, increase benefit. */
4646 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4647 benefit += libcall_benefit (p);
4648
4649 /* Skip the consecutive insns, if there are any. */
4650 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
4651 p = last_consec_insn;
4652
4653 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
4654 ext_val, benefit, DEST_REG, not_every_iteration,
4655 maybe_multiple, (rtx*)0);
4656
4657 }
4658 }
4659
4660 #ifndef DONT_REDUCE_ADDR
4661 /* Look for givs which are memory addresses. */
4662 /* This resulted in worse code on a VAX 8600. I wonder if it
4663 still does. */
4664 if (GET_CODE (p) == INSN)
4665 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
4666 maybe_multiple);
4667 #endif
4668
4669 /* Update the status of whether giv can derive other givs. This can
4670 change when we pass a label or an insn that updates a biv. */
4671 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4672 || GET_CODE (p) == CODE_LABEL)
4673 update_giv_derive (loop, p);
4674 return p;
4675 }
4676 \f
4677 /* Return 1 if X is a valid source for an initial value (or as value being
4678 compared against in an initial test).
4679
4680 X must be either a register or constant and must not be clobbered between
4681 the current insn and the start of the loop.
4682
4683 INSN is the insn containing X. */
4684
4685 static int
4686 valid_initial_value_p (x, insn, call_seen, loop_start)
4687 rtx x;
4688 rtx insn;
4689 int call_seen;
4690 rtx loop_start;
4691 {
4692 if (CONSTANT_P (x))
4693 return 1;
4694
4695 /* Only consider pseudos we know about initialized in insns whose luids
4696 we know. */
4697 if (GET_CODE (x) != REG
4698 || REGNO (x) >= max_reg_before_loop)
4699 return 0;
4700
4701 /* Don't use call-clobbered registers across a call which clobbers it. On
4702 some machines, don't use any hard registers at all. */
4703 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4704 && (SMALL_REGISTER_CLASSES
4705 || (call_used_regs[REGNO (x)] && call_seen)))
4706 return 0;
4707
4708 /* Don't use registers that have been clobbered before the start of the
4709 loop. */
4710 if (reg_set_between_p (x, insn, loop_start))
4711 return 0;
4712
4713 return 1;
4714 }
4715 \f
4716 /* Scan X for memory refs and check each memory address
4717 as a possible giv. INSN is the insn whose pattern X comes from.
4718 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4719 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
4720 more thanonce in each loop iteration. */
4721
4722 static void
4723 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
4724 const struct loop *loop;
4725 rtx x;
4726 rtx insn;
4727 int not_every_iteration, maybe_multiple;
4728 {
4729 int i, j;
4730 enum rtx_code code;
4731 const char *fmt;
4732
4733 if (x == 0)
4734 return;
4735
4736 code = GET_CODE (x);
4737 switch (code)
4738 {
4739 case REG:
4740 case CONST_INT:
4741 case CONST:
4742 case CONST_DOUBLE:
4743 case SYMBOL_REF:
4744 case LABEL_REF:
4745 case PC:
4746 case CC0:
4747 case ADDR_VEC:
4748 case ADDR_DIFF_VEC:
4749 case USE:
4750 case CLOBBER:
4751 return;
4752
4753 case MEM:
4754 {
4755 rtx src_reg;
4756 rtx add_val;
4757 rtx mult_val;
4758 rtx ext_val;
4759 int benefit;
4760
4761 /* This code used to disable creating GIVs with mult_val == 1 and
4762 add_val == 0. However, this leads to lost optimizations when
4763 it comes time to combine a set of related DEST_ADDR GIVs, since
4764 this one would not be seen. */
4765
4766 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
4767 &mult_val, &ext_val, 1, &benefit,
4768 GET_MODE (x)))
4769 {
4770 /* Found one; record it. */
4771 struct induction *v
4772 = (struct induction *) xmalloc (sizeof (struct induction));
4773
4774 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
4775 add_val, ext_val, benefit, DEST_ADDR,
4776 not_every_iteration, maybe_multiple, &XEXP (x, 0));
4777
4778 v->mem = x;
4779 }
4780 }
4781 return;
4782
4783 default:
4784 break;
4785 }
4786
4787 /* Recursively scan the subexpressions for other mem refs. */
4788
4789 fmt = GET_RTX_FORMAT (code);
4790 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4791 if (fmt[i] == 'e')
4792 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
4793 maybe_multiple);
4794 else if (fmt[i] == 'E')
4795 for (j = 0; j < XVECLEN (x, i); j++)
4796 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
4797 maybe_multiple);
4798 }
4799 \f
4800 /* Fill in the data about one biv update.
4801 V is the `struct induction' in which we record the biv. (It is
4802 allocated by the caller, with alloca.)
4803 INSN is the insn that sets it.
4804 DEST_REG is the biv's reg.
4805
4806 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4807 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4808 being set to INC_VAL.
4809
4810 NOT_EVERY_ITERATION is nonzero if this biv update is not know to be
4811 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4812 can be executed more than once per iteration. If MAYBE_MULTIPLE
4813 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4814 executed exactly once per iteration. */
4815
4816 static void
4817 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
4818 not_every_iteration, maybe_multiple)
4819 struct loop *loop;
4820 struct induction *v;
4821 rtx insn;
4822 rtx dest_reg;
4823 rtx inc_val;
4824 rtx mult_val;
4825 rtx *location;
4826 int not_every_iteration;
4827 int maybe_multiple;
4828 {
4829 struct loop_ivs *ivs = LOOP_IVS (loop);
4830 struct iv_class *bl;
4831
4832 v->insn = insn;
4833 v->src_reg = dest_reg;
4834 v->dest_reg = dest_reg;
4835 v->mult_val = mult_val;
4836 v->add_val = inc_val;
4837 v->ext_dependent = NULL_RTX;
4838 v->location = location;
4839 v->mode = GET_MODE (dest_reg);
4840 v->always_computable = ! not_every_iteration;
4841 v->always_executed = ! not_every_iteration;
4842 v->maybe_multiple = maybe_multiple;
4843
4844 /* Add this to the reg's iv_class, creating a class
4845 if this is the first incrementation of the reg. */
4846
4847 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
4848 if (bl == 0)
4849 {
4850 /* Create and initialize new iv_class. */
4851
4852 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
4853
4854 bl->regno = REGNO (dest_reg);
4855 bl->biv = 0;
4856 bl->giv = 0;
4857 bl->biv_count = 0;
4858 bl->giv_count = 0;
4859
4860 /* Set initial value to the reg itself. */
4861 bl->initial_value = dest_reg;
4862 bl->final_value = 0;
4863 /* We haven't seen the initializing insn yet */
4864 bl->init_insn = 0;
4865 bl->init_set = 0;
4866 bl->initial_test = 0;
4867 bl->incremented = 0;
4868 bl->eliminable = 0;
4869 bl->nonneg = 0;
4870 bl->reversed = 0;
4871 bl->total_benefit = 0;
4872
4873 /* Add this class to ivs->list. */
4874 bl->next = ivs->list;
4875 ivs->list = bl;
4876
4877 /* Put it in the array of biv register classes. */
4878 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
4879 }
4880
4881 /* Update IV_CLASS entry for this biv. */
4882 v->next_iv = bl->biv;
4883 bl->biv = v;
4884 bl->biv_count++;
4885 if (mult_val == const1_rtx)
4886 bl->incremented = 1;
4887
4888 if (loop_dump_stream)
4889 loop_biv_dump (v, loop_dump_stream, 0);
4890 }
4891 \f
4892 /* Fill in the data about one giv.
4893 V is the `struct induction' in which we record the giv. (It is
4894 allocated by the caller, with alloca.)
4895 INSN is the insn that sets it.
4896 BENEFIT estimates the savings from deleting this insn.
4897 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4898 into a register or is used as a memory address.
4899
4900 SRC_REG is the biv reg which the giv is computed from.
4901 DEST_REG is the giv's reg (if the giv is stored in a reg).
4902 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4903 LOCATION points to the place where this giv's value appears in INSN. */
4904
4905 static void
4906 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
4907 benefit, type, not_every_iteration, maybe_multiple, location)
4908 const struct loop *loop;
4909 struct induction *v;
4910 rtx insn;
4911 rtx src_reg;
4912 rtx dest_reg;
4913 rtx mult_val, add_val, ext_val;
4914 int benefit;
4915 enum g_types type;
4916 int not_every_iteration, maybe_multiple;
4917 rtx *location;
4918 {
4919 struct loop_ivs *ivs = LOOP_IVS (loop);
4920 struct induction *b;
4921 struct iv_class *bl;
4922 rtx set = single_set (insn);
4923 rtx temp;
4924
4925 /* Attempt to prove constantness of the values. Don't let simplity_rtx
4926 undo the MULT canonicalization that we performed earlier. */
4927 temp = simplify_rtx (add_val);
4928 if (temp
4929 && ! (GET_CODE (add_val) == MULT
4930 && GET_CODE (temp) == ASHIFT))
4931 add_val = temp;
4932
4933 v->insn = insn;
4934 v->src_reg = src_reg;
4935 v->giv_type = type;
4936 v->dest_reg = dest_reg;
4937 v->mult_val = mult_val;
4938 v->add_val = add_val;
4939 v->ext_dependent = ext_val;
4940 v->benefit = benefit;
4941 v->location = location;
4942 v->cant_derive = 0;
4943 v->combined_with = 0;
4944 v->maybe_multiple = maybe_multiple;
4945 v->maybe_dead = 0;
4946 v->derive_adjustment = 0;
4947 v->same = 0;
4948 v->ignore = 0;
4949 v->new_reg = 0;
4950 v->final_value = 0;
4951 v->same_insn = 0;
4952 v->auto_inc_opt = 0;
4953 v->unrolled = 0;
4954 v->shared = 0;
4955
4956 /* The v->always_computable field is used in update_giv_derive, to
4957 determine whether a giv can be used to derive another giv. For a
4958 DEST_REG giv, INSN computes a new value for the giv, so its value
4959 isn't computable if INSN insn't executed every iteration.
4960 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4961 it does not compute a new value. Hence the value is always computable
4962 regardless of whether INSN is executed each iteration. */
4963
4964 if (type == DEST_ADDR)
4965 v->always_computable = 1;
4966 else
4967 v->always_computable = ! not_every_iteration;
4968
4969 v->always_executed = ! not_every_iteration;
4970
4971 if (type == DEST_ADDR)
4972 {
4973 v->mode = GET_MODE (*location);
4974 v->lifetime = 1;
4975 }
4976 else /* type == DEST_REG */
4977 {
4978 v->mode = GET_MODE (SET_DEST (set));
4979
4980 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
4981
4982 /* If the lifetime is zero, it means that this register is
4983 really a dead store. So mark this as a giv that can be
4984 ignored. This will not prevent the biv from being eliminated. */
4985 if (v->lifetime == 0)
4986 v->ignore = 1;
4987
4988 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
4989 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
4990 }
4991
4992 /* Add the giv to the class of givs computed from one biv. */
4993
4994 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
4995 if (bl)
4996 {
4997 v->next_iv = bl->giv;
4998 bl->giv = v;
4999 /* Don't count DEST_ADDR. This is supposed to count the number of
5000 insns that calculate givs. */
5001 if (type == DEST_REG)
5002 bl->giv_count++;
5003 bl->total_benefit += benefit;
5004 }
5005 else
5006 /* Fatal error, biv missing for this giv? */
5007 abort ();
5008
5009 if (type == DEST_ADDR)
5010 v->replaceable = 1;
5011 else
5012 {
5013 /* The giv can be replaced outright by the reduced register only if all
5014 of the following conditions are true:
5015 - the insn that sets the giv is always executed on any iteration
5016 on which the giv is used at all
5017 (there are two ways to deduce this:
5018 either the insn is executed on every iteration,
5019 or all uses follow that insn in the same basic block),
5020 - the giv is not used outside the loop
5021 - no assignments to the biv occur during the giv's lifetime. */
5022
5023 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5024 /* Previous line always fails if INSN was moved by loop opt. */
5025 && REGNO_LAST_LUID (REGNO (dest_reg))
5026 < INSN_LUID (loop->end)
5027 && (! not_every_iteration
5028 || last_use_this_basic_block (dest_reg, insn)))
5029 {
5030 /* Now check that there are no assignments to the biv within the
5031 giv's lifetime. This requires two separate checks. */
5032
5033 /* Check each biv update, and fail if any are between the first
5034 and last use of the giv.
5035
5036 If this loop contains an inner loop that was unrolled, then
5037 the insn modifying the biv may have been emitted by the loop
5038 unrolling code, and hence does not have a valid luid. Just
5039 mark the biv as not replaceable in this case. It is not very
5040 useful as a biv, because it is used in two different loops.
5041 It is very unlikely that we would be able to optimize the giv
5042 using this biv anyways. */
5043
5044 v->replaceable = 1;
5045 for (b = bl->biv; b; b = b->next_iv)
5046 {
5047 if (INSN_UID (b->insn) >= max_uid_for_loop
5048 || ((INSN_LUID (b->insn)
5049 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5050 && (INSN_LUID (b->insn)
5051 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5052 {
5053 v->replaceable = 0;
5054 v->not_replaceable = 1;
5055 break;
5056 }
5057 }
5058
5059 /* If there are any backwards branches that go from after the
5060 biv update to before it, then this giv is not replaceable. */
5061 if (v->replaceable)
5062 for (b = bl->biv; b; b = b->next_iv)
5063 if (back_branch_in_range_p (loop, b->insn))
5064 {
5065 v->replaceable = 0;
5066 v->not_replaceable = 1;
5067 break;
5068 }
5069 }
5070 else
5071 {
5072 /* May still be replaceable, we don't have enough info here to
5073 decide. */
5074 v->replaceable = 0;
5075 v->not_replaceable = 0;
5076 }
5077 }
5078
5079 /* Record whether the add_val contains a const_int, for later use by
5080 combine_givs. */
5081 {
5082 rtx tem = add_val;
5083
5084 v->no_const_addval = 1;
5085 if (tem == const0_rtx)
5086 ;
5087 else if (CONSTANT_P (add_val))
5088 v->no_const_addval = 0;
5089 if (GET_CODE (tem) == PLUS)
5090 {
5091 while (1)
5092 {
5093 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5094 tem = XEXP (tem, 0);
5095 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5096 tem = XEXP (tem, 1);
5097 else
5098 break;
5099 }
5100 if (CONSTANT_P (XEXP (tem, 1)))
5101 v->no_const_addval = 0;
5102 }
5103 }
5104
5105 if (loop_dump_stream)
5106 loop_giv_dump (v, loop_dump_stream, 0);
5107 }
5108
5109 /* All this does is determine whether a giv can be made replaceable because
5110 its final value can be calculated. This code can not be part of record_giv
5111 above, because final_giv_value requires that the number of loop iterations
5112 be known, and that can not be accurately calculated until after all givs
5113 have been identified. */
5114
5115 static void
5116 check_final_value (loop, v)
5117 const struct loop *loop;
5118 struct induction *v;
5119 {
5120 struct loop_ivs *ivs = LOOP_IVS (loop);
5121 struct iv_class *bl;
5122 rtx final_value = 0;
5123
5124 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5125
5126 /* DEST_ADDR givs will never reach here, because they are always marked
5127 replaceable above in record_giv. */
5128
5129 /* The giv can be replaced outright by the reduced register only if all
5130 of the following conditions are true:
5131 - the insn that sets the giv is always executed on any iteration
5132 on which the giv is used at all
5133 (there are two ways to deduce this:
5134 either the insn is executed on every iteration,
5135 or all uses follow that insn in the same basic block),
5136 - its final value can be calculated (this condition is different
5137 than the one above in record_giv)
5138 - it's not used before the it's set
5139 - no assignments to the biv occur during the giv's lifetime. */
5140
5141 #if 0
5142 /* This is only called now when replaceable is known to be false. */
5143 /* Clear replaceable, so that it won't confuse final_giv_value. */
5144 v->replaceable = 0;
5145 #endif
5146
5147 if ((final_value = final_giv_value (loop, v))
5148 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5149 {
5150 int biv_increment_seen = 0, before_giv_insn = 0;
5151 rtx p = v->insn;
5152 rtx last_giv_use;
5153
5154 v->replaceable = 1;
5155
5156 /* When trying to determine whether or not a biv increment occurs
5157 during the lifetime of the giv, we can ignore uses of the variable
5158 outside the loop because final_value is true. Hence we can not
5159 use regno_last_uid and regno_first_uid as above in record_giv. */
5160
5161 /* Search the loop to determine whether any assignments to the
5162 biv occur during the giv's lifetime. Start with the insn
5163 that sets the giv, and search around the loop until we come
5164 back to that insn again.
5165
5166 Also fail if there is a jump within the giv's lifetime that jumps
5167 to somewhere outside the lifetime but still within the loop. This
5168 catches spaghetti code where the execution order is not linear, and
5169 hence the above test fails. Here we assume that the giv lifetime
5170 does not extend from one iteration of the loop to the next, so as
5171 to make the test easier. Since the lifetime isn't known yet,
5172 this requires two loops. See also record_giv above. */
5173
5174 last_giv_use = v->insn;
5175
5176 while (1)
5177 {
5178 p = NEXT_INSN (p);
5179 if (p == loop->end)
5180 {
5181 before_giv_insn = 1;
5182 p = NEXT_INSN (loop->start);
5183 }
5184 if (p == v->insn)
5185 break;
5186
5187 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5188 || GET_CODE (p) == CALL_INSN)
5189 {
5190 /* It is possible for the BIV increment to use the GIV if we
5191 have a cycle. Thus we must be sure to check each insn for
5192 both BIV and GIV uses, and we must check for BIV uses
5193 first. */
5194
5195 if (! biv_increment_seen
5196 && reg_set_p (v->src_reg, PATTERN (p)))
5197 biv_increment_seen = 1;
5198
5199 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5200 {
5201 if (biv_increment_seen || before_giv_insn)
5202 {
5203 v->replaceable = 0;
5204 v->not_replaceable = 1;
5205 break;
5206 }
5207 last_giv_use = p;
5208 }
5209 }
5210 }
5211
5212 /* Now that the lifetime of the giv is known, check for branches
5213 from within the lifetime to outside the lifetime if it is still
5214 replaceable. */
5215
5216 if (v->replaceable)
5217 {
5218 p = v->insn;
5219 while (1)
5220 {
5221 p = NEXT_INSN (p);
5222 if (p == loop->end)
5223 p = NEXT_INSN (loop->start);
5224 if (p == last_giv_use)
5225 break;
5226
5227 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5228 && LABEL_NAME (JUMP_LABEL (p))
5229 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5230 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5231 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5232 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5233 {
5234 v->replaceable = 0;
5235 v->not_replaceable = 1;
5236
5237 if (loop_dump_stream)
5238 fprintf (loop_dump_stream,
5239 "Found branch outside giv lifetime.\n");
5240
5241 break;
5242 }
5243 }
5244 }
5245
5246 /* If it is replaceable, then save the final value. */
5247 if (v->replaceable)
5248 v->final_value = final_value;
5249 }
5250
5251 if (loop_dump_stream && v->replaceable)
5252 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5253 INSN_UID (v->insn), REGNO (v->dest_reg));
5254 }
5255 \f
5256 /* Update the status of whether a giv can derive other givs.
5257
5258 We need to do something special if there is or may be an update to the biv
5259 between the time the giv is defined and the time it is used to derive
5260 another giv.
5261
5262 In addition, a giv that is only conditionally set is not allowed to
5263 derive another giv once a label has been passed.
5264
5265 The cases we look at are when a label or an update to a biv is passed. */
5266
5267 static void
5268 update_giv_derive (loop, p)
5269 const struct loop *loop;
5270 rtx p;
5271 {
5272 struct loop_ivs *ivs = LOOP_IVS (loop);
5273 struct iv_class *bl;
5274 struct induction *biv, *giv;
5275 rtx tem;
5276 int dummy;
5277
5278 /* Search all IV classes, then all bivs, and finally all givs.
5279
5280 There are three cases we are concerned with. First we have the situation
5281 of a giv that is only updated conditionally. In that case, it may not
5282 derive any givs after a label is passed.
5283
5284 The second case is when a biv update occurs, or may occur, after the
5285 definition of a giv. For certain biv updates (see below) that are
5286 known to occur between the giv definition and use, we can adjust the
5287 giv definition. For others, or when the biv update is conditional,
5288 we must prevent the giv from deriving any other givs. There are two
5289 sub-cases within this case.
5290
5291 If this is a label, we are concerned with any biv update that is done
5292 conditionally, since it may be done after the giv is defined followed by
5293 a branch here (actually, we need to pass both a jump and a label, but
5294 this extra tracking doesn't seem worth it).
5295
5296 If this is a jump, we are concerned about any biv update that may be
5297 executed multiple times. We are actually only concerned about
5298 backward jumps, but it is probably not worth performing the test
5299 on the jump again here.
5300
5301 If this is a biv update, we must adjust the giv status to show that a
5302 subsequent biv update was performed. If this adjustment cannot be done,
5303 the giv cannot derive further givs. */
5304
5305 for (bl = ivs->list; bl; bl = bl->next)
5306 for (biv = bl->biv; biv; biv = biv->next_iv)
5307 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5308 || biv->insn == p)
5309 {
5310 for (giv = bl->giv; giv; giv = giv->next_iv)
5311 {
5312 /* If cant_derive is already true, there is no point in
5313 checking all of these conditions again. */
5314 if (giv->cant_derive)
5315 continue;
5316
5317 /* If this giv is conditionally set and we have passed a label,
5318 it cannot derive anything. */
5319 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5320 giv->cant_derive = 1;
5321
5322 /* Skip givs that have mult_val == 0, since
5323 they are really invariants. Also skip those that are
5324 replaceable, since we know their lifetime doesn't contain
5325 any biv update. */
5326 else if (giv->mult_val == const0_rtx || giv->replaceable)
5327 continue;
5328
5329 /* The only way we can allow this giv to derive another
5330 is if this is a biv increment and we can form the product
5331 of biv->add_val and giv->mult_val. In this case, we will
5332 be able to compute a compensation. */
5333 else if (biv->insn == p)
5334 {
5335 rtx ext_val_dummy;
5336
5337 tem = 0;
5338 if (biv->mult_val == const1_rtx)
5339 tem = simplify_giv_expr (loop,
5340 gen_rtx_MULT (giv->mode,
5341 biv->add_val,
5342 giv->mult_val),
5343 &ext_val_dummy, &dummy);
5344
5345 if (tem && giv->derive_adjustment)
5346 tem = simplify_giv_expr
5347 (loop,
5348 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5349 &ext_val_dummy, &dummy);
5350
5351 if (tem)
5352 giv->derive_adjustment = tem;
5353 else
5354 giv->cant_derive = 1;
5355 }
5356 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5357 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5358 giv->cant_derive = 1;
5359 }
5360 }
5361 }
5362 \f
5363 /* Check whether an insn is an increment legitimate for a basic induction var.
5364 X is the source of insn P, or a part of it.
5365 MODE is the mode in which X should be interpreted.
5366
5367 DEST_REG is the putative biv, also the destination of the insn.
5368 We accept patterns of these forms:
5369 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5370 REG = INVARIANT + REG
5371
5372 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5373 store the additive term into *INC_VAL, and store the place where
5374 we found the additive term into *LOCATION.
5375
5376 If X is an assignment of an invariant into DEST_REG, we set
5377 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5378
5379 We also want to detect a BIV when it corresponds to a variable
5380 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5381 of the variable may be a PLUS that adds a SUBREG of that variable to
5382 an invariant and then sign- or zero-extends the result of the PLUS
5383 into the variable.
5384
5385 Most GIVs in such cases will be in the promoted mode, since that is the
5386 probably the natural computation mode (and almost certainly the mode
5387 used for addresses) on the machine. So we view the pseudo-reg containing
5388 the variable as the BIV, as if it were simply incremented.
5389
5390 Note that treating the entire pseudo as a BIV will result in making
5391 simple increments to any GIVs based on it. However, if the variable
5392 overflows in its declared mode but not its promoted mode, the result will
5393 be incorrect. This is acceptable if the variable is signed, since
5394 overflows in such cases are undefined, but not if it is unsigned, since
5395 those overflows are defined. So we only check for SIGN_EXTEND and
5396 not ZERO_EXTEND.
5397
5398 If we cannot find a biv, we return 0. */
5399
5400 static int
5401 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5402 const struct loop *loop;
5403 rtx x;
5404 enum machine_mode mode;
5405 rtx dest_reg;
5406 rtx p;
5407 rtx *inc_val;
5408 rtx *mult_val;
5409 rtx **location;
5410 {
5411 enum rtx_code code;
5412 rtx *argp, arg;
5413 rtx insn, set = 0;
5414
5415 code = GET_CODE (x);
5416 *location = NULL;
5417 switch (code)
5418 {
5419 case PLUS:
5420 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5421 || (GET_CODE (XEXP (x, 0)) == SUBREG
5422 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5423 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5424 {
5425 argp = &XEXP (x, 1);
5426 }
5427 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5428 || (GET_CODE (XEXP (x, 1)) == SUBREG
5429 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5430 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5431 {
5432 argp = &XEXP (x, 0);
5433 }
5434 else
5435 return 0;
5436
5437 arg = *argp;
5438 if (loop_invariant_p (loop, arg) != 1)
5439 return 0;
5440
5441 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5442 *mult_val = const1_rtx;
5443 *location = argp;
5444 return 1;
5445
5446 case SUBREG:
5447 /* If this is a SUBREG for a promoted variable, check the inner
5448 value. */
5449 if (SUBREG_PROMOTED_VAR_P (x))
5450 return basic_induction_var (loop, SUBREG_REG (x),
5451 GET_MODE (SUBREG_REG (x)),
5452 dest_reg, p, inc_val, mult_val, location);
5453 return 0;
5454
5455 case REG:
5456 /* If this register is assigned in a previous insn, look at its
5457 source, but don't go outside the loop or past a label. */
5458
5459 /* If this sets a register to itself, we would repeat any previous
5460 biv increment if we applied this strategy blindly. */
5461 if (rtx_equal_p (dest_reg, x))
5462 return 0;
5463
5464 insn = p;
5465 while (1)
5466 {
5467 rtx dest;
5468 do
5469 {
5470 insn = PREV_INSN (insn);
5471 }
5472 while (insn && GET_CODE (insn) == NOTE
5473 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5474
5475 if (!insn)
5476 break;
5477 set = single_set (insn);
5478 if (set == 0)
5479 break;
5480 dest = SET_DEST (set);
5481 if (dest == x
5482 || (GET_CODE (dest) == SUBREG
5483 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
5484 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
5485 && SUBREG_REG (dest) == x))
5486 return basic_induction_var (loop, SET_SRC (set),
5487 (GET_MODE (SET_SRC (set)) == VOIDmode
5488 ? GET_MODE (x)
5489 : GET_MODE (SET_SRC (set))),
5490 dest_reg, insn,
5491 inc_val, mult_val, location);
5492
5493 while (GET_CODE (dest) == SIGN_EXTRACT
5494 || GET_CODE (dest) == ZERO_EXTRACT
5495 || GET_CODE (dest) == SUBREG
5496 || GET_CODE (dest) == STRICT_LOW_PART)
5497 dest = XEXP (dest, 0);
5498 if (dest == x)
5499 break;
5500 }
5501 /* Fall through. */
5502
5503 /* Can accept constant setting of biv only when inside inner most loop.
5504 Otherwise, a biv of an inner loop may be incorrectly recognized
5505 as a biv of the outer loop,
5506 causing code to be moved INTO the inner loop. */
5507 case MEM:
5508 if (loop_invariant_p (loop, x) != 1)
5509 return 0;
5510 case CONST_INT:
5511 case SYMBOL_REF:
5512 case CONST:
5513 /* convert_modes aborts if we try to convert to or from CCmode, so just
5514 exclude that case. It is very unlikely that a condition code value
5515 would be a useful iterator anyways. */
5516 if (loop->level == 1
5517 && GET_MODE_CLASS (mode) != MODE_CC
5518 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5519 {
5520 /* Possible bug here? Perhaps we don't know the mode of X. */
5521 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5522 *mult_val = const0_rtx;
5523 return 1;
5524 }
5525 else
5526 return 0;
5527
5528 case SIGN_EXTEND:
5529 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5530 dest_reg, p, inc_val, mult_val, location);
5531
5532 case ASHIFTRT:
5533 /* Similar, since this can be a sign extension. */
5534 for (insn = PREV_INSN (p);
5535 (insn && GET_CODE (insn) == NOTE
5536 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5537 insn = PREV_INSN (insn))
5538 ;
5539
5540 if (insn)
5541 set = single_set (insn);
5542
5543 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
5544 && set && SET_DEST (set) == XEXP (x, 0)
5545 && GET_CODE (XEXP (x, 1)) == CONST_INT
5546 && INTVAL (XEXP (x, 1)) >= 0
5547 && GET_CODE (SET_SRC (set)) == ASHIFT
5548 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5549 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
5550 GET_MODE (XEXP (x, 0)),
5551 dest_reg, insn, inc_val, mult_val,
5552 location);
5553 return 0;
5554
5555 default:
5556 return 0;
5557 }
5558 }
5559 \f
5560 /* A general induction variable (giv) is any quantity that is a linear
5561 function of a basic induction variable,
5562 i.e. giv = biv * mult_val + add_val.
5563 The coefficients can be any loop invariant quantity.
5564 A giv need not be computed directly from the biv;
5565 it can be computed by way of other givs. */
5566
5567 /* Determine whether X computes a giv.
5568 If it does, return a nonzero value
5569 which is the benefit from eliminating the computation of X;
5570 set *SRC_REG to the register of the biv that it is computed from;
5571 set *ADD_VAL and *MULT_VAL to the coefficients,
5572 such that the value of X is biv * mult + add; */
5573
5574 static int
5575 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
5576 is_addr, pbenefit, addr_mode)
5577 const struct loop *loop;
5578 rtx x;
5579 rtx *src_reg;
5580 rtx *add_val;
5581 rtx *mult_val;
5582 rtx *ext_val;
5583 int is_addr;
5584 int *pbenefit;
5585 enum machine_mode addr_mode;
5586 {
5587 struct loop_ivs *ivs = LOOP_IVS (loop);
5588 rtx orig_x = x;
5589
5590 /* If this is an invariant, forget it, it isn't a giv. */
5591 if (loop_invariant_p (loop, x) == 1)
5592 return 0;
5593
5594 *pbenefit = 0;
5595 *ext_val = NULL_RTX;
5596 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
5597 if (x == 0)
5598 return 0;
5599
5600 switch (GET_CODE (x))
5601 {
5602 case USE:
5603 case CONST_INT:
5604 /* Since this is now an invariant and wasn't before, it must be a giv
5605 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5606 with. */
5607 *src_reg = ivs->list->biv->dest_reg;
5608 *mult_val = const0_rtx;
5609 *add_val = x;
5610 break;
5611
5612 case REG:
5613 /* This is equivalent to a BIV. */
5614 *src_reg = x;
5615 *mult_val = const1_rtx;
5616 *add_val = const0_rtx;
5617 break;
5618
5619 case PLUS:
5620 /* Either (plus (biv) (invar)) or
5621 (plus (mult (biv) (invar_1)) (invar_2)). */
5622 if (GET_CODE (XEXP (x, 0)) == MULT)
5623 {
5624 *src_reg = XEXP (XEXP (x, 0), 0);
5625 *mult_val = XEXP (XEXP (x, 0), 1);
5626 }
5627 else
5628 {
5629 *src_reg = XEXP (x, 0);
5630 *mult_val = const1_rtx;
5631 }
5632 *add_val = XEXP (x, 1);
5633 break;
5634
5635 case MULT:
5636 /* ADD_VAL is zero. */
5637 *src_reg = XEXP (x, 0);
5638 *mult_val = XEXP (x, 1);
5639 *add_val = const0_rtx;
5640 break;
5641
5642 default:
5643 abort ();
5644 }
5645
5646 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5647 unless they are CONST_INT). */
5648 if (GET_CODE (*add_val) == USE)
5649 *add_val = XEXP (*add_val, 0);
5650 if (GET_CODE (*mult_val) == USE)
5651 *mult_val = XEXP (*mult_val, 0);
5652
5653 if (is_addr)
5654 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
5655 else
5656 *pbenefit += rtx_cost (orig_x, SET);
5657
5658 /* Always return true if this is a giv so it will be detected as such,
5659 even if the benefit is zero or negative. This allows elimination
5660 of bivs that might otherwise not be eliminated. */
5661 return 1;
5662 }
5663 \f
5664 /* Given an expression, X, try to form it as a linear function of a biv.
5665 We will canonicalize it to be of the form
5666 (plus (mult (BIV) (invar_1))
5667 (invar_2))
5668 with possible degeneracies.
5669
5670 The invariant expressions must each be of a form that can be used as a
5671 machine operand. We surround then with a USE rtx (a hack, but localized
5672 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5673 routine; it is the caller's responsibility to strip them.
5674
5675 If no such canonicalization is possible (i.e., two biv's are used or an
5676 expression that is neither invariant nor a biv or giv), this routine
5677 returns 0.
5678
5679 For a non-zero return, the result will have a code of CONST_INT, USE,
5680 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5681
5682 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5683
5684 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
5685 static rtx sge_plus_constant PARAMS ((rtx, rtx));
5686
5687 static rtx
5688 simplify_giv_expr (loop, x, ext_val, benefit)
5689 const struct loop *loop;
5690 rtx x;
5691 rtx *ext_val;
5692 int *benefit;
5693 {
5694 struct loop_ivs *ivs = LOOP_IVS (loop);
5695 struct loop_regs *regs = LOOP_REGS (loop);
5696 enum machine_mode mode = GET_MODE (x);
5697 rtx arg0, arg1;
5698 rtx tem;
5699
5700 /* If this is not an integer mode, or if we cannot do arithmetic in this
5701 mode, this can't be a giv. */
5702 if (mode != VOIDmode
5703 && (GET_MODE_CLASS (mode) != MODE_INT
5704 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5705 return NULL_RTX;
5706
5707 switch (GET_CODE (x))
5708 {
5709 case PLUS:
5710 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5711 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5712 if (arg0 == 0 || arg1 == 0)
5713 return NULL_RTX;
5714
5715 /* Put constant last, CONST_INT last if both constant. */
5716 if ((GET_CODE (arg0) == USE
5717 || GET_CODE (arg0) == CONST_INT)
5718 && ! ((GET_CODE (arg0) == USE
5719 && GET_CODE (arg1) == USE)
5720 || GET_CODE (arg1) == CONST_INT))
5721 tem = arg0, arg0 = arg1, arg1 = tem;
5722
5723 /* Handle addition of zero, then addition of an invariant. */
5724 if (arg1 == const0_rtx)
5725 return arg0;
5726 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5727 switch (GET_CODE (arg0))
5728 {
5729 case CONST_INT:
5730 case USE:
5731 /* Adding two invariants must result in an invariant, so enclose
5732 addition operation inside a USE and return it. */
5733 if (GET_CODE (arg0) == USE)
5734 arg0 = XEXP (arg0, 0);
5735 if (GET_CODE (arg1) == USE)
5736 arg1 = XEXP (arg1, 0);
5737
5738 if (GET_CODE (arg0) == CONST_INT)
5739 tem = arg0, arg0 = arg1, arg1 = tem;
5740 if (GET_CODE (arg1) == CONST_INT)
5741 tem = sge_plus_constant (arg0, arg1);
5742 else
5743 tem = sge_plus (mode, arg0, arg1);
5744
5745 if (GET_CODE (tem) != CONST_INT)
5746 tem = gen_rtx_USE (mode, tem);
5747 return tem;
5748
5749 case REG:
5750 case MULT:
5751 /* biv + invar or mult + invar. Return sum. */
5752 return gen_rtx_PLUS (mode, arg0, arg1);
5753
5754 case PLUS:
5755 /* (a + invar_1) + invar_2. Associate. */
5756 return
5757 simplify_giv_expr (loop,
5758 gen_rtx_PLUS (mode,
5759 XEXP (arg0, 0),
5760 gen_rtx_PLUS (mode,
5761 XEXP (arg0, 1),
5762 arg1)),
5763 ext_val, benefit);
5764
5765 default:
5766 abort ();
5767 }
5768
5769 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5770 MULT to reduce cases. */
5771 if (GET_CODE (arg0) == REG)
5772 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5773 if (GET_CODE (arg1) == REG)
5774 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5775
5776 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5777 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5778 Recurse to associate the second PLUS. */
5779 if (GET_CODE (arg1) == MULT)
5780 tem = arg0, arg0 = arg1, arg1 = tem;
5781
5782 if (GET_CODE (arg1) == PLUS)
5783 return
5784 simplify_giv_expr (loop,
5785 gen_rtx_PLUS (mode,
5786 gen_rtx_PLUS (mode, arg0,
5787 XEXP (arg1, 0)),
5788 XEXP (arg1, 1)),
5789 ext_val, benefit);
5790
5791 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5792 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5793 return NULL_RTX;
5794
5795 if (!rtx_equal_p (arg0, arg1))
5796 return NULL_RTX;
5797
5798 return simplify_giv_expr (loop,
5799 gen_rtx_MULT (mode,
5800 XEXP (arg0, 0),
5801 gen_rtx_PLUS (mode,
5802 XEXP (arg0, 1),
5803 XEXP (arg1, 1))),
5804 ext_val, benefit);
5805
5806 case MINUS:
5807 /* Handle "a - b" as "a + b * (-1)". */
5808 return simplify_giv_expr (loop,
5809 gen_rtx_PLUS (mode,
5810 XEXP (x, 0),
5811 gen_rtx_MULT (mode,
5812 XEXP (x, 1),
5813 constm1_rtx)),
5814 ext_val, benefit);
5815
5816 case MULT:
5817 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5818 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5819 if (arg0 == 0 || arg1 == 0)
5820 return NULL_RTX;
5821
5822 /* Put constant last, CONST_INT last if both constant. */
5823 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5824 && GET_CODE (arg1) != CONST_INT)
5825 tem = arg0, arg0 = arg1, arg1 = tem;
5826
5827 /* If second argument is not now constant, not giv. */
5828 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5829 return NULL_RTX;
5830
5831 /* Handle multiply by 0 or 1. */
5832 if (arg1 == const0_rtx)
5833 return const0_rtx;
5834
5835 else if (arg1 == const1_rtx)
5836 return arg0;
5837
5838 switch (GET_CODE (arg0))
5839 {
5840 case REG:
5841 /* biv * invar. Done. */
5842 return gen_rtx_MULT (mode, arg0, arg1);
5843
5844 case CONST_INT:
5845 /* Product of two constants. */
5846 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5847
5848 case USE:
5849 /* invar * invar is a giv, but attempt to simplify it somehow. */
5850 if (GET_CODE (arg1) != CONST_INT)
5851 return NULL_RTX;
5852
5853 arg0 = XEXP (arg0, 0);
5854 if (GET_CODE (arg0) == MULT)
5855 {
5856 /* (invar_0 * invar_1) * invar_2. Associate. */
5857 return simplify_giv_expr (loop,
5858 gen_rtx_MULT (mode,
5859 XEXP (arg0, 0),
5860 gen_rtx_MULT (mode,
5861 XEXP (arg0,
5862 1),
5863 arg1)),
5864 ext_val, benefit);
5865 }
5866 /* Porpagate the MULT expressions to the intermost nodes. */
5867 else if (GET_CODE (arg0) == PLUS)
5868 {
5869 /* (invar_0 + invar_1) * invar_2. Distribute. */
5870 return simplify_giv_expr (loop,
5871 gen_rtx_PLUS (mode,
5872 gen_rtx_MULT (mode,
5873 XEXP (arg0,
5874 0),
5875 arg1),
5876 gen_rtx_MULT (mode,
5877 XEXP (arg0,
5878 1),
5879 arg1)),
5880 ext_val, benefit);
5881 }
5882 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
5883
5884 case MULT:
5885 /* (a * invar_1) * invar_2. Associate. */
5886 return simplify_giv_expr (loop,
5887 gen_rtx_MULT (mode,
5888 XEXP (arg0, 0),
5889 gen_rtx_MULT (mode,
5890 XEXP (arg0, 1),
5891 arg1)),
5892 ext_val, benefit);
5893
5894 case PLUS:
5895 /* (a + invar_1) * invar_2. Distribute. */
5896 return simplify_giv_expr (loop,
5897 gen_rtx_PLUS (mode,
5898 gen_rtx_MULT (mode,
5899 XEXP (arg0, 0),
5900 arg1),
5901 gen_rtx_MULT (mode,
5902 XEXP (arg0, 1),
5903 arg1)),
5904 ext_val, benefit);
5905
5906 default:
5907 abort ();
5908 }
5909
5910 case ASHIFT:
5911 /* Shift by constant is multiply by power of two. */
5912 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5913 return 0;
5914
5915 return
5916 simplify_giv_expr (loop,
5917 gen_rtx_MULT (mode,
5918 XEXP (x, 0),
5919 GEN_INT ((HOST_WIDE_INT) 1
5920 << INTVAL (XEXP (x, 1)))),
5921 ext_val, benefit);
5922
5923 case NEG:
5924 /* "-a" is "a * (-1)" */
5925 return simplify_giv_expr (loop,
5926 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5927 ext_val, benefit);
5928
5929 case NOT:
5930 /* "~a" is "-a - 1". Silly, but easy. */
5931 return simplify_giv_expr (loop,
5932 gen_rtx_MINUS (mode,
5933 gen_rtx_NEG (mode, XEXP (x, 0)),
5934 const1_rtx),
5935 ext_val, benefit);
5936
5937 case USE:
5938 /* Already in proper form for invariant. */
5939 return x;
5940
5941 case SIGN_EXTEND:
5942 case ZERO_EXTEND:
5943 case TRUNCATE:
5944 /* Conditionally recognize extensions of simple IVs. After we've
5945 computed loop traversal counts and verified the range of the
5946 source IV, we'll reevaluate this as a GIV. */
5947 if (*ext_val == NULL_RTX)
5948 {
5949 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5950 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
5951 {
5952 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
5953 return arg0;
5954 }
5955 }
5956 goto do_default;
5957
5958 case REG:
5959 /* If this is a new register, we can't deal with it. */
5960 if (REGNO (x) >= max_reg_before_loop)
5961 return 0;
5962
5963 /* Check for biv or giv. */
5964 switch (REG_IV_TYPE (ivs, REGNO (x)))
5965 {
5966 case BASIC_INDUCT:
5967 return x;
5968 case GENERAL_INDUCT:
5969 {
5970 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
5971
5972 /* Form expression from giv and add benefit. Ensure this giv
5973 can derive another and subtract any needed adjustment if so. */
5974
5975 /* Increasing the benefit here is risky. The only case in which it
5976 is arguably correct is if this is the only use of V. In other
5977 cases, this will artificially inflate the benefit of the current
5978 giv, and lead to suboptimal code. Thus, it is disabled, since
5979 potentially not reducing an only marginally beneficial giv is
5980 less harmful than reducing many givs that are not really
5981 beneficial. */
5982 {
5983 rtx single_use = regs->array[REGNO (x)].single_usage;
5984 if (single_use && single_use != const0_rtx)
5985 *benefit += v->benefit;
5986 }
5987
5988 if (v->cant_derive)
5989 return 0;
5990
5991 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
5992 v->src_reg, v->mult_val),
5993 v->add_val);
5994
5995 if (v->derive_adjustment)
5996 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5997 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
5998 if (*ext_val)
5999 {
6000 if (!v->ext_dependent)
6001 return arg0;
6002 }
6003 else
6004 {
6005 *ext_val = v->ext_dependent;
6006 return arg0;
6007 }
6008 return 0;
6009 }
6010
6011 default:
6012 do_default:
6013 /* If it isn't an induction variable, and it is invariant, we
6014 may be able to simplify things further by looking through
6015 the bits we just moved outside the loop. */
6016 if (loop_invariant_p (loop, x) == 1)
6017 {
6018 struct movable *m;
6019 struct loop_movables *movables = LOOP_MOVABLES (loop);
6020
6021 for (m = movables->head; m; m = m->next)
6022 if (rtx_equal_p (x, m->set_dest))
6023 {
6024 /* Ok, we found a match. Substitute and simplify. */
6025
6026 /* If we match another movable, we must use that, as
6027 this one is going away. */
6028 if (m->match)
6029 return simplify_giv_expr (loop, m->match->set_dest,
6030 ext_val, benefit);
6031
6032 /* If consec is non-zero, this is a member of a group of
6033 instructions that were moved together. We handle this
6034 case only to the point of seeking to the last insn and
6035 looking for a REG_EQUAL. Fail if we don't find one. */
6036 if (m->consec != 0)
6037 {
6038 int i = m->consec;
6039 tem = m->insn;
6040 do
6041 {
6042 tem = NEXT_INSN (tem);
6043 }
6044 while (--i > 0);
6045
6046 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6047 if (tem)
6048 tem = XEXP (tem, 0);
6049 }
6050 else
6051 {
6052 tem = single_set (m->insn);
6053 if (tem)
6054 tem = SET_SRC (tem);
6055 }
6056
6057 if (tem)
6058 {
6059 /* What we are most interested in is pointer
6060 arithmetic on invariants -- only take
6061 patterns we may be able to do something with. */
6062 if (GET_CODE (tem) == PLUS
6063 || GET_CODE (tem) == MULT
6064 || GET_CODE (tem) == ASHIFT
6065 || GET_CODE (tem) == CONST_INT
6066 || GET_CODE (tem) == SYMBOL_REF)
6067 {
6068 tem = simplify_giv_expr (loop, tem, ext_val,
6069 benefit);
6070 if (tem)
6071 return tem;
6072 }
6073 else if (GET_CODE (tem) == CONST
6074 && GET_CODE (XEXP (tem, 0)) == PLUS
6075 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6076 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6077 {
6078 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6079 ext_val, benefit);
6080 if (tem)
6081 return tem;
6082 }
6083 }
6084 break;
6085 }
6086 }
6087 break;
6088 }
6089
6090 /* Fall through to general case. */
6091 default:
6092 /* If invariant, return as USE (unless CONST_INT).
6093 Otherwise, not giv. */
6094 if (GET_CODE (x) == USE)
6095 x = XEXP (x, 0);
6096
6097 if (loop_invariant_p (loop, x) == 1)
6098 {
6099 if (GET_CODE (x) == CONST_INT)
6100 return x;
6101 if (GET_CODE (x) == CONST
6102 && GET_CODE (XEXP (x, 0)) == PLUS
6103 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6104 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6105 x = XEXP (x, 0);
6106 return gen_rtx_USE (mode, x);
6107 }
6108 else
6109 return 0;
6110 }
6111 }
6112
6113 /* This routine folds invariants such that there is only ever one
6114 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6115
6116 static rtx
6117 sge_plus_constant (x, c)
6118 rtx x, c;
6119 {
6120 if (GET_CODE (x) == CONST_INT)
6121 return GEN_INT (INTVAL (x) + INTVAL (c));
6122 else if (GET_CODE (x) != PLUS)
6123 return gen_rtx_PLUS (GET_MODE (x), x, c);
6124 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6125 {
6126 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6127 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6128 }
6129 else if (GET_CODE (XEXP (x, 0)) == PLUS
6130 || GET_CODE (XEXP (x, 1)) != PLUS)
6131 {
6132 return gen_rtx_PLUS (GET_MODE (x),
6133 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6134 }
6135 else
6136 {
6137 return gen_rtx_PLUS (GET_MODE (x),
6138 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6139 }
6140 }
6141
6142 static rtx
6143 sge_plus (mode, x, y)
6144 enum machine_mode mode;
6145 rtx x, y;
6146 {
6147 while (GET_CODE (y) == PLUS)
6148 {
6149 rtx a = XEXP (y, 0);
6150 if (GET_CODE (a) == CONST_INT)
6151 x = sge_plus_constant (x, a);
6152 else
6153 x = gen_rtx_PLUS (mode, x, a);
6154 y = XEXP (y, 1);
6155 }
6156 if (GET_CODE (y) == CONST_INT)
6157 x = sge_plus_constant (x, y);
6158 else
6159 x = gen_rtx_PLUS (mode, x, y);
6160 return x;
6161 }
6162 \f
6163 /* Help detect a giv that is calculated by several consecutive insns;
6164 for example,
6165 giv = biv * M
6166 giv = giv + A
6167 The caller has already identified the first insn P as having a giv as dest;
6168 we check that all other insns that set the same register follow
6169 immediately after P, that they alter nothing else,
6170 and that the result of the last is still a giv.
6171
6172 The value is 0 if the reg set in P is not really a giv.
6173 Otherwise, the value is the amount gained by eliminating
6174 all the consecutive insns that compute the value.
6175
6176 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6177 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6178
6179 The coefficients of the ultimate giv value are stored in
6180 *MULT_VAL and *ADD_VAL. */
6181
6182 static int
6183 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6184 add_val, mult_val, ext_val, last_consec_insn)
6185 const struct loop *loop;
6186 int first_benefit;
6187 rtx p;
6188 rtx src_reg;
6189 rtx dest_reg;
6190 rtx *add_val;
6191 rtx *mult_val;
6192 rtx *ext_val;
6193 rtx *last_consec_insn;
6194 {
6195 struct loop_ivs *ivs = LOOP_IVS (loop);
6196 struct loop_regs *regs = LOOP_REGS (loop);
6197 int count;
6198 enum rtx_code code;
6199 int benefit;
6200 rtx temp;
6201 rtx set;
6202
6203 /* Indicate that this is a giv so that we can update the value produced in
6204 each insn of the multi-insn sequence.
6205
6206 This induction structure will be used only by the call to
6207 general_induction_var below, so we can allocate it on our stack.
6208 If this is a giv, our caller will replace the induct var entry with
6209 a new induction structure. */
6210 struct induction *v;
6211
6212 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6213 return 0;
6214
6215 v = (struct induction *) alloca (sizeof (struct induction));
6216 v->src_reg = src_reg;
6217 v->mult_val = *mult_val;
6218 v->add_val = *add_val;
6219 v->benefit = first_benefit;
6220 v->cant_derive = 0;
6221 v->derive_adjustment = 0;
6222 v->ext_dependent = NULL_RTX;
6223
6224 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6225 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6226
6227 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6228
6229 while (count > 0)
6230 {
6231 p = NEXT_INSN (p);
6232 code = GET_CODE (p);
6233
6234 /* If libcall, skip to end of call sequence. */
6235 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6236 p = XEXP (temp, 0);
6237
6238 if (code == INSN
6239 && (set = single_set (p))
6240 && GET_CODE (SET_DEST (set)) == REG
6241 && SET_DEST (set) == dest_reg
6242 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6243 add_val, mult_val, ext_val, 0,
6244 &benefit, VOIDmode)
6245 /* Giv created by equivalent expression. */
6246 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6247 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6248 add_val, mult_val, ext_val, 0,
6249 &benefit, VOIDmode)))
6250 && src_reg == v->src_reg)
6251 {
6252 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6253 benefit += libcall_benefit (p);
6254
6255 count--;
6256 v->mult_val = *mult_val;
6257 v->add_val = *add_val;
6258 v->benefit += benefit;
6259 }
6260 else if (code != NOTE)
6261 {
6262 /* Allow insns that set something other than this giv to a
6263 constant. Such insns are needed on machines which cannot
6264 include long constants and should not disqualify a giv. */
6265 if (code == INSN
6266 && (set = single_set (p))
6267 && SET_DEST (set) != dest_reg
6268 && CONSTANT_P (SET_SRC (set)))
6269 continue;
6270
6271 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6272 return 0;
6273 }
6274 }
6275
6276 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6277 *last_consec_insn = p;
6278 return v->benefit;
6279 }
6280 \f
6281 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6282 represented by G1. If no such expression can be found, or it is clear that
6283 it cannot possibly be a valid address, 0 is returned.
6284
6285 To perform the computation, we note that
6286 G1 = x * v + a and
6287 G2 = y * v + b
6288 where `v' is the biv.
6289
6290 So G2 = (y/b) * G1 + (b - a*y/x).
6291
6292 Note that MULT = y/x.
6293
6294 Update: A and B are now allowed to be additive expressions such that
6295 B contains all variables in A. That is, computing B-A will not require
6296 subtracting variables. */
6297
6298 static rtx
6299 express_from_1 (a, b, mult)
6300 rtx a, b, mult;
6301 {
6302 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6303
6304 if (mult == const0_rtx)
6305 return b;
6306
6307 /* If MULT is not 1, we cannot handle A with non-constants, since we
6308 would then be required to subtract multiples of the registers in A.
6309 This is theoretically possible, and may even apply to some Fortran
6310 constructs, but it is a lot of work and we do not attempt it here. */
6311
6312 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6313 return NULL_RTX;
6314
6315 /* In general these structures are sorted top to bottom (down the PLUS
6316 chain), but not left to right across the PLUS. If B is a higher
6317 order giv than A, we can strip one level and recurse. If A is higher
6318 order, we'll eventually bail out, but won't know that until the end.
6319 If they are the same, we'll strip one level around this loop. */
6320
6321 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6322 {
6323 rtx ra, rb, oa, ob, tmp;
6324
6325 ra = XEXP (a, 0), oa = XEXP (a, 1);
6326 if (GET_CODE (ra) == PLUS)
6327 tmp = ra, ra = oa, oa = tmp;
6328
6329 rb = XEXP (b, 0), ob = XEXP (b, 1);
6330 if (GET_CODE (rb) == PLUS)
6331 tmp = rb, rb = ob, ob = tmp;
6332
6333 if (rtx_equal_p (ra, rb))
6334 /* We matched: remove one reg completely. */
6335 a = oa, b = ob;
6336 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6337 /* An alternate match. */
6338 a = oa, b = rb;
6339 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6340 /* An alternate match. */
6341 a = ra, b = ob;
6342 else
6343 {
6344 /* Indicates an extra register in B. Strip one level from B and
6345 recurse, hoping B was the higher order expression. */
6346 ob = express_from_1 (a, ob, mult);
6347 if (ob == NULL_RTX)
6348 return NULL_RTX;
6349 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6350 }
6351 }
6352
6353 /* Here we are at the last level of A, go through the cases hoping to
6354 get rid of everything but a constant. */
6355
6356 if (GET_CODE (a) == PLUS)
6357 {
6358 rtx ra, oa;
6359
6360 ra = XEXP (a, 0), oa = XEXP (a, 1);
6361 if (rtx_equal_p (oa, b))
6362 oa = ra;
6363 else if (!rtx_equal_p (ra, b))
6364 return NULL_RTX;
6365
6366 if (GET_CODE (oa) != CONST_INT)
6367 return NULL_RTX;
6368
6369 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6370 }
6371 else if (GET_CODE (a) == CONST_INT)
6372 {
6373 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6374 }
6375 else if (CONSTANT_P (a))
6376 {
6377 enum machine_mode mode_a = GET_MODE (a);
6378 enum machine_mode mode_b = GET_MODE (b);
6379 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
6380 return simplify_gen_binary (MINUS, mode, b, a);
6381 }
6382 else if (GET_CODE (b) == PLUS)
6383 {
6384 if (rtx_equal_p (a, XEXP (b, 0)))
6385 return XEXP (b, 1);
6386 else if (rtx_equal_p (a, XEXP (b, 1)))
6387 return XEXP (b, 0);
6388 else
6389 return NULL_RTX;
6390 }
6391 else if (rtx_equal_p (a, b))
6392 return const0_rtx;
6393
6394 return NULL_RTX;
6395 }
6396
6397 rtx
6398 express_from (g1, g2)
6399 struct induction *g1, *g2;
6400 {
6401 rtx mult, add;
6402
6403 /* The value that G1 will be multiplied by must be a constant integer. Also,
6404 the only chance we have of getting a valid address is if b*c/a (see above
6405 for notation) is also an integer. */
6406 if (GET_CODE (g1->mult_val) == CONST_INT
6407 && GET_CODE (g2->mult_val) == CONST_INT)
6408 {
6409 if (g1->mult_val == const0_rtx
6410 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6411 return NULL_RTX;
6412 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6413 }
6414 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6415 mult = const1_rtx;
6416 else
6417 {
6418 /* ??? Find out if the one is a multiple of the other? */
6419 return NULL_RTX;
6420 }
6421
6422 add = express_from_1 (g1->add_val, g2->add_val, mult);
6423 if (add == NULL_RTX)
6424 {
6425 /* Failed. If we've got a multiplication factor between G1 and G2,
6426 scale G1's addend and try again. */
6427 if (INTVAL (mult) > 1)
6428 {
6429 rtx g1_add_val = g1->add_val;
6430 if (GET_CODE (g1_add_val) == MULT
6431 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6432 {
6433 HOST_WIDE_INT m;
6434 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6435 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6436 XEXP (g1_add_val, 0), GEN_INT (m));
6437 }
6438 else
6439 {
6440 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6441 mult);
6442 }
6443
6444 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6445 }
6446 }
6447 if (add == NULL_RTX)
6448 return NULL_RTX;
6449
6450 /* Form simplified final result. */
6451 if (mult == const0_rtx)
6452 return add;
6453 else if (mult == const1_rtx)
6454 mult = g1->dest_reg;
6455 else
6456 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6457
6458 if (add == const0_rtx)
6459 return mult;
6460 else
6461 {
6462 if (GET_CODE (add) == PLUS
6463 && CONSTANT_P (XEXP (add, 1)))
6464 {
6465 rtx tem = XEXP (add, 1);
6466 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6467 add = tem;
6468 }
6469
6470 return gen_rtx_PLUS (g2->mode, mult, add);
6471 }
6472 }
6473 \f
6474 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6475 represented by G1. This indicates that G2 should be combined with G1 and
6476 that G2 can use (either directly or via an address expression) a register
6477 used to represent G1. */
6478
6479 static rtx
6480 combine_givs_p (g1, g2)
6481 struct induction *g1, *g2;
6482 {
6483 rtx comb, ret;
6484
6485 /* With the introduction of ext dependent givs, we must care for modes.
6486 G2 must not use a wider mode than G1. */
6487 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
6488 return NULL_RTX;
6489
6490 ret = comb = express_from (g1, g2);
6491 if (comb == NULL_RTX)
6492 return NULL_RTX;
6493 if (g1->mode != g2->mode)
6494 ret = gen_lowpart (g2->mode, comb);
6495
6496 /* If these givs are identical, they can be combined. We use the results
6497 of express_from because the addends are not in a canonical form, so
6498 rtx_equal_p is a weaker test. */
6499 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6500 combination to be the other way round. */
6501 if (comb == g1->dest_reg
6502 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6503 {
6504 return ret;
6505 }
6506
6507 /* If G2 can be expressed as a function of G1 and that function is valid
6508 as an address and no more expensive than using a register for G2,
6509 the expression of G2 in terms of G1 can be used. */
6510 if (ret != NULL_RTX
6511 && g2->giv_type == DEST_ADDR
6512 && memory_address_p (GET_MODE (g2->mem), ret)
6513 /* ??? Looses, especially with -fforce-addr, where *g2->location
6514 will always be a register, and so anything more complicated
6515 gets discarded. */
6516 #if 0
6517 #ifdef ADDRESS_COST
6518 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6519 #else
6520 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6521 #endif
6522 #endif
6523 )
6524 {
6525 return ret;
6526 }
6527
6528 return NULL_RTX;
6529 }
6530 \f
6531 /* Check each extension dependent giv in this class to see if its
6532 root biv is safe from wrapping in the interior mode, which would
6533 make the giv illegal. */
6534
6535 static void
6536 check_ext_dependent_givs (bl, loop_info)
6537 struct iv_class *bl;
6538 struct loop_info *loop_info;
6539 {
6540 int ze_ok = 0, se_ok = 0, info_ok = 0;
6541 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
6542 HOST_WIDE_INT start_val;
6543 unsigned HOST_WIDE_INT u_end_val = 0;
6544 unsigned HOST_WIDE_INT u_start_val = 0;
6545 rtx incr = pc_rtx;
6546 struct induction *v;
6547
6548 /* Make sure the iteration data is available. We must have
6549 constants in order to be certain of no overflow. */
6550 /* ??? An unknown iteration count with an increment of +-1
6551 combined with friendly exit tests of against an invariant
6552 value is also ameanable to optimization. Not implemented. */
6553 if (loop_info->n_iterations > 0
6554 && bl->initial_value
6555 && GET_CODE (bl->initial_value) == CONST_INT
6556 && (incr = biv_total_increment (bl))
6557 && GET_CODE (incr) == CONST_INT
6558 /* Make sure the host can represent the arithmetic. */
6559 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
6560 {
6561 unsigned HOST_WIDE_INT abs_incr, total_incr;
6562 HOST_WIDE_INT s_end_val;
6563 int neg_incr;
6564
6565 info_ok = 1;
6566 start_val = INTVAL (bl->initial_value);
6567 u_start_val = start_val;
6568
6569 neg_incr = 0, abs_incr = INTVAL (incr);
6570 if (INTVAL (incr) < 0)
6571 neg_incr = 1, abs_incr = -abs_incr;
6572 total_incr = abs_incr * loop_info->n_iterations;
6573
6574 /* Check for host arithmatic overflow. */
6575 if (total_incr / loop_info->n_iterations == abs_incr)
6576 {
6577 unsigned HOST_WIDE_INT u_max;
6578 HOST_WIDE_INT s_max;
6579
6580 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
6581 s_end_val = u_end_val;
6582 u_max = GET_MODE_MASK (biv_mode);
6583 s_max = u_max >> 1;
6584
6585 /* Check zero extension of biv ok. */
6586 if (start_val >= 0
6587 /* Check for host arithmatic overflow. */
6588 && (neg_incr
6589 ? u_end_val < u_start_val
6590 : u_end_val > u_start_val)
6591 /* Check for target arithmetic overflow. */
6592 && (neg_incr
6593 ? 1 /* taken care of with host overflow */
6594 : u_end_val <= u_max))
6595 {
6596 ze_ok = 1;
6597 }
6598
6599 /* Check sign extension of biv ok. */
6600 /* ??? While it is true that overflow with signed and pointer
6601 arithmetic is undefined, I fear too many programmers don't
6602 keep this fact in mind -- myself included on occasion.
6603 So leave alone with the signed overflow optimizations. */
6604 if (start_val >= -s_max - 1
6605 /* Check for host arithmatic overflow. */
6606 && (neg_incr
6607 ? s_end_val < start_val
6608 : s_end_val > start_val)
6609 /* Check for target arithmetic overflow. */
6610 && (neg_incr
6611 ? s_end_val >= -s_max - 1
6612 : s_end_val <= s_max))
6613 {
6614 se_ok = 1;
6615 }
6616 }
6617 }
6618
6619 /* Invalidate givs that fail the tests. */
6620 for (v = bl->giv; v; v = v->next_iv)
6621 if (v->ext_dependent)
6622 {
6623 enum rtx_code code = GET_CODE (v->ext_dependent);
6624 int ok = 0;
6625
6626 switch (code)
6627 {
6628 case SIGN_EXTEND:
6629 ok = se_ok;
6630 break;
6631 case ZERO_EXTEND:
6632 ok = ze_ok;
6633 break;
6634
6635 case TRUNCATE:
6636 /* We don't know whether this value is being used as either
6637 signed or unsigned, so to safely truncate we must satisfy
6638 both. The initial check here verifies the BIV itself;
6639 once that is successful we may check its range wrt the
6640 derived GIV. */
6641 if (se_ok && ze_ok)
6642 {
6643 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
6644 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
6645
6646 /* We know from the above that both endpoints are nonnegative,
6647 and that there is no wrapping. Verify that both endpoints
6648 are within the (signed) range of the outer mode. */
6649 if (u_start_val <= max && u_end_val <= max)
6650 ok = 1;
6651 }
6652 break;
6653
6654 default:
6655 abort ();
6656 }
6657
6658 if (ok)
6659 {
6660 if (loop_dump_stream)
6661 {
6662 fprintf (loop_dump_stream,
6663 "Verified ext dependent giv at %d of reg %d\n",
6664 INSN_UID (v->insn), bl->regno);
6665 }
6666 }
6667 else
6668 {
6669 if (loop_dump_stream)
6670 {
6671 const char *why;
6672
6673 if (info_ok)
6674 why = "biv iteration values overflowed";
6675 else
6676 {
6677 if (incr == pc_rtx)
6678 incr = biv_total_increment (bl);
6679 if (incr == const1_rtx)
6680 why = "biv iteration info incomplete; incr by 1";
6681 else
6682 why = "biv iteration info incomplete";
6683 }
6684
6685 fprintf (loop_dump_stream,
6686 "Failed ext dependent giv at %d, %s\n",
6687 INSN_UID (v->insn), why);
6688 }
6689 v->ignore = 1;
6690 bl->all_reduced = 0;
6691 }
6692 }
6693 }
6694
6695 /* Generate a version of VALUE in a mode appropriate for initializing V. */
6696
6697 rtx
6698 extend_value_for_giv (v, value)
6699 struct induction *v;
6700 rtx value;
6701 {
6702 rtx ext_dep = v->ext_dependent;
6703
6704 if (! ext_dep)
6705 return value;
6706
6707 /* Recall that check_ext_dependent_givs verified that the known bounds
6708 of a biv did not overflow or wrap with respect to the extension for
6709 the giv. Therefore, constants need no additional adjustment. */
6710 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
6711 return value;
6712
6713 /* Otherwise, we must adjust the value to compensate for the
6714 differing modes of the biv and the giv. */
6715 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
6716 }
6717 \f
6718 struct combine_givs_stats
6719 {
6720 int giv_number;
6721 int total_benefit;
6722 };
6723
6724 static int
6725 cmp_combine_givs_stats (xp, yp)
6726 const PTR xp;
6727 const PTR yp;
6728 {
6729 const struct combine_givs_stats * const x =
6730 (const struct combine_givs_stats *) xp;
6731 const struct combine_givs_stats * const y =
6732 (const struct combine_givs_stats *) yp;
6733 int d;
6734 d = y->total_benefit - x->total_benefit;
6735 /* Stabilize the sort. */
6736 if (!d)
6737 d = x->giv_number - y->giv_number;
6738 return d;
6739 }
6740
6741 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6742 any other. If so, point SAME to the giv combined with and set NEW_REG to
6743 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6744 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
6745
6746 static void
6747 combine_givs (regs, bl)
6748 struct loop_regs *regs;
6749 struct iv_class *bl;
6750 {
6751 /* Additional benefit to add for being combined multiple times. */
6752 const int extra_benefit = 3;
6753
6754 struct induction *g1, *g2, **giv_array;
6755 int i, j, k, giv_count;
6756 struct combine_givs_stats *stats;
6757 rtx *can_combine;
6758
6759 /* Count givs, because bl->giv_count is incorrect here. */
6760 giv_count = 0;
6761 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6762 if (!g1->ignore)
6763 giv_count++;
6764
6765 giv_array
6766 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6767 i = 0;
6768 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6769 if (!g1->ignore)
6770 giv_array[i++] = g1;
6771
6772 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6773 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
6774
6775 for (i = 0; i < giv_count; i++)
6776 {
6777 int this_benefit;
6778 rtx single_use;
6779
6780 g1 = giv_array[i];
6781 stats[i].giv_number = i;
6782
6783 /* If a DEST_REG GIV is used only once, do not allow it to combine
6784 with anything, for in doing so we will gain nothing that cannot
6785 be had by simply letting the GIV with which we would have combined
6786 to be reduced on its own. The losage shows up in particular with
6787 DEST_ADDR targets on hosts with reg+reg addressing, though it can
6788 be seen elsewhere as well. */
6789 if (g1->giv_type == DEST_REG
6790 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
6791 && single_use != const0_rtx)
6792 continue;
6793
6794 this_benefit = g1->benefit;
6795 /* Add an additional weight for zero addends. */
6796 if (g1->no_const_addval)
6797 this_benefit += 1;
6798
6799 for (j = 0; j < giv_count; j++)
6800 {
6801 rtx this_combine;
6802
6803 g2 = giv_array[j];
6804 if (g1 != g2
6805 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6806 {
6807 can_combine[i * giv_count + j] = this_combine;
6808 this_benefit += g2->benefit + extra_benefit;
6809 }
6810 }
6811 stats[i].total_benefit = this_benefit;
6812 }
6813
6814 /* Iterate, combining until we can't. */
6815 restart:
6816 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6817
6818 if (loop_dump_stream)
6819 {
6820 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6821 for (k = 0; k < giv_count; k++)
6822 {
6823 g1 = giv_array[stats[k].giv_number];
6824 if (!g1->combined_with && !g1->same)
6825 fprintf (loop_dump_stream, " {%d, %d}",
6826 INSN_UID (giv_array[stats[k].giv_number]->insn),
6827 stats[k].total_benefit);
6828 }
6829 putc ('\n', loop_dump_stream);
6830 }
6831
6832 for (k = 0; k < giv_count; k++)
6833 {
6834 int g1_add_benefit = 0;
6835
6836 i = stats[k].giv_number;
6837 g1 = giv_array[i];
6838
6839 /* If it has already been combined, skip. */
6840 if (g1->combined_with || g1->same)
6841 continue;
6842
6843 for (j = 0; j < giv_count; j++)
6844 {
6845 g2 = giv_array[j];
6846 if (g1 != g2 && can_combine[i * giv_count + j]
6847 /* If it has already been combined, skip. */
6848 && ! g2->same && ! g2->combined_with)
6849 {
6850 int l;
6851
6852 g2->new_reg = can_combine[i * giv_count + j];
6853 g2->same = g1;
6854 /* For destination, we now may replace by mem expression instead
6855 of register. This changes the costs considerably, so add the
6856 compensation. */
6857 if (g2->giv_type == DEST_ADDR)
6858 g2->benefit = (g2->benefit + reg_address_cost
6859 - address_cost (g2->new_reg,
6860 GET_MODE (g2->mem)));
6861 g1->combined_with++;
6862 g1->lifetime += g2->lifetime;
6863
6864 g1_add_benefit += g2->benefit;
6865
6866 /* ??? The new final_[bg]iv_value code does a much better job
6867 of finding replaceable giv's, and hence this code may no
6868 longer be necessary. */
6869 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6870 g1_add_benefit -= copy_cost;
6871
6872 /* To help optimize the next set of combinations, remove
6873 this giv from the benefits of other potential mates. */
6874 for (l = 0; l < giv_count; ++l)
6875 {
6876 int m = stats[l].giv_number;
6877 if (can_combine[m * giv_count + j])
6878 stats[l].total_benefit -= g2->benefit + extra_benefit;
6879 }
6880
6881 if (loop_dump_stream)
6882 fprintf (loop_dump_stream,
6883 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
6884 INSN_UID (g2->insn), INSN_UID (g1->insn),
6885 g1->benefit, g1_add_benefit, g1->lifetime);
6886 }
6887 }
6888
6889 /* To help optimize the next set of combinations, remove
6890 this giv from the benefits of other potential mates. */
6891 if (g1->combined_with)
6892 {
6893 for (j = 0; j < giv_count; ++j)
6894 {
6895 int m = stats[j].giv_number;
6896 if (can_combine[m * giv_count + i])
6897 stats[j].total_benefit -= g1->benefit + extra_benefit;
6898 }
6899
6900 g1->benefit += g1_add_benefit;
6901
6902 /* We've finished with this giv, and everything it touched.
6903 Restart the combination so that proper weights for the
6904 rest of the givs are properly taken into account. */
6905 /* ??? Ideally we would compact the arrays at this point, so
6906 as to not cover old ground. But sanely compacting
6907 can_combine is tricky. */
6908 goto restart;
6909 }
6910 }
6911
6912 /* Clean up. */
6913 free (stats);
6914 free (can_combine);
6915 }
6916 \f
6917 /* Generate sequence for REG = B * M + A. */
6918
6919 static rtx
6920 gen_add_mult (b, m, a, reg)
6921 rtx b; /* initial value of basic induction variable */
6922 rtx m; /* multiplicative constant */
6923 rtx a; /* additive constant */
6924 rtx reg; /* destination register */
6925 {
6926 rtx seq;
6927 rtx result;
6928
6929 start_sequence ();
6930 /* Use unsigned arithmetic. */
6931 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
6932 if (reg != result)
6933 emit_move_insn (reg, result);
6934 seq = gen_sequence ();
6935 end_sequence ();
6936
6937 return seq;
6938 }
6939
6940
6941 /* Update registers created in insn sequence SEQ. */
6942
6943 static void
6944 loop_regs_update (loop, seq)
6945 const struct loop *loop ATTRIBUTE_UNUSED;
6946 rtx seq;
6947 {
6948 /* Update register info for alias analysis. */
6949
6950 if (GET_CODE (seq) == SEQUENCE)
6951 {
6952 int i;
6953 for (i = 0; i < XVECLEN (seq, 0); ++i)
6954 {
6955 rtx set = single_set (XVECEXP (seq, 0, i));
6956 if (set && GET_CODE (SET_DEST (set)) == REG)
6957 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6958 }
6959 }
6960 else
6961 {
6962 rtx set = single_set (seq);
6963 if (set && GET_CODE (SET_DEST (set)) == REG)
6964 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6965 }
6966 }
6967
6968
6969 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
6970
6971 void
6972 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
6973 const struct loop *loop;
6974 rtx b; /* initial value of basic induction variable */
6975 rtx m; /* multiplicative constant */
6976 rtx a; /* additive constant */
6977 rtx reg; /* destination register */
6978 basic_block before_bb;
6979 rtx before_insn;
6980 {
6981 rtx seq;
6982
6983 if (! before_insn)
6984 {
6985 loop_iv_add_mult_hoist (loop, b, m, a, reg);
6986 return;
6987 }
6988
6989 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6990 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6991
6992 /* Increase the lifetime of any invariants moved further in code. */
6993 update_reg_last_use (a, before_insn);
6994 update_reg_last_use (b, before_insn);
6995 update_reg_last_use (m, before_insn);
6996
6997 loop_insn_emit_before (loop, before_bb, before_insn, seq);
6998
6999 /* It is possible that the expansion created lots of new registers.
7000 Iterate over the sequence we just created and record them all. */
7001 loop_regs_update (loop, seq);
7002 }
7003
7004
7005 /* Emit insns in loop pre-header to set REG = B * M + A. */
7006
7007 void
7008 loop_iv_add_mult_sink (loop, b, m, a, reg)
7009 const struct loop *loop;
7010 rtx b; /* initial value of basic induction variable */
7011 rtx m; /* multiplicative constant */
7012 rtx a; /* additive constant */
7013 rtx reg; /* destination register */
7014 {
7015 rtx seq;
7016
7017 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7018 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7019
7020 /* Increase the lifetime of any invariants moved further in code.
7021 ???? Is this really necessary? */
7022 update_reg_last_use (a, loop->sink);
7023 update_reg_last_use (b, loop->sink);
7024 update_reg_last_use (m, loop->sink);
7025
7026 loop_insn_sink (loop, seq);
7027
7028 /* It is possible that the expansion created lots of new registers.
7029 Iterate over the sequence we just created and record them all. */
7030 loop_regs_update (loop, seq);
7031 }
7032
7033
7034 /* Emit insns after loop to set REG = B * M + A. */
7035
7036 void
7037 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7038 const struct loop *loop;
7039 rtx b; /* initial value of basic induction variable */
7040 rtx m; /* multiplicative constant */
7041 rtx a; /* additive constant */
7042 rtx reg; /* destination register */
7043 {
7044 rtx seq;
7045
7046 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7047 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7048
7049 loop_insn_hoist (loop, seq);
7050
7051 /* It is possible that the expansion created lots of new registers.
7052 Iterate over the sequence we just created and record them all. */
7053 loop_regs_update (loop, seq);
7054 }
7055
7056
7057
7058 /* Similar to gen_add_mult, but compute cost rather than generating
7059 sequence. */
7060
7061 static int
7062 iv_add_mult_cost (b, m, a, reg)
7063 rtx b; /* initial value of basic induction variable */
7064 rtx m; /* multiplicative constant */
7065 rtx a; /* additive constant */
7066 rtx reg; /* destination register */
7067 {
7068 int cost = 0;
7069 rtx last, result;
7070
7071 start_sequence ();
7072 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7073 if (reg != result)
7074 emit_move_insn (reg, result);
7075 last = get_last_insn ();
7076 while (last)
7077 {
7078 rtx t = single_set (last);
7079 if (t)
7080 cost += rtx_cost (SET_SRC (t), SET);
7081 last = PREV_INSN (last);
7082 }
7083 end_sequence ();
7084 return cost;
7085 }
7086 \f
7087 /* Test whether A * B can be computed without
7088 an actual multiply insn. Value is 1 if so. */
7089
7090 static int
7091 product_cheap_p (a, b)
7092 rtx a;
7093 rtx b;
7094 {
7095 int i;
7096 rtx tmp;
7097 int win = 1;
7098
7099 /* If only one is constant, make it B. */
7100 if (GET_CODE (a) == CONST_INT)
7101 tmp = a, a = b, b = tmp;
7102
7103 /* If first constant, both constant, so don't need multiply. */
7104 if (GET_CODE (a) == CONST_INT)
7105 return 1;
7106
7107 /* If second not constant, neither is constant, so would need multiply. */
7108 if (GET_CODE (b) != CONST_INT)
7109 return 0;
7110
7111 /* One operand is constant, so might not need multiply insn. Generate the
7112 code for the multiply and see if a call or multiply, or long sequence
7113 of insns is generated. */
7114
7115 start_sequence ();
7116 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7117 tmp = gen_sequence ();
7118 end_sequence ();
7119
7120 if (GET_CODE (tmp) == SEQUENCE)
7121 {
7122 if (XVEC (tmp, 0) == 0)
7123 win = 1;
7124 else if (XVECLEN (tmp, 0) > 3)
7125 win = 0;
7126 else
7127 for (i = 0; i < XVECLEN (tmp, 0); i++)
7128 {
7129 rtx insn = XVECEXP (tmp, 0, i);
7130
7131 if (GET_CODE (insn) != INSN
7132 || (GET_CODE (PATTERN (insn)) == SET
7133 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7134 || (GET_CODE (PATTERN (insn)) == PARALLEL
7135 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7136 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7137 {
7138 win = 0;
7139 break;
7140 }
7141 }
7142 }
7143 else if (GET_CODE (tmp) == SET
7144 && GET_CODE (SET_SRC (tmp)) == MULT)
7145 win = 0;
7146 else if (GET_CODE (tmp) == PARALLEL
7147 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7148 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7149 win = 0;
7150
7151 return win;
7152 }
7153 \f
7154 /* Check to see if loop can be terminated by a "decrement and branch until
7155 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7156 Also try reversing an increment loop to a decrement loop
7157 to see if the optimization can be performed.
7158 Value is nonzero if optimization was performed. */
7159
7160 /* This is useful even if the architecture doesn't have such an insn,
7161 because it might change a loops which increments from 0 to n to a loop
7162 which decrements from n to 0. A loop that decrements to zero is usually
7163 faster than one that increments from zero. */
7164
7165 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7166 such as approx_final_value, biv_total_increment, loop_iterations, and
7167 final_[bg]iv_value. */
7168
7169 static int
7170 check_dbra_loop (loop, insn_count)
7171 struct loop *loop;
7172 int insn_count;
7173 {
7174 struct loop_info *loop_info = LOOP_INFO (loop);
7175 struct loop_regs *regs = LOOP_REGS (loop);
7176 struct loop_ivs *ivs = LOOP_IVS (loop);
7177 struct iv_class *bl;
7178 rtx reg;
7179 rtx jump_label;
7180 rtx final_value;
7181 rtx start_value;
7182 rtx new_add_val;
7183 rtx comparison;
7184 rtx before_comparison;
7185 rtx p;
7186 rtx jump;
7187 rtx first_compare;
7188 int compare_and_branch;
7189 rtx loop_start = loop->start;
7190 rtx loop_end = loop->end;
7191
7192 /* If last insn is a conditional branch, and the insn before tests a
7193 register value, try to optimize it. Otherwise, we can't do anything. */
7194
7195 jump = PREV_INSN (loop_end);
7196 comparison = get_condition_for_loop (loop, jump);
7197 if (comparison == 0)
7198 return 0;
7199 if (!onlyjump_p (jump))
7200 return 0;
7201
7202 /* Try to compute whether the compare/branch at the loop end is one or
7203 two instructions. */
7204 get_condition (jump, &first_compare);
7205 if (first_compare == jump)
7206 compare_and_branch = 1;
7207 else if (first_compare == prev_nonnote_insn (jump))
7208 compare_and_branch = 2;
7209 else
7210 return 0;
7211
7212 {
7213 /* If more than one condition is present to control the loop, then
7214 do not proceed, as this function does not know how to rewrite
7215 loop tests with more than one condition.
7216
7217 Look backwards from the first insn in the last comparison
7218 sequence and see if we've got another comparison sequence. */
7219
7220 rtx jump1;
7221 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7222 if (GET_CODE (jump1) == JUMP_INSN)
7223 return 0;
7224 }
7225
7226 /* Check all of the bivs to see if the compare uses one of them.
7227 Skip biv's set more than once because we can't guarantee that
7228 it will be zero on the last iteration. Also skip if the biv is
7229 used between its update and the test insn. */
7230
7231 for (bl = ivs->list; bl; bl = bl->next)
7232 {
7233 if (bl->biv_count == 1
7234 && ! bl->biv->maybe_multiple
7235 && bl->biv->dest_reg == XEXP (comparison, 0)
7236 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7237 first_compare))
7238 break;
7239 }
7240
7241 if (! bl)
7242 return 0;
7243
7244 /* Look for the case where the basic induction variable is always
7245 nonnegative, and equals zero on the last iteration.
7246 In this case, add a reg_note REG_NONNEG, which allows the
7247 m68k DBRA instruction to be used. */
7248
7249 if (((GET_CODE (comparison) == GT
7250 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7251 && INTVAL (XEXP (comparison, 1)) == -1)
7252 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7253 && GET_CODE (bl->biv->add_val) == CONST_INT
7254 && INTVAL (bl->biv->add_val) < 0)
7255 {
7256 /* Initial value must be greater than 0,
7257 init_val % -dec_value == 0 to ensure that it equals zero on
7258 the last iteration */
7259
7260 if (GET_CODE (bl->initial_value) == CONST_INT
7261 && INTVAL (bl->initial_value) > 0
7262 && (INTVAL (bl->initial_value)
7263 % (-INTVAL (bl->biv->add_val))) == 0)
7264 {
7265 /* register always nonnegative, add REG_NOTE to branch */
7266 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7267 REG_NOTES (jump)
7268 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7269 REG_NOTES (jump));
7270 bl->nonneg = 1;
7271
7272 return 1;
7273 }
7274
7275 /* If the decrement is 1 and the value was tested as >= 0 before
7276 the loop, then we can safely optimize. */
7277 for (p = loop_start; p; p = PREV_INSN (p))
7278 {
7279 if (GET_CODE (p) == CODE_LABEL)
7280 break;
7281 if (GET_CODE (p) != JUMP_INSN)
7282 continue;
7283
7284 before_comparison = get_condition_for_loop (loop, p);
7285 if (before_comparison
7286 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7287 && GET_CODE (before_comparison) == LT
7288 && XEXP (before_comparison, 1) == const0_rtx
7289 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7290 && INTVAL (bl->biv->add_val) == -1)
7291 {
7292 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7293 REG_NOTES (jump)
7294 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7295 REG_NOTES (jump));
7296 bl->nonneg = 1;
7297
7298 return 1;
7299 }
7300 }
7301 }
7302 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7303 && INTVAL (bl->biv->add_val) > 0)
7304 {
7305 /* Try to change inc to dec, so can apply above optimization. */
7306 /* Can do this if:
7307 all registers modified are induction variables or invariant,
7308 all memory references have non-overlapping addresses
7309 (obviously true if only one write)
7310 allow 2 insns for the compare/jump at the end of the loop. */
7311 /* Also, we must avoid any instructions which use both the reversed
7312 biv and another biv. Such instructions will fail if the loop is
7313 reversed. We meet this condition by requiring that either
7314 no_use_except_counting is true, or else that there is only
7315 one biv. */
7316 int num_nonfixed_reads = 0;
7317 /* 1 if the iteration var is used only to count iterations. */
7318 int no_use_except_counting = 0;
7319 /* 1 if the loop has no memory store, or it has a single memory store
7320 which is reversible. */
7321 int reversible_mem_store = 1;
7322
7323 if (bl->giv_count == 0 && ! loop->exit_count)
7324 {
7325 rtx bivreg = regno_reg_rtx[bl->regno];
7326 struct iv_class *blt;
7327
7328 /* If there are no givs for this biv, and the only exit is the
7329 fall through at the end of the loop, then
7330 see if perhaps there are no uses except to count. */
7331 no_use_except_counting = 1;
7332 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7333 if (INSN_P (p))
7334 {
7335 rtx set = single_set (p);
7336
7337 if (set && GET_CODE (SET_DEST (set)) == REG
7338 && REGNO (SET_DEST (set)) == bl->regno)
7339 /* An insn that sets the biv is okay. */
7340 ;
7341 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7342 || p == prev_nonnote_insn (loop_end))
7343 && reg_mentioned_p (bivreg, PATTERN (p)))
7344 {
7345 /* If either of these insns uses the biv and sets a pseudo
7346 that has more than one usage, then the biv has uses
7347 other than counting since it's used to derive a value
7348 that is used more than one time. */
7349 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7350 regs);
7351 if (regs->multiple_uses)
7352 {
7353 no_use_except_counting = 0;
7354 break;
7355 }
7356 }
7357 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7358 {
7359 no_use_except_counting = 0;
7360 break;
7361 }
7362 }
7363
7364 /* A biv has uses besides counting if it is used to set another biv. */
7365 for (blt = ivs->list; blt; blt = blt->next)
7366 if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7367 {
7368 no_use_except_counting = 0;
7369 break;
7370 }
7371 }
7372
7373 if (no_use_except_counting)
7374 /* No need to worry about MEMs. */
7375 ;
7376 else if (loop_info->num_mem_sets <= 1)
7377 {
7378 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7379 if (INSN_P (p))
7380 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7381
7382 /* If the loop has a single store, and the destination address is
7383 invariant, then we can't reverse the loop, because this address
7384 might then have the wrong value at loop exit.
7385 This would work if the source was invariant also, however, in that
7386 case, the insn should have been moved out of the loop. */
7387
7388 if (loop_info->num_mem_sets == 1)
7389 {
7390 struct induction *v;
7391
7392 /* If we could prove that each of the memory locations
7393 written to was different, then we could reverse the
7394 store -- but we don't presently have any way of
7395 knowing that. */
7396 reversible_mem_store = 0;
7397
7398 /* If the store depends on a register that is set after the
7399 store, it depends on the initial value, and is thus not
7400 reversible. */
7401 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7402 {
7403 if (v->giv_type == DEST_REG
7404 && reg_mentioned_p (v->dest_reg,
7405 PATTERN (loop_info->first_loop_store_insn))
7406 && loop_insn_first_p (loop_info->first_loop_store_insn,
7407 v->insn))
7408 reversible_mem_store = 0;
7409 }
7410 }
7411 }
7412 else
7413 return 0;
7414
7415 /* This code only acts for innermost loops. Also it simplifies
7416 the memory address check by only reversing loops with
7417 zero or one memory access.
7418 Two memory accesses could involve parts of the same array,
7419 and that can't be reversed.
7420 If the biv is used only for counting, than we don't need to worry
7421 about all these things. */
7422
7423 if ((num_nonfixed_reads <= 1
7424 && ! loop_info->has_nonconst_call
7425 && ! loop_info->has_volatile
7426 && reversible_mem_store
7427 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
7428 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
7429 && (bl == ivs->list && bl->next == 0))
7430 || no_use_except_counting)
7431 {
7432 rtx tem;
7433
7434 /* Loop can be reversed. */
7435 if (loop_dump_stream)
7436 fprintf (loop_dump_stream, "Can reverse loop\n");
7437
7438 /* Now check other conditions:
7439
7440 The increment must be a constant, as must the initial value,
7441 and the comparison code must be LT.
7442
7443 This test can probably be improved since +/- 1 in the constant
7444 can be obtained by changing LT to LE and vice versa; this is
7445 confusing. */
7446
7447 if (comparison
7448 /* for constants, LE gets turned into LT */
7449 && (GET_CODE (comparison) == LT
7450 || (GET_CODE (comparison) == LE
7451 && no_use_except_counting)))
7452 {
7453 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7454 rtx initial_value, comparison_value;
7455 int nonneg = 0;
7456 enum rtx_code cmp_code;
7457 int comparison_const_width;
7458 unsigned HOST_WIDE_INT comparison_sign_mask;
7459
7460 add_val = INTVAL (bl->biv->add_val);
7461 comparison_value = XEXP (comparison, 1);
7462 if (GET_MODE (comparison_value) == VOIDmode)
7463 comparison_const_width
7464 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7465 else
7466 comparison_const_width
7467 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7468 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7469 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7470 comparison_sign_mask
7471 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
7472
7473 /* If the comparison value is not a loop invariant, then we
7474 can not reverse this loop.
7475
7476 ??? If the insns which initialize the comparison value as
7477 a whole compute an invariant result, then we could move
7478 them out of the loop and proceed with loop reversal. */
7479 if (! loop_invariant_p (loop, comparison_value))
7480 return 0;
7481
7482 if (GET_CODE (comparison_value) == CONST_INT)
7483 comparison_val = INTVAL (comparison_value);
7484 initial_value = bl->initial_value;
7485
7486 /* Normalize the initial value if it is an integer and
7487 has no other use except as a counter. This will allow
7488 a few more loops to be reversed. */
7489 if (no_use_except_counting
7490 && GET_CODE (comparison_value) == CONST_INT
7491 && GET_CODE (initial_value) == CONST_INT)
7492 {
7493 comparison_val = comparison_val - INTVAL (bl->initial_value);
7494 /* The code below requires comparison_val to be a multiple
7495 of add_val in order to do the loop reversal, so
7496 round up comparison_val to a multiple of add_val.
7497 Since comparison_value is constant, we know that the
7498 current comparison code is LT. */
7499 comparison_val = comparison_val + add_val - 1;
7500 comparison_val
7501 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
7502 /* We postpone overflow checks for COMPARISON_VAL here;
7503 even if there is an overflow, we might still be able to
7504 reverse the loop, if converting the loop exit test to
7505 NE is possible. */
7506 initial_value = const0_rtx;
7507 }
7508
7509 /* First check if we can do a vanilla loop reversal. */
7510 if (initial_value == const0_rtx
7511 /* If we have a decrement_and_branch_on_count,
7512 prefer the NE test, since this will allow that
7513 instruction to be generated. Note that we must
7514 use a vanilla loop reversal if the biv is used to
7515 calculate a giv or has a non-counting use. */
7516 #if ! defined (HAVE_decrement_and_branch_until_zero) \
7517 && defined (HAVE_decrement_and_branch_on_count)
7518 && (! (add_val == 1 && loop->vtop
7519 && (bl->biv_count == 0
7520 || no_use_except_counting)))
7521 #endif
7522 && GET_CODE (comparison_value) == CONST_INT
7523 /* Now do postponed overflow checks on COMPARISON_VAL. */
7524 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7525 & comparison_sign_mask))
7526 {
7527 /* Register will always be nonnegative, with value
7528 0 on last iteration */
7529 add_adjust = add_val;
7530 nonneg = 1;
7531 cmp_code = GE;
7532 }
7533 else if (add_val == 1 && loop->vtop
7534 && (bl->biv_count == 0
7535 || no_use_except_counting))
7536 {
7537 add_adjust = 0;
7538 cmp_code = NE;
7539 }
7540 else
7541 return 0;
7542
7543 if (GET_CODE (comparison) == LE)
7544 add_adjust -= add_val;
7545
7546 /* If the initial value is not zero, or if the comparison
7547 value is not an exact multiple of the increment, then we
7548 can not reverse this loop. */
7549 if (initial_value == const0_rtx
7550 && GET_CODE (comparison_value) == CONST_INT)
7551 {
7552 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7553 return 0;
7554 }
7555 else
7556 {
7557 if (! no_use_except_counting || add_val != 1)
7558 return 0;
7559 }
7560
7561 final_value = comparison_value;
7562
7563 /* Reset these in case we normalized the initial value
7564 and comparison value above. */
7565 if (GET_CODE (comparison_value) == CONST_INT
7566 && GET_CODE (initial_value) == CONST_INT)
7567 {
7568 comparison_value = GEN_INT (comparison_val);
7569 final_value
7570 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7571 }
7572 bl->initial_value = initial_value;
7573
7574 /* Save some info needed to produce the new insns. */
7575 reg = bl->biv->dest_reg;
7576 jump_label = condjump_label (PREV_INSN (loop_end));
7577 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
7578
7579 /* Set start_value; if this is not a CONST_INT, we need
7580 to generate a SUB.
7581 Initialize biv to start_value before loop start.
7582 The old initializing insn will be deleted as a
7583 dead store by flow.c. */
7584 if (initial_value == const0_rtx
7585 && GET_CODE (comparison_value) == CONST_INT)
7586 {
7587 start_value = GEN_INT (comparison_val - add_adjust);
7588 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
7589 }
7590 else if (GET_CODE (initial_value) == CONST_INT)
7591 {
7592 enum machine_mode mode = GET_MODE (reg);
7593 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7594 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
7595
7596 if (add_insn == 0)
7597 return 0;
7598
7599 start_value
7600 = gen_rtx_PLUS (mode, comparison_value, offset);
7601 loop_insn_hoist (loop, add_insn);
7602 if (GET_CODE (comparison) == LE)
7603 final_value = gen_rtx_PLUS (mode, comparison_value,
7604 GEN_INT (add_val));
7605 }
7606 else if (! add_adjust)
7607 {
7608 enum machine_mode mode = GET_MODE (reg);
7609 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
7610 initial_value);
7611
7612 if (sub_insn == 0)
7613 return 0;
7614 start_value
7615 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7616 loop_insn_hoist (loop, sub_insn);
7617 }
7618 else
7619 /* We could handle the other cases too, but it'll be
7620 better to have a testcase first. */
7621 return 0;
7622
7623 /* We may not have a single insn which can increment a reg, so
7624 create a sequence to hold all the insns from expand_inc. */
7625 start_sequence ();
7626 expand_inc (reg, new_add_val);
7627 tem = gen_sequence ();
7628 end_sequence ();
7629
7630 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
7631 delete_insn (bl->biv->insn);
7632
7633 /* Update biv info to reflect its new status. */
7634 bl->biv->insn = p;
7635 bl->initial_value = start_value;
7636 bl->biv->add_val = new_add_val;
7637
7638 /* Update loop info. */
7639 loop_info->initial_value = reg;
7640 loop_info->initial_equiv_value = reg;
7641 loop_info->final_value = const0_rtx;
7642 loop_info->final_equiv_value = const0_rtx;
7643 loop_info->comparison_value = const0_rtx;
7644 loop_info->comparison_code = cmp_code;
7645 loop_info->increment = new_add_val;
7646
7647 /* Inc LABEL_NUSES so that delete_insn will
7648 not delete the label. */
7649 LABEL_NUSES (XEXP (jump_label, 0))++;
7650
7651 /* Emit an insn after the end of the loop to set the biv's
7652 proper exit value if it is used anywhere outside the loop. */
7653 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7654 || ! bl->init_insn
7655 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7656 loop_insn_sink (loop, gen_move_insn (reg, final_value));
7657
7658 /* Delete compare/branch at end of loop. */
7659 delete_related_insns (PREV_INSN (loop_end));
7660 if (compare_and_branch == 2)
7661 delete_related_insns (first_compare);
7662
7663 /* Add new compare/branch insn at end of loop. */
7664 start_sequence ();
7665 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
7666 GET_MODE (reg), 0,
7667 XEXP (jump_label, 0));
7668 tem = gen_sequence ();
7669 end_sequence ();
7670 emit_jump_insn_before (tem, loop_end);
7671
7672 for (tem = PREV_INSN (loop_end);
7673 tem && GET_CODE (tem) != JUMP_INSN;
7674 tem = PREV_INSN (tem))
7675 ;
7676
7677 if (tem)
7678 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7679
7680 if (nonneg)
7681 {
7682 if (tem)
7683 {
7684 /* Increment of LABEL_NUSES done above. */
7685 /* Register is now always nonnegative,
7686 so add REG_NONNEG note to the branch. */
7687 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
7688 REG_NOTES (tem));
7689 }
7690 bl->nonneg = 1;
7691 }
7692
7693 /* No insn may reference both the reversed and another biv or it
7694 will fail (see comment near the top of the loop reversal
7695 code).
7696 Earlier on, we have verified that the biv has no use except
7697 counting, or it is the only biv in this function.
7698 However, the code that computes no_use_except_counting does
7699 not verify reg notes. It's possible to have an insn that
7700 references another biv, and has a REG_EQUAL note with an
7701 expression based on the reversed biv. To avoid this case,
7702 remove all REG_EQUAL notes based on the reversed biv
7703 here. */
7704 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7705 if (INSN_P (p))
7706 {
7707 rtx *pnote;
7708 rtx set = single_set (p);
7709 /* If this is a set of a GIV based on the reversed biv, any
7710 REG_EQUAL notes should still be correct. */
7711 if (! set
7712 || GET_CODE (SET_DEST (set)) != REG
7713 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
7714 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
7715 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
7716 for (pnote = &REG_NOTES (p); *pnote;)
7717 {
7718 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
7719 && reg_mentioned_p (regno_reg_rtx[bl->regno],
7720 XEXP (*pnote, 0)))
7721 *pnote = XEXP (*pnote, 1);
7722 else
7723 pnote = &XEXP (*pnote, 1);
7724 }
7725 }
7726
7727 /* Mark that this biv has been reversed. Each giv which depends
7728 on this biv, and which is also live past the end of the loop
7729 will have to be fixed up. */
7730
7731 bl->reversed = 1;
7732
7733 if (loop_dump_stream)
7734 {
7735 fprintf (loop_dump_stream, "Reversed loop");
7736 if (bl->nonneg)
7737 fprintf (loop_dump_stream, " and added reg_nonneg\n");
7738 else
7739 fprintf (loop_dump_stream, "\n");
7740 }
7741
7742 return 1;
7743 }
7744 }
7745 }
7746
7747 return 0;
7748 }
7749 \f
7750 /* Verify whether the biv BL appears to be eliminable,
7751 based on the insns in the loop that refer to it.
7752
7753 If ELIMINATE_P is non-zero, actually do the elimination.
7754
7755 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7756 determine whether invariant insns should be placed inside or at the
7757 start of the loop. */
7758
7759 static int
7760 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
7761 const struct loop *loop;
7762 struct iv_class *bl;
7763 int eliminate_p;
7764 int threshold, insn_count;
7765 {
7766 struct loop_ivs *ivs = LOOP_IVS (loop);
7767 rtx reg = bl->biv->dest_reg;
7768 rtx p;
7769
7770 /* Scan all insns in the loop, stopping if we find one that uses the
7771 biv in a way that we cannot eliminate. */
7772
7773 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
7774 {
7775 enum rtx_code code = GET_CODE (p);
7776 basic_block where_bb = 0;
7777 rtx where_insn = threshold >= insn_count ? 0 : p;
7778
7779 /* If this is a libcall that sets a giv, skip ahead to its end. */
7780 if (GET_RTX_CLASS (code) == 'i')
7781 {
7782 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
7783
7784 if (note)
7785 {
7786 rtx last = XEXP (note, 0);
7787 rtx set = single_set (last);
7788
7789 if (set && GET_CODE (SET_DEST (set)) == REG)
7790 {
7791 unsigned int regno = REGNO (SET_DEST (set));
7792
7793 if (regno < ivs->n_regs
7794 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
7795 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
7796 p = last;
7797 }
7798 }
7799 }
7800 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7801 && reg_mentioned_p (reg, PATTERN (p))
7802 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
7803 eliminate_p, where_bb, where_insn))
7804 {
7805 if (loop_dump_stream)
7806 fprintf (loop_dump_stream,
7807 "Cannot eliminate biv %d: biv used in insn %d.\n",
7808 bl->regno, INSN_UID (p));
7809 break;
7810 }
7811 }
7812
7813 if (p == loop->end)
7814 {
7815 if (loop_dump_stream)
7816 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7817 bl->regno, eliminate_p ? "was" : "can be");
7818 return 1;
7819 }
7820
7821 return 0;
7822 }
7823 \f
7824 /* INSN and REFERENCE are instructions in the same insn chain.
7825 Return non-zero if INSN is first. */
7826
7827 int
7828 loop_insn_first_p (insn, reference)
7829 rtx insn, reference;
7830 {
7831 rtx p, q;
7832
7833 for (p = insn, q = reference;;)
7834 {
7835 /* Start with test for not first so that INSN == REFERENCE yields not
7836 first. */
7837 if (q == insn || ! p)
7838 return 0;
7839 if (p == reference || ! q)
7840 return 1;
7841
7842 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
7843 previous insn, hence the <= comparison below does not work if
7844 P is a note. */
7845 if (INSN_UID (p) < max_uid_for_loop
7846 && INSN_UID (q) < max_uid_for_loop
7847 && GET_CODE (p) != NOTE)
7848 return INSN_LUID (p) <= INSN_LUID (q);
7849
7850 if (INSN_UID (p) >= max_uid_for_loop
7851 || GET_CODE (p) == NOTE)
7852 p = NEXT_INSN (p);
7853 if (INSN_UID (q) >= max_uid_for_loop)
7854 q = NEXT_INSN (q);
7855 }
7856 }
7857
7858 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
7859 the offset that we have to take into account due to auto-increment /
7860 div derivation is zero. */
7861 static int
7862 biv_elimination_giv_has_0_offset (biv, giv, insn)
7863 struct induction *biv, *giv;
7864 rtx insn;
7865 {
7866 /* If the giv V had the auto-inc address optimization applied
7867 to it, and INSN occurs between the giv insn and the biv
7868 insn, then we'd have to adjust the value used here.
7869 This is rare, so we don't bother to make this possible. */
7870 if (giv->auto_inc_opt
7871 && ((loop_insn_first_p (giv->insn, insn)
7872 && loop_insn_first_p (insn, biv->insn))
7873 || (loop_insn_first_p (biv->insn, insn)
7874 && loop_insn_first_p (insn, giv->insn))))
7875 return 0;
7876
7877 return 1;
7878 }
7879
7880 /* If BL appears in X (part of the pattern of INSN), see if we can
7881 eliminate its use. If so, return 1. If not, return 0.
7882
7883 If BIV does not appear in X, return 1.
7884
7885 If ELIMINATE_P is non-zero, actually do the elimination.
7886 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
7887 Depending on how many items have been moved out of the loop, it
7888 will either be before INSN (when WHERE_INSN is non-zero) or at the
7889 start of the loop (when WHERE_INSN is zero). */
7890
7891 static int
7892 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
7893 const struct loop *loop;
7894 rtx x, insn;
7895 struct iv_class *bl;
7896 int eliminate_p;
7897 basic_block where_bb;
7898 rtx where_insn;
7899 {
7900 enum rtx_code code = GET_CODE (x);
7901 rtx reg = bl->biv->dest_reg;
7902 enum machine_mode mode = GET_MODE (reg);
7903 struct induction *v;
7904 rtx arg, tem;
7905 #ifdef HAVE_cc0
7906 rtx new;
7907 #endif
7908 int arg_operand;
7909 const char *fmt;
7910 int i, j;
7911
7912 switch (code)
7913 {
7914 case REG:
7915 /* If we haven't already been able to do something with this BIV,
7916 we can't eliminate it. */
7917 if (x == reg)
7918 return 0;
7919 return 1;
7920
7921 case SET:
7922 /* If this sets the BIV, it is not a problem. */
7923 if (SET_DEST (x) == reg)
7924 return 1;
7925
7926 /* If this is an insn that defines a giv, it is also ok because
7927 it will go away when the giv is reduced. */
7928 for (v = bl->giv; v; v = v->next_iv)
7929 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7930 return 1;
7931
7932 #ifdef HAVE_cc0
7933 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7934 {
7935 /* Can replace with any giv that was reduced and
7936 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7937 Require a constant for MULT_VAL, so we know it's nonzero.
7938 ??? We disable this optimization to avoid potential
7939 overflows. */
7940
7941 for (v = bl->giv; v; v = v->next_iv)
7942 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
7943 && v->add_val == const0_rtx
7944 && ! v->ignore && ! v->maybe_dead && v->always_computable
7945 && v->mode == mode
7946 && 0)
7947 {
7948 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7949 continue;
7950
7951 if (! eliminate_p)
7952 return 1;
7953
7954 /* If the giv has the opposite direction of change,
7955 then reverse the comparison. */
7956 if (INTVAL (v->mult_val) < 0)
7957 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7958 const0_rtx, v->new_reg);
7959 else
7960 new = v->new_reg;
7961
7962 /* We can probably test that giv's reduced reg. */
7963 if (validate_change (insn, &SET_SRC (x), new, 0))
7964 return 1;
7965 }
7966
7967 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7968 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7969 Require a constant for MULT_VAL, so we know it's nonzero.
7970 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7971 overflow problem. */
7972
7973 for (v = bl->giv; v; v = v->next_iv)
7974 if (GET_CODE (v->mult_val) == CONST_INT
7975 && v->mult_val != const0_rtx
7976 && ! v->ignore && ! v->maybe_dead && v->always_computable
7977 && v->mode == mode
7978 && (GET_CODE (v->add_val) == SYMBOL_REF
7979 || GET_CODE (v->add_val) == LABEL_REF
7980 || GET_CODE (v->add_val) == CONST
7981 || (GET_CODE (v->add_val) == REG
7982 && REG_POINTER (v->add_val))))
7983 {
7984 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7985 continue;
7986
7987 if (! eliminate_p)
7988 return 1;
7989
7990 /* If the giv has the opposite direction of change,
7991 then reverse the comparison. */
7992 if (INTVAL (v->mult_val) < 0)
7993 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7994 v->new_reg);
7995 else
7996 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7997 copy_rtx (v->add_val));
7998
7999 /* Replace biv with the giv's reduced register. */
8000 update_reg_last_use (v->add_val, insn);
8001 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8002 return 1;
8003
8004 /* Insn doesn't support that constant or invariant. Copy it
8005 into a register (it will be a loop invariant.) */
8006 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8007
8008 loop_insn_emit_before (loop, 0, where_insn,
8009 gen_move_insn (tem,
8010 copy_rtx (v->add_val)));
8011
8012 /* Substitute the new register for its invariant value in
8013 the compare expression. */
8014 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8015 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8016 return 1;
8017 }
8018 }
8019 #endif
8020 break;
8021
8022 case COMPARE:
8023 case EQ: case NE:
8024 case GT: case GE: case GTU: case GEU:
8025 case LT: case LE: case LTU: case LEU:
8026 /* See if either argument is the biv. */
8027 if (XEXP (x, 0) == reg)
8028 arg = XEXP (x, 1), arg_operand = 1;
8029 else if (XEXP (x, 1) == reg)
8030 arg = XEXP (x, 0), arg_operand = 0;
8031 else
8032 break;
8033
8034 if (CONSTANT_P (arg))
8035 {
8036 /* First try to replace with any giv that has constant positive
8037 mult_val and constant add_val. We might be able to support
8038 negative mult_val, but it seems complex to do it in general. */
8039
8040 for (v = bl->giv; v; v = v->next_iv)
8041 if (GET_CODE (v->mult_val) == CONST_INT
8042 && INTVAL (v->mult_val) > 0
8043 && (GET_CODE (v->add_val) == SYMBOL_REF
8044 || GET_CODE (v->add_val) == LABEL_REF
8045 || GET_CODE (v->add_val) == CONST
8046 || (GET_CODE (v->add_val) == REG
8047 && REG_POINTER (v->add_val)))
8048 && ! v->ignore && ! v->maybe_dead && v->always_computable
8049 && v->mode == mode)
8050 {
8051 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8052 continue;
8053
8054 if (! eliminate_p)
8055 return 1;
8056
8057 /* Replace biv with the giv's reduced reg. */
8058 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8059
8060 /* If all constants are actually constant integers and
8061 the derived constant can be directly placed in the COMPARE,
8062 do so. */
8063 if (GET_CODE (arg) == CONST_INT
8064 && GET_CODE (v->mult_val) == CONST_INT
8065 && GET_CODE (v->add_val) == CONST_INT)
8066 {
8067 validate_change (insn, &XEXP (x, arg_operand),
8068 GEN_INT (INTVAL (arg)
8069 * INTVAL (v->mult_val)
8070 + INTVAL (v->add_val)), 1);
8071 }
8072 else
8073 {
8074 /* Otherwise, load it into a register. */
8075 tem = gen_reg_rtx (mode);
8076 loop_iv_add_mult_emit_before (loop, arg,
8077 v->mult_val, v->add_val,
8078 tem, where_bb, where_insn);
8079 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8080 }
8081 if (apply_change_group ())
8082 return 1;
8083 }
8084
8085 /* Look for giv with positive constant mult_val and nonconst add_val.
8086 Insert insns to calculate new compare value.
8087 ??? Turn this off due to possible overflow. */
8088
8089 for (v = bl->giv; v; v = v->next_iv)
8090 if (GET_CODE (v->mult_val) == CONST_INT
8091 && INTVAL (v->mult_val) > 0
8092 && ! v->ignore && ! v->maybe_dead && v->always_computable
8093 && v->mode == mode
8094 && 0)
8095 {
8096 rtx tem;
8097
8098 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8099 continue;
8100
8101 if (! eliminate_p)
8102 return 1;
8103
8104 tem = gen_reg_rtx (mode);
8105
8106 /* Replace biv with giv's reduced register. */
8107 validate_change (insn, &XEXP (x, 1 - arg_operand),
8108 v->new_reg, 1);
8109
8110 /* Compute value to compare against. */
8111 loop_iv_add_mult_emit_before (loop, arg,
8112 v->mult_val, v->add_val,
8113 tem, where_bb, where_insn);
8114 /* Use it in this insn. */
8115 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8116 if (apply_change_group ())
8117 return 1;
8118 }
8119 }
8120 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8121 {
8122 if (loop_invariant_p (loop, arg) == 1)
8123 {
8124 /* Look for giv with constant positive mult_val and nonconst
8125 add_val. Insert insns to compute new compare value.
8126 ??? Turn this off due to possible overflow. */
8127
8128 for (v = bl->giv; v; v = v->next_iv)
8129 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8130 && ! v->ignore && ! v->maybe_dead && v->always_computable
8131 && v->mode == mode
8132 && 0)
8133 {
8134 rtx tem;
8135
8136 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8137 continue;
8138
8139 if (! eliminate_p)
8140 return 1;
8141
8142 tem = gen_reg_rtx (mode);
8143
8144 /* Replace biv with giv's reduced register. */
8145 validate_change (insn, &XEXP (x, 1 - arg_operand),
8146 v->new_reg, 1);
8147
8148 /* Compute value to compare against. */
8149 loop_iv_add_mult_emit_before (loop, arg,
8150 v->mult_val, v->add_val,
8151 tem, where_bb, where_insn);
8152 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8153 if (apply_change_group ())
8154 return 1;
8155 }
8156 }
8157
8158 /* This code has problems. Basically, you can't know when
8159 seeing if we will eliminate BL, whether a particular giv
8160 of ARG will be reduced. If it isn't going to be reduced,
8161 we can't eliminate BL. We can try forcing it to be reduced,
8162 but that can generate poor code.
8163
8164 The problem is that the benefit of reducing TV, below should
8165 be increased if BL can actually be eliminated, but this means
8166 we might have to do a topological sort of the order in which
8167 we try to process biv. It doesn't seem worthwhile to do
8168 this sort of thing now. */
8169
8170 #if 0
8171 /* Otherwise the reg compared with had better be a biv. */
8172 if (GET_CODE (arg) != REG
8173 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8174 return 0;
8175
8176 /* Look for a pair of givs, one for each biv,
8177 with identical coefficients. */
8178 for (v = bl->giv; v; v = v->next_iv)
8179 {
8180 struct induction *tv;
8181
8182 if (v->ignore || v->maybe_dead || v->mode != mode)
8183 continue;
8184
8185 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8186 tv = tv->next_iv)
8187 if (! tv->ignore && ! tv->maybe_dead
8188 && rtx_equal_p (tv->mult_val, v->mult_val)
8189 && rtx_equal_p (tv->add_val, v->add_val)
8190 && tv->mode == mode)
8191 {
8192 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8193 continue;
8194
8195 if (! eliminate_p)
8196 return 1;
8197
8198 /* Replace biv with its giv's reduced reg. */
8199 XEXP (x, 1 - arg_operand) = v->new_reg;
8200 /* Replace other operand with the other giv's
8201 reduced reg. */
8202 XEXP (x, arg_operand) = tv->new_reg;
8203 return 1;
8204 }
8205 }
8206 #endif
8207 }
8208
8209 /* If we get here, the biv can't be eliminated. */
8210 return 0;
8211
8212 case MEM:
8213 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8214 biv is used in it, since it will be replaced. */
8215 for (v = bl->giv; v; v = v->next_iv)
8216 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8217 return 1;
8218 break;
8219
8220 default:
8221 break;
8222 }
8223
8224 /* See if any subexpression fails elimination. */
8225 fmt = GET_RTX_FORMAT (code);
8226 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8227 {
8228 switch (fmt[i])
8229 {
8230 case 'e':
8231 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8232 eliminate_p, where_bb, where_insn))
8233 return 0;
8234 break;
8235
8236 case 'E':
8237 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8238 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8239 eliminate_p, where_bb, where_insn))
8240 return 0;
8241 break;
8242 }
8243 }
8244
8245 return 1;
8246 }
8247 \f
8248 /* Return nonzero if the last use of REG
8249 is in an insn following INSN in the same basic block. */
8250
8251 static int
8252 last_use_this_basic_block (reg, insn)
8253 rtx reg;
8254 rtx insn;
8255 {
8256 rtx n;
8257 for (n = insn;
8258 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8259 n = NEXT_INSN (n))
8260 {
8261 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8262 return 1;
8263 }
8264 return 0;
8265 }
8266 \f
8267 /* Called via `note_stores' to record the initial value of a biv. Here we
8268 just record the location of the set and process it later. */
8269
8270 static void
8271 record_initial (dest, set, data)
8272 rtx dest;
8273 rtx set;
8274 void *data ATTRIBUTE_UNUSED;
8275 {
8276 struct loop_ivs *ivs = (struct loop_ivs *) data;
8277 struct iv_class *bl;
8278
8279 if (GET_CODE (dest) != REG
8280 || REGNO (dest) >= ivs->n_regs
8281 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8282 return;
8283
8284 bl = REG_IV_CLASS (ivs, REGNO (dest));
8285
8286 /* If this is the first set found, record it. */
8287 if (bl->init_insn == 0)
8288 {
8289 bl->init_insn = note_insn;
8290 bl->init_set = set;
8291 }
8292 }
8293 \f
8294 /* If any of the registers in X are "old" and currently have a last use earlier
8295 than INSN, update them to have a last use of INSN. Their actual last use
8296 will be the previous insn but it will not have a valid uid_luid so we can't
8297 use it. X must be a source expression only. */
8298
8299 static void
8300 update_reg_last_use (x, insn)
8301 rtx x;
8302 rtx insn;
8303 {
8304 /* Check for the case where INSN does not have a valid luid. In this case,
8305 there is no need to modify the regno_last_uid, as this can only happen
8306 when code is inserted after the loop_end to set a pseudo's final value,
8307 and hence this insn will never be the last use of x.
8308 ???? This comment is not correct. See for example loop_givs_reduce.
8309 This may insert an insn before another new insn. */
8310 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8311 && INSN_UID (insn) < max_uid_for_loop
8312 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8313 {
8314 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8315 }
8316 else
8317 {
8318 int i, j;
8319 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8320 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8321 {
8322 if (fmt[i] == 'e')
8323 update_reg_last_use (XEXP (x, i), insn);
8324 else if (fmt[i] == 'E')
8325 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8326 update_reg_last_use (XVECEXP (x, i, j), insn);
8327 }
8328 }
8329 }
8330 \f
8331 /* Given an insn INSN and condition COND, return the condition in a
8332 canonical form to simplify testing by callers. Specifically:
8333
8334 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8335 (2) Both operands will be machine operands; (cc0) will have been replaced.
8336 (3) If an operand is a constant, it will be the second operand.
8337 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8338 for GE, GEU, and LEU.
8339
8340 If the condition cannot be understood, or is an inequality floating-point
8341 comparison which needs to be reversed, 0 will be returned.
8342
8343 If REVERSE is non-zero, then reverse the condition prior to canonizing it.
8344
8345 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8346 insn used in locating the condition was found. If a replacement test
8347 of the condition is desired, it should be placed in front of that
8348 insn and we will be sure that the inputs are still valid.
8349
8350 If WANT_REG is non-zero, we wish the condition to be relative to that
8351 register, if possible. Therefore, do not canonicalize the condition
8352 further. */
8353
8354 rtx
8355 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8356 rtx insn;
8357 rtx cond;
8358 int reverse;
8359 rtx *earliest;
8360 rtx want_reg;
8361 {
8362 enum rtx_code code;
8363 rtx prev = insn;
8364 rtx set;
8365 rtx tem;
8366 rtx op0, op1;
8367 int reverse_code = 0;
8368 enum machine_mode mode;
8369
8370 code = GET_CODE (cond);
8371 mode = GET_MODE (cond);
8372 op0 = XEXP (cond, 0);
8373 op1 = XEXP (cond, 1);
8374
8375 if (reverse)
8376 code = reversed_comparison_code (cond, insn);
8377 if (code == UNKNOWN)
8378 return 0;
8379
8380 if (earliest)
8381 *earliest = insn;
8382
8383 /* If we are comparing a register with zero, see if the register is set
8384 in the previous insn to a COMPARE or a comparison operation. Perform
8385 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8386 in cse.c */
8387
8388 while (GET_RTX_CLASS (code) == '<'
8389 && op1 == CONST0_RTX (GET_MODE (op0))
8390 && op0 != want_reg)
8391 {
8392 /* Set non-zero when we find something of interest. */
8393 rtx x = 0;
8394
8395 #ifdef HAVE_cc0
8396 /* If comparison with cc0, import actual comparison from compare
8397 insn. */
8398 if (op0 == cc0_rtx)
8399 {
8400 if ((prev = prev_nonnote_insn (prev)) == 0
8401 || GET_CODE (prev) != INSN
8402 || (set = single_set (prev)) == 0
8403 || SET_DEST (set) != cc0_rtx)
8404 return 0;
8405
8406 op0 = SET_SRC (set);
8407 op1 = CONST0_RTX (GET_MODE (op0));
8408 if (earliest)
8409 *earliest = prev;
8410 }
8411 #endif
8412
8413 /* If this is a COMPARE, pick up the two things being compared. */
8414 if (GET_CODE (op0) == COMPARE)
8415 {
8416 op1 = XEXP (op0, 1);
8417 op0 = XEXP (op0, 0);
8418 continue;
8419 }
8420 else if (GET_CODE (op0) != REG)
8421 break;
8422
8423 /* Go back to the previous insn. Stop if it is not an INSN. We also
8424 stop if it isn't a single set or if it has a REG_INC note because
8425 we don't want to bother dealing with it. */
8426
8427 if ((prev = prev_nonnote_insn (prev)) == 0
8428 || GET_CODE (prev) != INSN
8429 || FIND_REG_INC_NOTE (prev, 0))
8430 break;
8431
8432 set = set_of (op0, prev);
8433
8434 if (set
8435 && (GET_CODE (set) != SET
8436 || !rtx_equal_p (SET_DEST (set), op0)))
8437 break;
8438
8439 /* If this is setting OP0, get what it sets it to if it looks
8440 relevant. */
8441 if (set)
8442 {
8443 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8444
8445 /* ??? We may not combine comparisons done in a CCmode with
8446 comparisons not done in a CCmode. This is to aid targets
8447 like Alpha that have an IEEE compliant EQ instruction, and
8448 a non-IEEE compliant BEQ instruction. The use of CCmode is
8449 actually artificial, simply to prevent the combination, but
8450 should not affect other platforms.
8451
8452 However, we must allow VOIDmode comparisons to match either
8453 CCmode or non-CCmode comparison, because some ports have
8454 modeless comparisons inside branch patterns.
8455
8456 ??? This mode check should perhaps look more like the mode check
8457 in simplify_comparison in combine. */
8458
8459 if ((GET_CODE (SET_SRC (set)) == COMPARE
8460 || (((code == NE
8461 || (code == LT
8462 && GET_MODE_CLASS (inner_mode) == MODE_INT
8463 && (GET_MODE_BITSIZE (inner_mode)
8464 <= HOST_BITS_PER_WIDE_INT)
8465 && (STORE_FLAG_VALUE
8466 & ((HOST_WIDE_INT) 1
8467 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8468 #ifdef FLOAT_STORE_FLAG_VALUE
8469 || (code == LT
8470 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8471 && (REAL_VALUE_NEGATIVE
8472 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8473 #endif
8474 ))
8475 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
8476 && (((GET_MODE_CLASS (mode) == MODE_CC)
8477 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8478 || mode == VOIDmode || inner_mode == VOIDmode))
8479 x = SET_SRC (set);
8480 else if (((code == EQ
8481 || (code == GE
8482 && (GET_MODE_BITSIZE (inner_mode)
8483 <= HOST_BITS_PER_WIDE_INT)
8484 && GET_MODE_CLASS (inner_mode) == MODE_INT
8485 && (STORE_FLAG_VALUE
8486 & ((HOST_WIDE_INT) 1
8487 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8488 #ifdef FLOAT_STORE_FLAG_VALUE
8489 || (code == GE
8490 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8491 && (REAL_VALUE_NEGATIVE
8492 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8493 #endif
8494 ))
8495 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
8496 && (((GET_MODE_CLASS (mode) == MODE_CC)
8497 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8498 || mode == VOIDmode || inner_mode == VOIDmode))
8499
8500 {
8501 reverse_code = 1;
8502 x = SET_SRC (set);
8503 }
8504 else
8505 break;
8506 }
8507
8508 else if (reg_set_p (op0, prev))
8509 /* If this sets OP0, but not directly, we have to give up. */
8510 break;
8511
8512 if (x)
8513 {
8514 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
8515 code = GET_CODE (x);
8516 if (reverse_code)
8517 {
8518 code = reversed_comparison_code (x, prev);
8519 if (code == UNKNOWN)
8520 return 0;
8521 reverse_code = 0;
8522 }
8523
8524 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
8525 if (earliest)
8526 *earliest = prev;
8527 }
8528 }
8529
8530 /* If constant is first, put it last. */
8531 if (CONSTANT_P (op0))
8532 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
8533
8534 /* If OP0 is the result of a comparison, we weren't able to find what
8535 was really being compared, so fail. */
8536 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8537 return 0;
8538
8539 /* Canonicalize any ordered comparison with integers involving equality
8540 if we can do computations in the relevant mode and we do not
8541 overflow. */
8542
8543 if (GET_CODE (op1) == CONST_INT
8544 && GET_MODE (op0) != VOIDmode
8545 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
8546 {
8547 HOST_WIDE_INT const_val = INTVAL (op1);
8548 unsigned HOST_WIDE_INT uconst_val = const_val;
8549 unsigned HOST_WIDE_INT max_val
8550 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
8551
8552 switch (code)
8553 {
8554 case LE:
8555 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
8556 code = LT, op1 = GEN_INT (const_val + 1);
8557 break;
8558
8559 /* When cross-compiling, const_val might be sign-extended from
8560 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
8561 case GE:
8562 if ((HOST_WIDE_INT) (const_val & max_val)
8563 != (((HOST_WIDE_INT) 1
8564 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
8565 code = GT, op1 = GEN_INT (const_val - 1);
8566 break;
8567
8568 case LEU:
8569 if (uconst_val < max_val)
8570 code = LTU, op1 = GEN_INT (uconst_val + 1);
8571 break;
8572
8573 case GEU:
8574 if (uconst_val != 0)
8575 code = GTU, op1 = GEN_INT (uconst_val - 1);
8576 break;
8577
8578 default:
8579 break;
8580 }
8581 }
8582
8583 #ifdef HAVE_cc0
8584 /* Never return CC0; return zero instead. */
8585 if (op0 == cc0_rtx)
8586 return 0;
8587 #endif
8588
8589 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
8590 }
8591
8592 /* Given a jump insn JUMP, return the condition that will cause it to branch
8593 to its JUMP_LABEL. If the condition cannot be understood, or is an
8594 inequality floating-point comparison which needs to be reversed, 0 will
8595 be returned.
8596
8597 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8598 insn used in locating the condition was found. If a replacement test
8599 of the condition is desired, it should be placed in front of that
8600 insn and we will be sure that the inputs are still valid. */
8601
8602 rtx
8603 get_condition (jump, earliest)
8604 rtx jump;
8605 rtx *earliest;
8606 {
8607 rtx cond;
8608 int reverse;
8609 rtx set;
8610
8611 /* If this is not a standard conditional jump, we can't parse it. */
8612 if (GET_CODE (jump) != JUMP_INSN
8613 || ! any_condjump_p (jump))
8614 return 0;
8615 set = pc_set (jump);
8616
8617 cond = XEXP (SET_SRC (set), 0);
8618
8619 /* If this branches to JUMP_LABEL when the condition is false, reverse
8620 the condition. */
8621 reverse
8622 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
8623 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
8624
8625 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
8626 }
8627
8628 /* Similar to above routine, except that we also put an invariant last
8629 unless both operands are invariants. */
8630
8631 rtx
8632 get_condition_for_loop (loop, x)
8633 const struct loop *loop;
8634 rtx x;
8635 {
8636 rtx comparison = get_condition (x, (rtx*)0);
8637
8638 if (comparison == 0
8639 || ! loop_invariant_p (loop, XEXP (comparison, 0))
8640 || loop_invariant_p (loop, XEXP (comparison, 1)))
8641 return comparison;
8642
8643 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
8644 XEXP (comparison, 1), XEXP (comparison, 0));
8645 }
8646
8647 /* Scan the function and determine whether it has indirect (computed) jumps.
8648
8649 This is taken mostly from flow.c; similar code exists elsewhere
8650 in the compiler. It may be useful to put this into rtlanal.c. */
8651 static int
8652 indirect_jump_in_function_p (start)
8653 rtx start;
8654 {
8655 rtx insn;
8656
8657 for (insn = start; insn; insn = NEXT_INSN (insn))
8658 if (computed_jump_p (insn))
8659 return 1;
8660
8661 return 0;
8662 }
8663
8664 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8665 documentation for LOOP_MEMS for the definition of `appropriate'.
8666 This function is called from prescan_loop via for_each_rtx. */
8667
8668 static int
8669 insert_loop_mem (mem, data)
8670 rtx *mem;
8671 void *data ATTRIBUTE_UNUSED;
8672 {
8673 struct loop_info *loop_info = data;
8674 int i;
8675 rtx m = *mem;
8676
8677 if (m == NULL_RTX)
8678 return 0;
8679
8680 switch (GET_CODE (m))
8681 {
8682 case MEM:
8683 break;
8684
8685 case CLOBBER:
8686 /* We're not interested in MEMs that are only clobbered. */
8687 return -1;
8688
8689 case CONST_DOUBLE:
8690 /* We're not interested in the MEM associated with a
8691 CONST_DOUBLE, so there's no need to traverse into this. */
8692 return -1;
8693
8694 case EXPR_LIST:
8695 /* We're not interested in any MEMs that only appear in notes. */
8696 return -1;
8697
8698 default:
8699 /* This is not a MEM. */
8700 return 0;
8701 }
8702
8703 /* See if we've already seen this MEM. */
8704 for (i = 0; i < loop_info->mems_idx; ++i)
8705 if (rtx_equal_p (m, loop_info->mems[i].mem))
8706 {
8707 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
8708 /* The modes of the two memory accesses are different. If
8709 this happens, something tricky is going on, and we just
8710 don't optimize accesses to this MEM. */
8711 loop_info->mems[i].optimize = 0;
8712
8713 return 0;
8714 }
8715
8716 /* Resize the array, if necessary. */
8717 if (loop_info->mems_idx == loop_info->mems_allocated)
8718 {
8719 if (loop_info->mems_allocated != 0)
8720 loop_info->mems_allocated *= 2;
8721 else
8722 loop_info->mems_allocated = 32;
8723
8724 loop_info->mems = (loop_mem_info *)
8725 xrealloc (loop_info->mems,
8726 loop_info->mems_allocated * sizeof (loop_mem_info));
8727 }
8728
8729 /* Actually insert the MEM. */
8730 loop_info->mems[loop_info->mems_idx].mem = m;
8731 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8732 because we can't put it in a register. We still store it in the
8733 table, though, so that if we see the same address later, but in a
8734 non-BLK mode, we'll not think we can optimize it at that point. */
8735 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
8736 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
8737 ++loop_info->mems_idx;
8738
8739 return 0;
8740 }
8741
8742
8743 /* Allocate REGS->ARRAY or reallocate it if it is too small.
8744
8745 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
8746 register that is modified by an insn between FROM and TO. If the
8747 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
8748 more, stop incrementing it, to avoid overflow.
8749
8750 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
8751 register I is used, if it is only used once. Otherwise, it is set
8752 to 0 (for no uses) or const0_rtx for more than one use. This
8753 parameter may be zero, in which case this processing is not done.
8754
8755 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
8756 optimize register I. */
8757
8758 static void
8759 loop_regs_scan (loop, extra_size)
8760 const struct loop *loop;
8761 int extra_size;
8762 {
8763 struct loop_regs *regs = LOOP_REGS (loop);
8764 int old_nregs;
8765 /* last_set[n] is nonzero iff reg n has been set in the current
8766 basic block. In that case, it is the insn that last set reg n. */
8767 rtx *last_set;
8768 rtx insn;
8769 int i;
8770
8771 old_nregs = regs->num;
8772 regs->num = max_reg_num ();
8773
8774 /* Grow the regs array if not allocated or too small. */
8775 if (regs->num >= regs->size)
8776 {
8777 regs->size = regs->num + extra_size;
8778
8779 regs->array = (struct loop_reg *)
8780 xrealloc (regs->array, regs->size * sizeof (*regs->array));
8781
8782 /* Zero the new elements. */
8783 memset (regs->array + old_nregs, 0,
8784 (regs->size - old_nregs) * sizeof (*regs->array));
8785 }
8786
8787 /* Clear previously scanned fields but do not clear n_times_set. */
8788 for (i = 0; i < old_nregs; i++)
8789 {
8790 regs->array[i].set_in_loop = 0;
8791 regs->array[i].may_not_optimize = 0;
8792 regs->array[i].single_usage = NULL_RTX;
8793 }
8794
8795 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
8796
8797 /* Scan the loop, recording register usage. */
8798 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8799 insn = NEXT_INSN (insn))
8800 {
8801 if (INSN_P (insn))
8802 {
8803 /* Record registers that have exactly one use. */
8804 find_single_use_in_loop (regs, insn, PATTERN (insn));
8805
8806 /* Include uses in REG_EQUAL notes. */
8807 if (REG_NOTES (insn))
8808 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
8809
8810 if (GET_CODE (PATTERN (insn)) == SET
8811 || GET_CODE (PATTERN (insn)) == CLOBBER)
8812 count_one_set (regs, insn, PATTERN (insn), last_set);
8813 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8814 {
8815 int i;
8816 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8817 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
8818 last_set);
8819 }
8820 }
8821
8822 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
8823 memset (last_set, 0, regs->num * sizeof (rtx));
8824 }
8825
8826 /* Invalidate all hard registers clobbered by calls. With one exception:
8827 a call-clobbered PIC register is still function-invariant for our
8828 purposes, since we can hoist any PIC calculations out of the loop.
8829 Thus the call to rtx_varies_p. */
8830 if (LOOP_INFO (loop)->has_call)
8831 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8832 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
8833 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
8834 {
8835 regs->array[i].may_not_optimize = 1;
8836 regs->array[i].set_in_loop = 1;
8837 }
8838
8839 #ifdef AVOID_CCMODE_COPIES
8840 /* Don't try to move insns which set CC registers if we should not
8841 create CCmode register copies. */
8842 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
8843 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
8844 regs->array[i].may_not_optimize = 1;
8845 #endif
8846
8847 /* Set regs->array[I].n_times_set for the new registers. */
8848 for (i = old_nregs; i < regs->num; i++)
8849 regs->array[i].n_times_set = regs->array[i].set_in_loop;
8850
8851 free (last_set);
8852 }
8853
8854 /* Returns the number of real INSNs in the LOOP. */
8855
8856 static int
8857 count_insns_in_loop (loop)
8858 const struct loop *loop;
8859 {
8860 int count = 0;
8861 rtx insn;
8862
8863 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8864 insn = NEXT_INSN (insn))
8865 if (INSN_P (insn))
8866 ++count;
8867
8868 return count;
8869 }
8870
8871 /* Move MEMs into registers for the duration of the loop. */
8872
8873 static void
8874 load_mems (loop)
8875 const struct loop *loop;
8876 {
8877 struct loop_info *loop_info = LOOP_INFO (loop);
8878 struct loop_regs *regs = LOOP_REGS (loop);
8879 int maybe_never = 0;
8880 int i;
8881 rtx p, prev_ebb_head;
8882 rtx label = NULL_RTX;
8883 rtx end_label;
8884 /* Nonzero if the next instruction may never be executed. */
8885 int next_maybe_never = 0;
8886 unsigned int last_max_reg = max_reg_num ();
8887
8888 if (loop_info->mems_idx == 0)
8889 return;
8890
8891 /* We cannot use next_label here because it skips over normal insns. */
8892 end_label = next_nonnote_insn (loop->end);
8893 if (end_label && GET_CODE (end_label) != CODE_LABEL)
8894 end_label = NULL_RTX;
8895
8896 /* Check to see if it's possible that some instructions in the loop are
8897 never executed. Also check if there is a goto out of the loop other
8898 than right after the end of the loop. */
8899 for (p = next_insn_in_loop (loop, loop->scan_start);
8900 p != NULL_RTX;
8901 p = next_insn_in_loop (loop, p))
8902 {
8903 if (GET_CODE (p) == CODE_LABEL)
8904 maybe_never = 1;
8905 else if (GET_CODE (p) == JUMP_INSN
8906 /* If we enter the loop in the middle, and scan
8907 around to the beginning, don't set maybe_never
8908 for that. This must be an unconditional jump,
8909 otherwise the code at the top of the loop might
8910 never be executed. Unconditional jumps are
8911 followed a by barrier then loop end. */
8912 && ! (GET_CODE (p) == JUMP_INSN
8913 && JUMP_LABEL (p) == loop->top
8914 && NEXT_INSN (NEXT_INSN (p)) == loop->end
8915 && any_uncondjump_p (p)))
8916 {
8917 /* If this is a jump outside of the loop but not right
8918 after the end of the loop, we would have to emit new fixup
8919 sequences for each such label. */
8920 if (/* If we can't tell where control might go when this
8921 JUMP_INSN is executed, we must be conservative. */
8922 !JUMP_LABEL (p)
8923 || (JUMP_LABEL (p) != end_label
8924 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
8925 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
8926 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
8927 return;
8928
8929 if (!any_condjump_p (p))
8930 /* Something complicated. */
8931 maybe_never = 1;
8932 else
8933 /* If there are any more instructions in the loop, they
8934 might not be reached. */
8935 next_maybe_never = 1;
8936 }
8937 else if (next_maybe_never)
8938 maybe_never = 1;
8939 }
8940
8941 /* Find start of the extended basic block that enters the loop. */
8942 for (p = loop->start;
8943 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
8944 p = PREV_INSN (p))
8945 ;
8946 prev_ebb_head = p;
8947
8948 cselib_init ();
8949
8950 /* Build table of mems that get set to constant values before the
8951 loop. */
8952 for (; p != loop->start; p = NEXT_INSN (p))
8953 cselib_process_insn (p);
8954
8955 /* Actually move the MEMs. */
8956 for (i = 0; i < loop_info->mems_idx; ++i)
8957 {
8958 regset_head load_copies;
8959 regset_head store_copies;
8960 int written = 0;
8961 rtx reg;
8962 rtx mem = loop_info->mems[i].mem;
8963 rtx mem_list_entry;
8964
8965 if (MEM_VOLATILE_P (mem)
8966 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8967 /* There's no telling whether or not MEM is modified. */
8968 loop_info->mems[i].optimize = 0;
8969
8970 /* Go through the MEMs written to in the loop to see if this
8971 one is aliased by one of them. */
8972 mem_list_entry = loop_info->store_mems;
8973 while (mem_list_entry)
8974 {
8975 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
8976 written = 1;
8977 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
8978 mem, rtx_varies_p))
8979 {
8980 /* MEM is indeed aliased by this store. */
8981 loop_info->mems[i].optimize = 0;
8982 break;
8983 }
8984 mem_list_entry = XEXP (mem_list_entry, 1);
8985 }
8986
8987 if (flag_float_store && written
8988 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
8989 loop_info->mems[i].optimize = 0;
8990
8991 /* If this MEM is written to, we must be sure that there
8992 are no reads from another MEM that aliases this one. */
8993 if (loop_info->mems[i].optimize && written)
8994 {
8995 int j;
8996
8997 for (j = 0; j < loop_info->mems_idx; ++j)
8998 {
8999 if (j == i)
9000 continue;
9001 else if (true_dependence (mem,
9002 VOIDmode,
9003 loop_info->mems[j].mem,
9004 rtx_varies_p))
9005 {
9006 /* It's not safe to hoist loop_info->mems[i] out of
9007 the loop because writes to it might not be
9008 seen by reads from loop_info->mems[j]. */
9009 loop_info->mems[i].optimize = 0;
9010 break;
9011 }
9012 }
9013 }
9014
9015 if (maybe_never && may_trap_p (mem))
9016 /* We can't access the MEM outside the loop; it might
9017 cause a trap that wouldn't have happened otherwise. */
9018 loop_info->mems[i].optimize = 0;
9019
9020 if (!loop_info->mems[i].optimize)
9021 /* We thought we were going to lift this MEM out of the
9022 loop, but later discovered that we could not. */
9023 continue;
9024
9025 INIT_REG_SET (&load_copies);
9026 INIT_REG_SET (&store_copies);
9027
9028 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9029 order to keep scan_loop from moving stores to this MEM
9030 out of the loop just because this REG is neither a
9031 user-variable nor used in the loop test. */
9032 reg = gen_reg_rtx (GET_MODE (mem));
9033 REG_USERVAR_P (reg) = 1;
9034 loop_info->mems[i].reg = reg;
9035
9036 /* Now, replace all references to the MEM with the
9037 corresponding pseudos. */
9038 maybe_never = 0;
9039 for (p = next_insn_in_loop (loop, loop->scan_start);
9040 p != NULL_RTX;
9041 p = next_insn_in_loop (loop, p))
9042 {
9043 if (INSN_P (p))
9044 {
9045 rtx set;
9046
9047 set = single_set (p);
9048
9049 /* See if this copies the mem into a register that isn't
9050 modified afterwards. We'll try to do copy propagation
9051 a little further on. */
9052 if (set
9053 /* @@@ This test is _way_ too conservative. */
9054 && ! maybe_never
9055 && GET_CODE (SET_DEST (set)) == REG
9056 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9057 && REGNO (SET_DEST (set)) < last_max_reg
9058 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9059 && rtx_equal_p (SET_SRC (set), mem))
9060 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9061
9062 /* See if this copies the mem from a register that isn't
9063 modified afterwards. We'll try to remove the
9064 redundant copy later on by doing a little register
9065 renaming and copy propagation. This will help
9066 to untangle things for the BIV detection code. */
9067 if (set
9068 && ! maybe_never
9069 && GET_CODE (SET_SRC (set)) == REG
9070 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9071 && REGNO (SET_SRC (set)) < last_max_reg
9072 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9073 && rtx_equal_p (SET_DEST (set), mem))
9074 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9075
9076 /* Replace the memory reference with the shadow register. */
9077 replace_loop_mems (p, loop_info->mems[i].mem,
9078 loop_info->mems[i].reg);
9079 }
9080
9081 if (GET_CODE (p) == CODE_LABEL
9082 || GET_CODE (p) == JUMP_INSN)
9083 maybe_never = 1;
9084 }
9085
9086 if (! apply_change_group ())
9087 /* We couldn't replace all occurrences of the MEM. */
9088 loop_info->mems[i].optimize = 0;
9089 else
9090 {
9091 /* Load the memory immediately before LOOP->START, which is
9092 the NOTE_LOOP_BEG. */
9093 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9094 rtx set;
9095 rtx best = mem;
9096 int j;
9097 struct elt_loc_list *const_equiv = 0;
9098
9099 if (e)
9100 {
9101 struct elt_loc_list *equiv;
9102 struct elt_loc_list *best_equiv = 0;
9103 for (equiv = e->locs; equiv; equiv = equiv->next)
9104 {
9105 if (CONSTANT_P (equiv->loc))
9106 const_equiv = equiv;
9107 else if (GET_CODE (equiv->loc) == REG
9108 /* Extending hard register lifetimes causes crash
9109 on SRC targets. Doing so on non-SRC is
9110 probably also not good idea, since we most
9111 probably have pseudoregister equivalence as
9112 well. */
9113 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9114 best_equiv = equiv;
9115 }
9116 /* Use the constant equivalence if that is cheap enough. */
9117 if (! best_equiv)
9118 best_equiv = const_equiv;
9119 else if (const_equiv
9120 && (rtx_cost (const_equiv->loc, SET)
9121 <= rtx_cost (best_equiv->loc, SET)))
9122 {
9123 best_equiv = const_equiv;
9124 const_equiv = 0;
9125 }
9126
9127 /* If best_equiv is nonzero, we know that MEM is set to a
9128 constant or register before the loop. We will use this
9129 knowledge to initialize the shadow register with that
9130 constant or reg rather than by loading from MEM. */
9131 if (best_equiv)
9132 best = copy_rtx (best_equiv->loc);
9133 }
9134
9135 set = gen_move_insn (reg, best);
9136 set = loop_insn_hoist (loop, set);
9137 if (REG_P (best))
9138 {
9139 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9140 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9141 {
9142 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9143 break;
9144 }
9145 }
9146
9147 if (const_equiv)
9148 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9149
9150 if (written)
9151 {
9152 if (label == NULL_RTX)
9153 {
9154 label = gen_label_rtx ();
9155 emit_label_after (label, loop->end);
9156 }
9157
9158 /* Store the memory immediately after END, which is
9159 the NOTE_LOOP_END. */
9160 set = gen_move_insn (copy_rtx (mem), reg);
9161 loop_insn_emit_after (loop, 0, label, set);
9162 }
9163
9164 if (loop_dump_stream)
9165 {
9166 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9167 REGNO (reg), (written ? "r/w" : "r/o"));
9168 print_rtl (loop_dump_stream, mem);
9169 fputc ('\n', loop_dump_stream);
9170 }
9171
9172 /* Attempt a bit of copy propagation. This helps untangle the
9173 data flow, and enables {basic,general}_induction_var to find
9174 more bivs/givs. */
9175 EXECUTE_IF_SET_IN_REG_SET
9176 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9177 {
9178 try_copy_prop (loop, reg, j);
9179 });
9180 CLEAR_REG_SET (&load_copies);
9181
9182 EXECUTE_IF_SET_IN_REG_SET
9183 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9184 {
9185 try_swap_copy_prop (loop, reg, j);
9186 });
9187 CLEAR_REG_SET (&store_copies);
9188 }
9189 }
9190
9191 if (label != NULL_RTX && end_label != NULL_RTX)
9192 {
9193 /* Now, we need to replace all references to the previous exit
9194 label with the new one. */
9195 rtx_pair rr;
9196 rr.r1 = end_label;
9197 rr.r2 = label;
9198
9199 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9200 {
9201 for_each_rtx (&p, replace_label, &rr);
9202
9203 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9204 field. This is not handled by for_each_rtx because it doesn't
9205 handle unprinted ('0') fields. We need to update JUMP_LABEL
9206 because the immediately following unroll pass will use it.
9207 replace_label would not work anyways, because that only handles
9208 LABEL_REFs. */
9209 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9210 JUMP_LABEL (p) = label;
9211 }
9212 }
9213
9214 cselib_finish ();
9215 }
9216
9217 /* For communication between note_reg_stored and its caller. */
9218 struct note_reg_stored_arg
9219 {
9220 int set_seen;
9221 rtx reg;
9222 };
9223
9224 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9225 is equal to ARG. */
9226 static void
9227 note_reg_stored (x, setter, arg)
9228 rtx x, setter ATTRIBUTE_UNUSED;
9229 void *arg;
9230 {
9231 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9232 if (t->reg == x)
9233 t->set_seen = 1;
9234 }
9235
9236 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9237 There must be exactly one insn that sets this pseudo; it will be
9238 deleted if all replacements succeed and we can prove that the register
9239 is not used after the loop. */
9240
9241 static void
9242 try_copy_prop (loop, replacement, regno)
9243 const struct loop *loop;
9244 rtx replacement;
9245 unsigned int regno;
9246 {
9247 /* This is the reg that we are copying from. */
9248 rtx reg_rtx = regno_reg_rtx[regno];
9249 rtx init_insn = 0;
9250 rtx insn;
9251 /* These help keep track of whether we replaced all uses of the reg. */
9252 int replaced_last = 0;
9253 int store_is_first = 0;
9254
9255 for (insn = next_insn_in_loop (loop, loop->scan_start);
9256 insn != NULL_RTX;
9257 insn = next_insn_in_loop (loop, insn))
9258 {
9259 rtx set;
9260
9261 /* Only substitute within one extended basic block from the initializing
9262 insn. */
9263 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9264 break;
9265
9266 if (! INSN_P (insn))
9267 continue;
9268
9269 /* Is this the initializing insn? */
9270 set = single_set (insn);
9271 if (set
9272 && GET_CODE (SET_DEST (set)) == REG
9273 && REGNO (SET_DEST (set)) == regno)
9274 {
9275 if (init_insn)
9276 abort ();
9277
9278 init_insn = insn;
9279 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9280 store_is_first = 1;
9281 }
9282
9283 /* Only substitute after seeing the initializing insn. */
9284 if (init_insn && insn != init_insn)
9285 {
9286 struct note_reg_stored_arg arg;
9287
9288 replace_loop_regs (insn, reg_rtx, replacement);
9289 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9290 replaced_last = 1;
9291
9292 /* Stop replacing when REPLACEMENT is modified. */
9293 arg.reg = replacement;
9294 arg.set_seen = 0;
9295 note_stores (PATTERN (insn), note_reg_stored, &arg);
9296 if (arg.set_seen)
9297 {
9298 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
9299
9300 /* It is possible that we've turned previously valid REG_EQUAL to
9301 invalid, as we change the REGNO to REPLACEMENT and unlike REGNO,
9302 REPLACEMENT is modified, we get different meaning. */
9303 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
9304 remove_note (insn, note);
9305 break;
9306 }
9307 }
9308 }
9309 if (! init_insn)
9310 abort ();
9311 if (apply_change_group ())
9312 {
9313 if (loop_dump_stream)
9314 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9315 if (store_is_first && replaced_last)
9316 {
9317 rtx first;
9318 rtx retval_note;
9319
9320 /* Assume we're just deleting INIT_INSN. */
9321 first = init_insn;
9322 /* Look for REG_RETVAL note. If we're deleting the end of
9323 the libcall sequence, the whole sequence can go. */
9324 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9325 /* If we found a REG_RETVAL note, find the first instruction
9326 in the sequence. */
9327 if (retval_note)
9328 first = XEXP (retval_note, 0);
9329
9330 /* Delete the instructions. */
9331 loop_delete_insns (first, init_insn);
9332 }
9333 if (loop_dump_stream)
9334 fprintf (loop_dump_stream, ".\n");
9335 }
9336 }
9337
9338 /* Replace all the instructions from FIRST up to and including LAST
9339 with NOTE_INSN_DELETED notes. */
9340
9341 static void
9342 loop_delete_insns (first, last)
9343 rtx first;
9344 rtx last;
9345 {
9346 while (1)
9347 {
9348 if (loop_dump_stream)
9349 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
9350 INSN_UID (first));
9351 delete_insn (first);
9352
9353 /* If this was the LAST instructions we're supposed to delete,
9354 we're done. */
9355 if (first == last)
9356 break;
9357
9358 first = NEXT_INSN (first);
9359 }
9360 }
9361
9362 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
9363 loop LOOP if the order of the sets of these registers can be
9364 swapped. There must be exactly one insn within the loop that sets
9365 this pseudo followed immediately by a move insn that sets
9366 REPLACEMENT with REGNO. */
9367 static void
9368 try_swap_copy_prop (loop, replacement, regno)
9369 const struct loop *loop;
9370 rtx replacement;
9371 unsigned int regno;
9372 {
9373 rtx insn;
9374 rtx set = NULL_RTX;
9375 unsigned int new_regno;
9376
9377 new_regno = REGNO (replacement);
9378
9379 for (insn = next_insn_in_loop (loop, loop->scan_start);
9380 insn != NULL_RTX;
9381 insn = next_insn_in_loop (loop, insn))
9382 {
9383 /* Search for the insn that copies REGNO to NEW_REGNO? */
9384 if (INSN_P (insn)
9385 && (set = single_set (insn))
9386 && GET_CODE (SET_DEST (set)) == REG
9387 && REGNO (SET_DEST (set)) == new_regno
9388 && GET_CODE (SET_SRC (set)) == REG
9389 && REGNO (SET_SRC (set)) == regno)
9390 break;
9391 }
9392
9393 if (insn != NULL_RTX)
9394 {
9395 rtx prev_insn;
9396 rtx prev_set;
9397
9398 /* Some DEF-USE info would come in handy here to make this
9399 function more general. For now, just check the previous insn
9400 which is the most likely candidate for setting REGNO. */
9401
9402 prev_insn = PREV_INSN (insn);
9403
9404 if (INSN_P (insn)
9405 && (prev_set = single_set (prev_insn))
9406 && GET_CODE (SET_DEST (prev_set)) == REG
9407 && REGNO (SET_DEST (prev_set)) == regno)
9408 {
9409 /* We have:
9410 (set (reg regno) (expr))
9411 (set (reg new_regno) (reg regno))
9412
9413 so try converting this to:
9414 (set (reg new_regno) (expr))
9415 (set (reg regno) (reg new_regno))
9416
9417 The former construct is often generated when a global
9418 variable used for an induction variable is shadowed by a
9419 register (NEW_REGNO). The latter construct improves the
9420 chances of GIV replacement and BIV elimination. */
9421
9422 validate_change (prev_insn, &SET_DEST (prev_set),
9423 replacement, 1);
9424 validate_change (insn, &SET_DEST (set),
9425 SET_SRC (set), 1);
9426 validate_change (insn, &SET_SRC (set),
9427 replacement, 1);
9428
9429 if (apply_change_group ())
9430 {
9431 if (loop_dump_stream)
9432 fprintf (loop_dump_stream,
9433 " Swapped set of reg %d at %d with reg %d at %d.\n",
9434 regno, INSN_UID (insn),
9435 new_regno, INSN_UID (prev_insn));
9436
9437 /* Update first use of REGNO. */
9438 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
9439 REGNO_FIRST_UID (regno) = INSN_UID (insn);
9440
9441 /* Now perform copy propagation to hopefully
9442 remove all uses of REGNO within the loop. */
9443 try_copy_prop (loop, replacement, regno);
9444 }
9445 }
9446 }
9447 }
9448
9449 /* Replace MEM with its associated pseudo register. This function is
9450 called from load_mems via for_each_rtx. DATA is actually a pointer
9451 to a structure describing the instruction currently being scanned
9452 and the MEM we are currently replacing. */
9453
9454 static int
9455 replace_loop_mem (mem, data)
9456 rtx *mem;
9457 void *data;
9458 {
9459 loop_replace_args *args = (loop_replace_args *) data;
9460 rtx m = *mem;
9461
9462 if (m == NULL_RTX)
9463 return 0;
9464
9465 switch (GET_CODE (m))
9466 {
9467 case MEM:
9468 break;
9469
9470 case CONST_DOUBLE:
9471 /* We're not interested in the MEM associated with a
9472 CONST_DOUBLE, so there's no need to traverse into one. */
9473 return -1;
9474
9475 default:
9476 /* This is not a MEM. */
9477 return 0;
9478 }
9479
9480 if (!rtx_equal_p (args->match, m))
9481 /* This is not the MEM we are currently replacing. */
9482 return 0;
9483
9484 /* Actually replace the MEM. */
9485 validate_change (args->insn, mem, args->replacement, 1);
9486
9487 return 0;
9488 }
9489
9490 static void
9491 replace_loop_mems (insn, mem, reg)
9492 rtx insn;
9493 rtx mem;
9494 rtx reg;
9495 {
9496 loop_replace_args args;
9497
9498 args.insn = insn;
9499 args.match = mem;
9500 args.replacement = reg;
9501
9502 for_each_rtx (&insn, replace_loop_mem, &args);
9503 }
9504
9505 /* Replace one register with another. Called through for_each_rtx; PX points
9506 to the rtx being scanned. DATA is actually a pointer to
9507 a structure of arguments. */
9508
9509 static int
9510 replace_loop_reg (px, data)
9511 rtx *px;
9512 void *data;
9513 {
9514 rtx x = *px;
9515 loop_replace_args *args = (loop_replace_args *) data;
9516
9517 if (x == NULL_RTX)
9518 return 0;
9519
9520 if (x == args->match)
9521 validate_change (args->insn, px, args->replacement, 1);
9522
9523 return 0;
9524 }
9525
9526 static void
9527 replace_loop_regs (insn, reg, replacement)
9528 rtx insn;
9529 rtx reg;
9530 rtx replacement;
9531 {
9532 loop_replace_args args;
9533
9534 args.insn = insn;
9535 args.match = reg;
9536 args.replacement = replacement;
9537
9538 for_each_rtx (&insn, replace_loop_reg, &args);
9539 }
9540
9541 /* Replace occurrences of the old exit label for the loop with the new
9542 one. DATA is an rtx_pair containing the old and new labels,
9543 respectively. */
9544
9545 static int
9546 replace_label (x, data)
9547 rtx *x;
9548 void *data;
9549 {
9550 rtx l = *x;
9551 rtx old_label = ((rtx_pair *) data)->r1;
9552 rtx new_label = ((rtx_pair *) data)->r2;
9553
9554 if (l == NULL_RTX)
9555 return 0;
9556
9557 if (GET_CODE (l) != LABEL_REF)
9558 return 0;
9559
9560 if (XEXP (l, 0) != old_label)
9561 return 0;
9562
9563 XEXP (l, 0) = new_label;
9564 ++LABEL_NUSES (new_label);
9565 --LABEL_NUSES (old_label);
9566
9567 return 0;
9568 }
9569 \f
9570 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
9571 (ignored in the interim). */
9572
9573 static rtx
9574 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
9575 const struct loop *loop ATTRIBUTE_UNUSED;
9576 basic_block where_bb ATTRIBUTE_UNUSED;
9577 rtx where_insn;
9578 rtx pattern;
9579 {
9580 return emit_insn_after (pattern, where_insn);
9581 }
9582
9583
9584 /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN
9585 in basic block WHERE_BB (ignored in the interim) within the loop
9586 otherwise hoist PATTERN into the loop pre-header. */
9587
9588 rtx
9589 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
9590 const struct loop *loop;
9591 basic_block where_bb ATTRIBUTE_UNUSED;
9592 rtx where_insn;
9593 rtx pattern;
9594 {
9595 if (! where_insn)
9596 return loop_insn_hoist (loop, pattern);
9597 return emit_insn_before (pattern, where_insn);
9598 }
9599
9600
9601 /* Emit call insn for PATTERN before WHERE_INSN in basic block
9602 WHERE_BB (ignored in the interim) within the loop. */
9603
9604 static rtx
9605 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
9606 const struct loop *loop ATTRIBUTE_UNUSED;
9607 basic_block where_bb ATTRIBUTE_UNUSED;
9608 rtx where_insn;
9609 rtx pattern;
9610 {
9611 return emit_call_insn_before (pattern, where_insn);
9612 }
9613
9614
9615 /* Hoist insn for PATTERN into the loop pre-header. */
9616
9617 rtx
9618 loop_insn_hoist (loop, pattern)
9619 const struct loop *loop;
9620 rtx pattern;
9621 {
9622 return loop_insn_emit_before (loop, 0, loop->start, pattern);
9623 }
9624
9625
9626 /* Hoist call insn for PATTERN into the loop pre-header. */
9627
9628 static rtx
9629 loop_call_insn_hoist (loop, pattern)
9630 const struct loop *loop;
9631 rtx pattern;
9632 {
9633 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
9634 }
9635
9636
9637 /* Sink insn for PATTERN after the loop end. */
9638
9639 rtx
9640 loop_insn_sink (loop, pattern)
9641 const struct loop *loop;
9642 rtx pattern;
9643 {
9644 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
9645 }
9646
9647
9648 /* If the loop has multiple exits, emit insn for PATTERN before the
9649 loop to ensure that it will always be executed no matter how the
9650 loop exits. Otherwise, emit the insn for PATTERN after the loop,
9651 since this is slightly more efficient. */
9652
9653 static rtx
9654 loop_insn_sink_or_swim (loop, pattern)
9655 const struct loop *loop;
9656 rtx pattern;
9657 {
9658 if (loop->exit_count)
9659 return loop_insn_hoist (loop, pattern);
9660 else
9661 return loop_insn_sink (loop, pattern);
9662 }
9663 \f
9664 static void
9665 loop_ivs_dump (loop, file, verbose)
9666 const struct loop *loop;
9667 FILE *file;
9668 int verbose;
9669 {
9670 struct iv_class *bl;
9671 int iv_num = 0;
9672
9673 if (! loop || ! file)
9674 return;
9675
9676 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9677 iv_num++;
9678
9679 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
9680
9681 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9682 {
9683 loop_iv_class_dump (bl, file, verbose);
9684 fputc ('\n', file);
9685 }
9686 }
9687
9688
9689 static void
9690 loop_iv_class_dump (bl, file, verbose)
9691 const struct iv_class *bl;
9692 FILE *file;
9693 int verbose ATTRIBUTE_UNUSED;
9694 {
9695 struct induction *v;
9696 rtx incr;
9697 int i;
9698
9699 if (! bl || ! file)
9700 return;
9701
9702 fprintf (file, "IV class for reg %d, benefit %d\n",
9703 bl->regno, bl->total_benefit);
9704
9705 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
9706 if (bl->initial_value)
9707 {
9708 fprintf (file, ", init val: ");
9709 print_simple_rtl (file, bl->initial_value);
9710 }
9711 if (bl->initial_test)
9712 {
9713 fprintf (file, ", init test: ");
9714 print_simple_rtl (file, bl->initial_test);
9715 }
9716 fputc ('\n', file);
9717
9718 if (bl->final_value)
9719 {
9720 fprintf (file, " Final val: ");
9721 print_simple_rtl (file, bl->final_value);
9722 fputc ('\n', file);
9723 }
9724
9725 if ((incr = biv_total_increment (bl)))
9726 {
9727 fprintf (file, " Total increment: ");
9728 print_simple_rtl (file, incr);
9729 fputc ('\n', file);
9730 }
9731
9732 /* List the increments. */
9733 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
9734 {
9735 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
9736 print_simple_rtl (file, v->add_val);
9737 fputc ('\n', file);
9738 }
9739
9740 /* List the givs. */
9741 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
9742 {
9743 fprintf (file, " Giv%d: insn %d, benefit %d, ",
9744 i, INSN_UID (v->insn), v->benefit);
9745 if (v->giv_type == DEST_ADDR)
9746 print_simple_rtl (file, v->mem);
9747 else
9748 print_simple_rtl (file, single_set (v->insn));
9749 fputc ('\n', file);
9750 }
9751 }
9752
9753
9754 static void
9755 loop_biv_dump (v, file, verbose)
9756 const struct induction *v;
9757 FILE *file;
9758 int verbose;
9759 {
9760 if (! v || ! file)
9761 return;
9762
9763 fprintf (file,
9764 "Biv %d: insn %d",
9765 REGNO (v->dest_reg), INSN_UID (v->insn));
9766 fprintf (file, " const ");
9767 print_simple_rtl (file, v->add_val);
9768
9769 if (verbose && v->final_value)
9770 {
9771 fputc ('\n', file);
9772 fprintf (file, " final ");
9773 print_simple_rtl (file, v->final_value);
9774 }
9775
9776 fputc ('\n', file);
9777 }
9778
9779
9780 static void
9781 loop_giv_dump (v, file, verbose)
9782 const struct induction *v;
9783 FILE *file;
9784 int verbose;
9785 {
9786 if (! v || ! file)
9787 return;
9788
9789 if (v->giv_type == DEST_REG)
9790 fprintf (file, "Giv %d: insn %d",
9791 REGNO (v->dest_reg), INSN_UID (v->insn));
9792 else
9793 fprintf (file, "Dest address: insn %d",
9794 INSN_UID (v->insn));
9795
9796 fprintf (file, " src reg %d benefit %d",
9797 REGNO (v->src_reg), v->benefit);
9798 fprintf (file, " lifetime %d",
9799 v->lifetime);
9800
9801 if (v->replaceable)
9802 fprintf (file, " replaceable");
9803
9804 if (v->no_const_addval)
9805 fprintf (file, " ncav");
9806
9807 if (v->ext_dependent)
9808 {
9809 switch (GET_CODE (v->ext_dependent))
9810 {
9811 case SIGN_EXTEND:
9812 fprintf (file, " ext se");
9813 break;
9814 case ZERO_EXTEND:
9815 fprintf (file, " ext ze");
9816 break;
9817 case TRUNCATE:
9818 fprintf (file, " ext tr");
9819 break;
9820 default:
9821 abort ();
9822 }
9823 }
9824
9825 fputc ('\n', file);
9826 fprintf (file, " mult ");
9827 print_simple_rtl (file, v->mult_val);
9828
9829 fputc ('\n', file);
9830 fprintf (file, " add ");
9831 print_simple_rtl (file, v->add_val);
9832
9833 if (verbose && v->final_value)
9834 {
9835 fputc ('\n', file);
9836 fprintf (file, " final ");
9837 print_simple_rtl (file, v->final_value);
9838 }
9839
9840 fputc ('\n', file);
9841 }
9842
9843
9844 void
9845 debug_ivs (loop)
9846 const struct loop *loop;
9847 {
9848 loop_ivs_dump (loop, stderr, 1);
9849 }
9850
9851
9852 void
9853 debug_iv_class (bl)
9854 const struct iv_class *bl;
9855 {
9856 loop_iv_class_dump (bl, stderr, 1);
9857 }
9858
9859
9860 void
9861 debug_biv (v)
9862 const struct induction *v;
9863 {
9864 loop_biv_dump (v, stderr, 1);
9865 }
9866
9867
9868 void
9869 debug_giv (v)
9870 const struct induction *v;
9871 {
9872 loop_giv_dump (v, stderr, 1);
9873 }
9874
9875
9876 #define LOOP_BLOCK_NUM_1(INSN) \
9877 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
9878
9879 /* The notes do not have an assigned block, so look at the next insn. */
9880 #define LOOP_BLOCK_NUM(INSN) \
9881 ((INSN) ? (GET_CODE (INSN) == NOTE \
9882 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
9883 : LOOP_BLOCK_NUM_1 (INSN)) \
9884 : -1)
9885
9886 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
9887
9888 static void
9889 loop_dump_aux (loop, file, verbose)
9890 const struct loop *loop;
9891 FILE *file;
9892 int verbose ATTRIBUTE_UNUSED;
9893 {
9894 rtx label;
9895
9896 if (! loop || ! file)
9897 return;
9898
9899 /* Print diagnostics to compare our concept of a loop with
9900 what the loop notes say. */
9901 if (! PREV_INSN (loop->first->head)
9902 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
9903 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
9904 != NOTE_INSN_LOOP_BEG)
9905 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
9906 INSN_UID (PREV_INSN (loop->first->head)));
9907 if (! NEXT_INSN (loop->last->end)
9908 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
9909 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
9910 != NOTE_INSN_LOOP_END)
9911 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
9912 INSN_UID (NEXT_INSN (loop->last->end)));
9913
9914 if (loop->start)
9915 {
9916 fprintf (file,
9917 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
9918 LOOP_BLOCK_NUM (loop->start),
9919 LOOP_INSN_UID (loop->start),
9920 LOOP_BLOCK_NUM (loop->cont),
9921 LOOP_INSN_UID (loop->cont),
9922 LOOP_BLOCK_NUM (loop->cont),
9923 LOOP_INSN_UID (loop->cont),
9924 LOOP_BLOCK_NUM (loop->vtop),
9925 LOOP_INSN_UID (loop->vtop),
9926 LOOP_BLOCK_NUM (loop->end),
9927 LOOP_INSN_UID (loop->end));
9928 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
9929 LOOP_BLOCK_NUM (loop->top),
9930 LOOP_INSN_UID (loop->top),
9931 LOOP_BLOCK_NUM (loop->scan_start),
9932 LOOP_INSN_UID (loop->scan_start));
9933 fprintf (file, ";; exit_count %d", loop->exit_count);
9934 if (loop->exit_count)
9935 {
9936 fputs (", labels:", file);
9937 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
9938 {
9939 fprintf (file, " %d ",
9940 LOOP_INSN_UID (XEXP (label, 0)));
9941 }
9942 }
9943 fputs ("\n", file);
9944
9945 /* This can happen when a marked loop appears as two nested loops,
9946 say from while (a || b) {}. The inner loop won't match
9947 the loop markers but the outer one will. */
9948 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
9949 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
9950 }
9951 }
9952
9953 /* Call this function from the debugger to dump LOOP. */
9954
9955 void
9956 debug_loop (loop)
9957 const struct loop *loop;
9958 {
9959 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
9960 }
9961
9962 /* Call this function from the debugger to dump LOOPS. */
9963
9964 void
9965 debug_loops (loops)
9966 const struct loops *loops;
9967 {
9968 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
9969 }