1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34       Most of the complexity is in heuristics to decide when it is worthwhile
35       to do these things.  */
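/* A purely illustrative source-level sketch of the transformations described
   above (hypothetical C fragment; this pass actually works on RTL):

	for (i = 0; i < n; i++)
	  a[i] = x * y + b[i];

   becomes, once the invariant product X * Y is hoisted ahead of the loop,

	t = x * y;
	for (i = 0; i < n; i++)
	  a[i] = t + b[i];

   Strength reduction would then rewrite the induction-variable address
   A + I * 4 as a pointer bumped by 4 each iteration, and induction variable
   elimination can remove I itself if only that pointer is still needed.  */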
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
58
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
71
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimal number of prefetch blocks that a loop must consume to make
83    emitting prefetch instructions in the body of the loop worthwhile.  */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
85
86 /* Parameterize some prefetch heuristics so they can be turned on and off
87    easily for performance testing on new architectures.  These can be
88 defined in target-dependent files. */
89
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
94
95 /* Define what we mean by "dense" loads and stores; this value divided by 256
96    is the minimum fraction of memory references that are worth prefetching.  */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
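/* For example, the default value of 220 corresponds to requiring a density of
   about 220/256, i.e. roughly 86%, before prefetch insns are considered
   worthwhile.  */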
100
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
105
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
110
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
116
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
121
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
126
127 /* Do not handle reversed order prefetches (negative stride). */
128 #ifndef PREFETCH_NO_REVERSE_ORDER
129 #define PREFETCH_NO_REVERSE_ORDER 1
130 #endif
131
132 /* Prefetch even if the GIV is not always executed. */
133 #ifndef PREFETCH_NOT_ALWAYS
134 #define PREFETCH_NOT_ALWAYS 0
135 #endif
136
137 /* If the loop requires more prefetches than the target can process in
138 parallel then don't prefetch anything in that loop. */
139 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
140 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
141 #endif
142
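/* The following macros summarize regscan information for a register:
   LOOP_REG_LIFETIME is the span, in luids, over which register REGNO is
   referenced; LOOP_REG_GLOBAL_P is nonzero if that span extends outside
   LOOP; LOOP_REGNO_NREGS is the number of hard registers occupied when a
   value of SET_DEST's mode is stored starting at hard register REGNO, or 1
   for a pseudo.  */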
143 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
144 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
145
146 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
147 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
148 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
149
150 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
151 ((REGNO) < FIRST_PSEUDO_REGISTER \
152 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
153
154
155 /* Vector mapping INSN_UIDs to luids.
156    The luids are like uids but always increase monotonically.
157 We use them to see whether a jump comes from outside a given loop. */
158
159 int *uid_luid;
160
161 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
162 number the insn is contained in. */
163
164 struct loop **uid_loop;
165
166 /* 1 + largest uid of any insn. */
167
168 int max_uid_for_loop;
169
170 /* 1 + luid of last insn. */
171
172 static int max_luid;
173
174 /* Number of loops detected in current function. Used as index to the
175 next few tables. */
176
177 static int max_loop_num;
178
179 /* Bound on pseudo register number before loop optimization.
180 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
181 unsigned int max_reg_before_loop;
182
183 /* The value to pass to the next call of reg_scan_update. */
184 static int loop_max_reg;
185
186 #define obstack_chunk_alloc xmalloc
187 #define obstack_chunk_free free
188 \f
189 /* During the analysis of a loop, a chain of `struct movable's
190 is made to record all the movable insns found.
191 Then the entire chain can be scanned to decide which to move. */
192
193 struct movable
194 {
195 rtx insn; /* A movable insn */
196 rtx set_src; /* The expression this reg is set from. */
197 rtx set_dest; /* The destination of this SET. */
198 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
199 of any registers used within the LIBCALL. */
200 int consec; /* Number of consecutive following insns
201 that must be moved with this one. */
202 unsigned int regno; /* The register it sets */
203 short lifetime; /* lifetime of that register;
204 may be adjusted when matching movables
205 that load the same value are found. */
206 short savings; /* Number of insns we can move for this reg,
207 including other movables that force this
208 or match this one. */
209 unsigned int cond : 1; /* 1 if only conditionally movable */
210 unsigned int force : 1; /* 1 means MUST move this insn */
211 unsigned int global : 1; /* 1 means reg is live outside this loop */
212 /* If PARTIAL is 1, GLOBAL means something different:
213 that the reg is live outside the range from where it is set
214 to the following label. */
215 unsigned int done : 1; /* 1 inhibits further processing of this */
216
217 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
218 In particular, moving it does not make it
219 invariant. */
220 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
221 load SRC, rather than copying INSN. */
222 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
223 first insn of a consecutive sets group. */
224 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
225 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
226 that we should avoid changing when clearing
227 the rest of the reg. */
228 struct movable *match; /* First entry for same value */
229 struct movable *forces; /* An insn that must be moved if this is */
230 struct movable *next;
231 };
232
233
234 FILE *loop_dump_stream;
235
236 /* Forward declarations. */
237
238 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
239 static void mark_loop_jump PARAMS ((rtx, struct loop *));
240 static void prescan_loop PARAMS ((struct loop *));
241 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
242 static int consec_sets_invariant_p PARAMS ((const struct loop *,
243 rtx, int, rtx));
244 static int labels_in_range_p PARAMS ((rtx, int));
245 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
246 static void note_addr_stored PARAMS ((rtx, rtx, void *));
247 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
248 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
249 static void scan_loop PARAMS ((struct loop*, int));
250 #if 0
251 static void replace_call_address PARAMS ((rtx, rtx, rtx));
252 #endif
253 static rtx skip_consec_insns PARAMS ((rtx, int));
254 static int libcall_benefit PARAMS ((rtx));
255 static void ignore_some_movables PARAMS ((struct loop_movables *));
256 static void force_movables PARAMS ((struct loop_movables *));
257 static void combine_movables PARAMS ((struct loop_movables *,
258 struct loop_regs *));
259 static int num_unmoved_movables PARAMS ((const struct loop *));
260 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
261 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
262 struct loop_regs *));
263 static void add_label_notes PARAMS ((rtx, rtx));
264 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
265 int, int));
266 static void loop_movables_add PARAMS((struct loop_movables *,
267 struct movable *));
268 static void loop_movables_free PARAMS((struct loop_movables *));
269 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
270 static void loop_bivs_find PARAMS((struct loop *));
271 static void loop_bivs_init_find PARAMS((struct loop *));
272 static void loop_bivs_check PARAMS((struct loop *));
273 static void loop_givs_find PARAMS((struct loop *));
274 static void loop_givs_check PARAMS((struct loop *));
275 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
276 int, int));
277 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
278 struct induction *, rtx));
279 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
280 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
281 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
282 rtx *));
283 static void loop_ivs_free PARAMS((struct loop *));
284 static void strength_reduce PARAMS ((struct loop *, int));
285 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
286 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
287 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
288 static void record_biv PARAMS ((struct loop *, struct induction *,
289 rtx, rtx, rtx, rtx, rtx *,
290 int, int));
291 static void check_final_value PARAMS ((const struct loop *,
292 struct induction *));
293 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
294 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
295 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
296 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
297 static void record_giv PARAMS ((const struct loop *, struct induction *,
298 rtx, rtx, rtx, rtx, rtx, rtx, int,
299 enum g_types, int, int, rtx *));
300 static void update_giv_derive PARAMS ((const struct loop *, rtx));
301 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
302 struct loop_info *));
303 static int basic_induction_var PARAMS ((const struct loop *, rtx,
304 enum machine_mode, rtx, rtx,
305 rtx *, rtx *, rtx **));
306 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
307 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
308 rtx *, rtx *, rtx *, int, int *,
309 enum machine_mode));
310 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
311 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
312 static int check_dbra_loop PARAMS ((struct loop *, int));
313 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
314 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
315 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
316 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
317 static int product_cheap_p PARAMS ((rtx, rtx));
318 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
319 int, int, int));
320 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
321 struct iv_class *, int,
322 basic_block, rtx));
323 static int last_use_this_basic_block PARAMS ((rtx, rtx));
324 static void record_initial PARAMS ((rtx, rtx, void *));
325 static void update_reg_last_use PARAMS ((rtx, rtx));
326 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
327 static void loop_regs_scan PARAMS ((const struct loop *, int));
328 static int count_insns_in_loop PARAMS ((const struct loop *));
329 static void load_mems PARAMS ((const struct loop *));
330 static int insert_loop_mem PARAMS ((rtx *, void *));
331 static int replace_loop_mem PARAMS ((rtx *, void *));
332 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
333 static int replace_loop_reg PARAMS ((rtx *, void *));
334 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
335 static void note_reg_stored PARAMS ((rtx, rtx, void *));
336 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
337 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
338 unsigned int));
339 static int replace_label PARAMS ((rtx *, void *));
340 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
341 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
342 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
343 static void loop_regs_update PARAMS ((const struct loop *, rtx));
344 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
345
346 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
347 rtx, rtx));
348 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
349 basic_block, rtx, rtx));
350 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
351 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
352
353 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
354 static void loop_delete_insns PARAMS ((rtx, rtx));
355 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
356 void debug_ivs PARAMS ((const struct loop *));
357 void debug_iv_class PARAMS ((const struct iv_class *));
358 void debug_biv PARAMS ((const struct induction *));
359 void debug_giv PARAMS ((const struct induction *));
360 void debug_loop PARAMS ((const struct loop *));
361 void debug_loops PARAMS ((const struct loops *));
362
363 typedef struct rtx_pair
364 {
365 rtx r1;
366 rtx r2;
367 } rtx_pair;
368
369 typedef struct loop_replace_args
370 {
371 rtx match;
372 rtx replacement;
373 rtx insn;
374 } loop_replace_args;
375
376 /* Nonzero iff INSN is between START and END, inclusive. */
377 #define INSN_IN_RANGE_P(INSN, START, END) \
378 (INSN_UID (INSN) < max_uid_for_loop \
379 && INSN_LUID (INSN) >= INSN_LUID (START) \
380 && INSN_LUID (INSN) <= INSN_LUID (END))
381
382 /* Indirect_jump_in_function is computed once per function. */
383 static int indirect_jump_in_function;
384 static int indirect_jump_in_function_p PARAMS ((rtx));
385
386 static int compute_luids PARAMS ((rtx, rtx, int));
387
388 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
389 struct induction *,
390 rtx));
391 \f
392 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
393 copy the value of the strength reduced giv to its original register. */
394 static int copy_cost;
395
396 /* Cost of using a register, to normalize the benefits of a giv. */
397 static int reg_address_cost;
398
399 void
400 init_loop ()
401 {
402 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
403
404 reg_address_cost = address_cost (reg, SImode);
405
406 copy_cost = COSTS_N_INSNS (1);
407 }
408 \f
409 /* Compute the mapping from uids to luids.
410 LUIDs are numbers assigned to insns, like uids,
411 except that luids increase monotonically through the code.
412 Start at insn START and stop just before END. Assign LUIDs
413 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
414 static int
415 compute_luids (start, end, prev_luid)
416 rtx start, end;
417 int prev_luid;
418 {
419 int i;
420 rtx insn;
421
422 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
423 {
424 if (INSN_UID (insn) >= max_uid_for_loop)
425 continue;
426 /* Don't assign luids to line-number NOTEs, so that the distance in
427 luids between two insns is not affected by -g. */
428 if (GET_CODE (insn) != NOTE
429 || NOTE_LINE_NUMBER (insn) <= 0)
430 uid_luid[INSN_UID (insn)] = ++i;
431 else
432 /* Give a line number note the same luid as preceding insn. */
433 uid_luid[INSN_UID (insn)] = i;
434 }
435 return i + 1;
436 }
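/* For example, loop_optimize below calls compute_luids (f, NULL_RTX, 0) to
   number every insn in the function and records the result in max_luid.  */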
437 \f
438 /* Entry point of this file. Perform loop optimization
439 on the current function. F is the first insn of the function
440 and DUMPFILE is a stream for output of a trace of actions taken
441 (or 0 if none should be output). */
442
443 void
444 loop_optimize (f, dumpfile, flags)
445 /* f is the first instruction of a chain of insns for one function */
446 rtx f;
447 FILE *dumpfile;
448 int flags;
449 {
450 rtx insn;
451 int i;
452 struct loops loops_data;
453 struct loops *loops = &loops_data;
454 struct loop_info *loops_info;
455
456 loop_dump_stream = dumpfile;
457
458 init_recog_no_volatile ();
459
460 max_reg_before_loop = max_reg_num ();
461 loop_max_reg = max_reg_before_loop;
462
463 regs_may_share = 0;
464
465 /* Count the number of loops. */
466
467 max_loop_num = 0;
468 for (insn = f; insn; insn = NEXT_INSN (insn))
469 {
470 if (GET_CODE (insn) == NOTE
471 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
472 max_loop_num++;
473 }
474
475 /* Don't waste time if no loops. */
476 if (max_loop_num == 0)
477 return;
478
479 loops->num = max_loop_num;
480
481 /* Get size to use for tables indexed by uids.
482 Leave some space for labels allocated by find_and_verify_loops. */
483 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
484
485 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
486 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
487 sizeof (struct loop *));
488
489 /* Allocate storage for array of loops. */
490 loops->array = (struct loop *)
491 xcalloc (loops->num, sizeof (struct loop));
492
493 /* Find and process each loop.
494 First, find them, and record them in order of their beginnings. */
495 find_and_verify_loops (f, loops);
496
497 /* Allocate and initialize auxiliary loop information. */
498 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
499 for (i = 0; i < loops->num; i++)
500 loops->array[i].aux = loops_info + i;
501
502 /* Now find all register lifetimes. This must be done after
503 find_and_verify_loops, because it might reorder the insns in the
504 function. */
505 reg_scan (f, max_reg_before_loop, 1);
506
507 /* This must occur after reg_scan so that registers created by gcse
508 will have entries in the register tables.
509
510 We could have added a call to reg_scan after gcse_main in toplev.c,
511 but moving this call to init_alias_analysis is more efficient. */
512 init_alias_analysis ();
513
514 /* See if we went too far. Note that get_max_uid already returns
515    one more than the maximum uid of all insns.  */
516 if (get_max_uid () > max_uid_for_loop)
517 abort ();
518 /* Now reset it to the actual size we need. See above. */
519 max_uid_for_loop = get_max_uid ();
520
521 /* find_and_verify_loops has already called compute_luids, but it
522 might have rearranged code afterwards, so we need to recompute
523 the luids now. */
524 max_luid = compute_luids (f, NULL_RTX, 0);
525
526 /* Don't leave gaps in uid_luid for insns that have been
527 deleted. It is possible that the first or last insn
528 using some register has been deleted by cross-jumping.
529 Make sure that uid_luid for that former insn's uid
530 points to the general area where that insn used to be. */
531 for (i = 0; i < max_uid_for_loop; i++)
532 {
533 uid_luid[0] = uid_luid[i];
534 if (uid_luid[0] != 0)
535 break;
536 }
537 for (i = 0; i < max_uid_for_loop; i++)
538 if (uid_luid[i] == 0)
539 uid_luid[i] = uid_luid[i - 1];
540
541   /* Determine if the function has an indirect jump.  On some systems
542 this prevents low overhead loop instructions from being used. */
543 indirect_jump_in_function = indirect_jump_in_function_p (f);
544
545 /* Now scan the loops, last ones first, since this means inner ones are done
546 before outer ones. */
547 for (i = max_loop_num - 1; i >= 0; i--)
548 {
549 struct loop *loop = &loops->array[i];
550
551 if (! loop->invalid && loop->end)
552 scan_loop (loop, flags);
553 }
554
555 /* If there were lexical blocks inside the loop, they have been
556 replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
557 and NOTE_INSN_BLOCK_END for each such block. We must duplicate
558 the BLOCKs as well. */
559 if (write_symbols != NO_DEBUG)
560 reorder_blocks ();
561
562 end_alias_analysis ();
563
564 /* Clean up. */
565 free (uid_luid);
566 free (uid_loop);
567 free (loops_info);
568 free (loops->array);
569 }
570 \f
571 /* Returns the next insn, in execution order, after INSN. START and
572 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
573 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
574 insn-stream; it is used with loops that are entered near the
575 bottom. */
576
577 static rtx
578 next_insn_in_loop (loop, insn)
579 const struct loop *loop;
580 rtx insn;
581 {
582 insn = NEXT_INSN (insn);
583
584 if (insn == loop->end)
585 {
586 if (loop->top)
587 /* Go to the top of the loop, and continue there. */
588 insn = loop->top;
589 else
590 /* We're done. */
591 insn = NULL_RTX;
592 }
593
594 if (insn == loop->scan_start)
595 /* We're done. */
596 insn = NULL_RTX;
597
598 return insn;
599 }
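/* Typical use, as in scan_loop below: walk the loop body in execution order,
   starting from LOOP->SCAN_START:

	for (p = next_insn_in_loop (loop, loop->scan_start);
	     p != NULL_RTX;
	     p = next_insn_in_loop (loop, p))
	  ...  */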
600
601 /* Optimize one loop described by LOOP. */
602
603 /* ??? Could also move memory writes out of loops if the destination address
604 is invariant, the source is invariant, the memory write is not volatile,
605 and if we can prove that no read inside the loop can read this address
606 before the write occurs. If there is a read of this address after the
607 write, then we can also mark the memory read as invariant. */
608
609 static void
610 scan_loop (loop, flags)
611 struct loop *loop;
612 int flags;
613 {
614 struct loop_info *loop_info = LOOP_INFO (loop);
615 struct loop_regs *regs = LOOP_REGS (loop);
616 int i;
617 rtx loop_start = loop->start;
618 rtx loop_end = loop->end;
619 rtx p;
620 /* 1 if we are scanning insns that could be executed zero times. */
621 int maybe_never = 0;
622 /* 1 if we are scanning insns that might never be executed
623 due to a subroutine call which might exit before they are reached. */
624 int call_passed = 0;
625 /* Jump insn that enters the loop, or 0 if control drops in. */
626 rtx loop_entry_jump = 0;
627 /* Number of insns in the loop. */
628 int insn_count;
629 int tem;
630 rtx temp, update_start, update_end;
631 /* The SET from an insn, if it is the only SET in the insn. */
632 rtx set, set1;
633 /* Chain describing insns movable in current loop. */
634 struct loop_movables *movables = LOOP_MOVABLES (loop);
635 /* Ratio of extra register life span we can justify
636 for saving an instruction. More if loop doesn't call subroutines
637 since in that case saving an insn makes more difference
638 and more registers are available. */
639 int threshold;
640 /* Nonzero if we are scanning instructions in a sub-loop. */
641 int loop_depth = 0;
642
643 loop->top = 0;
644
645 movables->head = 0;
646 movables->last = 0;
647
648 /* Determine whether this loop starts with a jump down to a test at
649 the end. This will occur for a small number of loops with a test
650 that is too complex to duplicate in front of the loop.
651
652 We search for the first insn or label in the loop, skipping NOTEs.
653 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
654 (because we might have a loop executed only once that contains a
655 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
656 (in case we have a degenerate loop).
657
658 Note that if we mistakenly think that a loop is entered at the top
659 when, in fact, it is entered at the exit test, the only effect will be
660 slightly poorer optimization. Making the opposite error can generate
661 incorrect code. Since very few loops now start with a jump to the
662 exit test, the code here to detect that case is very conservative. */
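  /* Roughly, such a bottom-entered loop looks like this (illustration only):

	NOTE_INSN_LOOP_BEG
	  jump L2			<- loop_entry_jump below
	L1:				<- will become LOOP->TOP
	  loop body
	L2:				<- will become LOOP->SCAN_START
	  exit test; conditional jump back to L1
	NOTE_INSN_LOOP_END  */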
663
664 for (p = NEXT_INSN (loop_start);
665 p != loop_end
666 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
667 && (GET_CODE (p) != NOTE
668 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
669 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
670 p = NEXT_INSN (p))
671 ;
672
673 loop->scan_start = p;
674
675 /* If loop end is the end of the current function, then emit a
676 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
677 note insn. This is the position we use when sinking insns out of
678 the loop. */
679 if (NEXT_INSN (loop->end) != 0)
680 loop->sink = NEXT_INSN (loop->end);
681 else
682 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
683
684 /* Set up variables describing this loop. */
685 prescan_loop (loop);
686 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
687
688 /* If loop has a jump before the first label,
689 the true entry is the target of that jump.
690 Start scan from there.
691 But record in LOOP->TOP the place where the end-test jumps
692 back to so we can scan that after the end of the loop. */
693 if (GET_CODE (p) == JUMP_INSN)
694 {
695 loop_entry_jump = p;
696
697 /* Loop entry must be unconditional jump (and not a RETURN) */
698 if (any_uncondjump_p (p)
699 && JUMP_LABEL (p) != 0
700 /* Check to see whether the jump actually
701 jumps out of the loop (meaning it's no loop).
702 This case can happen for things like
703 do {..} while (0). If this label was generated previously
704 by loop, we can't tell anything about it and have to reject
705 the loop. */
706 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
707 {
708 loop->top = next_label (loop->scan_start);
709 loop->scan_start = JUMP_LABEL (p);
710 }
711 }
712
713 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
714 as required by loop_reg_used_before_p. So skip such loops. (This
715 test may never be true, but it's best to play it safe.)
716
717 Also, skip loops where we do not start scanning at a label. This
718 test also rejects loops starting with a JUMP_INSN that failed the
719 test above. */
720
721 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
722 || GET_CODE (loop->scan_start) != CODE_LABEL)
723 {
724 if (loop_dump_stream)
725 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
726 INSN_UID (loop_start), INSN_UID (loop_end));
727 return;
728 }
729
730 /* Allocate extra space for REGs that might be created by load_mems.
731 We allocate a little extra slop as well, in the hopes that we
732 won't have to reallocate the regs array. */
733 loop_regs_scan (loop, loop_info->mems_idx + 16);
734 insn_count = count_insns_in_loop (loop);
735
736 if (loop_dump_stream)
737 {
738 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
739 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
740 if (loop->cont)
741 fprintf (loop_dump_stream, "Continue at insn %d.\n",
742 INSN_UID (loop->cont));
743 }
744
745 /* Scan through the loop finding insns that are safe to move.
746 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
747 this reg will be considered invariant for subsequent insns.
748 We consider whether subsequent insns use the reg
749 in deciding whether it is worth actually moving.
750
751 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
752 and therefore it is possible that the insns we are scanning
753 would never be executed. At such times, we must make sure
754 that it is safe to execute the insn once instead of zero times.
755 When MAYBE_NEVER is 0, all insns will be executed at least once
756 so that is not a problem. */
757
758 for (p = next_insn_in_loop (loop, loop->scan_start);
759 p != NULL_RTX;
760 p = next_insn_in_loop (loop, p))
761 {
762 if (GET_CODE (p) == INSN
763 && (set = single_set (p))
764 && GET_CODE (SET_DEST (set)) == REG
765 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
766 && SET_DEST (set) != pic_offset_table_rtx
767 #endif
768 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
769 {
770 int tem1 = 0;
771 int tem2 = 0;
772 int move_insn = 0;
773 rtx src = SET_SRC (set);
774 rtx dependencies = 0;
775
776 /* Figure out what to use as a source of this insn. If a REG_EQUIV
777 note is given or if a REG_EQUAL note with a constant operand is
778 specified, use it as the source and mark that we should move
779	     this insn by calling emit_move_insn rather than duplicating the
780 insn.
781
782 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
783 is present. */
784 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
785 if (temp)
786 src = XEXP (temp, 0), move_insn = 1;
787 else
788 {
789 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
790 if (temp && CONSTANT_P (XEXP (temp, 0)))
791 src = XEXP (temp, 0), move_insn = 1;
792 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
793 {
794 src = XEXP (temp, 0);
795 /* A libcall block can use regs that don't appear in
796 the equivalent expression. To move the libcall,
797 we must move those regs too. */
798 dependencies = libcall_other_reg (p, src);
799 }
800 }
801
802	  /* For parallels, add any possible uses to the dependencies, as we
803	     can't move the insn without resolving them first.  */
804 if (GET_CODE (PATTERN (p)) == PARALLEL)
805 {
806 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
807 {
808 rtx x = XVECEXP (PATTERN (p), 0, i);
809 if (GET_CODE (x) == USE)
810 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
811 }
812 }
813
814 /* Don't try to optimize a register that was made
815 by loop-optimization for an inner loop.
816 We don't know its life-span, so we can't compute the benefit. */
817 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
818 ;
819 else if (/* The register is used in basic blocks other
820 than the one where it is set (meaning that
821 something after this point in the loop might
822 depend on its value before the set). */
823 ! reg_in_basic_block_p (p, SET_DEST (set))
824 /* And the set is not guaranteed to be executed once
825 the loop starts, or the value before the set is
826 needed before the set occurs...
827
828 ??? Note we have quadratic behaviour here, mitigated
829 by the fact that the previous test will often fail for
830 large loops. Rather than re-scanning the entire loop
831 each time for register usage, we should build tables
832 of the register usage and use them here instead. */
833 && (maybe_never
834 || loop_reg_used_before_p (loop, set, p)))
835 /* It is unsafe to move the set.
836
837 This code used to consider it OK to move a set of a variable
838 which was not created by the user and not used in an exit test.
839 That behavior is incorrect and was removed. */
840 ;
841 else if ((tem = loop_invariant_p (loop, src))
842 && (dependencies == 0
843 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
844 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
845 || (tem1
846 = consec_sets_invariant_p
847 (loop, SET_DEST (set),
848 regs->array[REGNO (SET_DEST (set))].set_in_loop,
849 p)))
850 /* If the insn can cause a trap (such as divide by zero),
851 can't move it unless it's guaranteed to be executed
852 once loop is entered. Even a function call might
853 prevent the trap insn from being reached
854 (since it might exit!) */
855 && ! ((maybe_never || call_passed)
856 && may_trap_p (src)))
857 {
858 struct movable *m;
859 int regno = REGNO (SET_DEST (set));
860
861	      /* A potential lossage is the case where two insns can be
862		 combined as long as they are both in the loop, but
863 we move one of them outside the loop. For large loops,
864 this can lose. The most common case of this is the address
865 of a function being called.
866
867 Therefore, if this register is marked as being used exactly
868 once if we are in a loop with calls (a "large loop"), see if
869 we can replace the usage of this register with the source
870 of this SET. If we can, delete this insn.
871
872 Don't do this if P has a REG_RETVAL note or if we have
873 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
874
875 if (loop_info->has_call
876 && regs->array[regno].single_usage != 0
877 && regs->array[regno].single_usage != const0_rtx
878 && REGNO_FIRST_UID (regno) == INSN_UID (p)
879 && (REGNO_LAST_UID (regno)
880 == INSN_UID (regs->array[regno].single_usage))
881 && regs->array[regno].set_in_loop == 1
882 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
883 && ! side_effects_p (SET_SRC (set))
884 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
885 && (! SMALL_REGISTER_CLASSES
886 || (! (GET_CODE (SET_SRC (set)) == REG
887 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
888 /* This test is not redundant; SET_SRC (set) might be
889 a call-clobbered register and the life of REGNO
890 might span a call. */
891 && ! modified_between_p (SET_SRC (set), p,
892 regs->array[regno].single_usage)
893 && no_labels_between_p (p, regs->array[regno].single_usage)
894 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
895 regs->array[regno].single_usage))
896 {
897 /* Replace any usage in a REG_EQUAL note. Must copy the
898 new source, so that we don't get rtx sharing between the
899 SET_SOURCE and REG_NOTES of insn p. */
900 REG_NOTES (regs->array[regno].single_usage)
901 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
902 SET_DEST (set), copy_rtx (SET_SRC (set)));
903
904 delete_insn (p);
905 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
906 regs->array[regno+i].set_in_loop = 0;
907 continue;
908 }
909
910 m = (struct movable *) xmalloc (sizeof (struct movable));
911 m->next = 0;
912 m->insn = p;
913 m->set_src = src;
914 m->dependencies = dependencies;
915 m->set_dest = SET_DEST (set);
916 m->force = 0;
917 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
918 m->done = 0;
919 m->forces = 0;
920 m->partial = 0;
921 m->move_insn = move_insn;
922 m->move_insn_first = 0;
923 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
924 m->savemode = VOIDmode;
925 m->regno = regno;
926 /* Set M->cond if either loop_invariant_p
927 or consec_sets_invariant_p returned 2
928 (only conditionally invariant). */
929 m->cond = ((tem | tem1 | tem2) > 1);
930 m->global = LOOP_REG_GLOBAL_P (loop, regno);
931 m->match = 0;
932 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
933 m->savings = regs->array[regno].n_times_set;
934 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
935 m->savings += libcall_benefit (p);
936 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
937 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
938 /* Add M to the end of the chain MOVABLES. */
939 loop_movables_add (movables, m);
940
941 if (m->consec > 0)
942 {
943 /* It is possible for the first instruction to have a
944 REG_EQUAL note but a non-invariant SET_SRC, so we must
945 remember the status of the first instruction in case
946 the last instruction doesn't have a REG_EQUAL note. */
947 m->move_insn_first = m->move_insn;
948
949 /* Skip this insn, not checking REG_LIBCALL notes. */
950 p = next_nonnote_insn (p);
951 /* Skip the consecutive insns, if there are any. */
952 p = skip_consec_insns (p, m->consec);
953 /* Back up to the last insn of the consecutive group. */
954 p = prev_nonnote_insn (p);
955
956 /* We must now reset m->move_insn, m->is_equiv, and possibly
957 m->set_src to correspond to the effects of all the
958 insns. */
959 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
960 if (temp)
961 m->set_src = XEXP (temp, 0), m->move_insn = 1;
962 else
963 {
964 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
965 if (temp && CONSTANT_P (XEXP (temp, 0)))
966 m->set_src = XEXP (temp, 0), m->move_insn = 1;
967 else
968 m->move_insn = 0;
969
970 }
971 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
972 }
973 }
974 /* If this register is always set within a STRICT_LOW_PART
975 or set to zero, then its high bytes are constant.
976 So clear them outside the loop and within the loop
977 just load the low bytes.
978 We must check that the machine has an instruction to do so.
979 Also, if the value loaded into the register
980 depends on the same register, this cannot be done. */
981 else if (SET_SRC (set) == const0_rtx
982 && GET_CODE (NEXT_INSN (p)) == INSN
983 && (set1 = single_set (NEXT_INSN (p)))
984 && GET_CODE (set1) == SET
985 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
986 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
987 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
988 == SET_DEST (set))
989 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
990 {
991 int regno = REGNO (SET_DEST (set));
992 if (regs->array[regno].set_in_loop == 2)
993 {
994 struct movable *m;
995 m = (struct movable *) xmalloc (sizeof (struct movable));
996 m->next = 0;
997 m->insn = p;
998 m->set_dest = SET_DEST (set);
999 m->dependencies = 0;
1000 m->force = 0;
1001 m->consec = 0;
1002 m->done = 0;
1003 m->forces = 0;
1004 m->move_insn = 0;
1005 m->move_insn_first = 0;
1006 m->partial = 1;
1007 /* If the insn may not be executed on some cycles,
1008 we can't clear the whole reg; clear just high part.
1009 Not even if the reg is used only within this loop.
1010 Consider this:
1011 while (1)
1012 while (s != t) {
1013 if (foo ()) x = *s;
1014 use (x);
1015 }
1016 Clearing x before the inner loop could clobber a value
1017 being saved from the last time around the outer loop.
1018 However, if the reg is not used outside this loop
1019 and all uses of the register are in the same
1020 basic block as the store, there is no problem.
1021
1022 If this insn was made by loop, we don't know its
1023 INSN_LUID and hence must make a conservative
1024 assumption. */
1025 m->global = (INSN_UID (p) >= max_uid_for_loop
1026 || LOOP_REG_GLOBAL_P (loop, regno)
1027 || (labels_in_range_p
1028 (p, REGNO_FIRST_LUID (regno))));
1029 if (maybe_never && m->global)
1030 m->savemode = GET_MODE (SET_SRC (set1));
1031 else
1032 m->savemode = VOIDmode;
1033 m->regno = regno;
1034 m->cond = 0;
1035 m->match = 0;
1036 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1037 m->savings = 1;
1038 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1039 regs->array[regno+i].set_in_loop = -1;
1040 /* Add M to the end of the chain MOVABLES. */
1041 loop_movables_add (movables, m);
1042 }
1043 }
1044 }
1045 /* Past a call insn, we get to insns which might not be executed
1046 because the call might exit. This matters for insns that trap.
1047 Constant and pure call insns always return, so they don't count. */
1048 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1049 call_passed = 1;
1050 /* Past a label or a jump, we get to insns for which we
1051 can't count on whether or how many times they will be
1052 executed during each iteration. Therefore, we can
1053 only move out sets of trivial variables
1054 (those not used after the loop). */
1055 /* Similar code appears twice in strength_reduce. */
1056 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1057 /* If we enter the loop in the middle, and scan around to the
1058 beginning, don't set maybe_never for that. This must be an
1059 unconditional jump, otherwise the code at the top of the
1060 loop might never be executed. Unconditional jumps are
1061 followed by a barrier then the loop_end. */
1062 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1063 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1064 && any_uncondjump_p (p)))
1065 maybe_never = 1;
1066 else if (GET_CODE (p) == NOTE)
1067 {
1068 /* At the virtual top of a converted loop, insns are again known to
1069 be executed: logically, the loop begins here even though the exit
1070 code has been duplicated. */
1071 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1072 maybe_never = call_passed = 0;
1073 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1074 loop_depth++;
1075 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1076 loop_depth--;
1077 }
1078 }
1079
1080 /* If one movable subsumes another, ignore that other. */
1081
1082 ignore_some_movables (movables);
1083
1084   /* For each movable insn, see if the reg that it loads feeds directly,
1085      at the point where it dies, into another conditionally movable insn.
1086 If so, record that the second insn "forces" the first one,
1087 since the second can be moved only if the first is. */
1088
1089 force_movables (movables);
1090
1091 /* See if there are multiple movable insns that load the same value.
1092 If there are, make all but the first point at the first one
1093 through the `match' field, and add the priorities of them
1094 all together as the priority of the first. */
1095
1096 combine_movables (movables, regs);
1097
1098 /* Now consider each movable insn to decide whether it is worth moving.
1099 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1100
1101      Generally this increases code size, so do not move movables when
1102 optimizing for code size. */
1103
1104 if (! optimize_size)
1105 move_movables (loop, movables, threshold, insn_count);
1106
1107   /* Now candidates that are still negative are those not moved.
1108 Change regs->array[I].set_in_loop to indicate that those are not actually
1109 invariant. */
1110 for (i = 0; i < regs->num; i++)
1111 if (regs->array[i].set_in_loop < 0)
1112 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1113
1114 /* Now that we've moved some things out of the loop, we might be able to
1115 hoist even more memory references. */
1116 load_mems (loop);
1117
1118 /* Recalculate regs->array if load_mems has created new registers. */
1119 if (max_reg_num () > regs->num)
1120 loop_regs_scan (loop, 0);
1121
1122 for (update_start = loop_start;
1123 PREV_INSN (update_start)
1124 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1125 update_start = PREV_INSN (update_start))
1126 ;
1127 update_end = NEXT_INSN (loop_end);
1128
1129 reg_scan_update (update_start, update_end, loop_max_reg);
1130 loop_max_reg = max_reg_num ();
1131
1132 if (flag_strength_reduce)
1133 {
1134 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1135 /* Ensure our label doesn't go away. */
1136 LABEL_NUSES (update_end)++;
1137
1138 strength_reduce (loop, flags);
1139
1140 reg_scan_update (update_start, update_end, loop_max_reg);
1141 loop_max_reg = max_reg_num ();
1142
1143 if (update_end && GET_CODE (update_end) == CODE_LABEL
1144 && --LABEL_NUSES (update_end) == 0)
1145 delete_related_insns (update_end);
1146 }
1147
1148
1149   /* The movable information was needed for strength reduction; free it now.  */
1150 loop_movables_free (movables);
1151
1152 free (regs->array);
1153 regs->array = 0;
1154 regs->num = 0;
1155 }
1156 \f
1157 /* Add elements to *OUTPUT to record all the pseudo-regs
1158 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1159
1160 void
1161 record_excess_regs (in_this, not_in_this, output)
1162 rtx in_this, not_in_this;
1163 rtx *output;
1164 {
1165 enum rtx_code code;
1166 const char *fmt;
1167 int i;
1168
1169 code = GET_CODE (in_this);
1170
1171 switch (code)
1172 {
1173 case PC:
1174 case CC0:
1175 case CONST_INT:
1176 case CONST_DOUBLE:
1177 case CONST:
1178 case SYMBOL_REF:
1179 case LABEL_REF:
1180 return;
1181
1182 case REG:
1183 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1184 && ! reg_mentioned_p (in_this, not_in_this))
1185 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1186 return;
1187
1188 default:
1189 break;
1190 }
1191
1192 fmt = GET_RTX_FORMAT (code);
1193 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1194 {
1195 int j;
1196
1197 switch (fmt[i])
1198 {
1199 case 'E':
1200 for (j = 0; j < XVECLEN (in_this, i); j++)
1201 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1202 break;
1203
1204 case 'e':
1205 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1206 break;
1207 }
1208 }
1209 }
1210 \f
1211 /* Check what regs are referred to in the libcall block ending with INSN,
1212 aside from those mentioned in the equivalent value.
1213 If there are none, return 0.
1214 If there are one or more, return an EXPR_LIST containing all of them. */
1215
1216 rtx
1217 libcall_other_reg (insn, equiv)
1218 rtx insn, equiv;
1219 {
1220 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1221 rtx p = XEXP (note, 0);
1222 rtx output = 0;
1223
1224 /* First, find all the regs used in the libcall block
1225 that are not mentioned as inputs to the result. */
1226
1227 while (p != insn)
1228 {
1229 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1230 || GET_CODE (p) == CALL_INSN)
1231 record_excess_regs (PATTERN (p), equiv, &output);
1232 p = NEXT_INSN (p);
1233 }
1234
1235 return output;
1236 }
1237 \f
1238 /* Return 1 if all uses of REG
1239 are between INSN and the end of the basic block. */
1240
1241 static int
1242 reg_in_basic_block_p (insn, reg)
1243 rtx insn, reg;
1244 {
1245 int regno = REGNO (reg);
1246 rtx p;
1247
1248 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1249 return 0;
1250
1251 /* Search this basic block for the already recorded last use of the reg. */
1252 for (p = insn; p; p = NEXT_INSN (p))
1253 {
1254 switch (GET_CODE (p))
1255 {
1256 case NOTE:
1257 break;
1258
1259 case INSN:
1260 case CALL_INSN:
1261 /* Ordinary insn: if this is the last use, we win. */
1262 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1263 return 1;
1264 break;
1265
1266 case JUMP_INSN:
1267 /* Jump insn: if this is the last use, we win. */
1268 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1269 return 1;
1270 /* Otherwise, it's the end of the basic block, so we lose. */
1271 return 0;
1272
1273 case CODE_LABEL:
1274 case BARRIER:
1275 /* It's the end of the basic block, so we lose. */
1276 return 0;
1277
1278 default:
1279 break;
1280 }
1281 }
1282
1283 /* The "last use" that was recorded can't be found after the first
1284 use. This can happen when the last use was deleted while
1285 processing an inner loop, this inner loop was then completely
1286 unrolled, and the outer loop is always exited after the inner loop,
1287 so that everything after the first use becomes a single basic block. */
1288 return 1;
1289 }
1290 \f
1291 /* Compute the benefit of eliminating the insns in the block whose
1292    last insn is LAST.  This may be a group of insns used to compute a
1293    value directly, or it may contain a library call.  */
1294
1295 static int
1296 libcall_benefit (last)
1297 rtx last;
1298 {
1299 rtx insn;
1300 int benefit = 0;
1301
1302 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1303 insn != last; insn = NEXT_INSN (insn))
1304 {
1305 if (GET_CODE (insn) == CALL_INSN)
1306 benefit += 10; /* Assume at least this many insns in a library
1307 routine. */
1308 else if (GET_CODE (insn) == INSN
1309 && GET_CODE (PATTERN (insn)) != USE
1310 && GET_CODE (PATTERN (insn)) != CLOBBER)
1311 benefit++;
1312 }
1313
1314 return benefit;
1315 }
1316 \f
1317 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1318
1319 static rtx
1320 skip_consec_insns (insn, count)
1321 rtx insn;
1322 int count;
1323 {
1324 for (; count > 0; count--)
1325 {
1326 rtx temp;
1327
1328 /* If first insn of libcall sequence, skip to end. */
1329 /* Do this at start of loop, since INSN is guaranteed to
1330 be an insn here. */
1331 if (GET_CODE (insn) != NOTE
1332 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1333 insn = XEXP (temp, 0);
1334
1335 do
1336 insn = NEXT_INSN (insn);
1337 while (GET_CODE (insn) == NOTE);
1338 }
1339
1340 return insn;
1341 }
1342
1343 /* Ignore any movable whose insn falls within a libcall
1344 which is part of another movable.
1345 We make use of the fact that the movable for the libcall value
1346 was made later and so appears later on the chain. */
1347
1348 static void
1349 ignore_some_movables (movables)
1350 struct loop_movables *movables;
1351 {
1352 struct movable *m, *m1;
1353
1354 for (m = movables->head; m; m = m->next)
1355 {
1356 /* Is this a movable for the value of a libcall? */
1357 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1358 if (note)
1359 {
1360 rtx insn;
1361 /* Check for earlier movables inside that range,
1362 and mark them invalid. We cannot use LUIDs here because
1363 insns created by loop.c for prior loops don't have LUIDs.
1364 Rather than reject all such insns from movables, we just
1365 explicitly check each insn in the libcall (since invariant
1366 libcalls aren't that common). */
1367 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1368 for (m1 = movables->head; m1 != m; m1 = m1->next)
1369 if (m1->insn == insn)
1370 m1->done = 1;
1371 }
1372 }
1373 }
1374
1375 /* For each movable insn, see if the reg that it loads feeds directly,
1376    at the point where it dies, into another conditionally movable insn.
1377 If so, record that the second insn "forces" the first one,
1378 since the second can be moved only if the first is. */
1379
1380 static void
1381 force_movables (movables)
1382 struct loop_movables *movables;
1383 {
1384 struct movable *m, *m1;
1385
1386 for (m1 = movables->head; m1; m1 = m1->next)
1387 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1388 if (!m1->partial && !m1->done)
1389 {
1390 int regno = m1->regno;
1391 for (m = m1->next; m; m = m->next)
1392 /* ??? Could this be a bug? What if CSE caused the
1393 register of M1 to be used after this insn?
1394 Since CSE does not update regno_last_uid,
1395 this insn M->insn might not be where it dies.
1396 But very likely this doesn't matter; what matters is
1397 that M's reg is computed from M1's reg. */
1398 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1399 && !m->done)
1400 break;
1401 if (m != 0 && m->set_src == m1->set_dest
1402 /* If m->consec, m->set_src isn't valid. */
1403 && m->consec == 0)
1404 m = 0;
1405
1406       /* Increase the priority of moving the first insn
1407 since it permits the second to be moved as well. */
1408 if (m != 0)
1409 {
1410 m->forces = m1;
1411 m1->lifetime += m->lifetime;
1412 m1->savings += m->savings;
1413 }
1414 }
1415 }
1416 \f
1417 /* Find invariant expressions that are equal and can be combined into
1418 one register. */
1419
1420 static void
1421 combine_movables (movables, regs)
1422 struct loop_movables *movables;
1423 struct loop_regs *regs;
1424 {
1425 struct movable *m;
1426 char *matched_regs = (char *) xmalloc (regs->num);
1427 enum machine_mode mode;
1428
1429 /* Regs that are set more than once are not allowed to match
1430 or be matched. I'm no longer sure why not. */
1431 /* Perhaps testing m->consec_sets would be more appropriate here? */
1432
1433 for (m = movables->head; m; m = m->next)
1434 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1435 && !m->partial)
1436 {
1437 struct movable *m1;
1438 int regno = m->regno;
1439
1440 memset (matched_regs, 0, regs->num);
1441 matched_regs[regno] = 1;
1442
1443 /* We want later insns to match the first one. Don't make the first
1444 one match any later ones. So start this loop at m->next. */
1445 for (m1 = m->next; m1; m1 = m1->next)
1446 /* ??? HACK! move_movables does not verify that the replacement
1447	   is valid, which can have disastrous effects with hard regs
1448 and match_dup. Turn combination off for now. */
1449 if (0 && m != m1 && m1->match == 0
1450 && regs->array[m1->regno].n_times_set == 1
1451 /* A reg used outside the loop mustn't be eliminated. */
1452 && !m1->global
1453 /* A reg used for zero-extending mustn't be eliminated. */
1454 && !m1->partial
1455 && (matched_regs[m1->regno]
1456 ||
1457 (
1458 /* Can combine regs with different modes loaded from the
1459 same constant only if the modes are the same or
1460 if both are integer modes with M wider or the same
1461 width as M1. The check for integer is redundant, but
1462 safe, since the only case of differing destination
1463 modes with equal sources is when both sources are
1464 VOIDmode, i.e., CONST_INT. */
1465 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1466 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1467 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1468 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1469 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1470 /* See if the source of M1 says it matches M. */
1471 && ((GET_CODE (m1->set_src) == REG
1472 && matched_regs[REGNO (m1->set_src)])
1473 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1474 movables, regs))))
1475 && ((m->dependencies == m1->dependencies)
1476 || rtx_equal_p (m->dependencies, m1->dependencies)))
1477 {
1478 m->lifetime += m1->lifetime;
1479 m->savings += m1->savings;
1480 m1->done = 1;
1481 m1->match = m;
1482 matched_regs[m1->regno] = 1;
1483 }
1484 }
1485
1486 /* Now combine the regs used for zero-extension.
1487 This can be done for those not marked `global'
1488 provided their lives don't overlap. */
1489
1490 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1491 mode = GET_MODE_WIDER_MODE (mode))
1492 {
1493 struct movable *m0 = 0;
1494
1495 /* Combine all the registers for extension from mode MODE.
1496 Don't combine any that are used outside this loop. */
1497 for (m = movables->head; m; m = m->next)
1498 if (m->partial && ! m->global
1499 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1500 {
1501 struct movable *m1;
1502
1503 int first = REGNO_FIRST_LUID (m->regno);
1504 int last = REGNO_LAST_LUID (m->regno);
1505
1506 if (m0 == 0)
1507 {
1508 /* First one: don't check for overlap, just record it. */
1509 m0 = m;
1510 continue;
1511 }
1512
1513 /* Make sure they extend to the same mode.
1514 (Almost always true.) */
1515 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1516 continue;
1517
1518 /* We already have one: check for overlap with those
1519 already combined together. */
1520 for (m1 = movables->head; m1 != m; m1 = m1->next)
1521 if (m1 == m0 || (m1->partial && m1->match == m0))
1522 if (! (REGNO_FIRST_LUID (m1->regno) > last
1523 || REGNO_LAST_LUID (m1->regno) < first))
1524 goto overlap;
1525
1526 /* No overlap: we can combine this with the others. */
1527 m0->lifetime += m->lifetime;
1528 m0->savings += m->savings;
1529 m->done = 1;
1530 m->match = m0;
1531
1532 overlap:
1533 ;
1534 }
1535 }
1536
1537 /* Clean up. */
1538 free (matched_regs);
1539 }
1540
1541 /* Returns the number of movable instructions in LOOP that were not
1542 moved outside the loop. */
1543
1544 static int
1545 num_unmoved_movables (loop)
1546 const struct loop *loop;
1547 {
1548 int num = 0;
1549 struct movable *m;
1550
1551 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1552 if (!m->done)
1553 ++num;
1554
1555 return num;
1556 }
1557
1558 \f
1559 /* Return 1 if regs X and Y will become the same if moved. */
1560
1561 static int
1562 regs_match_p (x, y, movables)
1563 rtx x, y;
1564 struct loop_movables *movables;
1565 {
1566 unsigned int xn = REGNO (x);
1567 unsigned int yn = REGNO (y);
1568 struct movable *mx, *my;
1569
1570 for (mx = movables->head; mx; mx = mx->next)
1571 if (mx->regno == xn)
1572 break;
1573
1574 for (my = movables->head; my; my = my->next)
1575 if (my->regno == yn)
1576 break;
1577
1578 return (mx && my
1579 && ((mx->match == my->match && mx->match != 0)
1580 || mx->match == my
1581 || mx == my->match));
1582 }
1583
1584 /* Return 1 if X and Y are identical-looking rtx's.
1585 This is the Lisp function EQUAL for rtx arguments.
1586
1587 If two registers are matching movables or a movable register and an
1588 equivalent constant, consider them equal. */
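1588
/* For illustration only (the register numbers below are made up): with this
   definition, (plus:SI (reg:SI 100) (const_int 4)) and
   (plus:SI (reg:SI 101) (const_int 4)) compare equal whenever regs 100 and
   101 are matching movables (see regs_match_p above), even though plain
   rtx_equal_p would not consider them equal. */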
1589
1590 static int
1591 rtx_equal_for_loop_p (x, y, movables, regs)
1592 rtx x, y;
1593 struct loop_movables *movables;
1594 struct loop_regs *regs;
1595 {
1596 int i;
1597 int j;
1598 struct movable *m;
1599 enum rtx_code code;
1600 const char *fmt;
1601
1602 if (x == y)
1603 return 1;
1604 if (x == 0 || y == 0)
1605 return 0;
1606
1607 code = GET_CODE (x);
1608
1609 /* If we have a register and a constant, they may sometimes be
1610 equal. */
1611 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1612 && CONSTANT_P (y))
1613 {
1614 for (m = movables->head; m; m = m->next)
1615 if (m->move_insn && m->regno == REGNO (x)
1616 && rtx_equal_p (m->set_src, y))
1617 return 1;
1618 }
1619 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1620 && CONSTANT_P (x))
1621 {
1622 for (m = movables->head; m; m = m->next)
1623 if (m->move_insn && m->regno == REGNO (y)
1624 && rtx_equal_p (m->set_src, x))
1625 return 1;
1626 }
1627
1628 /* Otherwise, rtx's of different codes cannot be equal. */
1629 if (code != GET_CODE (y))
1630 return 0;
1631
1632 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1633 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1634
1635 if (GET_MODE (x) != GET_MODE (y))
1636 return 0;
1637
1638 /* These three types of rtx's can be compared nonrecursively. */
1639 if (code == REG)
1640 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1641
1642 if (code == LABEL_REF)
1643 return XEXP (x, 0) == XEXP (y, 0);
1644 if (code == SYMBOL_REF)
1645 return XSTR (x, 0) == XSTR (y, 0);
1646
1647 /* Compare the elements. If any pair of corresponding elements
1648      fails to match, return 0 for the whole thing. */
1649
1650 fmt = GET_RTX_FORMAT (code);
1651 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1652 {
1653 switch (fmt[i])
1654 {
1655 case 'w':
1656 if (XWINT (x, i) != XWINT (y, i))
1657 return 0;
1658 break;
1659
1660 case 'i':
1661 if (XINT (x, i) != XINT (y, i))
1662 return 0;
1663 break;
1664
1665 case 'E':
1666 /* Two vectors must have the same length. */
1667 if (XVECLEN (x, i) != XVECLEN (y, i))
1668 return 0;
1669
1670 /* And the corresponding elements must match. */
1671 for (j = 0; j < XVECLEN (x, i); j++)
1672 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1673 movables, regs) == 0)
1674 return 0;
1675 break;
1676
1677 case 'e':
1678 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1679 == 0)
1680 return 0;
1681 break;
1682
1683 case 's':
1684 if (strcmp (XSTR (x, i), XSTR (y, i)))
1685 return 0;
1686 break;
1687
1688 case 'u':
1689 /* These are just backpointers, so they don't matter. */
1690 break;
1691
1692 case '0':
1693 break;
1694
1695 /* It is believed that rtx's at this level will never
1696 contain anything but integers and other rtx's,
1697 except for within LABEL_REFs and SYMBOL_REFs. */
1698 default:
1699 abort ();
1700 }
1701 }
1702 return 1;
1703 }
1704 \f
1705 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1706 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1707 references is incremented once for each added note. */
1708
1709 static void
1710 add_label_notes (x, insns)
1711 rtx x;
1712 rtx insns;
1713 {
1714 enum rtx_code code = GET_CODE (x);
1715 int i, j;
1716 const char *fmt;
1717 rtx insn;
1718
1719 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1720 {
1721 /* This code used to ignore labels that referred to dispatch tables to
1722    avoid flow generating (slightly) worse code.
1723
1724 We no longer ignore such label references (see LABEL_REF handling in
1725 mark_jump_label for additional information). */
1726 for (insn = insns; insn; insn = NEXT_INSN (insn))
1727 if (reg_mentioned_p (XEXP (x, 0), insn))
1728 {
1729 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1730 REG_NOTES (insn));
1731 if (LABEL_P (XEXP (x, 0)))
1732 LABEL_NUSES (XEXP (x, 0))++;
1733 }
1734 }
1735
1736 fmt = GET_RTX_FORMAT (code);
1737 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1738 {
1739 if (fmt[i] == 'e')
1740 add_label_notes (XEXP (x, i), insns);
1741 else if (fmt[i] == 'E')
1742 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1743 add_label_notes (XVECEXP (x, i, j), insns);
1744 }
1745 }
1746 \f
1747 /* Scan MOVABLES, and move the insns that deserve to be moved.
1748 If two matching movables are combined, replace one reg with the
1749 other throughout. */
1750
1751 static void
1752 move_movables (loop, movables, threshold, insn_count)
1753 struct loop *loop;
1754 struct loop_movables *movables;
1755 int threshold;
1756 int insn_count;
1757 {
1758 struct loop_regs *regs = LOOP_REGS (loop);
1759 int nregs = regs->num;
1760 rtx new_start = 0;
1761 struct movable *m;
1762 rtx p;
1763 rtx loop_start = loop->start;
1764 rtx loop_end = loop->end;
1765 /* Map of pseudo-register replacements to handle combining
1766 when we move several insns that load the same value
1767 into different pseudo-registers. */
1768 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1769 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1770
1771 for (m = movables->head; m; m = m->next)
1772 {
1773 /* Describe this movable insn. */
1774
1775 if (loop_dump_stream)
1776 {
1777 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1778 INSN_UID (m->insn), m->regno, m->lifetime);
1779 if (m->consec > 0)
1780 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1781 if (m->cond)
1782 fprintf (loop_dump_stream, "cond ");
1783 if (m->force)
1784 fprintf (loop_dump_stream, "force ");
1785 if (m->global)
1786 fprintf (loop_dump_stream, "global ");
1787 if (m->done)
1788 fprintf (loop_dump_stream, "done ");
1789 if (m->move_insn)
1790 fprintf (loop_dump_stream, "move-insn ");
1791 if (m->match)
1792 fprintf (loop_dump_stream, "matches %d ",
1793 INSN_UID (m->match->insn));
1794 if (m->forces)
1795 fprintf (loop_dump_stream, "forces %d ",
1796 INSN_UID (m->forces->insn));
1797 }
1798
1799 /* Ignore the insn if it's already done (it matched something else).
1800 Otherwise, see if it is now safe to move. */
1801
1802 if (!m->done
1803 && (! m->cond
1804 || (1 == loop_invariant_p (loop, m->set_src)
1805 && (m->dependencies == 0
1806 || 1 == loop_invariant_p (loop, m->dependencies))
1807 && (m->consec == 0
1808 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1809 m->consec + 1,
1810 m->insn))))
1811 && (! m->forces || m->forces->done))
1812 {
1813 int regno;
1814 rtx p;
1815 int savings = m->savings;
1816
1817 /* We have an insn that is safe to move.
1818 Compute its desirability. */
1819
1820 p = m->insn;
1821 regno = m->regno;
1822
1823 if (loop_dump_stream)
1824 fprintf (loop_dump_stream, "savings %d ", savings);
1825
1826 if (regs->array[regno].moved_once && loop_dump_stream)
1827 fprintf (loop_dump_stream, "halved since already moved ");
1828
1829 /* An insn MUST be moved if we already moved something else
1830 which is safe only if this one is moved too: that is,
1831 if already_moved[REGNO] is nonzero. */
1832
1833 /* An insn is desirable to move if the new lifetime of the
1834 register is no more than THRESHOLD times the old lifetime.
1835 If it's not desirable, it means the loop is so big
1836 that moving won't speed things up much,
1837 and it is liable to make register usage worse. */
1838
1839 /* It is also desirable to move if it can be moved at no
1840 extra cost because something else was already moved. */
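	  /* As a made-up illustration of the test below: with THRESHOLD 6,
	     SAVINGS 2 and a LIFETIME of 10 the product is 120, so the insn
	     qualifies as long as the loop has at most 120 insns (only 60 if
	     the register was already moved out of another loop, since the
	     product must then reach twice the insn count -- cf. the
	     "halved since already moved" dump message above). */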
1841
1842 if (already_moved[regno]
1843 || flag_move_all_movables
1844 || (threshold * savings * m->lifetime) >=
1845 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1846 || (m->forces && m->forces->done
1847 && regs->array[m->forces->regno].n_times_set == 1))
1848 {
1849 int count;
1850 struct movable *m1;
1851 rtx first = NULL_RTX;
1852
1853 /* Now move the insns that set the reg. */
1854
1855 if (m->partial && m->match)
1856 {
1857 rtx newpat, i1;
1858 rtx r1, r2;
1859 /* Find the end of this chain of matching regs.
1860 Thus, we load each reg in the chain from that one reg.
1861 And that reg is loaded with 0 directly,
1862 since it has ->match == 0. */
1863 for (m1 = m; m1->match; m1 = m1->match);
1864 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1865 SET_DEST (PATTERN (m1->insn)));
1866 i1 = loop_insn_hoist (loop, newpat);
1867
1868 /* Mark the moved, invariant reg as being allowed to
1869 share a hard reg with the other matching invariant. */
1870 REG_NOTES (i1) = REG_NOTES (m->insn);
1871 r1 = SET_DEST (PATTERN (m->insn));
1872 r2 = SET_DEST (PATTERN (m1->insn));
1873 regs_may_share
1874 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1875 gen_rtx_EXPR_LIST (VOIDmode, r2,
1876 regs_may_share));
1877 delete_insn (m->insn);
1878
1879 if (new_start == 0)
1880 new_start = i1;
1881
1882 if (loop_dump_stream)
1883 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1884 }
1885 /* If we are to re-generate the item being moved with a
1886 new move insn, first delete what we have and then emit
1887 the move insn before the loop. */
1888 else if (m->move_insn)
1889 {
1890 rtx i1, temp, seq;
1891
1892 for (count = m->consec; count >= 0; count--)
1893 {
1894 /* If this is the first insn of a library call sequence,
1895 skip to the end. */
1896 if (GET_CODE (p) != NOTE
1897 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1898 p = XEXP (temp, 0);
1899
1900 /* If this is the last insn of a libcall sequence, then
1901 delete every insn in the sequence except the last.
1902 The last insn is handled in the normal manner. */
1903 if (GET_CODE (p) != NOTE
1904 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1905 {
1906 temp = XEXP (temp, 0);
1907 while (temp != p)
1908 temp = delete_insn (temp);
1909 }
1910
1911 temp = p;
1912 p = delete_insn (p);
1913
1914 /* simplify_giv_expr expects that it can walk the insns
1915 at m->insn forwards and see this old sequence we are
1916 tossing here. delete_insn does preserve the next
1917 pointers, but when we skip over a NOTE we must fix
1918 it up. Otherwise that code walks into the non-deleted
1919 insn stream. */
1920 while (p && GET_CODE (p) == NOTE)
1921 p = NEXT_INSN (temp) = NEXT_INSN (p);
1922 }
1923
1924 start_sequence ();
1925 emit_move_insn (m->set_dest, m->set_src);
1926 temp = get_insns ();
1927 seq = gen_sequence ();
1928 end_sequence ();
1929
1930 add_label_notes (m->set_src, temp);
1931
1932 i1 = loop_insn_hoist (loop, seq);
1933 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1934 set_unique_reg_note (i1,
1935 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1936 m->set_src);
1937
1938 if (loop_dump_stream)
1939 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1940
1941 /* The more regs we move, the less we like moving them. */
1942 threshold -= 3;
1943 }
1944 else
1945 {
1946 for (count = m->consec; count >= 0; count--)
1947 {
1948 rtx i1, temp;
1949
1950 /* If first insn of libcall sequence, skip to end. */
1951 /* Do this at start of loop, since p is guaranteed to
1952 be an insn here. */
1953 if (GET_CODE (p) != NOTE
1954 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1955 p = XEXP (temp, 0);
1956
1957 /* If last insn of libcall sequence, move all
1958 insns except the last before the loop. The last
1959 insn is handled in the normal manner. */
1960 if (GET_CODE (p) != NOTE
1961 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1962 {
1963 rtx fn_address = 0;
1964 rtx fn_reg = 0;
1965 rtx fn_address_insn = 0;
1966
1967 first = 0;
1968 for (temp = XEXP (temp, 0); temp != p;
1969 temp = NEXT_INSN (temp))
1970 {
1971 rtx body;
1972 rtx n;
1973 rtx next;
1974
1975 if (GET_CODE (temp) == NOTE)
1976 continue;
1977
1978 body = PATTERN (temp);
1979
1980 /* Find the next insn after TEMP,
1981 not counting USE or NOTE insns. */
1982 for (next = NEXT_INSN (temp); next != p;
1983 next = NEXT_INSN (next))
1984 if (! (GET_CODE (next) == INSN
1985 && GET_CODE (PATTERN (next)) == USE)
1986 && GET_CODE (next) != NOTE)
1987 break;
1988
1989 /* If that is the call, this may be the insn
1990 that loads the function address.
1991
1992 Extract the function address from the insn
1993 that loads it into a register.
1994 If this insn was cse'd, we get incorrect code.
1995
1996 So emit a new move insn that copies the
1997 function address into the register that the
1998 call insn will use. flow.c will delete any
1999 redundant stores that we have created. */
2000 if (GET_CODE (next) == CALL_INSN
2001 && GET_CODE (body) == SET
2002 && GET_CODE (SET_DEST (body)) == REG
2003 && (n = find_reg_note (temp, REG_EQUAL,
2004 NULL_RTX)))
2005 {
2006 fn_reg = SET_SRC (body);
2007 if (GET_CODE (fn_reg) != REG)
2008 fn_reg = SET_DEST (body);
2009 fn_address = XEXP (n, 0);
2010 fn_address_insn = temp;
2011 }
2012 /* We have the call insn.
2013 If it uses the register we suspect it might,
2014 load it with the correct address directly. */
2015 if (GET_CODE (temp) == CALL_INSN
2016 && fn_address != 0
2017 && reg_referenced_p (fn_reg, body))
2018 loop_insn_emit_after (loop, 0, fn_address_insn,
2019 gen_move_insn
2020 (fn_reg, fn_address));
2021
2022 if (GET_CODE (temp) == CALL_INSN)
2023 {
2024 i1 = loop_call_insn_hoist (loop, body);
2025 /* Because the USAGE information potentially
2026 contains objects other than hard registers
2027 we need to copy it. */
2028 if (CALL_INSN_FUNCTION_USAGE (temp))
2029 CALL_INSN_FUNCTION_USAGE (i1)
2030 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2031 }
2032 else
2033 i1 = loop_insn_hoist (loop, body);
2034 if (first == 0)
2035 first = i1;
2036 if (temp == fn_address_insn)
2037 fn_address_insn = i1;
2038 REG_NOTES (i1) = REG_NOTES (temp);
2039 REG_NOTES (temp) = NULL;
2040 delete_insn (temp);
2041 }
2042 if (new_start == 0)
2043 new_start = first;
2044 }
2045 if (m->savemode != VOIDmode)
2046 {
2047 /* P sets REG to zero; but we should clear only
2048 the bits that are not covered by the mode
2049 m->savemode. */
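		      /* For example (illustrative only): if m->savemode is
			 QImode on a target with 8-bit bytes, the mask built
			 below is (1 << 8) - 1 == 0xff, so the AND keeps the
			 low byte of REG and zeros the remaining bits. */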
2050 rtx reg = m->set_dest;
2051 rtx sequence;
2052 rtx tem;
2053
2054 start_sequence ();
2055 tem = expand_simple_binop
2056 (GET_MODE (reg), AND, reg,
2057 GEN_INT ((((HOST_WIDE_INT) 1
2058 << GET_MODE_BITSIZE (m->savemode)))
2059 - 1),
2060 reg, 1, OPTAB_LIB_WIDEN);
2061 if (tem == 0)
2062 abort ();
2063 if (tem != reg)
2064 emit_move_insn (reg, tem);
2065 sequence = gen_sequence ();
2066 end_sequence ();
2067 i1 = loop_insn_hoist (loop, sequence);
2068 }
2069 else if (GET_CODE (p) == CALL_INSN)
2070 {
2071 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2072 /* Because the USAGE information potentially
2073 contains objects other than hard registers
2074 we need to copy it. */
2075 if (CALL_INSN_FUNCTION_USAGE (p))
2076 CALL_INSN_FUNCTION_USAGE (i1)
2077 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2078 }
2079 else if (count == m->consec && m->move_insn_first)
2080 {
2081 rtx seq;
2082 /* The SET_SRC might not be invariant, so we must
2083 use the REG_EQUAL note. */
2084 start_sequence ();
2085 emit_move_insn (m->set_dest, m->set_src);
2086 temp = get_insns ();
2087 seq = gen_sequence ();
2088 end_sequence ();
2089
2090 add_label_notes (m->set_src, temp);
2091
2092 i1 = loop_insn_hoist (loop, seq);
2093 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2094 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2095 : REG_EQUAL, m->set_src);
2096 }
2097 else
2098 i1 = loop_insn_hoist (loop, PATTERN (p));
2099
2100 if (REG_NOTES (i1) == 0)
2101 {
2102 REG_NOTES (i1) = REG_NOTES (p);
2103 REG_NOTES (p) = NULL;
2104
2105 /* If there is a REG_EQUAL note present whose value
2106 is not loop invariant, then delete it, since it
2107 may cause problems with later optimization passes.
2108 It is possible for cse to create such notes
2109 like this as a result of record_jump_cond. */
2110
2111 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2112 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2113 remove_note (i1, temp);
2114 }
2115
2116 if (new_start == 0)
2117 new_start = i1;
2118
2119 if (loop_dump_stream)
2120 fprintf (loop_dump_stream, " moved to %d",
2121 INSN_UID (i1));
2122
2123 /* If library call, now fix the REG_NOTES that contain
2124 insn pointers, namely REG_LIBCALL on FIRST
2125 and REG_RETVAL on I1. */
2126 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2127 {
2128 XEXP (temp, 0) = first;
2129 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2130 XEXP (temp, 0) = i1;
2131 }
2132
2133 temp = p;
2134 delete_insn (p);
2135 p = NEXT_INSN (p);
2136
2137 /* simplify_giv_expr expects that it can walk the insns
2138 at m->insn forwards and see this old sequence we are
2139 tossing here. delete_insn does preserve the next
2140 pointers, but when we skip over a NOTE we must fix
2141 it up. Otherwise that code walks into the non-deleted
2142 insn stream. */
2143 while (p && GET_CODE (p) == NOTE)
2144 p = NEXT_INSN (temp) = NEXT_INSN (p);
2145 }
2146
2147 /* The more regs we move, the less we like moving them. */
2148 threshold -= 3;
2149 }
2150
2151 /* Any other movable that loads the same register
2152 MUST be moved. */
2153 already_moved[regno] = 1;
2154
2155 /* This reg has been moved out of one loop. */
2156 regs->array[regno].moved_once = 1;
2157
2158 /* The reg set here is now invariant. */
2159 if (! m->partial)
2160 {
2161 int i;
2162 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2163 regs->array[regno+i].set_in_loop = 0;
2164 }
2165
2166 m->done = 1;
2167
2168 /* Change the length-of-life info for the register
2169 to say it lives at least the full length of this loop.
2170 This will help guide optimizations in outer loops. */
2171
2172 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2173 /* This is the old insn before all the moved insns.
2174 We can't use the moved insn because it is out of range
2175 in uid_luid. Only the old insns have luids. */
2176 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2177 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2178 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2179
2180 /* Combine with this moved insn any other matching movables. */
2181
2182 if (! m->partial)
2183 for (m1 = movables->head; m1; m1 = m1->next)
2184 if (m1->match == m)
2185 {
2186 rtx temp;
2187
2188 /* Schedule the reg loaded by M1
2189                      for replacement so that it shares the reg of M.
2190                      If the modes differ (only possible in restricted
2191                      circumstances), make a SUBREG.
2192
2193 Note this assumes that the target dependent files
2194 treat REG and SUBREG equally, including within
2195 GO_IF_LEGITIMATE_ADDRESS and in all the
2196 predicates since we never verify that replacing the
2197 original register with a SUBREG results in a
2198 recognizable insn. */
2199 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2200 reg_map[m1->regno] = m->set_dest;
2201 else
2202 reg_map[m1->regno]
2203 = gen_lowpart_common (GET_MODE (m1->set_dest),
2204 m->set_dest);
2205
2206 /* Get rid of the matching insn
2207 and prevent further processing of it. */
2208 m1->done = 1;
2209
2210                      /* If library call, delete all insns. */
2211 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2212 NULL_RTX)))
2213 delete_insn_chain (XEXP (temp, 0), m1->insn);
2214 else
2215 delete_insn (m1->insn);
2216
2217 /* Any other movable that loads the same register
2218 MUST be moved. */
2219 already_moved[m1->regno] = 1;
2220
2221 /* The reg merged here is now invariant,
2222 if the reg it matches is invariant. */
2223 if (! m->partial)
2224 {
2225 int i;
2226 for (i = 0;
2227 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2228 i++)
2229 regs->array[m1->regno+i].set_in_loop = 0;
2230 }
2231 }
2232 }
2233 else if (loop_dump_stream)
2234 fprintf (loop_dump_stream, "not desirable");
2235 }
2236 else if (loop_dump_stream && !m->match)
2237 fprintf (loop_dump_stream, "not safe");
2238
2239 if (loop_dump_stream)
2240 fprintf (loop_dump_stream, "\n");
2241 }
2242
2243 if (new_start == 0)
2244 new_start = loop_start;
2245
2246 /* Go through all the instructions in the loop, making
2247 all the register substitutions scheduled in REG_MAP. */
2248 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2249 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2250 || GET_CODE (p) == CALL_INSN)
2251 {
2252 replace_regs (PATTERN (p), reg_map, nregs, 0);
2253 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2254 INSN_CODE (p) = -1;
2255 }
2256
2257 /* Clean up. */
2258 free (reg_map);
2259 free (already_moved);
2260 }
2261
2262
2263 static void
2264 loop_movables_add (movables, m)
2265 struct loop_movables *movables;
2266 struct movable *m;
2267 {
2268 if (movables->head == 0)
2269 movables->head = m;
2270 else
2271 movables->last->next = m;
2272 movables->last = m;
2273 }
2274
2275
2276 static void
2277 loop_movables_free (movables)
2278 struct loop_movables *movables;
2279 {
2280 struct movable *m;
2281 struct movable *m_next;
2282
2283 for (m = movables->head; m; m = m_next)
2284 {
2285 m_next = m->next;
2286 free (m);
2287 }
2288 }
2289 \f
2290 #if 0
2291 /* Scan X and replace the address of any MEM in it with ADDR.
2292 REG is the address that MEM should have before the replacement. */
2293
2294 static void
2295 replace_call_address (x, reg, addr)
2296 rtx x, reg, addr;
2297 {
2298 enum rtx_code code;
2299 int i;
2300 const char *fmt;
2301
2302 if (x == 0)
2303 return;
2304 code = GET_CODE (x);
2305 switch (code)
2306 {
2307 case PC:
2308 case CC0:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST:
2312 case SYMBOL_REF:
2313 case LABEL_REF:
2314 case REG:
2315 return;
2316
2317 case SET:
2318 /* Short cut for very common case. */
2319 replace_call_address (XEXP (x, 1), reg, addr);
2320 return;
2321
2322 case CALL:
2323 /* Short cut for very common case. */
2324 replace_call_address (XEXP (x, 0), reg, addr);
2325 return;
2326
2327 case MEM:
2328 /* If this MEM uses a reg other than the one we expected,
2329 something is wrong. */
2330 if (XEXP (x, 0) != reg)
2331 abort ();
2332 XEXP (x, 0) = addr;
2333 return;
2334
2335 default:
2336 break;
2337 }
2338
2339 fmt = GET_RTX_FORMAT (code);
2340 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2341 {
2342 if (fmt[i] == 'e')
2343 replace_call_address (XEXP (x, i), reg, addr);
2344 else if (fmt[i] == 'E')
2345 {
2346 int j;
2347 for (j = 0; j < XVECLEN (x, i); j++)
2348 replace_call_address (XVECEXP (x, i, j), reg, addr);
2349 }
2350 }
2351 }
2352 #endif
2353 \f
2354 /* Return the number of memory refs to addresses that vary
2355 in the rtx X. */
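2356
/* For instance (illustrative only): (mem (plus (reg 100) (const_int 8)))
   counts as one nonfixed read if reg 100 is not unconditionally invariant
   in LOOP, and as zero if it is. */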
2356
2357 static int
2358 count_nonfixed_reads (loop, x)
2359 const struct loop *loop;
2360 rtx x;
2361 {
2362 enum rtx_code code;
2363 int i;
2364 const char *fmt;
2365 int value;
2366
2367 if (x == 0)
2368 return 0;
2369
2370 code = GET_CODE (x);
2371 switch (code)
2372 {
2373 case PC:
2374 case CC0:
2375 case CONST_INT:
2376 case CONST_DOUBLE:
2377 case CONST:
2378 case SYMBOL_REF:
2379 case LABEL_REF:
2380 case REG:
2381 return 0;
2382
2383 case MEM:
2384 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2385 + count_nonfixed_reads (loop, XEXP (x, 0)));
2386
2387 default:
2388 break;
2389 }
2390
2391 value = 0;
2392 fmt = GET_RTX_FORMAT (code);
2393 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2394 {
2395 if (fmt[i] == 'e')
2396 value += count_nonfixed_reads (loop, XEXP (x, i));
2397 if (fmt[i] == 'E')
2398 {
2399 int j;
2400 for (j = 0; j < XVECLEN (x, i); j++)
2401 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2402 }
2403 }
2404 return value;
2405 }
2406 \f
2407 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2408 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2409 `unknown_address_altered', `unknown_constant_address_altered', and
2410 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2411 list `store_mems' in LOOP. */
2412
2413 static void
2414 prescan_loop (loop)
2415 struct loop *loop;
2416 {
2417 int level = 1;
2418 rtx insn;
2419 struct loop_info *loop_info = LOOP_INFO (loop);
2420 rtx start = loop->start;
2421 rtx end = loop->end;
2422 /* The label after END. Jumping here is just like falling off the
2423 end of the loop. We use next_nonnote_insn instead of next_label
2424 as a hedge against the (pathological) case where some actual insn
2425 might end up between the two. */
2426 rtx exit_target = next_nonnote_insn (end);
2427
2428 loop_info->has_indirect_jump = indirect_jump_in_function;
2429 loop_info->pre_header_has_call = 0;
2430 loop_info->has_call = 0;
2431 loop_info->has_nonconst_call = 0;
2432 loop_info->has_volatile = 0;
2433 loop_info->has_tablejump = 0;
2434 loop_info->has_multiple_exit_targets = 0;
2435 loop->level = 1;
2436
2437 loop_info->unknown_address_altered = 0;
2438 loop_info->unknown_constant_address_altered = 0;
2439 loop_info->store_mems = NULL_RTX;
2440 loop_info->first_loop_store_insn = NULL_RTX;
2441 loop_info->mems_idx = 0;
2442 loop_info->num_mem_sets = 0;
2443
2444
2445 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2446 insn = PREV_INSN (insn))
2447 {
2448 if (GET_CODE (insn) == CALL_INSN)
2449 {
2450 loop_info->pre_header_has_call = 1;
2451 break;
2452 }
2453 }
2454
2455 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2456 insn = NEXT_INSN (insn))
2457 {
2458 switch (GET_CODE (insn))
2459 {
2460 case NOTE:
2461 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2462 {
2463 ++level;
2464 /* Count number of loops contained in this one. */
2465 loop->level++;
2466 }
2467 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2468 --level;
2469 break;
2470
2471 case CALL_INSN:
2472 if (! CONST_OR_PURE_CALL_P (insn))
2473 {
2474 loop_info->unknown_address_altered = 1;
2475 loop_info->has_nonconst_call = 1;
2476 }
2477 loop_info->has_call = 1;
2478 if (can_throw_internal (insn))
2479 loop_info->has_multiple_exit_targets = 1;
2480 break;
2481
2482 case JUMP_INSN:
2483 if (! loop_info->has_multiple_exit_targets)
2484 {
2485 rtx set = pc_set (insn);
2486
2487 if (set)
2488 {
2489 rtx label1, label2;
2490
2491 if (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE)
2492 {
2493 label1 = XEXP (SET_SRC (set), 1);
2494 label2 = XEXP (SET_SRC (set), 2);
2495 }
2496 else
2497 {
2498 label1 = SET_SRC (PATTERN (insn));
2499 label2 = NULL_RTX;
2500 }
2501
2502 do
2503 {
2504 if (label1 && label1 != pc_rtx)
2505 {
2506 if (GET_CODE (label1) != LABEL_REF)
2507 {
2508 /* Something tricky. */
2509 loop_info->has_multiple_exit_targets = 1;
2510 break;
2511 }
2512 else if (XEXP (label1, 0) != exit_target
2513 && LABEL_OUTSIDE_LOOP_P (label1))
2514 {
2515 /* A jump outside the current loop. */
2516 loop_info->has_multiple_exit_targets = 1;
2517 break;
2518 }
2519 }
2520
2521 label1 = label2;
2522 label2 = NULL_RTX;
2523 }
2524 while (label1);
2525 }
2526 else
2527 {
2528 /* A return, or something tricky. */
2529 loop_info->has_multiple_exit_targets = 1;
2530 }
2531 }
2532 /* FALLTHRU */
2533
2534 case INSN:
2535 if (volatile_refs_p (PATTERN (insn)))
2536 loop_info->has_volatile = 1;
2537
2538 if (GET_CODE (insn) == JUMP_INSN
2539 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2540 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2541 loop_info->has_tablejump = 1;
2542
2543 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2544 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2545 loop_info->first_loop_store_insn = insn;
2546
2547 if (flag_non_call_exceptions && can_throw_internal (insn))
2548 loop_info->has_multiple_exit_targets = 1;
2549 break;
2550
2551 default:
2552 break;
2553 }
2554 }
2555
2556 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2557 if (/* An exception thrown by a called function might land us
2558 anywhere. */
2559 ! loop_info->has_nonconst_call
2560 /* We don't want loads for MEMs moved to a location before the
2561 one at which their stack memory becomes allocated. (Note
2562 that this is not a problem for malloc, etc., since those
2563          require actual function calls.) */
2564 && ! current_function_calls_alloca
2565 /* There are ways to leave the loop other than falling off the
2566 end. */
2567 && ! loop_info->has_multiple_exit_targets)
2568 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2569 insn = NEXT_INSN (insn))
2570 for_each_rtx (&insn, insert_loop_mem, loop_info);
2571
2572 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2573 that loop_invariant_p and load_mems can use true_dependence
2574 to determine what is really clobbered. */
2575 if (loop_info->unknown_address_altered)
2576 {
2577 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2578
2579 loop_info->store_mems
2580 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2581 }
2582 if (loop_info->unknown_constant_address_altered)
2583 {
2584 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2585
2586 RTX_UNCHANGING_P (mem) = 1;
2587 loop_info->store_mems
2588 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2589 }
2590 }
2591 \f
2592 /* Scan the function looking for loops. Record the start and end of each loop.
2593 Also mark as invalid loops any loops that contain a setjmp or are branched
2594 to from outside the loop. */
2595
2596 static void
2597 find_and_verify_loops (f, loops)
2598 rtx f;
2599 struct loops *loops;
2600 {
2601 rtx insn;
2602 rtx label;
2603 int num_loops;
2604 struct loop *current_loop;
2605 struct loop *next_loop;
2606 struct loop *loop;
2607
2608 num_loops = loops->num;
2609
2610 compute_luids (f, NULL_RTX, 0);
2611
2612 /* If there are jumps to undefined labels,
2613 treat them as jumps out of any/all loops.
2614 This also avoids writing past end of tables when there are no loops. */
2615 uid_loop[0] = NULL;
2616
2617 /* Find boundaries of loops, mark which loops are contained within
2618 loops, and invalidate loops that have setjmp. */
2619
2620 num_loops = 0;
2621 current_loop = NULL;
2622 for (insn = f; insn; insn = NEXT_INSN (insn))
2623 {
2624 if (GET_CODE (insn) == NOTE)
2625 switch (NOTE_LINE_NUMBER (insn))
2626 {
2627 case NOTE_INSN_LOOP_BEG:
2628 next_loop = loops->array + num_loops;
2629 next_loop->num = num_loops;
2630 num_loops++;
2631 next_loop->start = insn;
2632 next_loop->outer = current_loop;
2633 current_loop = next_loop;
2634 break;
2635
2636 case NOTE_INSN_LOOP_CONT:
2637 current_loop->cont = insn;
2638 break;
2639
2640 case NOTE_INSN_LOOP_VTOP:
2641 current_loop->vtop = insn;
2642 break;
2643
2644 case NOTE_INSN_LOOP_END:
2645 if (! current_loop)
2646 abort ();
2647
2648 current_loop->end = insn;
2649 current_loop = current_loop->outer;
2650 break;
2651
2652 default:
2653 break;
2654 }
2655
2656 if (GET_CODE (insn) == CALL_INSN
2657 && find_reg_note (insn, REG_SETJMP, NULL))
2658 {
2659 /* In this case, we must invalidate our current loop and any
2660 enclosing loop. */
2661 for (loop = current_loop; loop; loop = loop->outer)
2662 {
2663 loop->invalid = 1;
2664 if (loop_dump_stream)
2665 fprintf (loop_dump_stream,
2666 "\nLoop at %d ignored due to setjmp.\n",
2667 INSN_UID (loop->start));
2668 }
2669 }
2670
2671 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2672 enclosing loop, but this doesn't matter. */
2673 uid_loop[INSN_UID (insn)] = current_loop;
2674 }
2675
2676 /* Any loop containing a label used in an initializer must be invalidated,
2677 because it can be jumped into from anywhere. */
2678
2679 for (label = forced_labels; label; label = XEXP (label, 1))
2680 {
2681 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2682 loop; loop = loop->outer)
2683 loop->invalid = 1;
2684 }
2685
2686 /* Any loop containing a label used for an exception handler must be
2687 invalidated, because it can be jumped into from anywhere. */
2688
2689 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2690 {
2691 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2692 loop; loop = loop->outer)
2693 loop->invalid = 1;
2694 }
2695
2696 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2697 loop that it is not contained within, that loop is marked invalid.
2698 If any INSN or CALL_INSN uses a label's address, then the loop containing
2699 that label is marked invalid, because it could be jumped into from
2700 anywhere.
2701
2702 Also look for blocks of code ending in an unconditional branch that
2703 exits the loop. If such a block is surrounded by a conditional
2704 branch around the block, move the block elsewhere (see below) and
2705 invert the jump to point to the code block. This may eliminate a
2706 label in our loop and will simplify processing by both us and a
2707 possible second cse pass. */
2708
2709 for (insn = f; insn; insn = NEXT_INSN (insn))
2710 if (INSN_P (insn))
2711 {
2712 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2713
2714 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2715 {
2716 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2717 if (note)
2718 {
2719 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2720 loop; loop = loop->outer)
2721 loop->invalid = 1;
2722 }
2723 }
2724
2725 if (GET_CODE (insn) != JUMP_INSN)
2726 continue;
2727
2728 mark_loop_jump (PATTERN (insn), this_loop);
2729
2730 /* See if this is an unconditional branch outside the loop. */
2731 if (this_loop
2732 && (GET_CODE (PATTERN (insn)) == RETURN
2733 || (any_uncondjump_p (insn)
2734 && onlyjump_p (insn)
2735 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2736 != this_loop)))
2737 && get_max_uid () < max_uid_for_loop)
2738 {
2739 rtx p;
2740 rtx our_next = next_real_insn (insn);
2741 rtx last_insn_to_move = NEXT_INSN (insn);
2742 struct loop *dest_loop;
2743 struct loop *outer_loop = NULL;
2744
2745 /* Go backwards until we reach the start of the loop, a label,
2746 or a JUMP_INSN. */
2747 for (p = PREV_INSN (insn);
2748 GET_CODE (p) != CODE_LABEL
2749 && ! (GET_CODE (p) == NOTE
2750 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2751 && GET_CODE (p) != JUMP_INSN;
2752 p = PREV_INSN (p))
2753 ;
2754
2755 /* Check for the case where we have a jump to an inner nested
2756 loop, and do not perform the optimization in that case. */
2757
2758 if (JUMP_LABEL (insn))
2759 {
2760 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2761 if (dest_loop)
2762 {
2763 for (outer_loop = dest_loop; outer_loop;
2764 outer_loop = outer_loop->outer)
2765 if (outer_loop == this_loop)
2766 break;
2767 }
2768 }
2769
2770 /* Make sure that the target of P is within the current loop. */
2771
2772 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2773 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2774 outer_loop = this_loop;
2775
2776 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2777 we have a block of code to try to move.
2778
2779 We look backward and then forward from the target of INSN
2780 to find a BARRIER at the same loop depth as the target.
2781 If we find such a BARRIER, we make a new label for the start
2782 of the block, invert the jump in P and point it to that label,
2783 and move the block of code to the spot we found. */
2784
2785 if (! outer_loop
2786 && GET_CODE (p) == JUMP_INSN
2787 && JUMP_LABEL (p) != 0
2788 /* Just ignore jumps to labels that were never emitted.
2789 These always indicate compilation errors. */
2790 && INSN_UID (JUMP_LABEL (p)) != 0
2791 && any_condjump_p (p) && onlyjump_p (p)
2792 && next_real_insn (JUMP_LABEL (p)) == our_next
2793 /* If it's not safe to move the sequence, then we
2794 mustn't try. */
2795 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2796 &last_insn_to_move))
2797 {
2798 rtx target
2799 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2800 struct loop *target_loop = uid_loop[INSN_UID (target)];
2801 rtx loc, loc2;
2802 rtx tmp;
2803
2804 /* Search for possible garbage past the conditional jumps
2805 and look for the last barrier. */
2806 for (tmp = last_insn_to_move;
2807 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2808 if (GET_CODE (tmp) == BARRIER)
2809 last_insn_to_move = tmp;
2810
2811 for (loc = target; loc; loc = PREV_INSN (loc))
2812 if (GET_CODE (loc) == BARRIER
2813 /* Don't move things inside a tablejump. */
2814 && ((loc2 = next_nonnote_insn (loc)) == 0
2815 || GET_CODE (loc2) != CODE_LABEL
2816 || (loc2 = next_nonnote_insn (loc2)) == 0
2817 || GET_CODE (loc2) != JUMP_INSN
2818 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2819 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2820 && uid_loop[INSN_UID (loc)] == target_loop)
2821 break;
2822
2823 if (loc == 0)
2824 for (loc = target; loc; loc = NEXT_INSN (loc))
2825 if (GET_CODE (loc) == BARRIER
2826 /* Don't move things inside a tablejump. */
2827 && ((loc2 = next_nonnote_insn (loc)) == 0
2828 || GET_CODE (loc2) != CODE_LABEL
2829 || (loc2 = next_nonnote_insn (loc2)) == 0
2830 || GET_CODE (loc2) != JUMP_INSN
2831 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2832 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2833 && uid_loop[INSN_UID (loc)] == target_loop)
2834 break;
2835
2836 if (loc)
2837 {
2838 rtx cond_label = JUMP_LABEL (p);
2839 rtx new_label = get_label_after (p);
2840
2841 /* Ensure our label doesn't go away. */
2842 LABEL_NUSES (cond_label)++;
2843
2844 /* Verify that uid_loop is large enough and that
2845 we can invert P. */
2846 if (invert_jump (p, new_label, 1))
2847 {
2848 rtx q, r;
2849
2850 /* If no suitable BARRIER was found, create a suitable
2851 one before TARGET. Since TARGET is a fall through
2852                     path, we'll need to insert a jump around our block
2853 and add a BARRIER before TARGET.
2854
2855 This creates an extra unconditional jump outside
2856 the loop. However, the benefits of removing rarely
2857 executed instructions from inside the loop usually
2858                     outweigh the cost of the extra unconditional jump
2859 outside the loop. */
2860 if (loc == 0)
2861 {
2862 rtx temp;
2863
2864 temp = gen_jump (JUMP_LABEL (insn));
2865 temp = emit_jump_insn_before (temp, target);
2866 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2867 LABEL_NUSES (JUMP_LABEL (insn))++;
2868 loc = emit_barrier_before (target);
2869 }
2870
2871 /* Include the BARRIER after INSN and copy the
2872 block after LOC. */
2873 if (squeeze_notes (&new_label, &last_insn_to_move))
2874 abort ();
2875 reorder_insns (new_label, last_insn_to_move, loc);
2876
2877 /* All those insns are now in TARGET_LOOP. */
2878 for (q = new_label;
2879 q != NEXT_INSN (last_insn_to_move);
2880 q = NEXT_INSN (q))
2881 uid_loop[INSN_UID (q)] = target_loop;
2882
2883 /* The label jumped to by INSN is no longer a loop
2884                         exit. If INSN has a label (i.e., it is not a
2885                         RETURN insn), search loop->exit_labels
2886 to find its label_ref, and remove it. Also turn
2887 off LABEL_OUTSIDE_LOOP_P bit. */
2888 if (JUMP_LABEL (insn))
2889 {
2890 for (q = 0, r = this_loop->exit_labels;
2891 r;
2892 q = r, r = LABEL_NEXTREF (r))
2893 if (XEXP (r, 0) == JUMP_LABEL (insn))
2894 {
2895 LABEL_OUTSIDE_LOOP_P (r) = 0;
2896 if (q)
2897 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2898 else
2899 this_loop->exit_labels = LABEL_NEXTREF (r);
2900 break;
2901 }
2902
2903 for (loop = this_loop; loop && loop != target_loop;
2904 loop = loop->outer)
2905 loop->exit_count--;
2906
2907 /* If we didn't find it, then something is
2908 wrong. */
2909 if (! r)
2910 abort ();
2911 }
2912
2913 /* P is now a jump outside the loop, so it must be put
2914 in loop->exit_labels, and marked as such.
2915 The easiest way to do this is to just call
2916 mark_loop_jump again for P. */
2917 mark_loop_jump (PATTERN (p), this_loop);
2918
2919 /* If INSN now jumps to the insn after it,
2920 delete INSN. */
2921 if (JUMP_LABEL (insn) != 0
2922 && (next_real_insn (JUMP_LABEL (insn))
2923 == next_real_insn (insn)))
2924 delete_related_insns (insn);
2925 }
2926
2927 /* Continue the loop after where the conditional
2928 branch used to jump, since the only branch insn
2929 in the block (if it still remains) is an inter-loop
2930 branch and hence needs no processing. */
2931 insn = NEXT_INSN (cond_label);
2932
2933 if (--LABEL_NUSES (cond_label) == 0)
2934 delete_related_insns (cond_label);
2935
2936 /* This loop will be continued with NEXT_INSN (insn). */
2937 insn = PREV_INSN (insn);
2938 }
2939 }
2940 }
2941 }
2942 }
2943
2944 /* If any label in X jumps to a loop different from LOOP and any of the
2945 loops it is contained in, mark the target loop invalid.
2946
2947 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2948
2949 static void
2950 mark_loop_jump (x, loop)
2951 rtx x;
2952 struct loop *loop;
2953 {
2954 struct loop *dest_loop;
2955 struct loop *outer_loop;
2956 int i;
2957
2958 switch (GET_CODE (x))
2959 {
2960 case PC:
2961 case USE:
2962 case CLOBBER:
2963 case REG:
2964 case MEM:
2965 case CONST_INT:
2966 case CONST_DOUBLE:
2967 case RETURN:
2968 return;
2969
2970 case CONST:
2971 /* There could be a label reference in here. */
2972 mark_loop_jump (XEXP (x, 0), loop);
2973 return;
2974
2975 case PLUS:
2976 case MINUS:
2977 case MULT:
2978 mark_loop_jump (XEXP (x, 0), loop);
2979 mark_loop_jump (XEXP (x, 1), loop);
2980 return;
2981
2982 case LO_SUM:
2983 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2984 mark_loop_jump (XEXP (x, 1), loop);
2985 return;
2986
2987 case SIGN_EXTEND:
2988 case ZERO_EXTEND:
2989 mark_loop_jump (XEXP (x, 0), loop);
2990 return;
2991
2992 case LABEL_REF:
2993 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2994
2995 /* Link together all labels that branch outside the loop. This
2996 is used by final_[bg]iv_value and the loop unrolling code. Also
2997 mark this LABEL_REF so we know that this branch should predict
2998 false. */
2999
3000 /* A check to make sure the label is not in an inner nested loop,
3001 since this does not count as a loop exit. */
3002 if (dest_loop)
3003 {
3004 for (outer_loop = dest_loop; outer_loop;
3005 outer_loop = outer_loop->outer)
3006 if (outer_loop == loop)
3007 break;
3008 }
3009 else
3010 outer_loop = NULL;
3011
3012 if (loop && ! outer_loop)
3013 {
3014 LABEL_OUTSIDE_LOOP_P (x) = 1;
3015 LABEL_NEXTREF (x) = loop->exit_labels;
3016 loop->exit_labels = x;
3017
3018 for (outer_loop = loop;
3019 outer_loop && outer_loop != dest_loop;
3020 outer_loop = outer_loop->outer)
3021 outer_loop->exit_count++;
3022 }
3023
3024 /* If this is inside a loop, but not in the current loop or one enclosed
3025 by it, it invalidates at least one loop. */
3026
3027 if (! dest_loop)
3028 return;
3029
3030 /* We must invalidate every nested loop containing the target of this
3031 label, except those that also contain the jump insn. */
3032
3033 for (; dest_loop; dest_loop = dest_loop->outer)
3034 {
3035 /* Stop when we reach a loop that also contains the jump insn. */
3036 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3037 if (dest_loop == outer_loop)
3038 return;
3039
3040 /* If we get here, we know we need to invalidate a loop. */
3041 if (loop_dump_stream && ! dest_loop->invalid)
3042 fprintf (loop_dump_stream,
3043 "\nLoop at %d ignored due to multiple entry points.\n",
3044 INSN_UID (dest_loop->start));
3045
3046 dest_loop->invalid = 1;
3047 }
3048 return;
3049
3050 case SET:
3051 /* If this is not setting pc, ignore. */
3052 if (SET_DEST (x) == pc_rtx)
3053 mark_loop_jump (SET_SRC (x), loop);
3054 return;
3055
3056 case IF_THEN_ELSE:
3057 mark_loop_jump (XEXP (x, 1), loop);
3058 mark_loop_jump (XEXP (x, 2), loop);
3059 return;
3060
3061 case PARALLEL:
3062 case ADDR_VEC:
3063 for (i = 0; i < XVECLEN (x, 0); i++)
3064 mark_loop_jump (XVECEXP (x, 0, i), loop);
3065 return;
3066
3067 case ADDR_DIFF_VEC:
3068 for (i = 0; i < XVECLEN (x, 1); i++)
3069 mark_loop_jump (XVECEXP (x, 1, i), loop);
3070 return;
3071
3072 default:
3073 /* Strictly speaking this is not a jump into the loop, only a possible
3074 jump out of the loop. However, we have no way to link the destination
3075 of this jump onto the list of exit labels. To be safe we mark this
3076 loop and any containing loops as invalid. */
3077 if (loop)
3078 {
3079 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3080 {
3081 if (loop_dump_stream && ! outer_loop->invalid)
3082 fprintf (loop_dump_stream,
3083 "\nLoop at %d ignored due to unknown exit jump.\n",
3084 INSN_UID (outer_loop->start));
3085 outer_loop->invalid = 1;
3086 }
3087 }
3088 return;
3089 }
3090 }
3091 \f
3092 /* Return nonzero if there is a label in the range from
3093    insn INSN to and including the insn whose luid is END.
3094 INSN must have an assigned luid (i.e., it must not have
3095 been previously created by loop.c). */
3096
3097 static int
3098 labels_in_range_p (insn, end)
3099 rtx insn;
3100 int end;
3101 {
3102 while (insn && INSN_LUID (insn) <= end)
3103 {
3104 if (GET_CODE (insn) == CODE_LABEL)
3105 return 1;
3106 insn = NEXT_INSN (insn);
3107 }
3108
3109 return 0;
3110 }
3111
3112 /* Record that a memory reference X is being set. */
3113
3114 static void
3115 note_addr_stored (x, y, data)
3116 rtx x;
3117 rtx y ATTRIBUTE_UNUSED;
3118 void *data ATTRIBUTE_UNUSED;
3119 {
3120 struct loop_info *loop_info = data;
3121
3122 if (x == 0 || GET_CODE (x) != MEM)
3123 return;
3124
3125 /* Count number of memory writes.
3126 This affects heuristics in strength_reduce. */
3127 loop_info->num_mem_sets++;
3128
3129 /* BLKmode MEM means all memory is clobbered. */
3130 if (GET_MODE (x) == BLKmode)
3131 {
3132 if (RTX_UNCHANGING_P (x))
3133 loop_info->unknown_constant_address_altered = 1;
3134 else
3135 loop_info->unknown_address_altered = 1;
3136
3137 return;
3138 }
3139
3140 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3141 loop_info->store_mems);
3142 }
3143
3144 /* X is a value modified by an INSN that references a biv inside a loop
3145    exit test (i.e., X is somehow related to the value of the biv). If X
3146 is a pseudo that is used more than once, then the biv is (effectively)
3147 used more than once. DATA is a pointer to a loop_regs structure. */
3148
3149 static void
3150 note_set_pseudo_multiple_uses (x, y, data)
3151 rtx x;
3152 rtx y ATTRIBUTE_UNUSED;
3153 void *data;
3154 {
3155 struct loop_regs *regs = (struct loop_regs *) data;
3156
3157 if (x == 0)
3158 return;
3159
3160 while (GET_CODE (x) == STRICT_LOW_PART
3161 || GET_CODE (x) == SIGN_EXTRACT
3162 || GET_CODE (x) == ZERO_EXTRACT
3163 || GET_CODE (x) == SUBREG)
3164 x = XEXP (x, 0);
3165
3166 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3167 return;
3168
3169 /* If we do not have usage information, or if we know the register
3170 is used more than once, note that fact for check_dbra_loop. */
3171 if (REGNO (x) >= max_reg_before_loop
3172 || ! regs->array[REGNO (x)].single_usage
3173 || regs->array[REGNO (x)].single_usage == const0_rtx)
3174 regs->multiple_uses = 1;
3175 }
3176 \f
3177 /* Return nonzero if the rtx X is invariant over the current loop.
3178
3179 The value is 2 if we refer to something only conditionally invariant.
3180
3181 A memory ref is invariant if it is not volatile and does not conflict
3182 with anything stored in `loop_info->store_mems'. */
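3183
/* Note that callers which require unconditional invariance compare the
   result against 1 explicitly; see the "1 == loop_invariant_p (...)"
   tests in move_movables above. */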
3183
3184 int
3185 loop_invariant_p (loop, x)
3186 const struct loop *loop;
3187 rtx x;
3188 {
3189 struct loop_info *loop_info = LOOP_INFO (loop);
3190 struct loop_regs *regs = LOOP_REGS (loop);
3191 int i;
3192 enum rtx_code code;
3193 const char *fmt;
3194 int conditional = 0;
3195 rtx mem_list_entry;
3196
3197 if (x == 0)
3198 return 1;
3199 code = GET_CODE (x);
3200 switch (code)
3201 {
3202 case CONST_INT:
3203 case CONST_DOUBLE:
3204 case SYMBOL_REF:
3205 case CONST:
3206 return 1;
3207
3208 case LABEL_REF:
3209 /* A LABEL_REF is normally invariant, however, if we are unrolling
3210 loops, and this label is inside the loop, then it isn't invariant.
3211 This is because each unrolled copy of the loop body will have
3212 a copy of this label. If this was invariant, then an insn loading
3213 the address of this label into a register might get moved outside
3214 the loop, and then each loop body would end up using the same label.
3215
3216 We don't know the loop bounds here though, so just fail for all
3217 labels. */
3218 if (flag_unroll_loops)
3219 return 0;
3220 else
3221 return 1;
3222
3223 case PC:
3224 case CC0:
3225 case UNSPEC_VOLATILE:
3226 return 0;
3227
3228 case REG:
3229 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3230 since the reg might be set by initialization within the loop. */
3231
3232 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3233 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3234 && ! current_function_has_nonlocal_goto)
3235 return 1;
3236
3237 if (LOOP_INFO (loop)->has_call
3238 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3239 return 0;
3240
3241 if (regs->array[REGNO (x)].set_in_loop < 0)
3242 return 2;
3243
3244 return regs->array[REGNO (x)].set_in_loop == 0;
3245
3246 case MEM:
3247 /* Volatile memory references must be rejected. Do this before
3248 checking for read-only items, so that volatile read-only items
3249 will be rejected also. */
3250 if (MEM_VOLATILE_P (x))
3251 return 0;
3252
3253 /* See if there is any dependence between a store and this load. */
3254 mem_list_entry = loop_info->store_mems;
3255 while (mem_list_entry)
3256 {
3257 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3258 x, rtx_varies_p))
3259 return 0;
3260
3261 mem_list_entry = XEXP (mem_list_entry, 1);
3262 }
3263
3264 /* It's not invalidated by a store in memory
3265 but we must still verify the address is invariant. */
3266 break;
3267
3268 case ASM_OPERANDS:
3269 /* Don't mess with insns declared volatile. */
3270 if (MEM_VOLATILE_P (x))
3271 return 0;
3272 break;
3273
3274 default:
3275 break;
3276 }
3277
3278 fmt = GET_RTX_FORMAT (code);
3279 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3280 {
3281 if (fmt[i] == 'e')
3282 {
3283 int tem = loop_invariant_p (loop, XEXP (x, i));
3284 if (tem == 0)
3285 return 0;
3286 if (tem == 2)
3287 conditional = 1;
3288 }
3289 else if (fmt[i] == 'E')
3290 {
3291 int j;
3292 for (j = 0; j < XVECLEN (x, i); j++)
3293 {
3294 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3295 if (tem == 0)
3296 return 0;
3297 if (tem == 2)
3298 conditional = 1;
3299 }
3300
3301 }
3302 }
3303
3304 return 1 + conditional;
3305 }
3306 \f
3307 /* Return nonzero if all the insns in the loop that set REG
3308 are INSN and the immediately following insns,
3309 and if each of those insns sets REG in an invariant way
3310 (not counting uses of REG in them).
3311
3312 The value is 2 if some of these insns are only conditionally invariant.
3313
3314 We assume that INSN itself is the first set of REG
3315 and that its source is invariant. */
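3316
/* The early return for N_SETS == 127 below matches the saturation of
   set_in_loop at 127 in count_one_set later in this file; once the
   counter has saturated it may understate the true number of sets. */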
3316
3317 static int
3318 consec_sets_invariant_p (loop, reg, n_sets, insn)
3319 const struct loop *loop;
3320 int n_sets;
3321 rtx reg, insn;
3322 {
3323 struct loop_regs *regs = LOOP_REGS (loop);
3324 rtx p = insn;
3325 unsigned int regno = REGNO (reg);
3326 rtx temp;
3327 /* Number of sets we have to insist on finding after INSN. */
3328 int count = n_sets - 1;
3329 int old = regs->array[regno].set_in_loop;
3330 int value = 0;
3331 int this;
3332
3333 /* If N_SETS hit the limit, we can't rely on its value. */
3334 if (n_sets == 127)
3335 return 0;
3336
3337 regs->array[regno].set_in_loop = 0;
3338
3339 while (count > 0)
3340 {
3341 enum rtx_code code;
3342 rtx set;
3343
3344 p = NEXT_INSN (p);
3345 code = GET_CODE (p);
3346
3347 /* If library call, skip to end of it. */
3348 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3349 p = XEXP (temp, 0);
3350
3351 this = 0;
3352 if (code == INSN
3353 && (set = single_set (p))
3354 && GET_CODE (SET_DEST (set)) == REG
3355 && REGNO (SET_DEST (set)) == regno)
3356 {
3357 this = loop_invariant_p (loop, SET_SRC (set));
3358 if (this != 0)
3359 value |= this;
3360 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3361 {
3362 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3363 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3364 notes are OK. */
3365 this = (CONSTANT_P (XEXP (temp, 0))
3366 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3367 && loop_invariant_p (loop, XEXP (temp, 0))));
3368 if (this != 0)
3369 value |= this;
3370 }
3371 }
3372 if (this != 0)
3373 count--;
3374 else if (code != NOTE)
3375 {
3376 regs->array[regno].set_in_loop = old;
3377 return 0;
3378 }
3379 }
3380
3381 regs->array[regno].set_in_loop = old;
3382 /* If loop_invariant_p ever returned 2, we return 2. */
3383 return 1 + (value & 2);
3384 }
3385
3386 #if 0
3387 /* I don't think this condition is sufficient to allow INSN
3388 to be moved, so we no longer test it. */
3389
3390 /* Return 1 if all insns in the basic block of INSN and following INSN
3391 that set REG are invariant according to TABLE. */
3392
3393 static int
3394 all_sets_invariant_p (reg, insn, table)
3395 rtx reg, insn;
3396 short *table;
3397 {
3398 rtx p = insn;
3399 int regno = REGNO (reg);
3400
3401 while (1)
3402 {
3403 enum rtx_code code;
3404 p = NEXT_INSN (p);
3405 code = GET_CODE (p);
3406 if (code == CODE_LABEL || code == JUMP_INSN)
3407 return 1;
3408 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3409 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3410 && REGNO (SET_DEST (PATTERN (p))) == regno)
3411 {
3412 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3413 return 0;
3414 }
3415 }
3416 }
3417 #endif /* 0 */
3418 \f
3419 /* Look at all uses (not sets) of registers in X. For each, if it is
3420 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3421 a different insn, set USAGE[REGNO] to const0_rtx. */
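3422
/* (The const0_rtx value used as the "more than one use" marker is what
   note_set_pseudo_multiple_uses above tests single_usage against.) */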
3422
3423 static void
3424 find_single_use_in_loop (regs, insn, x)
3425 struct loop_regs *regs;
3426 rtx insn;
3427 rtx x;
3428 {
3429 enum rtx_code code = GET_CODE (x);
3430 const char *fmt = GET_RTX_FORMAT (code);
3431 int i, j;
3432
3433 if (code == REG)
3434 regs->array[REGNO (x)].single_usage
3435 = (regs->array[REGNO (x)].single_usage != 0
3436 && regs->array[REGNO (x)].single_usage != insn)
3437 ? const0_rtx : insn;
3438
3439 else if (code == SET)
3440 {
3441 /* Don't count SET_DEST if it is a REG; otherwise count things
3442 in SET_DEST because if a register is partially modified, it won't
3443 show up as a potential movable so we don't care how USAGE is set
3444 for it. */
3445 if (GET_CODE (SET_DEST (x)) != REG)
3446 find_single_use_in_loop (regs, insn, SET_DEST (x));
3447 find_single_use_in_loop (regs, insn, SET_SRC (x));
3448 }
3449 else
3450 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3451 {
3452 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3453 find_single_use_in_loop (regs, insn, XEXP (x, i));
3454 else if (fmt[i] == 'E')
3455 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3456 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3457 }
3458 }
3459 \f
3460 /* Count and record any set in X which is contained in INSN. Update
3461 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3462 in X. */
3463
3464 static void
3465 count_one_set (regs, insn, x, last_set)
3466 struct loop_regs *regs;
3467 rtx insn, x;
3468 rtx *last_set;
3469 {
3470 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3471 /* Don't move a reg that has an explicit clobber.
3472 It's not worth the pain to try to do it correctly. */
3473 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3474
3475 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3476 {
3477 rtx dest = SET_DEST (x);
3478 while (GET_CODE (dest) == SUBREG
3479 || GET_CODE (dest) == ZERO_EXTRACT
3480 || GET_CODE (dest) == SIGN_EXTRACT
3481 || GET_CODE (dest) == STRICT_LOW_PART)
3482 dest = XEXP (dest, 0);
3483 if (GET_CODE (dest) == REG)
3484 {
3485 int i;
3486 int regno = REGNO (dest);
3487 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3488 {
3489 /* If this is the first setting of this reg
3490 in current basic block, and it was set before,
3491 it must be set in two basic blocks, so it cannot
3492 be moved out of the loop. */
3493 if (regs->array[regno].set_in_loop > 0
3494                  && last_set[regno] == 0)
3495 regs->array[regno+i].may_not_optimize = 1;
3496 /* If this is not first setting in current basic block,
3497 see if reg was used in between previous one and this.
3498 If so, neither one can be moved. */
3499 if (last_set[regno] != 0
3500 && reg_used_between_p (dest, last_set[regno], insn))
3501 regs->array[regno+i].may_not_optimize = 1;
3502 if (regs->array[regno+i].set_in_loop < 127)
3503 ++regs->array[regno+i].set_in_loop;
3504 last_set[regno+i] = insn;
3505 }
3506 }
3507 }
3508 }
3509 \f
3510 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3511 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3512 contained in insn INSN is used by any insn that precedes INSN in
3513 cyclic order starting from the loop entry point.
3514
3515 We don't want to use INSN_LUID here because if we restrict INSN to those
3516 that have a valid INSN_LUID, it means we cannot move an invariant out
3517 from an inner loop past two loops. */
3518
3519 static int
3520 loop_reg_used_before_p (loop, set, insn)
3521 const struct loop *loop;
3522 rtx set, insn;
3523 {
3524 rtx reg = SET_DEST (set);
3525 rtx p;
3526
3527 /* Scan forward checking for register usage. If we hit INSN, we
3528 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3529 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3530 {
3531 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3532 return 1;
3533
3534 if (p == loop->end)
3535 p = loop->start;
3536 }
3537
3538 return 0;
3539 }
3540 \f
3541
3542 /* Information we collect about arrays that we might want to prefetch. */
3543 struct prefetch_info
3544 {
3545 struct iv_class *class; /* Class this prefetch is based on. */
3546 struct induction *giv; /* GIV this prefetch is based on. */
3547 rtx base_address; /* Start prefetching from this address plus
3548 index. */
3549 HOST_WIDE_INT index;
3550 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3551 iteration. */
3552 unsigned int bytes_accesed; /* Sum of sizes of all accesses to this
3553 prefetch area in one iteration. */
3554 unsigned int total_bytes; /* Total bytes loop will access in this block.
3555 This is set only for loops with known
3556 iteration counts and is 0xffffffff
3557 otherwise. */
3558 unsigned int write : 1; /* 1 for read/write prefetches. */
3559 unsigned int prefetch_in_loop : 1;
3560 /* 1 if prefetch insns should be emitted within the loop body.  */
3561 unsigned int prefetch_before_loop : 1;
3562 /* 1 if prefetch insns should be emitted before the loop.  */
3563 };
3564
3565 /* Data used by check_store function. */
3566 struct check_store_data
3567 {
3568 rtx mem_address;
3569 int mem_write;
3570 };
3571
3572 static void check_store PARAMS ((rtx, rtx, void *));
3573 static void emit_prefetch_instructions PARAMS ((struct loop *));
3574 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3575
3576 /* Set mem_write in the check_store_data pointed to by DATA when a store
3577    to its mem_address is found.  Used as a callback to note_stores.  */
3578 static void
3579 check_store (x, pat, data)
3580 rtx x, pat ATTRIBUTE_UNUSED;
3581 void *data;
3582 {
3583 struct check_store_data *d = (struct check_store_data *) data;
3584
3585 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3586 d->mem_write = 1;
3587 }
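/* A minimal usage sketch (this mirrors the call made in
   emit_prefetch_instructions below; `iv' stands for whatever DEST_ADDR
   giv the caller happens to be examining):

	struct check_store_data d;

	d.mem_write = 0;
	d.mem_address = *iv->location;
	note_stores (PATTERN (iv->insn), check_store, &d);
	if (d.mem_write)
	  ... the insn stores through that address ...
*/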
3588 \f
3589 /* Like rtx_equal_p, but attempts to swap commutative operands.  This is
3590 important to get some addresses combined.  Later, more sophisticated
3591 transformations can be added when necessary.
3592 
3593 ??? The same trick of swapping operands is done in several other places.
3594 It would be nice to develop some common way to handle this.  */
3595
3596 static int
3597 rtx_equal_for_prefetch_p (x, y)
3598 rtx x, y;
3599 {
3600 int i;
3601 int j;
3602 enum rtx_code code = GET_CODE (x);
3603 const char *fmt;
3604
3605 if (x == y)
3606 return 1;
3607 if (code != GET_CODE (y))
3608 return 0;
3609
3610 code = GET_CODE (x);
3611
3612 if (GET_RTX_CLASS (code) == 'c')
3613 {
3614 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3615 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3616 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3617 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3618 }
3619 /* Compare the elements. If any pair of corresponding elements fails to
3620 match, return 0 for the whole thing. */
3621
3622 fmt = GET_RTX_FORMAT (code);
3623 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3624 {
3625 switch (fmt[i])
3626 {
3627 case 'w':
3628 if (XWINT (x, i) != XWINT (y, i))
3629 return 0;
3630 break;
3631
3632 case 'i':
3633 if (XINT (x, i) != XINT (y, i))
3634 return 0;
3635 break;
3636
3637 case 'E':
3638 /* Two vectors must have the same length. */
3639 if (XVECLEN (x, i) != XVECLEN (y, i))
3640 return 0;
3641
3642 /* And the corresponding elements must match. */
3643 for (j = 0; j < XVECLEN (x, i); j++)
3644 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3645 XVECEXP (y, i, j)) == 0)
3646 return 0;
3647 break;
3648
3649 case 'e':
3650 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3651 return 0;
3652 break;
3653
3654 case 's':
3655 if (strcmp (XSTR (x, i), XSTR (y, i)))
3656 return 0;
3657 break;
3658
3659 case 'u':
3660 /* These are just backpointers, so they don't matter. */
3661 break;
3662
3663 case '0':
3664 break;
3665
3666 /* It is believed that rtx's at this level will never
3667 contain anything but integers and other rtx's,
3668 except for within LABEL_REFs and SYMBOL_REFs. */
3669 default:
3670 abort ();
3671 }
3672 }
3673 return 1;
3674 }
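/* For example (illustrative rtx only), (plus (reg 100) (const_int 4)) and
   (plus (const_int 4) (reg 100)) compare equal here because PLUS belongs to
   the commutative class 'c', whereas a purely structural comparison of the
   two trees would fail; operands of non-commutative codes must still match
   in order.  */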
3675 \f
3676 /* Remove constant addition value from the expression X (when present)
3677 and return it. */
3678
3679 static HOST_WIDE_INT
3680 remove_constant_addition (x)
3681 rtx *x;
3682 {
3683 HOST_WIDE_INT addval = 0;
3684 rtx exp = *x;
3685
3686 /* Avoid clobbering a shared CONST expression. */
3687 if (GET_CODE (exp) == CONST)
3688 {
3689 if (GET_CODE (XEXP (exp, 0)) == PLUS
3690 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3691 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3692 {
3693 *x = XEXP (XEXP (exp, 0), 0);
3694 return INTVAL (XEXP (XEXP (exp, 0), 1));
3695 }
3696 return 0;
3697 }
3698
3699 if (GET_CODE (exp) == CONST_INT)
3700 {
3701 addval = INTVAL (exp);
3702 *x = const0_rtx;
3703 }
3704
3705 /* For a PLUS expression, recurse on both operands.  */
3706 else if (GET_CODE (exp) == PLUS)
3707 {
3708 addval += remove_constant_addition (&XEXP (exp, 0));
3709 addval += remove_constant_addition (&XEXP (exp, 1));
3710
3711 /* If either operand was constant, remove the resulting extra
3712 zero from the expression.  */
3713 if (XEXP (exp, 0) == const0_rtx)
3714 *x = XEXP (exp, 1);
3715 else if (XEXP (exp, 1) == const0_rtx)
3716 *x = XEXP (exp, 0);
3717 }
3718
3719 return addval;
3720 }
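/* Illustrative examples (rtx shown schematically): given
   *X == (plus (reg 100) (const_int 16)), the call rewrites *X to (reg 100)
   and returns 16; given *X == (const (plus (symbol_ref "a") (const_int 8))),
   it leaves (symbol_ref "a") behind and returns 8; a bare (const_int 5)
   becomes const0_rtx and 5 is returned.  */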
3721
3722 /* Attempt to identify accesses to arrays that are most likely to cause cache
3723 misses, and emit prefetch instructions a few prefetch blocks forward.
3724
3725 To detect the arrays we use the GIV information that was collected by the
3726 strength reduction pass.
3727
3728 The prefetch instructions are generated after the GIV information is done
3729 and before the strength reduction process. The new GIVs are injected into
3730 the strength reduction tables, so the prefetch addresses are optimized as
3731 well.
3732
3733 GIVs are split into a base address, a stride, and a constant addition value.
3734 GIVs with the same base address and stride and with close addition values
3735 are combined into a single prefetch.  Writes through GIVs are also detected,
3736 so that write prefetches can be used for the blocks we write to, on machines
3737 that support them.
3738
3739 Several heuristics are used to determine when to prefetch. They are
3740 controlled by defined symbols that can be overridden for each target. */
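/* As a concrete (purely illustrative) sketch: for a loop that walks an int
   array with `p[i] = 0;', the DEST_ADDR giv for the store decomposes into
   base address `p', a stride of 4 bytes per iteration, and a constant index
   of 0; a second access `p[i + 1]' in the same loop yields the same base and
   stride with index 4, and the two are merged into a single prefetch stream,
   since their indexes differ by far less than the 4096-byte merge threshold
   used below.  (The element size of 4 is arbitrary.)  */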
3741
3742 static void
3743 emit_prefetch_instructions (loop)
3744 struct loop *loop;
3745 {
3746 int num_prefetches = 0;
3747 int num_real_prefetches = 0;
3748 int num_real_write_prefetches = 0;
3749 int ahead;
3750 int i;
3751 struct iv_class *bl;
3752 struct induction *iv;
3753 struct prefetch_info info[MAX_PREFETCHES];
3754 struct loop_ivs *ivs = LOOP_IVS (loop);
3755
3756 if (!HAVE_prefetch)
3757 return;
3758
3759 /* Consider only loops without calls.  A loop containing a call is probably
3760 slow enough already that the memory latency is hidden.  */
3761 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3762 {
3763 if (loop_dump_stream)
3764 fprintf (loop_dump_stream, "Prefetch: ignoring loop - has call.\n");
3765
3766 return;
3767 }
3768
3769 if (PREFETCH_NO_LOW_LOOPCNT
3770 && LOOP_INFO (loop)->n_iterations
3771 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3772 {
3773 if (loop_dump_stream)
3774 fprintf (loop_dump_stream,
3775 "Prefetch: ignoring loop - not enought iterations.\n");
3776 return;
3777 }
3778
3779 /* Search all induction variables and pick those interesting for the prefetch
3780 machinery. */
3781 for (bl = ivs->list; bl; bl = bl->next)
3782 {
3783 struct induction *biv = bl->biv, *biv1;
3784 int basestride = 0;
3785
3786 biv1 = biv;
3787
3788 /* Expect all BIVs to be executed in each iteration. This makes our
3789 analysis more conservative. */
3790 while (biv1)
3791 {
3792 /* Discard non-constant additions that we can't handle well yet, and
3793 BIVs that are executed multiple times; such BIVs ought to be
3794 handled in the nested loop. We accept not_every_iteration BIVs,
3795 since these only result in larger strides and make our
3796 heuristics more conservative.
3797 ??? What does the last sentence mean? */
3798 if (GET_CODE (biv->add_val) != CONST_INT)
3799 {
3800 if (loop_dump_stream)
3801 {
3802 fprintf (loop_dump_stream,
3803 "Prefetch: biv %i ignored: non-constant addition at insn %i:",
3804 REGNO (biv->src_reg), INSN_UID (biv->insn));
3805 print_rtl (loop_dump_stream, biv->add_val);
3806 fprintf (loop_dump_stream, "\n");
3807 }
3808 break;
3809 }
3810
3811 if (biv->maybe_multiple)
3812 {
3813 if (loop_dump_stream)
3814 {
3815 fprintf (loop_dump_stream,
3816 "Prefetch: biv %i ignored: maybe_multiple at insn %i:",
3817 REGNO (biv->src_reg), INSN_UID (biv->insn));
3818 print_rtl (loop_dump_stream, biv->add_val);
3819 fprintf (loop_dump_stream, "\n");
3820 }
3821 break;
3822 }
3823
3824 basestride += INTVAL (biv1->add_val);
3825 biv1 = biv1->next_iv;
3826 }
3827
3828 if (biv1 || !basestride)
3829 continue;
3830
3831 for (iv = bl->giv; iv; iv = iv->next_iv)
3832 {
3833 rtx address;
3834 rtx temp;
3835 HOST_WIDE_INT index = 0;
3836 int add = 1;
3837 HOST_WIDE_INT stride;
3838 struct check_store_data d;
3839 int size = GET_MODE_SIZE (GET_MODE (iv));
3840
3841 /* There are several reasons why an induction variable is not
3842 interesting to us. */
3843 if (iv->giv_type != DEST_ADDR
3844 /* We are interested only in constant stride memory references
3845 in order to be able to compute density easily. */
3846 || GET_CODE (iv->mult_val) != CONST_INT
3847 /* Don't handle reversed order prefetches, since they are usually
3848 ineffective. Later we may be able to reverse such BIVs. */
3849 || (PREFETCH_NO_REVERSE_ORDER
3850 && (stride = INTVAL (iv->mult_val) * basestride) < 0)
3851 /* Prefetching of accesses with such an extreme stride is probably
3852 not worthwhile, either. */
3853 || (PREFETCH_NO_EXTREME_STRIDE
3854 && stride > PREFETCH_EXTREME_STRIDE)
3855 /* Ignore GIVs with varying add values; we can't predict the
3856 value for the next iteration. */
3857 || !loop_invariant_p (loop, iv->add_val)
3858 /* Ignore GIVs in the nested loops; they ought to have been
3859 handled already. */
3860 || iv->maybe_multiple)
3861 {
3862 if (loop_dump_stream)
3863 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %i\n",
3864 INSN_UID (iv->insn));
3865 continue;
3866 }
3867
3868 /* Determine the pointer to the basic array we are examining. It is
3869 the sum of the BIV's initial value and the GIV's add_val. */
3870 index = 0;
3871
3872 address = copy_rtx (iv->add_val);
3873 temp = copy_rtx (bl->initial_value);
3874
3875 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3876 index = remove_constant_addition (&address);
3877
3878 index += size;
3879 d.mem_write = 0;
3880 d.mem_address = *iv->location;
3881
3882 /* When the GIV is not always executed, we might be better off by
3883 not dirtying the cache pages. */
3884 if (PREFETCH_NOT_ALWAYS || iv->always_executed)
3885 note_stores (PATTERN (iv->insn), check_store, &d);
3886
3887 /* Attempt to find another prefetch to the same array and see if we
3888 can merge this one. */
3889 for (i = 0; i < num_prefetches; i++)
3890 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3891 && stride == info[i].stride)
3892 {
3893 /* If both access the same array (the same location,
3894 differing only by a small constant in the indexes), merge
3895 the prefetches.  Just do the later one; the earlier one will
3896 get prefetched by the previous iteration.
3897 4096 is an artificial threshold.  It should not be too small,
3898 but also not bigger than the small portion of memory usually
3899 traversed by a single loop.  */
3900 if (index >= info[i].index && index - info[i].index < 4096)
3901 {
3902 info[i].write |= d.mem_write;
3903 info[i].bytes_accesed += size;
3904 info[i].index = index;
3905 info[i].giv = iv;
3906 info[i].class = bl;
3907 info[i].base_address = address;
3908 add = 0;
3909 break;
3910 }
3911
3912 if (index < info[i].index && info[i].index - index < 4096)
3913 {
3914 info[i].write |= d.mem_write;
3915 info[i].bytes_accesed += size;
3916 add = 0;
3917 break;
3918 }
3919 }
3920
3921 /* Merging failed. */
3922 if (add)
3923 {
3924 info[num_prefetches].giv = iv;
3925 info[num_prefetches].class = bl;
3926 info[num_prefetches].index = index;
3927 info[num_prefetches].stride = stride;
3928 info[num_prefetches].base_address = address;
3929 info[num_prefetches].write = d.mem_write;
3930 info[num_prefetches].bytes_accesed = size;
3931 num_prefetches++;
3932 if (num_prefetches >= MAX_PREFETCHES)
3933 {
3934 if (loop_dump_stream)
3935 fprintf (loop_dump_stream,
3936 "Maximal number of prefetches exceeded.\n");
3937 return;
3938 }
3939 }
3940 }
3941 }
3942
3943 for (i = 0; i < num_prefetches; i++)
3944 {
3945 /* Attempt to calculate the number of bytes fetched by the loop.
3946 Avoid overflow. */
3947 if (LOOP_INFO (loop)->n_iterations
3948 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3949 >= LOOP_INFO (loop)->n_iterations))
3950 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3951 else
3952 info[i].total_bytes = 0xffffffff;
3953
3954 /* Prefetch is worthwhile only when the loads/stores are dense. */
3955 if (PREFETCH_ONLY_DENSE_MEM
3956 && info[i].bytes_accesed * 256 / info[i].stride > PREFETCH_DENSE_MEM
3957 && (info[i].total_bytes / PREFETCH_BLOCK
3958 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
3959 {
3960 info[i].prefetch_before_loop = 1;
3961 info[i].prefetch_in_loop
3962 = (info[i].total_bytes / PREFETCH_BLOCK
3963 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
3964 }
3965 else
3966 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
3967
3968 if (info[i].prefetch_in_loop)
3969 {
3970 num_real_prefetches += ((info[i].stride + PREFETCH_BLOCK - 1)
3971 / PREFETCH_BLOCK);
3972 if (info[i].write)
3973 num_real_write_prefetches
3974 += (info[i].stride + PREFETCH_BLOCK - 1) / PREFETCH_BLOCK;
3975 }
3976 }
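/* To make the densities above concrete (numbers purely illustrative):
   with the default PREFETCH_BLOCK of 32, a giv that touches 8 bytes per
   iteration with a 16-byte stride scores 8 * 256 / 16 == 128 in the
   density test (the dump below prints the same ratio as 8 * 100 / 16,
   i.e. 50%), and a 64-byte stride needs (64 + 31) / 32 == 2 real
   prefetches per iteration.  */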
3977
3978 if (loop_dump_stream)
3979 {
3980 for (i = 0; i < num_prefetches; i++)
3981 {
3982 fprintf (loop_dump_stream, "Prefetch insn %i address: ",
3983 INSN_UID (info[i].giv->insn));
3984 print_rtl (loop_dump_stream, info[i].base_address);
3985 fprintf (loop_dump_stream, " Index: ");
3986 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
3987 fprintf (loop_dump_stream, " stride: ");
3988 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
3989 fprintf (loop_dump_stream,
3990 " density: %i%% total_bytes: %u%sin loop: %s before: %s\n",
3991 (int) (info[i].bytes_accesed * 100 / info[i].stride),
3992 info[i].total_bytes,
3993 info[i].write ? " read/write " : " read only ",
3994 info[i].prefetch_in_loop ? "yes" : "no",
3995 info[i].prefetch_before_loop ? "yes" : "no");
3996 }
3997
3998 fprintf (loop_dump_stream, "Real prefetches needed: %i (write: %i)\n",
3999 num_real_prefetches, num_real_write_prefetches);
4000 }
4001
4002 if (!num_real_prefetches)
4003 return;
4004
4005 ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches;
4006
4007 if (!ahead)
4008 return;
4009
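/* E.g. with the default SIMULTANEOUS_PREFETCHES of 3 and two real
   prefetches per iteration, ahead == 1, so the in-loop prefetches emitted
   below target addresses PREFETCH_BLOCK * (1 + y) bytes past the current
   access.  */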
4010 for (i = 0; i < num_prefetches; i++)
4011 {
4012 if (info[i].prefetch_in_loop)
4013 {
4014 int y;
4015
4016 for (y = 0; y < ((info[i].stride + PREFETCH_BLOCK - 1)
4017 / PREFETCH_BLOCK); y++)
4018 {
4019 rtx loc = copy_rtx (*info[i].giv->location);
4020 rtx insn;
4021 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4022 rtx before_insn = info[i].giv->insn;
4023 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4024
4025 /* We can save some effort by offsetting the address on
4026 architectures with offsettable memory references. */
4027 if (offsettable_address_p (0, VOIDmode, loc))
4028 loc = plus_constant (loc, bytes_ahead);
4029 else
4030 {
4031 rtx reg = gen_reg_rtx (Pmode);
4032 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4033 GEN_INT (bytes_ahead), reg,
4034 0, before_insn);
4035 loc = reg;
4036 }
4037
4038 /* Make sure the address operand is valid for prefetch. */
4039 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4040 (loc,
4041 insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4042 loc = force_reg (Pmode, loc);
4043 emit_insn_before (gen_prefetch (loc, GEN_INT (info[i].write),
4044 GEN_INT (3)),
4045 before_insn);
4046
4047 /* Check all insns emitted and record the new GIV
4048 information. */
4049 insn = NEXT_INSN (prev_insn);
4050 while (insn != before_insn)
4051 {
4052 insn = check_insn_for_givs (loop, insn,
4053 info[i].giv->always_executed,
4054 info[i].giv->maybe_multiple);
4055 insn = NEXT_INSN (insn);
4056 }
4057 }
4058 }
4059
4060 if (info[i].prefetch_before_loop)
4061 {
4062 int y;
4063
4064 /* Emit INSNs before the loop to fetch the first cache lines. */
4065 for (y = 0;
4066 (!info[i].prefetch_in_loop || y < ahead)
4067 && y * PREFETCH_BLOCK < (int) info[i].total_bytes; y ++)
4068 {
4069 rtx reg = gen_reg_rtx (Pmode);
4070 rtx loop_start = loop->start;
4071 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4072 info[i].giv->add_val,
4073 GEN_INT (y * PREFETCH_BLOCK));
4074
4075 loop_iv_add_mult_emit_before (loop, info[i].class->initial_value,
4076 info[i].giv->mult_val,
4077 add_val, reg, 0, loop_start);
4078 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4079 GEN_INT (3)),
4080 loop_start);
4081 }
4082 }
4083 }
4084
4085 return;
4086 }
4087 \f
4088 /* A "basic induction variable" or biv is a pseudo reg that is set
4089 (within this loop) only by incrementing or decrementing it. */
4090 /* A "general induction variable" or giv is a pseudo reg whose
4091 value is a linear function of a biv. */
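/* For example, in a loop like

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the register holding `i' is a biv (the loop changes it only by adding
   the constant 1), while a register computed as `&a[0] + 4 * i' to address
   the store is a giv: its value is mult_val * biv + add_val.
   (Illustrative only; the element size of 4 is arbitrary.)  */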
4092
4093 /* Bivs are recognized by `basic_induction_var';
4094 Givs by `general_induction_var'. */
4095
4096 /* Communication with routines called via `note_stores'. */
4097
4098 static rtx note_insn;
4099
4100 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4101
4102 static rtx addr_placeholder;
4103
4104 /* ??? Unfinished optimizations, and possible future optimizations,
4105 for the strength reduction code. */
4106
4107 /* ??? The interaction of biv elimination, and recognition of 'constant'
4108 bivs, may cause problems. */
4109
4110 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4111 performance problems.
4112
4113 Perhaps don't eliminate things that can be combined with an addressing
4114 mode. Find all givs that have the same biv, mult_val, and add_val;
4115 then for each giv, check to see if its only use dies in a following
4116 memory address. If so, generate a new memory address and check to see
4117 if it is valid. If it is valid, then store the modified memory address,
4118 otherwise, mark the giv as not done so that it will get its own iv. */
4119
4120 /* ??? Could try to optimize branches when it is known that a biv is always
4121 positive. */
4122
4123 /* ??? When replacing a biv in a compare insn, we should replace it with the
4124 closest giv so that an optimized branch can still be recognized by the
4125 combiner, e.g. the VAX acb insn.  */
4126
4127 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4128 was rerun in loop_optimize whenever a register was added or moved.
4129 Also, some of the optimizations could be a little less conservative. */
4130 \f
4131 /* Scan the loop body and call FNCALL for each insn.  In addition to the
4132    LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4133    callback.
4134 
4135    NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
4136    for every loop iteration except for the last one.
4137 
4138    MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4139    every loop iteration.
4140 */
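/* Typical usage, as in loop_bivs_find and loop_givs_find below
   (both callbacks are the real ones defined later in this file):

	for_each_insn_in_loop (loop, check_insn_for_bivs);
	for_each_insn_in_loop (loop, check_insn_for_givs);
*/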
4141 void
4142 for_each_insn_in_loop (loop, fncall)
4143 struct loop *loop;
4144 loop_insn_callback fncall;
4145 {
4146 /* This is 1 if current insn is not executed at least once for every loop
4147 iteration. */
4148 int not_every_iteration = 0;
4149 int maybe_multiple = 0;
4150 int past_loop_latch = 0;
4151 int loop_depth = 0;
4152 rtx p;
4153
4154 /* If loop_scan_start points to the loop exit test, we have to be wary of
4155 subversive use of gotos inside expression statements. */
4156 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4157 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4158
4159 /* Scan through loop to find all possible bivs. */
4160
4161 for (p = next_insn_in_loop (loop, loop->scan_start);
4162 p != NULL_RTX;
4163 p = next_insn_in_loop (loop, p))
4164 {
4165 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4166
4167 /* Past CODE_LABEL, we get to insns that may be executed multiple
4168 times. The only way we can be sure that they can't is if every
4169 jump insn between here and the end of the loop either
4170 returns, exits the loop, is a jump to a location that is still
4171 behind the label, or is a jump to the loop start. */
4172
4173 if (GET_CODE (p) == CODE_LABEL)
4174 {
4175 rtx insn = p;
4176
4177 maybe_multiple = 0;
4178
4179 while (1)
4180 {
4181 insn = NEXT_INSN (insn);
4182 if (insn == loop->scan_start)
4183 break;
4184 if (insn == loop->end)
4185 {
4186 if (loop->top != 0)
4187 insn = loop->top;
4188 else
4189 break;
4190 if (insn == loop->scan_start)
4191 break;
4192 }
4193
4194 if (GET_CODE (insn) == JUMP_INSN
4195 && GET_CODE (PATTERN (insn)) != RETURN
4196 && (!any_condjump_p (insn)
4197 || (JUMP_LABEL (insn) != 0
4198 && JUMP_LABEL (insn) != loop->scan_start
4199 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4200 {
4201 maybe_multiple = 1;
4202 break;
4203 }
4204 }
4205 }
4206
4207 /* Past a jump, we get to insns for which we can't count
4208 on whether they will be executed during each iteration. */
4209 /* This code appears twice in strength_reduce. There is also similar
4210 code in scan_loop. */
4211 if (GET_CODE (p) == JUMP_INSN
4212 /* If we enter the loop in the middle, and scan around to the
4213 beginning, don't set not_every_iteration for that.
4214 This can be any kind of jump, since we want to know if insns
4215 will be executed if the loop is executed. */
4216 && !(JUMP_LABEL (p) == loop->top
4217 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4218 && any_uncondjump_p (p))
4219 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4220 {
4221 rtx label = 0;
4222
4223 /* If this is a jump outside the loop, then it also doesn't
4224 matter. Check to see if the target of this branch is on the
4225 loop->exits_labels list. */
4226
4227 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4228 if (XEXP (label, 0) == JUMP_LABEL (p))
4229 break;
4230
4231 if (!label)
4232 not_every_iteration = 1;
4233 }
4234
4235 else if (GET_CODE (p) == NOTE)
4236 {
4237 /* At the virtual top of a converted loop, insns are again known to
4238 be executed each iteration: logically, the loop begins here
4239 even though the exit code has been duplicated.
4240
4241 Insns are also again known to be executed each iteration at
4242 the LOOP_CONT note. */
4243 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4244 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4245 && loop_depth == 0)
4246 not_every_iteration = 0;
4247 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4248 loop_depth++;
4249 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4250 loop_depth--;
4251 }
4252
4253 /* Note if we pass a loop latch. If we do, then we can not clear
4254 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4255 a loop since a jump before the last CODE_LABEL may have started
4256 a new loop iteration.
4257
4258 Note that LOOP_TOP is only set for rotated loops and we need
4259 this check for all loops, so compare against the CODE_LABEL
4260 which immediately follows LOOP_START. */
4261 if (GET_CODE (p) == JUMP_INSN
4262 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4263 past_loop_latch = 1;
4264
4265 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4266 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4267 or not an insn is known to be executed each iteration of the
4268 loop, whether or not any iterations are known to occur.
4269
4270 Therefore, if we have just passed a label and have no more labels
4271 between here and the test insn of the loop, and we have not passed
4272 a jump to the top of the loop, then we know these insns will be
4273 executed each iteration. */
4274
4275 if (not_every_iteration
4276 && !past_loop_latch
4277 && GET_CODE (p) == CODE_LABEL
4278 && no_labels_between_p (p, loop->end)
4279 && loop_insn_first_p (p, loop->cont))
4280 not_every_iteration = 0;
4281 }
4282 }
4283 \f
4284 static void
4285 loop_bivs_find (loop)
4286 struct loop *loop;
4287 {
4288 struct loop_regs *regs = LOOP_REGS (loop);
4289 struct loop_ivs *ivs = LOOP_IVS (loop);
4290 /* Temporary list pointers for traversing ivs->list. */
4291 struct iv_class *bl, **backbl;
4292
4293 ivs->list = 0;
4294
4295 for_each_insn_in_loop (loop, check_insn_for_bivs);
4296
4297 /* Scan ivs->list to remove all regs that proved not to be bivs.
4298 Make a sanity check against regs->n_times_set. */
4299 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4300 {
4301 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4302 /* Above happens if register modified by subreg, etc. */
4303 /* Make sure it is not recognized as a basic induction var: */
4304 || regs->array[bl->regno].n_times_set != bl->biv_count
4305 /* If never incremented, it is invariant that we decided not to
4306 move. So leave it alone. */
4307 || ! bl->incremented)
4308 {
4309 if (loop_dump_stream)
4310 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4311 bl->regno,
4312 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4313 ? "not induction variable"
4314 : (! bl->incremented ? "never incremented"
4315 : "count error")));
4316
4317 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4318 *backbl = bl->next;
4319 }
4320 else
4321 {
4322 backbl = &bl->next;
4323
4324 if (loop_dump_stream)
4325 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4326 }
4327 }
4328 }
4329
4330
4331 /* Determine how BIVs are initialized by looking through the pre-header
4332    extended basic block.  */
4333 static void
4334 loop_bivs_init_find (loop)
4335 struct loop *loop;
4336 {
4337 struct loop_ivs *ivs = LOOP_IVS (loop);
4338 /* Temporary list pointers for traversing ivs->list. */
4339 struct iv_class *bl;
4340 int call_seen;
4341 rtx p;
4342
4343 /* Find initial value for each biv by searching backwards from loop_start,
4344 halting at first label. Also record any test condition. */
4345
4346 call_seen = 0;
4347 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4348 {
4349 rtx test;
4350
4351 note_insn = p;
4352
4353 if (GET_CODE (p) == CALL_INSN)
4354 call_seen = 1;
4355
4356 if (INSN_P (p))
4357 note_stores (PATTERN (p), record_initial, ivs);
4358
4359 /* Record any test of a biv that branches around the loop if there is no
4360 store between it and the start of the loop.  We only care about tests with
4361 constants and registers, and only certain of those.  */
4362 if (GET_CODE (p) == JUMP_INSN
4363 && JUMP_LABEL (p) != 0
4364 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4365 && (test = get_condition_for_loop (loop, p)) != 0
4366 && GET_CODE (XEXP (test, 0)) == REG
4367 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4368 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4369 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4370 && bl->init_insn == 0)
4371 {
4372 /* If an NE test, we have an initial value! */
4373 if (GET_CODE (test) == NE)
4374 {
4375 bl->init_insn = p;
4376 bl->init_set = gen_rtx_SET (VOIDmode,
4377 XEXP (test, 0), XEXP (test, 1));
4378 }
4379 else
4380 bl->initial_test = test;
4381 }
4382 }
4383 }
4384
4385
4386 /* Look at each biv and see if we can say anything better about its
4387    initial value from any initializing insns set up above.  (This is done
4388    in two passes to avoid missing SETs in a PARALLEL.) */
4389 static void
4390 loop_bivs_check (loop)
4391 struct loop *loop;
4392 {
4393 struct loop_ivs *ivs = LOOP_IVS (loop);
4394 /* Temporary list pointers for traversing ivs->list. */
4395 struct iv_class *bl;
4396 struct iv_class **backbl;
4397
4398 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4399 {
4400 rtx src;
4401 rtx note;
4402
4403 if (! bl->init_insn)
4404 continue;
4405
4406 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4407 is a constant, use that value.  */
4408 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4409 && CONSTANT_P (XEXP (note, 0)))
4410 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4411 && CONSTANT_P (XEXP (note, 0))))
4412 src = XEXP (note, 0);
4413 else
4414 src = SET_SRC (bl->init_set);
4415
4416 if (loop_dump_stream)
4417 fprintf (loop_dump_stream,
4418 "Biv %d: initialized at insn %d: initial value ",
4419 bl->regno, INSN_UID (bl->init_insn));
4420
4421 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4422 || GET_MODE (src) == VOIDmode)
4423 && valid_initial_value_p (src, bl->init_insn,
4424 LOOP_INFO (loop)->pre_header_has_call,
4425 loop->start))
4426 {
4427 bl->initial_value = src;
4428
4429 if (loop_dump_stream)
4430 {
4431 print_simple_rtl (loop_dump_stream, src);
4432 fputc ('\n', loop_dump_stream);
4433 }
4434 }
4435 /* If we can't make it a giv,
4436 let biv keep initial value of "itself". */
4437 else if (loop_dump_stream)
4438 fprintf (loop_dump_stream, "is complex\n");
4439 }
4440 }
4441
4442
4443 /* Search the loop for general induction variables. */
4444
4445 static void
4446 loop_givs_find (loop)
4447 struct loop* loop;
4448 {
4449 for_each_insn_in_loop (loop, check_insn_for_givs);
4450 }
4451
4452
4453 /* For each giv for which we still don't know whether or not it is
4454 replaceable, check to see if it is replaceable because its final value
4455 can be calculated. */
4456
4457 static void
4458 loop_givs_check (loop)
4459 struct loop *loop;
4460 {
4461 struct loop_ivs *ivs = LOOP_IVS (loop);
4462 struct iv_class *bl;
4463
4464 for (bl = ivs->list; bl; bl = bl->next)
4465 {
4466 struct induction *v;
4467
4468 for (v = bl->giv; v; v = v->next_iv)
4469 if (! v->replaceable && ! v->not_replaceable)
4470 check_final_value (loop, v);
4471 }
4472 }
4473
4474
4475 /* Return non-zero if it is possible to eliminate the biv BL provided
4476 all givs are reduced. This is possible if either the reg is not
4477 used outside the loop, or we can compute what its final value will
4478 be. */
4479
4480 static int
4481 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4482 struct loop *loop;
4483 struct iv_class *bl;
4484 int threshold;
4485 int insn_count;
4486 {
4487 /* For architectures with a decrement_and_branch_until_zero insn,
4488 don't do this if we put a REG_NONNEG note on the endtest for this
4489 biv. */
4490
4491 #ifdef HAVE_decrement_and_branch_until_zero
4492 if (bl->nonneg)
4493 {
4494 if (loop_dump_stream)
4495 fprintf (loop_dump_stream,
4496 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4497 return 0;
4498 }
4499 #endif
4500
4501 /* Check that biv is used outside loop or if it has a final value.
4502 Compare against bl->init_insn rather than loop->start. We aren't
4503 concerned with any uses of the biv between init_insn and
4504 loop->start since these won't be affected by the value of the biv
4505 elsewhere in the function, so long as init_insn doesn't use the
4506 biv itself. */
4507
4508 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4509 && bl->init_insn
4510 && INSN_UID (bl->init_insn) < max_uid_for_loop
4511 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4512 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4513 || (bl->final_value = final_biv_value (loop, bl)))
4514 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4515
4516 if (loop_dump_stream)
4517 {
4518 fprintf (loop_dump_stream,
4519 "Cannot eliminate biv %d.\n",
4520 bl->regno);
4521 fprintf (loop_dump_stream,
4522 "First use: insn %d, last use: insn %d.\n",
4523 REGNO_FIRST_UID (bl->regno),
4524 REGNO_LAST_UID (bl->regno));
4525 }
4526 return 0;
4527 }
4528
4529
4530 /* Reduce each giv of BL that we have decided to reduce. */
4531
4532 static void
4533 loop_givs_reduce (loop, bl)
4534 struct loop *loop;
4535 struct iv_class *bl;
4536 {
4537 struct induction *v;
4538
4539 for (v = bl->giv; v; v = v->next_iv)
4540 {
4541 struct induction *tv;
4542 if (! v->ignore && v->same == 0)
4543 {
4544 int auto_inc_opt = 0;
4545
4546 /* If the code for derived givs immediately below has already
4547 allocated a new_reg, we must keep it. */
4548 if (! v->new_reg)
4549 v->new_reg = gen_reg_rtx (v->mode);
4550
4551 #ifdef AUTO_INC_DEC
4552 /* If the target has auto-increment addressing modes, and
4553 this is an address giv, then try to put the increment
4554 immediately after its use, so that flow can create an
4555 auto-increment addressing mode. */
4556 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4557 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4558 /* We don't handle reversed biv's because bl->biv->insn
4559 does not have a valid INSN_LUID. */
4560 && ! bl->reversed
4561 && v->always_executed && ! v->maybe_multiple
4562 && INSN_UID (v->insn) < max_uid_for_loop)
4563 {
4564 /* If other giv's have been combined with this one, then
4565 this will work only if all uses of the other giv's occur
4566 before this giv's insn. This is difficult to check.
4567
4568 We simplify this by looking for the common case where
4569 there is one DEST_REG giv, and this giv's insn is the
4570 last use of the dest_reg of that DEST_REG giv. If the
4571 increment occurs after the address giv, then we can
4572 perform the optimization. (Otherwise, the increment
4573 would have to go before other_giv, and we would not be
4574 able to combine it with the address giv to get an
4575 auto-inc address.) */
4576 if (v->combined_with)
4577 {
4578 struct induction *other_giv = 0;
4579
4580 for (tv = bl->giv; tv; tv = tv->next_iv)
4581 if (tv->same == v)
4582 {
4583 if (other_giv)
4584 break;
4585 else
4586 other_giv = tv;
4587 }
4588 if (! tv && other_giv
4589 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4590 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4591 == INSN_UID (v->insn))
4592 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4593 auto_inc_opt = 1;
4594 }
4595 /* Check for case where increment is before the address
4596 giv. Do this test in "loop order". */
4597 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4598 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4599 || (INSN_LUID (bl->biv->insn)
4600 > INSN_LUID (loop->scan_start))))
4601 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4602 && (INSN_LUID (loop->scan_start)
4603 < INSN_LUID (bl->biv->insn))))
4604 auto_inc_opt = -1;
4605 else
4606 auto_inc_opt = 1;
4607
4608 #ifdef HAVE_cc0
4609 {
4610 rtx prev;
4611
4612 /* We can't put an insn immediately after one setting
4613 cc0, or immediately before one using cc0. */
4614 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4615 || (auto_inc_opt == -1
4616 && (prev = prev_nonnote_insn (v->insn)) != 0
4617 && INSN_P (prev)
4618 && sets_cc0_p (PATTERN (prev))))
4619 auto_inc_opt = 0;
4620 }
4621 #endif
4622
4623 if (auto_inc_opt)
4624 v->auto_inc_opt = 1;
4625 }
4626 #endif
4627
4628 /* For each place where the biv is incremented, add an insn
4629 to increment the new, reduced reg for the giv. */
4630 for (tv = bl->biv; tv; tv = tv->next_iv)
4631 {
4632 rtx insert_before;
4633
4634 if (! auto_inc_opt)
4635 insert_before = tv->insn;
4636 else if (auto_inc_opt == 1)
4637 insert_before = NEXT_INSN (v->insn);
4638 else
4639 insert_before = v->insn;
4640
4641 if (tv->mult_val == const1_rtx)
4642 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4643 v->new_reg, v->new_reg,
4644 0, insert_before);
4645 else /* tv->mult_val == const0_rtx */
4646 /* A multiply is acceptable here
4647 since this is presumed to be seldom executed. */
4648 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4649 v->add_val, v->new_reg,
4650 0, insert_before);
4651 }
4652
4653 /* Add code at loop start to initialize giv's reduced reg. */
4654
4655 loop_iv_add_mult_hoist (loop,
4656 extend_value_for_giv (v, bl->initial_value),
4657 v->mult_val, v->add_val, v->new_reg);
4658 }
4659 }
4660 }
4661
4662
4663 /* Check for givs whose first use is their definition and whose
4664 last use is the definition of another giv. If so, it is likely
4665 dead and should not be used to derive another giv nor to
4666 eliminate a biv. */
4667
4668 static void
4669 loop_givs_dead_check (loop, bl)
4670 struct loop *loop ATTRIBUTE_UNUSED;
4671 struct iv_class *bl;
4672 {
4673 struct induction *v;
4674
4675 for (v = bl->giv; v; v = v->next_iv)
4676 {
4677 if (v->ignore
4678 || (v->same && v->same->ignore))
4679 continue;
4680
4681 if (v->giv_type == DEST_REG
4682 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4683 {
4684 struct induction *v1;
4685
4686 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4687 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4688 v->maybe_dead = 1;
4689 }
4690 }
4691 }
4692
4693
4694 static void
4695 loop_givs_rescan (loop, bl, reg_map)
4696 struct loop *loop;
4697 struct iv_class *bl;
4698 rtx *reg_map;
4699 {
4700 struct induction *v;
4701
4702 for (v = bl->giv; v; v = v->next_iv)
4703 {
4704 if (v->same && v->same->ignore)
4705 v->ignore = 1;
4706
4707 if (v->ignore)
4708 continue;
4709
4710 /* Update expression if this was combined, in case other giv was
4711 replaced. */
4712 if (v->same)
4713 v->new_reg = replace_rtx (v->new_reg,
4714 v->same->dest_reg, v->same->new_reg);
4715
4716 /* See if this register is known to be a pointer to something. If
4717 so, see if we can find the alignment. First see if there is a
4718 destination register that is a pointer. If so, this shares the
4719 alignment too. Next see if we can deduce anything from the
4720 computational information. If not, and this is a DEST_ADDR
4721 giv, at least we know that it's a pointer, though we don't know
4722 the alignment. */
4723 if (GET_CODE (v->new_reg) == REG
4724 && v->giv_type == DEST_REG
4725 && REG_POINTER (v->dest_reg))
4726 mark_reg_pointer (v->new_reg,
4727 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4728 else if (GET_CODE (v->new_reg) == REG
4729 && REG_POINTER (v->src_reg))
4730 {
4731 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4732
4733 if (align == 0
4734 || GET_CODE (v->add_val) != CONST_INT
4735 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4736 align = 0;
4737
4738 mark_reg_pointer (v->new_reg, align);
4739 }
4740 else if (GET_CODE (v->new_reg) == REG
4741 && GET_CODE (v->add_val) == REG
4742 && REG_POINTER (v->add_val))
4743 {
4744 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4745
4746 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4747 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4748 align = 0;
4749
4750 mark_reg_pointer (v->new_reg, align);
4751 }
4752 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4753 mark_reg_pointer (v->new_reg, 0);
4754
4755 if (v->giv_type == DEST_ADDR)
4756 /* Store reduced reg as the address in the memref where we found
4757 this giv. */
4758 validate_change (v->insn, v->location, v->new_reg, 0);
4759 else if (v->replaceable)
4760 {
4761 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4762 }
4763 else
4764 {
4765 /* Not replaceable; emit an insn to set the original giv reg from
4766 the reduced giv, same as above. */
4767 loop_insn_emit_after (loop, 0, v->insn,
4768 gen_move_insn (v->dest_reg, v->new_reg));
4769 }
4770
4771 /* When a loop is reversed, givs which depend on the reversed
4772 biv, and which are live outside the loop, must be set to their
4773 correct final value. This insn is only needed if the giv is
4774 not replaceable. The correct final value is the same as the
4775 value that the giv starts the reversed loop with. */
4776 if (bl->reversed && ! v->replaceable)
4777 loop_iv_add_mult_sink (loop,
4778 extend_value_for_giv (v, bl->initial_value),
4779 v->mult_val, v->add_val, v->dest_reg);
4780 else if (v->final_value)
4781 loop_insn_sink_or_swim (loop,
4782 gen_move_insn (v->dest_reg, v->final_value));
4783
4784 if (loop_dump_stream)
4785 {
4786 fprintf (loop_dump_stream, "giv at %d reduced to ",
4787 INSN_UID (v->insn));
4788 print_simple_rtl (loop_dump_stream, v->new_reg);
4789 fprintf (loop_dump_stream, "\n");
4790 }
4791 }
4792 }
4793
4794
4795 static int
4796 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4797 struct loop *loop ATTRIBUTE_UNUSED;
4798 struct iv_class *bl;
4799 struct induction *v;
4800 rtx test_reg;
4801 {
4802 int add_cost;
4803 int benefit;
4804
4805 benefit = v->benefit;
4806 PUT_MODE (test_reg, v->mode);
4807 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4808 test_reg, test_reg);
4809
4810 /* Reduce benefit if not replaceable, since we will insert a
4811 move-insn to replace the insn that calculates this giv. Don't do
4812 this unless the giv is a user variable, since it will often be
4813 marked non-replaceable because of the duplication of the exit
4814 code outside the loop. In such a case, the copies we insert are
4815 dead and will be deleted. So they don't have a cost. Similar
4816 situations exist. */
4817 /* ??? The new final_[bg]iv_value code does a much better job of
4818 finding replaceable giv's, and hence this code may no longer be
4819 necessary. */
4820 if (! v->replaceable && ! bl->eliminable
4821 && REG_USERVAR_P (v->dest_reg))
4822 benefit -= copy_cost;
4823
4824 /* Decrease the benefit to count the add-insns that we will insert
4825 to increment the reduced reg for the giv. ??? This can
4826 overestimate the run-time cost of the additional insns, e.g. if
4827 there are multiple basic blocks that increment the biv, but only
4828 one of these blocks is executed during each iteration. There is
4829 no good way to detect cases like this with the current structure
4830 of the loop optimizer. This code is more accurate for
4831 determining code size than run-time benefits. */
4832 benefit -= add_cost * bl->biv_count;
4833
4834 /* Decide whether to strength-reduce this giv or to leave the code
4835 unchanged (recompute it from the biv each time it is used). This
4836 decision can be made independently for each giv. */
4837
4838 #ifdef AUTO_INC_DEC
4839 /* Attempt to guess whether autoincrement will handle some of the
4840 new add insns; if so, increase BENEFIT (undo the subtraction of
4841 add_cost that was done above). */
4842 if (v->giv_type == DEST_ADDR
4843 /* Increasing the benefit is risky, since this is only a guess.
4844 Avoid increasing register pressure in cases where there would
4845 be no other benefit from reducing this giv. */
4846 && benefit > 0
4847 && GET_CODE (v->mult_val) == CONST_INT)
4848 {
4849 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4850
4851 if (HAVE_POST_INCREMENT
4852 && INTVAL (v->mult_val) == size)
4853 benefit += add_cost * bl->biv_count;
4854 else if (HAVE_PRE_INCREMENT
4855 && INTVAL (v->mult_val) == size)
4856 benefit += add_cost * bl->biv_count;
4857 else if (HAVE_POST_DECREMENT
4858 && -INTVAL (v->mult_val) == size)
4859 benefit += add_cost * bl->biv_count;
4860 else if (HAVE_PRE_DECREMENT
4861 && -INTVAL (v->mult_val) == size)
4862 benefit += add_cost * bl->biv_count;
4863 }
4864 #endif
4865
4866 return benefit;
4867 }
4868
4869
4870 /* Free IV structures for LOOP. */
4871
4872 static void
4873 loop_ivs_free (loop)
4874 struct loop *loop;
4875 {
4876 struct loop_ivs *ivs = LOOP_IVS (loop);
4877 struct iv_class *iv = ivs->list;
4878
4879 free (ivs->regs);
4880
4881 while (iv)
4882 {
4883 struct iv_class *next = iv->next;
4884 struct induction *induction;
4885 struct induction *next_induction;
4886
4887 for (induction = iv->biv; induction; induction = next_induction)
4888 {
4889 next_induction = induction->next_iv;
4890 free (induction);
4891 }
4892 for (induction = iv->giv; induction; induction = next_induction)
4893 {
4894 next_induction = induction->next_iv;
4895 free (induction);
4896 }
4897
4898 free (iv);
4899 iv = next;
4900 }
4901 }
4902
4903
4904 /* Perform strength reduction and induction variable elimination.
4905
4906 Pseudo registers created during this function will be beyond the
4907 last valid index in several tables including
4908 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4909 problem here, because the added registers cannot be givs outside of
4910 their loop, and hence will never be reconsidered. But scan_loop
4911 must check regnos to make sure they are in bounds. */
4912
4913 static void
4914 strength_reduce (loop, flags)
4915 struct loop *loop;
4916 int flags;
4917 {
4918 struct loop_info *loop_info = LOOP_INFO (loop);
4919 struct loop_regs *regs = LOOP_REGS (loop);
4920 struct loop_ivs *ivs = LOOP_IVS (loop);
4921 rtx p;
4922 /* Temporary list pointer for traversing ivs->list. */
4923 struct iv_class *bl;
4924 /* Ratio of extra register life span we can justify
4925 for saving an instruction. More if loop doesn't call subroutines
4926 since in that case saving an insn makes more difference
4927 and more registers are available. */
4928 /* ??? could set this to last value of threshold in move_movables */
4929 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
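/* E.g. with a hypothetical 29 non-fixed registers and no call in the
   loop, the threshold is 2 * (3 + 29) = 64.  */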
4930 /* Map of pseudo-register replacements. */
4931 rtx *reg_map = NULL;
4932 int reg_map_size;
4933 int unrolled_insn_copies = 0;
4934 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4935 int insn_count = count_insns_in_loop (loop);
4936
4937 addr_placeholder = gen_reg_rtx (Pmode);
4938
4939 ivs->n_regs = max_reg_before_loop;
4940 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4941
4942 /* Find all BIVs in loop. */
4943 loop_bivs_find (loop);
4944
4945 /* Exit if there are no bivs. */
4946 if (! ivs->list)
4947 {
4948 /* We can still unroll the loop anyway, but indicate that there is no
4949 strength reduction info available.  */
4950 if (flags & LOOP_UNROLL)
4951 unroll_loop (loop, insn_count, 0);
4952
4953 loop_ivs_free (loop);
4954 return;
4955 }
4956
4957 /* Determine how BIVs are initialized by looking through the pre-header
4958 extended basic block.  */
4959 loop_bivs_init_find (loop);
4960
4961 /* Look at each biv and see if we can say anything better about its
4962 initial value from any initializing insns set up above.  */
4963 loop_bivs_check (loop);
4964
4965 /* Search the loop for general induction variables. */
4966 loop_givs_find (loop);
4967
4968 /* Try to calculate and save the number of loop iterations. This is
4969 set to zero if the actual number can not be calculated. This must
4970 be called after all giv's have been identified, since otherwise it may
4971 fail if the iteration variable is a giv. */
4972 loop_iterations (loop);
4973
4974 #ifdef HAVE_prefetch
4975 if (flags & LOOP_PREFETCH)
4976 emit_prefetch_instructions (loop);
4977 #endif
4978
4979 /* Now for each giv for which we still don't know whether or not it is
4980 replaceable, check to see if it is replaceable because its final value
4981 can be calculated. This must be done after loop_iterations is called,
4982 so that final_giv_value will work correctly. */
4983 loop_givs_check (loop);
4984
4985 /* Try to prove that the loop counter variable (if any) is always
4986 nonnegative; if so, record that fact with a REG_NONNEG note
4987 so that "decrement and branch until zero" insn can be used. */
4988 check_dbra_loop (loop, insn_count);
4989
4990 /* Create reg_map to hold substitutions for replaceable giv regs.
4991 Some givs might have been made from biv increments, so look at
4992 ivs->reg_iv_type for a suitable size. */
4993 reg_map_size = ivs->n_regs;
4994 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4995
4996 /* Examine each iv class for feasibility of strength reduction/induction
4997 variable elimination. */
4998
4999 for (bl = ivs->list; bl; bl = bl->next)
5000 {
5001 struct induction *v;
5002 int benefit;
5003
5004 /* Test whether it will be possible to eliminate this biv
5005 provided all givs are reduced. */
5006 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5007
5008 /* This will be true at the end, if all givs which depend on this
5009 biv have been strength reduced.
5010 We can't (currently) eliminate the biv unless this is so. */
5011 bl->all_reduced = 1;
5012
5013 /* Check each extension dependent giv in this class to see if its
5014 root biv is safe from wrapping in the interior mode. */
5015 check_ext_dependent_givs (bl, loop_info);
5016
5017 /* Combine all giv's for this iv_class. */
5018 combine_givs (regs, bl);
5019
5020 for (v = bl->giv; v; v = v->next_iv)
5021 {
5022 struct induction *tv;
5023
5024 if (v->ignore || v->same)
5025 continue;
5026
5027 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5028
5029 /* If an insn is not to be strength reduced, then set its ignore
5030 flag, and clear bl->all_reduced. */
5031
5032 /* A giv that depends on a reversed biv must be reduced if it is
5033 used after the loop exit, otherwise, it would have the wrong
5034 value after the loop exit. To make it simple, just reduce all
5035 of such giv's whether or not we know they are used after the loop
5036 exit. */
5037
5038 if (! flag_reduce_all_givs
5039 && v->lifetime * threshold * benefit < insn_count
5040 && ! bl->reversed)
5041 {
5042 if (loop_dump_stream)
5043 fprintf (loop_dump_stream,
5044 "giv of insn %d not worth while, %d vs %d.\n",
5045 INSN_UID (v->insn),
5046 v->lifetime * threshold * benefit, insn_count);
5047 v->ignore = 1;
5048 bl->all_reduced = 0;
5049 }
5050 else
5051 {
5052 /* Check that we can increment the reduced giv without a
5053 multiply insn. If not, reject it. */
5054
5055 for (tv = bl->biv; tv; tv = tv->next_iv)
5056 if (tv->mult_val == const1_rtx
5057 && ! product_cheap_p (tv->add_val, v->mult_val))
5058 {
5059 if (loop_dump_stream)
5060 fprintf (loop_dump_stream,
5061 "giv of insn %d: would need a multiply.\n",
5062 INSN_UID (v->insn));
5063 v->ignore = 1;
5064 bl->all_reduced = 0;
5065 break;
5066 }
5067 }
5068 }
5069
5070 /* Check for givs whose first use is their definition and whose
5071 last use is the definition of another giv. If so, it is likely
5072 dead and should not be used to derive another giv nor to
5073 eliminate a biv. */
5074 loop_givs_dead_check (loop, bl);
5075
5076 /* Reduce each giv that we decided to reduce. */
5077 loop_givs_reduce (loop, bl);
5078
5079 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5080 as not reduced.
5081
5082 For each giv register that can be reduced now: if replaceable,
5083 substitute reduced reg wherever the old giv occurs;
5084 else add new move insn "giv_reg = reduced_reg". */
5085 loop_givs_rescan (loop, bl, reg_map);
5086
5087 /* All the givs based on the biv bl have been reduced if they
5088 merit it. */
5089
5090 /* For each giv not marked as maybe dead that has been combined with a
5091 second giv, clear any "maybe dead" mark on that second giv.
5092 v->new_reg will either be or refer to the register of the giv it
5093 combined with.
5094
5095 Doing this clearing avoids problems in biv elimination where
5096 a giv's new_reg is a complex value that can't be put in the
5097 insn but the giv combined with (with a reg as new_reg) is
5098 marked maybe_dead. Since the register will be used in either
5099 case, we'd prefer it be used from the simpler giv. */
5100
5101 for (v = bl->giv; v; v = v->next_iv)
5102 if (! v->maybe_dead && v->same)
5103 v->same->maybe_dead = 0;
5104
5105 /* Try to eliminate the biv, if it is a candidate.
5106 This won't work if ! bl->all_reduced,
5107 since the givs we planned to use might not have been reduced.
5108
5109 We have to be careful that we didn't initially think we could
5110 eliminate this biv because of a giv that we now think may be
5111 dead and shouldn't be used as a biv replacement.
5112
5113 Also, there is the possibility that we may have a giv that looks
5114 like it can be used to eliminate a biv, but the resulting insn
5115 isn't valid. This can happen, for example, on the 88k, where a
5116 JUMP_INSN can compare a register only with zero. Attempts to
5117 replace it with a compare with a constant will fail.
5118
5119 Note that in cases where this call fails, we may have replaced some
5120 of the occurrences of the biv with a giv, but no harm was done in
5121 doing so in the rare cases where it can occur. */
5122
5123 if (bl->all_reduced == 1 && bl->eliminable
5124 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5125 {
5126 /* ?? If we created a new test to bypass the loop entirely,
5127 or otherwise drop straight in, based on this test, then
5128 we might want to rewrite it also. This way some later
5129 pass has more hope of removing the initialization of this
5130 biv entirely. */
5131
5132 /* If final_value != 0, then the biv may be used after loop end
5133 and we must emit an insn to set it just in case.
5134
5135 Reversed bivs already have an insn after the loop setting their
5136 value, so we don't need another one. We can't calculate the
5137 proper final value for such a biv here anyways. */
5138 if (bl->final_value && ! bl->reversed)
5139 loop_insn_sink_or_swim (loop, gen_move_insn
5140 (bl->biv->dest_reg, bl->final_value));
5141
5142 if (loop_dump_stream)
5143 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5144 bl->regno);
5145 }
5146 }
5147
5148 /* Go through all the instructions in the loop, making all the
5149 register substitutions scheduled in REG_MAP. */
5150
5151 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5152 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5153 || GET_CODE (p) == CALL_INSN)
5154 {
5155 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5156 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5157 INSN_CODE (p) = -1;
5158 }
5159
5160 if (loop_info->n_iterations > 0)
5161 {
5162 /* When we completely unroll a loop we will likely not need the increment
5163 of the loop BIV and we will not need the conditional branch at the
5164 end of the loop. */
5165 unrolled_insn_copies = insn_count - 2;
5166
5167 #ifdef HAVE_cc0
5168 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5169 need the comparison before the conditional branch at the end of the
5170 loop. */
5171 unrolled_insn_copies -= 1;
5172 #endif
5173
5174 /* We'll need one copy for each loop iteration. */
5175 unrolled_insn_copies *= loop_info->n_iterations;
5176
5177 /* A little slop to account for the ability to remove initialization
5178 code, better CSE, and other secondary benefits of completely
5179 unrolling some loops. */
5180 unrolled_insn_copies -= 1;
5181
5182 /* Clamp the value. */
5183 if (unrolled_insn_copies < 0)
5184 unrolled_insn_copies = 0;
5185 }
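/* For example (hypothetical numbers), a 12-insn loop known to run 4
   iterations gives 12 - 2 = 10 copies per iteration (one fewer on a
   HAVE_cc0 machine), so about 10 * 4 - 1 = 39 unrolled insns; since
   39 > 12, the size test below does not unroll it on its own, and it is
   only unrolled if the LOOP_UNROLL flag was passed.  */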
5186
5187 /* Unroll loops from within strength reduction so that we can use the
5188 induction variable information that strength_reduce has already
5189 collected. Always unroll loops that would be as small or smaller
5190 unrolled than when rolled. */
5191 if ((flags & LOOP_UNROLL)
5192 || (loop_info->n_iterations > 0
5193 && unrolled_insn_copies <= insn_count))
5194 unroll_loop (loop, insn_count, 1);
5195
5196 #ifdef HAVE_doloop_end
5197 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5198 doloop_optimize (loop);
5199 #endif /* HAVE_doloop_end */
5200
5201 /* If the number of iterations is known, add a branch prediction note
5202 to the branch.  Do that only in the second loop pass, as loop unrolling
5203 may change the number of iterations performed.  */
5204 if ((flags & LOOP_BCT)
5205 && loop_info->n_iterations / loop_info->unroll_number > 1)
5206 {
5207 int n = loop_info->n_iterations / loop_info->unroll_number;
5208 predict_insn (PREV_INSN (loop->end),
5209 PRED_LOOP_ITERATIONS,
5210 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5211 }
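/* E.g. with n == 8 the note carries probability
   REG_BR_PROB_BASE - REG_BR_PROB_BASE / 8, i.e. the branch is predicted
   taken 7 times out of 8.  */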
5212
5213 if (loop_dump_stream)
5214 fprintf (loop_dump_stream, "\n");
5215
5216 loop_ivs_free (loop);
5217 if (reg_map)
5218 free (reg_map);
5219 }
5220 \f
5221 /* Record all basic induction variables calculated in the insn.  */
5222 static rtx
5223 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5224 struct loop *loop;
5225 rtx p;
5226 int not_every_iteration;
5227 int maybe_multiple;
5228 {
5229 struct loop_ivs *ivs = LOOP_IVS (loop);
5230 rtx set;
5231 rtx dest_reg;
5232 rtx inc_val;
5233 rtx mult_val;
5234 rtx *location;
5235
5236 if (GET_CODE (p) == INSN
5237 && (set = single_set (p))
5238 && GET_CODE (SET_DEST (set)) == REG)
5239 {
5240 dest_reg = SET_DEST (set);
5241 if (REGNO (dest_reg) < max_reg_before_loop
5242 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5243 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5244 {
5245 if (basic_induction_var (loop, SET_SRC (set),
5246 GET_MODE (SET_SRC (set)),
5247 dest_reg, p, &inc_val, &mult_val,
5248 &location))
5249 {
5250 /* It is a possible basic induction variable.
5251 Create and initialize an induction structure for it. */
5252
5253 struct induction *v
5254 = (struct induction *) xmalloc (sizeof (struct induction));
5255
5256 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5257 not_every_iteration, maybe_multiple);
5258 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5259 }
5260 else if (REGNO (dest_reg) < ivs->n_regs)
5261 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5262 }
5263 }
5264 return p;
5265 }
5266 \f
5267 /* Record all givs calculated in the insn.
5268 A register is a giv if: it is only set once, it is a function of a
5269 biv and a constant (or invariant), and it is not a biv. */
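/* For instance (an illustrative sketch, not an excerpt from real RTL),
   with `i' recorded as a biv, an insn sequence such as

       r100 = i * 4
       r100 = r100 + 16

   makes r100 a DEST_REG giv with mult_val 4 and add_val 16; the second
   set is folded in via consec_sets_giv below.  */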
5270 static rtx
5271 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5272 struct loop *loop;
5273 rtx p;
5274 int not_every_iteration;
5275 int maybe_multiple;
5276 {
5277 struct loop_regs *regs = LOOP_REGS (loop);
5278
5279 rtx set;
5280 /* Look for a general induction variable in a register. */
5281 if (GET_CODE (p) == INSN
5282 && (set = single_set (p))
5283 && GET_CODE (SET_DEST (set)) == REG
5284 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5285 {
5286 rtx src_reg;
5287 rtx dest_reg;
5288 rtx add_val;
5289 rtx mult_val;
5290 rtx ext_val;
5291 int benefit;
5292 rtx regnote = 0;
5293 rtx last_consec_insn;
5294
5295 dest_reg = SET_DEST (set);
5296 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5297 return p;
5298
5299 if (/* SET_SRC is a giv. */
5300 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5301 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5302 /* Equivalent expression is a giv. */
5303 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5304 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5305 &add_val, &mult_val, &ext_val, 0,
5306 &benefit, VOIDmode)))
5307 /* Don't try to handle any regs made by loop optimization.
5308 We have nothing on them in regno_first_uid, etc. */
5309 && REGNO (dest_reg) < max_reg_before_loop
5310 /* Don't recognize a BASIC_INDUCT_VAR here. */
5311 && dest_reg != src_reg
5312 /* This must be the only place where the register is set. */
5313 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5314 /* or all sets must be consecutive and make a giv. */
5315 || (benefit = consec_sets_giv (loop, benefit, p,
5316 src_reg, dest_reg,
5317 &add_val, &mult_val, &ext_val,
5318 &last_consec_insn))))
5319 {
5320 struct induction *v
5321 = (struct induction *) xmalloc (sizeof (struct induction));
5322
5323 /* If this is a library call, increase benefit. */
5324 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5325 benefit += libcall_benefit (p);
5326
5327 /* Skip the consecutive insns, if there are any. */
5328 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5329 p = last_consec_insn;
5330
5331 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5332 ext_val, benefit, DEST_REG, not_every_iteration,
5333 maybe_multiple, (rtx*) 0);
5334
5335 }
5336 }
5337
5338 #ifndef DONT_REDUCE_ADDR
5339 /* Look for givs which are memory addresses. */
5340 /* This resulted in worse code on a VAX 8600. I wonder if it
5341 still does. */
5342 if (GET_CODE (p) == INSN)
5343 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5344 maybe_multiple);
5345 #endif
5346
5347 /* Update the status of whether giv can derive other givs. This can
5348 change when we pass a label or an insn that updates a biv. */
5349 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5350 || GET_CODE (p) == CODE_LABEL)
5351 update_giv_derive (loop, p);
5352 return p;
5353 }
5354 \f
5355 /* Return 1 if X is a valid source for an initial value (or as value being
5356 compared against in an initial test).
5357
5358 X must be either a register or constant and must not be clobbered between
5359 the current insn and the start of the loop.
5360
5361 INSN is the insn containing X. */
5362
5363 static int
5364 valid_initial_value_p (x, insn, call_seen, loop_start)
5365 rtx x;
5366 rtx insn;
5367 int call_seen;
5368 rtx loop_start;
5369 {
5370 if (CONSTANT_P (x))
5371 return 1;
5372
5373 /* Only consider pseudos we know about initialized in insns whose luids
5374 we know. */
5375 if (GET_CODE (x) != REG
5376 || REGNO (x) >= max_reg_before_loop)
5377 return 0;
5378
5379 /* Don't use a call-clobbered register across a call which clobbers it. On
5380 some machines, don't use any hard registers at all. */
5381 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5382 && (SMALL_REGISTER_CLASSES
5383 || (call_used_regs[REGNO (x)] && call_seen)))
5384 return 0;
5385
5386 /* Don't use registers that have been clobbered before the start of the
5387 loop. */
5388 if (reg_set_between_p (x, insn, loop_start))
5389 return 0;
5390
5391 return 1;
5392 }
5393 \f
5394 /* Scan X for memory refs and check each memory address
5395 as a possible giv. INSN is the insn whose pattern X comes from.
5396 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5397 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5398 more than once in each loop iteration. */
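/* For example (illustrative), when the pattern scanned contains a memory
   reference such as (mem:SI (plus (reg i) (const_int 4))) and `i' is a
   biv, a DEST_ADDR giv is recorded whose LOCATION is the address inside
   the MEM, so the address can later be strength-reduced in place.  */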
5399
5400 static void
5401 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5402 const struct loop *loop;
5403 rtx x;
5404 rtx insn;
5405 int not_every_iteration, maybe_multiple;
5406 {
5407 int i, j;
5408 enum rtx_code code;
5409 const char *fmt;
5410
5411 if (x == 0)
5412 return;
5413
5414 code = GET_CODE (x);
5415 switch (code)
5416 {
5417 case REG:
5418 case CONST_INT:
5419 case CONST:
5420 case CONST_DOUBLE:
5421 case SYMBOL_REF:
5422 case LABEL_REF:
5423 case PC:
5424 case CC0:
5425 case ADDR_VEC:
5426 case ADDR_DIFF_VEC:
5427 case USE:
5428 case CLOBBER:
5429 return;
5430
5431 case MEM:
5432 {
5433 rtx src_reg;
5434 rtx add_val;
5435 rtx mult_val;
5436 rtx ext_val;
5437 int benefit;
5438
5439 /* This code used to disable creating GIVs with mult_val == 1 and
5440 add_val == 0. However, this leads to lost optimizations when
5441 it comes time to combine a set of related DEST_ADDR GIVs, since
5442 this one would not be seen. */
5443
5444 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5445 &mult_val, &ext_val, 1, &benefit,
5446 GET_MODE (x)))
5447 {
5448 /* Found one; record it. */
5449 struct induction *v
5450 = (struct induction *) xmalloc (sizeof (struct induction));
5451
5452 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5453 add_val, ext_val, benefit, DEST_ADDR,
5454 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5455
5456 v->mem = x;
5457 }
5458 }
5459 return;
5460
5461 default:
5462 break;
5463 }
5464
5465 /* Recursively scan the subexpressions for other mem refs. */
5466
5467 fmt = GET_RTX_FORMAT (code);
5468 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5469 if (fmt[i] == 'e')
5470 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5471 maybe_multiple);
5472 else if (fmt[i] == 'E')
5473 for (j = 0; j < XVECLEN (x, i); j++)
5474 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5475 maybe_multiple);
5476 }
5477 \f
5478 /* Fill in the data about one biv update.
5479 V is the `struct induction' in which we record the biv. (It is
5480 allocated by the caller, with alloca.)
5481 INSN is the insn that sets it.
5482 DEST_REG is the biv's reg.
5483
5484 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5485 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5486 being set to INC_VAL.
5487
5488 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5489 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5490 can be executed more than once per iteration. If MAYBE_MULTIPLE
5491 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5492 executed exactly once per iteration. */
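/* For example (illustrative RTL), an increment

       (set (reg i) (plus (reg i) (const_int 4)))

   is recorded with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
   while an assignment of a loop invariant

       (set (reg i) (reg n))

   is recorded with MULT_VAL == const0_rtx and INC_VAL == (reg n).  */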
5493
5494 static void
5495 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5496 not_every_iteration, maybe_multiple)
5497 struct loop *loop;
5498 struct induction *v;
5499 rtx insn;
5500 rtx dest_reg;
5501 rtx inc_val;
5502 rtx mult_val;
5503 rtx *location;
5504 int not_every_iteration;
5505 int maybe_multiple;
5506 {
5507 struct loop_ivs *ivs = LOOP_IVS (loop);
5508 struct iv_class *bl;
5509
5510 v->insn = insn;
5511 v->src_reg = dest_reg;
5512 v->dest_reg = dest_reg;
5513 v->mult_val = mult_val;
5514 v->add_val = inc_val;
5515 v->ext_dependent = NULL_RTX;
5516 v->location = location;
5517 v->mode = GET_MODE (dest_reg);
5518 v->always_computable = ! not_every_iteration;
5519 v->always_executed = ! not_every_iteration;
5520 v->maybe_multiple = maybe_multiple;
5521
5522 /* Add this to the reg's iv_class, creating a class
5523 if this is the first incrementation of the reg. */
5524
5525 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5526 if (bl == 0)
5527 {
5528 /* Create and initialize new iv_class. */
5529
5530 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5531
5532 bl->regno = REGNO (dest_reg);
5533 bl->biv = 0;
5534 bl->giv = 0;
5535 bl->biv_count = 0;
5536 bl->giv_count = 0;
5537
5538 /* Set initial value to the reg itself. */
5539 bl->initial_value = dest_reg;
5540 bl->final_value = 0;
5541 /* We haven't seen the initializing insn yet. */
5542 bl->init_insn = 0;
5543 bl->init_set = 0;
5544 bl->initial_test = 0;
5545 bl->incremented = 0;
5546 bl->eliminable = 0;
5547 bl->nonneg = 0;
5548 bl->reversed = 0;
5549 bl->total_benefit = 0;
5550
5551 /* Add this class to ivs->list. */
5552 bl->next = ivs->list;
5553 ivs->list = bl;
5554
5555 /* Put it in the array of biv register classes. */
5556 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5557 }
5558
5559 /* Update IV_CLASS entry for this biv. */
5560 v->next_iv = bl->biv;
5561 bl->biv = v;
5562 bl->biv_count++;
5563 if (mult_val == const1_rtx)
5564 bl->incremented = 1;
5565
5566 if (loop_dump_stream)
5567 loop_biv_dump (v, loop_dump_stream, 0);
5568 }
5569 \f
5570 /* Fill in the data about one giv.
5571 V is the `struct induction' in which we record the giv. (It is
5572 allocated by the caller, with alloca.)
5573 INSN is the insn that sets it.
5574 BENEFIT estimates the savings from deleting this insn.
5575 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5576 into a register or is used as a memory address.
5577
5578 SRC_REG is the biv reg which the giv is computed from.
5579 DEST_REG is the giv's reg (if the giv is stored in a reg).
5580 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5581 LOCATION points to the place where this giv's value appears in INSN. */
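/* For example (illustrative), for an insn

       (set (reg j) (plus (mult (reg i) (const_int 4)) (reg p)))

   where `i' is a biv and `p' is loop invariant, TYPE is DEST_REG, SRC_REG
   is (reg i), MULT_VAL is (const_int 4) and ADD_VAL is (reg p).  For a giv
   found inside a memory address, TYPE is DEST_ADDR and LOCATION points at
   the address within the MEM.  */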
5582
5583 static void
5584 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5585 benefit, type, not_every_iteration, maybe_multiple, location)
5586 const struct loop *loop;
5587 struct induction *v;
5588 rtx insn;
5589 rtx src_reg;
5590 rtx dest_reg;
5591 rtx mult_val, add_val, ext_val;
5592 int benefit;
5593 enum g_types type;
5594 int not_every_iteration, maybe_multiple;
5595 rtx *location;
5596 {
5597 struct loop_ivs *ivs = LOOP_IVS (loop);
5598 struct induction *b;
5599 struct iv_class *bl;
5600 rtx set = single_set (insn);
5601 rtx temp;
5602
5603 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5604 undo the MULT canonicalization that we performed earlier. */
5605 temp = simplify_rtx (add_val);
5606 if (temp
5607 && ! (GET_CODE (add_val) == MULT
5608 && GET_CODE (temp) == ASHIFT))
5609 add_val = temp;
5610
5611 v->insn = insn;
5612 v->src_reg = src_reg;
5613 v->giv_type = type;
5614 v->dest_reg = dest_reg;
5615 v->mult_val = mult_val;
5616 v->add_val = add_val;
5617 v->ext_dependent = ext_val;
5618 v->benefit = benefit;
5619 v->location = location;
5620 v->cant_derive = 0;
5621 v->combined_with = 0;
5622 v->maybe_multiple = maybe_multiple;
5623 v->maybe_dead = 0;
5624 v->derive_adjustment = 0;
5625 v->same = 0;
5626 v->ignore = 0;
5627 v->new_reg = 0;
5628 v->final_value = 0;
5629 v->same_insn = 0;
5630 v->auto_inc_opt = 0;
5631 v->unrolled = 0;
5632 v->shared = 0;
5633
5634 /* The v->always_computable field is used in update_giv_derive, to
5635 determine whether a giv can be used to derive another giv. For a
5636 DEST_REG giv, INSN computes a new value for the giv, so its value
5637 isn't computable if INSN isn't executed every iteration.
5638 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5639 it does not compute a new value. Hence the value is always computable
5640 regardless of whether INSN is executed each iteration. */
5641
5642 if (type == DEST_ADDR)
5643 v->always_computable = 1;
5644 else
5645 v->always_computable = ! not_every_iteration;
5646
5647 v->always_executed = ! not_every_iteration;
5648
5649 if (type == DEST_ADDR)
5650 {
5651 v->mode = GET_MODE (*location);
5652 v->lifetime = 1;
5653 }
5654 else /* type == DEST_REG */
5655 {
5656 v->mode = GET_MODE (SET_DEST (set));
5657
5658 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5659
5660 /* If the lifetime is zero, it means that this register is
5661 really a dead store. So mark this as a giv that can be
5662 ignored. This will not prevent the biv from being eliminated. */
5663 if (v->lifetime == 0)
5664 v->ignore = 1;
5665
5666 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5667 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5668 }
5669
5670 /* Add the giv to the class of givs computed from one biv. */
5671
5672 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5673 if (bl)
5674 {
5675 v->next_iv = bl->giv;
5676 bl->giv = v;
5677 /* Don't count DEST_ADDR. This is supposed to count the number of
5678 insns that calculate givs. */
5679 if (type == DEST_REG)
5680 bl->giv_count++;
5681 bl->total_benefit += benefit;
5682 }
5683 else
5684 /* Fatal error, biv missing for this giv? */
5685 abort ();
5686
5687 if (type == DEST_ADDR)
5688 v->replaceable = 1;
5689 else
5690 {
5691 /* The giv can be replaced outright by the reduced register only if all
5692 of the following conditions are true:
5693 - the insn that sets the giv is always executed on any iteration
5694 on which the giv is used at all
5695 (there are two ways to deduce this:
5696 either the insn is executed on every iteration,
5697 or all uses follow that insn in the same basic block),
5698 - the giv is not used outside the loop
5699 - no assignments to the biv occur during the giv's lifetime. */
5700
5701 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5702 /* Previous line always fails if INSN was moved by loop opt. */
5703 && REGNO_LAST_LUID (REGNO (dest_reg))
5704 < INSN_LUID (loop->end)
5705 && (! not_every_iteration
5706 || last_use_this_basic_block (dest_reg, insn)))
5707 {
5708 /* Now check that there are no assignments to the biv within the
5709 giv's lifetime. This requires two separate checks. */
5710
5711 /* Check each biv update, and fail if any are between the first
5712 and last use of the giv.
5713
5714 If this loop contains an inner loop that was unrolled, then
5715 the insn modifying the biv may have been emitted by the loop
5716 unrolling code, and hence does not have a valid luid. Just
5717 mark the biv as not replaceable in this case. It is not very
5718 useful as a biv, because it is used in two different loops.
5719 It is very unlikely that we would be able to optimize the giv
5720 using this biv anyway. */
5721
5722 v->replaceable = 1;
5723 for (b = bl->biv; b; b = b->next_iv)
5724 {
5725 if (INSN_UID (b->insn) >= max_uid_for_loop
5726 || ((INSN_LUID (b->insn)
5727 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5728 && (INSN_LUID (b->insn)
5729 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5730 {
5731 v->replaceable = 0;
5732 v->not_replaceable = 1;
5733 break;
5734 }
5735 }
5736
5737 /* If there are any backwards branches that go from after the
5738 biv update to before it, then this giv is not replaceable. */
5739 if (v->replaceable)
5740 for (b = bl->biv; b; b = b->next_iv)
5741 if (back_branch_in_range_p (loop, b->insn))
5742 {
5743 v->replaceable = 0;
5744 v->not_replaceable = 1;
5745 break;
5746 }
5747 }
5748 else
5749 {
5750 /* It may still be replaceable; we don't have enough info here to
5751 decide. */
5752 v->replaceable = 0;
5753 v->not_replaceable = 0;
5754 }
5755 }
5756
5757 /* Record whether the add_val contains a const_int, for later use by
5758 combine_givs. */
5759 {
5760 rtx tem = add_val;
5761
5762 v->no_const_addval = 1;
5763 if (tem == const0_rtx)
5764 ;
5765 else if (CONSTANT_P (add_val))
5766 v->no_const_addval = 0;
5767 if (GET_CODE (tem) == PLUS)
5768 {
5769 while (1)
5770 {
5771 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5772 tem = XEXP (tem, 0);
5773 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5774 tem = XEXP (tem, 1);
5775 else
5776 break;
5777 }
5778 if (CONSTANT_P (XEXP (tem, 1)))
5779 v->no_const_addval = 0;
5780 }
5781 }
5782
5783 if (loop_dump_stream)
5784 loop_giv_dump (v, loop_dump_stream, 0);
5785 }
5786
5787 /* All this does is determine whether a giv can be made replaceable because
5788 its final value can be calculated. This code can not be part of record_giv
5789 above, because final_giv_value requires that the number of loop iterations
5790 be known, and that can not be accurately calculated until after all givs
5791 have been identified. */
5792
5793 static void
5794 check_final_value (loop, v)
5795 const struct loop *loop;
5796 struct induction *v;
5797 {
5798 struct loop_ivs *ivs = LOOP_IVS (loop);
5799 struct iv_class *bl;
5800 rtx final_value = 0;
5801
5802 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5803
5804 /* DEST_ADDR givs will never reach here, because they are always marked
5805 replaceable above in record_giv. */
5806
5807 /* The giv can be replaced outright by the reduced register only if all
5808 of the following conditions are true:
5809 - the insn that sets the giv is always executed on any iteration
5810 on which the giv is used at all
5811 (there are two ways to deduce this:
5812 either the insn is executed on every iteration,
5813 or all uses follow that insn in the same basic block),
5814 - its final value can be calculated (this condition is different
5815 than the one above in record_giv)
5816 - it's not used before it's set
5817 - no assignments to the biv occur during the giv's lifetime. */
5818
5819 #if 0
5820 /* This is only called now when replaceable is known to be false. */
5821 /* Clear replaceable, so that it won't confuse final_giv_value. */
5822 v->replaceable = 0;
5823 #endif
5824
5825 if ((final_value = final_giv_value (loop, v))
5826 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5827 {
5828 int biv_increment_seen = 0, before_giv_insn = 0;
5829 rtx p = v->insn;
5830 rtx last_giv_use;
5831
5832 v->replaceable = 1;
5833
5834 /* When trying to determine whether or not a biv increment occurs
5835 during the lifetime of the giv, we can ignore uses of the variable
5836 outside the loop because final_value is true. Hence we can not
5837 use regno_last_uid and regno_first_uid as above in record_giv. */
5838
5839 /* Search the loop to determine whether any assignments to the
5840 biv occur during the giv's lifetime. Start with the insn
5841 that sets the giv, and search around the loop until we come
5842 back to that insn again.
5843
5844 Also fail if there is a jump within the giv's lifetime that jumps
5845 to somewhere outside the lifetime but still within the loop. This
5846 catches spaghetti code where the execution order is not linear, and
5847 hence the above test fails. Here we assume that the giv lifetime
5848 does not extend from one iteration of the loop to the next, so as
5849 to make the test easier. Since the lifetime isn't known yet,
5850 this requires two loops. See also record_giv above. */
5851
5852 last_giv_use = v->insn;
5853
5854 while (1)
5855 {
5856 p = NEXT_INSN (p);
5857 if (p == loop->end)
5858 {
5859 before_giv_insn = 1;
5860 p = NEXT_INSN (loop->start);
5861 }
5862 if (p == v->insn)
5863 break;
5864
5865 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5866 || GET_CODE (p) == CALL_INSN)
5867 {
5868 /* It is possible for the BIV increment to use the GIV if we
5869 have a cycle. Thus we must be sure to check each insn for
5870 both BIV and GIV uses, and we must check for BIV uses
5871 first. */
5872
5873 if (! biv_increment_seen
5874 && reg_set_p (v->src_reg, PATTERN (p)))
5875 biv_increment_seen = 1;
5876
5877 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5878 {
5879 if (biv_increment_seen || before_giv_insn)
5880 {
5881 v->replaceable = 0;
5882 v->not_replaceable = 1;
5883 break;
5884 }
5885 last_giv_use = p;
5886 }
5887 }
5888 }
5889
5890 /* Now that the lifetime of the giv is known, check for branches
5891 from within the lifetime to outside the lifetime if it is still
5892 replaceable. */
5893
5894 if (v->replaceable)
5895 {
5896 p = v->insn;
5897 while (1)
5898 {
5899 p = NEXT_INSN (p);
5900 if (p == loop->end)
5901 p = NEXT_INSN (loop->start);
5902 if (p == last_giv_use)
5903 break;
5904
5905 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5906 && LABEL_NAME (JUMP_LABEL (p))
5907 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5908 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5909 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5910 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5911 {
5912 v->replaceable = 0;
5913 v->not_replaceable = 1;
5914
5915 if (loop_dump_stream)
5916 fprintf (loop_dump_stream,
5917 "Found branch outside giv lifetime.\n");
5918
5919 break;
5920 }
5921 }
5922 }
5923
5924 /* If it is replaceable, then save the final value. */
5925 if (v->replaceable)
5926 v->final_value = final_value;
5927 }
5928
5929 if (loop_dump_stream && v->replaceable)
5930 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5931 INSN_UID (v->insn), REGNO (v->dest_reg));
5932 }
5933 \f
5934 /* Update the status of whether a giv can derive other givs.
5935
5936 We need to do something special if there is or may be an update to the biv
5937 between the time the giv is defined and the time it is used to derive
5938 another giv.
5939
5940 In addition, a giv that is only conditionally set is not allowed to
5941 derive another giv once a label has been passed.
5942
5943 The cases we look at are when a label or an update to a biv is passed. */
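/* As an illustrative example, suppose giv g = 4*i + 8 is computed and the
   biv is then incremented by i = i + 2 later in the same iteration.  In
   terms of the new `i', the register holding g contains 4*(i - 2) + 8, so
   a giv derived from g must compensate by the product 4*2 = 8; that
   compensation is what derive_adjustment records.  If the product cannot
   be formed, cant_derive is set instead.  */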
5944
5945 static void
5946 update_giv_derive (loop, p)
5947 const struct loop *loop;
5948 rtx p;
5949 {
5950 struct loop_ivs *ivs = LOOP_IVS (loop);
5951 struct iv_class *bl;
5952 struct induction *biv, *giv;
5953 rtx tem;
5954 int dummy;
5955
5956 /* Search all IV classes, then all bivs, and finally all givs.
5957
5958 There are three cases we are concerned with. First we have the situation
5959 of a giv that is only updated conditionally. In that case, it may not
5960 derive any givs after a label is passed.
5961
5962 The second case is when a biv update occurs, or may occur, after the
5963 definition of a giv. For certain biv updates (see below) that are
5964 known to occur between the giv definition and use, we can adjust the
5965 giv definition. For others, or when the biv update is conditional,
5966 we must prevent the giv from deriving any other givs. There are two
5967 sub-cases within this case.
5968
5969 If this is a label, we are concerned with any biv update that is done
5970 conditionally, since it may be done after the giv is defined followed by
5971 a branch here (actually, we need to pass both a jump and a label, but
5972 this extra tracking doesn't seem worth it).
5973
5974 If this is a jump, we are concerned about any biv update that may be
5975 executed multiple times. We are actually only concerned about
5976 backward jumps, but it is probably not worth performing the test
5977 on the jump again here.
5978
5979 If this is a biv update, we must adjust the giv status to show that a
5980 subsequent biv update was performed. If this adjustment cannot be done,
5981 the giv cannot derive further givs. */
5982
5983 for (bl = ivs->list; bl; bl = bl->next)
5984 for (biv = bl->biv; biv; biv = biv->next_iv)
5985 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5986 || biv->insn == p)
5987 {
5988 for (giv = bl->giv; giv; giv = giv->next_iv)
5989 {
5990 /* If cant_derive is already true, there is no point in
5991 checking all of these conditions again. */
5992 if (giv->cant_derive)
5993 continue;
5994
5995 /* If this giv is conditionally set and we have passed a label,
5996 it cannot derive anything. */
5997 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5998 giv->cant_derive = 1;
5999
6000 /* Skip givs that have mult_val == 0, since
6001 they are really invariants. Also skip those that are
6002 replaceable, since we know their lifetime doesn't contain
6003 any biv update. */
6004 else if (giv->mult_val == const0_rtx || giv->replaceable)
6005 continue;
6006
6007 /* The only way we can allow this giv to derive another
6008 is if this is a biv increment and we can form the product
6009 of biv->add_val and giv->mult_val. In this case, we will
6010 be able to compute a compensation. */
6011 else if (biv->insn == p)
6012 {
6013 rtx ext_val_dummy;
6014
6015 tem = 0;
6016 if (biv->mult_val == const1_rtx)
6017 tem = simplify_giv_expr (loop,
6018 gen_rtx_MULT (giv->mode,
6019 biv->add_val,
6020 giv->mult_val),
6021 &ext_val_dummy, &dummy);
6022
6023 if (tem && giv->derive_adjustment)
6024 tem = simplify_giv_expr
6025 (loop,
6026 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6027 &ext_val_dummy, &dummy);
6028
6029 if (tem)
6030 giv->derive_adjustment = tem;
6031 else
6032 giv->cant_derive = 1;
6033 }
6034 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6035 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6036 giv->cant_derive = 1;
6037 }
6038 }
6039 }
6040 \f
6041 /* Check whether an insn is an increment legitimate for a basic induction var.
6042 X is the source of insn P, or a part of it.
6043 MODE is the mode in which X should be interpreted.
6044
6045 DEST_REG is the putative biv, also the destination of the insn.
6046 We accept patterns of these forms:
6047 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6048 REG = INVARIANT + REG
6049
6050 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6051 store the additive term into *INC_VAL, and store the place where
6052 we found the additive term into *LOCATION.
6053
6054 If X is an assignment of an invariant into DEST_REG, we set
6055 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6056
6057 We also want to detect a BIV when it corresponds to a variable
6058 whose mode was promoted via PROMOTE_MODE. In that case, an increment
6059 of the variable may be a PLUS that adds a SUBREG of that variable to
6060 an invariant and then sign- or zero-extends the result of the PLUS
6061 into the variable.
6062
6063 Most GIVs in such cases will be in the promoted mode, since that is
6064 probably the natural computation mode (and almost certainly the mode
6065 used for addresses) on the machine. So we view the pseudo-reg containing
6066 the variable as the BIV, as if it were simply incremented.
6067
6068 Note that treating the entire pseudo as a BIV will result in making
6069 simple increments to any GIVs based on it. However, if the variable
6070 overflows in its declared mode but not its promoted mode, the result will
6071 be incorrect. This is acceptable if the variable is signed, since
6072 overflows in such cases are undefined, but not if it is unsigned, since
6073 those overflows are defined. So we only check for SIGN_EXTEND and
6074 not ZERO_EXTEND.
6075
6076 If we cannot find a biv, we return 0. */
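/* For example (illustrative), for

       (set (reg i) (plus (reg i) (const_int 1)))

   we return 1 with *MULT_VAL == const1_rtx, *INC_VAL == (const_int 1) and
   *LOCATION pointing at the constant, while for

       (set (reg i) (const_int 0))

   inside the innermost loop we return 1 with *MULT_VAL == const0_rtx and
   *INC_VAL == (const_int 0).  */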
6077
6078 static int
6079 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6080 const struct loop *loop;
6081 rtx x;
6082 enum machine_mode mode;
6083 rtx dest_reg;
6084 rtx p;
6085 rtx *inc_val;
6086 rtx *mult_val;
6087 rtx **location;
6088 {
6089 enum rtx_code code;
6090 rtx *argp, arg;
6091 rtx insn, set = 0;
6092
6093 code = GET_CODE (x);
6094 *location = NULL;
6095 switch (code)
6096 {
6097 case PLUS:
6098 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6099 || (GET_CODE (XEXP (x, 0)) == SUBREG
6100 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6101 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6102 {
6103 argp = &XEXP (x, 1);
6104 }
6105 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6106 || (GET_CODE (XEXP (x, 1)) == SUBREG
6107 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6108 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6109 {
6110 argp = &XEXP (x, 0);
6111 }
6112 else
6113 return 0;
6114
6115 arg = *argp;
6116 if (loop_invariant_p (loop, arg) != 1)
6117 return 0;
6118
6119 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6120 *mult_val = const1_rtx;
6121 *location = argp;
6122 return 1;
6123
6124 case SUBREG:
6125 /* If this is a SUBREG for a promoted variable, check the inner
6126 value. */
6127 if (SUBREG_PROMOTED_VAR_P (x))
6128 return basic_induction_var (loop, SUBREG_REG (x),
6129 GET_MODE (SUBREG_REG (x)),
6130 dest_reg, p, inc_val, mult_val, location);
6131 return 0;
6132
6133 case REG:
6134 /* If this register is assigned in a previous insn, look at its
6135 source, but don't go outside the loop or past a label. */
6136
6137 /* If this sets a register to itself, we would repeat any previous
6138 biv increment if we applied this strategy blindly. */
6139 if (rtx_equal_p (dest_reg, x))
6140 return 0;
6141
6142 insn = p;
6143 while (1)
6144 {
6145 rtx dest;
6146 do
6147 {
6148 insn = PREV_INSN (insn);
6149 }
6150 while (insn && GET_CODE (insn) == NOTE
6151 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6152
6153 if (!insn)
6154 break;
6155 set = single_set (insn);
6156 if (set == 0)
6157 break;
6158 dest = SET_DEST (set);
6159 if (dest == x
6160 || (GET_CODE (dest) == SUBREG
6161 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6162 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6163 && SUBREG_REG (dest) == x))
6164 return basic_induction_var (loop, SET_SRC (set),
6165 (GET_MODE (SET_SRC (set)) == VOIDmode
6166 ? GET_MODE (x)
6167 : GET_MODE (SET_SRC (set))),
6168 dest_reg, insn,
6169 inc_val, mult_val, location);
6170
6171 while (GET_CODE (dest) == SIGN_EXTRACT
6172 || GET_CODE (dest) == ZERO_EXTRACT
6173 || GET_CODE (dest) == SUBREG
6174 || GET_CODE (dest) == STRICT_LOW_PART)
6175 dest = XEXP (dest, 0);
6176 if (dest == x)
6177 break;
6178 }
6179 /* Fall through. */
6180
6181 /* Can accept constant setting of biv only when inside innermost loop.
6182 Otherwise, a biv of an inner loop may be incorrectly recognized
6183 as a biv of the outer loop,
6184 causing code to be moved INTO the inner loop. */
6185 case MEM:
6186 if (loop_invariant_p (loop, x) != 1)
6187 return 0;
6188 case CONST_INT:
6189 case SYMBOL_REF:
6190 case CONST:
6191 /* convert_modes aborts if we try to convert to or from CCmode, so just
6192 exclude that case. It is very unlikely that a condition code value
6193 would be a useful iterator anyway. */
6194 if (loop->level == 1
6195 && GET_MODE_CLASS (mode) != MODE_CC
6196 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6197 {
6198 /* Possible bug here? Perhaps we don't know the mode of X. */
6199 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6200 *mult_val = const0_rtx;
6201 return 1;
6202 }
6203 else
6204 return 0;
6205
6206 case SIGN_EXTEND:
6207 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6208 dest_reg, p, inc_val, mult_val, location);
6209
6210 case ASHIFTRT:
6211 /* Similar, since this can be a sign extension. */
6212 for (insn = PREV_INSN (p);
6213 (insn && GET_CODE (insn) == NOTE
6214 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6215 insn = PREV_INSN (insn))
6216 ;
6217
6218 if (insn)
6219 set = single_set (insn);
6220
6221 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6222 && set && SET_DEST (set) == XEXP (x, 0)
6223 && GET_CODE (XEXP (x, 1)) == CONST_INT
6224 && INTVAL (XEXP (x, 1)) >= 0
6225 && GET_CODE (SET_SRC (set)) == ASHIFT
6226 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6227 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6228 GET_MODE (XEXP (x, 0)),
6229 dest_reg, insn, inc_val, mult_val,
6230 location);
6231 return 0;
6232
6233 default:
6234 return 0;
6235 }
6236 }
6237 \f
6238 /* A general induction variable (giv) is any quantity that is a linear
6239 function of a basic induction variable,
6240 i.e. giv = biv * mult_val + add_val.
6241 The coefficients can be any loop invariant quantity.
6242 A giv need not be computed directly from the biv;
6243 it can be computed by way of other givs. */
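/* For instance, in a source loop such as (names purely illustrative)

       for (i = 0; i < n; i++)
         p[i] = 0;

   `i' is a biv, and on a target with 4-byte ints the address computation
   p + i*4 used by the store is a giv with mult_val 4 and add_val p.  */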
6244
6245 /* Determine whether X computes a giv.
6246 If it does, return a nonzero value
6247 which is the benefit from eliminating the computation of X;
6248 set *SRC_REG to the register of the biv that it is computed from;
6249 set *ADD_VAL and *MULT_VAL to the coefficients,
6250 such that the value of X is biv * mult + add; */
6251
6252 static int
6253 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6254 is_addr, pbenefit, addr_mode)
6255 const struct loop *loop;
6256 rtx x;
6257 rtx *src_reg;
6258 rtx *add_val;
6259 rtx *mult_val;
6260 rtx *ext_val;
6261 int is_addr;
6262 int *pbenefit;
6263 enum machine_mode addr_mode;
6264 {
6265 struct loop_ivs *ivs = LOOP_IVS (loop);
6266 rtx orig_x = x;
6267
6268 /* If this is an invariant, forget it, it isn't a giv. */
6269 if (loop_invariant_p (loop, x) == 1)
6270 return 0;
6271
6272 *pbenefit = 0;
6273 *ext_val = NULL_RTX;
6274 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6275 if (x == 0)
6276 return 0;
6277
6278 switch (GET_CODE (x))
6279 {
6280 case USE:
6281 case CONST_INT:
6282 /* Since this is now an invariant and wasn't before, it must be a giv
6283 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6284 with. */
6285 *src_reg = ivs->list->biv->dest_reg;
6286 *mult_val = const0_rtx;
6287 *add_val = x;
6288 break;
6289
6290 case REG:
6291 /* This is equivalent to a BIV. */
6292 *src_reg = x;
6293 *mult_val = const1_rtx;
6294 *add_val = const0_rtx;
6295 break;
6296
6297 case PLUS:
6298 /* Either (plus (biv) (invar)) or
6299 (plus (mult (biv) (invar_1)) (invar_2)). */
6300 if (GET_CODE (XEXP (x, 0)) == MULT)
6301 {
6302 *src_reg = XEXP (XEXP (x, 0), 0);
6303 *mult_val = XEXP (XEXP (x, 0), 1);
6304 }
6305 else
6306 {
6307 *src_reg = XEXP (x, 0);
6308 *mult_val = const1_rtx;
6309 }
6310 *add_val = XEXP (x, 1);
6311 break;
6312
6313 case MULT:
6314 /* ADD_VAL is zero. */
6315 *src_reg = XEXP (x, 0);
6316 *mult_val = XEXP (x, 1);
6317 *add_val = const0_rtx;
6318 break;
6319
6320 default:
6321 abort ();
6322 }
6323
6324 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6325 unless they are CONST_INT). */
6326 if (GET_CODE (*add_val) == USE)
6327 *add_val = XEXP (*add_val, 0);
6328 if (GET_CODE (*mult_val) == USE)
6329 *mult_val = XEXP (*mult_val, 0);
6330
6331 if (is_addr)
6332 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6333 else
6334 *pbenefit += rtx_cost (orig_x, SET);
6335
6336 /* Always return true if this is a giv so it will be detected as such,
6337 even if the benefit is zero or negative. This allows elimination
6338 of bivs that might otherwise not be eliminated. */
6339 return 1;
6340 }
6341 \f
6342 /* Given an expression, X, try to form it as a linear function of a biv.
6343 We will canonicalize it to be of the form
6344 (plus (mult (BIV) (invar_1))
6345 (invar_2))
6346 with possible degeneracies.
6347
6348 The invariant expressions must each be of a form that can be used as a
6349 machine operand. We surround then with a USE rtx (a hack, but localized
6350 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6351 routine; it is the caller's responsibility to strip them.
6352
6353 If no such canonicalization is possible (i.e., two biv's are used or an
6354 expression that is neither invariant nor a biv or giv), this routine
6355 returns 0.
6356
6357 For a non-zero return, the result will have a code of CONST_INT, USE,
6358 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6359
6360 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
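/* For example (illustrative), with `i' a biv, the expression
   (plus (ashift (reg i) (const_int 2)) (const_int 8)) canonicalizes to
   (plus (mult (reg i) (const_int 4)) (const_int 8)), while
   (minus (reg i) (reg j)) with `j' another biv yields 0, since two
   different bivs cannot be combined into a single linear form.  */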
6361
6362 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6363 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6364
6365 static rtx
6366 simplify_giv_expr (loop, x, ext_val, benefit)
6367 const struct loop *loop;
6368 rtx x;
6369 rtx *ext_val;
6370 int *benefit;
6371 {
6372 struct loop_ivs *ivs = LOOP_IVS (loop);
6373 struct loop_regs *regs = LOOP_REGS (loop);
6374 enum machine_mode mode = GET_MODE (x);
6375 rtx arg0, arg1;
6376 rtx tem;
6377
6378 /* If this is not an integer mode, or if we cannot do arithmetic in this
6379 mode, this can't be a giv. */
6380 if (mode != VOIDmode
6381 && (GET_MODE_CLASS (mode) != MODE_INT
6382 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6383 return NULL_RTX;
6384
6385 switch (GET_CODE (x))
6386 {
6387 case PLUS:
6388 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6389 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6390 if (arg0 == 0 || arg1 == 0)
6391 return NULL_RTX;
6392
6393 /* Put constant last, CONST_INT last if both constant. */
6394 if ((GET_CODE (arg0) == USE
6395 || GET_CODE (arg0) == CONST_INT)
6396 && ! ((GET_CODE (arg0) == USE
6397 && GET_CODE (arg1) == USE)
6398 || GET_CODE (arg1) == CONST_INT))
6399 tem = arg0, arg0 = arg1, arg1 = tem;
6400
6401 /* Handle addition of zero, then addition of an invariant. */
6402 if (arg1 == const0_rtx)
6403 return arg0;
6404 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6405 switch (GET_CODE (arg0))
6406 {
6407 case CONST_INT:
6408 case USE:
6409 /* Adding two invariants must result in an invariant, so enclose
6410 addition operation inside a USE and return it. */
6411 if (GET_CODE (arg0) == USE)
6412 arg0 = XEXP (arg0, 0);
6413 if (GET_CODE (arg1) == USE)
6414 arg1 = XEXP (arg1, 0);
6415
6416 if (GET_CODE (arg0) == CONST_INT)
6417 tem = arg0, arg0 = arg1, arg1 = tem;
6418 if (GET_CODE (arg1) == CONST_INT)
6419 tem = sge_plus_constant (arg0, arg1);
6420 else
6421 tem = sge_plus (mode, arg0, arg1);
6422
6423 if (GET_CODE (tem) != CONST_INT)
6424 tem = gen_rtx_USE (mode, tem);
6425 return tem;
6426
6427 case REG:
6428 case MULT:
6429 /* biv + invar or mult + invar. Return sum. */
6430 return gen_rtx_PLUS (mode, arg0, arg1);
6431
6432 case PLUS:
6433 /* (a + invar_1) + invar_2. Associate. */
6434 return
6435 simplify_giv_expr (loop,
6436 gen_rtx_PLUS (mode,
6437 XEXP (arg0, 0),
6438 gen_rtx_PLUS (mode,
6439 XEXP (arg0, 1),
6440 arg1)),
6441 ext_val, benefit);
6442
6443 default:
6444 abort ();
6445 }
6446
6447 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6448 MULT to reduce cases. */
6449 if (GET_CODE (arg0) == REG)
6450 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6451 if (GET_CODE (arg1) == REG)
6452 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6453
6454 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6455 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6456 Recurse to associate the second PLUS. */
6457 if (GET_CODE (arg1) == MULT)
6458 tem = arg0, arg0 = arg1, arg1 = tem;
6459
6460 if (GET_CODE (arg1) == PLUS)
6461 return
6462 simplify_giv_expr (loop,
6463 gen_rtx_PLUS (mode,
6464 gen_rtx_PLUS (mode, arg0,
6465 XEXP (arg1, 0)),
6466 XEXP (arg1, 1)),
6467 ext_val, benefit);
6468
6469 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6470 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6471 return NULL_RTX;
6472
6473 if (!rtx_equal_p (arg0, arg1))
6474 return NULL_RTX;
6475
6476 return simplify_giv_expr (loop,
6477 gen_rtx_MULT (mode,
6478 XEXP (arg0, 0),
6479 gen_rtx_PLUS (mode,
6480 XEXP (arg0, 1),
6481 XEXP (arg1, 1))),
6482 ext_val, benefit);
6483
6484 case MINUS:
6485 /* Handle "a - b" as "a + b * (-1)". */
6486 return simplify_giv_expr (loop,
6487 gen_rtx_PLUS (mode,
6488 XEXP (x, 0),
6489 gen_rtx_MULT (mode,
6490 XEXP (x, 1),
6491 constm1_rtx)),
6492 ext_val, benefit);
6493
6494 case MULT:
6495 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6496 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6497 if (arg0 == 0 || arg1 == 0)
6498 return NULL_RTX;
6499
6500 /* Put constant last, CONST_INT last if both constant. */
6501 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6502 && GET_CODE (arg1) != CONST_INT)
6503 tem = arg0, arg0 = arg1, arg1 = tem;
6504
6505 /* If second argument is not now constant, not giv. */
6506 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6507 return NULL_RTX;
6508
6509 /* Handle multiply by 0 or 1. */
6510 if (arg1 == const0_rtx)
6511 return const0_rtx;
6512
6513 else if (arg1 == const1_rtx)
6514 return arg0;
6515
6516 switch (GET_CODE (arg0))
6517 {
6518 case REG:
6519 /* biv * invar. Done. */
6520 return gen_rtx_MULT (mode, arg0, arg1);
6521
6522 case CONST_INT:
6523 /* Product of two constants. */
6524 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6525
6526 case USE:
6527 /* invar * invar is a giv, but attempt to simplify it somehow. */
6528 if (GET_CODE (arg1) != CONST_INT)
6529 return NULL_RTX;
6530
6531 arg0 = XEXP (arg0, 0);
6532 if (GET_CODE (arg0) == MULT)
6533 {
6534 /* (invar_0 * invar_1) * invar_2. Associate. */
6535 return simplify_giv_expr (loop,
6536 gen_rtx_MULT (mode,
6537 XEXP (arg0, 0),
6538 gen_rtx_MULT (mode,
6539 XEXP (arg0,
6540 1),
6541 arg1)),
6542 ext_val, benefit);
6543 }
6544 /* Propagate the MULT expressions to the innermost nodes. */
6545 else if (GET_CODE (arg0) == PLUS)
6546 {
6547 /* (invar_0 + invar_1) * invar_2. Distribute. */
6548 return simplify_giv_expr (loop,
6549 gen_rtx_PLUS (mode,
6550 gen_rtx_MULT (mode,
6551 XEXP (arg0,
6552 0),
6553 arg1),
6554 gen_rtx_MULT (mode,
6555 XEXP (arg0,
6556 1),
6557 arg1)),
6558 ext_val, benefit);
6559 }
6560 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6561
6562 case MULT:
6563 /* (a * invar_1) * invar_2. Associate. */
6564 return simplify_giv_expr (loop,
6565 gen_rtx_MULT (mode,
6566 XEXP (arg0, 0),
6567 gen_rtx_MULT (mode,
6568 XEXP (arg0, 1),
6569 arg1)),
6570 ext_val, benefit);
6571
6572 case PLUS:
6573 /* (a + invar_1) * invar_2. Distribute. */
6574 return simplify_giv_expr (loop,
6575 gen_rtx_PLUS (mode,
6576 gen_rtx_MULT (mode,
6577 XEXP (arg0, 0),
6578 arg1),
6579 gen_rtx_MULT (mode,
6580 XEXP (arg0, 1),
6581 arg1)),
6582 ext_val, benefit);
6583
6584 default:
6585 abort ();
6586 }
6587
6588 case ASHIFT:
6589 /* Shift by constant is multiply by power of two. */
6590 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6591 return 0;
6592
6593 return
6594 simplify_giv_expr (loop,
6595 gen_rtx_MULT (mode,
6596 XEXP (x, 0),
6597 GEN_INT ((HOST_WIDE_INT) 1
6598 << INTVAL (XEXP (x, 1)))),
6599 ext_val, benefit);
6600
6601 case NEG:
6602 /* "-a" is "a * (-1)" */
6603 return simplify_giv_expr (loop,
6604 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6605 ext_val, benefit);
6606
6607 case NOT:
6608 /* "~a" is "-a - 1". Silly, but easy. */
6609 return simplify_giv_expr (loop,
6610 gen_rtx_MINUS (mode,
6611 gen_rtx_NEG (mode, XEXP (x, 0)),
6612 const1_rtx),
6613 ext_val, benefit);
6614
6615 case USE:
6616 /* Already in proper form for invariant. */
6617 return x;
6618
6619 case SIGN_EXTEND:
6620 case ZERO_EXTEND:
6621 case TRUNCATE:
6622 /* Conditionally recognize extensions of simple IVs. After we've
6623 computed loop traversal counts and verified the range of the
6624 source IV, we'll reevaluate this as a GIV. */
6625 if (*ext_val == NULL_RTX)
6626 {
6627 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6628 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6629 {
6630 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6631 return arg0;
6632 }
6633 }
6634 goto do_default;
6635
6636 case REG:
6637 /* If this is a new register, we can't deal with it. */
6638 if (REGNO (x) >= max_reg_before_loop)
6639 return 0;
6640
6641 /* Check for biv or giv. */
6642 switch (REG_IV_TYPE (ivs, REGNO (x)))
6643 {
6644 case BASIC_INDUCT:
6645 return x;
6646 case GENERAL_INDUCT:
6647 {
6648 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6649
6650 /* Form expression from giv and add benefit. Ensure this giv
6651 can derive another and subtract any needed adjustment if so. */
6652
6653 /* Increasing the benefit here is risky. The only case in which it
6654 is arguably correct is if this is the only use of V. In other
6655 cases, this will artificially inflate the benefit of the current
6656 giv, and lead to suboptimal code. Thus, it is disabled, since
6657 potentially not reducing an only marginally beneficial giv is
6658 less harmful than reducing many givs that are not really
6659 beneficial. */
6660 {
6661 rtx single_use = regs->array[REGNO (x)].single_usage;
6662 if (single_use && single_use != const0_rtx)
6663 *benefit += v->benefit;
6664 }
6665
6666 if (v->cant_derive)
6667 return 0;
6668
6669 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6670 v->src_reg, v->mult_val),
6671 v->add_val);
6672
6673 if (v->derive_adjustment)
6674 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6675 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6676 if (*ext_val)
6677 {
6678 if (!v->ext_dependent)
6679 return arg0;
6680 }
6681 else
6682 {
6683 *ext_val = v->ext_dependent;
6684 return arg0;
6685 }
6686 return 0;
6687 }
6688
6689 default:
6690 do_default:
6691 /* If it isn't an induction variable, and it is invariant, we
6692 may be able to simplify things further by looking through
6693 the bits we just moved outside the loop. */
6694 if (loop_invariant_p (loop, x) == 1)
6695 {
6696 struct movable *m;
6697 struct loop_movables *movables = LOOP_MOVABLES (loop);
6698
6699 for (m = movables->head; m; m = m->next)
6700 if (rtx_equal_p (x, m->set_dest))
6701 {
6702 /* Ok, we found a match. Substitute and simplify. */
6703
6704 /* If we match another movable, we must use that, as
6705 this one is going away. */
6706 if (m->match)
6707 return simplify_giv_expr (loop, m->match->set_dest,
6708 ext_val, benefit);
6709
6710 /* If consec is non-zero, this is a member of a group of
6711 instructions that were moved together. We handle this
6712 case only to the point of seeking to the last insn and
6713 looking for a REG_EQUAL. Fail if we don't find one. */
6714 if (m->consec != 0)
6715 {
6716 int i = m->consec;
6717 tem = m->insn;
6718 do
6719 {
6720 tem = NEXT_INSN (tem);
6721 }
6722 while (--i > 0);
6723
6724 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6725 if (tem)
6726 tem = XEXP (tem, 0);
6727 }
6728 else
6729 {
6730 tem = single_set (m->insn);
6731 if (tem)
6732 tem = SET_SRC (tem);
6733 }
6734
6735 if (tem)
6736 {
6737 /* What we are most interested in is pointer
6738 arithmetic on invariants -- only take
6739 patterns we may be able to do something with. */
6740 if (GET_CODE (tem) == PLUS
6741 || GET_CODE (tem) == MULT
6742 || GET_CODE (tem) == ASHIFT
6743 || GET_CODE (tem) == CONST_INT
6744 || GET_CODE (tem) == SYMBOL_REF)
6745 {
6746 tem = simplify_giv_expr (loop, tem, ext_val,
6747 benefit);
6748 if (tem)
6749 return tem;
6750 }
6751 else if (GET_CODE (tem) == CONST
6752 && GET_CODE (XEXP (tem, 0)) == PLUS
6753 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6754 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6755 {
6756 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6757 ext_val, benefit);
6758 if (tem)
6759 return tem;
6760 }
6761 }
6762 break;
6763 }
6764 }
6765 break;
6766 }
6767
6768 /* Fall through to general case. */
6769 default:
6770 /* If invariant, return as USE (unless CONST_INT).
6771 Otherwise, not giv. */
6772 if (GET_CODE (x) == USE)
6773 x = XEXP (x, 0);
6774
6775 if (loop_invariant_p (loop, x) == 1)
6776 {
6777 if (GET_CODE (x) == CONST_INT)
6778 return x;
6779 if (GET_CODE (x) == CONST
6780 && GET_CODE (XEXP (x, 0)) == PLUS
6781 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6782 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6783 x = XEXP (x, 0);
6784 return gen_rtx_USE (mode, x);
6785 }
6786 else
6787 return 0;
6788 }
6789 }
6790
6791 /* This routine folds invariants such that there is only ever one
6792 CONST_INT in the summation. It is only used by simplify_giv_expr. */
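/* For example (illustrative), folding (plus (reg a) (const_int 4)) with
   (const_int 3) yields (plus (reg a) (const_int 7)), keeping a single
   CONST_INT in the sum.  */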
6793
6794 static rtx
6795 sge_plus_constant (x, c)
6796 rtx x, c;
6797 {
6798 if (GET_CODE (x) == CONST_INT)
6799 return GEN_INT (INTVAL (x) + INTVAL (c));
6800 else if (GET_CODE (x) != PLUS)
6801 return gen_rtx_PLUS (GET_MODE (x), x, c);
6802 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6803 {
6804 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6805 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6806 }
6807 else if (GET_CODE (XEXP (x, 0)) == PLUS
6808 || GET_CODE (XEXP (x, 1)) != PLUS)
6809 {
6810 return gen_rtx_PLUS (GET_MODE (x),
6811 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6812 }
6813 else
6814 {
6815 return gen_rtx_PLUS (GET_MODE (x),
6816 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6817 }
6818 }
6819
6820 static rtx
6821 sge_plus (mode, x, y)
6822 enum machine_mode mode;
6823 rtx x, y;
6824 {
6825 while (GET_CODE (y) == PLUS)
6826 {
6827 rtx a = XEXP (y, 0);
6828 if (GET_CODE (a) == CONST_INT)
6829 x = sge_plus_constant (x, a);
6830 else
6831 x = gen_rtx_PLUS (mode, x, a);
6832 y = XEXP (y, 1);
6833 }
6834 if (GET_CODE (y) == CONST_INT)
6835 x = sge_plus_constant (x, y);
6836 else
6837 x = gen_rtx_PLUS (mode, x, y);
6838 return x;
6839 }
6840 \f
6841 /* Help detect a giv that is calculated by several consecutive insns;
6842 for example,
6843 giv = biv * M
6844 giv = giv + A
6845 The caller has already identified the first insn P as having a giv as dest;
6846 we check that all other insns that set the same register follow
6847 immediately after P, that they alter nothing else,
6848 and that the result of the last is still a giv.
6849
6850 The value is 0 if the reg set in P is not really a giv.
6851 Otherwise, the value is the amount gained by eliminating
6852 all the consecutive insns that compute the value.
6853
6854 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6855 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6856
6857 The coefficients of the ultimate giv value are stored in
6858 *MULT_VAL and *ADD_VAL. */
6859
6860 static int
6861 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6862 add_val, mult_val, ext_val, last_consec_insn)
6863 const struct loop *loop;
6864 int first_benefit;
6865 rtx p;
6866 rtx src_reg;
6867 rtx dest_reg;
6868 rtx *add_val;
6869 rtx *mult_val;
6870 rtx *ext_val;
6871 rtx *last_consec_insn;
6872 {
6873 struct loop_ivs *ivs = LOOP_IVS (loop);
6874 struct loop_regs *regs = LOOP_REGS (loop);
6875 int count;
6876 enum rtx_code code;
6877 int benefit;
6878 rtx temp;
6879 rtx set;
6880
6881 /* Indicate that this is a giv so that we can update the value produced in
6882 each insn of the multi-insn sequence.
6883
6884 This induction structure will be used only by the call to
6885 general_induction_var below, so we can allocate it on our stack.
6886 If this is a giv, our caller will replace the induct var entry with
6887 a new induction structure. */
6888 struct induction *v;
6889
6890 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6891 return 0;
6892
6893 v = (struct induction *) alloca (sizeof (struct induction));
6894 v->src_reg = src_reg;
6895 v->mult_val = *mult_val;
6896 v->add_val = *add_val;
6897 v->benefit = first_benefit;
6898 v->cant_derive = 0;
6899 v->derive_adjustment = 0;
6900 v->ext_dependent = NULL_RTX;
6901
6902 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6903 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6904
6905 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6906
6907 while (count > 0)
6908 {
6909 p = NEXT_INSN (p);
6910 code = GET_CODE (p);
6911
6912 /* If libcall, skip to end of call sequence. */
6913 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6914 p = XEXP (temp, 0);
6915
6916 if (code == INSN
6917 && (set = single_set (p))
6918 && GET_CODE (SET_DEST (set)) == REG
6919 && SET_DEST (set) == dest_reg
6920 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6921 add_val, mult_val, ext_val, 0,
6922 &benefit, VOIDmode)
6923 /* Giv created by equivalent expression. */
6924 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6925 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6926 add_val, mult_val, ext_val, 0,
6927 &benefit, VOIDmode)))
6928 && src_reg == v->src_reg)
6929 {
6930 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6931 benefit += libcall_benefit (p);
6932
6933 count--;
6934 v->mult_val = *mult_val;
6935 v->add_val = *add_val;
6936 v->benefit += benefit;
6937 }
6938 else if (code != NOTE)
6939 {
6940 /* Allow insns that set something other than this giv to a
6941 constant. Such insns are needed on machines which cannot
6942 include long constants and should not disqualify a giv. */
6943 if (code == INSN
6944 && (set = single_set (p))
6945 && SET_DEST (set) != dest_reg
6946 && CONSTANT_P (SET_SRC (set)))
6947 continue;
6948
6949 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6950 return 0;
6951 }
6952 }
6953
6954 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6955 *last_consec_insn = p;
6956 return v->benefit;
6957 }
6958 \f
6959 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6960 represented by G1. If no such expression can be found, or it is clear that
6961 it cannot possibly be a valid address, 0 is returned.
6962
6963 To perform the computation, we note that
6964 G1 = x * v + a and
6965 G2 = y * v + b
6966 where `v' is the biv.
6967
6968 So G2 = (y/x) * G1 + (b - a*y/x).
6969
6970 Note that MULT = y/x.
6971
6972 Update: A and B are now allowed to be additive expressions such that
6973 B contains all variables in A. That is, computing B-A will not require
6974 subtracting variables. */
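/* A small worked example (purely illustrative): if G1 = 4*v + 8 and
   G2 = 12*v + 32, then MULT = 12/4 = 3 and the addend is 32 - 8*3 = 8,
   so G2 is expressed as (plus (mult G1 3) 8) in terms of G1's register.  */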
6975
6976 static rtx
6977 express_from_1 (a, b, mult)
6978 rtx a, b, mult;
6979 {
6980 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6981
6982 if (mult == const0_rtx)
6983 return b;
6984
6985 /* If MULT is not 1, we cannot handle A with non-constants, since we
6986 would then be required to subtract multiples of the registers in A.
6987 This is theoretically possible, and may even apply to some Fortran
6988 constructs, but it is a lot of work and we do not attempt it here. */
6989
6990 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6991 return NULL_RTX;
6992
6993 /* In general these structures are sorted top to bottom (down the PLUS
6994 chain), but not left to right across the PLUS. If B is a higher
6995 order giv than A, we can strip one level and recurse. If A is higher
6996 order, we'll eventually bail out, but won't know that until the end.
6997 If they are the same, we'll strip one level around this loop. */
6998
6999 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7000 {
7001 rtx ra, rb, oa, ob, tmp;
7002
7003 ra = XEXP (a, 0), oa = XEXP (a, 1);
7004 if (GET_CODE (ra) == PLUS)
7005 tmp = ra, ra = oa, oa = tmp;
7006
7007 rb = XEXP (b, 0), ob = XEXP (b, 1);
7008 if (GET_CODE (rb) == PLUS)
7009 tmp = rb, rb = ob, ob = tmp;
7010
7011 if (rtx_equal_p (ra, rb))
7012 /* We matched: remove one reg completely. */
7013 a = oa, b = ob;
7014 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7015 /* An alternate match. */
7016 a = oa, b = rb;
7017 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7018 /* An alternate match. */
7019 a = ra, b = ob;
7020 else
7021 {
7022 /* Indicates an extra register in B. Strip one level from B and
7023 recurse, hoping B was the higher order expression. */
7024 ob = express_from_1 (a, ob, mult);
7025 if (ob == NULL_RTX)
7026 return NULL_RTX;
7027 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7028 }
7029 }
7030
7031 /* Here we are at the last level of A, go through the cases hoping to
7032 get rid of everything but a constant. */
7033
7034 if (GET_CODE (a) == PLUS)
7035 {
7036 rtx ra, oa;
7037
7038 ra = XEXP (a, 0), oa = XEXP (a, 1);
7039 if (rtx_equal_p (oa, b))
7040 oa = ra;
7041 else if (!rtx_equal_p (ra, b))
7042 return NULL_RTX;
7043
7044 if (GET_CODE (oa) != CONST_INT)
7045 return NULL_RTX;
7046
7047 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7048 }
7049 else if (GET_CODE (a) == CONST_INT)
7050 {
7051 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7052 }
7053 else if (CONSTANT_P (a))
7054 {
7055 enum machine_mode mode_a = GET_MODE (a);
7056 enum machine_mode mode_b = GET_MODE (b);
7057 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7058 return simplify_gen_binary (MINUS, mode, b, a);
7059 }
7060 else if (GET_CODE (b) == PLUS)
7061 {
7062 if (rtx_equal_p (a, XEXP (b, 0)))
7063 return XEXP (b, 1);
7064 else if (rtx_equal_p (a, XEXP (b, 1)))
7065 return XEXP (b, 0);
7066 else
7067 return NULL_RTX;
7068 }
7069 else if (rtx_equal_p (a, b))
7070 return const0_rtx;
7071
7072 return NULL_RTX;
7073 }
7074
7075 rtx
7076 express_from (g1, g2)
7077 struct induction *g1, *g2;
7078 {
7079 rtx mult, add;
7080
7081 /* The value that G1 will be multiplied by must be a constant integer. Also,
7082 the only chance we have of getting a valid address is if b*c/a (see above
7083 for notation) is also an integer. */
7084 if (GET_CODE (g1->mult_val) == CONST_INT
7085 && GET_CODE (g2->mult_val) == CONST_INT)
7086 {
7087 if (g1->mult_val == const0_rtx
7088 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7089 return NULL_RTX;
7090 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7091 }
7092 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7093 mult = const1_rtx;
7094 else
7095 {
7096 /* ??? Find out if one is a multiple of the other? */
7097 return NULL_RTX;
7098 }
7099
7100 add = express_from_1 (g1->add_val, g2->add_val, mult);
7101 if (add == NULL_RTX)
7102 {
7103 /* Failed. If we've got a multiplication factor between G1 and G2,
7104 scale G1's addend and try again. */
7105 if (INTVAL (mult) > 1)
7106 {
7107 rtx g1_add_val = g1->add_val;
7108 if (GET_CODE (g1_add_val) == MULT
7109 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7110 {
7111 HOST_WIDE_INT m;
7112 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7113 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7114 XEXP (g1_add_val, 0), GEN_INT (m));
7115 }
7116 else
7117 {
7118 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7119 mult);
7120 }
7121
7122 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7123 }
7124 }
7125 if (add == NULL_RTX)
7126 return NULL_RTX;
7127
7128 /* Form simplified final result. */
7129 if (mult == const0_rtx)
7130 return add;
7131 else if (mult == const1_rtx)
7132 mult = g1->dest_reg;
7133 else
7134 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7135
7136 if (add == const0_rtx)
7137 return mult;
7138 else
7139 {
7140 if (GET_CODE (add) == PLUS
7141 && CONSTANT_P (XEXP (add, 1)))
7142 {
7143 rtx tem = XEXP (add, 1);
7144 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7145 add = tem;
7146 }
7147
7148 return gen_rtx_PLUS (g2->mode, mult, add);
7149 }
7150 }
7151 \f
7152 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7153 represented by G1. This indicates that G2 should be combined with G1 and
7154 that G2 can use (either directly or via an address expression) a register
7155 used to represent G1. */
7156
7157 static rtx
7158 combine_givs_p (g1, g2)
7159 struct induction *g1, *g2;
7160 {
7161 rtx comb, ret;
7162
7163 /* With the introduction of ext dependent givs, we must be careful about modes.
7164 G2 must not use a wider mode than G1. */
7165 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7166 return NULL_RTX;
7167
7168 ret = comb = express_from (g1, g2);
7169 if (comb == NULL_RTX)
7170 return NULL_RTX;
7171 if (g1->mode != g2->mode)
7172 ret = gen_lowpart (g2->mode, comb);
7173
7174 /* If these givs are identical, they can be combined. We use the results
7175 of express_from because the addends are not in a canonical form, so
7176 rtx_equal_p is a weaker test. */
7177 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7178 combination to be the other way round. */
7179 if (comb == g1->dest_reg
7180 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7181 {
7182 return ret;
7183 }
7184
7185 /* If G2 can be expressed as a function of G1 and that function is valid
7186 as an address and no more expensive than using a register for G2,
7187 the expression of G2 in terms of G1 can be used. */
7188 if (ret != NULL_RTX
7189 && g2->giv_type == DEST_ADDR
7190 && memory_address_p (GET_MODE (g2->mem), ret)
7191 /* ??? Loses, especially with -fforce-addr, where *g2->location
7192 will always be a register, and so anything more complicated
7193 gets discarded. */
7194 #if 0
7195 #ifdef ADDRESS_COST
7196 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7197 #else
7198 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7199 #endif
7200 #endif
7201 )
7202 {
7203 return ret;
7204 }
7205
7206 return NULL_RTX;
7207 }
7208 \f
7209 /* Check each extension dependent giv in this class to see if its
7210 root biv is safe from wrapping in the interior mode, which would
7211 make the giv illegal. */
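/* A sketch of the hazard, with made-up numbers: a QImode biv that starts at
250 and is incremented by 1 for ten iterations wraps from 255 back to 0 in
the interior mode, so a giv defined as a (zero_extend:SI ...) of that biv
would not follow the linear progression the giv machinery assumes.  The
checks below only accept bivs whose start and end values provably stay in
range for the extension used.  */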
7212
7213 static void
7214 check_ext_dependent_givs (bl, loop_info)
7215 struct iv_class *bl;
7216 struct loop_info *loop_info;
7217 {
7218 int ze_ok = 0, se_ok = 0, info_ok = 0;
7219 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7220 HOST_WIDE_INT start_val;
7221 unsigned HOST_WIDE_INT u_end_val = 0;
7222 unsigned HOST_WIDE_INT u_start_val = 0;
7223 rtx incr = pc_rtx;
7224 struct induction *v;
7225
7226 /* Make sure the iteration data is available. We must have
7227 constants in order to be certain of no overflow. */
7228 /* ??? An unknown iteration count with an increment of +-1
7229 combined with friendly exit tests against an invariant
7230 value is also amenable to optimization. Not implemented. */
7231 if (loop_info->n_iterations > 0
7232 && bl->initial_value
7233 && GET_CODE (bl->initial_value) == CONST_INT
7234 && (incr = biv_total_increment (bl))
7235 && GET_CODE (incr) == CONST_INT
7236 /* Make sure the host can represent the arithmetic. */
7237 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7238 {
7239 unsigned HOST_WIDE_INT abs_incr, total_incr;
7240 HOST_WIDE_INT s_end_val;
7241 int neg_incr;
7242
7243 info_ok = 1;
7244 start_val = INTVAL (bl->initial_value);
7245 u_start_val = start_val;
7246
7247 neg_incr = 0, abs_incr = INTVAL (incr);
7248 if (INTVAL (incr) < 0)
7249 neg_incr = 1, abs_incr = -abs_incr;
7250 total_incr = abs_incr * loop_info->n_iterations;
7251
7252 /* Check for host arithmetic overflow. */
7253 if (total_incr / loop_info->n_iterations == abs_incr)
7254 {
7255 unsigned HOST_WIDE_INT u_max;
7256 HOST_WIDE_INT s_max;
7257
7258 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7259 s_end_val = u_end_val;
7260 u_max = GET_MODE_MASK (biv_mode);
7261 s_max = u_max >> 1;
7262
7263 /* Check zero extension of biv ok. */
7264 if (start_val >= 0
7265 /* Check for host arithmetic overflow. */
7266 && (neg_incr
7267 ? u_end_val < u_start_val
7268 : u_end_val > u_start_val)
7269 /* Check for target arithmetic overflow. */
7270 && (neg_incr
7271 ? 1 /* taken care of with host overflow */
7272 : u_end_val <= u_max))
7273 {
7274 ze_ok = 1;
7275 }
7276
7277 /* Check sign extension of biv ok. */
7278 /* ??? While it is true that overflow with signed and pointer
7279 arithmetic is undefined, I fear too many programmers don't
7280 keep this fact in mind -- myself included on occasion.
7281 So be conservative with the signed overflow optimizations. */
7282 if (start_val >= -s_max - 1
7283 /* Check for host arithmetic overflow. */
7284 && (neg_incr
7285 ? s_end_val < start_val
7286 : s_end_val > start_val)
7287 /* Check for target arithmetic overflow. */
7288 && (neg_incr
7289 ? s_end_val >= -s_max - 1
7290 : s_end_val <= s_max))
7291 {
7292 se_ok = 1;
7293 }
7294 }
7295 }
7296
7297 /* Invalidate givs that fail the tests. */
7298 for (v = bl->giv; v; v = v->next_iv)
7299 if (v->ext_dependent)
7300 {
7301 enum rtx_code code = GET_CODE (v->ext_dependent);
7302 int ok = 0;
7303
7304 switch (code)
7305 {
7306 case SIGN_EXTEND:
7307 ok = se_ok;
7308 break;
7309 case ZERO_EXTEND:
7310 ok = ze_ok;
7311 break;
7312
7313 case TRUNCATE:
7314 /* We don't know whether this value is being used as either
7315 signed or unsigned, so to safely truncate we must satisfy
7316 both. The initial check here verifies the BIV itself;
7317 once that is successful we may check its range wrt the
7318 derived GIV. */
7319 if (se_ok && ze_ok)
7320 {
7321 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7322 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7323
7324 /* We know from the above that both endpoints are nonnegative,
7325 and that there is no wrapping. Verify that both endpoints
7326 are within the (signed) range of the outer mode. */
7327 if (u_start_val <= max && u_end_val <= max)
7328 ok = 1;
7329 }
7330 break;
7331
7332 default:
7333 abort ();
7334 }
7335
7336 if (ok)
7337 {
7338 if (loop_dump_stream)
7339 {
7340 fprintf (loop_dump_stream,
7341 "Verified ext dependent giv at %d of reg %d\n",
7342 INSN_UID (v->insn), bl->regno);
7343 }
7344 }
7345 else
7346 {
7347 if (loop_dump_stream)
7348 {
7349 const char *why;
7350
7351 if (info_ok)
7352 why = "biv iteration values overflowed";
7353 else
7354 {
7355 if (incr == pc_rtx)
7356 incr = biv_total_increment (bl);
7357 if (incr == const1_rtx)
7358 why = "biv iteration info incomplete; incr by 1";
7359 else
7360 why = "biv iteration info incomplete";
7361 }
7362
7363 fprintf (loop_dump_stream,
7364 "Failed ext dependent giv at %d, %s\n",
7365 INSN_UID (v->insn), why);
7366 }
7367 v->ignore = 1;
7368 bl->all_reduced = 0;
7369 }
7370 }
7371 }
7372
7373 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7374
7375 rtx
7376 extend_value_for_giv (v, value)
7377 struct induction *v;
7378 rtx value;
7379 {
7380 rtx ext_dep = v->ext_dependent;
7381
7382 if (! ext_dep)
7383 return value;
7384
7385 /* Recall that check_ext_dependent_givs verified that the known bounds
7386 of a biv did not overflow or wrap with respect to the extension for
7387 the giv. Therefore, constants need no additional adjustment. */
7388 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7389 return value;
7390
7391 /* Otherwise, we must adjust the value to compensate for the
7392 differing modes of the biv and the giv. */
7393 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7394 }
7395 \f
7396 struct combine_givs_stats
7397 {
7398 int giv_number;
7399 int total_benefit;
7400 };
7401
7402 static int
7403 cmp_combine_givs_stats (xp, yp)
7404 const PTR xp;
7405 const PTR yp;
7406 {
7407 const struct combine_givs_stats * const x =
7408 (const struct combine_givs_stats *) xp;
7409 const struct combine_givs_stats * const y =
7410 (const struct combine_givs_stats *) yp;
7411 int d;
7412 d = y->total_benefit - x->total_benefit;
7413 /* Stabilize the sort. */
7414 if (!d)
7415 d = x->giv_number - y->giv_number;
7416 return d;
7417 }
7418
7419 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7420 any other. If so, point SAME to the giv combined with and set NEW_REG to
7421 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7422 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
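/* A hypothetical example (register numbers invented): if g1 has the value
4*i + 16 in register r70 and g2 has the value 4*i + 24, combine_givs_p can
express g2 as (plus r70 8); g2->same is then pointed at g1, g2->new_reg is
set to that expression, and only g1 needs to be strength-reduced.  */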
7423
7424 static void
7425 combine_givs (regs, bl)
7426 struct loop_regs *regs;
7427 struct iv_class *bl;
7428 {
7429 /* Additional benefit to add for being combined multiple times. */
7430 const int extra_benefit = 3;
7431
7432 struct induction *g1, *g2, **giv_array;
7433 int i, j, k, giv_count;
7434 struct combine_givs_stats *stats;
7435 rtx *can_combine;
7436
7437 /* Count givs, because bl->giv_count is incorrect here. */
7438 giv_count = 0;
7439 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7440 if (!g1->ignore)
7441 giv_count++;
7442
7443 giv_array
7444 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7445 i = 0;
7446 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7447 if (!g1->ignore)
7448 giv_array[i++] = g1;
7449
7450 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7451 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7452
7453 for (i = 0; i < giv_count; i++)
7454 {
7455 int this_benefit;
7456 rtx single_use;
7457
7458 g1 = giv_array[i];
7459 stats[i].giv_number = i;
7460
7461 /* If a DEST_REG GIV is used only once, do not allow it to combine
7462 with anything, for in doing so we will gain nothing that cannot
7463 be had by simply letting the GIV with which we would have combined
7464 be reduced on its own. The lossage shows up in particular with
7465 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7466 be seen elsewhere as well. */
7467 if (g1->giv_type == DEST_REG
7468 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7469 && single_use != const0_rtx)
7470 continue;
7471
7472 this_benefit = g1->benefit;
7473 /* Add an additional weight for zero addends. */
7474 if (g1->no_const_addval)
7475 this_benefit += 1;
7476
7477 for (j = 0; j < giv_count; j++)
7478 {
7479 rtx this_combine;
7480
7481 g2 = giv_array[j];
7482 if (g1 != g2
7483 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7484 {
7485 can_combine[i * giv_count + j] = this_combine;
7486 this_benefit += g2->benefit + extra_benefit;
7487 }
7488 }
7489 stats[i].total_benefit = this_benefit;
7490 }
7491
7492 /* Iterate, combining until we can't. */
7493 restart:
7494 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7495
7496 if (loop_dump_stream)
7497 {
7498 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7499 for (k = 0; k < giv_count; k++)
7500 {
7501 g1 = giv_array[stats[k].giv_number];
7502 if (!g1->combined_with && !g1->same)
7503 fprintf (loop_dump_stream, " {%d, %d}",
7504 INSN_UID (giv_array[stats[k].giv_number]->insn),
7505 stats[k].total_benefit);
7506 }
7507 putc ('\n', loop_dump_stream);
7508 }
7509
7510 for (k = 0; k < giv_count; k++)
7511 {
7512 int g1_add_benefit = 0;
7513
7514 i = stats[k].giv_number;
7515 g1 = giv_array[i];
7516
7517 /* If it has already been combined, skip. */
7518 if (g1->combined_with || g1->same)
7519 continue;
7520
7521 for (j = 0; j < giv_count; j++)
7522 {
7523 g2 = giv_array[j];
7524 if (g1 != g2 && can_combine[i * giv_count + j]
7525 /* If it has already been combined, skip. */
7526 && ! g2->same && ! g2->combined_with)
7527 {
7528 int l;
7529
7530 g2->new_reg = can_combine[i * giv_count + j];
7531 g2->same = g1;
7532 /* For a DEST_ADDR giv, the address may now be an expression rather
7533 than a plain register. This changes the costs considerably, so add
7534 the compensation. */
7535 if (g2->giv_type == DEST_ADDR)
7536 g2->benefit = (g2->benefit + reg_address_cost
7537 - address_cost (g2->new_reg,
7538 GET_MODE (g2->mem)));
7539 g1->combined_with++;
7540 g1->lifetime += g2->lifetime;
7541
7542 g1_add_benefit += g2->benefit;
7543
7544 /* ??? The new final_[bg]iv_value code does a much better job
7545 of finding replaceable giv's, and hence this code may no
7546 longer be necessary. */
7547 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7548 g1_add_benefit -= copy_cost;
7549
7550 /* To help optimize the next set of combinations, remove
7551 this giv from the benefits of other potential mates. */
7552 for (l = 0; l < giv_count; ++l)
7553 {
7554 int m = stats[l].giv_number;
7555 if (can_combine[m * giv_count + j])
7556 stats[l].total_benefit -= g2->benefit + extra_benefit;
7557 }
7558
7559 if (loop_dump_stream)
7560 fprintf (loop_dump_stream,
7561 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7562 INSN_UID (g2->insn), INSN_UID (g1->insn),
7563 g1->benefit, g1_add_benefit, g1->lifetime);
7564 }
7565 }
7566
7567 /* To help optimize the next set of combinations, remove
7568 this giv from the benefits of other potential mates. */
7569 if (g1->combined_with)
7570 {
7571 for (j = 0; j < giv_count; ++j)
7572 {
7573 int m = stats[j].giv_number;
7574 if (can_combine[m * giv_count + i])
7575 stats[j].total_benefit -= g1->benefit + extra_benefit;
7576 }
7577
7578 g1->benefit += g1_add_benefit;
7579
7580 /* We've finished with this giv, and everything it touched.
7581 Restart the combination so that proper weights for the
7582 rest of the givs are properly taken into account. */
7583 /* ??? Ideally we would compact the arrays at this point, so
7584 as to not cover old ground. But sanely compacting
7585 can_combine is tricky. */
7586 goto restart;
7587 }
7588 }
7589
7590 /* Clean up. */
7591 free (stats);
7592 free (can_combine);
7593 }
7594 \f
7595 /* Generate sequence for REG = B * M + A. */
7596
7597 static rtx
7598 gen_add_mult (b, m, a, reg)
7599 rtx b; /* initial value of basic induction variable */
7600 rtx m; /* multiplicative constant */
7601 rtx a; /* additive constant */
7602 rtx reg; /* destination register */
7603 {
7604 rtx seq;
7605 rtx result;
7606
7607 start_sequence ();
7608 /* Use unsigned arithmetic. */
7609 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7610 if (reg != result)
7611 emit_move_insn (reg, result);
7612 seq = gen_sequence ();
7613 end_sequence ();
7614
7615 return seq;
7616 }
7617
7618
7619 /* Update registers created in insn sequence SEQ. */
7620
7621 static void
7622 loop_regs_update (loop, seq)
7623 const struct loop *loop ATTRIBUTE_UNUSED;
7624 rtx seq;
7625 {
7626 /* Update register info for alias analysis. */
7627
7628 if (GET_CODE (seq) == SEQUENCE)
7629 {
7630 int i;
7631 for (i = 0; i < XVECLEN (seq, 0); ++i)
7632 {
7633 rtx set = single_set (XVECEXP (seq, 0, i));
7634 if (set && GET_CODE (SET_DEST (set)) == REG)
7635 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7636 }
7637 }
7638 else
7639 {
7640 if (GET_CODE (seq) == SET
7641 && GET_CODE (SET_DEST (seq)) == REG)
7642 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7643 }
7644 }
7645
7646
7647 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7648
7649 void
7650 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7651 const struct loop *loop;
7652 rtx b; /* initial value of basic induction variable */
7653 rtx m; /* multiplicative constant */
7654 rtx a; /* additive constant */
7655 rtx reg; /* destination register */
7656 basic_block before_bb;
7657 rtx before_insn;
7658 {
7659 rtx seq;
7660
7661 if (! before_insn)
7662 {
7663 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7664 return;
7665 }
7666
7667 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7668 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7669
7670 /* Increase the lifetime of any invariants moved further in code. */
7671 update_reg_last_use (a, before_insn);
7672 update_reg_last_use (b, before_insn);
7673 update_reg_last_use (m, before_insn);
7674
7675 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7676
7677 /* It is possible that the expansion created lots of new registers.
7678 Iterate over the sequence we just created and record them all. */
7679 loop_regs_update (loop, seq);
7680 }
7681
7682
7683 /* Emit insns in loop pre-header to set REG = B * M + A. */
7684
7685 void
7686 loop_iv_add_mult_sink (loop, b, m, a, reg)
7687 const struct loop *loop;
7688 rtx b; /* initial value of basic induction variable */
7689 rtx m; /* multiplicative constant */
7690 rtx a; /* additive constant */
7691 rtx reg; /* destination register */
7692 {
7693 rtx seq;
7694
7695 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7696 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7697
7698 /* Increase the lifetime of any invariants moved further in code.
7699 ??? Is this really necessary? */
7700 update_reg_last_use (a, loop->sink);
7701 update_reg_last_use (b, loop->sink);
7702 update_reg_last_use (m, loop->sink);
7703
7704 loop_insn_sink (loop, seq);
7705
7706 /* It is possible that the expansion created lots of new registers.
7707 Iterate over the sequence we just created and record them all. */
7708 loop_regs_update (loop, seq);
7709 }
7710
7711
7712 /* Emit insns after loop to set REG = B * M + A. */
7713
7714 void
7715 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7716 const struct loop *loop;
7717 rtx b; /* initial value of basic induction variable */
7718 rtx m; /* multiplicative constant */
7719 rtx a; /* additive constant */
7720 rtx reg; /* destination register */
7721 {
7722 rtx seq;
7723
7724 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7725 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7726
7727 loop_insn_hoist (loop, seq);
7728
7729 /* It is possible that the expansion created lots of new registers.
7730 Iterate over the sequence we just created and record them all. */
7731 loop_regs_update (loop, seq);
7732 }
7733
7734
7735
7736 /* Similar to gen_add_mult, but compute cost rather than generating
7737 sequence. */
7738
7739 static int
7740 iv_add_mult_cost (b, m, a, reg)
7741 rtx b; /* initial value of basic induction variable */
7742 rtx m; /* multiplicative constant */
7743 rtx a; /* additive constant */
7744 rtx reg; /* destination register */
7745 {
7746 int cost = 0;
7747 rtx last, result;
7748
7749 start_sequence ();
7750 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7751 if (reg != result)
7752 emit_move_insn (reg, result);
7753 last = get_last_insn ();
7754 while (last)
7755 {
7756 rtx t = single_set (last);
7757 if (t)
7758 cost += rtx_cost (SET_SRC (t), SET);
7759 last = PREV_INSN (last);
7760 }
7761 end_sequence ();
7762 return cost;
7763 }
7764 \f
7765 /* Test whether A * B can be computed without
7766 an actual multiply insn. Value is 1 if so. */
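/* For example, multiplying by a small constant such as 5 normally expands to
a shift and an add (x + (x << 2)), which the test below accepts; a constant
whose expansion needs a real MULT, a libcall, or more than three insns is
rejected.  (This describes the intent of the check, not a guarantee for
every target.)  */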
7767
7768 static int
7769 product_cheap_p (a, b)
7770 rtx a;
7771 rtx b;
7772 {
7773 int i;
7774 rtx tmp;
7775 int win = 1;
7776
7777 /* If only one is constant, make it B. */
7778 if (GET_CODE (a) == CONST_INT)
7779 tmp = a, a = b, b = tmp;
7780
7781 /* If first constant, both constant, so don't need multiply. */
7782 if (GET_CODE (a) == CONST_INT)
7783 return 1;
7784
7785 /* If second not constant, neither is constant, so would need multiply. */
7786 if (GET_CODE (b) != CONST_INT)
7787 return 0;
7788
7789 /* One operand is constant, so might not need multiply insn. Generate the
7790 code for the multiply and see if a call or multiply, or long sequence
7791 of insns is generated. */
7792
7793 start_sequence ();
7794 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7795 tmp = gen_sequence ();
7796 end_sequence ();
7797
7798 if (GET_CODE (tmp) == SEQUENCE)
7799 {
7800 if (XVEC (tmp, 0) == 0)
7801 win = 1;
7802 else if (XVECLEN (tmp, 0) > 3)
7803 win = 0;
7804 else
7805 for (i = 0; i < XVECLEN (tmp, 0); i++)
7806 {
7807 rtx insn = XVECEXP (tmp, 0, i);
7808
7809 if (GET_CODE (insn) != INSN
7810 || (GET_CODE (PATTERN (insn)) == SET
7811 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7812 || (GET_CODE (PATTERN (insn)) == PARALLEL
7813 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7814 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7815 {
7816 win = 0;
7817 break;
7818 }
7819 }
7820 }
7821 else if (GET_CODE (tmp) == SET
7822 && GET_CODE (SET_SRC (tmp)) == MULT)
7823 win = 0;
7824 else if (GET_CODE (tmp) == PARALLEL
7825 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7826 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7827 win = 0;
7828
7829 return win;
7830 }
7831 \f
7832 /* Check to see if loop can be terminated by a "decrement and branch until
7833 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7834 Also try reversing an increment loop to a decrement loop
7835 to see if the optimization can be performed.
7836 Value is nonzero if optimization was performed. */
7837
7838 /* This is useful even if the architecture doesn't have such an insn,
7839 because it might change a loop which increments from 0 to n into a loop
7840 which decrements from n to 0. A loop that decrements to zero is usually
7841 faster, since the final compare is against zero. */
7842
7843 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7844 such as approx_final_value, biv_total_increment, loop_iterations, and
7845 final_[bg]iv_value. */
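/* Roughly, the transformation attempted here turns (illustrative C, not the
actual RTL):

for (i = 0; i < 100; i++)  body;        becomes
for (i = 100; i > 0; i--)  body;

so the exit test compares against zero (or uses a
decrement-and-branch-until-zero insn where the target has one), provided
the biv has no other uses that would observe the changed direction.  */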
7846
7847 static int
7848 check_dbra_loop (loop, insn_count)
7849 struct loop *loop;
7850 int insn_count;
7851 {
7852 struct loop_info *loop_info = LOOP_INFO (loop);
7853 struct loop_regs *regs = LOOP_REGS (loop);
7854 struct loop_ivs *ivs = LOOP_IVS (loop);
7855 struct iv_class *bl;
7856 rtx reg;
7857 rtx jump_label;
7858 rtx final_value;
7859 rtx start_value;
7860 rtx new_add_val;
7861 rtx comparison;
7862 rtx before_comparison;
7863 rtx p;
7864 rtx jump;
7865 rtx first_compare;
7866 int compare_and_branch;
7867 rtx loop_start = loop->start;
7868 rtx loop_end = loop->end;
7869
7870 /* If last insn is a conditional branch, and the insn before tests a
7871 register value, try to optimize it. Otherwise, we can't do anything. */
7872
7873 jump = PREV_INSN (loop_end);
7874 comparison = get_condition_for_loop (loop, jump);
7875 if (comparison == 0)
7876 return 0;
7877 if (!onlyjump_p (jump))
7878 return 0;
7879
7880 /* Try to compute whether the compare/branch at the loop end is one or
7881 two instructions. */
7882 get_condition (jump, &first_compare);
7883 if (first_compare == jump)
7884 compare_and_branch = 1;
7885 else if (first_compare == prev_nonnote_insn (jump))
7886 compare_and_branch = 2;
7887 else
7888 return 0;
7889
7890 {
7891 /* If more than one condition is present to control the loop, then
7892 do not proceed, as this function does not know how to rewrite
7893 loop tests with more than one condition.
7894
7895 Look backwards from the first insn in the last comparison
7896 sequence and see if we've got another comparison sequence. */
7897
7898 rtx jump1;
7899 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7900 if (GET_CODE (jump1) == JUMP_INSN)
7901 return 0;
7902 }
7903
7904 /* Check all of the bivs to see if the compare uses one of them.
7905 Skip biv's set more than once because we can't guarantee that
7906 it will be zero on the last iteration. Also skip if the biv is
7907 used between its update and the test insn. */
7908
7909 for (bl = ivs->list; bl; bl = bl->next)
7910 {
7911 if (bl->biv_count == 1
7912 && ! bl->biv->maybe_multiple
7913 && bl->biv->dest_reg == XEXP (comparison, 0)
7914 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7915 first_compare))
7916 break;
7917 }
7918
7919 if (! bl)
7920 return 0;
7921
7922 /* Look for the case where the basic induction variable is always
7923 nonnegative, and equals zero on the last iteration.
7924 In this case, add a reg_note REG_NONNEG, which allows the
7925 m68k DBRA instruction to be used. */
7926
7927 if (((GET_CODE (comparison) == GT
7928 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7929 && INTVAL (XEXP (comparison, 1)) == -1)
7930 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7931 && GET_CODE (bl->biv->add_val) == CONST_INT
7932 && INTVAL (bl->biv->add_val) < 0)
7933 {
7934 /* The initial value must be greater than 0, and
7935 init_val % -dec_value == 0 to ensure that the biv equals zero on
7936 the last iteration. */
7937
7938 if (GET_CODE (bl->initial_value) == CONST_INT
7939 && INTVAL (bl->initial_value) > 0
7940 && (INTVAL (bl->initial_value)
7941 % (-INTVAL (bl->biv->add_val))) == 0)
7942 {
7943 /* register always nonnegative, add REG_NOTE to branch */
7944 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7945 REG_NOTES (jump)
7946 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7947 REG_NOTES (jump));
7948 bl->nonneg = 1;
7949
7950 return 1;
7951 }
7952
7953 /* If the decrement is 1 and the value was tested as >= 0 before
7954 the loop, then we can safely optimize. */
7955 for (p = loop_start; p; p = PREV_INSN (p))
7956 {
7957 if (GET_CODE (p) == CODE_LABEL)
7958 break;
7959 if (GET_CODE (p) != JUMP_INSN)
7960 continue;
7961
7962 before_comparison = get_condition_for_loop (loop, p);
7963 if (before_comparison
7964 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7965 && GET_CODE (before_comparison) == LT
7966 && XEXP (before_comparison, 1) == const0_rtx
7967 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7968 && INTVAL (bl->biv->add_val) == -1)
7969 {
7970 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7971 REG_NOTES (jump)
7972 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7973 REG_NOTES (jump));
7974 bl->nonneg = 1;
7975
7976 return 1;
7977 }
7978 }
7979 }
7980 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7981 && INTVAL (bl->biv->add_val) > 0)
7982 {
7983 /* Try to change inc to dec, so can apply above optimization. */
7984 /* Can do this if:
7985 all registers modified are induction variables or invariant,
7986 all memory references have non-overlapping addresses
7987 (obviously true if only one write)
7988 allow 2 insns for the compare/jump at the end of the loop. */
7989 /* Also, we must avoid any instructions which use both the reversed
7990 biv and another biv. Such instructions will fail if the loop is
7991 reversed. We meet this condition by requiring that either
7992 no_use_except_counting is true, or else that there is only
7993 one biv. */
7994 int num_nonfixed_reads = 0;
7995 /* 1 if the iteration var is used only to count iterations. */
7996 int no_use_except_counting = 0;
7997 /* 1 if the loop has no memory store, or it has a single memory store
7998 which is reversible. */
7999 int reversible_mem_store = 1;
8000
8001 if (bl->giv_count == 0
8002 && !loop->exit_count
8003 && !loop_info->has_multiple_exit_targets)
8004 {
8005 rtx bivreg = regno_reg_rtx[bl->regno];
8006 struct iv_class *blt;
8007
8008 /* If there are no givs for this biv, and the only exit is the
8009 fall through at the end of the loop, then
8010 see if perhaps there are no uses except to count. */
8011 no_use_except_counting = 1;
8012 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8013 if (INSN_P (p))
8014 {
8015 rtx set = single_set (p);
8016
8017 if (set && GET_CODE (SET_DEST (set)) == REG
8018 && REGNO (SET_DEST (set)) == bl->regno)
8019 /* An insn that sets the biv is okay. */
8020 ;
8021 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8022 || p == prev_nonnote_insn (loop_end))
8023 && reg_mentioned_p (bivreg, PATTERN (p)))
8024 {
8025 /* If either of these insns uses the biv and sets a pseudo
8026 that has more than one usage, then the biv has uses
8027 other than counting since it's used to derive a value
8028 that is used more than one time. */
8029 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8030 regs);
8031 if (regs->multiple_uses)
8032 {
8033 no_use_except_counting = 0;
8034 break;
8035 }
8036 }
8037 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8038 {
8039 no_use_except_counting = 0;
8040 break;
8041 }
8042 }
8043
8044 /* A biv has uses besides counting if it is used to set
8045 another biv. */
8046 for (blt = ivs->list; blt; blt = blt->next)
8047 if (blt->init_set
8048 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8049 {
8050 no_use_except_counting = 0;
8051 break;
8052 }
8053 }
8054
8055 if (no_use_except_counting)
8056 /* No need to worry about MEMs. */
8057 ;
8058 else if (loop_info->num_mem_sets <= 1)
8059 {
8060 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8061 if (INSN_P (p))
8062 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8063
8064 /* If the loop has a single store, and the destination address is
8065 invariant, then we can't reverse the loop, because this address
8066 might then have the wrong value at loop exit.
8067 This would work if the source was invariant also, however, in that
8068 case, the insn should have been moved out of the loop. */
8069
8070 if (loop_info->num_mem_sets == 1)
8071 {
8072 struct induction *v;
8073
8074 /* If we could prove that each of the memory locations
8075 written to was different, then we could reverse the
8076 store -- but we don't presently have any way of
8077 knowing that. */
8078 reversible_mem_store = 0;
8079
8080 /* If the store depends on a register that is set after the
8081 store, it depends on the initial value, and is thus not
8082 reversible. */
8083 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8084 {
8085 if (v->giv_type == DEST_REG
8086 && reg_mentioned_p (v->dest_reg,
8087 PATTERN (loop_info->first_loop_store_insn))
8088 && loop_insn_first_p (loop_info->first_loop_store_insn,
8089 v->insn))
8090 reversible_mem_store = 0;
8091 }
8092 }
8093 }
8094 else
8095 return 0;
8096
8097 /* This code only acts for innermost loops. Also it simplifies
8098 the memory address check by only reversing loops with
8099 zero or one memory access.
8100 Two memory accesses could involve parts of the same array,
8101 and that can't be reversed.
8102 If the biv is used only for counting, then we don't need to worry
8103 about all these things. */
8104
8105 if ((num_nonfixed_reads <= 1
8106 && ! loop_info->has_nonconst_call
8107 && ! loop_info->has_volatile
8108 && reversible_mem_store
8109 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8110 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8111 && (bl == ivs->list && bl->next == 0))
8112 || no_use_except_counting)
8113 {
8114 rtx tem;
8115
8116 /* Loop can be reversed. */
8117 if (loop_dump_stream)
8118 fprintf (loop_dump_stream, "Can reverse loop\n");
8119
8120 /* Now check other conditions:
8121
8122 The increment must be a constant, as must the initial value,
8123 and the comparison code must be LT.
8124
8125 This test can probably be improved since +/- 1 in the constant
8126 can be obtained by changing LT to LE and vice versa; this is
8127 confusing. */
8128
8129 if (comparison
8130 /* for constants, LE gets turned into LT */
8131 && (GET_CODE (comparison) == LT
8132 || (GET_CODE (comparison) == LE
8133 && no_use_except_counting)))
8134 {
8135 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8136 rtx initial_value, comparison_value;
8137 int nonneg = 0;
8138 enum rtx_code cmp_code;
8139 int comparison_const_width;
8140 unsigned HOST_WIDE_INT comparison_sign_mask;
8141
8142 add_val = INTVAL (bl->biv->add_val);
8143 comparison_value = XEXP (comparison, 1);
8144 if (GET_MODE (comparison_value) == VOIDmode)
8145 comparison_const_width
8146 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8147 else
8148 comparison_const_width
8149 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8150 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8151 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8152 comparison_sign_mask
8153 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8154
8155 /* If the comparison value is not a loop invariant, then we
8156 can not reverse this loop.
8157
8158 ??? If the insns which initialize the comparison value as
8159 a whole compute an invariant result, then we could move
8160 them out of the loop and proceed with loop reversal. */
8161 if (! loop_invariant_p (loop, comparison_value))
8162 return 0;
8163
8164 if (GET_CODE (comparison_value) == CONST_INT)
8165 comparison_val = INTVAL (comparison_value);
8166 initial_value = bl->initial_value;
8167
8168 /* Normalize the initial value if it is an integer and
8169 has no other use except as a counter. This will allow
8170 a few more loops to be reversed. */
8171 if (no_use_except_counting
8172 && GET_CODE (comparison_value) == CONST_INT
8173 && GET_CODE (initial_value) == CONST_INT)
8174 {
8175 comparison_val = comparison_val - INTVAL (bl->initial_value);
8176 /* The code below requires comparison_val to be a multiple
8177 of add_val in order to do the loop reversal, so
8178 round up comparison_val to a multiple of add_val.
8179 Since comparison_value is constant, we know that the
8180 current comparison code is LT. */
8181 comparison_val = comparison_val + add_val - 1;
8182 comparison_val
8183 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8184 /* We postpone overflow checks for COMPARISON_VAL here;
8185 even if there is an overflow, we might still be able to
8186 reverse the loop, if converting the loop exit test to
8187 NE is possible. */
8188 initial_value = const0_rtx;
8189 }
8190
8191 /* First check if we can do a vanilla loop reversal. */
8192 if (initial_value == const0_rtx
8193 /* If we have a decrement_and_branch_on_count,
8194 prefer the NE test, since this will allow that
8195 instruction to be generated. Note that we must
8196 use a vanilla loop reversal if the biv is used to
8197 calculate a giv or has a non-counting use. */
8198 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8199 && defined (HAVE_decrement_and_branch_on_count)
8200 && (! (add_val == 1 && loop->vtop
8201 && (bl->biv_count == 0
8202 || no_use_except_counting)))
8203 #endif
8204 && GET_CODE (comparison_value) == CONST_INT
8205 /* Now do postponed overflow checks on COMPARISON_VAL. */
8206 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8207 & comparison_sign_mask))
8208 {
8209 /* Register will always be nonnegative, with value
8210 0 on last iteration */
8211 add_adjust = add_val;
8212 nonneg = 1;
8213 cmp_code = GE;
8214 }
8215 else if (add_val == 1 && loop->vtop
8216 && (bl->biv_count == 0
8217 || no_use_except_counting))
8218 {
8219 add_adjust = 0;
8220 cmp_code = NE;
8221 }
8222 else
8223 return 0;
8224
8225 if (GET_CODE (comparison) == LE)
8226 add_adjust -= add_val;
8227
8228 /* If the initial value is not zero, or if the comparison
8229 value is not an exact multiple of the increment, then we
8230 can not reverse this loop. */
8231 if (initial_value == const0_rtx
8232 && GET_CODE (comparison_value) == CONST_INT)
8233 {
8234 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8235 return 0;
8236 }
8237 else
8238 {
8239 if (! no_use_except_counting || add_val != 1)
8240 return 0;
8241 }
8242
8243 final_value = comparison_value;
8244
8245 /* Reset these in case we normalized the initial value
8246 and comparison value above. */
8247 if (GET_CODE (comparison_value) == CONST_INT
8248 && GET_CODE (initial_value) == CONST_INT)
8249 {
8250 comparison_value = GEN_INT (comparison_val);
8251 final_value
8252 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8253 }
8254 bl->initial_value = initial_value;
8255
8256 /* Save some info needed to produce the new insns. */
8257 reg = bl->biv->dest_reg;
8258 jump_label = condjump_label (PREV_INSN (loop_end));
8259 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8260
8261 /* Set start_value; if this is not a CONST_INT, we need
8262 to generate a SUB.
8263 Initialize biv to start_value before loop start.
8264 The old initializing insn will be deleted as a
8265 dead store by flow.c. */
8266 if (initial_value == const0_rtx
8267 && GET_CODE (comparison_value) == CONST_INT)
8268 {
8269 start_value = GEN_INT (comparison_val - add_adjust);
8270 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8271 }
8272 else if (GET_CODE (initial_value) == CONST_INT)
8273 {
8274 enum machine_mode mode = GET_MODE (reg);
8275 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8276 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8277
8278 if (add_insn == 0)
8279 return 0;
8280
8281 start_value
8282 = gen_rtx_PLUS (mode, comparison_value, offset);
8283 loop_insn_hoist (loop, add_insn);
8284 if (GET_CODE (comparison) == LE)
8285 final_value = gen_rtx_PLUS (mode, comparison_value,
8286 GEN_INT (add_val));
8287 }
8288 else if (! add_adjust)
8289 {
8290 enum machine_mode mode = GET_MODE (reg);
8291 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8292 initial_value);
8293
8294 if (sub_insn == 0)
8295 return 0;
8296 start_value
8297 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8298 loop_insn_hoist (loop, sub_insn);
8299 }
8300 else
8301 /* We could handle the other cases too, but it'll be
8302 better to have a testcase first. */
8303 return 0;
8304
8305 /* We may not have a single insn which can increment a reg, so
8306 create a sequence to hold all the insns from expand_inc. */
8307 start_sequence ();
8308 expand_inc (reg, new_add_val);
8309 tem = gen_sequence ();
8310 end_sequence ();
8311
8312 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8313 delete_insn (bl->biv->insn);
8314
8315 /* Update biv info to reflect its new status. */
8316 bl->biv->insn = p;
8317 bl->initial_value = start_value;
8318 bl->biv->add_val = new_add_val;
8319
8320 /* Update loop info. */
8321 loop_info->initial_value = reg;
8322 loop_info->initial_equiv_value = reg;
8323 loop_info->final_value = const0_rtx;
8324 loop_info->final_equiv_value = const0_rtx;
8325 loop_info->comparison_value = const0_rtx;
8326 loop_info->comparison_code = cmp_code;
8327 loop_info->increment = new_add_val;
8328
8329 /* Inc LABEL_NUSES so that delete_insn will
8330 not delete the label. */
8331 LABEL_NUSES (XEXP (jump_label, 0))++;
8332
8333 /* Emit an insn after the end of the loop to set the biv's
8334 proper exit value if it is used anywhere outside the loop. */
8335 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8336 || ! bl->init_insn
8337 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8338 loop_insn_sink (loop, gen_move_insn (reg, final_value));
8339
8340 /* Delete compare/branch at end of loop. */
8341 delete_related_insns (PREV_INSN (loop_end));
8342 if (compare_and_branch == 2)
8343 delete_related_insns (first_compare);
8344
8345 /* Add new compare/branch insn at end of loop. */
8346 start_sequence ();
8347 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8348 GET_MODE (reg), 0,
8349 XEXP (jump_label, 0));
8350 tem = gen_sequence ();
8351 end_sequence ();
8352 emit_jump_insn_before (tem, loop_end);
8353
8354 for (tem = PREV_INSN (loop_end);
8355 tem && GET_CODE (tem) != JUMP_INSN;
8356 tem = PREV_INSN (tem))
8357 ;
8358
8359 if (tem)
8360 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8361
8362 if (nonneg)
8363 {
8364 if (tem)
8365 {
8366 /* Increment of LABEL_NUSES done above. */
8367 /* Register is now always nonnegative,
8368 so add REG_NONNEG note to the branch. */
8369 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8370 REG_NOTES (tem));
8371 }
8372 bl->nonneg = 1;
8373 }
8374
8375 /* No insn may reference both the reversed and another biv or it
8376 will fail (see comment near the top of the loop reversal
8377 code).
8378 Earlier on, we have verified that the biv has no use except
8379 counting, or it is the only biv in this function.
8380 However, the code that computes no_use_except_counting does
8381 not verify reg notes. It's possible to have an insn that
8382 references another biv, and has a REG_EQUAL note with an
8383 expression based on the reversed biv. To avoid this case,
8384 remove all REG_EQUAL notes based on the reversed biv
8385 here. */
8386 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8387 if (INSN_P (p))
8388 {
8389 rtx *pnote;
8390 rtx set = single_set (p);
8391 /* If this is a set of a GIV based on the reversed biv, any
8392 REG_EQUAL notes should still be correct. */
8393 if (! set
8394 || GET_CODE (SET_DEST (set)) != REG
8395 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8396 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8397 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8398 for (pnote = &REG_NOTES (p); *pnote;)
8399 {
8400 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8401 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8402 XEXP (*pnote, 0)))
8403 *pnote = XEXP (*pnote, 1);
8404 else
8405 pnote = &XEXP (*pnote, 1);
8406 }
8407 }
8408
8409 /* Mark that this biv has been reversed. Each giv which depends
8410 on this biv, and which is also live past the end of the loop
8411 will have to be fixed up. */
8412
8413 bl->reversed = 1;
8414
8415 if (loop_dump_stream)
8416 {
8417 fprintf (loop_dump_stream, "Reversed loop");
8418 if (bl->nonneg)
8419 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8420 else
8421 fprintf (loop_dump_stream, "\n");
8422 }
8423
8424 return 1;
8425 }
8426 }
8427 }
8428
8429 return 0;
8430 }
8431 \f
8432 /* Verify whether the biv BL appears to be eliminable,
8433 based on the insns in the loop that refer to it.
8434
8435 If ELIMINATE_P is non-zero, actually do the elimination.
8436
8437 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8438 determine whether invariant insns should be placed inside or at the
8439 start of the loop. */
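/* An illustrative case (register numbers and values invented): if biv r60
appears only in the exit test (cmp r60 N) and a giv with value
4*r60 + ptr has been reduced into r75, the test can be rewritten to
compare r75 against 4*N + ptr, after which the loop no longer references
r60 and its insns become dead.  */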
8440
8441 static int
8442 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8443 const struct loop *loop;
8444 struct iv_class *bl;
8445 int eliminate_p;
8446 int threshold, insn_count;
8447 {
8448 struct loop_ivs *ivs = LOOP_IVS (loop);
8449 rtx reg = bl->biv->dest_reg;
8450 rtx p;
8451
8452 /* Scan all insns in the loop, stopping if we find one that uses the
8453 biv in a way that we cannot eliminate. */
8454
8455 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8456 {
8457 enum rtx_code code = GET_CODE (p);
8458 basic_block where_bb = 0;
8459 rtx where_insn = threshold >= insn_count ? 0 : p;
8460
8461 /* If this is a libcall that sets a giv, skip ahead to its end. */
8462 if (GET_RTX_CLASS (code) == 'i')
8463 {
8464 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8465
8466 if (note)
8467 {
8468 rtx last = XEXP (note, 0);
8469 rtx set = single_set (last);
8470
8471 if (set && GET_CODE (SET_DEST (set)) == REG)
8472 {
8473 unsigned int regno = REGNO (SET_DEST (set));
8474
8475 if (regno < ivs->n_regs
8476 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8477 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8478 p = last;
8479 }
8480 }
8481 }
8482 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8483 && reg_mentioned_p (reg, PATTERN (p))
8484 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8485 eliminate_p, where_bb, where_insn))
8486 {
8487 if (loop_dump_stream)
8488 fprintf (loop_dump_stream,
8489 "Cannot eliminate biv %d: biv used in insn %d.\n",
8490 bl->regno, INSN_UID (p));
8491 break;
8492 }
8493 }
8494
8495 if (p == loop->end)
8496 {
8497 if (loop_dump_stream)
8498 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8499 bl->regno, eliminate_p ? "was" : "can be");
8500 return 1;
8501 }
8502
8503 return 0;
8504 }
8505 \f
8506 /* INSN and REFERENCE are instructions in the same insn chain.
8507 Return non-zero if INSN is first. */
8508
8509 int
8510 loop_insn_first_p (insn, reference)
8511 rtx insn, reference;
8512 {
8513 rtx p, q;
8514
8515 for (p = insn, q = reference;;)
8516 {
8517 /* Start with test for not first so that INSN == REFERENCE yields not
8518 first. */
8519 if (q == insn || ! p)
8520 return 0;
8521 if (p == reference || ! q)
8522 return 1;
8523
8524 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8525 previous insn, hence the <= comparison below does not work if
8526 P is a note. */
8527 if (INSN_UID (p) < max_uid_for_loop
8528 && INSN_UID (q) < max_uid_for_loop
8529 && GET_CODE (p) != NOTE)
8530 return INSN_LUID (p) <= INSN_LUID (q);
8531
8532 if (INSN_UID (p) >= max_uid_for_loop
8533 || GET_CODE (p) == NOTE)
8534 p = NEXT_INSN (p);
8535 if (INSN_UID (q) >= max_uid_for_loop)
8536 q = NEXT_INSN (q);
8537 }
8538 }
8539
8540 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8541 the offset that we have to take into account due to auto-increment /
8542 giv derivation is zero. */
8543 static int
8544 biv_elimination_giv_has_0_offset (biv, giv, insn)
8545 struct induction *biv, *giv;
8546 rtx insn;
8547 {
8548 /* If the giv V had the auto-inc address optimization applied
8549 to it, and INSN occurs between the giv insn and the biv
8550 insn, then we'd have to adjust the value used here.
8551 This is rare, so we don't bother to make this possible. */
8552 if (giv->auto_inc_opt
8553 && ((loop_insn_first_p (giv->insn, insn)
8554 && loop_insn_first_p (insn, biv->insn))
8555 || (loop_insn_first_p (biv->insn, insn)
8556 && loop_insn_first_p (insn, giv->insn))))
8557 return 0;
8558
8559 return 1;
8560 }
8561
8562 /* If BL appears in X (part of the pattern of INSN), see if we can
8563 eliminate its use. If so, return 1. If not, return 0.
8564
8565 If BIV does not appear in X, return 1.
8566
8567 If ELIMINATE_P is non-zero, actually do the elimination.
8568 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8569 Depending on how many items have been moved out of the loop, it
8570 will either be before INSN (when WHERE_INSN is non-zero) or at the
8571 start of the loop (when WHERE_INSN is zero). */
8572
8573 static int
8574 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8575 const struct loop *loop;
8576 rtx x, insn;
8577 struct iv_class *bl;
8578 int eliminate_p;
8579 basic_block where_bb;
8580 rtx where_insn;
8581 {
8582 enum rtx_code code = GET_CODE (x);
8583 rtx reg = bl->biv->dest_reg;
8584 enum machine_mode mode = GET_MODE (reg);
8585 struct induction *v;
8586 rtx arg, tem;
8587 #ifdef HAVE_cc0
8588 rtx new;
8589 #endif
8590 int arg_operand;
8591 const char *fmt;
8592 int i, j;
8593
8594 switch (code)
8595 {
8596 case REG:
8597 /* If we haven't already been able to do something with this BIV,
8598 we can't eliminate it. */
8599 if (x == reg)
8600 return 0;
8601 return 1;
8602
8603 case SET:
8604 /* If this sets the BIV, it is not a problem. */
8605 if (SET_DEST (x) == reg)
8606 return 1;
8607
8608 /* If this is an insn that defines a giv, it is also ok because
8609 it will go away when the giv is reduced. */
8610 for (v = bl->giv; v; v = v->next_iv)
8611 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8612 return 1;
8613
8614 #ifdef HAVE_cc0
8615 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8616 {
8617 /* Can replace with any giv that was reduced and
8618 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8619 Require a constant for MULT_VAL, so we know it's nonzero.
8620 ??? We disable this optimization to avoid potential
8621 overflows. */
8622
8623 for (v = bl->giv; v; v = v->next_iv)
8624 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8625 && v->add_val == const0_rtx
8626 && ! v->ignore && ! v->maybe_dead && v->always_computable
8627 && v->mode == mode
8628 && 0)
8629 {
8630 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8631 continue;
8632
8633 if (! eliminate_p)
8634 return 1;
8635
8636 /* If the giv has the opposite direction of change,
8637 then reverse the comparison. */
8638 if (INTVAL (v->mult_val) < 0)
8639 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8640 const0_rtx, v->new_reg);
8641 else
8642 new = v->new_reg;
8643
8644 /* We can probably test that giv's reduced reg. */
8645 if (validate_change (insn, &SET_SRC (x), new, 0))
8646 return 1;
8647 }
8648
8649 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8650 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8651 Require a constant for MULT_VAL, so we know it's nonzero.
8652 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8653 overflow problem. */
8654
8655 for (v = bl->giv; v; v = v->next_iv)
8656 if (GET_CODE (v->mult_val) == CONST_INT
8657 && v->mult_val != const0_rtx
8658 && ! v->ignore && ! v->maybe_dead && v->always_computable
8659 && v->mode == mode
8660 && (GET_CODE (v->add_val) == SYMBOL_REF
8661 || GET_CODE (v->add_val) == LABEL_REF
8662 || GET_CODE (v->add_val) == CONST
8663 || (GET_CODE (v->add_val) == REG
8664 && REG_POINTER (v->add_val))))
8665 {
8666 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8667 continue;
8668
8669 if (! eliminate_p)
8670 return 1;
8671
8672 /* If the giv has the opposite direction of change,
8673 then reverse the comparison. */
8674 if (INTVAL (v->mult_val) < 0)
8675 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8676 v->new_reg);
8677 else
8678 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8679 copy_rtx (v->add_val));
8680
8681 /* Replace biv with the giv's reduced register. */
8682 update_reg_last_use (v->add_val, insn);
8683 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8684 return 1;
8685
8686 /* Insn doesn't support that constant or invariant. Copy it
8687 into a register (it will be a loop invariant.) */
8688 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8689
8690 loop_insn_emit_before (loop, 0, where_insn,
8691 gen_move_insn (tem,
8692 copy_rtx (v->add_val)));
8693
8694 /* Substitute the new register for its invariant value in
8695 the compare expression. */
8696 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8697 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8698 return 1;
8699 }
8700 }
8701 #endif
8702 break;
8703
8704 case COMPARE:
8705 case EQ: case NE:
8706 case GT: case GE: case GTU: case GEU:
8707 case LT: case LE: case LTU: case LEU:
8708 /* See if either argument is the biv. */
8709 if (XEXP (x, 0) == reg)
8710 arg = XEXP (x, 1), arg_operand = 1;
8711 else if (XEXP (x, 1) == reg)
8712 arg = XEXP (x, 0), arg_operand = 0;
8713 else
8714 break;
8715
8716 if (CONSTANT_P (arg))
8717 {
8718 /* First try to replace with any giv that has constant positive
8719 mult_val and constant add_val. We might be able to support
8720 negative mult_val, but it seems complex to do it in general. */
8721
8722 for (v = bl->giv; v; v = v->next_iv)
8723 if (GET_CODE (v->mult_val) == CONST_INT
8724 && INTVAL (v->mult_val) > 0
8725 && (GET_CODE (v->add_val) == SYMBOL_REF
8726 || GET_CODE (v->add_val) == LABEL_REF
8727 || GET_CODE (v->add_val) == CONST
8728 || (GET_CODE (v->add_val) == REG
8729 && REG_POINTER (v->add_val)))
8730 && ! v->ignore && ! v->maybe_dead && v->always_computable
8731 && v->mode == mode)
8732 {
8733 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8734 continue;
8735
8736 if (! eliminate_p)
8737 return 1;
8738
8739 /* Replace biv with the giv's reduced reg. */
8740 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8741
8742 /* If all constants are actually constant integers and
8743 the derived constant can be directly placed in the COMPARE,
8744 do so. */
8745 if (GET_CODE (arg) == CONST_INT
8746 && GET_CODE (v->mult_val) == CONST_INT
8747 && GET_CODE (v->add_val) == CONST_INT)
8748 {
8749 validate_change (insn, &XEXP (x, arg_operand),
8750 GEN_INT (INTVAL (arg)
8751 * INTVAL (v->mult_val)
8752 + INTVAL (v->add_val)), 1);
8753 }
8754 else
8755 {
8756 /* Otherwise, load it into a register. */
8757 tem = gen_reg_rtx (mode);
8758 loop_iv_add_mult_emit_before (loop, arg,
8759 v->mult_val, v->add_val,
8760 tem, where_bb, where_insn);
8761 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8762 }
8763 if (apply_change_group ())
8764 return 1;
8765 }
8766
8767 /* Look for giv with positive constant mult_val and nonconst add_val.
8768 Insert insns to calculate new compare value.
8769 ??? Turn this off due to possible overflow. */
8770
8771 for (v = bl->giv; v; v = v->next_iv)
8772 if (GET_CODE (v->mult_val) == CONST_INT
8773 && INTVAL (v->mult_val) > 0
8774 && ! v->ignore && ! v->maybe_dead && v->always_computable
8775 && v->mode == mode
8776 && 0)
8777 {
8778 rtx tem;
8779
8780 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8781 continue;
8782
8783 if (! eliminate_p)
8784 return 1;
8785
8786 tem = gen_reg_rtx (mode);
8787
8788 /* Replace biv with giv's reduced register. */
8789 validate_change (insn, &XEXP (x, 1 - arg_operand),
8790 v->new_reg, 1);
8791
8792 /* Compute value to compare against. */
8793 loop_iv_add_mult_emit_before (loop, arg,
8794 v->mult_val, v->add_val,
8795 tem, where_bb, where_insn);
8796 /* Use it in this insn. */
8797 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8798 if (apply_change_group ())
8799 return 1;
8800 }
8801 }
8802 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8803 {
8804 if (loop_invariant_p (loop, arg) == 1)
8805 {
8806 /* Look for giv with constant positive mult_val and nonconst
8807 add_val. Insert insns to compute new compare value.
8808 ??? Turn this off due to possible overflow. */
8809
8810 for (v = bl->giv; v; v = v->next_iv)
8811 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8812 && ! v->ignore && ! v->maybe_dead && v->always_computable
8813 && v->mode == mode
8814 && 0)
8815 {
8816 rtx tem;
8817
8818 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8819 continue;
8820
8821 if (! eliminate_p)
8822 return 1;
8823
8824 tem = gen_reg_rtx (mode);
8825
8826 /* Replace biv with giv's reduced register. */
8827 validate_change (insn, &XEXP (x, 1 - arg_operand),
8828 v->new_reg, 1);
8829
8830 /* Compute value to compare against. */
8831 loop_iv_add_mult_emit_before (loop, arg,
8832 v->mult_val, v->add_val,
8833 tem, where_bb, where_insn);
8834 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8835 if (apply_change_group ())
8836 return 1;
8837 }
8838 }
8839
8840 /* This code has problems. Basically, you can't know when
8841 seeing if we will eliminate BL, whether a particular giv
8842 of ARG will be reduced. If it isn't going to be reduced,
8843 we can't eliminate BL. We can try forcing it to be reduced,
8844 but that can generate poor code.
8845
8846 The problem is that the benefit of reducing TV, below, should
8847 be increased if BL can actually be eliminated, but this means
8848 we might have to do a topological sort of the order in which
8849 we try to process biv. It doesn't seem worthwhile to do
8850 this sort of thing now. */
8851
8852 #if 0
8853 /* Otherwise the reg compared with had better be a biv. */
8854 if (GET_CODE (arg) != REG
8855 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8856 return 0;
8857
8858 /* Look for a pair of givs, one for each biv,
8859 with identical coefficients. */
8860 for (v = bl->giv; v; v = v->next_iv)
8861 {
8862 struct induction *tv;
8863
8864 if (v->ignore || v->maybe_dead || v->mode != mode)
8865 continue;
8866
8867 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8868 tv = tv->next_iv)
8869 if (! tv->ignore && ! tv->maybe_dead
8870 && rtx_equal_p (tv->mult_val, v->mult_val)
8871 && rtx_equal_p (tv->add_val, v->add_val)
8872 && tv->mode == mode)
8873 {
8874 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8875 continue;
8876
8877 if (! eliminate_p)
8878 return 1;
8879
8880 /* Replace biv with its giv's reduced reg. */
8881 XEXP (x, 1 - arg_operand) = v->new_reg;
8882 /* Replace other operand with the other giv's
8883 reduced reg. */
8884 XEXP (x, arg_operand) = tv->new_reg;
8885 return 1;
8886 }
8887 }
8888 #endif
8889 }
8890
8891 /* If we get here, the biv can't be eliminated. */
8892 return 0;
8893
8894 case MEM:
8895 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8896 biv is used in it, since it will be replaced. */
8897 for (v = bl->giv; v; v = v->next_iv)
8898 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8899 return 1;
8900 break;
8901
8902 default:
8903 break;
8904 }
8905
8906 /* See if any subexpression fails elimination. */
8907 fmt = GET_RTX_FORMAT (code);
8908 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8909 {
8910 switch (fmt[i])
8911 {
8912 case 'e':
8913 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8914 eliminate_p, where_bb, where_insn))
8915 return 0;
8916 break;
8917
8918 case 'E':
8919 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8920 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8921 eliminate_p, where_bb, where_insn))
8922 return 0;
8923 break;
8924 }
8925 }
8926
8927 return 1;
8928 }
8929 \f
8930 /* Return nonzero if the last use of REG
8931 is in INSN or in an insn following INSN in the same basic block. */
8932
8933 static int
8934 last_use_this_basic_block (reg, insn)
8935 rtx reg;
8936 rtx insn;
8937 {
8938 rtx n;
8939 for (n = insn;
8940 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8941 n = NEXT_INSN (n))
8942 {
8943 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8944 return 1;
8945 }
8946 return 0;
8947 }
8948 \f
8949 /* Called via `note_stores' to record the initial value of a biv. Here we
8950 just record the location of the set and process it later. */
8951
8952 static void
8953 record_initial (dest, set, data)
8954 rtx dest;
8955 rtx set;
8956 void *data;
8957 {
8958 struct loop_ivs *ivs = (struct loop_ivs *) data;
8959 struct iv_class *bl;
8960
8961 if (GET_CODE (dest) != REG
8962 || REGNO (dest) >= ivs->n_regs
8963 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8964 return;
8965
8966 bl = REG_IV_CLASS (ivs, REGNO (dest));
8967
8968 /* If this is the first set found, record it. */
8969 if (bl->init_insn == 0)
8970 {
8971 bl->init_insn = note_insn;
8972 bl->init_set = set;
8973 }
8974 }
8975 \f
8976 /* If any of the registers in X are "old" and currently have a last use earlier
8977 than INSN, update them to have a last use of INSN. Their actual last use
8978 will be the previous insn but it will not have a valid uid_luid so we can't
8979 use it. X must be a source expression only. */
8980
8981 static void
8982 update_reg_last_use (x, insn)
8983 rtx x;
8984 rtx insn;
8985 {
8986 /* Check for the case where INSN does not have a valid luid. In this case,
8987 there is no need to modify the regno_last_uid, as this can only happen
8988 when code is inserted after the loop_end to set a pseudo's final value,
8989 and hence this insn will never be the last use of x.
8990 ??? This comment is not correct. See for example loop_givs_reduce.
8991 This may insert an insn before another new insn. */
8992 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8993 && INSN_UID (insn) < max_uid_for_loop
8994 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8995 {
8996 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8997 }
8998 else
8999 {
9000 int i, j;
9001 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9002 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9003 {
9004 if (fmt[i] == 'e')
9005 update_reg_last_use (XEXP (x, i), insn);
9006 else if (fmt[i] == 'E')
9007 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9008 update_reg_last_use (XVECEXP (x, i, j), insn);
9009 }
9010 }
9011 }
9012 \f
9013 /* Given an insn INSN and condition COND, return the condition in a
9014 canonical form to simplify testing by callers. Specifically:
9015
9016 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9017 (2) Both operands will be machine operands; (cc0) will have been replaced.
9018 (3) If an operand is a constant, it will be the second operand.
9019 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9020 for GE, GEU, and LEU.
9021
9022 If the condition cannot be understood, or is an inequality floating-point
9023 comparison which needs to be reversed, 0 will be returned.
9024
9025 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9026
9027 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9028 insn used in locating the condition was found. If a replacement test
9029 of the condition is desired, it should be placed in front of that
9030 insn and we will be sure that the inputs are still valid.
9031
9032 If WANT_REG is non-zero, we wish the condition to be relative to that
9033 register, if possible. Therefore, do not canonicalize the condition
9034 further. */
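/* Illustrative examples of the canonical form (a sketch, not from the
   original sources):

     (gt (const_int 0) (reg X))  is returned as  (lt (reg X) (const_int 0))
     (le (reg X) (const_int 4))  is returned as  (lt (reg X) (const_int 5))

   i.e. a constant always ends up as the second operand, and LE/GE/LEU/GEU
   are replaced by their strict counterparts when the adjusted constant
   cannot overflow.  */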
9035
9036 rtx
9037 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9038 rtx insn;
9039 rtx cond;
9040 int reverse;
9041 rtx *earliest;
9042 rtx want_reg;
9043 {
9044 enum rtx_code code;
9045 rtx prev = insn;
9046 rtx set;
9047 rtx tem;
9048 rtx op0, op1;
9049 int reverse_code = 0;
9050 enum machine_mode mode;
9051
9052 code = GET_CODE (cond);
9053 mode = GET_MODE (cond);
9054 op0 = XEXP (cond, 0);
9055 op1 = XEXP (cond, 1);
9056
9057 if (reverse)
9058 code = reversed_comparison_code (cond, insn);
9059 if (code == UNKNOWN)
9060 return 0;
9061
9062 if (earliest)
9063 *earliest = insn;
9064
9065 /* If we are comparing a register with zero, see if the register is set
9066 in the previous insn to a COMPARE or a comparison operation. Perform
9067 the same tests, as a function of STORE_FLAG_VALUE, that find_comparison_args
9068 in cse.c does. */
9069
9070 while (GET_RTX_CLASS (code) == '<'
9071 && op1 == CONST0_RTX (GET_MODE (op0))
9072 && op0 != want_reg)
9073 {
9074 /* Set non-zero when we find something of interest. */
9075 rtx x = 0;
9076
9077 #ifdef HAVE_cc0
9078 /* If comparison with cc0, import actual comparison from compare
9079 insn. */
9080 if (op0 == cc0_rtx)
9081 {
9082 if ((prev = prev_nonnote_insn (prev)) == 0
9083 || GET_CODE (prev) != INSN
9084 || (set = single_set (prev)) == 0
9085 || SET_DEST (set) != cc0_rtx)
9086 return 0;
9087
9088 op0 = SET_SRC (set);
9089 op1 = CONST0_RTX (GET_MODE (op0));
9090 if (earliest)
9091 *earliest = prev;
9092 }
9093 #endif
9094
9095 /* If this is a COMPARE, pick up the two things being compared. */
9096 if (GET_CODE (op0) == COMPARE)
9097 {
9098 op1 = XEXP (op0, 1);
9099 op0 = XEXP (op0, 0);
9100 continue;
9101 }
9102 else if (GET_CODE (op0) != REG)
9103 break;
9104
9105 /* Go back to the previous insn. Stop if it is not an INSN. We also
9106 stop if it has a REG_INC note, because we don't want to bother
9107 dealing with it; whether it sets OP0 is checked separately below. */
9108
9109 if ((prev = prev_nonnote_insn (prev)) == 0
9110 || GET_CODE (prev) != INSN
9111 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9112 break;
9113
9114 set = set_of (op0, prev);
9115
9116 if (set
9117 && (GET_CODE (set) != SET
9118 || !rtx_equal_p (SET_DEST (set), op0)))
9119 break;
9120
9121 /* If this is setting OP0, get what it sets it to if it looks
9122 relevant. */
9123 if (set)
9124 {
9125 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9126
9127 /* ??? We may not combine comparisons done in a CCmode with
9128 comparisons not done in a CCmode. This is to aid targets
9129 like Alpha that have an IEEE compliant EQ instruction, and
9130 a non-IEEE compliant BEQ instruction. The use of CCmode is
9131 actually artificial, simply to prevent the combination, but
9132 should not affect other platforms.
9133
9134 However, we must allow VOIDmode comparisons to match either
9135 CCmode or non-CCmode comparison, because some ports have
9136 modeless comparisons inside branch patterns.
9137
9138 ??? This mode check should perhaps look more like the mode check
9139 in simplify_comparison in combine. */
9140
9141 if ((GET_CODE (SET_SRC (set)) == COMPARE
9142 || (((code == NE
9143 || (code == LT
9144 && GET_MODE_CLASS (inner_mode) == MODE_INT
9145 && (GET_MODE_BITSIZE (inner_mode)
9146 <= HOST_BITS_PER_WIDE_INT)
9147 && (STORE_FLAG_VALUE
9148 & ((HOST_WIDE_INT) 1
9149 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9150 #ifdef FLOAT_STORE_FLAG_VALUE
9151 || (code == LT
9152 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9153 && (REAL_VALUE_NEGATIVE
9154 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9155 #endif
9156 ))
9157 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9158 && (((GET_MODE_CLASS (mode) == MODE_CC)
9159 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9160 || mode == VOIDmode || inner_mode == VOIDmode))
9161 x = SET_SRC (set);
9162 else if (((code == EQ
9163 || (code == GE
9164 && (GET_MODE_BITSIZE (inner_mode)
9165 <= HOST_BITS_PER_WIDE_INT)
9166 && GET_MODE_CLASS (inner_mode) == MODE_INT
9167 && (STORE_FLAG_VALUE
9168 & ((HOST_WIDE_INT) 1
9169 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9170 #ifdef FLOAT_STORE_FLAG_VALUE
9171 || (code == GE
9172 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9173 && (REAL_VALUE_NEGATIVE
9174 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9175 #endif
9176 ))
9177 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9178 && (((GET_MODE_CLASS (mode) == MODE_CC)
9179 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9180 || mode == VOIDmode || inner_mode == VOIDmode))
9181
9182 {
9183 reverse_code = 1;
9184 x = SET_SRC (set);
9185 }
9186 else
9187 break;
9188 }
9189
9190 else if (reg_set_p (op0, prev))
9191 /* If this sets OP0, but not directly, we have to give up. */
9192 break;
9193
9194 if (x)
9195 {
9196 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9197 code = GET_CODE (x);
9198 if (reverse_code)
9199 {
9200 code = reversed_comparison_code (x, prev);
9201 if (code == UNKNOWN)
9202 return 0;
9203 reverse_code = 0;
9204 }
9205
9206 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9207 if (earliest)
9208 *earliest = prev;
9209 }
9210 }
9211
9212 /* If constant is first, put it last. */
9213 if (CONSTANT_P (op0))
9214 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9215
9216 /* If OP0 is the result of a comparison, we weren't able to find what
9217 was really being compared, so fail. */
9218 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9219 return 0;
9220
9221 /* Canonicalize any ordered comparison with integers involving equality
9222 if we can do computations in the relevant mode and we do not
9223 overflow. */
9224
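/* For example (illustrative): (ge x 4) becomes (gt x 3),
   (leu x 7) becomes (ltu x 8), and (geu x 1) becomes (gtu x 0),
   provided the checks below show the adjusted constant does not wrap.  */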
9225 if (GET_CODE (op1) == CONST_INT
9226 && GET_MODE (op0) != VOIDmode
9227 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9228 {
9229 HOST_WIDE_INT const_val = INTVAL (op1);
9230 unsigned HOST_WIDE_INT uconst_val = const_val;
9231 unsigned HOST_WIDE_INT max_val
9232 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9233
9234 switch (code)
9235 {
9236 case LE:
9237 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9238 code = LT, op1 = GEN_INT (const_val + 1);
9239 break;
9240
9241 /* When cross-compiling, const_val might be sign-extended from
9242 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9243 case GE:
9244 if ((HOST_WIDE_INT) (const_val & max_val)
9245 != (((HOST_WIDE_INT) 1
9246 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9247 code = GT, op1 = GEN_INT (const_val - 1);
9248 break;
9249
9250 case LEU:
9251 if (uconst_val < max_val)
9252 code = LTU, op1 = GEN_INT (uconst_val + 1);
9253 break;
9254
9255 case GEU:
9256 if (uconst_val != 0)
9257 code = GTU, op1 = GEN_INT (uconst_val - 1);
9258 break;
9259
9260 default:
9261 break;
9262 }
9263 }
9264
9265 #ifdef HAVE_cc0
9266 /* Never return CC0; return zero instead. */
9267 if (op0 == cc0_rtx)
9268 return 0;
9269 #endif
9270
9271 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9272 }
9273
9274 /* Given a jump insn JUMP, return the condition that will cause it to branch
9275 to its JUMP_LABEL. If the condition cannot be understood, or is an
9276 inequality floating-point comparison which needs to be reversed, 0 will
9277 be returned.
9278
9279 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9280 insn used in locating the condition was found. If a replacement test
9281 of the condition is desired, it should be placed in front of that
9282 insn and we will be sure that the inputs are still valid. */
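/* A minimal usage sketch (hypothetical caller, not part of this file;
   the function and variable names below are made up):  */
#if 0
static void
example_inspect_jump_condition (jump)
     rtx jump;
{
  rtx earliest;
  rtx cond = get_condition (jump, &earliest);

  if (cond != 0 && GET_CODE (cond) == LT)
    /* JUMP branches to its JUMP_LABEL whenever
       XEXP (cond, 0) < XEXP (cond, 1).  */
    ;
}
#endif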
9283
9284 rtx
9285 get_condition (jump, earliest)
9286 rtx jump;
9287 rtx *earliest;
9288 {
9289 rtx cond;
9290 int reverse;
9291 rtx set;
9292
9293 /* If this is not a standard conditional jump, we can't parse it. */
9294 if (GET_CODE (jump) != JUMP_INSN
9295 || ! any_condjump_p (jump))
9296 return 0;
9297 set = pc_set (jump);
9298
9299 cond = XEXP (SET_SRC (set), 0);
9300
9301 /* If this branches to JUMP_LABEL when the condition is false, reverse
9302 the condition. */
9303 reverse
9304 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9305 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9306
9307 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9308 }
9309
9310 /* Similar to the above routine, except that we also put an invariant last
9311 unless both operands are invariants. */
9312
9313 rtx
9314 get_condition_for_loop (loop, x)
9315 const struct loop *loop;
9316 rtx x;
9317 {
9318 rtx comparison = get_condition (x, (rtx*) 0);
9319
9320 if (comparison == 0
9321 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9322 || loop_invariant_p (loop, XEXP (comparison, 1)))
9323 return comparison;
9324
9325 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9326 XEXP (comparison, 1), XEXP (comparison, 0));
9327 }
9328
9329 /* Scan the function and determine whether it has indirect (computed) jumps.
9330
9331 This is taken mostly from flow.c; similar code exists elsewhere
9332 in the compiler. It may be useful to put this into rtlanal.c. */
9333 static int
9334 indirect_jump_in_function_p (start)
9335 rtx start;
9336 {
9337 rtx insn;
9338
9339 for (insn = start; insn; insn = NEXT_INSN (insn))
9340 if (computed_jump_p (insn))
9341 return 1;
9342
9343 return 0;
9344 }
9345
9346 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9347 documentation for LOOP_MEMS for the definition of `appropriate'.
9348 This function is called from prescan_loop via for_each_rtx. */
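/* Usage sketch (hypothetical; the real call site is in prescan_loop):
   walk INSN and record any MEMs found in LOOP_INFO->mems.  */
#if 0
for_each_rtx (&insn, insert_loop_mem, loop_info);
#endif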
9349
9350 static int
9351 insert_loop_mem (mem, data)
9352 rtx *mem;
9353 void *data ATTRIBUTE_UNUSED;
9354 {
9355 struct loop_info *loop_info = data;
9356 int i;
9357 rtx m = *mem;
9358
9359 if (m == NULL_RTX)
9360 return 0;
9361
9362 switch (GET_CODE (m))
9363 {
9364 case MEM:
9365 break;
9366
9367 case CLOBBER:
9368 /* We're not interested in MEMs that are only clobbered. */
9369 return -1;
9370
9371 case CONST_DOUBLE:
9372 /* We're not interested in the MEM associated with a
9373 CONST_DOUBLE, so there's no need to traverse into this. */
9374 return -1;
9375
9376 case EXPR_LIST:
9377 /* We're not interested in any MEMs that only appear in notes. */
9378 return -1;
9379
9380 default:
9381 /* This is not a MEM. */
9382 return 0;
9383 }
9384
9385 /* See if we've already seen this MEM. */
9386 for (i = 0; i < loop_info->mems_idx; ++i)
9387 if (rtx_equal_p (m, loop_info->mems[i].mem))
9388 {
9389 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9390 /* The modes of the two memory accesses are different. If
9391 this happens, something tricky is going on, and we just
9392 don't optimize accesses to this MEM. */
9393 loop_info->mems[i].optimize = 0;
9394
9395 return 0;
9396 }
9397
9398 /* Resize the array, if necessary. */
9399 if (loop_info->mems_idx == loop_info->mems_allocated)
9400 {
9401 if (loop_info->mems_allocated != 0)
9402 loop_info->mems_allocated *= 2;
9403 else
9404 loop_info->mems_allocated = 32;
9405
9406 loop_info->mems = (loop_mem_info *)
9407 xrealloc (loop_info->mems,
9408 loop_info->mems_allocated * sizeof (loop_mem_info));
9409 }
9410
9411 /* Actually insert the MEM. */
9412 loop_info->mems[loop_info->mems_idx].mem = m;
9413 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9414 because we can't put it in a register. We still store it in the
9415 table, though, so that if we see the same address later, but in a
9416 non-BLK mode, we'll not think we can optimize it at that point. */
9417 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9418 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9419 ++loop_info->mems_idx;
9420
9421 return 0;
9422 }
9423
9424
9425 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9426
9427 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9428 register that is modified by an insn between FROM and TO. If the
9429 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9430 more, stop incrementing it, to avoid overflow.
9431
9432 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9433 register I is used, if it is only used once. Otherwise, it is set
9434 to 0 (for no uses) or const0_rtx for more than one use. This
9435 parameter may be zero, in which case this processing is not done.
9436
9437 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9438 optimize register I. */
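/* A sketch (hypothetical helper, not part of this file) of how the
   SINGLE_USAGE encoding described above can be read back: zero means no
   use was seen, const0_rtx means more than one use, and anything else is
   the unique insn that uses the register.  */
#if 0
static int
example_reg_used_once_p (regs, regno)
     const struct loop_regs *regs;
     int regno;
{
  rtx usage = regs->array[regno].single_usage;

  return usage != 0 && usage != const0_rtx;
}
#endif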
9439
9440 static void
9441 loop_regs_scan (loop, extra_size)
9442 const struct loop *loop;
9443 int extra_size;
9444 {
9445 struct loop_regs *regs = LOOP_REGS (loop);
9446 int old_nregs;
9447 /* last_set[n] is nonzero iff reg n has been set in the current
9448 basic block. In that case, it is the insn that last set reg n. */
9449 rtx *last_set;
9450 rtx insn;
9451 int i;
9452
9453 old_nregs = regs->num;
9454 regs->num = max_reg_num ();
9455
9456 /* Grow the regs array if not allocated or too small. */
9457 if (regs->num >= regs->size)
9458 {
9459 regs->size = regs->num + extra_size;
9460
9461 regs->array = (struct loop_reg *)
9462 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9463
9464 /* Zero the new elements. */
9465 memset (regs->array + old_nregs, 0,
9466 (regs->size - old_nregs) * sizeof (*regs->array));
9467 }
9468
9469 /* Clear previously scanned fields but do not clear n_times_set. */
9470 for (i = 0; i < old_nregs; i++)
9471 {
9472 regs->array[i].set_in_loop = 0;
9473 regs->array[i].may_not_optimize = 0;
9474 regs->array[i].single_usage = NULL_RTX;
9475 }
9476
9477 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9478
9479 /* Scan the loop, recording register usage. */
9480 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9481 insn = NEXT_INSN (insn))
9482 {
9483 if (INSN_P (insn))
9484 {
9485 /* Record registers that have exactly one use. */
9486 find_single_use_in_loop (regs, insn, PATTERN (insn));
9487
9488 /* Include uses in REG_EQUAL notes. */
9489 if (REG_NOTES (insn))
9490 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9491
9492 if (GET_CODE (PATTERN (insn)) == SET
9493 || GET_CODE (PATTERN (insn)) == CLOBBER)
9494 count_one_set (regs, insn, PATTERN (insn), last_set);
9495 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9496 {
9497 int i;
9498 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9499 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9500 last_set);
9501 }
9502 }
9503
9504 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9505 memset (last_set, 0, regs->num * sizeof (rtx));
9506 }
9507
9508 /* Invalidate all hard registers clobbered by calls. With one exception:
9509 a call-clobbered PIC register is still function-invariant for our
9510 purposes, since we can hoist any PIC calculations out of the loop.
9511 Thus the call to rtx_varies_p. */
9512 if (LOOP_INFO (loop)->has_call)
9513 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9514 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9515 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9516 {
9517 regs->array[i].may_not_optimize = 1;
9518 regs->array[i].set_in_loop = 1;
9519 }
9520
9521 #ifdef AVOID_CCMODE_COPIES
9522 /* Don't try to move insns which set CC registers if we should not
9523 create CCmode register copies. */
9524 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9525 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9526 regs->array[i].may_not_optimize = 1;
9527 #endif
9528
9529 /* Set regs->array[I].n_times_set for the new registers. */
9530 for (i = old_nregs; i < regs->num; i++)
9531 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9532
9533 free (last_set);
9534 }
9535
9536 /* Returns the number of real INSNs in the LOOP. */
9537
9538 static int
9539 count_insns_in_loop (loop)
9540 const struct loop *loop;
9541 {
9542 int count = 0;
9543 rtx insn;
9544
9545 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9546 insn = NEXT_INSN (insn))
9547 if (INSN_P (insn))
9548 ++count;
9549
9550 return count;
9551 }
9552
9553 /* Move MEMs into registers for the duration of the loop. */
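/* For illustration (a sketch of the effect only): given a loop-invariant
   address P, a MEM that is both read and written inside the loop is
   shadowed roughly as

       reg = *P;                         <- loaded before the loop
       loop:  ... uses of reg ...; reg = ...;
       *P = reg;                         <- stored after the loop

   where the store after the loop is emitted only when the MEM is
   actually written within the loop.  */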
9554
9555 static void
9556 load_mems (loop)
9557 const struct loop *loop;
9558 {
9559 struct loop_info *loop_info = LOOP_INFO (loop);
9560 struct loop_regs *regs = LOOP_REGS (loop);
9561 int maybe_never = 0;
9562 int i;
9563 rtx p, prev_ebb_head;
9564 rtx label = NULL_RTX;
9565 rtx end_label;
9566 /* Nonzero if the next instruction may never be executed. */
9567 int next_maybe_never = 0;
9568 unsigned int last_max_reg = max_reg_num ();
9569
9570 if (loop_info->mems_idx == 0)
9571 return;
9572
9573 /* We cannot use next_label here because it skips over normal insns. */
9574 end_label = next_nonnote_insn (loop->end);
9575 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9576 end_label = NULL_RTX;
9577
9578 /* Check to see if it's possible that some instructions in the loop are
9579 never executed. Also check if there is a goto out of the loop other
9580 than right after the end of the loop. */
9581 for (p = next_insn_in_loop (loop, loop->scan_start);
9582 p != NULL_RTX;
9583 p = next_insn_in_loop (loop, p))
9584 {
9585 if (GET_CODE (p) == CODE_LABEL)
9586 maybe_never = 1;
9587 else if (GET_CODE (p) == JUMP_INSN
9588 /* If we enter the loop in the middle, and scan
9589 around to the beginning, don't set maybe_never
9590 for that. This must be an unconditional jump,
9591 otherwise the code at the top of the loop might
9592 never be executed. Unconditional jumps are
9593 followed by a barrier and then the loop end. */
9594 && ! (GET_CODE (p) == JUMP_INSN
9595 && JUMP_LABEL (p) == loop->top
9596 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9597 && any_uncondjump_p (p)))
9598 {
9599 /* If this is a jump outside of the loop but not right
9600 after the end of the loop, we would have to emit new fixup
9601 sequences for each such label. */
9602 if (/* If we can't tell where control might go when this
9603 JUMP_INSN is executed, we must be conservative. */
9604 !JUMP_LABEL (p)
9605 || (JUMP_LABEL (p) != end_label
9606 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9607 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9608 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9609 return;
9610
9611 if (!any_condjump_p (p))
9612 /* Something complicated. */
9613 maybe_never = 1;
9614 else
9615 /* If there are any more instructions in the loop, they
9616 might not be reached. */
9617 next_maybe_never = 1;
9618 }
9619 else if (next_maybe_never)
9620 maybe_never = 1;
9621 }
9622
9623 /* Find start of the extended basic block that enters the loop. */
9624 for (p = loop->start;
9625 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9626 p = PREV_INSN (p))
9627 ;
9628 prev_ebb_head = p;
9629
9630 cselib_init ();
9631
9632 /* Build table of mems that get set to constant values before the
9633 loop. */
9634 for (; p != loop->start; p = NEXT_INSN (p))
9635 cselib_process_insn (p);
9636
9637 /* Actually move the MEMs. */
9638 for (i = 0; i < loop_info->mems_idx; ++i)
9639 {
9640 regset_head load_copies;
9641 regset_head store_copies;
9642 int written = 0;
9643 rtx reg;
9644 rtx mem = loop_info->mems[i].mem;
9645 rtx mem_list_entry;
9646
9647 if (MEM_VOLATILE_P (mem)
9648 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9649 /* There's no telling whether or not MEM is modified. */
9650 loop_info->mems[i].optimize = 0;
9651
9652 /* Go through the MEMs written to in the loop to see if this
9653 one is aliased by one of them. */
9654 mem_list_entry = loop_info->store_mems;
9655 while (mem_list_entry)
9656 {
9657 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9658 written = 1;
9659 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9660 mem, rtx_varies_p))
9661 {
9662 /* MEM is indeed aliased by this store. */
9663 loop_info->mems[i].optimize = 0;
9664 break;
9665 }
9666 mem_list_entry = XEXP (mem_list_entry, 1);
9667 }
9668
9669 if (flag_float_store && written
9670 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9671 loop_info->mems[i].optimize = 0;
9672
9673 /* If this MEM is written to, we must be sure that there
9674 are no reads from another MEM that aliases this one. */
9675 if (loop_info->mems[i].optimize && written)
9676 {
9677 int j;
9678
9679 for (j = 0; j < loop_info->mems_idx; ++j)
9680 {
9681 if (j == i)
9682 continue;
9683 else if (true_dependence (mem,
9684 VOIDmode,
9685 loop_info->mems[j].mem,
9686 rtx_varies_p))
9687 {
9688 /* It's not safe to hoist loop_info->mems[i] out of
9689 the loop because writes to it might not be
9690 seen by reads from loop_info->mems[j]. */
9691 loop_info->mems[i].optimize = 0;
9692 break;
9693 }
9694 }
9695 }
9696
9697 if (maybe_never && may_trap_p (mem))
9698 /* We can't access the MEM outside the loop; it might
9699 cause a trap that wouldn't have happened otherwise. */
9700 loop_info->mems[i].optimize = 0;
9701
9702 if (!loop_info->mems[i].optimize)
9703 /* We thought we were going to lift this MEM out of the
9704 loop, but later discovered that we could not. */
9705 continue;
9706
9707 INIT_REG_SET (&load_copies);
9708 INIT_REG_SET (&store_copies);
9709
9710 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9711 order to keep scan_loop from moving stores to this MEM
9712 out of the loop just because this REG is neither a
9713 user-variable nor used in the loop test. */
9714 reg = gen_reg_rtx (GET_MODE (mem));
9715 REG_USERVAR_P (reg) = 1;
9716 loop_info->mems[i].reg = reg;
9717
9718 /* Now, replace all references to the MEM with the
9719 corresponding pseudos. */
9720 maybe_never = 0;
9721 for (p = next_insn_in_loop (loop, loop->scan_start);
9722 p != NULL_RTX;
9723 p = next_insn_in_loop (loop, p))
9724 {
9725 if (INSN_P (p))
9726 {
9727 rtx set;
9728
9729 set = single_set (p);
9730
9731 /* See if this copies the mem into a register that isn't
9732 modified afterwards. We'll try to do copy propagation
9733 a little further on. */
9734 if (set
9735 /* @@@ This test is _way_ too conservative. */
9736 && ! maybe_never
9737 && GET_CODE (SET_DEST (set)) == REG
9738 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9739 && REGNO (SET_DEST (set)) < last_max_reg
9740 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9741 && rtx_equal_p (SET_SRC (set), mem))
9742 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9743
9744 /* See if this copies the mem from a register that isn't
9745 modified afterwards. We'll try to remove the
9746 redundant copy later on by doing a little register
9747 renaming and copy propagation. This will help
9748 to untangle things for the BIV detection code. */
9749 if (set
9750 && ! maybe_never
9751 && GET_CODE (SET_SRC (set)) == REG
9752 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9753 && REGNO (SET_SRC (set)) < last_max_reg
9754 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9755 && rtx_equal_p (SET_DEST (set), mem))
9756 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9757
9758 /* Replace the memory reference with the shadow register. */
9759 replace_loop_mems (p, loop_info->mems[i].mem,
9760 loop_info->mems[i].reg);
9761 }
9762
9763 if (GET_CODE (p) == CODE_LABEL
9764 || GET_CODE (p) == JUMP_INSN)
9765 maybe_never = 1;
9766 }
9767
9768 if (! apply_change_group ())
9769 /* We couldn't replace all occurrences of the MEM. */
9770 loop_info->mems[i].optimize = 0;
9771 else
9772 {
9773 /* Load the memory immediately before LOOP->START, which is
9774 the NOTE_LOOP_BEG. */
9775 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9776 rtx set;
9777 rtx best = mem;
9778 int j;
9779 struct elt_loc_list *const_equiv = 0;
9780
9781 if (e)
9782 {
9783 struct elt_loc_list *equiv;
9784 struct elt_loc_list *best_equiv = 0;
9785 for (equiv = e->locs; equiv; equiv = equiv->next)
9786 {
9787 if (CONSTANT_P (equiv->loc))
9788 const_equiv = equiv;
9789 else if (GET_CODE (equiv->loc) == REG
9790 /* Extending hard register lifetimes causes a crash
9791 on SRC targets. Doing so on non-SRC targets is
9792 probably also not a good idea, since we most
9793 probably have a pseudoregister equivalence as
9794 well. */
9795 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9796 best_equiv = equiv;
9797 }
9798 /* Use the constant equivalence if that is cheap enough. */
9799 if (! best_equiv)
9800 best_equiv = const_equiv;
9801 else if (const_equiv
9802 && (rtx_cost (const_equiv->loc, SET)
9803 <= rtx_cost (best_equiv->loc, SET)))
9804 {
9805 best_equiv = const_equiv;
9806 const_equiv = 0;
9807 }
9808
9809 /* If best_equiv is nonzero, we know that MEM is set to a
9810 constant or register before the loop. We will use this
9811 knowledge to initialize the shadow register with that
9812 constant or reg rather than by loading from MEM. */
9813 if (best_equiv)
9814 best = copy_rtx (best_equiv->loc);
9815 }
9816
9817 set = gen_move_insn (reg, best);
9818 set = loop_insn_hoist (loop, set);
9819 if (REG_P (best))
9820 {
9821 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9822 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9823 {
9824 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9825 break;
9826 }
9827 }
9828
9829 if (const_equiv)
9830 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9831
9832 if (written)
9833 {
9834 if (label == NULL_RTX)
9835 {
9836 label = gen_label_rtx ();
9837 emit_label_after (label, loop->end);
9838 }
9839
9840 /* Store the memory immediately after END, which is
9841 the NOTE_LOOP_END. */
9842 set = gen_move_insn (copy_rtx (mem), reg);
9843 loop_insn_emit_after (loop, 0, label, set);
9844 }
9845
9846 if (loop_dump_stream)
9847 {
9848 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9849 REGNO (reg), (written ? "r/w" : "r/o"));
9850 print_rtl (loop_dump_stream, mem);
9851 fputc ('\n', loop_dump_stream);
9852 }
9853
9854 /* Attempt a bit of copy propagation. This helps untangle the
9855 data flow, and enables {basic,general}_induction_var to find
9856 more bivs/givs. */
9857 EXECUTE_IF_SET_IN_REG_SET
9858 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9859 {
9860 try_copy_prop (loop, reg, j);
9861 });
9862 CLEAR_REG_SET (&load_copies);
9863
9864 EXECUTE_IF_SET_IN_REG_SET
9865 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9866 {
9867 try_swap_copy_prop (loop, reg, j);
9868 });
9869 CLEAR_REG_SET (&store_copies);
9870 }
9871 }
9872
9873 if (label != NULL_RTX && end_label != NULL_RTX)
9874 {
9875 /* Now, we need to replace all references to the previous exit
9876 label with the new one. */
9877 rtx_pair rr;
9878 rr.r1 = end_label;
9879 rr.r2 = label;
9880
9881 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9882 {
9883 for_each_rtx (&p, replace_label, &rr);
9884
9885 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9886 field. This is not handled by for_each_rtx because it doesn't
9887 handle unprinted ('0') fields. We need to update JUMP_LABEL
9888 because the immediately following unroll pass will use it.
9889 replace_label would not work anyway, because that only handles
9890 LABEL_REFs. */
9891 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9892 JUMP_LABEL (p) = label;
9893 }
9894 }
9895
9896 cselib_finish ();
9897 }
9898
9899 /* For communication between note_reg_stored and its caller. */
9900 struct note_reg_stored_arg
9901 {
9902 int set_seen;
9903 rtx reg;
9904 };
9905
9906 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9907 is equal to ARG. */
9908 static void
9909 note_reg_stored (x, setter, arg)
9910 rtx x, setter ATTRIBUTE_UNUSED;
9911 void *arg;
9912 {
9913 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9914 if (t->reg == x)
9915 t->set_seen = 1;
9916 }
9917
9918 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9919 There must be exactly one insn that sets this pseudo; it will be
9920 deleted if all replacements succeed and we can prove that the register
9921 is not used after the loop. */
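/* For illustration (a sketch; the register number is made up): if pseudo
   150 is set exactly once within the loop,

       (set (reg 150) ...)               <- the initializing insn
       ... later uses of (reg 150) ...

   each later use of (reg 150) in the same extended basic block is
   rewritten to use REPLACEMENT, stopping early if REPLACEMENT itself
   is stored to.  */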
9922
9923 static void
9924 try_copy_prop (loop, replacement, regno)
9925 const struct loop *loop;
9926 rtx replacement;
9927 unsigned int regno;
9928 {
9929 /* This is the reg that we are copying from. */
9930 rtx reg_rtx = regno_reg_rtx[regno];
9931 rtx init_insn = 0;
9932 rtx insn;
9933 /* These help keep track of whether we replaced all uses of the reg. */
9934 int replaced_last = 0;
9935 int store_is_first = 0;
9936
9937 for (insn = next_insn_in_loop (loop, loop->scan_start);
9938 insn != NULL_RTX;
9939 insn = next_insn_in_loop (loop, insn))
9940 {
9941 rtx set;
9942
9943 /* Only substitute within one extended basic block from the initializing
9944 insn. */
9945 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9946 break;
9947
9948 if (! INSN_P (insn))
9949 continue;
9950
9951 /* Is this the initializing insn? */
9952 set = single_set (insn);
9953 if (set
9954 && GET_CODE (SET_DEST (set)) == REG
9955 && REGNO (SET_DEST (set)) == regno)
9956 {
9957 if (init_insn)
9958 abort ();
9959
9960 init_insn = insn;
9961 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9962 store_is_first = 1;
9963 }
9964
9965 /* Only substitute after seeing the initializing insn. */
9966 if (init_insn && insn != init_insn)
9967 {
9968 struct note_reg_stored_arg arg;
9969
9970 replace_loop_regs (insn, reg_rtx, replacement);
9971 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9972 replaced_last = 1;
9973
9974 /* Stop replacing when REPLACEMENT is modified. */
9975 arg.reg = replacement;
9976 arg.set_seen = 0;
9977 note_stores (PATTERN (insn), note_reg_stored, &arg);
9978 if (arg.set_seen)
9979 {
9980 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
9981
9982 /* It is possible that we've turned a previously valid REG_EQUAL note
9983 into an invalid one: we changed REGNO to REPLACEMENT, and unlike REGNO,
9984 REPLACEMENT is modified here, so the note's meaning may have changed. */
9985 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
9986 remove_note (insn, note);
9987 break;
9988 }
9989 }
9990 }
9991 if (! init_insn)
9992 abort ();
9993 if (apply_change_group ())
9994 {
9995 if (loop_dump_stream)
9996 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9997 if (store_is_first && replaced_last)
9998 {
9999 rtx first;
10000 rtx retval_note;
10001
10002 /* Assume we're just deleting INIT_INSN. */
10003 first = init_insn;
10004 /* Look for REG_RETVAL note. If we're deleting the end of
10005 the libcall sequence, the whole sequence can go. */
10006 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10007 /* If we found a REG_RETVAL note, find the first instruction
10008 in the sequence. */
10009 if (retval_note)
10010 first = XEXP (retval_note, 0);
10011
10012 /* Delete the instructions. */
10013 loop_delete_insns (first, init_insn);
10014 }
10015 if (loop_dump_stream)
10016 fprintf (loop_dump_stream, ".\n");
10017 }
10018 }
10019
10020 /* Replace all the instructions from FIRST up to and including LAST
10021 with NOTE_INSN_DELETED notes. */
10022
10023 static void
10024 loop_delete_insns (first, last)
10025 rtx first;
10026 rtx last;
10027 {
10028 while (1)
10029 {
10030 if (loop_dump_stream)
10031 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10032 INSN_UID (first));
10033 delete_insn (first);
10034
10035 /* If this was the LAST instruction we're supposed to delete,
10036 we're done. */
10037 if (first == last)
10038 break;
10039
10040 first = NEXT_INSN (first);
10041 }
10042 }
10043
10044 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10045 loop LOOP if the order of the sets of these registers can be
10046 swapped. There must be exactly one insn within the loop that sets
10047 this pseudo, followed immediately by a move insn that copies
10048 REGNO into REPLACEMENT. */
10049 static void
10050 try_swap_copy_prop (loop, replacement, regno)
10051 const struct loop *loop;
10052 rtx replacement;
10053 unsigned int regno;
10054 {
10055 rtx insn;
10056 rtx set = NULL_RTX;
10057 unsigned int new_regno;
10058
10059 new_regno = REGNO (replacement);
10060
10061 for (insn = next_insn_in_loop (loop, loop->scan_start);
10062 insn != NULL_RTX;
10063 insn = next_insn_in_loop (loop, insn))
10064 {
10065 /* Search for the insn that copies REGNO to NEW_REGNO. */
10066 if (INSN_P (insn)
10067 && (set = single_set (insn))
10068 && GET_CODE (SET_DEST (set)) == REG
10069 && REGNO (SET_DEST (set)) == new_regno
10070 && GET_CODE (SET_SRC (set)) == REG
10071 && REGNO (SET_SRC (set)) == regno)
10072 break;
10073 }
10074
10075 if (insn != NULL_RTX)
10076 {
10077 rtx prev_insn;
10078 rtx prev_set;
10079
10080 /* Some DEF-USE info would come in handy here to make this
10081 function more general. For now, just check the previous insn,
10082 which is the most likely candidate for setting REGNO. */
10083
10084 prev_insn = PREV_INSN (insn);
10085
10086 if (INSN_P (insn)
10087 && (prev_set = single_set (prev_insn))
10088 && GET_CODE (SET_DEST (prev_set)) == REG
10089 && REGNO (SET_DEST (prev_set)) == regno)
10090 {
10091 /* We have:
10092 (set (reg regno) (expr))
10093 (set (reg new_regno) (reg regno))
10094
10095 so try converting this to:
10096 (set (reg new_regno) (expr))
10097 (set (reg regno) (reg new_regno))
10098
10099 The former construct is often generated when a global
10100 variable used for an induction variable is shadowed by a
10101 register (NEW_REGNO). The latter construct improves the
10102 chances of GIV replacement and BIV elimination. */
10103
10104 validate_change (prev_insn, &SET_DEST (prev_set),
10105 replacement, 1);
10106 validate_change (insn, &SET_DEST (set),
10107 SET_SRC (set), 1);
10108 validate_change (insn, &SET_SRC (set),
10109 replacement, 1);
10110
10111 if (apply_change_group ())
10112 {
10113 if (loop_dump_stream)
10114 fprintf (loop_dump_stream,
10115 " Swapped set of reg %d at %d with reg %d at %d.\n",
10116 regno, INSN_UID (insn),
10117 new_regno, INSN_UID (prev_insn));
10118
10119 /* Update first use of REGNO. */
10120 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10121 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10122
10123 /* Now perform copy propagation to hopefully
10124 remove all uses of REGNO within the loop. */
10125 try_copy_prop (loop, replacement, regno);
10126 }
10127 }
10128 }
10129 }
10130
10131 /* Replace MEM with its associated pseudo register. This function is
10132 called from load_mems via for_each_rtx. DATA is actually a pointer
10133 to a structure describing the instruction currently being scanned
10134 and the MEM we are currently replacing. */
10135
10136 static int
10137 replace_loop_mem (mem, data)
10138 rtx *mem;
10139 void *data;
10140 {
10141 loop_replace_args *args = (loop_replace_args *) data;
10142 rtx m = *mem;
10143
10144 if (m == NULL_RTX)
10145 return 0;
10146
10147 switch (GET_CODE (m))
10148 {
10149 case MEM:
10150 break;
10151
10152 case CONST_DOUBLE:
10153 /* We're not interested in the MEM associated with a
10154 CONST_DOUBLE, so there's no need to traverse into one. */
10155 return -1;
10156
10157 default:
10158 /* This is not a MEM. */
10159 return 0;
10160 }
10161
10162 if (!rtx_equal_p (args->match, m))
10163 /* This is not the MEM we are currently replacing. */
10164 return 0;
10165
10166 /* Actually replace the MEM. */
10167 validate_change (args->insn, mem, args->replacement, 1);
10168
10169 return 0;
10170 }
10171
10172 static void
10173 replace_loop_mems (insn, mem, reg)
10174 rtx insn;
10175 rtx mem;
10176 rtx reg;
10177 {
10178 loop_replace_args args;
10179
10180 args.insn = insn;
10181 args.match = mem;
10182 args.replacement = reg;
10183
10184 for_each_rtx (&insn, replace_loop_mem, &args);
10185 }
10186
10187 /* Replace one register with another. Called through for_each_rtx; PX points
10188 to the rtx being scanned. DATA is actually a pointer to
10189 a structure of arguments. */
10190
10191 static int
10192 replace_loop_reg (px, data)
10193 rtx *px;
10194 void *data;
10195 {
10196 rtx x = *px;
10197 loop_replace_args *args = (loop_replace_args *) data;
10198
10199 if (x == NULL_RTX)
10200 return 0;
10201
10202 if (x == args->match)
10203 validate_change (args->insn, px, args->replacement, 1);
10204
10205 return 0;
10206 }
10207
10208 static void
10209 replace_loop_regs (insn, reg, replacement)
10210 rtx insn;
10211 rtx reg;
10212 rtx replacement;
10213 {
10214 loop_replace_args args;
10215
10216 args.insn = insn;
10217 args.match = reg;
10218 args.replacement = replacement;
10219
10220 for_each_rtx (&insn, replace_loop_reg, &args);
10221 }
10222
10223 /* Replace occurrences of the old exit label for the loop with the new
10224 one. DATA is an rtx_pair containing the old and new labels,
10225 respectively. */
10226
10227 static int
10228 replace_label (x, data)
10229 rtx *x;
10230 void *data;
10231 {
10232 rtx l = *x;
10233 rtx old_label = ((rtx_pair *) data)->r1;
10234 rtx new_label = ((rtx_pair *) data)->r2;
10235
10236 if (l == NULL_RTX)
10237 return 0;
10238
10239 if (GET_CODE (l) != LABEL_REF)
10240 return 0;
10241
10242 if (XEXP (l, 0) != old_label)
10243 return 0;
10244
10245 XEXP (l, 0) = new_label;
10246 ++LABEL_NUSES (new_label);
10247 --LABEL_NUSES (old_label);
10248
10249 return 0;
10250 }
10251 \f
10252 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10253 (ignored in the interim). */
10254
10255 static rtx
10256 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10257 const struct loop *loop ATTRIBUTE_UNUSED;
10258 basic_block where_bb ATTRIBUTE_UNUSED;
10259 rtx where_insn;
10260 rtx pattern;
10261 {
10262 return emit_insn_after (pattern, where_insn);
10263 }
10264
10265
10266 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
10267 in basic block WHERE_BB (ignored in the interim) within the loop;
10268 otherwise hoist PATTERN into the loop pre-header. */
10269
10270 rtx
10271 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10272 const struct loop *loop;
10273 basic_block where_bb ATTRIBUTE_UNUSED;
10274 rtx where_insn;
10275 rtx pattern;
10276 {
10277 if (! where_insn)
10278 return loop_insn_hoist (loop, pattern);
10279 return emit_insn_before (pattern, where_insn);
10280 }
10281
10282
10283 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10284 WHERE_BB (ignored in the interim) within the loop. */
10285
10286 static rtx
10287 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10288 const struct loop *loop ATTRIBUTE_UNUSED;
10289 basic_block where_bb ATTRIBUTE_UNUSED;
10290 rtx where_insn;
10291 rtx pattern;
10292 {
10293 return emit_call_insn_before (pattern, where_insn);
10294 }
10295
10296
10297 /* Hoist insn for PATTERN into the loop pre-header. */
10298
10299 rtx
10300 loop_insn_hoist (loop, pattern)
10301 const struct loop *loop;
10302 rtx pattern;
10303 {
10304 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10305 }
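/* Usage sketch (hypothetical; the names are illustrative): initialize a
   new pseudo in the loop pre-header so its value is available on entry.  */
#if 0
{
  rtx reg = gen_reg_rtx (SImode);

  loop_insn_hoist (loop, gen_move_insn (reg, const0_rtx));
}
#endif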
10306
10307
10308 /* Hoist call insn for PATTERN into the loop pre-header. */
10309
10310 static rtx
10311 loop_call_insn_hoist (loop, pattern)
10312 const struct loop *loop;
10313 rtx pattern;
10314 {
10315 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10316 }
10317
10318
10319 /* Sink insn for PATTERN after the loop end. */
10320
10321 rtx
10322 loop_insn_sink (loop, pattern)
10323 const struct loop *loop;
10324 rtx pattern;
10325 {
10326 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10327 }
10328
10329
10330 /* If the loop has multiple exits, emit insn for PATTERN before the
10331 loop to ensure that it will always be executed no matter how the
10332 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10333 since this is slightly more efficient. */
10334
10335 static rtx
10336 loop_insn_sink_or_swim (loop, pattern)
10337 const struct loop *loop;
10338 rtx pattern;
10339 {
10340 if (loop->exit_count)
10341 return loop_insn_hoist (loop, pattern);
10342 else
10343 return loop_insn_sink (loop, pattern);
10344 }
10345 \f
10346 static void
10347 loop_ivs_dump (loop, file, verbose)
10348 const struct loop *loop;
10349 FILE *file;
10350 int verbose;
10351 {
10352 struct iv_class *bl;
10353 int iv_num = 0;
10354
10355 if (! loop || ! file)
10356 return;
10357
10358 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10359 iv_num++;
10360
10361 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10362
10363 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10364 {
10365 loop_iv_class_dump (bl, file, verbose);
10366 fputc ('\n', file);
10367 }
10368 }
10369
10370
10371 static void
10372 loop_iv_class_dump (bl, file, verbose)
10373 const struct iv_class *bl;
10374 FILE *file;
10375 int verbose ATTRIBUTE_UNUSED;
10376 {
10377 struct induction *v;
10378 rtx incr;
10379 int i;
10380
10381 if (! bl || ! file)
10382 return;
10383
10384 fprintf (file, "IV class for reg %d, benefit %d\n",
10385 bl->regno, bl->total_benefit);
10386
10387 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10388 if (bl->initial_value)
10389 {
10390 fprintf (file, ", init val: ");
10391 print_simple_rtl (file, bl->initial_value);
10392 }
10393 if (bl->initial_test)
10394 {
10395 fprintf (file, ", init test: ");
10396 print_simple_rtl (file, bl->initial_test);
10397 }
10398 fputc ('\n', file);
10399
10400 if (bl->final_value)
10401 {
10402 fprintf (file, " Final val: ");
10403 print_simple_rtl (file, bl->final_value);
10404 fputc ('\n', file);
10405 }
10406
10407 if ((incr = biv_total_increment (bl)))
10408 {
10409 fprintf (file, " Total increment: ");
10410 print_simple_rtl (file, incr);
10411 fputc ('\n', file);
10412 }
10413
10414 /* List the increments. */
10415 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10416 {
10417 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10418 print_simple_rtl (file, v->add_val);
10419 fputc ('\n', file);
10420 }
10421
10422 /* List the givs. */
10423 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10424 {
10425 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10426 i, INSN_UID (v->insn), v->benefit);
10427 if (v->giv_type == DEST_ADDR)
10428 print_simple_rtl (file, v->mem);
10429 else
10430 print_simple_rtl (file, single_set (v->insn));
10431 fputc ('\n', file);
10432 }
10433 }
10434
10435
10436 static void
10437 loop_biv_dump (v, file, verbose)
10438 const struct induction *v;
10439 FILE *file;
10440 int verbose;
10441 {
10442 if (! v || ! file)
10443 return;
10444
10445 fprintf (file,
10446 "Biv %d: insn %d",
10447 REGNO (v->dest_reg), INSN_UID (v->insn));
10448 fprintf (file, " const ");
10449 print_simple_rtl (file, v->add_val);
10450
10451 if (verbose && v->final_value)
10452 {
10453 fputc ('\n', file);
10454 fprintf (file, " final ");
10455 print_simple_rtl (file, v->final_value);
10456 }
10457
10458 fputc ('\n', file);
10459 }
10460
10461
10462 static void
10463 loop_giv_dump (v, file, verbose)
10464 const struct induction *v;
10465 FILE *file;
10466 int verbose;
10467 {
10468 if (! v || ! file)
10469 return;
10470
10471 if (v->giv_type == DEST_REG)
10472 fprintf (file, "Giv %d: insn %d",
10473 REGNO (v->dest_reg), INSN_UID (v->insn));
10474 else
10475 fprintf (file, "Dest address: insn %d",
10476 INSN_UID (v->insn));
10477
10478 fprintf (file, " src reg %d benefit %d",
10479 REGNO (v->src_reg), v->benefit);
10480 fprintf (file, " lifetime %d",
10481 v->lifetime);
10482
10483 if (v->replaceable)
10484 fprintf (file, " replaceable");
10485
10486 if (v->no_const_addval)
10487 fprintf (file, " ncav");
10488
10489 if (v->ext_dependent)
10490 {
10491 switch (GET_CODE (v->ext_dependent))
10492 {
10493 case SIGN_EXTEND:
10494 fprintf (file, " ext se");
10495 break;
10496 case ZERO_EXTEND:
10497 fprintf (file, " ext ze");
10498 break;
10499 case TRUNCATE:
10500 fprintf (file, " ext tr");
10501 break;
10502 default:
10503 abort ();
10504 }
10505 }
10506
10507 fputc ('\n', file);
10508 fprintf (file, " mult ");
10509 print_simple_rtl (file, v->mult_val);
10510
10511 fputc ('\n', file);
10512 fprintf (file, " add ");
10513 print_simple_rtl (file, v->add_val);
10514
10515 if (verbose && v->final_value)
10516 {
10517 fputc ('\n', file);
10518 fprintf (file, " final ");
10519 print_simple_rtl (file, v->final_value);
10520 }
10521
10522 fputc ('\n', file);
10523 }
10524
10525
10526 void
10527 debug_ivs (loop)
10528 const struct loop *loop;
10529 {
10530 loop_ivs_dump (loop, stderr, 1);
10531 }
10532
10533
10534 void
10535 debug_iv_class (bl)
10536 const struct iv_class *bl;
10537 {
10538 loop_iv_class_dump (bl, stderr, 1);
10539 }
10540
10541
10542 void
10543 debug_biv (v)
10544 const struct induction *v;
10545 {
10546 loop_biv_dump (v, stderr, 1);
10547 }
10548
10549
10550 void
10551 debug_giv (v)
10552 const struct induction *v;
10553 {
10554 loop_giv_dump (v, stderr, 1);
10555 }
10556
10557
10558 #define LOOP_BLOCK_NUM_1(INSN) \
10559 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10560
10561 /* The notes do not have an assigned block, so look at the next insn. */
10562 #define LOOP_BLOCK_NUM(INSN) \
10563 ((INSN) ? (GET_CODE (INSN) == NOTE \
10564 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10565 : LOOP_BLOCK_NUM_1 (INSN)) \
10566 : -1)
10567
10568 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10569
10570 static void
10571 loop_dump_aux (loop, file, verbose)
10572 const struct loop *loop;
10573 FILE *file;
10574 int verbose ATTRIBUTE_UNUSED;
10575 {
10576 rtx label;
10577
10578 if (! loop || ! file)
10579 return;
10580
10581 /* Print diagnostics to compare our concept of a loop with
10582 what the loop notes say. */
10583 if (! PREV_INSN (loop->first->head)
10584 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10585 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10586 != NOTE_INSN_LOOP_BEG)
10587 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10588 INSN_UID (PREV_INSN (loop->first->head)));
10589 if (! NEXT_INSN (loop->last->end)
10590 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10591 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10592 != NOTE_INSN_LOOP_END)
10593 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10594 INSN_UID (NEXT_INSN (loop->last->end)));
10595
10596 if (loop->start)
10597 {
10598 fprintf (file,
10599 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10600 LOOP_BLOCK_NUM (loop->start),
10601 LOOP_INSN_UID (loop->start),
10602 LOOP_BLOCK_NUM (loop->cont),
10603 LOOP_INSN_UID (loop->cont),
10604 LOOP_BLOCK_NUM (loop->cont),
10605 LOOP_INSN_UID (loop->cont),
10606 LOOP_BLOCK_NUM (loop->vtop),
10607 LOOP_INSN_UID (loop->vtop),
10608 LOOP_BLOCK_NUM (loop->end),
10609 LOOP_INSN_UID (loop->end));
10610 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10611 LOOP_BLOCK_NUM (loop->top),
10612 LOOP_INSN_UID (loop->top),
10613 LOOP_BLOCK_NUM (loop->scan_start),
10614 LOOP_INSN_UID (loop->scan_start));
10615 fprintf (file, ";; exit_count %d", loop->exit_count);
10616 if (loop->exit_count)
10617 {
10618 fputs (", labels:", file);
10619 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10620 {
10621 fprintf (file, " %d ",
10622 LOOP_INSN_UID (XEXP (label, 0)));
10623 }
10624 }
10625 fputs ("\n", file);
10626
10627 /* This can happen when a marked loop appears as two nested loops,
10628 say from while (a || b) {}. The inner loop won't match
10629 the loop markers but the outer one will. */
10630 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10631 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10632 }
10633 }
10634
10635 /* Call this function from the debugger to dump LOOP. */
10636
10637 void
10638 debug_loop (loop)
10639 const struct loop *loop;
10640 {
10641 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10642 }
10643
10644 /* Call this function from the debugger to dump LOOPS. */
10645
10646 void
10647 debug_loops (loops)
10648 const struct loops *loops;
10649 {
10650 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
10651 }