/* Perform simple optimizations to clean up the result of reload.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"

#include "machmode.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "insn-config.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "regs.h"
#include "basic-block.h"
#include "reload.h"
#include "recog.h"
#include "output.h"
#include "cselib.h"
#include "diagnostic-core.h"
#include "except.h"
#include "tree.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
#include "df.h"
#include "dbgcnt.h"

static int reload_cse_noop_set_p (rtx);
static void reload_cse_simplify (rtx, rtx);
static void reload_cse_regs_1 (rtx);
static int reload_cse_simplify_set (rtx, rtx);
static int reload_cse_simplify_operands (rtx, rtx);

static void reload_combine (void);
static void reload_combine_note_use (rtx *, rtx, int, rtx);
static void reload_combine_note_store (rtx, const_rtx, void *);

static bool reload_cse_move2add (rtx);
static void move2add_note_store (rtx, const_rtx, void *);

/* Call cse / combine like post-reload optimization phases.
   FIRST is the first instruction.  */
void
reload_cse_regs (rtx first ATTRIBUTE_UNUSED)
{
  bool moves_converted;
  reload_cse_regs_1 (first);
  reload_combine ();
  moves_converted = reload_cse_move2add (first);
  if (flag_expensive_optimizations)
    {
      if (moves_converted)
        reload_combine ();
      reload_cse_regs_1 (first);
    }
}

/* See whether a single set SET is a noop.  */
static int
reload_cse_noop_set_p (rtx set)
{
  if (cselib_reg_set_mode (SET_DEST (set)) != GET_MODE (SET_DEST (set)))
    return 0;

  return rtx_equal_for_cselib_p (SET_DEST (set), SET_SRC (set));
}
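
/* For illustration (hypothetical RTL; register numbers invented): if two
   pseudos were both assigned hard register 0, reload can leave behind
	(set (reg:SI 0) (reg:SI 0))
   which the check above recognizes as a no-op, as it would a copy whose
   source and destination are known by cselib to hold the same value.  The
   mode comparison guards against a copy that only looks like a no-op
   because the recorded value was set in a different (narrower) mode.  */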

/* Try to simplify INSN.  */
static void
reload_cse_simplify (rtx insn, rtx testreg)
{
  rtx body = PATTERN (insn);

  if (GET_CODE (body) == SET)
    {
      int count = 0;

      /* Simplify even if we may think it is a no-op.
         We may think a memory load of a value smaller than WORD_SIZE
         is redundant because we haven't taken into account possible
         implicit extension.  reload_cse_simplify_set() will bring
         this out, so it's safer to simplify before we delete.  */
      count += reload_cse_simplify_set (body, insn);

      if (!count && reload_cse_noop_set_p (body))
        {
          rtx value = SET_DEST (body);
          if (REG_P (value)
              && ! REG_FUNCTION_VALUE_P (value))
            value = 0;
          delete_insn_and_edges (insn);
          return;
        }

      if (count > 0)
        apply_change_group ();
      else
        reload_cse_simplify_operands (insn, testreg);
    }
  else if (GET_CODE (body) == PARALLEL)
    {
      int i;
      int count = 0;
      rtx value = NULL_RTX;

      /* Registers mentioned in the clobber list for an asm cannot be reused
         within the body of the asm.  Invalidate those registers now so that
         we don't try to substitute values for them.  */
      if (asm_noperands (body) >= 0)
        {
          for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
            {
              rtx part = XVECEXP (body, 0, i);
              if (GET_CODE (part) == CLOBBER && REG_P (XEXP (part, 0)))
                cselib_invalidate_rtx (XEXP (part, 0));
            }
        }

      /* If every action in a PARALLEL is a noop, we can delete
         the entire PARALLEL.  */
      for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
        {
          rtx part = XVECEXP (body, 0, i);
          if (GET_CODE (part) == SET)
            {
              if (! reload_cse_noop_set_p (part))
                break;
              if (REG_P (SET_DEST (part))
                  && REG_FUNCTION_VALUE_P (SET_DEST (part)))
                {
                  if (value)
                    break;
                  value = SET_DEST (part);
                }
            }
          else if (GET_CODE (part) != CLOBBER)
            break;
        }

      if (i < 0)
        {
          delete_insn_and_edges (insn);
          /* We're done with this insn.  */
          return;
        }

      /* It's not a no-op, but we can try to simplify it.  */
      for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
        if (GET_CODE (XVECEXP (body, 0, i)) == SET)
          count += reload_cse_simplify_set (XVECEXP (body, 0, i), insn);

      if (count > 0)
        apply_change_group ();
      else
        reload_cse_simplify_operands (insn, testreg);
    }
}

/* Do a very simple CSE pass over the hard registers.

   This function detects no-op moves where we happened to assign two
   different pseudo-registers to the same hard register, and then
   copied one to the other.  Reload will generate a useless
   instruction copying a register to itself.

   This function also detects cases where we load a value from memory
   into two different registers, and (if memory is more expensive than
   registers) changes it to simply copy the first register into the
   second register.

   Another optimization is performed that scans the operands of each
   instruction to see whether the value is already available in a
   hard register.  It then replaces the operand with the hard register
   if possible, much like an optional reload would.  */

static void
reload_cse_regs_1 (rtx first)
{
  rtx insn;
  rtx testreg = gen_rtx_REG (VOIDmode, -1);

  cselib_init (CSELIB_RECORD_MEMORY);
  init_alias_analysis ();

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        reload_cse_simplify (insn, testreg);

      cselib_process_insn (insn);
    }

  /* Clean up.  */
  end_alias_analysis ();
  cselib_finish ();
}

/* Try to simplify a single SET instruction.  SET is the set pattern.
   INSN is the instruction it came from.
   This function only handles one case: if we set a register to a value
   which is not a register, we try to find that value in some other register
   and change the set into a register copy.  */

static int
reload_cse_simplify_set (rtx set, rtx insn)
{
  int did_change = 0;
  int dreg;
  rtx src;
  enum reg_class dclass;
  int old_cost;
  cselib_val *val;
  struct elt_loc_list *l;
#ifdef LOAD_EXTEND_OP
  enum rtx_code extend_op = UNKNOWN;
#endif
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));

  dreg = true_regnum (SET_DEST (set));
  if (dreg < 0)
    return 0;

  src = SET_SRC (set);
  if (side_effects_p (src) || true_regnum (src) >= 0)
    return 0;

  dclass = REGNO_REG_CLASS (dreg);

#ifdef LOAD_EXTEND_OP
  /* When replacing a memory with a register, we need to honor assumptions
     that combine made wrt the contents of sign bits.  We'll do this by
     generating an extend instruction instead of a reg->reg copy.  Thus
     the destination must be a register that we can widen.  */
  if (MEM_P (src)
      && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
      && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
      && !REG_P (SET_DEST (set)))
    return 0;
#endif

  val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0);
  if (! val)
    return 0;

  /* If memory loads are cheaper than register copies, don't change them.  */
  if (MEM_P (src))
    old_cost = memory_move_cost (GET_MODE (src), dclass, true);
  else if (REG_P (src))
    old_cost = register_move_cost (GET_MODE (src),
                                   REGNO_REG_CLASS (REGNO (src)), dclass);
  else
    old_cost = rtx_cost (src, SET, speed);

  for (l = val->locs; l; l = l->next)
    {
      rtx this_rtx = l->loc;
      int this_cost;

      if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0))
        {
#ifdef LOAD_EXTEND_OP
          if (extend_op != UNKNOWN)
            {
              HOST_WIDE_INT this_val;

              /* ??? I'm lazy and don't wish to handle CONST_DOUBLE.  Other
                 constants, such as SYMBOL_REF, cannot be extended.  */
              if (!CONST_INT_P (this_rtx))
                continue;

              this_val = INTVAL (this_rtx);
              switch (extend_op)
                {
                case ZERO_EXTEND:
                  this_val &= GET_MODE_MASK (GET_MODE (src));
                  break;
                case SIGN_EXTEND:
                  /* ??? In theory we're already extended.  */
                  if (this_val == trunc_int_for_mode (this_val, GET_MODE (src)))
                    break;
                default:
                  gcc_unreachable ();
                }
              this_rtx = GEN_INT (this_val);
            }
#endif
          this_cost = rtx_cost (this_rtx, SET, speed);
        }
      else if (REG_P (this_rtx))
        {
#ifdef LOAD_EXTEND_OP
          if (extend_op != UNKNOWN)
            {
              this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx);
              this_cost = rtx_cost (this_rtx, SET, speed);
            }
          else
#endif
            this_cost = register_move_cost (GET_MODE (this_rtx),
                                            REGNO_REG_CLASS (REGNO (this_rtx)),
                                            dclass);
        }
      else
        continue;

      /* If equal costs, prefer registers over anything else.  That
         tends to lead to smaller instructions on some machines.  */
      if (this_cost < old_cost
          || (this_cost == old_cost
              && REG_P (this_rtx)
              && !REG_P (SET_SRC (set))))
        {
#ifdef LOAD_EXTEND_OP
          if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
              && extend_op != UNKNOWN
#ifdef CANNOT_CHANGE_MODE_CLASS
              && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
                                            word_mode,
                                            REGNO_REG_CLASS (REGNO (SET_DEST (set))))
#endif
              )
            {
              rtx wide_dest = gen_rtx_REG (word_mode, REGNO (SET_DEST (set)));
              ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set));
              validate_change (insn, &SET_DEST (set), wide_dest, 1);
            }
#endif

          validate_unshare_change (insn, &SET_SRC (set), this_rtx, 1);
          old_cost = this_cost, did_change = 1;
        }
    }

  return did_change;
}
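
/* For example (hypothetical RTL; costs are target-dependent): given
	(set (reg:SI 1) (mem:SI (reg:SI 5)))
   when cselib records that (reg:SI 2) already holds the same value as the
   memory location, and a register-register move is cheaper than the load
   on the target, the set is rewritten as
	(set (reg:SI 1) (reg:SI 2))
   On a LOAD_EXTEND_OP target the destination is instead widened to
   word_mode and the source wrapped in the matching extension.  */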

/* Try to replace operands in INSN with equivalent values that are already
   in registers.  This can be viewed as optional reloading.

   For each non-register operand in the insn, see if any hard regs are
   known to be equivalent to that operand.  Record the alternatives which
   can accept these hard registers.  Among all alternatives, select the
   ones which are better or equal to the one currently matching, where
   "better" is in terms of '?' and '!' constraints.  Among the remaining
   alternatives, select the one which replaces most operands with
   hard registers.  */

static int
reload_cse_simplify_operands (rtx insn, rtx testreg)
{
  int i, j;

  /* For each operand, all registers that are equivalent to it.  */
  HARD_REG_SET equiv_regs[MAX_RECOG_OPERANDS];

  const char *constraints[MAX_RECOG_OPERANDS];

  /* Vector recording how bad an alternative is.  */
  int *alternative_reject;
  /* Vector recording how many registers can be introduced by choosing
     this alternative.  */
  int *alternative_nregs;
  /* Array of vectors recording, for each operand and each alternative,
     which hard register to substitute, or -1 if the operand should be
     left as it is.  */
  int *op_alt_regno[MAX_RECOG_OPERANDS];
  /* Array of alternatives, sorted in order of decreasing desirability.  */
  int *alternative_order;

  extract_insn (insn);

  if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0)
    return 0;

  /* Figure out which alternative currently matches.  */
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);

  alternative_reject = XALLOCAVEC (int, recog_data.n_alternatives);
  alternative_nregs = XALLOCAVEC (int, recog_data.n_alternatives);
  alternative_order = XALLOCAVEC (int, recog_data.n_alternatives);
  memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int));
  memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int));

  /* For each operand, find out which regs are equivalent.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      cselib_val *v;
      struct elt_loc_list *l;
      rtx op;

      CLEAR_HARD_REG_SET (equiv_regs[i]);

      /* cselib blows up on CODE_LABELs.  Trying to fix that doesn't seem
         right, so avoid the problem here.  Likewise if we have a constant
         and the insn pattern doesn't tell us the mode we need.  */
      if (LABEL_P (recog_data.operand[i])
          || (CONSTANT_P (recog_data.operand[i])
              && recog_data.operand_mode[i] == VOIDmode))
        continue;

      op = recog_data.operand[i];
#ifdef LOAD_EXTEND_OP
      if (MEM_P (op)
          && GET_MODE_BITSIZE (GET_MODE (op)) < BITS_PER_WORD
          && LOAD_EXTEND_OP (GET_MODE (op)) != UNKNOWN)
        {
          rtx set = single_set (insn);

          /* We might have multiple sets, some of which do implicit
             extension.  Punt on this for now.  */
          if (! set)
            continue;
          /* If the destination is also a MEM or a STRICT_LOW_PART, no
             extension applies.
             Also, if there is an explicit extension, we don't have to
             worry about an implicit one.  */
          else if (MEM_P (SET_DEST (set))
                   || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART
                   || GET_CODE (SET_SRC (set)) == ZERO_EXTEND
                   || GET_CODE (SET_SRC (set)) == SIGN_EXTEND)
            ; /* Continue ordinary processing.  */
#ifdef CANNOT_CHANGE_MODE_CLASS
          /* If the register cannot change mode to word_mode, it follows that
             it cannot have been used in word_mode.  */
          else if (REG_P (SET_DEST (set))
                   && CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
                                                word_mode,
                                                REGNO_REG_CLASS (REGNO (SET_DEST (set)))))
            ; /* Continue ordinary processing.  */
#endif
          /* If this is a straight load, make the extension explicit.  */
          else if (REG_P (SET_DEST (set))
                   && recog_data.n_operands == 2
                   && SET_SRC (set) == op
                   && SET_DEST (set) == recog_data.operand[1-i])
            {
              validate_change (insn, recog_data.operand_loc[i],
                               gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (op)),
                                              word_mode, op),
                               1);
              validate_change (insn, recog_data.operand_loc[1-i],
                               gen_rtx_REG (word_mode, REGNO (SET_DEST (set))),
                               1);
              if (! apply_change_group ())
                return 0;
              return reload_cse_simplify_operands (insn, testreg);
            }
          else
            /* ??? There might be arithmetic operations with memory that are
               safe to optimize, but is it worth the trouble?  */
            continue;
        }
#endif /* LOAD_EXTEND_OP */
      v = cselib_lookup (op, recog_data.operand_mode[i], 0);
      if (! v)
        continue;

      for (l = v->locs; l; l = l->next)
        if (REG_P (l->loc))
          SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc));
    }

  for (i = 0; i < recog_data.n_operands; i++)
    {
      enum machine_mode mode;
      int regno;
      const char *p;

      op_alt_regno[i] = XALLOCAVEC (int, recog_data.n_alternatives);
      for (j = 0; j < recog_data.n_alternatives; j++)
        op_alt_regno[i][j] = -1;

      p = constraints[i] = recog_data.constraints[i];
      mode = recog_data.operand_mode[i];

      /* Add the reject values for each alternative given by the constraints
         for this operand.  */
      j = 0;
      while (*p != '\0')
        {
          char c = *p++;
          if (c == ',')
            j++;
          else if (c == '?')
            alternative_reject[j] += 3;
          else if (c == '!')
            alternative_reject[j] += 300;
        }

      /* We won't change operands which are already registers.  We
         also don't want to modify output operands.  */
      regno = true_regnum (recog_data.operand[i]);
      if (regno >= 0
          || constraints[i][0] == '='
          || constraints[i][0] == '+')
        continue;

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        {
          enum reg_class rclass = NO_REGS;

          if (! TEST_HARD_REG_BIT (equiv_regs[i], regno))
            continue;

          SET_REGNO_RAW (testreg, regno);
          PUT_MODE (testreg, mode);

          /* We found a register equal to this operand.  Now look for all
             alternatives that can accept this register and have not been
             assigned a register they can use yet.  */
          j = 0;
          p = constraints[i];
          for (;;)
            {
              char c = *p;

              switch (c)
                {
                case '=':  case '+':  case '?':
                case '#':  case '&':  case '!':
                case '*':  case '%':
                case '0':  case '1':  case '2':  case '3':  case '4':
                case '5':  case '6':  case '7':  case '8':  case '9':
                case '<':  case '>':  case 'V':  case 'o':
                case 'E':  case 'F':  case 'G':  case 'H':
                case 's':  case 'i':  case 'n':
                case 'I':  case 'J':  case 'K':  case 'L':
                case 'M':  case 'N':  case 'O':  case 'P':
                case 'p':  case 'X':  case TARGET_MEM_CONSTRAINT:
                  /* These don't say anything we care about.  */
                  break;

                case 'g': case 'r':
                  rclass = reg_class_subunion[(int) rclass][(int) GENERAL_REGS];
                  break;

                default:
                  rclass
                    = (reg_class_subunion
                       [(int) rclass]
                       [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]);
                  break;

                case ',': case '\0':
                  /* See if REGNO fits this alternative, and set it up as the
                     replacement register if we don't have one for this
                     alternative yet and the operand being replaced is not
                     a cheap CONST_INT.  */
                  if (op_alt_regno[i][j] == -1
                      && recog_data.alternative_enabled_p[j]
                      && reg_fits_class_p (testreg, rclass, 0, mode)
                      && (!CONST_INT_P (recog_data.operand[i])
                          || (rtx_cost (recog_data.operand[i], SET,
                                        optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)))
                              > rtx_cost (testreg, SET,
                                          optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn))))))
                    {
                      alternative_nregs[j]++;
                      op_alt_regno[i][j] = regno;
                    }
                  j++;
                  rclass = NO_REGS;
                  break;
                }
              p += CONSTRAINT_LEN (c, p);

              if (c == '\0')
                break;
            }
        }
    }

  /* Record all alternatives which are better or equal to the currently
     matching one in the alternative_order array.  */
  for (i = j = 0; i < recog_data.n_alternatives; i++)
    if (alternative_reject[i] <= alternative_reject[which_alternative])
      alternative_order[j++] = i;
  recog_data.n_alternatives = j;

  /* Sort it.  Given a small number of alternatives, a dumb algorithm
     won't hurt too much.  */
  for (i = 0; i < recog_data.n_alternatives - 1; i++)
    {
      int best = i;
      int best_reject = alternative_reject[alternative_order[i]];
      int best_nregs = alternative_nregs[alternative_order[i]];
      int tmp;

      for (j = i + 1; j < recog_data.n_alternatives; j++)
        {
          int this_reject = alternative_reject[alternative_order[j]];
          int this_nregs = alternative_nregs[alternative_order[j]];

          if (this_reject < best_reject
              || (this_reject == best_reject && this_nregs > best_nregs))
            {
              best = j;
              best_reject = this_reject;
              best_nregs = this_nregs;
            }
        }

      tmp = alternative_order[best];
      alternative_order[best] = alternative_order[i];
      alternative_order[i] = tmp;
    }

  /* Substitute the operands as determined by op_alt_regno for the best
     alternative.  */
  j = alternative_order[0];

  for (i = 0; i < recog_data.n_operands; i++)
    {
      enum machine_mode mode = recog_data.operand_mode[i];
      if (op_alt_regno[i][j] == -1)
        continue;

      validate_change (insn, recog_data.operand_loc[i],
                       gen_rtx_REG (mode, op_alt_regno[i][j]), 1);
    }

  for (i = recog_data.n_dups - 1; i >= 0; i--)
    {
      int op = recog_data.dup_num[i];
      enum machine_mode mode = recog_data.operand_mode[op];

      if (op_alt_regno[op][j] == -1)
        continue;

      validate_change (insn, recog_data.dup_loc[i],
                       gen_rtx_REG (mode, op_alt_regno[op][j]), 1);
    }

  return apply_change_group ();
}
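
/* As an illustration (hypothetical constraints): for an operand whose
   constraint string is "r,m" and which is currently a MEM matching the
   "m" alternative, a hard register that cselib shows to be equivalent
   makes the "r" alternative a candidate.  Alternatives are then ranked
   by their '?'/'!' rejects and by how many operands they let us replace
   with registers, and the winner's substitutions are applied.  */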
\f
/* If reload couldn't use reg+reg+offset addressing, try to use reg+reg
   addressing now.
   This code might also be useful when reload gave up on reg+reg addressing
   because of clashes between the return register and INDEX_REG_CLASS.  */

/* The maximum number of uses of a register we can keep track of to
   replace them with reg+reg addressing.  */
#define RELOAD_COMBINE_MAX_USES 16
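
/* The transformation this machinery enables looks like this (sketch only,
   register numbers invented; the full pattern appears in
   reload_combine_recognize_pattern below):
	(set (reg:SI 2) (const_int 4))
	(set (reg:SI 2) (plus:SI (reg:SI 2) (reg:SI 3)))
	... (mem:SI (reg:SI 2)) ...
   becomes
	(set (reg:SI 9) (const_int 4))
	... (mem:SI (plus:SI (reg:SI 9) (reg:SI 3))) ...
   provided every use of (reg:SI 2) before it dies can be rewritten.  */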

/* Describes a recorded use of a register.  */
struct reg_use
{
  /* The insn where a register has been used.  */
  rtx insn;
  /* Points to the memory reference enclosing the use, if any, NULL_RTX
     otherwise.  */
  rtx containing_mem;
  /* Location of the register within INSN.  */
  rtx *usep;
  /* The reverse uid of the insn.  */
  int ruid;
};

/* If the register is used in some unknown fashion, USE_INDEX is negative.
   If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID
   indicates where it is first set or clobbered.
   Otherwise, USE_INDEX is the index of the last encountered use of the
   register (which is first among these we have seen since we scan backwards).
   USE_RUID is the ruid of the first of these uses that we encountered,
   i.e. the last one in insn order.
   If ALL_OFFSETS_MATCH is true, all encountered uses were inside a PLUS
   with a constant offset; OFFSET contains this constant in that case.
   STORE_RUID is always meaningful if we only want to use a value in a
   register in a different place: it denotes the next insn in the insn
   stream (i.e. the last encountered) that sets or clobbers the register.
   REAL_STORE_RUID is similar, but clobbers are ignored when updating it.  */
static struct
  {
    struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
    rtx offset;
    int use_index;
    int store_ruid;
    int real_store_ruid;
    int use_ruid;
    bool all_offsets_match;
  } reg_state[FIRST_PSEUDO_REGISTER];
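
/* A worked example of this bookkeeping (sketch; ruids and register numbers
   invented): ruids increase as reload_combine scans backwards, so later
   insns have smaller ruids.  If (reg 5) is used at ruids 3 and 7, then by
   the time the scan reaches its set, use_index has been decremented twice
   from RELOAD_COMBINE_MAX_USES and use_ruid is 3, the first use
   encountered (the last in insn order).  Processing the set then records
   store_ruid and marks the register dead again.  */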

/* Reverse linear uid.  This is increased in reload_combine while scanning
   the instructions from last to first.  It is used to set last_label_ruid
   and the store_ruid / use_ruid fields in reg_state.  */
static int reload_combine_ruid;

/* The RUID of the last label we encountered in reload_combine.  */
static int last_label_ruid;

/* The RUID of the last jump we encountered in reload_combine.  */
static int last_jump_ruid;

/* The register numbers of the first and last index register.  A value of
   -1 in LAST_INDEX_REG indicates that we've previously computed these
   values and found no suitable index registers.  */
static int first_index_reg = -1;
static int last_index_reg;

#define LABEL_LIVE(LABEL) \
  (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno])

/* Subroutine of reload_combine_split_ruids, called to fix up a single
   ruid pointed to by *PRUID if it is higher than SPLIT_RUID.  */

static inline void
reload_combine_split_one_ruid (int *pruid, int split_ruid)
{
  if (*pruid > split_ruid)
    (*pruid)++;
}

/* Called when we insert a new insn in a position we've already passed in
   the scan.  Examine all our state, increasing all ruids that are higher
   than SPLIT_RUID by one in order to make room for a new insn.  */

static void
reload_combine_split_ruids (int split_ruid)
{
  unsigned i;

  reload_combine_split_one_ruid (&reload_combine_ruid, split_ruid);
  reload_combine_split_one_ruid (&last_label_ruid, split_ruid);
  reload_combine_split_one_ruid (&last_jump_ruid, split_ruid);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      int j, idx = reg_state[i].use_index;
      reload_combine_split_one_ruid (&reg_state[i].use_ruid, split_ruid);
      reload_combine_split_one_ruid (&reg_state[i].store_ruid, split_ruid);
      reload_combine_split_one_ruid (&reg_state[i].real_store_ruid,
                                     split_ruid);
      if (idx < 0)
        continue;
      for (j = idx; j < RELOAD_COMBINE_MAX_USES; j++)
        {
          reload_combine_split_one_ruid (&reg_state[i].reg_use[j].ruid,
                                         split_ruid);
        }
    }
}

/* Called when we are about to rescan a previously encountered insn with
   reload_combine_note_use after modifying some part of it.  This clears all
   information about uses in that particular insn.  */

static void
reload_combine_purge_insn_uses (rtx insn)
{
  unsigned i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      int j, k, idx = reg_state[i].use_index;
      if (idx < 0)
        continue;
      j = k = RELOAD_COMBINE_MAX_USES;
      while (j-- > idx)
        {
          if (reg_state[i].reg_use[j].insn != insn)
            {
              k--;
              if (k != j)
                reg_state[i].reg_use[k] = reg_state[i].reg_use[j];
            }
        }
      reg_state[i].use_index = k;
    }
}

/* Called when we need to forget about all uses of REGNO after an insn
   which is identified by RUID.  */

static void
reload_combine_purge_reg_uses_after_ruid (unsigned regno, int ruid)
{
  int j, k, idx = reg_state[regno].use_index;
  if (idx < 0)
    return;
  j = k = RELOAD_COMBINE_MAX_USES;
  while (j-- > idx)
    {
      if (reg_state[regno].reg_use[j].ruid >= ruid)
        {
          k--;
          if (k != j)
            reg_state[regno].reg_use[k] = reg_state[regno].reg_use[j];
        }
    }
  reg_state[regno].use_index = k;
}

/* Find the use of REGNO with the ruid that is highest among those
   lower than RUID_LIMIT, and return it if it is the only use of this
   reg in the insn.  Return NULL otherwise.  */

static struct reg_use *
reload_combine_closest_single_use (unsigned regno, int ruid_limit)
{
  int i, best_ruid = 0;
  int use_idx = reg_state[regno].use_index;
  struct reg_use *retval;

  if (use_idx < 0)
    return NULL;
  retval = NULL;
  for (i = use_idx; i < RELOAD_COMBINE_MAX_USES; i++)
    {
      struct reg_use *use = reg_state[regno].reg_use + i;
      int this_ruid = use->ruid;
      if (this_ruid >= ruid_limit)
        continue;
      if (this_ruid > best_ruid)
        {
          best_ruid = this_ruid;
          retval = use;
        }
      else if (this_ruid == best_ruid)
        retval = NULL;
    }
  if (last_label_ruid >= best_ruid)
    return NULL;
  return retval;
}

/* After we've moved an add insn, fix up any debug insns that occur
   between the old location of the add and the new location.  REG is
   the destination register of the add insn; REPLACEMENT is the
   SET_SRC of the add.  FROM and TO specify the range in which we
   should make this change on debug insns.  */

static void
fixup_debug_insns (rtx reg, rtx replacement, rtx from, rtx to)
{
  rtx insn;
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      rtx t;

      if (!DEBUG_INSN_P (insn))
        continue;

      t = INSN_VAR_LOCATION_LOC (insn);
      t = simplify_replace_rtx (t, reg, replacement);
      validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), t, 0);
    }
}

/* Subroutine of reload_combine_recognize_const_pattern.  Try to replace REG
   with SRC in the insn described by USE, taking costs into account.  Return
   true if we made the replacement.  */

static bool
try_replace_in_use (struct reg_use *use, rtx reg, rtx src)
{
  rtx use_insn = use->insn;
  rtx mem = use->containing_mem;
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));

  if (mem != NULL_RTX)
    {
      addr_space_t as = MEM_ADDR_SPACE (mem);
      rtx oldaddr = XEXP (mem, 0);
      rtx newaddr = NULL_RTX;
      int old_cost = address_cost (oldaddr, GET_MODE (mem), as, speed);
      int new_cost;

      newaddr = simplify_replace_rtx (oldaddr, reg, src);
      if (memory_address_addr_space_p (GET_MODE (mem), newaddr, as))
        {
          XEXP (mem, 0) = newaddr;
          new_cost = address_cost (newaddr, GET_MODE (mem), as, speed);
          XEXP (mem, 0) = oldaddr;
          if (new_cost <= old_cost
              && validate_change (use_insn,
                                  &XEXP (mem, 0), newaddr, 0))
            return true;
        }
    }
  else
    {
      rtx new_set = single_set (use_insn);
      if (new_set
          && REG_P (SET_DEST (new_set))
          && GET_CODE (SET_SRC (new_set)) == PLUS
          && REG_P (XEXP (SET_SRC (new_set), 0))
          && CONSTANT_P (XEXP (SET_SRC (new_set), 1)))
        {
          rtx new_src;
          int old_cost = rtx_cost (SET_SRC (new_set), SET, speed);

          gcc_assert (rtx_equal_p (XEXP (SET_SRC (new_set), 0), reg));
          new_src = simplify_replace_rtx (SET_SRC (new_set), reg, src);

          if (rtx_cost (new_src, SET, speed) <= old_cost
              && validate_change (use_insn, &SET_SRC (new_set),
                                  new_src, 0))
            return true;
        }
    }
  return false;
}
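
/* For instance (hypothetical RTL): if the add being propagated is
	(set (reg:SI 1) (plus:SI (reg:SI 2) (const_int 16)))
   a recorded use such as (mem:SI (reg:SI 1)) can become
	(mem:SI (plus:SI (reg:SI 2) (const_int 16)))
   when the target accepts that address and address_cost does not
   increase; a use inside a follow-on add insn is folded the same way
   via simplify_replace_rtx.  */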

/* Called by reload_combine when scanning INSN.  This function tries to detect
   patterns where a constant is added to a register, and the result is used
   in an address.
   Return true if no further processing is needed on INSN; false if it wasn't
   recognized and should be handled normally.  */

static bool
reload_combine_recognize_const_pattern (rtx insn)
{
  int from_ruid = reload_combine_ruid;
  rtx set, pat, reg, src, addreg;
  unsigned int regno;
  struct reg_use *use;
  bool must_move_add;
  rtx add_moved_after_insn = NULL_RTX;
  int add_moved_after_ruid = 0;
  int clobbered_regno = -1;

  set = single_set (insn);
  if (set == NULL_RTX)
    return false;

  reg = SET_DEST (set);
  src = SET_SRC (set);
  if (!REG_P (reg)
      || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1
      || GET_MODE (reg) != Pmode
      || reg == stack_pointer_rtx)
    return false;

  regno = REGNO (reg);

  /* We look for a REG1 = REG2 + CONSTANT insn, followed by either
     uses of REG1 inside an address, or inside another add insn.  If
     possible and profitable, merge the addition into subsequent
     uses.  */
  if (GET_CODE (src) != PLUS
      || !REG_P (XEXP (src, 0))
      || !CONSTANT_P (XEXP (src, 1)))
    return false;

  addreg = XEXP (src, 0);
  must_move_add = rtx_equal_p (reg, addreg);

  pat = PATTERN (insn);
  if (must_move_add && set != pat)
    {
      /* We have to be careful when moving the add; apart from the
         single_set there may also be clobbers.  Recognize one special
         case, that of one clobber alongside the set (likely a clobber
         of the CC register).  */
      gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
      if (XVECLEN (pat, 0) != 2 || XVECEXP (pat, 0, 0) != set
          || GET_CODE (XVECEXP (pat, 0, 1)) != CLOBBER
          || !REG_P (XEXP (XVECEXP (pat, 0, 1), 0)))
        return false;
      clobbered_regno = REGNO (XEXP (XVECEXP (pat, 0, 1), 0));
    }

  do
    {
      use = reload_combine_closest_single_use (regno, from_ruid);

      if (use)
        /* Start the search for the next use from here.  */
        from_ruid = use->ruid;

      if (use && GET_MODE (*use->usep) == Pmode)
        {
          bool delete_add = false;
          rtx use_insn = use->insn;
          int use_ruid = use->ruid;

          /* Avoid moving the add insn past a jump.  */
          if (must_move_add && use_ruid <= last_jump_ruid)
            break;

          /* If the add clobbers another hard reg in parallel, don't move
             it past a real set of this hard reg.  */
          if (must_move_add && clobbered_regno >= 0
              && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
            break;

          gcc_assert (reg_state[regno].store_ruid <= use_ruid);
          /* Avoid moving a use of ADDREG past a point where it is stored.  */
          if (reg_state[REGNO (addreg)].store_ruid > use_ruid)
            break;

          /* We also must not move the addition past an insn that sets
             the same register, unless we can combine two add insns.  */
          if (must_move_add && reg_state[regno].store_ruid == use_ruid)
            {
              if (use->containing_mem == NULL_RTX)
                delete_add = true;
              else
                break;
            }

          if (try_replace_in_use (use, reg, src))
            {
              reload_combine_purge_insn_uses (use_insn);
              reload_combine_note_use (&PATTERN (use_insn), use_insn,
                                       use_ruid, NULL_RTX);

              if (delete_add)
                {
                  fixup_debug_insns (reg, src, insn, use_insn);
                  delete_insn (insn);
                  return true;
                }
              if (must_move_add)
                {
                  add_moved_after_insn = use_insn;
                  add_moved_after_ruid = use_ruid;
                }
              continue;
            }
        }
      /* If we get here, we couldn't handle this use.  */
      if (must_move_add)
        break;
    }
  while (use);

  if (!must_move_add || add_moved_after_insn == NULL_RTX)
    /* Process the add normally.  */
    return false;

  fixup_debug_insns (reg, src, insn, add_moved_after_insn);

  reorder_insns (insn, insn, add_moved_after_insn);
  reload_combine_purge_reg_uses_after_ruid (regno, add_moved_after_ruid);
  reload_combine_split_ruids (add_moved_after_ruid - 1);
  reload_combine_note_use (&PATTERN (insn), insn,
                           add_moved_after_ruid, NULL_RTX);
  reg_state[regno].store_ruid = add_moved_after_ruid;

  return true;
}
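
/* Sketch of the overall effect (hypothetical RTL; profitability is
   cost-driven):
	(set (reg:SI 1) (plus:SI (reg:SI 1) (const_int 4)))
	... (mem:SI (reg:SI 1)) ...
   can become
	... (mem:SI (plus:SI (reg:SI 1) (const_int 4))) ...
   with the add insn either deleted or reordered after the last use it
   could not be merged into.  */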

/* Called by reload_combine when scanning INSN.  Try to detect a pattern we
   can handle and improve.  Return true if no further processing is needed on
   INSN; false if it wasn't recognized and should be handled normally.  */

static bool
reload_combine_recognize_pattern (rtx insn)
{
  rtx set, reg, src;
  unsigned int regno;

  set = single_set (insn);
  if (set == NULL_RTX)
    return false;

  reg = SET_DEST (set);
  src = SET_SRC (set);
  if (!REG_P (reg)
      || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1)
    return false;

  regno = REGNO (reg);

  /* Look for (set (REGX) (CONST_INT))
     (set (REGX) (PLUS (REGX) (REGY)))
     ...
     ... (MEM (REGX)) ...
     and convert it to
     (set (REGZ) (CONST_INT))
     ...
     ... (MEM (PLUS (REGZ) (REGY))) ... .

     First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
     and that we know all uses of REGX before it dies.
     Also, explicitly check that REGX != REGY; our life information
     does not yet show whether REGY changes in this insn.  */

  if (GET_CODE (src) == PLUS
      && reg_state[regno].all_offsets_match
      && last_index_reg != -1
      && REG_P (XEXP (src, 1))
      && rtx_equal_p (XEXP (src, 0), reg)
      && !rtx_equal_p (XEXP (src, 1), reg)
      && reg_state[regno].use_index >= 0
      && reg_state[regno].use_index < RELOAD_COMBINE_MAX_USES
      && last_label_ruid < reg_state[regno].use_ruid)
    {
      rtx base = XEXP (src, 1);
      rtx prev = prev_nonnote_nondebug_insn (insn);
      rtx prev_set = prev ? single_set (prev) : NULL_RTX;
      rtx index_reg = NULL_RTX;
      rtx reg_sum = NULL_RTX;
      int i;

      /* Now we need to set INDEX_REG to an index register (denoted as
         REGZ in the illustration above) and REG_SUM to the expression
         register+register that we want to use to substitute uses of REG
         (typically in MEMs) with.  First check REG and BASE for being
         index registers; we can use them even if they are not dead.  */
      if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
          || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
                                REGNO (base)))
        {
          index_reg = reg;
          reg_sum = src;
        }
      else
        {
          /* Otherwise, look for a free index register.  Since we have
             checked above that neither REG nor BASE are index registers,
             if we find anything at all, it will be different from these
             two registers.  */
          for (i = first_index_reg; i <= last_index_reg; i++)
            {
              if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
                  && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
                  && reg_state[i].store_ruid <= reg_state[regno].use_ruid
                  && (call_used_regs[i] || df_regs_ever_live_p (i))
                  && (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
                  && !fixed_regs[i] && !global_regs[i]
                  && hard_regno_nregs[i][GET_MODE (reg)] == 1
                  && targetm.hard_regno_scratch_ok (i))
                {
                  index_reg = gen_rtx_REG (GET_MODE (reg), i);
                  reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
                  break;
                }
            }
        }

      /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
         (REGY), i.e. BASE, is not clobbered before the last use we'll
         create.  */
      if (reg_sum
          && prev_set
          && CONST_INT_P (SET_SRC (prev_set))
          && rtx_equal_p (SET_DEST (prev_set), reg)
          && (reg_state[REGNO (base)].store_ruid
              <= reg_state[regno].use_ruid))
        {
          /* Change destination register and, if necessary, the constant
             value in PREV, the constant loading instruction.  */
          validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
          if (reg_state[regno].offset != const0_rtx)
            validate_change (prev,
                             &SET_SRC (prev_set),
                             GEN_INT (INTVAL (SET_SRC (prev_set))
                                      + INTVAL (reg_state[regno].offset)),
                             1);

          /* Now for every use of REG that we have recorded, replace REG
             with REG_SUM.  */
          for (i = reg_state[regno].use_index;
               i < RELOAD_COMBINE_MAX_USES; i++)
            validate_unshare_change (reg_state[regno].reg_use[i].insn,
                                     reg_state[regno].reg_use[i].usep,
                                     /* Each change must have its own
                                        replacement.  */
                                     reg_sum, 1);

          if (apply_change_group ())
            {
              struct reg_use *lowest_ruid = NULL;

              /* For every new use of REG_SUM, we have to record the use
                 of BASE therein, i.e. operand 1.  */
              for (i = reg_state[regno].use_index;
                   i < RELOAD_COMBINE_MAX_USES; i++)
                {
                  struct reg_use *use = reg_state[regno].reg_use + i;
                  reload_combine_note_use (&XEXP (*use->usep, 1), use->insn,
                                           use->ruid, use->containing_mem);
                  if (lowest_ruid == NULL || use->ruid < lowest_ruid->ruid)
                    lowest_ruid = use;
                }

              fixup_debug_insns (reg, reg_sum, insn, lowest_ruid->insn);

              /* Delete the reg-reg addition.  */
              delete_insn (insn);

              if (reg_state[regno].offset != const0_rtx)
                /* Previous REG_EQUIV / REG_EQUAL notes for PREV
                   are now invalid.  */
                remove_reg_equal_equiv_notes (prev);

              reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
              return true;
            }
        }
    }
  return false;
}

static void
reload_combine (void)
{
  rtx insn, prev;
  basic_block bb;
  unsigned int r;
  int min_labelno, n_labels;
  HARD_REG_SET ever_live_at_start, *label_live;

  /* To avoid wasting too much time later searching for an index register,
     determine the minimum and maximum index register numbers.  */
  if (INDEX_REG_CLASS == NO_REGS)
    last_index_reg = -1;
  else if (first_index_reg == -1 && last_index_reg == 0)
    {
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
        if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
          {
            if (first_index_reg == -1)
              first_index_reg = r;

            last_index_reg = r;
          }

      /* If no index register is available, we can quit now.  Set LAST_INDEX_REG
         to -1 so we'll know to quit early the next time we get here.  */
      if (first_index_reg == -1)
        {
          last_index_reg = -1;
          return;
        }
    }

  /* Set up LABEL_LIVE and EVER_LIVE_AT_START.  The register lifetime
     information is a bit fuzzy immediately after reload, but it's
     still good enough to determine which registers are live at a jump
     destination.  */
  min_labelno = get_first_label_num ();
  n_labels = max_label_num () - min_labelno;
  label_live = XNEWVEC (HARD_REG_SET, n_labels);
  CLEAR_HARD_REG_SET (ever_live_at_start);

  FOR_EACH_BB_REVERSE (bb)
    {
      insn = BB_HEAD (bb);
      if (LABEL_P (insn))
        {
          HARD_REG_SET live;
          bitmap live_in = df_get_live_in (bb);

          REG_SET_TO_HARD_REG_SET (live, live_in);
          compute_use_by_pseudos (&live, live_in);
          COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
          IOR_HARD_REG_SET (ever_live_at_start, live);
        }
    }

  /* Initialize last_label_ruid, reload_combine_ruid and reg_state.  */
  last_label_ruid = last_jump_ruid = reload_combine_ruid = 0;
  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
    {
      reg_state[r].store_ruid = 0;
      reg_state[r].real_store_ruid = 0;
      if (fixed_regs[r])
        reg_state[r].use_index = -1;
      else
        reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
    }

  for (insn = get_last_insn (); insn; insn = prev)
    {
      bool control_flow_insn;
      rtx note;

      prev = PREV_INSN (insn);

      /* We cannot do our optimization across labels.  Invalidating all the use
         information we have would be costly, so we just note where the label
         is and then later disable any optimization that would cross it.  */
      if (LABEL_P (insn))
        last_label_ruid = reload_combine_ruid;
      else if (BARRIER_P (insn))
        for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
          if (! fixed_regs[r])
            reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;

      if (! NONDEBUG_INSN_P (insn))
        continue;

      reload_combine_ruid++;

      control_flow_insn = control_flow_insn_p (insn);
      if (control_flow_insn)
        last_jump_ruid = reload_combine_ruid;

      if (reload_combine_recognize_const_pattern (insn)
          || reload_combine_recognize_pattern (insn))
        continue;

      note_stores (PATTERN (insn), reload_combine_note_store, NULL);

      if (CALL_P (insn))
        {
          rtx link;

          for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
            if (call_used_regs[r])
              {
                reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
                reg_state[r].store_ruid = reload_combine_ruid;
              }

          for (link = CALL_INSN_FUNCTION_USAGE (insn); link;
               link = XEXP (link, 1))
            {
              rtx usage_rtx = XEXP (XEXP (link, 0), 0);
              if (REG_P (usage_rtx))
                {
                  unsigned int i;
                  unsigned int start_reg = REGNO (usage_rtx);
                  unsigned int num_regs
                    = hard_regno_nregs[start_reg][GET_MODE (usage_rtx)];
                  unsigned int end_reg = start_reg + num_regs - 1;
                  for (i = start_reg; i <= end_reg; i++)
                    if (GET_CODE (XEXP (link, 0)) == CLOBBER)
                      {
                        reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
                        reg_state[i].store_ruid = reload_combine_ruid;
                      }
                    else
                      reg_state[i].use_index = -1;
                }
            }
        }

      if (control_flow_insn && GET_CODE (PATTERN (insn)) != RETURN)
        {
          /* Non-spill registers might be used at the call destination in
             some unknown fashion, so we have to mark the unknown use.  */
          HARD_REG_SET *live;

          if ((condjump_p (insn) || condjump_in_parallel_p (insn))
              && JUMP_LABEL (insn))
            live = &LABEL_LIVE (JUMP_LABEL (insn));
          else
            live = &ever_live_at_start;

          for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
            if (TEST_HARD_REG_BIT (*live, r))
              reg_state[r].use_index = -1;
        }

      reload_combine_note_use (&PATTERN (insn), insn, reload_combine_ruid,
                               NULL_RTX);

      for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
        {
          if (REG_NOTE_KIND (note) == REG_INC && REG_P (XEXP (note, 0)))
            {
              int regno = REGNO (XEXP (note, 0));
              reg_state[regno].store_ruid = reload_combine_ruid;
              reg_state[regno].real_store_ruid = reload_combine_ruid;
              reg_state[regno].use_index = -1;
            }
        }
    }

  free (label_live);
}

/* Check if DST is a register or a subreg of a register; if it is,
   update store_ruid, real_store_ruid and use_index in the reg_state
   structure accordingly.  Called via note_stores from reload_combine.  */

static void
reload_combine_note_store (rtx dst, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  int regno = 0;
  int i;
  enum machine_mode mode = GET_MODE (dst);

  if (GET_CODE (dst) == SUBREG)
    {
      regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)),
                                   GET_MODE (SUBREG_REG (dst)),
                                   SUBREG_BYTE (dst),
                                   GET_MODE (dst));
      dst = SUBREG_REG (dst);
    }

  /* Some targets do argument pushes without adding REG_INC notes.  */

  if (MEM_P (dst))
    {
      dst = XEXP (dst, 0);
      if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
          || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC
          || GET_CODE (dst) == PRE_MODIFY || GET_CODE (dst) == POST_MODIFY)
        {
          regno = REGNO (XEXP (dst, 0));
          mode = GET_MODE (XEXP (dst, 0));
          for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
            {
              /* We could probably do better, but for now mark the register
                 as used in an unknown fashion and set/clobbered at this
                 insn.  */
              reg_state[i].use_index = -1;
              reg_state[i].store_ruid = reload_combine_ruid;
              reg_state[i].real_store_ruid = reload_combine_ruid;
            }
        }
      else
        return;
    }

  if (!REG_P (dst))
    return;
  regno += REGNO (dst);

  /* note_stores might have stripped a STRICT_LOW_PART, so we have to be
     careful with registers / register parts that are not full words.
     Similarly for ZERO_EXTRACT.  */
  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
      || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART)
    {
      for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
        {
          reg_state[i].use_index = -1;
          reg_state[i].store_ruid = reload_combine_ruid;
          reg_state[i].real_store_ruid = reload_combine_ruid;
        }
    }
  else
    {
      for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
        {
          reg_state[i].store_ruid = reload_combine_ruid;
          if (GET_CODE (set) == SET)
            reg_state[i].real_store_ruid = reload_combine_ruid;
          reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
        }
    }
}

/* XP points to a piece of rtl that has to be checked for any uses of
   registers.
   *XP is the pattern of INSN, or a part of it.
   Called from reload_combine, and recursively by itself.  */
static void
reload_combine_note_use (rtx *xp, rtx insn, int ruid, rtx containing_mem)
{
  rtx x = *xp;
  enum rtx_code code = x->code;
  const char *fmt;
  int i, j;
  rtx offset = const0_rtx; /* For the REG case below.  */

  switch (code)
    {
    case SET:
      if (REG_P (SET_DEST (x)))
        {
          reload_combine_note_use (&SET_SRC (x), insn, ruid, NULL_RTX);
          return;
        }
      break;

    case USE:
      /* If this is the USE of a return value, we can't change it.  */
      if (REG_P (XEXP (x, 0)) && REG_FUNCTION_VALUE_P (XEXP (x, 0)))
        {
          /* Mark the return register as used in an unknown fashion.  */
          rtx reg = XEXP (x, 0);
          int regno = REGNO (reg);
          int nregs = hard_regno_nregs[regno][GET_MODE (reg)];

          while (--nregs >= 0)
            reg_state[regno + nregs].use_index = -1;
          return;
        }
      break;

    case CLOBBER:
      if (REG_P (SET_DEST (x)))
        {
          /* No spurious CLOBBERs of pseudo registers may remain.  */
          gcc_assert (REGNO (SET_DEST (x)) < FIRST_PSEUDO_REGISTER);
          return;
        }
      break;

    case PLUS:
      /* We are interested in (plus (reg) (const_int)) .  */
      if (!REG_P (XEXP (x, 0))
          || !CONST_INT_P (XEXP (x, 1)))
        break;
      offset = XEXP (x, 1);
      x = XEXP (x, 0);
      /* Fall through.  */
    case REG:
      {
        int regno = REGNO (x);
        int use_index;
        int nregs;

        /* No spurious USEs of pseudo registers may remain.  */
        gcc_assert (regno < FIRST_PSEUDO_REGISTER);

        nregs = hard_regno_nregs[regno][GET_MODE (x)];

        /* We can't substitute into multi-hard-reg uses.  */
        if (nregs > 1)
          {
            while (--nregs >= 0)
              reg_state[regno + nregs].use_index = -1;
            return;
          }

        /* We may be called to update uses in previously seen insns.
           Don't add uses beyond the last store we saw.  */
        if (ruid < reg_state[regno].store_ruid)
          return;

        /* If this register is already used in some unknown fashion, we
           can't do anything.
           If we decrement the index from zero to -1, we can't store more
           uses, so this register becomes used in an unknown fashion.  */
        use_index = --reg_state[regno].use_index;
        if (use_index < 0)
          return;

        if (use_index == RELOAD_COMBINE_MAX_USES - 1)
          {
            /* This is the first use of this register we have seen since we
               marked it as dead.  */
            reg_state[regno].offset = offset;
            reg_state[regno].all_offsets_match = true;
            reg_state[regno].use_ruid = ruid;
          }
        else
          {
            if (reg_state[regno].use_ruid > ruid)
              reg_state[regno].use_ruid = ruid;

            if (! rtx_equal_p (offset, reg_state[regno].offset))
              reg_state[regno].all_offsets_match = false;
          }

        reg_state[regno].reg_use[use_index].insn = insn;
        reg_state[regno].reg_use[use_index].ruid = ruid;
        reg_state[regno].reg_use[use_index].containing_mem = containing_mem;
        reg_state[regno].reg_use[use_index].usep = xp;
        return;
      }

    case MEM:
      containing_mem = x;
      break;

    default:
      break;
    }

  /* Recursively process the components of X.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        reload_combine_note_use (&XEXP (x, i), insn, ruid, containing_mem);
      else if (fmt[i] == 'E')
        {
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            reload_combine_note_use (&XVECEXP (x, i, j), insn, ruid,
                                     containing_mem);
        }
    }
}
\f
/* See if we can reduce the cost of a constant by replacing a move
   with an add.  We track situations in which a register is set to a
   constant or to a register plus a constant.  */
/* We cannot do our optimization across labels.  Invalidating all the
   information about register contents we have would be costly, so we
   use move2add_last_label_luid to note where the label is and then
   later disable any optimization that would cross it.
   reg_offset[n] / reg_base_reg[n] / reg_symbol_ref[n] / reg_mode[n]
   are only valid if reg_set_luid[n] is greater than
   move2add_last_label_luid.  */
static int reg_set_luid[FIRST_PSEUDO_REGISTER];

/* If reg_base_reg[n] is negative, register n has been set to
   reg_offset[n] or reg_symbol_ref[n] + reg_offset[n] in mode reg_mode[n].
   If reg_base_reg[n] is non-negative, register n has been set to the
   sum of reg_offset[n] and the value of register reg_base_reg[n]
   before reg_set_luid[n], calculated in mode reg_mode[n] .  */
static HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
static int reg_base_reg[FIRST_PSEUDO_REGISTER];
static rtx reg_symbol_ref[FIRST_PSEUDO_REGISTER];
static enum machine_mode reg_mode[FIRST_PSEUDO_REGISTER];

/* move2add_luid is linearly increased while scanning the instructions
   from first to last.  It is used to set reg_set_luid in
   reload_cse_move2add and move2add_note_store.  */
static int move2add_luid;

/* move2add_last_label_luid is set whenever a label is found.  Labels
   invalidate all previously collected reg_offset data.  */
static int move2add_last_label_luid;

/* ??? We don't know how zero / sign extension is handled, hence we
   can't go from a narrower to a wider mode.  */
#define MODES_OK_FOR_MOVE2ADD(OUTMODE, INMODE) \
  (GET_MODE_SIZE (OUTMODE) == GET_MODE_SIZE (INMODE) \
   || (GET_MODE_SIZE (OUTMODE) <= GET_MODE_SIZE (INMODE) \
       && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (OUTMODE), \
                                 GET_MODE_BITSIZE (INMODE))))
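
/* For example (assuming a target where TRULY_NOOP_TRUNCATION holds for
   SImode -> QImode): a value recorded for a register in SImode may be
   reused when the register is next set in QImode, since truncation is a
   no-op there.  The reverse direction, QImode knowledge feeding an SImode
   set, is rejected because the extension behavior is unknown.  */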

/* This function is called with INSN that sets REG to (SYM + OFF),
   while REG is known to already have value (SYM + offset).
   This function tries to change INSN into an add instruction
   (set (REG) (plus (REG) (OFF - offset))) using the known value.
   It also updates the information about REG's known value.
   Return true if we made a change.  */

static bool
move2add_use_add2_insn (rtx reg, rtx sym, rtx off, rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src = SET_SRC (pat);
  int regno = REGNO (reg);
  rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[regno],
                              GET_MODE (reg));
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
  bool changed = false;

  /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
     use (set (reg) (reg)) instead.
     We don't delete this insn, nor do we convert it into a
     note, to avoid losing register notes or the return
     value flag.  jump2 already knows how to get rid of
     no-op moves.  */
  if (new_src == const0_rtx)
    {
      /* If the constants are different, this is a
         truncation, that, if turned into (set (reg)
         (reg)), would be discarded.  Maybe we should
         try a truncMN pattern?  */
      if (INTVAL (off) == reg_offset [regno])
        changed = validate_change (insn, &SET_SRC (pat), reg, 0);
    }
  else
    {
      struct full_rtx_costs oldcst, newcst;
      rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);

      get_full_rtx_cost (pat, SET, &oldcst);
      SET_SRC (pat) = tem;
      get_full_rtx_cost (pat, SET, &newcst);
      SET_SRC (pat) = src;

      if (costs_lt_p (&newcst, &oldcst, speed)
          && have_add2_insn (reg, new_src))
        changed = validate_change (insn, &SET_SRC (pat), tem, 0);
      else if (sym == NULL_RTX && GET_MODE (reg) != BImode)
        {
          enum machine_mode narrow_mode;
          for (narrow_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
               narrow_mode != VOIDmode
                 && narrow_mode != GET_MODE (reg);
               narrow_mode = GET_MODE_WIDER_MODE (narrow_mode))
            {
              if (have_insn_for (STRICT_LOW_PART, narrow_mode)
                  && ((reg_offset[regno] & ~GET_MODE_MASK (narrow_mode))
                      == (INTVAL (off) & ~GET_MODE_MASK (narrow_mode))))
                {
                  rtx narrow_reg = gen_rtx_REG (narrow_mode,
                                                REGNO (reg));
                  rtx narrow_src = gen_int_mode (INTVAL (off),
                                                 narrow_mode);
                  rtx new_set
                    = gen_rtx_SET (VOIDmode,
                                   gen_rtx_STRICT_LOW_PART (VOIDmode,
                                                            narrow_reg),
                                   narrow_src);
                  changed = validate_change (insn, &PATTERN (insn),
                                             new_set, 0);
                  if (changed)
                    break;
                }
            }
        }
    }
  reg_set_luid[regno] = move2add_luid;
  reg_base_reg[regno] = -1;
  reg_mode[regno] = GET_MODE (reg);
  reg_symbol_ref[regno] = sym;
  reg_offset[regno] = INTVAL (off);
  return changed;
}
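
/* The narrow-mode fallback above can help when only the low bits change
   (sketch; a QImode STRICT_LOW_PART insn on the target is assumed):
	(set (reg:SI 1) (const_int 0x12345600))
	...
	(set (reg:SI 1) (const_int 0x12345604))
   the second insn can become
	(set (strict_low_part (reg:QI 1)) (const_int 4))
   since the two constants agree outside QImode's mask.  */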


/* This function is called with INSN that sets REG to (SYM + OFF),
   but REG doesn't have known value (SYM + offset).  This function
   tries to find another register which is known to already have
   value (SYM + offset) and change INSN into an add instruction
   (set (REG) (plus (the found register) (OFF - offset))) if such
   a register is found.  It also updates the information about
   REG's known value.
   Return true iff we made a change.  */

static bool
move2add_use_add3_insn (rtx reg, rtx sym, rtx off, rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src = SET_SRC (pat);
  int regno = REGNO (reg);
  int min_regno = 0;
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
  int i;
  bool changed = false;
  struct full_rtx_costs oldcst, newcst, mincst;
  rtx plus_expr;

  init_costs_to_max (&mincst);
  get_full_rtx_cost (pat, SET, &oldcst);

  plus_expr = gen_rtx_PLUS (GET_MODE (reg), reg, const0_rtx);
  SET_SRC (pat) = plus_expr;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (reg_set_luid[i] > move2add_last_label_luid
        && reg_mode[i] == GET_MODE (reg)
        && reg_base_reg[i] < 0
        && reg_symbol_ref[i] != NULL_RTX
        && rtx_equal_p (sym, reg_symbol_ref[i]))
      {
        rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[i],
                                    GET_MODE (reg));
        /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
           use (set (reg) (reg)) instead.
           We don't delete this insn, nor do we convert it into a
           note, to avoid losing register notes or the return
           value flag.  jump2 already knows how to get rid of
           no-op moves.  */
        if (new_src == const0_rtx)
          {
            init_costs_to_zero (&mincst);
            min_regno = i;
            break;
          }
        else
          {
            XEXP (plus_expr, 1) = new_src;
            get_full_rtx_cost (pat, SET, &newcst);

            if (costs_lt_p (&newcst, &mincst, speed))
              {
                mincst = newcst;
                min_regno = i;
              }
          }
      }
  SET_SRC (pat) = src;

  if (costs_lt_p (&mincst, &oldcst, speed))
    {
      rtx tem;

      tem = gen_rtx_REG (GET_MODE (reg), min_regno);
      if (i != min_regno)
        {
          rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[min_regno],
                                      GET_MODE (reg));
          tem = gen_rtx_PLUS (GET_MODE (reg), tem, new_src);
        }
      if (validate_change (insn, &SET_SRC (pat), tem, 0))
        changed = true;
    }
  reg_set_luid[regno] = move2add_luid;
  reg_base_reg[regno] = -1;
  reg_mode[regno] = GET_MODE (reg);
  reg_symbol_ref[regno] = sym;
  reg_offset[regno] = INTVAL (off);
  return changed;
}
1807
1808 /* Convert move insns with constant inputs to additions if they are cheaper.
1809 Return true if any changes were made. */
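
/* For instance (illustrative; the rewrite only happens when the
   target's rtx costs say the add form is cheaper):

     (set (reg:SI 0) (const_int 100))
     ...
     (set (reg:SI 0) (const_int 108))

   may become

     (set (reg:SI 0) (const_int 100))
     ...
     (set (reg:SI 0) (plus (reg:SI 0) (const_int 8)))

   which typically wins when materializing the constant takes more
   than one instruction.  */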
static bool
reload_cse_move2add (rtx first)
{
  int i;
  rtx insn;
  bool changed = false;

  for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
    {
      reg_set_luid[i] = 0;
      reg_offset[i] = 0;
      reg_base_reg[i] = 0;
      reg_symbol_ref[i] = NULL_RTX;
      reg_mode[i] = VOIDmode;
    }

  move2add_last_label_luid = 0;
  move2add_luid = 2;
  for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++)
    {
      rtx pat, note;

      if (LABEL_P (insn))
        {
          move2add_last_label_luid = move2add_luid;
          /* We're going to increment move2add_luid twice after a
             label, so that we can use move2add_last_label_luid + 1 as
             the luid for constants.  */
          move2add_luid++;
          continue;
        }
      if (! INSN_P (insn))
        continue;
      pat = PATTERN (insn);
      /* For simplicity, we only perform this optimization on
         straightforward SETs.  */
      if (GET_CODE (pat) == SET
          && REG_P (SET_DEST (pat)))
        {
          rtx reg = SET_DEST (pat);
          int regno = REGNO (reg);
          rtx src = SET_SRC (pat);

          /* Check if we have valid information on the contents of this
             register in the mode of REG.  */
          if (reg_set_luid[regno] > move2add_last_label_luid
              && MODES_OK_FOR_MOVE2ADD (GET_MODE (reg), reg_mode[regno])
              && dbg_cnt (cse2_move2add))
            {
              /* Try to transform
                   (set (REGX) (CONST_INT A))
                   ...
                   (set (REGX) (CONST_INT B))
                 to
                   (set (REGX) (CONST_INT A))
                   ...
                   (set (REGX) (plus (REGX) (CONST_INT B-A)))
                 or
                   (set (REGX) (CONST_INT A))
                   ...
                   (set (STRICT_LOW_PART (REGX)) (CONST_INT B))  */

              if (CONST_INT_P (src)
                  && reg_base_reg[regno] < 0
                  && reg_symbol_ref[regno] == NULL_RTX)
                {
                  changed |= move2add_use_add2_insn (reg, NULL_RTX, src, insn);
                  continue;
                }

              /* Try to transform
                   (set (REGX) (REGY))
                   (set (REGX) (PLUS (REGX) (CONST_INT A)))
                   ...
                   (set (REGX) (REGY))
                   (set (REGX) (PLUS (REGX) (CONST_INT B)))
                 to
                   (set (REGX) (REGY))
                   (set (REGX) (PLUS (REGX) (CONST_INT A)))
                   ...
                   (set (REGX) (plus (REGX) (CONST_INT B-A)))  */
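              /* Here INSN is the second (set (REGX) (REGY)); the code
                 below pairs it with the add insn that follows it.  If
                 the pair just recomputes a value REGX is already known
                 to hold, the copy can be deleted and only the add's
                 constant needs adjusting.  */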
              else if (REG_P (src)
                       && reg_set_luid[regno] == reg_set_luid[REGNO (src)]
                       && reg_base_reg[regno] == reg_base_reg[REGNO (src)]
                       && MODES_OK_FOR_MOVE2ADD (GET_MODE (reg),
                                                 reg_mode[REGNO (src)]))
                {
                  rtx next = next_nonnote_nondebug_insn (insn);
                  rtx set = NULL_RTX;
                  if (next)
                    set = single_set (next);
                  if (set
                      && SET_DEST (set) == reg
                      && GET_CODE (SET_SRC (set)) == PLUS
                      && XEXP (SET_SRC (set), 0) == reg
                      && CONST_INT_P (XEXP (SET_SRC (set), 1)))
                    {
                      rtx src3 = XEXP (SET_SRC (set), 1);
                      HOST_WIDE_INT added_offset = INTVAL (src3);
                      HOST_WIDE_INT base_offset = reg_offset[REGNO (src)];
                      HOST_WIDE_INT regno_offset = reg_offset[regno];
                      rtx new_src
                        = gen_int_mode (added_offset
                                        + base_offset
                                        - regno_offset,
                                        GET_MODE (reg));
                      bool success = false;
                      bool speed
                        = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));

                      if (new_src == const0_rtx)
                        /* See above why we create (set (reg) (reg)) here.  */
                        success
                          = validate_change (next, &SET_SRC (set), reg, 0);
                      else
                        {
                          rtx old_src = SET_SRC (set);
                          struct full_rtx_costs oldcst, newcst;
                          rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);

                          get_full_rtx_cost (set, SET, &oldcst);
                          SET_SRC (set) = tem;
                          get_full_rtx_cost (tem, SET, &newcst);
                          SET_SRC (set) = old_src;
                          costs_add_n_insns (&oldcst, 1);

                          if (costs_lt_p (&newcst, &oldcst, speed)
                              && have_add2_insn (reg, new_src))
                            {
                              rtx newpat = gen_rtx_SET (VOIDmode, reg, tem);
                              success
                                = validate_change (next, &PATTERN (next),
                                                   newpat, 0);
                            }
                        }
                      if (success)
                        delete_insn (insn);
                      changed |= success;
                      insn = next;
                      reg_mode[regno] = GET_MODE (reg);
                      reg_offset[regno]
                        = trunc_int_for_mode (added_offset + base_offset,
                                              GET_MODE (reg));
                      continue;
                    }
                }
            }

          /* Try to transform
               (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
               ...
               (set (REGY) (CONST (PLUS (SYMBOL_REF) (CONST_INT B))))
             to
               (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
               ...
               (set (REGY) (CONST (PLUS (REGX) (CONST_INT B-A))))  */
          if ((GET_CODE (src) == SYMBOL_REF
               || (GET_CODE (src) == CONST
                   && GET_CODE (XEXP (src, 0)) == PLUS
                   && GET_CODE (XEXP (XEXP (src, 0), 0)) == SYMBOL_REF
                   && CONST_INT_P (XEXP (XEXP (src, 0), 1))))
              && dbg_cnt (cse2_move2add))
            {
              rtx sym, off;

              if (GET_CODE (src) == SYMBOL_REF)
                {
                  sym = src;
                  off = const0_rtx;
                }
              else
                {
                  sym = XEXP (XEXP (src, 0), 0);
                  off = XEXP (XEXP (src, 0), 1);
                }

              /* If the reg already contains the sum of SYM and some
                 constant value, we can use an add2 insn.  */
              if (reg_set_luid[regno] > move2add_last_label_luid
                  && MODES_OK_FOR_MOVE2ADD (GET_MODE (reg), reg_mode[regno])
                  && reg_base_reg[regno] < 0
                  && reg_symbol_ref[regno] != NULL_RTX
                  && rtx_equal_p (sym, reg_symbol_ref[regno]))
                changed |= move2add_use_add2_insn (reg, sym, off, insn);

              /* Otherwise, we have to find a register whose value is
                 the sum of SYM and some constant value.  */
              else
                changed |= move2add_use_add3_insn (reg, sym, off, insn);

              continue;
            }
        }

      for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
        {
          if (REG_NOTE_KIND (note) == REG_INC
              && REG_P (XEXP (note, 0)))
            {
              /* Reset the information about this register.  */
              int regno = REGNO (XEXP (note, 0));
              if (regno < FIRST_PSEUDO_REGISTER)
                reg_set_luid[regno] = 0;
            }
        }
      note_stores (PATTERN (insn), move2add_note_store, insn);

      /* If INSN is a conditional branch, we try to extract an
         implicit set out of it.  */
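      /* E.g. (illustrative) for

           (set (pc) (if_then_else (ne (reg:SI 0) (const_int 5))
                                   (label_ref 23) (pc)))

         the fall-through path is only reached when register 0 equals 5,
         so it is recorded below as if (set (reg:SI 0) (const_int 5))
         had been executed; any other entry point must go through a
         label, which invalidates the recorded information.  */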
      if (any_condjump_p (insn))
        {
          rtx cnd = fis_get_condition (insn);

          if (cnd != NULL_RTX
              && GET_CODE (cnd) == NE
              && REG_P (XEXP (cnd, 0))
              && !reg_set_p (XEXP (cnd, 0), insn)
              /* The following two checks, which are also in
                 move2add_note_store, are intended to reduce the
                 number of calls to gen_rtx_SET to avoid memory
                 allocation if possible.  */
              && SCALAR_INT_MODE_P (GET_MODE (XEXP (cnd, 0)))
              && hard_regno_nregs[REGNO (XEXP (cnd, 0))]
                                 [GET_MODE (XEXP (cnd, 0))] == 1
              && CONST_INT_P (XEXP (cnd, 1)))
            {
              rtx implicit_set
                = gen_rtx_SET (VOIDmode, XEXP (cnd, 0), XEXP (cnd, 1));
              move2add_note_store (SET_DEST (implicit_set), implicit_set,
                                   insn);
            }
        }

      /* If this is a CALL_INSN, all call-used registers must be assumed
         to hold unknown values afterwards.  */
      if (CALL_P (insn))
        {
          for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
            {
              if (call_used_regs[i])
                /* Reset the information about this register.  */
                reg_set_luid[i] = 0;
            }
        }
    }
  return changed;
}

/* SET is a SET or CLOBBER that sets DST.  DATA is the insn which
   contains SET.
   Update reg_set_luid, reg_offset and reg_base_reg accordingly.
   Called from reload_cse_move2add via note_stores.  */
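
/* For example (illustrative): after

     (set (reg:SI 1) (plus:SI (reg:SI 0) (const_int 4)))

   register 1 is recorded with base register 0 and offset 4, while

     (set (reg:SI 1) (const_int 100))

   records register 1 as the constant 100 with no base register.  */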

static void
move2add_note_store (rtx dst, const_rtx set, void *data)
{
  rtx insn = (rtx) data;
  unsigned int regno = 0;
  unsigned int nregs = 0;
  unsigned int i;
  enum machine_mode mode = GET_MODE (dst);

  if (GET_CODE (dst) == SUBREG)
    {
      regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)),
                                   GET_MODE (SUBREG_REG (dst)),
                                   SUBREG_BYTE (dst),
                                   GET_MODE (dst));
      nregs = subreg_nregs (dst);
      dst = SUBREG_REG (dst);
    }

  /* Some targets do argument pushes without adding REG_INC notes.  */

  if (MEM_P (dst))
    {
      dst = XEXP (dst, 0);
      if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
          || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC)
        reg_set_luid[REGNO (XEXP (dst, 0))] = 0;
      return;
    }
  if (!REG_P (dst))
    return;

  regno += REGNO (dst);
  if (!nregs)
    nregs = hard_regno_nregs[regno][mode];

  if (SCALAR_INT_MODE_P (GET_MODE (dst))
      && nregs == 1 && GET_CODE (set) == SET)
    {
      rtx note, sym = NULL_RTX;
      HOST_WIDE_INT off;

      note = find_reg_equal_equiv_note (insn);
      if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
        {
          sym = XEXP (note, 0);
          off = 0;
        }
      else if (note && GET_CODE (XEXP (note, 0)) == CONST
               && GET_CODE (XEXP (XEXP (note, 0), 0)) == PLUS
               && GET_CODE (XEXP (XEXP (XEXP (note, 0), 0), 0)) == SYMBOL_REF
               && CONST_INT_P (XEXP (XEXP (XEXP (note, 0), 0), 1)))
        {
          sym = XEXP (XEXP (XEXP (note, 0), 0), 0);
          off = INTVAL (XEXP (XEXP (XEXP (note, 0), 0), 1));
        }

      if (sym != NULL_RTX)
        {
          reg_base_reg[regno] = -1;
          reg_symbol_ref[regno] = sym;
          reg_offset[regno] = off;
          reg_mode[regno] = mode;
          reg_set_luid[regno] = move2add_luid;
          return;
        }
    }

  if (SCALAR_INT_MODE_P (GET_MODE (dst))
      && nregs == 1 && GET_CODE (set) == SET
      && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
    {
      rtx src = SET_SRC (set);
      rtx base_reg;
      HOST_WIDE_INT offset;
      int base_regno;
      /* This may be different from mode, if SET_DEST (set) is a
         SUBREG.  */
      enum machine_mode dst_mode = GET_MODE (dst);

      switch (GET_CODE (src))
        {
        case PLUS:
          if (REG_P (XEXP (src, 0)))
            {
              base_reg = XEXP (src, 0);

              if (CONST_INT_P (XEXP (src, 1)))
                offset = INTVAL (XEXP (src, 1));
              else if (REG_P (XEXP (src, 1))
                       && (reg_set_luid[REGNO (XEXP (src, 1))]
                           > move2add_last_label_luid)
                       && (MODES_OK_FOR_MOVE2ADD
                           (dst_mode, reg_mode[REGNO (XEXP (src, 1))])))
                {
                  if (reg_base_reg[REGNO (XEXP (src, 1))] < 0
                      && reg_symbol_ref[REGNO (XEXP (src, 1))] == NULL_RTX)
                    offset = reg_offset[REGNO (XEXP (src, 1))];
                  /* Maybe the first register is known to be a
                     constant.  */
                  else if (reg_set_luid[REGNO (base_reg)]
                             > move2add_last_label_luid
                           && (MODES_OK_FOR_MOVE2ADD
                               (dst_mode, reg_mode[REGNO (base_reg)]))
                           && reg_base_reg[REGNO (base_reg)] < 0
                           && reg_symbol_ref[REGNO (base_reg)] == NULL_RTX)
                    {
                      offset = reg_offset[REGNO (base_reg)];
                      base_reg = XEXP (src, 1);
                    }
                  else
                    goto invalidate;
                }
              else
                goto invalidate;

              break;
            }

          goto invalidate;

        case REG:
          base_reg = src;
          offset = 0;
          break;

        case CONST_INT:
          /* Start tracking the register as a constant.  */
          reg_base_reg[regno] = -1;
          reg_symbol_ref[regno] = NULL_RTX;
          reg_offset[regno] = INTVAL (SET_SRC (set));
          /* We assign the same luid to all registers set to constants.  */
          reg_set_luid[regno] = move2add_last_label_luid + 1;
          reg_mode[regno] = mode;
          return;

        default:
        invalidate:
          /* Invalidate the contents of the register.  */
          reg_set_luid[regno] = 0;
          return;
        }

      base_regno = REGNO (base_reg);
      /* If information about the base register is not valid, set it
         up as a new base register, pretending its value is known
         starting from the current insn.  */
      if (reg_set_luid[base_regno] <= move2add_last_label_luid)
        {
          reg_base_reg[base_regno] = base_regno;
          reg_symbol_ref[base_regno] = NULL_RTX;
          reg_offset[base_regno] = 0;
          reg_set_luid[base_regno] = move2add_luid;
          reg_mode[base_regno] = mode;
        }
      else if (! MODES_OK_FOR_MOVE2ADD (dst_mode,
                                        reg_mode[base_regno]))
        goto invalidate;

      reg_mode[regno] = mode;

      /* Copy base information from our base register.  */
      reg_set_luid[regno] = reg_set_luid[base_regno];
      reg_base_reg[regno] = reg_base_reg[base_regno];
      reg_symbol_ref[regno] = reg_symbol_ref[base_regno];

      /* Compute the sum of the offsets or constants.  */
      reg_offset[regno] = trunc_int_for_mode (offset
                                              + reg_offset[base_regno],
                                              dst_mode);
    }
  else
    {
      unsigned int endregno = regno + nregs;

      for (i = regno; i < endregno; i++)
        /* Reset the information about this register.  */
        reg_set_luid[i] = 0;
    }
}
\f
static bool
gate_handle_postreload (void)
{
  return (optimize > 0 && reload_completed);
}


static unsigned int
rest_of_handle_postreload (void)
{
  if (!dbg_cnt (postreload_cse))
    return 0;

  /* Do a very simple CSE pass over just the hard registers.  */
  reload_cse_regs (get_insns ());
  /* Reload_cse_regs can eliminate potentially-trapping MEMs.
     Remove any EH edges associated with them.  */
  if (cfun->can_throw_non_call_exceptions)
    purge_all_dead_edges ();

  return 0;
}

struct rtl_opt_pass pass_postreload_cse =
{
 {
  RTL_PASS,
  "postreload",                         /* name */
  gate_handle_postreload,               /* gate */
  rest_of_handle_postreload,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_RELOAD_CSE_REGS,                   /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func                        /* todo_flags_finish */
 }
};