/* Perform instruction reorganizations for delay slot filling.
   Copyright (C) 1992-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
   Hacked by Michael Tiemann (tiemann@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction reorganization pass.

   This pass runs after register allocation and final jump
   optimization.  It should be the last pass to run before peephole.
   It serves primarily to fill delay slots of insns, typically branch
   and call insns.  Other insns typically involve more complicated
   interactions of data dependencies and resource constraints, and
   are better handled by scheduling before register allocation (by the
   function `schedule_insns').

   The Branch Penalty is the number of extra cycles that are needed to
   execute a branch insn.  On an ideal machine, branches take a single
   cycle, and the Branch Penalty is 0.  Several RISC machines approach
   branch delays differently:

   The MIPS has a single branch delay slot.  Most insns
   (except other branches) can be used to fill this slot.  When the
   slot is filled, two insns execute in two cycles, reducing the
   branch penalty to zero.
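
   For example (an illustrative sketch only: MIPS mnemonics, with the
   registers and surrounding code invented for the example), filling
   the slot turns

	addu	$2,$2,$3
	beq	$4,$0,L2
	nop			# empty delay slot

   into

	beq	$4,$0,L2
	addu	$2,$2,$3	# moved into the delay slot

   so the add executes for free while the branch resolves.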

   The SPARC always has a branch delay slot, but its effects can be
   annulled when the branch is not taken.  This means that if we find
   no other source of insns, we can hoist an insn from the branch
   target that would only be safe to execute if we knew the branch
   was taken.
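
   As an illustrative sketch (SPARC mnemonics; the hoisted insn is
   invented for the example):

	be,a	L1		! annulled branch: the slot below
	 add	%o0, 1, %o0	! executes only if the branch is taken

   The `add' comes from the code at L1; when the branch falls through
   it is annulled, so hoisting it is safe even though it is only
   valid on the taken path.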

   The HP-PA always has a branch delay slot.  For unconditional branches
   its effects can be annulled when the branch is taken.  The effects
   of the delay slot in a conditional branch can be nullified for forward
   taken branches, or for untaken backward branches.  This means
   we can hoist insns from the fall-through path for forward branches or
   steal insns from the target of backward branches.

   The TMS320C3x and C4x have three branch delay slots.  When the three
   slots are filled, the branch penalty is zero.  Most insns can fill the
   delay slots except jump insns.

   Three techniques for filling delay slots have been implemented so far:

   (1) `fill_simple_delay_slots' is the simplest, most efficient way
   to fill delay slots.  This pass first looks for insns which come
   from before the branch and which are safe to execute after the
   branch.  Then it searches after the insn requiring delay slots or,
   in the case of a branch, for insns that are after the point at
   which the branch merges into the fallthrough code, if such a point
   exists.  When such insns are found, the branch penalty decreases
   and no code expansion takes place.
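
   As a schematic sketch (generic RISC mnemonics; the insns are
   invented), an insn from before the jump that the jump does not
   depend on

	add	r1,r1,r2
	jmp	L3
	nop			# unfilled slot

   can simply be moved down into the slot

	jmp	L3
	add	r1,r1,r2	# filled from before the jump

   doing the same work in one fewer cycle with no code growth.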

   (2) `fill_eager_delay_slots' is more complicated: it is used for
   scheduling conditional jumps, or for scheduling jumps which cannot
   be filled using (1).  A machine need not have annulled jumps to use
   this strategy, but it helps (by keeping more options open).
   `fill_eager_delay_slots' tries to guess the direction the branch
   will go; if it guesses right 100% of the time, it can reduce the
   branch penalty as much as `fill_simple_delay_slots' does.  If it
   guesses wrong 100% of the time, it might as well schedule nops.  When
   `fill_eager_delay_slots' takes insns from the fall-through path of
   the jump, usually there is no code expansion; when it takes insns
   from the branch target, there is code expansion if it is not the
   only way to reach that target.

   (3) `relax_delay_slots' uses a set of rules to simplify code that
   has been reorganized by (1) and (2).  It finds cases where a
   conditional test can be eliminated, jumps can be threaded, extra
   insns can be eliminated, etc.  It is the job of (1) and (2) to do a
   good job of scheduling locally; `relax_delay_slots' takes care of
   making the various individual schedules work well together.  It is
   especially tuned to handle the control flow interactions of branch
   insns.  It does nothing for insns with delay slots that do not
   branch.
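
   For instance (a schematic sketch with invented labels), a jump
   whose target is itself an unconditional jump

	jmp	L1
	...
     L1:jmp	L2

   can be threaded to go straight to L2; if L1 thereby becomes
   unreferenced, the intermediate jump can be deleted as well.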

   On machines that use CC0, we are very conservative.  We will not make
   a copy of an insn involving CC0, since we want to maintain a 1-1
   correspondence between the insn that sets CC0 and the insn that uses
   it.  The insns are allowed to be separated by placing an insn that
   sets CC0 (but not an insn that uses CC0; we could do this, but it
   doesn't seem worthwhile) in a delay slot.  In that case, we point each
   insn at the other with REG_CC_USER and REG_CC_SETTER notes.  Note that
   these restrictions affect very few machines because most RISC machines
   with delay slots will not use CC0 (the RT is the only known exception
   at this point).  */
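
/* Schematically (an illustrative sketch of the note linkage described
   above; the insn numbers are invented), if the insn setting CC0 ends
   up in the delay slot of the jump that uses CC0, the two insns are
   cross-linked:

	(insn 10 ...)		sets cc0; carries a REG_CC_USER
				note pointing at insn 11
	(jump_insn 11 ...)	uses cc0; carries a REG_CC_SETTER
				note pointing at insn 10  */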

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "expr.h"
#include "function.h"
#include "insn-config.h"
#include "conditions.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "obstack.h"
#include "insn-attr.h"
#include "resource.h"
#include "except.h"
#include "params.h"
#include "target.h"
#include "tree-pass.h"
#include "emit-rtl.h"

#ifdef DELAY_SLOTS

#ifndef ANNUL_IFTRUE_SLOTS
#define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
#endif
#ifndef ANNUL_IFFALSE_SLOTS
#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
#endif

/* Insns which have delay slots that have not yet been filled.  */

static struct obstack unfilled_slots_obstack;
static rtx *unfilled_firstobj;

/* Define macros to refer to the first and last slot containing unfilled
   insns.  These are used because the list may move and its address
   should be recomputed at each use.  */

#define unfilled_slots_base \
  ((rtx *) obstack_base (&unfilled_slots_obstack))

#define unfilled_slots_next \
  ((rtx *) obstack_next_free (&unfilled_slots_obstack))

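/* As a usage sketch (the recording call below is the one this file
   actually uses; the loop shape and `process' are illustrative): an
   insn that still needs its slots filled is recorded with

	obstack_ptr_grow (&unfilled_slots_obstack, insn);

   and pending entries are then visited with something like

	rtx *p;
	for (p = unfilled_slots_base; p < unfilled_slots_next; p++)
	  process (*p);

   re-evaluating the base and next macros at each use, since growing
   the obstack may move the whole block.  */
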
/* Points to the label before the end of the function, or before a
   return insn.  */
static rtx function_return_label;
/* Likewise for a simple_return.  */
static rtx function_simple_return_label;

/* Mapping between INSN_UID's and position in the code since INSN_UID's do
   not always monotonically increase.  */
static int *uid_to_ruid;

/* Highest valid index in `uid_to_ruid'.  */
static int max_uid;

static int stop_search_p (rtx, int);
static int resource_conflicts_p (struct resources *, struct resources *);
static int insn_references_resource_p (rtx, struct resources *, bool);
static int insn_sets_resource_p (rtx, struct resources *, bool);
static rtx find_end_label (rtx);
static rtx emit_delay_sequence (rtx, rtx, int);
static rtx add_to_delay_list (rtx, rtx);
static rtx delete_from_delay_slot (rtx);
static void delete_scheduled_jump (rtx);
static void note_delay_statistics (int, int);
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
static rtx optimize_skip (rtx);
#endif
static int get_jump_flags (rtx, rtx);
static int mostly_true_jump (rtx);
static rtx get_branch_condition (rtx, rtx);
static int condition_dominates_p (rtx, rtx);
static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
static int check_annul_list_true_false (int, rtx);
static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx,
					 struct resources *,
					 struct resources *,
					 struct resources *,
					 int, int *, int *, rtx *);
static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx,
					      struct resources *,
					      struct resources *,
					      struct resources *,
					      int, int *, int *);
static void try_merge_delay_insns (rtx, rtx);
static rtx redundant_insn (rtx, rtx, rtx);
static int own_thread_p (rtx, rtx, int);
static void update_block (rtx, rtx);
static int reorg_redirect_jump (rtx, rtx);
static void update_reg_dead_notes (rtx, rtx);
static void fix_reg_dead_note (rtx, rtx);
static void update_reg_unused_notes (rtx, rtx);
static void fill_simple_delay_slots (int);
static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx,
				   int, int, int, int,
				   int *, rtx);
static void fill_eager_delay_slots (void);
static void relax_delay_slots (rtx);
static void make_return_insns (rtx);
\f
/* A wrapper around next_active_insn which takes care to return ret_rtx
   unchanged.  */

static rtx
first_active_target_insn (rtx insn)
{
  if (ANY_RETURN_P (insn))
    return insn;
  return next_active_insn (insn);
}
\f
/* Return true iff INSN is a simplejump, or any kind of return insn.  */

static bool
simplejump_or_return_p (rtx insn)
{
  return (JUMP_P (insn)
	  && (simplejump_p (insn) || ANY_RETURN_P (PATTERN (insn))));
}
\f
/* Return TRUE if this insn should stop the search for insns to fill delay
   slots.  LABELS_P indicates that labels should terminate the search.
   In all cases, jumps terminate the search.  */

static int
stop_search_p (rtx insn, int labels_p)
{
  if (insn == 0)
    return 1;

  /* If the insn can throw an exception that is caught within the function,
     it may effectively perform a jump from the viewpoint of the function.
     Therefore act like for a jump.  */
  if (can_throw_internal (insn))
    return 1;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case CALL_INSN:
      return 0;

    case CODE_LABEL:
      return labels_p;

    case JUMP_INSN:
    case BARRIER:
      return 1;

    case INSN:
      /* OK unless it contains a delay slot or is an `asm' insn of some type.
	 We don't know anything about these.  */
      return (GET_CODE (PATTERN (insn)) == SEQUENCE
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0);

    default:
      gcc_unreachable ();
    }
}
\f
/* Return TRUE if any resources are marked in both RES1 and RES2 or if
   either resource set contains a volatile memory reference.  Otherwise,
   return FALSE.  */

static int
resource_conflicts_p (struct resources *res1, struct resources *res2)
{
  if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
      || (res1->unch_memory && res2->unch_memory)
      || res1->volatil || res2->volatil)
    return 1;

  return hard_reg_set_intersect_p (res1->regs, res2->regs);
}

/* Return TRUE if any resource marked in RES, a `struct resources', is
   referenced by INSN.  If INCLUDE_DELAYED_EFFECTS is set, return TRUE
   also if a routine called by INSN uses those resources.

   We compute this by computing all the resources referenced by INSN and
   seeing if this conflicts with RES.  It might be faster to directly check
   ourselves, and this is the way it used to work, but it means duplicating
   a large block of complex code.  */

static int
insn_references_resource_p (rtx insn, struct resources *res,
			    bool include_delayed_effects)
{
  struct resources insn_res;

  CLEAR_RESOURCE (&insn_res);
  mark_referenced_resources (insn, &insn_res, include_delayed_effects);
  return resource_conflicts_p (&insn_res, res);
}

/* Return TRUE if INSN modifies resources that are marked in RES.
   INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
   included.  CC0 is only modified if it is explicitly set; see comments
   in front of mark_set_resources for details.  */

static int
insn_sets_resource_p (rtx insn, struct resources *res,
		      bool include_delayed_effects)
{
  struct resources insn_sets;

  CLEAR_RESOURCE (&insn_sets);
  mark_set_resources (insn, &insn_sets, 0,
		      (include_delayed_effects
		       ? MARK_SRC_DEST_CALL
		       : MARK_SRC_DEST));
  return resource_conflicts_p (&insn_sets, res);
}
\f
/* Find a label at the end of the function or before a RETURN.  If there
   is none, try to make one.  If that fails, returns 0.

   The property of such a label is that it is placed just before the
   epilogue or a bare RETURN insn, so that another bare RETURN can be
   turned into a jump to the label unconditionally.  In particular, the
   label cannot be placed before a RETURN insn with a filled delay slot.

   ??? There may be a problem with the current implementation.  Suppose
   we start with a bare RETURN insn and call find_end_label.  It may set
   function_return_label just before the RETURN.  Suppose the machinery
   is able to fill the delay slot of the RETURN insn afterwards.  Then
   function_return_label is no longer valid according to the property
   described above and find_end_label will still return it unmodified.
   Note that this is probably mitigated by the following observation:
   once function_return_label is made, it is very likely the target of
   a jump, so filling the delay slot of the RETURN will be much more
   difficult.
   KIND is either simple_return_rtx or ret_rtx, indicating which type of
   return we're looking for.  */

static rtx
find_end_label (rtx kind)
{
  rtx insn;
  rtx *plabel;

  if (kind == ret_rtx)
    plabel = &function_return_label;
  else
    {
      gcc_assert (kind == simple_return_rtx);
      plabel = &function_simple_return_label;
    }

  /* If we found one previously, return it.  */
  if (*plabel)
    return *plabel;

  /* Otherwise, see if there is a label at the end of the function.  If there
     is, it must be that RETURN insns aren't needed, so that is our return
     label and we don't have to do anything else.  */

  insn = get_last_insn ();
  while (NOTE_P (insn)
	 || (NONJUMP_INSN_P (insn)
	     && (GET_CODE (PATTERN (insn)) == USE
		 || GET_CODE (PATTERN (insn)) == CLOBBER)))
    insn = PREV_INSN (insn);

  /* When a target threads its epilogue we might already have a
     suitable return insn.  If so put a label before it for the
     function_return_label.  */
  if (BARRIER_P (insn)
      && JUMP_P (PREV_INSN (insn))
      && PATTERN (PREV_INSN (insn)) == kind)
    {
      rtx temp = PREV_INSN (PREV_INSN (insn));
      rtx label = gen_label_rtx ();
      LABEL_NUSES (label) = 0;

      /* Put the label before any USE insns that may precede the RETURN
	 insn.  */
      while (GET_CODE (temp) == USE)
	temp = PREV_INSN (temp);

      emit_label_after (label, temp);
      *plabel = label;
    }

  else if (LABEL_P (insn))
    *plabel = insn;
  else
    {
      rtx label = gen_label_rtx ();
      LABEL_NUSES (label) = 0;
      /* If the basic block reorder pass moves the return insn to
	 some other place try to locate it again and put our
	 function_return_label there.  */
      while (insn && ! (JUMP_P (insn) && (PATTERN (insn) == kind)))
	insn = PREV_INSN (insn);
      if (insn)
	{
	  insn = PREV_INSN (insn);

	  /* Put the label before any USE insns that may precede the
	     RETURN insn.  */
	  while (GET_CODE (insn) == USE)
	    insn = PREV_INSN (insn);

	  emit_label_after (label, insn);
	}
      else
	{
#ifdef HAVE_epilogue
	  if (HAVE_epilogue
#ifdef HAVE_return
	      && ! HAVE_return
#endif
	      )
	    /* The RETURN insn has its delay slot filled so we cannot
	       emit the label just before it.  Since we already have
	       an epilogue and cannot emit a new RETURN, we cannot
	       emit the label at all.  */
	    return NULL_RTX;
#endif /* HAVE_epilogue */

	  /* Otherwise, make a new label and emit a RETURN and BARRIER,
	     if needed.  */
	  emit_label (label);
#ifdef HAVE_return
	  if (HAVE_return)
	    {
	      /* The return we make may have delay slots too.  */
	      rtx insn = gen_return ();
	      insn = emit_jump_insn (insn);
	      set_return_jump_label (insn);
	      emit_barrier ();
	      if (num_delay_slots (insn) > 0)
		obstack_ptr_grow (&unfilled_slots_obstack, insn);
	    }
#endif
	}
      *plabel = label;
    }

  /* Show one additional use for this label so it won't go away until
     we are done.  */
  ++LABEL_NUSES (*plabel);

  return *plabel;
}
\f
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   the pattern of INSN with the SEQUENCE.

   Chain the insns so that NEXT_INSN of each insn in the sequence points to
   the next and NEXT_INSN of the last insn in the sequence points to
   the first insn after the sequence.  Similarly for PREV_INSN.  This makes
   it easier to scan all insns.

   Returns the SEQUENCE that replaces INSN.  */
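
/* Schematically (an illustrative diagram): if INSN has two filled
   slots S1 and S2, the stream

	prev -> INSN -> next

   becomes

	prev -> SEQ [ INSN -> S1 -> S2 ] -> next

   where PREV_INSN of the inner INSN is `prev' and NEXT_INSN of S2 is
   `next', so a linear walk of the chain still visits every insn.  */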

static rtx
emit_delay_sequence (rtx insn, rtx list, int length)
{
  int i = 1;
  rtx li;
  int had_barrier = 0;

  /* Allocate the rtvec to hold the insns and the SEQUENCE.  */
  rtvec seqv = rtvec_alloc (length + 1);
  rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
  rtx seq_insn = make_insn_raw (seq);
  rtx first = get_insns ();
  rtx last = get_last_insn ();

  /* Make a copy of the insn having delay slots.  */
  rtx delay_insn = copy_rtx (insn);

  /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
     confuse further processing.  Update LAST in case it was the last insn.
     We will put the BARRIER back in later.  */
  if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn)))
    {
      delete_related_insns (NEXT_INSN (insn));
      last = get_last_insn ();
      had_barrier = 1;
    }

  /* Splice our SEQUENCE into the insn stream where INSN used to be.  */
  NEXT_INSN (seq_insn) = NEXT_INSN (insn);
  PREV_INSN (seq_insn) = PREV_INSN (insn);

  if (insn != last)
    PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;

  if (insn != first)
    NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;

  /* Note the calls to set_new_first_and_last_insn must occur after
     SEQ_INSN has been completely spliced into the insn stream.

     Otherwise CUR_INSN_UID will get set to an incorrect value because
     set_new_first_and_last_insn will not find SEQ_INSN in the chain.  */
  if (insn == last)
    set_new_first_and_last_insn (first, seq_insn);

  if (insn == first)
    set_new_first_and_last_insn (seq_insn, last);

  /* Build our SEQUENCE and rebuild the insn chain.  */
  XVECEXP (seq, 0, 0) = delay_insn;
  INSN_DELETED_P (delay_insn) = 0;
  PREV_INSN (delay_insn) = PREV_INSN (seq_insn);

  INSN_LOCATION (seq_insn) = INSN_LOCATION (delay_insn);

  for (li = list; li; li = XEXP (li, 1), i++)
    {
      rtx tem = XEXP (li, 0);
      rtx note, next;

      /* Show that this copy of the insn isn't deleted.  */
      INSN_DELETED_P (tem) = 0;

      XVECEXP (seq, 0, i) = tem;
      PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
      NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;

      /* The SPARC assembler, for instance, emits a warning when debug
	 info is output into the delay slot.  */
      if (INSN_LOCATION (tem) && !INSN_LOCATION (seq_insn))
	INSN_LOCATION (seq_insn) = INSN_LOCATION (tem);
      INSN_LOCATION (tem) = 0;

      for (note = REG_NOTES (tem); note; note = next)
	{
	  next = XEXP (note, 1);
	  switch (REG_NOTE_KIND (note))
	    {
	    case REG_DEAD:
	      /* Remove any REG_DEAD notes because we can't rely on them now
		 that the insn has been moved.  */
	      remove_note (tem, note);
	      break;

	    case REG_LABEL_OPERAND:
	    case REG_LABEL_TARGET:
	      /* Keep the label reference count up to date.  */
	      if (LABEL_P (XEXP (note, 0)))
		LABEL_NUSES (XEXP (note, 0))++;
	      break;

	    default:
	      break;
	    }
	}
    }

  NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);

  /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
     last insn in that SEQUENCE to point to us.  Similarly for the first
     insn in the following insn if it is a SEQUENCE.  */

  if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn))
      && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
    NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
			XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
      = seq_insn;

  if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn))
      && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
    PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;

  /* If there used to be a BARRIER, put it back.  */
  if (had_barrier)
    emit_barrier_after (seq_insn);

  gcc_assert (i == length + 1);

  return seq_insn;
}

/* Add INSN to DELAY_LIST and return the head of the new list.  The list must
   be in the order in which the insns are to be executed.  */

static rtx
add_to_delay_list (rtx insn, rtx delay_list)
{
  /* If we have an empty list, just make a new list element.  If
     INSN has its block number recorded, clear it since we may
     be moving the insn to a new block.  */

  if (delay_list == 0)
    {
      clear_hashed_info_for_insn (insn);
      return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
    }

  /* Otherwise this must be an INSN_LIST.  Add INSN to the end of the
     list.  */
  XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));

  return delay_list;
}
\f
/* Delete INSN from the delay slot of the insn that it is in, which may
   produce an insn with no delay slots.  Return the new insn.  */

static rtx
delete_from_delay_slot (rtx insn)
{
  rtx trial, seq_insn, seq, prev;
  rtx delay_list = 0;
  int i;
  int had_barrier = 0;

  /* We first must find the insn containing the SEQUENCE with INSN in its
     delay slot.  Do this by finding an insn, TRIAL, where
     PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL.  */

  for (trial = insn;
       PREV_INSN (NEXT_INSN (trial)) == trial;
       trial = NEXT_INSN (trial))
    ;

  seq_insn = PREV_INSN (NEXT_INSN (trial));
  seq = PATTERN (seq_insn);

  if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
    had_barrier = 1;

  /* Create a delay list consisting of all the insns other than the one
     we are deleting (unless we were the only one).  */
  if (XVECLEN (seq, 0) > 2)
    for (i = 1; i < XVECLEN (seq, 0); i++)
      if (XVECEXP (seq, 0, i) != insn)
	delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);

  /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
     list, and rebuild the delay list if non-empty.  */
  prev = PREV_INSN (seq_insn);
  trial = XVECEXP (seq, 0, 0);
  delete_related_insns (seq_insn);
  add_insn_after (trial, prev, NULL);

  /* If there was a barrier after the old SEQUENCE, re-emit it.  */
  if (had_barrier)
    emit_barrier_after (trial);

  /* If there are any delay insns, re-emit them.  Otherwise clear the
     annul flag.  */
  if (delay_list)
    trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
  else if (JUMP_P (trial))
    INSN_ANNULLED_BRANCH_P (trial) = 0;

  INSN_FROM_TARGET_P (insn) = 0;

  /* Show we need to fill this insn again.  */
  obstack_ptr_grow (&unfilled_slots_obstack, trial);

  return trial;
}
\f
/* Delete INSN, a JUMP_INSN.  If it is a conditional jump, we must track down
   the insn that sets CC0 for it and delete it too.  */

static void
delete_scheduled_jump (rtx insn)
{
  /* Delete the insn that sets cc0 for us.  On machines without cc0, we could
     delete the insn that sets the condition code, but it is hard to find it.
     Since this case is rare anyway, don't bother trying; there would likely
     be other insns that became dead anyway, which we wouldn't know to
     delete.  */

#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, insn))
    {
      rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);

      /* If a reg-note was found, it points to an insn to set CC0.  This
	 insn is in the delay list of some other insn.  So delete it from
	 the delay list it was in.  */
      if (note)
	{
	  if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
	      && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
	    delete_from_delay_slot (XEXP (note, 0));
	}
      else
	{
	  /* The insn setting CC0 is our previous insn, but it may be in
	     a delay slot.  It will be the last insn in the delay slot, if
	     it is.  */
	  rtx trial = previous_insn (insn);
	  if (NOTE_P (trial))
	    trial = prev_nonnote_insn (trial);
	  if (sets_cc0_p (PATTERN (trial)) != 1
	      || FIND_REG_INC_NOTE (trial, NULL_RTX))
	    return;
	  if (PREV_INSN (NEXT_INSN (trial)) == trial)
	    delete_related_insns (trial);
	  else
	    delete_from_delay_slot (trial);
	}
    }
#endif

  delete_related_insns (insn);
}
\f
/* Counters for delay-slot filling.  */

#define NUM_REORG_FUNCTIONS 2
#define MAX_DELAY_HISTOGRAM 3
#define MAX_REORG_PASSES 2

static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];

static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];

static int reorg_pass_number;

static void
note_delay_statistics (int slots_filled, int index)
{
  num_insns_needing_delays[index][reorg_pass_number]++;
  if (slots_filled > MAX_DELAY_HISTOGRAM)
    slots_filled = MAX_DELAY_HISTOGRAM;
  num_filled_delays[index][slots_filled][reorg_pass_number]++;
}
\f
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)

/* Optimize the following cases:

   1.  When a conditional branch skips over only one instruction,
   use an annulling branch and put that insn in the delay slot.
   Use either a branch that annuls when the condition is true or
   invert the test with a branch that annuls when the condition is
   false.  This saves insns, since otherwise we must copy an insn
   from the L1 target.

	(orig)		 (skip)		(otherwise)
	Bcc.n L1	Bcc',a L1	Bcc,a L1'
	insn		insn		insn2
      L1:	      L1:	      L1:
	insn2		insn2		insn2
	insn3		insn3	      L1':
					insn3

   2.  When a conditional branch skips over only one instruction,
   and after that, it unconditionally branches somewhere else,
   perform a similar optimization.  This saves executing the
   second branch in the case where the inverted condition is true.

	Bcc.n L1	Bcc',a L2
	insn		insn
      L1:	      L1:
	Bra L2		Bra L2

   INSN is a JUMP_INSN.

   This should be expanded to skip over N insns, where N is the number
   of delay slots required.  */

static rtx
optimize_skip (rtx insn)
{
  rtx trial = next_nonnote_insn (insn);
  rtx next_trial = next_active_insn (trial);
  rtx delay_list = 0;
  int flags;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  if (trial == 0
      || !NONJUMP_INSN_P (trial)
      || GET_CODE (PATTERN (trial)) == SEQUENCE
      || recog_memoized (trial) < 0
      || (! eligible_for_annul_false (insn, 0, trial, flags)
	  && ! eligible_for_annul_true (insn, 0, trial, flags))
      || can_throw_internal (trial))
    return 0;

  /* There are two cases where we are just executing one insn (we assume
     here that a branch requires only one insn; this should be generalized
     at some point):  Where the branch goes around a single insn or where
     we have one insn followed by a branch to the same label we branch to.
     In both of these cases, inverting the jump and annulling the delay
     slot give the same effect in fewer insns.  */
  if (next_trial == next_active_insn (JUMP_LABEL (insn))
      || (next_trial != 0
	  && simplejump_or_return_p (next_trial)
	  && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)))
    {
      if (eligible_for_annul_false (insn, 0, trial, flags))
	{
	  if (invert_jump (insn, JUMP_LABEL (insn), 1))
	    INSN_FROM_TARGET_P (trial) = 1;
	  else if (! eligible_for_annul_true (insn, 0, trial, flags))
	    return 0;
	}

      delay_list = add_to_delay_list (trial, NULL_RTX);
      next_trial = next_active_insn (trial);
      update_block (trial, trial);
      delete_related_insns (trial);

      /* Also, if we are targeting an unconditional
	 branch, thread our jump to the target of that branch.  Don't
	 change this into a RETURN here, because it may not accept what
	 we have in the delay slot.  We'll fix this up later.  */
      if (next_trial && simplejump_or_return_p (next_trial))
	{
	  rtx target_label = JUMP_LABEL (next_trial);
	  if (ANY_RETURN_P (target_label))
	    target_label = find_end_label (target_label);

	  if (target_label)
	    {
	      /* Recompute the flags based on TARGET_LABEL since threading
		 the jump to TARGET_LABEL may change the direction of the
		 jump (which may change the circumstances in which the
		 delay slot is nullified).  */
	      flags = get_jump_flags (insn, target_label);
	      if (eligible_for_annul_true (insn, 0, trial, flags))
		reorg_redirect_jump (insn, target_label);
	    }
	}

      INSN_ANNULLED_BRANCH_P (insn) = 1;
    }

  return delay_list;
}
#endif
\f
/* Encode and return branch direction and prediction information for
   INSN assuming it will jump to LABEL.

   Unconditional branches return no direction information and
   are predicted as very likely taken.  */

static int
get_jump_flags (rtx insn, rtx label)
{
  int flags;

  /* get_jump_flags can be passed any insn with delay slots, these may
     be INSNs, CALL_INSNs, or JUMP_INSNs.  Only JUMP_INSNs have branch
     direction information, and only if they are conditional jumps.

     If LABEL is a return, then there is no way to determine the branch
     direction.  */
  if (JUMP_P (insn)
      && (condjump_p (insn) || condjump_in_parallel_p (insn))
      && !ANY_RETURN_P (label)
      && INSN_UID (insn) <= max_uid
      && INSN_UID (label) <= max_uid)
    flags
      = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
	? ATTR_FLAG_forward : ATTR_FLAG_backward;
  /* No valid direction information.  */
  else
    flags = 0;

  return flags;
}

/* Return a truth value indicating whether this branch is mostly taken.
   If we think the branch is extremely likely to be taken, return 2.
   If it is slightly more likely than not to be taken, return 1.  If it
   is slightly less likely to be taken, return 0, and if it is highly
   unlikely to be taken, return -1.  */
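
/* For example (a worked sketch; REG_BR_PROB_BASE is 10000, making the
   cutoffs below 9000, 5000 and 1000): a REG_BR_PROB note of 9500
   yields 2, 6000 yields 1, 3000 yields 0, and 500 yields -1.  */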

static int
mostly_true_jump (rtx jump_insn)
{
  /* If branch probabilities are available, then use that number since it
     always gives a correct answer.  */
  rtx note = find_reg_note (jump_insn, REG_BR_PROB, 0);
  if (note)
    {
      int prob = INTVAL (XEXP (note, 0));

      if (prob >= REG_BR_PROB_BASE * 9 / 10)
	return 2;
      else if (prob >= REG_BR_PROB_BASE / 2)
	return 1;
      else if (prob >= REG_BR_PROB_BASE / 10)
	return 0;
      else
	return -1;
    }

  /* If there is no note, assume branches are not taken.
     This should be rare.  */
  return 0;
}

/* Return the condition under which INSN will branch to TARGET.  If TARGET
   is zero, return the condition under which INSN will return.  If INSN is
   an unconditional branch, return const_true_rtx.  If INSN isn't a simple
   type of jump, or it doesn't go to TARGET, return 0.  */
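
/* For example (a schematic RTL sketch; the register and target are
   invented), a conditional branch to TARGET typically looks like

	(set (pc) (if_then_else (eq (reg 1) (const_int 0))
				(label_ref TARGET)
				(pc)))

   and we return (eq (reg 1) (const_int 0)).  If the arms are swapped,
   so INSN branches to TARGET when the condition is false, we return
   the reversed comparison instead.  */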

static rtx
get_branch_condition (rtx insn, rtx target)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (condjump_in_parallel_p (insn))
    pat = XVECEXP (pat, 0, 0);

  if (ANY_RETURN_P (pat))
    return pat == target ? const_true_rtx : 0;

  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;

  src = SET_SRC (pat);
  if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
    return const_true_rtx;

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && XEXP (src, 2) == pc_rtx
	   && GET_CODE (XEXP (src, 1)) == LABEL_REF
	   && XEXP (XEXP (src, 1), 0) == target)
    return XEXP (src, 0);

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && XEXP (src, 1) == pc_rtx
	   && GET_CODE (XEXP (src, 2)) == LABEL_REF
	   && XEXP (XEXP (src, 2), 0) == target)
    {
      enum rtx_code rev;
      rev = reversed_comparison_code (XEXP (src, 0), insn);
      if (rev != UNKNOWN)
	return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)),
			       XEXP (XEXP (src, 0), 0),
			       XEXP (XEXP (src, 0), 1));
    }

  return 0;
}

/* Return nonzero if CONDITION is more strict than the condition of
   INSN, i.e., if INSN will always branch if CONDITION is true.  */
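
/* For example (an illustrative case; the operands are assumed to be
   identical): if CONDITION is (eq x y) and INSN branches when
   (le x y), then INSN certainly branches whenever CONDITION holds,
   so we return nonzero.  With the roles swapped, (le x y) does not
   guarantee (eq x y), and we return zero.  */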

static int
condition_dominates_p (rtx condition, rtx insn)
{
  rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
  enum rtx_code code = GET_CODE (condition);
  enum rtx_code other_code;

  if (rtx_equal_p (condition, other_condition)
      || other_condition == const_true_rtx)
    return 1;

  else if (condition == const_true_rtx || other_condition == 0)
    return 0;

  other_code = GET_CODE (other_condition);
  if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
      || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
      || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
    return 0;

  return comparison_dominates_p (code, other_code);
}

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns already in the delay slot of JUMP.  */

static int
redirect_with_delay_slots_safe_p (rtx jump, rtx newlabel, rtx seq)
{
  int flags, i;
  rtx pat = PATTERN (seq);

  /* Make sure all the delay slots of this jump would still
     be valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  flags = get_jump_flags (jump, newlabel);
  for (i = 1; i < XVECLEN (pat, 0); i++)
    if (! (
#ifdef ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
	   ? eligible_for_annul_false (jump, i - 1,
				       XVECEXP (pat, 0, i), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
	   ? eligible_for_annul_true (jump, i - 1,
				      XVECEXP (pat, 0, i), flags) :
#endif
	   eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
      break;

  return (i == XVECLEN (pat, 0));
}

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns we wish to place in the delay slot of JUMP.  */

static int
redirect_with_delay_list_safe_p (rtx jump, rtx newlabel, rtx delay_list)
{
  int flags, i;
  rtx li;

  /* Make sure all the insns in DELAY_LIST would still be
     valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  flags = get_jump_flags (jump, newlabel);
  for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
    if (! (
#ifdef ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (XEXP (li, 0)))
	   ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
	   ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
#endif
	   eligible_for_delay (jump, i, XEXP (li, 0), flags)))
      break;

  return (li == NULL);
}

/* DELAY_LIST is a list of insns that have already been placed into delay
   slots.  See if all of them have the same annulling status as ANNUL_TRUE_P.
   If not, return 0; otherwise return 1.  */

static int
check_annul_list_true_false (int annul_true_p, rtx delay_list)
{
  rtx temp;

  if (delay_list)
    {
      for (temp = delay_list; temp; temp = XEXP (temp, 1))
	{
	  rtx trial = XEXP (temp, 0);

	  if ((annul_true_p && INSN_FROM_TARGET_P (trial))
	      || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
	    return 0;
	}
    }

  return 1;
}
\f
/* INSN branches to an insn whose pattern SEQ is a SEQUENCE.  Given that
   the condition tested by INSN is CONDITION and the resources shown in
   OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
   from SEQ's delay list, in addition to whatever insns it may execute
   (in DELAY_LIST).  SETS and NEEDED denote resources already set and
   needed while searching for delay slot insns.  Return the concatenated
   delay list if possible, otherwise, return 0.

   SLOTS_TO_FILL is the total number of slots required by INSN, and
   PSLOTS_FILLED points to the number filled so far (also the number of
   insns in DELAY_LIST).  It is updated with the number that have been
   filled from the SEQUENCE, if any.

   PANNUL_P points to a nonzero value if we already know that we need
   to annul INSN.  If this routine determines that annulling is needed,
   it may set that value nonzero.

   PNEW_THREAD points to a location that is to receive the place at which
   execution should continue.  */

static rtx
steal_delay_list_from_target (rtx insn, rtx condition, rtx seq,
			      rtx delay_list, struct resources *sets,
			      struct resources *needed,
			      struct resources *other_needed,
			      int slots_to_fill, int *pslots_filled,
			      int *pannul_p, rtx *pnew_thread)
{
  rtx temp;
  int slots_remaining = slots_to_fill - *pslots_filled;
  int total_slots_filled = *pslots_filled;
  rtx new_delay_list = 0;
  int must_annul = *pannul_p;
  int used_annul = 0;
  int i;
  struct resources cc_set;

  /* We can't do anything if there are more delay slots in SEQ than we
     can handle, or if we don't know that it will be a taken branch.
     We know that it will be a taken branch if it is either an unconditional
     branch or a conditional branch with a stricter branch condition.

     Also, exit if the branch has more than one set, since then it is computing
     other results that can't be ignored, e.g. the HPPA mov&branch instruction.
     ??? It may be possible to move other sets into INSN in addition to
     moving the instructions in the delay slots.

     We cannot steal the delay list if one of the instructions in the
     current delay_list modifies the condition codes and the jump in the
     sequence is a conditional jump.  We cannot do this because we cannot
     change the direction of the jump because the condition codes
     will affect the direction of the jump in the sequence.  */

  CLEAR_RESOURCE (&cc_set);
  for (temp = delay_list; temp; temp = XEXP (temp, 1))
    {
      rtx trial = XEXP (temp, 0);

      mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
      if (insn_references_resource_p (XVECEXP (seq, 0, 0), &cc_set, false))
	return delay_list;
    }

  if (XVECLEN (seq, 0) - 1 > slots_remaining
      || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
      || ! single_set (XVECEXP (seq, 0, 0)))
    return delay_list;

#ifdef MD_CAN_REDIRECT_BRANCH
  /* On some targets, branches with delay slots can have a limited
     displacement.  Give the back end a chance to tell us we can't do
     this.  */
  if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0)))
    return delay_list;
#endif

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);
      int flags;

      if (insn_references_resource_p (trial, sets, false)
	  || insn_sets_resource_p (trial, needed, false)
	  || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
	  /* If TRIAL sets CC0, we can't copy it, so we can't steal this
	     delay list.  */
	  || find_reg_note (trial, REG_CC_USER, NULL_RTX)
#endif
	  /* If TRIAL is from the fallthrough code of an annulled branch insn
	     in SEQ, we cannot use it.  */
	  || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
	      && ! INSN_FROM_TARGET_P (trial)))
	return delay_list;

      /* If this insn was already done (usually in a previous delay slot),
	 pretend we put it in our delay slot.  */
      if (redundant_insn (trial, insn, new_delay_list))
	continue;

      /* We will end up re-vectoring this branch, so compute flags
	 based on jumping to the new label.  */
      flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));

      if (! must_annul
	  && ((condition == const_true_rtx
	       || (! insn_sets_resource_p (trial, other_needed, false)
		   && ! may_trap_or_fault_p (PATTERN (trial)))))
	  ? eligible_for_delay (insn, total_slots_filled, trial, flags)
	  : (must_annul || (delay_list == NULL && new_delay_list == NULL))
	    && (must_annul = 1,
		check_annul_list_true_false (0, delay_list)
		&& check_annul_list_true_false (0, new_delay_list)
		&& eligible_for_annul_false (insn, total_slots_filled,
					     trial, flags)))
	{
	  if (must_annul)
	    used_annul = 1;
	  temp = copy_delay_slot_insn (trial);
	  INSN_FROM_TARGET_P (temp) = 1;
	  new_delay_list = add_to_delay_list (temp, new_delay_list);
	  total_slots_filled++;

	  if (--slots_remaining == 0)
	    break;
	}
      else
	return delay_list;
    }

  /* Show the place to which we will be branching.  */
  *pnew_thread = first_active_target_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));

  /* Add any new insns to the delay list and update the count of the
     number of slots filled.  */
  *pslots_filled = total_slots_filled;
  if (used_annul)
    *pannul_p = 1;

  if (delay_list == 0)
    return new_delay_list;

  for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
    delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);

  return delay_list;
}
\f
/* Similar to steal_delay_list_from_target except that SEQ is on the
   fallthrough path of INSN.  Here we only do something if the delay insn
   of SEQ is an unconditional branch.  In that case we steal its delay slot
   for INSN since unconditional branches are much easier to fill.  */

static rtx
steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq,
				   rtx delay_list, struct resources *sets,
				   struct resources *needed,
				   struct resources *other_needed,
				   int slots_to_fill, int *pslots_filled,
				   int *pannul_p)
{
  int i;
  int flags;
  int must_annul = *pannul_p;
  int used_annul = 0;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  /* We can't do anything if SEQ's delay insn isn't an
     unconditional branch.  */

  if (! simplejump_or_return_p (XVECEXP (seq, 0, 0)))
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);

      /* If TRIAL sets CC0, stealing it will move it too far from the use
	 of CC0.  */
      if (insn_references_resource_p (trial, sets, false)
	  || insn_sets_resource_p (trial, needed, false)
	  || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
	  || sets_cc0_p (PATTERN (trial))
#endif
	  )

	break;

      /* If this insn was already done, we don't need it.  */
      if (redundant_insn (trial, insn, delay_list))
	{
	  delete_from_delay_slot (trial);
	  continue;
	}

      if (! must_annul
	  && ((condition == const_true_rtx
	       || (! insn_sets_resource_p (trial, other_needed, false)
		   && ! may_trap_or_fault_p (PATTERN (trial)))))
	  ? eligible_for_delay (insn, *pslots_filled, trial, flags)
	  : (must_annul || delay_list == NULL) && (must_annul = 1,
	     check_annul_list_true_false (1, delay_list)
	     && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
	{
	  if (must_annul)
	    used_annul = 1;
	  delete_from_delay_slot (trial);
	  delay_list = add_to_delay_list (trial, delay_list);

	  if (++(*pslots_filled) == slots_to_fill)
	    break;
	}
      else
	break;
    }

  if (used_annul)
    *pannul_p = 1;
  return delay_list;
}
\f
/* Try merging insns starting at THREAD which match exactly the insns in
   INSN's delay list.

   If all insns were matched and the insn was previously annulling, the
   annul bit will be cleared.

   For each insn that is merged, if the branch is or will be non-annulling,
   we delete the merged insn.  */

static void
try_merge_delay_insns (rtx insn, rtx thread)
{
  rtx trial, next_trial;
  rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
  int annul_p = JUMP_P (delay_insn) && INSN_ANNULLED_BRANCH_P (delay_insn);
  int slot_number = 1;
  int num_slots = XVECLEN (PATTERN (insn), 0);
  rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
  struct resources set, needed;
  rtx merged_insns = 0;
  int i;
  int flags;

  flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));

  CLEAR_RESOURCE (&needed);
  CLEAR_RESOURCE (&set);

  /* If this is not an annulling branch, take into account anything needed in
     INSN's delay slot.  This prevents two increments from being incorrectly
     folded into one.  If we are annulling, this would be the correct
     thing to do.  (The alternative, looking at things set in NEXT_TO_MATCH
     will essentially disable this optimization.  This method is somewhat of
     a kludge, but I don't see a better way.)  */
  if (! annul_p)
    for (i = 1; i < num_slots; i++)
      if (XVECEXP (PATTERN (insn), 0, i))
	mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed,
				   true);

  for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
    {
      rtx pat = PATTERN (trial);
      rtx oldtrial = trial;

      next_trial = next_nonnote_insn (trial);

      /* TRIAL must be a CALL_INSN or INSN.  Skip USE and CLOBBER.  */
      if (NONJUMP_INSN_P (trial)
	  && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
	continue;

      if (GET_CODE (next_to_match) == GET_CODE (trial)
#ifdef HAVE_cc0
	  /* We can't share an insn that sets cc0.  */
	  && ! sets_cc0_p (pat)
#endif
	  && ! insn_references_resource_p (trial, &set, true)
	  && ! insn_sets_resource_p (trial, &set, true)
	  && ! insn_sets_resource_p (trial, &needed, true)
	  && (trial = try_split (pat, trial, 0)) != 0
	  /* Update next_trial, in case try_split succeeded.  */
	  && (next_trial = next_nonnote_insn (trial))
	  /* Likewise THREAD.  */
	  && (thread = oldtrial == thread ? trial : thread)
	  && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
	  /* Have to test this condition if annul condition is different
	     from (and less restrictive than) non-annulling one.  */
	  && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
	{

	  if (! annul_p)
	    {
	      update_block (trial, thread);
	      if (trial == thread)
		thread = next_active_insn (thread);

	      delete_related_insns (trial);
	      INSN_FROM_TARGET_P (next_to_match) = 0;
	    }
	  else
	    merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns);

	  if (++slot_number == num_slots)
	    break;

	  next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
	}

      mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
      mark_referenced_resources (trial, &needed, true);
    }

  /* See if we stopped on a filled insn.  If we did, try to see if its
     delay slots match.  */
  if (slot_number != num_slots
      && trial && NONJUMP_INSN_P (trial)
      && GET_CODE (PATTERN (trial)) == SEQUENCE
      && !(JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
	   && INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))))
    {
      rtx pat = PATTERN (trial);
      rtx filled_insn = XVECEXP (pat, 0, 0);

      /* Account for resources set/needed by the filled insn.  */
      mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
      mark_referenced_resources (filled_insn, &needed, true);

      for (i = 1; i < XVECLEN (pat, 0); i++)
	{
	  rtx dtrial = XVECEXP (pat, 0, i);

	  if (! insn_references_resource_p (dtrial, &set, true)
	      && ! insn_sets_resource_p (dtrial, &set, true)
	      && ! insn_sets_resource_p (dtrial, &needed, true)
#ifdef HAVE_cc0
	      && ! sets_cc0_p (PATTERN (dtrial))
#endif
	      && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
	      && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
	    {
	      if (! annul_p)
		{
		  rtx new_rtx;

		  update_block (dtrial, thread);
		  new_rtx = delete_from_delay_slot (dtrial);
		  if (INSN_DELETED_P (thread))
		    thread = new_rtx;
		  INSN_FROM_TARGET_P (next_to_match) = 0;
		}
	      else
		merged_insns = gen_rtx_INSN_LIST (SImode, dtrial,
						  merged_insns);

	      if (++slot_number == num_slots)
		break;

	      next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
	    }
	  else
	    {
	      /* Keep track of the set/referenced resources for the delay
		 slots of any trial insns we encounter.  */
	      mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL);
	      mark_referenced_resources (dtrial, &needed, true);
	    }
	}
    }

  /* If all insns in the delay slot have been matched and we were previously
     annulling the branch, we need not do so any more.  In that case delete
     all the merged insns.  Also clear the INSN_FROM_TARGET_P bit of each
     insn in the delay list so that we know that it isn't only being used
     at the target.  */
  if (slot_number == num_slots && annul_p)
    {
      for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
	{
	  if (GET_MODE (merged_insns) == SImode)
	    {
	      rtx new_rtx;

	      update_block (XEXP (merged_insns, 0), thread);
	      new_rtx = delete_from_delay_slot (XEXP (merged_insns, 0));
	      if (INSN_DELETED_P (thread))
		thread = new_rtx;
	    }
	  else
	    {
	      update_block (XEXP (merged_insns, 0), thread);
	      delete_related_insns (XEXP (merged_insns, 0));
	    }
	}

      INSN_ANNULLED_BRANCH_P (delay_insn) = 0;

      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
    }
}
\f
/* See if INSN is redundant with an insn in front of TARGET.  Often this
   is called when INSN is a candidate for a delay slot of TARGET.
   DELAY_LIST are insns that will be placed in delay slots of TARGET in front
   of INSN.  Often INSN will be redundant with an insn in a delay slot of
   some previous insn.  This happens when we have a series of branches to the
   same label; in that case the first insn at the target might want to go
   into each of the delay slots.

   If we are not careful, this routine can take up a significant fraction
   of the total compilation time (4%), but only wins rarely.  Hence we
   speed this routine up by making two passes.  The first pass goes back
   until it hits a label and sees if it finds an insn with an identical
   pattern.  Only in this (relatively rare) event does it check for
   data conflicts.

   We do not split insns we encounter.  This could cause us not to find a
   redundant insn, but the cost of splitting seems greater than the possible
   gain in rare cases.  */

static rtx
redundant_insn (rtx insn, rtx target, rtx delay_list)
{
  rtx target_main = target;
  rtx ipat = PATTERN (insn);
  rtx trial, pat;
  struct resources needed, set;
  int i;
  unsigned insns_to_search;

  /* If INSN has any REG_UNUSED notes, it can't match anything since we
     are allowed to not actually assign to such a register.  */
  if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
    return 0;

  /* Scan backwards looking for a match.  */
  for (trial = PREV_INSN (target),
	 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
       trial && insns_to_search > 0;
       trial = PREV_INSN (trial))
    {
      if (LABEL_P (trial))
	return 0;

      if (!INSN_P (trial))
	continue;
      --insns_to_search;

      pat = PATTERN (trial);
      if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
	continue;

      if (GET_CODE (pat) == SEQUENCE)
	{
	  /* Stop for a CALL and its delay slots because it is difficult to
	     track its resource needs correctly.  */
	  if (CALL_P (XVECEXP (pat, 0, 0)))
	    return 0;

	  /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
	     slots because it is difficult to track its resource needs
	     correctly.  */

#ifdef INSN_SETS_ARE_DELAYED
	  if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
	    return 0;
#endif

#ifdef INSN_REFERENCES_ARE_DELAYED
	  if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
	    return 0;
#endif

	  /* See if any of the insns in the delay slot match, updating
	     resource requirements as we go.  */
	  for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
		&& rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
		&& ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
	      break;

	  /* If found a match, exit this loop early.  */
	  if (i > 0)
	    break;
	}

      else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
	       && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
	break;
    }

  /* If we didn't find an insn that matches, return 0.  */
  if (trial == 0)
    return 0;

  /* See what resources this insn sets and needs.  If they overlap, or
     if this insn references CC0, it can't be redundant.  */

  CLEAR_RESOURCE (&needed);
  CLEAR_RESOURCE (&set);
  mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
  mark_referenced_resources (insn, &needed, true);

  /* If TARGET is a SEQUENCE, get the main insn.  */
  if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
    target_main = XVECEXP (PATTERN (target), 0, 0);

  if (resource_conflicts_p (&needed, &set)
#ifdef HAVE_cc0
      || reg_mentioned_p (cc0_rtx, ipat)
#endif
      /* The insn requiring the delay may not set anything needed or set by
	 INSN.  */
      || insn_sets_resource_p (target_main, &needed, true)
      || insn_sets_resource_p (target_main, &set, true))
    return 0;

  /* Insns we pass may not set either NEEDED or SET, so merge them for
     simpler tests.  */
  needed.memory |= set.memory;
  needed.unch_memory |= set.unch_memory;
  IOR_HARD_REG_SET (needed.regs, set.regs);

  /* This insn isn't redundant if it conflicts with an insn that either is
     or will be in a delay slot of TARGET.  */

  while (delay_list)
    {
      if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, true))
	return 0;
      delay_list = XEXP (delay_list, 1);
    }

  if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
    for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
      if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed,
				true))
	return 0;

  /* Scan backwards until we reach a label or an insn that uses something
     INSN sets or sets something INSN uses or sets.  */

  for (trial = PREV_INSN (target),
	 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
       trial && !LABEL_P (trial) && insns_to_search > 0;
       trial = PREV_INSN (trial))
    {
      if (!INSN_P (trial))
	continue;
      --insns_to_search;

      pat = PATTERN (trial);
      if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
	continue;

      if (GET_CODE (pat) == SEQUENCE)
	{
	  bool annul_p = false;
	  rtx control = XVECEXP (pat, 0, 0);

	  /* If this is a CALL_INSN and its delay slots, it is hard to track
	     the resource needs properly, so give up.  */
	  if (CALL_P (control))
	    return 0;

	  /* If this is an INSN or JUMP_INSN with delayed effects, it
	     is hard to track the resource needs properly, so give up.  */

#ifdef INSN_SETS_ARE_DELAYED
	  if (INSN_SETS_ARE_DELAYED (control))
	    return 0;
#endif

#ifdef INSN_REFERENCES_ARE_DELAYED
	  if (INSN_REFERENCES_ARE_DELAYED (control))
	    return 0;
#endif

	  if (JUMP_P (control))
	    annul_p = INSN_ANNULLED_BRANCH_P (control);

	  /* See if any of the insns in the delay slot match, updating
	     resource requirements as we go.  */
	  for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
	    {
	      rtx candidate = XVECEXP (pat, 0, i);

	      /* If an insn will be annulled if the branch is false, it isn't
		 considered as a possible duplicate insn.  */
	      if (rtx_equal_p (PATTERN (candidate), ipat)
		  && ! (annul_p && INSN_FROM_TARGET_P (candidate)))
		{
		  /* Show that this insn will be used in the sequel.  */
		  INSN_FROM_TARGET_P (candidate) = 0;
		  return candidate;
		}

	      /* Unless this is an annulled insn from the target of a branch,
		 we must stop if it sets anything needed or set by INSN.  */
	      if ((!annul_p || !INSN_FROM_TARGET_P (candidate))
		  && insn_sets_resource_p (candidate, &needed, true))
		return 0;
	    }

	  /* If the insn requiring the delay slot conflicts with INSN, we
	     must stop.  */
	  if (insn_sets_resource_p (control, &needed, true))
	    return 0;
	}
      else
	{
	  /* See if TRIAL is the same as INSN.  */
	  pat = PATTERN (trial);
	  if (rtx_equal_p (pat, ipat))
	    return trial;

	  /* Can't go any further if TRIAL conflicts with INSN.  */
	  if (insn_sets_resource_p (trial, &needed, true))
	    return 0;
	}
    }

  return 0;
}
1700 \f
1701 /* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero,
1702 it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
1703 is nonzero, we are allowed to fall into this thread; otherwise, we are
1704 not.
1705
1706 If LABEL is used more than once or we pass a label other than LABEL before
1707 finding an active insn, we do not own this thread. */
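/* For illustration: a thread reached only by falling through from the
   branch, or only through LABEL when LABEL_NUSES (LABEL) == 1, is
   "owned" and its insns may be consumed outright; any other user of an
   intervening label makes that unsafe.  */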
1708
1709 static int
1710 own_thread_p (rtx thread, rtx label, int allow_fallthrough)
1711 {
1712 rtx active_insn;
1713 rtx insn;
1714
1715 /* We don't own the function end. */
1716 if (thread == 0 || ANY_RETURN_P (thread))
1717 return 0;
1718
1719 /* Get the first active insn, or THREAD, if it is an active insn. */
1720 active_insn = next_active_insn (PREV_INSN (thread));
1721
1722 for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
1723 if (LABEL_P (insn)
1724 && (insn != label || LABEL_NUSES (insn) != 1))
1725 return 0;
1726
1727 if (allow_fallthrough)
1728 return 1;
1729
1730 /* Ensure that we reach a BARRIER before any insn or label. */
1731 for (insn = prev_nonnote_insn (thread);
1732 insn == 0 || !BARRIER_P (insn);
1733 insn = prev_nonnote_insn (insn))
1734 if (insn == 0
1735 || LABEL_P (insn)
1736 || (NONJUMP_INSN_P (insn)
1737 && GET_CODE (PATTERN (insn)) != USE
1738 && GET_CODE (PATTERN (insn)) != CLOBBER))
1739 return 0;
1740
1741 return 1;
1742 }
1743 \f
1744 /* Called when INSN is being moved from a location near the target of a jump.
1745 We leave a marker of the form (use (INSN)) immediately in front
1746 of WHERE for mark_target_live_regs. These markers will be deleted when
1747 reorg finishes.
1748
1749 We used to try to update the live status of registers if WHERE is at
1750 the start of a basic block, but that can't work since we may remove a
1751 BARRIER in relax_delay_slots. */
1752
1753 static void
1754 update_block (rtx insn, rtx where)
1755 {
1756 /* Ignore if this was in a delay slot and it came from the target of
1757 a branch. */
1758 if (INSN_FROM_TARGET_P (insn))
1759 return;
1760
1761 emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
1762
1763 /* INSN might be making a value live in a block where it didn't use to
1764 be. So recompute liveness information for this block. */
1765
1766 incr_ticks_for_insn (insn);
1767 }
1768
1769 /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
1770 the basic block containing the jump. */
1771
1772 static int
1773 reorg_redirect_jump (rtx jump, rtx nlabel)
1774 {
1775 incr_ticks_for_insn (jump);
1776 return redirect_jump (jump, nlabel, 1);
1777 }
1778
1779 /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
1780 We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
1781 that reference values used in INSN. If we find one, then we move the
1782 REG_DEAD note to INSN.
1783
1784 This is needed to handle the case where a later insn (after INSN) has a
1785 REG_DEAD note for a register used by INSN, and this later insn subsequently
1786 gets moved before a CODE_LABEL because it is a redundant insn. In this
1787 case, mark_target_live_regs may be confused into thinking the register
1788 is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
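/* A sketch of the hazard, with hypothetical insns: INSN uses (reg 3),
   while a later insn P carries the REG_DEAD note for (reg 3) and ends up
   immediately before a CODE_LABEL after being moved as redundant.
   Without first moving the note to INSN, mark_target_live_regs would
   wrongly conclude that (reg 3) is dead at the label.  */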
1789
1790 static void
1791 update_reg_dead_notes (rtx insn, rtx delayed_insn)
1792 {
1793 rtx p, link, next;
1794
1795 for (p = next_nonnote_insn (insn); p != delayed_insn;
1796 p = next_nonnote_insn (p))
1797 for (link = REG_NOTES (p); link; link = next)
1798 {
1799 next = XEXP (link, 1);
1800
1801 if (REG_NOTE_KIND (link) != REG_DEAD
1802 || !REG_P (XEXP (link, 0)))
1803 continue;
1804
1805 if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
1806 {
1807 /* Move the REG_DEAD note from P to INSN. */
1808 remove_note (p, link);
1809 XEXP (link, 1) = REG_NOTES (insn);
1810 REG_NOTES (insn) = link;
1811 }
1812 }
1813 }
1814
1815 /* Called when an insn redundant with START_INSN is deleted. If there
1816 is a REG_DEAD note for the target of START_INSN between START_INSN
1817 and STOP_INSN, then the REG_DEAD note needs to be deleted since the
1818 value no longer dies there.
1819
1820 If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
1821 confused into thinking the register is dead. */
1822
1823 static void
1824 fix_reg_dead_note (rtx start_insn, rtx stop_insn)
1825 {
1826 rtx p, link, next;
1827
1828 for (p = next_nonnote_insn (start_insn); p != stop_insn;
1829 p = next_nonnote_insn (p))
1830 for (link = REG_NOTES (p); link; link = next)
1831 {
1832 next = XEXP (link, 1);
1833
1834 if (REG_NOTE_KIND (link) != REG_DEAD
1835 || !REG_P (XEXP (link, 0)))
1836 continue;
1837
1838 if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
1839 {
1840 remove_note (p, link);
1841 return;
1842 }
1843 }
1844 }
1845
1846 /* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
1847
1848 This handles the case of udivmodXi4 instructions which optimize their
1849 output depending on whether any REG_UNUSED notes are present.
1850 We must make sure that INSN calculates as many results as REDUNDANT_INSN
1851 does. */
1852
1853 static void
1854 update_reg_unused_notes (rtx insn, rtx redundant_insn)
1855 {
1856 rtx link, next;
1857
1858 for (link = REG_NOTES (insn); link; link = next)
1859 {
1860 next = XEXP (link, 1);
1861
1862 if (REG_NOTE_KIND (link) != REG_UNUSED
1863 || !REG_P (XEXP (link, 0)))
1864 continue;
1865
1866 if (! find_regno_note (redundant_insn, REG_UNUSED,
1867 REGNO (XEXP (link, 0))))
1868 remove_note (insn, link);
1869 }
1870 }
1871 \f
1872 /* Return the label before INSN, or put a new label there. */
1873
1874 static rtx
1875 get_label_before (rtx insn)
1876 {
1877 rtx label;
1878
1879 /* Find an existing label at this point
1880 or make a new one if there is none. */
1881 label = prev_nonnote_insn (insn);
1882
1883 if (label == 0 || !LABEL_P (label))
1884 {
1885 rtx prev = PREV_INSN (insn);
1886
1887 label = gen_label_rtx ();
1888 emit_label_after (label, prev);
1889 LABEL_NUSES (label) = 0;
1890 }
1891 return label;
1892 }
1893
1894 /* Scan a function looking for insns that need a delay slot and find insns to
1895 put into the delay slot.
1896
1897 NON_JUMPS_P is nonzero if we are to try to fill only non-jump insns (such
1898 as calls). We do these first since we don't want jump insns (that are
1899 easier to fill) to get the only insns that could be used for non-jump insns.
1900 When it is zero, only try to fill JUMP_INSNs.
1901
1902 When slots are filled in this manner, the insns (including the
1903 delay_insn) are put together in a SEQUENCE rtx. In this fashion,
1904 it is possible to tell whether a delay slot has really been filled
1905 or not. `final' knows how to deal with this, by communicating
1906 through FINAL_SEQUENCE. */
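/* A sketch of the resulting rtl (operands elided): a branch with one
   filled slot becomes

       (insn (sequence [(jump_insn ... branch ...)
                        (insn ... slot insn ...)]))

   while an unfilled slot leaves the branch unwrapped, which is how
   `final' can tell the two cases apart.  */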
1907
1908 static void
1909 fill_simple_delay_slots (int non_jumps_p)
1910 {
1911 rtx insn, pat, trial, next_trial;
1912 int i;
1913 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
1914 struct resources needed, set;
1915 int slots_to_fill, slots_filled;
1916 rtx delay_list;
1917
1918 for (i = 0; i < num_unfilled_slots; i++)
1919 {
1920 int flags;
1921 /* Get the next insn to fill. If it has already had any slots assigned,
1922 we can't do anything with it. Maybe we'll improve this later. */
1923
1924 insn = unfilled_slots_base[i];
1925 if (insn == 0
1926 || INSN_DELETED_P (insn)
1927 || (NONJUMP_INSN_P (insn)
1928 && GET_CODE (PATTERN (insn)) == SEQUENCE)
1929 || (JUMP_P (insn) && non_jumps_p)
1930 || (!JUMP_P (insn) && ! non_jumps_p))
1931 continue;
1932
1933 /* It may have been that this insn used to need delay slots, but
1934 now doesn't; ignore in that case. This can happen, for example,
1935 on the HP PA RISC, where the number of delay slots depends on
1936 what insns are nearby. */
1937 slots_to_fill = num_delay_slots (insn);
1938
1939 Some machine descriptions define instructions to have
1940 delay slots only in certain circumstances which may depend on
1941 nearby insns (which change due to reorg's actions).
1942
1943 For example, the PA port normally has delay slots for unconditional
1944 jumps.
1945
1946 However, the PA port claims such jumps do not have a delay slot
1947 if they are immediate successors of certain CALL_INSNs. This
1948 allows the port to favor filling the delay slot of the call with
1949 the unconditional jump. */
1950 if (slots_to_fill == 0)
1951 continue;
1952
1953 /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
1954 says how many. After initialization, first try optimizing
1955
1956 call _foo call _foo
1957 nop add %o7,.-L1,%o7
1958 b,a L1
1959 nop
1960
1961 If this case applies, the delay slot of the call is filled with
1962 the unconditional jump. This is done first to avoid having the
1963 delay slot of the call filled in the backward scan. Also, since
1964 the unconditional jump is likely to also have a delay slot, that
1965 insn must exist when it is subsequently scanned.
1966
1967 This is tried on each insn with delay slots as some machines
1968 have insns which perform calls, but are not represented as
1969 CALL_INSNs. */
1970
1971 slots_filled = 0;
1972 delay_list = 0;
1973
1974 if (JUMP_P (insn))
1975 flags = get_jump_flags (insn, JUMP_LABEL (insn));
1976 else
1977 flags = get_jump_flags (insn, NULL_RTX);
1978
1979 if ((trial = next_active_insn (insn))
1980 && JUMP_P (trial)
1981 && simplejump_p (trial)
1982 && eligible_for_delay (insn, slots_filled, trial, flags)
1983 && no_labels_between_p (insn, trial)
1984 && ! can_throw_internal (trial))
1985 {
1986 rtx *tmp;
1987 slots_filled++;
1988 delay_list = add_to_delay_list (trial, delay_list);
1989
1990 /* TRIAL may have had its delay slot filled, then unfilled. When
1991 the delay slot is unfilled, TRIAL is placed back on the unfilled
1992 slots obstack. Unfortunately, it is placed on the end of the
1993 obstack, not in its original location. Therefore, we must search
1994 from entry i + 1 to the end of the unfilled slots obstack to
1995 try and find TRIAL. */
1996 tmp = &unfilled_slots_base[i + 1];
1997 while (*tmp != trial && tmp != unfilled_slots_next)
1998 tmp++;
1999
2000 /* Remove the unconditional jump from consideration for delay slot
2001 filling and unthread it. */
2002 if (*tmp == trial)
2003 *tmp = 0;
2004 {
2005 rtx next = NEXT_INSN (trial);
2006 rtx prev = PREV_INSN (trial);
2007 if (prev)
2008 NEXT_INSN (prev) = next;
2009 if (next)
2010 PREV_INSN (next) = prev;
2011 }
2012 }
2013
2014 /* Now, scan backwards from the insn to search for a potential
2015 delay-slot candidate. Stop searching when a label or jump is hit.
2016
2017 For each candidate, if it is to go into the delay slot (moved
2018 forward in execution sequence), it must not need or set any resources
2019 that were set by later insns and must not set any resources that
2020 are needed for those insns.
2021
2022 The delay slot insn itself sets resources unless it is a call
2023 (in which case the called routine, not the insn itself, is doing
2024 the setting). */
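/* A hypothetical example (pseudo-assembly, not any particular port):

       add  r2,r3,r4      ; candidate found by the backward scan
       cmp  r1,0
       beq  L5
       nop                ; empty delay slot of the beq

   The add can move into the slot only because the intervening insns
   neither set nor use r2, and the add sets nothing that the cmp or
   the branch needs.  */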
2025
2026 if (slots_filled < slots_to_fill)
2027 {
2028 CLEAR_RESOURCE (&needed);
2029 CLEAR_RESOURCE (&set);
2030 mark_set_resources (insn, &set, 0, MARK_SRC_DEST);
2031 mark_referenced_resources (insn, &needed, false);
2032
2033 for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
2034 trial = next_trial)
2035 {
2036 next_trial = prev_nonnote_insn (trial);
2037
2038 /* This must be an INSN or CALL_INSN. */
2039 pat = PATTERN (trial);
2040
2041 /* Stand-alone USE and CLOBBER are just for flow. */
2042 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2043 continue;
2044
2045 /* Check for resource conflict first, to avoid unnecessary
2046 splitting. */
2047 if (! insn_references_resource_p (trial, &set, true)
2048 && ! insn_sets_resource_p (trial, &set, true)
2049 && ! insn_sets_resource_p (trial, &needed, true)
2050 #ifdef HAVE_cc0
2051 /* Can't separate set of cc0 from its use. */
2052 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2053 #endif
2054 && ! can_throw_internal (trial))
2055 {
2056 trial = try_split (pat, trial, 1);
2057 next_trial = prev_nonnote_insn (trial);
2058 if (eligible_for_delay (insn, slots_filled, trial, flags))
2059 {
2060 /* In this case, we are searching backward, so if we
2061 find insns to put on the delay list, we want
2062 to put them at the head, rather than the
2063 tail, of the list. */
2064
2065 update_reg_dead_notes (trial, insn);
2066 delay_list = gen_rtx_INSN_LIST (VOIDmode,
2067 trial, delay_list);
2068 update_block (trial, trial);
2069 delete_related_insns (trial);
2070 if (slots_to_fill == ++slots_filled)
2071 break;
2072 continue;
2073 }
2074 }
2075
2076 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2077 mark_referenced_resources (trial, &needed, true);
2078 }
2079 }
2080
2081 /* If not all needed slots have been filled, we come here. */
2082
2083 /* Try to optimize case of jumping around a single insn. */
2084 #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
2085 if (slots_filled != slots_to_fill
2086 && delay_list == 0
2087 && JUMP_P (insn)
2088 && (condjump_p (insn) || condjump_in_parallel_p (insn))
2089 && !ANY_RETURN_P (JUMP_LABEL (insn)))
2090 {
2091 delay_list = optimize_skip (insn);
2092 if (delay_list)
2093 slots_filled += 1;
2094 }
2095 #endif
2096
2097 /* Try to get insns from beyond the insn needing the delay slot.
2098 These insns can neither set nor reference resources set in insns being
2099 skipped, cannot set resources in the insn being skipped, and, if this
2100 is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
2101 call might not return).
2102
2103 There used to be code which continued past the target label if
2104 we saw all uses of the target label. This code did not work,
2105 because it failed to account for some instructions which were
2106 both annulled and marked as from the target. This can happen as a
2107 result of optimize_skip. Since this code was redundant with
2108 fill_eager_delay_slots anyway, it was just deleted. */
2109
2110 if (slots_filled != slots_to_fill
2111 /* If this instruction could throw an exception which is
2112 caught in the same function, then it's not safe to fill
2113 the delay slot with an instruction from beyond this
2114 point. For example, consider:
2115
2116 int i = 2;
2117
2118 try {
2119 f();
2120 i = 3;
2121 } catch (...) {}
2122
2123 return i;
2124
2125 Even though `i' is a local variable, we must be sure not
2126 to put `i = 3' in the delay slot if `f' might throw an
2127 exception.
2128
2129 Presumably, we should also check to see if we could get
2130 back to this function via `setjmp'. */
2131 && ! can_throw_internal (insn)
2132 && (!JUMP_P (insn)
2133 || ((condjump_p (insn) || condjump_in_parallel_p (insn))
2134 && ! simplejump_p (insn)
2135 && !ANY_RETURN_P (JUMP_LABEL (insn)))))
2136 {
2137 /* Invariant: TARGET is the insn's jump label if INSN is a JUMP_INSN;
2138 otherwise, zero. */
2139 rtx target = 0;
2140 int maybe_never = 0;
2141 rtx pat, trial_delay;
2142
2143 CLEAR_RESOURCE (&needed);
2144 CLEAR_RESOURCE (&set);
2145
2146 if (CALL_P (insn))
2147 {
2148 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2149 mark_referenced_resources (insn, &needed, true);
2150 maybe_never = 1;
2151 }
2152 else
2153 {
2154 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2155 mark_referenced_resources (insn, &needed, true);
2156 if (JUMP_P (insn))
2157 target = JUMP_LABEL (insn);
2158 }
2159
2160 if (target == 0 || ANY_RETURN_P (target))
2161 for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1);
2162 trial = next_trial)
2163 {
2164 next_trial = next_nonnote_insn (trial);
2165
2166 /* This must be an INSN or CALL_INSN. */
2167 pat = PATTERN (trial);
2168
2169 /* Stand-alone USE and CLOBBER are just for flow. */
2170 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2171 continue;
2172
2173 /* If this already has filled delay slots, get the insn needing
2174 the delay slots. */
2175 if (GET_CODE (pat) == SEQUENCE)
2176 trial_delay = XVECEXP (pat, 0, 0);
2177 else
2178 trial_delay = trial;
2179
2180 /* Stop our search when seeing a jump. */
2181 if (JUMP_P (trial_delay))
2182 break;
2183
2184 /* See if we have a resource problem before we try to
2185 split. */
2186 if (GET_CODE (pat) != SEQUENCE
2187 && ! insn_references_resource_p (trial, &set, true)
2188 && ! insn_sets_resource_p (trial, &set, true)
2189 && ! insn_sets_resource_p (trial, &needed, true)
2190 #ifdef HAVE_cc0
2191 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2192 #endif
2193 && ! (maybe_never && may_trap_or_fault_p (pat))
2194 && (trial = try_split (pat, trial, 0))
2195 && eligible_for_delay (insn, slots_filled, trial, flags)
2196 && ! can_throw_internal (trial))
2197 {
2198 next_trial = next_nonnote_insn (trial);
2199 delay_list = add_to_delay_list (trial, delay_list);
2200
2201 #ifdef HAVE_cc0
2202 if (reg_mentioned_p (cc0_rtx, pat))
2203 link_cc0_insns (trial);
2204 #endif
2205
2206 delete_related_insns (trial);
2207 if (slots_to_fill == ++slots_filled)
2208 break;
2209 continue;
2210 }
2211
2212 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2213 mark_referenced_resources (trial, &needed, true);
2214
2215 /* Ensure we don't put insns between the setting of cc and the
2216 comparison by moving a setting of cc into an earlier delay
2217 slot since these insns could clobber the condition code. */
2218 set.cc = 1;
2219
2220 /* If this is a call or jump, we might not get here. */
2221 if (CALL_P (trial_delay)
2222 || JUMP_P (trial_delay))
2223 maybe_never = 1;
2224 }
2225
2226 /* If there are slots left to fill and our search was stopped by an
2227 unconditional branch, try the insn at the branch target. We can
2228 redirect the branch if it works.
2229
2230 Don't do this if the insn at the branch target is a branch. */
2231 if (slots_to_fill != slots_filled
2232 && trial
2233 && jump_to_label_p (trial)
2234 && simplejump_p (trial)
2235 && (target == 0 || JUMP_LABEL (trial) == target)
2236 && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
2237 && ! (NONJUMP_INSN_P (next_trial)
2238 && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
2239 && !JUMP_P (next_trial)
2240 && ! insn_references_resource_p (next_trial, &set, true)
2241 && ! insn_sets_resource_p (next_trial, &set, true)
2242 && ! insn_sets_resource_p (next_trial, &needed, true)
2243 #ifdef HAVE_cc0
2244 && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
2245 #endif
2246 && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
2247 && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
2248 && eligible_for_delay (insn, slots_filled, next_trial, flags)
2249 && ! can_throw_internal (trial))
2250 {
2251 /* See comment in relax_delay_slots about necessity of using
2252 next_real_insn here. */
2253 rtx new_label = next_real_insn (next_trial);
2254
2255 if (new_label != 0)
2256 new_label = get_label_before (new_label);
2257 else
2258 new_label = find_end_label (simple_return_rtx);
2259
2260 if (new_label)
2261 {
2262 delay_list
2263 = add_to_delay_list (copy_delay_slot_insn (next_trial),
2264 delay_list);
2265 slots_filled++;
2266 reorg_redirect_jump (trial, new_label);
2267
2268 /* If we merged because we both jumped to the same place,
2269 redirect the original insn also. */
2270 if (target)
2271 reorg_redirect_jump (insn, new_label);
2272 }
2273 }
2274 }
2275
2276 /* If this is an unconditional jump, then try to get insns from the
2277 target of the jump. */
2278 if (JUMP_P (insn)
2279 && simplejump_p (insn)
2280 && slots_filled != slots_to_fill)
2281 delay_list
2282 = fill_slots_from_thread (insn, const_true_rtx,
2283 next_active_insn (JUMP_LABEL (insn)),
2284 NULL, 1, 1,
2285 own_thread_p (JUMP_LABEL (insn),
2286 JUMP_LABEL (insn), 0),
2287 slots_to_fill, &slots_filled,
2288 delay_list);
2289
2290 if (delay_list)
2291 unfilled_slots_base[i]
2292 = emit_delay_sequence (insn, delay_list, slots_filled);
2293
2294 if (slots_to_fill == slots_filled)
2295 unfilled_slots_base[i] = 0;
2296
2297 note_delay_statistics (slots_filled, 0);
2298 }
2299 }
2300 \f
2301 /* Follow any unconditional jump at LABEL, for the purpose of redirecting JUMP;
2302 return the ultimate label reached by any such chain of jumps.
2303 Return a suitable return rtx if the chain ultimately leads to a
2304 return instruction.
2305 If LABEL is not followed by a jump, return LABEL.
2306 If the chain loops or we can't find the end, return LABEL,
2307 since that tells the caller to avoid changing the insn.
2308 If the returned label is obtained by following a REG_CROSSING_JUMP
2309 jump, set *CROSSING to true, otherwise set it to false. */
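/* E.g. (a sketch): if the only active insn at L1 is "jmp L2" and the
   one at L2 is "jmp L3", following the chain from L1 yields L3. A chain
   deeper than 10 jumps, or one that loops back to LABEL, yields LABEL
   itself so the caller leaves the insn alone.  */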
2310
2311 static rtx
2312 follow_jumps (rtx label, rtx jump, bool *crossing)
2313 {
2314 rtx insn;
2315 rtx next;
2316 rtx value = label;
2317 int depth;
2318
2319 *crossing = false;
2320 if (ANY_RETURN_P (label))
2321 return label;
2322 for (depth = 0;
2323 (depth < 10
2324 && (insn = next_active_insn (value)) != 0
2325 && JUMP_P (insn)
2326 && JUMP_LABEL (insn) != NULL_RTX
2327 && ((any_uncondjump_p (insn) && onlyjump_p (insn))
2328 || ANY_RETURN_P (PATTERN (insn)))
2329 && (next = NEXT_INSN (insn))
2330 && BARRIER_P (next));
2331 depth++)
2332 {
2333 rtx this_label = JUMP_LABEL (insn);
2334 rtx tem;
2335
2336 /* If we have found a cycle, make the insn jump to itself. */
2337 if (this_label == label)
2338 return label;
2339 if (ANY_RETURN_P (this_label))
2340 return this_label;
2341 tem = next_active_insn (this_label);
2342 if (tem && JUMP_TABLE_DATA_P (tem))
2343 break;
2344
2345 if (!targetm.can_follow_jump (jump, insn))
2346 break;
2347 if (!*crossing)
2348 *crossing
2349 = find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX) != NULL_RTX;
2350 value = this_label;
2351 }
2352 if (depth == 10)
2353 return label;
2354 return value;
2355 }
2356
2357 /* Try to find insns to place in delay slots.
2358
2359 INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
2360 or is an unconditional branch if CONDITION is const_true_rtx.
2361 *PSLOTS_FILLED is updated with the number of slots that we have filled.
2362
2363 THREAD is a flow of control: either the insns to be executed if the
2364 branch is true, or those if it is false; THREAD_IF_TRUE says which.
2365
2366 OPPOSITE_THREAD is the thread in the opposite direction. It is used
2367 to see if any potential delay slot insns set things needed there.
2368
2369 LIKELY is nonzero if it is extremely likely that the branch will be
2370 taken and THREAD_IF_TRUE is set. This is used for the branch at the
2371 end of a loop back up to the top.
2372
2373 OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the
2374 thread. I.e., it is the fallthrough code of our jump or the target of the
2375 jump when we are the only jump going there.
2376
2377 If OWN_THREAD is false, it must be the "true" thread of a jump. In that
2378 case, we can only take insns from the head of the thread for our delay
2379 slot. We then adjust the jump to point after the insns we have taken. */
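/* Sketch of the not-owned case: for "beq L7" where L7 has other users,
   only a prefix of the insns at L7 can be consumed; NEW_THREAD then
   points past that prefix and the branch is redirected to a (possibly
   new) label there.  */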
2380
2381 static rtx
2382 fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
2383 rtx opposite_thread, int likely, int thread_if_true,
2384 int own_thread, int slots_to_fill,
2385 int *pslots_filled, rtx delay_list)
2386 {
2387 rtx new_thread;
2388 struct resources opposite_needed, set, needed;
2389 rtx trial;
2390 int lose = 0;
2391 int must_annul = 0;
2392 int flags;
2393
2394 /* Validate our arguments. */
2395 gcc_assert(condition != const_true_rtx || thread_if_true);
2396 gcc_assert(own_thread || thread_if_true);
2397
2398 flags = get_jump_flags (insn, JUMP_LABEL (insn));
2399
2400 /* If our thread is the end of the subroutine, we can't get any delay
2401 insns from that. */
2402 if (thread == NULL_RTX || ANY_RETURN_P (thread))
2403 return delay_list;
2404
2405 /* If this is an unconditional branch, nothing is needed at the
2406 opposite thread. Otherwise, compute what is needed there. */
2407 if (condition == const_true_rtx)
2408 CLEAR_RESOURCE (&opposite_needed);
2409 else
2410 mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
2411
2412 /* If the insn at THREAD can be split, do it here to avoid having to
2413 update THREAD and NEW_THREAD if it is done in the loop below. Also
2414 initialize NEW_THREAD. */
2415
2416 new_thread = thread = try_split (PATTERN (thread), thread, 0);
2417
2418 /* Scan insns at THREAD. We are looking for an insn that can be removed
2419 from THREAD (it neither sets nor references resources that were set
2420 ahead of it and it doesn't set anything needed by the insns ahead of
2421 it) and that either can be placed in an annulling insn or isn't
2422 needed at OPPOSITE_THREAD. */
2423
2424 CLEAR_RESOURCE (&needed);
2425 CLEAR_RESOURCE (&set);
2426
2427 /* If we do not own this thread, we must stop as soon as we find
2428 something that we can't put in a delay slot, since all we can do
2429 is branch into THREAD at a later point. Therefore, labels stop
2430 the search if this is not the `true' thread. */
2431
2432 for (trial = thread;
2433 ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
2434 trial = next_nonnote_insn (trial))
2435 {
2436 rtx pat, old_trial;
2437
2438 /* If we have passed a label, we no longer own this thread. */
2439 if (LABEL_P (trial))
2440 {
2441 own_thread = 0;
2442 continue;
2443 }
2444
2445 pat = PATTERN (trial);
2446 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2447 continue;
2448
2449 /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
2450 don't separate or copy insns that set and use CC0. */
2451 if (! insn_references_resource_p (trial, &set, true)
2452 && ! insn_sets_resource_p (trial, &set, true)
2453 && ! insn_sets_resource_p (trial, &needed, true)
2454 #ifdef HAVE_cc0
2455 && ! (reg_mentioned_p (cc0_rtx, pat)
2456 && (! own_thread || ! sets_cc0_p (pat)))
2457 #endif
2458 && ! can_throw_internal (trial))
2459 {
2460 rtx prior_insn;
2461
2462 /* If TRIAL is redundant with some insn before INSN, we don't
2463 actually need to add it to the delay list; we can merely pretend
2464 we did. */
2465 if ((prior_insn = redundant_insn (trial, insn, delay_list)))
2466 {
2467 fix_reg_dead_note (prior_insn, insn);
2468 if (own_thread)
2469 {
2470 update_block (trial, thread);
2471 if (trial == thread)
2472 {
2473 thread = next_active_insn (thread);
2474 if (new_thread == trial)
2475 new_thread = thread;
2476 }
2477
2478 delete_related_insns (trial);
2479 }
2480 else
2481 {
2482 update_reg_unused_notes (prior_insn, trial);
2483 new_thread = next_active_insn (trial);
2484 }
2485
2486 continue;
2487 }
2488
2489 /* There are two ways we can win: If TRIAL doesn't set anything
2490 needed at the opposite thread and can't trap, or if it can
2491 go into an annulled delay slot. */
2492 if (!must_annul
2493 && (condition == const_true_rtx
2494 || (! insn_sets_resource_p (trial, &opposite_needed, true)
2495 && ! may_trap_or_fault_p (pat)
2496 && ! RTX_FRAME_RELATED_P (trial))))
2497 {
2498 old_trial = trial;
2499 trial = try_split (pat, trial, 0);
2500 if (new_thread == old_trial)
2501 new_thread = trial;
2502 if (thread == old_trial)
2503 thread = trial;
2504 pat = PATTERN (trial);
2505 if (eligible_for_delay (insn, *pslots_filled, trial, flags))
2506 goto winner;
2507 }
2508 else if (0
2509 #ifdef ANNUL_IFTRUE_SLOTS
2510 || ! thread_if_true
2511 #endif
2512 #ifdef ANNUL_IFFALSE_SLOTS
2513 || thread_if_true
2514 #endif
2515 )
2516 {
2517 old_trial = trial;
2518 trial = try_split (pat, trial, 0);
2519 if (new_thread == old_trial)
2520 new_thread = trial;
2521 if (thread == old_trial)
2522 thread = trial;
2523 pat = PATTERN (trial);
2524 if ((must_annul || delay_list == NULL) && (thread_if_true
2525 ? check_annul_list_true_false (0, delay_list)
2526 && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
2527 : check_annul_list_true_false (1, delay_list)
2528 && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
2529 {
2530 rtx temp;
2531
2532 must_annul = 1;
2533 winner:
2534
2535 #ifdef HAVE_cc0
2536 if (reg_mentioned_p (cc0_rtx, pat))
2537 link_cc0_insns (trial);
2538 #endif
2539
2540 /* If we own this thread, delete the insn. If this is the
2541 destination of a branch, show that a basic block status
2542 may have been updated. In any case, mark the new
2543 starting point of this thread. */
2544 if (own_thread)
2545 {
2546 rtx note;
2547
2548 update_block (trial, thread);
2549 if (trial == thread)
2550 {
2551 thread = next_active_insn (thread);
2552 if (new_thread == trial)
2553 new_thread = thread;
2554 }
2555
2556 /* We are moving this insn, not deleting it. We must
2557 temporarily increment the use count on any referenced
2558 label lest it be deleted by delete_related_insns. */
2559 for (note = REG_NOTES (trial);
2560 note != NULL_RTX;
2561 note = XEXP (note, 1))
2562 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2563 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2564 {
2565 /* REG_LABEL_OPERAND could be
2566 NOTE_INSN_DELETED_LABEL too. */
2567 if (LABEL_P (XEXP (note, 0)))
2568 LABEL_NUSES (XEXP (note, 0))++;
2569 else
2570 gcc_assert (REG_NOTE_KIND (note)
2571 == REG_LABEL_OPERAND);
2572 }
2573 if (jump_to_label_p (trial))
2574 LABEL_NUSES (JUMP_LABEL (trial))++;
2575
2576 delete_related_insns (trial);
2577
2578 for (note = REG_NOTES (trial);
2579 note != NULL_RTX;
2580 note = XEXP (note, 1))
2581 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2582 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2583 {
2584 /* REG_LABEL_OPERAND could be
2585 NOTE_INSN_DELETED_LABEL too. */
2586 if (LABEL_P (XEXP (note, 0)))
2587 LABEL_NUSES (XEXP (note, 0))--;
2588 else
2589 gcc_assert (REG_NOTE_KIND (note)
2590 == REG_LABEL_OPERAND);
2591 }
2592 if (jump_to_label_p (trial))
2593 LABEL_NUSES (JUMP_LABEL (trial))--;
2594 }
2595 else
2596 new_thread = next_active_insn (trial);
2597
2598 temp = own_thread ? trial : copy_delay_slot_insn (trial);
2599 if (thread_if_true)
2600 INSN_FROM_TARGET_P (temp) = 1;
2601
2602 delay_list = add_to_delay_list (temp, delay_list);
2603
2604 if (slots_to_fill == ++(*pslots_filled))
2605 {
2606 /* Even though we have filled all the slots, we
2607 may be branching to a location that has a
2608 redundant insn. Skip any if so. */
2609 while (new_thread && ! own_thread
2610 && ! insn_sets_resource_p (new_thread, &set, true)
2611 && ! insn_sets_resource_p (new_thread, &needed,
2612 true)
2613 && ! insn_references_resource_p (new_thread,
2614 &set, true)
2615 && (prior_insn
2616 = redundant_insn (new_thread, insn,
2617 delay_list)))
2618 {
2619 /* We know we do not own the thread, so no need
2620 to call update_block and delete_insn. */
2621 fix_reg_dead_note (prior_insn, insn);
2622 update_reg_unused_notes (prior_insn, new_thread);
2623 new_thread = next_active_insn (new_thread);
2624 }
2625 break;
2626 }
2627
2628 continue;
2629 }
2630 }
2631 }
2632
2633 /* This insn can't go into a delay slot. */
2634 lose = 1;
2635 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2636 mark_referenced_resources (trial, &needed, true);
2637
2638 /* Ensure we don't put insns between the setting of cc and the comparison
2639 by moving a setting of cc into an earlier delay slot since these insns
2640 could clobber the condition code. */
2641 set.cc = 1;
2642
2643 /* If this insn is a register-register copy and the next insn has
2644 a use of our destination, change it to use our source. That way,
2645 it will become a candidate for our delay slot the next time
2646 through this loop. This case occurs commonly in loops that
2647 scan a list.
2648
2649 We could check for more complex cases than those tested below,
2650 but it doesn't seem worth it. It might also be a good idea to try
2651 to swap the two insns. That might do better.
2652
2653 We can't do this if the next insn modifies our destination, because
2654 that would make the replacement into the insn invalid. We also can't
2655 do this if it modifies our source, because it might be an earlyclobber
2656 operand. This latter test also prevents updating the contents of
2657 a PRE_INC. We also can't do this if there's overlap of source and
2658 destination. Overlap may happen for larger-than-register-size modes. */
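/* Hypothetical example: given the copy "r1 = r2" followed by
   "r3 = r1 + 4", rewriting the second insn to "r3 = r2 + 4" leaves the
   copy with no immediate use, so it can become a delay-slot candidate
   on a later iteration of this loop.  */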
2659
2660 if (NONJUMP_INSN_P (trial) && GET_CODE (pat) == SET
2661 && REG_P (SET_SRC (pat))
2662 && REG_P (SET_DEST (pat))
2663 && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat)))
2664 {
2665 rtx next = next_nonnote_insn (trial);
2666
2667 if (next && NONJUMP_INSN_P (next)
2668 && GET_CODE (PATTERN (next)) != USE
2669 && ! reg_set_p (SET_DEST (pat), next)
2670 && ! reg_set_p (SET_SRC (pat), next)
2671 && reg_referenced_p (SET_DEST (pat), PATTERN (next))
2672 && ! modified_in_p (SET_DEST (pat), next))
2673 validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
2674 }
2675 }
2676
2677 /* If we stopped on a branch insn that has delay slots, see if we can
2678 steal some of the insns in those slots. */
2679 if (trial && NONJUMP_INSN_P (trial)
2680 && GET_CODE (PATTERN (trial)) == SEQUENCE
2681 && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
2682 {
2683 /* If this is the `true' thread, we will want to follow the jump,
2684 so we can only do this if we have taken everything up to here. */
2685 if (thread_if_true && trial == new_thread)
2686 {
2687 delay_list
2688 = steal_delay_list_from_target (insn, condition, PATTERN (trial),
2689 delay_list, &set, &needed,
2690 &opposite_needed, slots_to_fill,
2691 pslots_filled, &must_annul,
2692 &new_thread);
2693 /* If we owned the thread and are told that it branched
2694 elsewhere, make sure we own the thread at the new location. */
2695 if (own_thread && trial != new_thread)
2696 own_thread = own_thread_p (new_thread, new_thread, 0);
2697 }
2698 else if (! thread_if_true)
2699 delay_list
2700 = steal_delay_list_from_fallthrough (insn, condition,
2701 PATTERN (trial),
2702 delay_list, &set, &needed,
2703 &opposite_needed, slots_to_fill,
2704 pslots_filled, &must_annul);
2705 }
2706
2707 /* If we haven't found anything for this delay slot and it is very
2708 likely that the branch will be taken, see if the insn at our target
2709 increments or decrements a register with an increment that does not
2710 depend on the destination register. If so, try to place the opposite
2711 arithmetic insn after the jump insn and put the arithmetic insn in the
2712 delay slot. If we can't do this, return. */
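/* Sketch with hypothetical registers: if the likely-taken target begins
   with "r9 = r9 + 8", that increment goes into the delay slot and a
   compensating "r9 = r9 - 8" is emitted after the branch, so the
   fall-through path nets to zero while the taken path keeps the
   increment.  */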
2713 if (delay_list == 0 && likely
2714 && new_thread && !ANY_RETURN_P (new_thread)
2715 && NONJUMP_INSN_P (new_thread)
2716 && !RTX_FRAME_RELATED_P (new_thread)
2717 && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
2718 && asm_noperands (PATTERN (new_thread)) < 0)
2719 {
2720 rtx pat = PATTERN (new_thread);
2721 rtx dest;
2722 rtx src;
2723
2724 trial = new_thread;
2725 pat = PATTERN (trial);
2726
2727 if (!NONJUMP_INSN_P (trial)
2728 || GET_CODE (pat) != SET
2729 || ! eligible_for_delay (insn, 0, trial, flags)
2730 || can_throw_internal (trial))
2731 return 0;
2732
2733 dest = SET_DEST (pat), src = SET_SRC (pat);
2734 if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
2735 && rtx_equal_p (XEXP (src, 0), dest)
2736 && (!FLOAT_MODE_P (GET_MODE (src))
2737 || flag_unsafe_math_optimizations)
2738 && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
2739 && ! side_effects_p (pat))
2740 {
2741 rtx other = XEXP (src, 1);
2742 rtx new_arith;
2743 rtx ninsn;
2744
2745 /* If this is a constant adjustment, use the same code with
2746 the negated constant. Otherwise, reverse the sense of the
2747 arithmetic. */
2748 if (CONST_INT_P (other))
2749 new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
2750 negate_rtx (GET_MODE (src), other));
2751 else
2752 new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
2753 GET_MODE (src), dest, other);
2754
2755 ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith),
2756 insn);
2757
2758 if (recog_memoized (ninsn) < 0
2759 || (extract_insn (ninsn), ! constrain_operands (1)))
2760 {
2761 delete_related_insns (ninsn);
2762 return 0;
2763 }
2764
2765 if (own_thread)
2766 {
2767 update_block (trial, thread);
2768 if (trial == thread)
2769 {
2770 thread = next_active_insn (thread);
2771 if (new_thread == trial)
2772 new_thread = thread;
2773 }
2774 delete_related_insns (trial);
2775 }
2776 else
2777 new_thread = next_active_insn (trial);
2778
2779 ninsn = own_thread ? trial : copy_delay_slot_insn (trial);
2780 if (thread_if_true)
2781 INSN_FROM_TARGET_P (ninsn) = 1;
2782
2783 delay_list = add_to_delay_list (ninsn, NULL_RTX);
2784 (*pslots_filled)++;
2785 }
2786 }
2787
2788 if (delay_list && must_annul)
2789 INSN_ANNULLED_BRANCH_P (insn) = 1;
2790
2791 /* If we are to branch into the middle of this thread, find an appropriate
2792 label or make a new one if none, and redirect INSN to it. If we hit the
2793 end of the function, use the end-of-function label. */
2794 if (new_thread != thread)
2795 {
2796 rtx label;
2797 bool crossing = false;
2798
2799 gcc_assert (thread_if_true);
2800
2801 if (new_thread && simplejump_or_return_p (new_thread)
2802 && redirect_with_delay_list_safe_p (insn,
2803 JUMP_LABEL (new_thread),
2804 delay_list))
2805 new_thread = follow_jumps (JUMP_LABEL (new_thread), insn, &crossing);
2806
2807 if (ANY_RETURN_P (new_thread))
2808 label = find_end_label (new_thread);
2809 else if (LABEL_P (new_thread))
2810 label = new_thread;
2811 else
2812 label = get_label_before (new_thread);
2813
2814 if (label)
2815 {
2816 reorg_redirect_jump (insn, label);
2817 if (crossing)
2818 set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
2819 }
2820 }
2821
2822 return delay_list;
2823 }
2824 \f
2825 /* Make another attempt to find insns to place in delay slots.
2826
2827 We previously looked for insns located in front of the delay insn
2828 and, for non-jump delay insns, located behind the delay insn.
2829
2830 Here we try to schedule only jump insns, moving insns from either
2831 the target or the following code into the delay slot. If annulling is
2832 supported, we are likely to succeed. Otherwise, we can do this only
2833 if safe. */
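/* Sketch: for "beq L2" predicted taken, candidates are sought first in
   the thread at L2 (annulled when the branch turns out false, if they
   are unsafe to execute on fall-through), then on the fall-through
   path; for a not-taken prediction the two threads are tried in the
   opposite order.  */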
2834
2835 static void
2836 fill_eager_delay_slots (void)
2837 {
2838 rtx insn;
2839 int i;
2840 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
2841
2842 for (i = 0; i < num_unfilled_slots; i++)
2843 {
2844 rtx condition;
2845 rtx target_label, insn_at_target, fallthrough_insn;
2846 rtx delay_list = 0;
2847 int own_target;
2848 int own_fallthrough;
2849 int prediction, slots_to_fill, slots_filled;
2850
2851 insn = unfilled_slots_base[i];
2852 if (insn == 0
2853 || INSN_DELETED_P (insn)
2854 || !JUMP_P (insn)
2855 || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
2856 continue;
2857
2858 slots_to_fill = num_delay_slots (insn);
2859 Some machine descriptions define instructions to have
2860 delay slots only in certain circumstances which may depend on
2861 nearby insns (which change due to reorg's actions).
2862
2863 For example, the PA port normally has delay slots for unconditional
2864 jumps.
2865
2866 However, the PA port claims such jumps do not have a delay slot
2867 if they are immediate successors of certain CALL_INSNs. This
2868 allows the port to favor filling the delay slot of the call with
2869 the unconditional jump. */
2870 if (slots_to_fill == 0)
2871 continue;
2872
2873 slots_filled = 0;
2874 target_label = JUMP_LABEL (insn);
2875 condition = get_branch_condition (insn, target_label);
2876
2877 if (condition == 0)
2878 continue;
2879
2880 /* Get the next active fallthrough and target insns and see if we own
2881 them. Then see whether the branch is likely true. We don't need
2882 to do a lot of this for unconditional branches. */
2883
2884 insn_at_target = first_active_target_insn (target_label);
2885 own_target = own_thread_p (target_label, target_label, 0);
2886
2887 if (condition == const_true_rtx)
2888 {
2889 own_fallthrough = 0;
2890 fallthrough_insn = 0;
2891 prediction = 2;
2892 }
2893 else
2894 {
2895 fallthrough_insn = next_active_insn (insn);
2896 own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
2897 prediction = mostly_true_jump (insn);
2898 }
2899
2900 /* If this insn is expected to branch, first try to get insns from our
2901 target, then our fallthrough insns. If it is not expected to branch,
2902 try the other order. */
2903
2904 if (prediction > 0)
2905 {
2906 delay_list
2907 = fill_slots_from_thread (insn, condition, insn_at_target,
2908 fallthrough_insn, prediction == 2, 1,
2909 own_target,
2910 slots_to_fill, &slots_filled, delay_list);
2911
2912 if (delay_list == 0 && own_fallthrough)
2913 {
2914 /* Even though we didn't find anything for delay slots,
2915 we might have found a redundant insn which we deleted
2916 from the thread that was filled. So we have to recompute
2917 the next insn at the target. */
2918 target_label = JUMP_LABEL (insn);
2919 insn_at_target = first_active_target_insn (target_label);
2920
2921 delay_list
2922 = fill_slots_from_thread (insn, condition, fallthrough_insn,
2923 insn_at_target, 0, 0,
2924 own_fallthrough,
2925 slots_to_fill, &slots_filled,
2926 delay_list);
2927 }
2928 }
2929 else
2930 {
2931 if (own_fallthrough)
2932 delay_list
2933 = fill_slots_from_thread (insn, condition, fallthrough_insn,
2934 insn_at_target, 0, 0,
2935 own_fallthrough,
2936 slots_to_fill, &slots_filled,
2937 delay_list);
2938
2939 if (delay_list == 0)
2940 delay_list
2941 = fill_slots_from_thread (insn, condition, insn_at_target,
2942 next_active_insn (insn), 0, 1,
2943 own_target,
2944 slots_to_fill, &slots_filled,
2945 delay_list);
2946 }
2947
2948 if (delay_list)
2949 unfilled_slots_base[i]
2950 = emit_delay_sequence (insn, delay_list, slots_filled);
2951
2952 if (slots_to_fill == slots_filled)
2953 unfilled_slots_base[i] = 0;
2954
2955 note_delay_statistics (slots_filled, 1);
2956 }
2957 }
2958 \f
2959 static void delete_computation (rtx insn);
2960
2961 /* Recursively delete prior insns that compute the value (used only by INSN
2962 which the caller is deleting) stored in the register mentioned by NOTE
2963 which is a REG_DEAD note associated with INSN. */
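/* Hypothetical example: if INSN carries a REG_DEAD note for (reg 5) and
   the nearest prior insn touching it is a plain "(set (reg 5) ...)"
   with no other side effects, that setter computed a value used only
   by INSN and is deleted, recursing via its own REG_DEAD notes.  */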
2964
2965 static void
2966 delete_prior_computation (rtx note, rtx insn)
2967 {
2968 rtx our_prev;
2969 rtx reg = XEXP (note, 0);
2970
2971 for (our_prev = prev_nonnote_insn (insn);
2972 our_prev && (NONJUMP_INSN_P (our_prev)
2973 || CALL_P (our_prev));
2974 our_prev = prev_nonnote_insn (our_prev))
2975 {
2976 rtx pat = PATTERN (our_prev);
2977
2978 /* If we reach a CALL which is not calling a const function
2979 or the callee pops the arguments, then give up. */
2980 if (CALL_P (our_prev)
2981 && (! RTL_CONST_CALL_P (our_prev)
2982 || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
2983 break;
2984
2985 /* If we reach a SEQUENCE, it is too complex to try to
2986 do anything with it, so give up. We can be run during
2987 and after reorg, so SEQUENCE rtl can legitimately show
2988 up here. */
2989 if (GET_CODE (pat) == SEQUENCE)
2990 break;
2991
2992 if (GET_CODE (pat) == USE
2993 && NONJUMP_INSN_P (XEXP (pat, 0)))
2994 /* reorg creates USEs that look like this. We leave them
2995 alone because reorg needs them for its own purposes. */
2996 break;
2997
2998 if (reg_set_p (reg, pat))
2999 {
3000 if (side_effects_p (pat) && !CALL_P (our_prev))
3001 break;
3002
3003 if (GET_CODE (pat) == PARALLEL)
3004 {
3005 /* If we find a SET of something else, we can't
3006 delete the insn. */
3007
3008 int i;
3009
3010 for (i = 0; i < XVECLEN (pat, 0); i++)
3011 {
3012 rtx part = XVECEXP (pat, 0, i);
3013
3014 if (GET_CODE (part) == SET
3015 && SET_DEST (part) != reg)
3016 break;
3017 }
3018
3019 if (i == XVECLEN (pat, 0))
3020 delete_computation (our_prev);
3021 }
3022 else if (GET_CODE (pat) == SET
3023 && REG_P (SET_DEST (pat)))
3024 {
3025 int dest_regno = REGNO (SET_DEST (pat));
3026 int dest_endregno = END_REGNO (SET_DEST (pat));
3027 int regno = REGNO (reg);
3028 int endregno = END_REGNO (reg);
3029
3030 if (dest_regno >= regno
3031 && dest_endregno <= endregno)
3032 delete_computation (our_prev);
3033
3034 /* We may have a multi-word hard register and some, but not
3035 all, of the words of the register are needed in subsequent
3036 insns. Write REG_UNUSED notes for those parts that were not
3037 needed. */
3038 else if (dest_regno <= regno
3039 && dest_endregno >= endregno)
3040 {
3041 int i;
3042
3043 add_reg_note (our_prev, REG_UNUSED, reg);
3044
3045 for (i = dest_regno; i < dest_endregno; i++)
3046 if (! find_regno_note (our_prev, REG_UNUSED, i))
3047 break;
3048
3049 if (i == dest_endregno)
3050 delete_computation (our_prev);
3051 }
3052 }
3053
3054 break;
3055 }
3056
3057 /* If PAT references the register that dies here, it is an
3058 additional use. Hence any prior SET isn't dead. However, this
3059 insn becomes the new place for the REG_DEAD note. */
3060 if (reg_overlap_mentioned_p (reg, pat))
3061 {
3062 XEXP (note, 1) = REG_NOTES (our_prev);
3063 REG_NOTES (our_prev) = note;
3064 break;
3065 }
3066 }
3067 }
3068
3069 /* Delete INSN and recursively delete insns that compute values used only
3070 by INSN. This uses the REG_DEAD notes computed during flow analysis.
3071
3072 Look at all our REG_DEAD notes. If a previous insn does nothing other
3073 than set a register that dies in this insn, we can delete that insn
3074 as well.
3075
3076 On machines with CC0, if CC0 is used in this insn, we may be able to
3077 delete the insn that set it. */
3078
3079 static void
3080 delete_computation (rtx insn)
3081 {
3082 rtx note, next;
3083
3084 #ifdef HAVE_cc0
3085 if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
3086 {
3087 rtx prev = prev_nonnote_insn (insn);
3088 /* We assume that at this stage
3089 CC's are always set explicitly
3090 and always immediately before the jump that
3091 will use them. So if the previous insn
3092 exists to set the CC's, delete it
3093 (unless it performs auto-increments, etc.). */
3094 if (prev && NONJUMP_INSN_P (prev)
3095 && sets_cc0_p (PATTERN (prev)))
3096 {
3097 if (sets_cc0_p (PATTERN (prev)) > 0
3098 && ! side_effects_p (PATTERN (prev)))
3099 delete_computation (prev);
3100 else
3101 /* Otherwise, show that cc0 won't be used. */
3102 add_reg_note (prev, REG_UNUSED, cc0_rtx);
3103 }
3104 }
3105 #endif
3106
3107 for (note = REG_NOTES (insn); note; note = next)
3108 {
3109 next = XEXP (note, 1);
3110
3111 if (REG_NOTE_KIND (note) != REG_DEAD
3112 /* Verify that the REG_NOTE is legitimate. */
3113 || !REG_P (XEXP (note, 0)))
3114 continue;
3115
3116 delete_prior_computation (note, insn);
3117 }
3118
3119 delete_related_insns (insn);
3120 }
3121
3122 /* If all INSN does is set the pc, delete it,
3123 and delete the insn that set the condition codes for it
3124 if that is what the previous insn did. */
3125
3126 static void
3127 delete_jump (rtx insn)
3128 {
3129 rtx set = single_set (insn);
3130
3131 if (set && GET_CODE (SET_DEST (set)) == PC)
3132 delete_computation (insn);
3133 }
3134
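/* Return the label before the first active insn after X, scanning
   backward no further than SCAN_LIMIT; return NULL_RTX if SCAN_LIMIT
   (or the start of the insn chain) is reached before a label.  */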
3135 static rtx
3136 label_before_next_insn (rtx x, rtx scan_limit)
3137 {
3138 rtx insn = next_active_insn (x);
3139 while (insn)
3140 {
3141 insn = PREV_INSN (insn);
3142 if (insn == scan_limit || insn == NULL_RTX)
3143 return NULL_RTX;
3144 if (LABEL_P (insn))
3145 break;
3146 }
3147 return insn;
3148 }
3149
3150 \f
3151 /* Once we have tried two ways to fill a delay slot, make a pass over the
3152 code to try to improve the results and to do such things as more jump
3153 threading. */
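/* One such improvement (a sketch): "beq L4" immediately followed by
   "jmp L9", with L4 right after the jmp, is inverted into "bne L9" and
   the unconditional jump is deleted; threading through chains of jumps
   and deleting now-useless branches are handled similarly below.  */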
3154
3155 static void
3156 relax_delay_slots (rtx first)
3157 {
3158 rtx insn, next, pat;
3159 rtx trial, delay_insn, target_label;
3160
3161 /* Look at every JUMP_INSN and see if we can improve it. */
3162 for (insn = first; insn; insn = next)
3163 {
3164 rtx other;
3165 bool crossing;
3166
3167 next = next_active_insn (insn);
3168
3169 /* If this is a jump insn, see if it now jumps to a jump, jumps to
3170 the next insn, or jumps to a label that is not the last of a
3171 group of consecutive labels. */
3172 if (JUMP_P (insn)
3173 && (condjump_p (insn) || condjump_in_parallel_p (insn))
3174 && !ANY_RETURN_P (target_label = JUMP_LABEL (insn)))
3175 {
3176 target_label
3177 = skip_consecutive_labels (follow_jumps (target_label, insn,
3178 &crossing));
3179 if (ANY_RETURN_P (target_label))
3180 target_label = find_end_label (target_label);
3181
3182 if (target_label && next_active_insn (target_label) == next
3183 && ! condjump_in_parallel_p (insn))
3184 {
3185 delete_jump (insn);
3186 continue;
3187 }
3188
3189 if (target_label && target_label != JUMP_LABEL (insn))
3190 {
3191 reorg_redirect_jump (insn, target_label);
3192 if (crossing)
3193 set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
3194 }
3195
3196 /* See if this jump conditionally branches around an unconditional
3197 jump. If so, invert this jump and point it to the target of the
3198 second jump. */
3199 if (next && simplejump_or_return_p (next)
3200 && any_condjump_p (insn)
3201 && target_label
3202 && next_active_insn (target_label) == next_active_insn (next)
3203 && no_labels_between_p (insn, next))
3204 {
3205 rtx label = JUMP_LABEL (next);
3206
3207 /* Be careful how we do this to avoid deleting code or
3208 labels that are momentarily dead. See similar optimization
3209 in jump.c.
3210
3211 We also need to ensure we properly handle the case when
3212 invert_jump fails. */
3213
3214 ++LABEL_NUSES (target_label);
3215 if (!ANY_RETURN_P (label))
3216 ++LABEL_NUSES (label);
3217
3218 if (invert_jump (insn, label, 1))
3219 {
3220 delete_related_insns (next);
3221 next = insn;
3222 }
3223
3224 if (!ANY_RETURN_P (label))
3225 --LABEL_NUSES (label);
3226
3227 if (--LABEL_NUSES (target_label) == 0)
3228 delete_related_insns (target_label);
3229
3230 continue;
3231 }
3232 }
3233
3234 /* If this is an unconditional jump and the previous insn is a
3235 conditional jump, try reversing the condition of the previous
3236 insn and swapping our targets. The next pass might be able to
3237 fill the slots.
3238
3239 Don't do this if we expect the conditional branch to be true, because
3240 we would then be making the more common case longer. */
3241
3242 if (simplejump_or_return_p (insn)
3243 && (other = prev_active_insn (insn)) != 0
3244 && any_condjump_p (other)
3245 && no_labels_between_p (other, insn)
3246 && mostly_true_jump (other) < 0)
3247 {
3248 rtx other_target = JUMP_LABEL (other);
3249 target_label = JUMP_LABEL (insn);
3250
3251 if (invert_jump (other, target_label, 0))
3252 reorg_redirect_jump (insn, other_target);
3253 }
3254
3255 /* Now look only at cases where we have a filled delay slot. */
3256 if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE)
3257 continue;
3258
3259 pat = PATTERN (insn);
3260 delay_insn = XVECEXP (pat, 0, 0);
3261
3262 /* See if the first insn in the delay slot is redundant with some
3263 previous insn. Remove it from the delay slot if so; then set up
3264 to reprocess this insn. */
3265 if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
3266 {
3267 delete_from_delay_slot (XVECEXP (pat, 0, 1));
3268 next = prev_active_insn (next);
3269 continue;
3270 }
3271
3272 /* See if we have a RETURN insn with a filled delay slot followed
3273 by a RETURN insn with an unfilled delay slot. If so, we can delete
3274 the first RETURN (but not its delay insn). This gives the same
3275 effect in fewer instructions.
3276
3277 Only do so if optimizing for size, since this results in slower but
3278 smaller code. */
3279 if (optimize_function_for_size_p (cfun)
3280 && ANY_RETURN_P (PATTERN (delay_insn))
3281 && next
3282 && JUMP_P (next)
3283 && PATTERN (next) == PATTERN (delay_insn))
3284 {
3285 rtx after;
3286 int i;
3287
3288 /* Delete the RETURN and just execute the delay list insns.
3289
3290 We do this by deleting the INSN containing the SEQUENCE, then
3291 re-emitting the insns separately, and then deleting the RETURN.
3292 This allows the count of the jump target to be properly
3293 decremented.
3294
3295 Note that we need to change the INSN_UID of the re-emitted insns
3296 since it is used to hash the insns for mark_target_live_regs and
3297 the re-emitted insns will no longer be wrapped up in a SEQUENCE.
3298
3299 Clear the from target bit, since these insns are no longer
3300 in delay slots. */
3301 for (i = 0; i < XVECLEN (pat, 0); i++)
3302 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3303
3304 trial = PREV_INSN (insn);
3305 delete_related_insns (insn);
3306 gcc_assert (GET_CODE (pat) == SEQUENCE);
3307 add_insn_after (delay_insn, trial, NULL);
3308 after = delay_insn;
3309 for (i = 1; i < XVECLEN (pat, 0); i++)
3310 after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
3311 delete_scheduled_jump (delay_insn);
3312 continue;
3313 }
3314
3315 /* Now look only at the cases where we have a filled JUMP_INSN. */
3316 if (!JUMP_P (delay_insn)
3317 || !(condjump_p (delay_insn) || condjump_in_parallel_p (delay_insn)))
3318 continue;
3319
3320 target_label = JUMP_LABEL (delay_insn);
3321 if (target_label && ANY_RETURN_P (target_label))
3322 continue;
3323
3324 /* If this jump goes to another unconditional jump, thread it, but
3325 don't convert a jump into a RETURN here. */
3326 trial = skip_consecutive_labels (follow_jumps (target_label, delay_insn,
3327 &crossing));
3328 if (ANY_RETURN_P (trial))
3329 trial = find_end_label (trial);
3330
3331 if (trial && trial != target_label
3332 && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
3333 {
3334 reorg_redirect_jump (delay_insn, trial);
3335 target_label = trial;
3336 if (crossing)
3337 set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
3338 }
3339
3340 /* If the first insn at TARGET_LABEL is redundant with a previous
3341 insn, redirect the jump to the following insn and process again.
3342 We use next_real_insn instead of next_active_insn so we
3343 don't skip USE-markers, or we'll end up with incorrect
3344 liveness info. */
3345 trial = next_real_insn (target_label);
3346 if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
3347 && redundant_insn (trial, insn, 0)
3348 && ! can_throw_internal (trial))
3349 {
3350 /* Figure out where to emit the special USE insn so we don't
3351 later incorrectly compute register live/death info. */
3352 rtx tmp = next_active_insn (trial);
3353 if (tmp == 0)
3354 tmp = find_end_label (simple_return_rtx);
3355
3356 if (tmp)
3357 {
3358 /* Insert the special USE insn and update dataflow info. */
3359 update_block (trial, tmp);
3360
3361 /* Now emit a label before the special USE insn, and
3362 redirect our jump to the new label. */
3363 target_label = get_label_before (PREV_INSN (tmp));
3364 reorg_redirect_jump (delay_insn, target_label);
3365 next = insn;
3366 continue;
3367 }
3368 }
3369
3370 /* Similarly, if it is an unconditional jump with one insn in its
3371 delay list and that insn is redundant, thread the jump. */
3372 if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
3373 && XVECLEN (PATTERN (trial), 0) == 2
3374 && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
3375 && simplejump_or_return_p (XVECEXP (PATTERN (trial), 0, 0))
3376 && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
3377 {
3378 target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
3379 if (ANY_RETURN_P (target_label))
3380 target_label = find_end_label (target_label);
3381
3382 if (target_label
3383 && redirect_with_delay_slots_safe_p (delay_insn, target_label,
3384 insn))
3385 {
3386 reorg_redirect_jump (delay_insn, target_label);
3387 next = insn;
3388 continue;
3389 }
3390 }
3391
3392 /* See if we have a simple (conditional) jump that is useless. */
3393 if (! INSN_ANNULLED_BRANCH_P (delay_insn)
3394 && ! condjump_in_parallel_p (delay_insn)
3395 && prev_active_insn (target_label) == insn
3396 && ! BARRIER_P (prev_nonnote_insn (target_label))
3397 #ifdef HAVE_cc0
3398 /* If the last insn in the delay slot sets CC0 for some insn,
3399 various code assumes that it is in a delay slot. We could
3400 put it back where it belonged and delete the register notes,
3401 but it doesn't seem worthwhile in this uncommon case. */
3402 && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
3403 REG_CC_USER, NULL_RTX)
3404 #endif
3405 )
3406 {
3407 rtx after;
3408 int i;
3409
3410 /* All this insn does is execute its delay list and jump to the
3411 following insn. So delete the jump and just execute the delay
3412 list insns.
3413
We do this by deleting the INSN containing the SEQUENCE, then
re-emitting the insns separately, and then deleting the jump.
This allows the use count (LABEL_NUSES) of the jump target to be
properly decremented.
3418
3419 Note that we need to change the INSN_UID of the re-emitted insns
3420 since it is used to hash the insns for mark_target_live_regs and
3421 the re-emitted insns will no longer be wrapped up in a SEQUENCE.
3422
3423 Clear the from target bit, since these insns are no longer
3424 in delay slots. */
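/* Schematically (a hypothetical one-slot target): the SEQUENCE
"b L1 { add r1,r2 }" whose branch merely reaches the next active
insn becomes a plain "add r1,r2" falling through to L1. */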
3425 for (i = 0; i < XVECLEN (pat, 0); i++)
3426 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3427
3428 trial = PREV_INSN (insn);
3429 delete_related_insns (insn);
3430 gcc_assert (GET_CODE (pat) == SEQUENCE);
3431 add_insn_after (delay_insn, trial, NULL);
3432 after = delay_insn;
3433 for (i = 1; i < XVECLEN (pat, 0); i++)
3434 after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
3435 delete_scheduled_jump (delay_insn);
3436 continue;
3437 }
3438
3439 /* See if this is an unconditional jump around a single insn which is
3440 identical to the one in its delay slot. In this case, we can just
3441 delete the branch and the insn in its delay slot. */
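/* For instance (a hypothetical sketch): in

     b   L1 { slot: add r1,r2 }
     add r1,r2
 L1: ...

the branch skips a single insn identical to its own delay slot, so
deleting the branch and its slot copy leaves one "add r1,r2" falling
through into L1. */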
3442 if (next && NONJUMP_INSN_P (next)
3443 && label_before_next_insn (next, insn) == target_label
3444 && simplejump_p (insn)
3445 && XVECLEN (pat, 0) == 2
3446 && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
3447 {
3448 delete_related_insns (insn);
3449 continue;
3450 }
3451
3452 /* See if this jump (with its delay slots) conditionally branches
3453 around an unconditional jump (without delay slots). If so, invert
3454 this jump and point it to the target of the second jump. We cannot
3455 do this for annulled jumps, though. Again, don't convert a jump to
3456 a RETURN here. */
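/* A hypothetical sketch: "beq L1 { slot }" immediately followed by
"b L2", with L1 right after, becomes "bne L2 { slot }" falling
through into L1; the unconditional jump is then deleted and the
slot's INSN_FROM_TARGET_P bits are flipped below. */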
3457 if (! INSN_ANNULLED_BRANCH_P (delay_insn)
3458 && any_condjump_p (delay_insn)
3459 && next && simplejump_or_return_p (next)
3460 && next_active_insn (target_label) == next_active_insn (next)
3461 && no_labels_between_p (insn, next))
3462 {
3463 rtx label = JUMP_LABEL (next);
3464 rtx old_label = JUMP_LABEL (delay_insn);
3465
3466 if (ANY_RETURN_P (label))
3467 label = find_end_label (label);
3468
/* find_end_label can generate a new label (and can fail), so check
LABEL and re-verify the conditions before redirecting. */
3470 if (label
3471 && no_labels_between_p (insn, next)
3472 && redirect_with_delay_slots_safe_p (delay_insn, label, insn))
3473 {
3474 /* Be careful how we do this to avoid deleting code or labels
3475 that are momentarily dead. See similar optimization in
3476 jump.c */
3477 if (old_label)
3478 ++LABEL_NUSES (old_label);
3479
3480 if (invert_jump (delay_insn, label, 1))
3481 {
3482 int i;
3483
3484 /* Must update the INSN_FROM_TARGET_P bits now that
3485 the branch is reversed, so that mark_target_live_regs
3486 will handle the delay slot insn correctly. */
3487 for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
3488 {
3489 rtx slot = XVECEXP (PATTERN (insn), 0, i);
3490 INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
3491 }
3492
3493 delete_related_insns (next);
3494 next = insn;
3495 }
3496
3497 if (old_label && --LABEL_NUSES (old_label) == 0)
3498 delete_related_insns (old_label);
3499 continue;
3500 }
3501 }
3502
3503 /* If we own the thread opposite the way this insn branches, see if we
3504 can merge its delay slots with following insns. */
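/* (Here "owning" a thread means control can reach those insns only
through this insn, so rewriting them cannot disturb any other
path.) */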
3505 if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
3506 && own_thread_p (NEXT_INSN (insn), 0, 1))
3507 try_merge_delay_insns (insn, next);
3508 else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
3509 && own_thread_p (target_label, target_label, 0))
3510 try_merge_delay_insns (insn, next_active_insn (target_label));
3511
3512 /* If we get here, we haven't deleted INSN. But we may have deleted
3513 NEXT, so recompute it. */
3514 next = next_active_insn (insn);
3515 }
3516 }
3517 \f
3518
/* Look for filled jumps to the end-of-function label. We can try to
convert them into RETURN insns if the insns in the delay slot are
valid for the RETURN as well. */
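/* For example (a hypothetical sketch): a filled "b end_label { slot }",
where end_label immediately precedes the function's RETURN, can become
"return { slot }" provided the slot insn is also valid in a RETURN's
delay slot. */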
3522
3523 static void
3524 make_return_insns (rtx first)
3525 {
3526 rtx insn, jump_insn, pat;
3527 rtx real_return_label = function_return_label;
3528 rtx real_simple_return_label = function_simple_return_label;
3529 int slots, i;
3530
/* See if there is a RETURN insn in the function other than the ones we
made for the end-of-function labels. If so, arrange for anything we
can't change into a RETURN to jump to it instead. */
3534 for (insn = first; insn; insn = NEXT_INSN (insn))
3535 if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
3536 {
3537 rtx t = get_label_before (insn);
3538 if (PATTERN (insn) == ret_rtx)
3539 real_return_label = t;
3540 else
3541 real_simple_return_label = t;
3542 break;
3543 }
3544
/* Show an extra usage of REAL_RETURN_LABEL and REAL_SIMPLE_RETURN_LABEL
so they won't go away if they were equal to the end-of-function
labels. */
3547 if (real_return_label)
3548 LABEL_NUSES (real_return_label)++;
3549 if (real_simple_return_label)
3550 LABEL_NUSES (real_simple_return_label)++;
3551
3552 /* Clear the list of insns to fill so we can use it. */
3553 obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3554
3555 for (insn = first; insn; insn = NEXT_INSN (insn))
3556 {
3557 int flags;
3558 rtx kind, real_label;
3559
/* Only look at filled JUMP_INSNs that go to the end-of-function
label. */
3562 if (!NONJUMP_INSN_P (insn)
3563 || GET_CODE (PATTERN (insn)) != SEQUENCE
3564 || !jump_to_label_p (XVECEXP (PATTERN (insn), 0, 0)))
3565 continue;
3566
3567 if (JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) == function_return_label)
3568 {
3569 kind = ret_rtx;
3570 real_label = real_return_label;
3571 }
3572 else if (JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0))
3573 == function_simple_return_label)
3574 {
3575 kind = simple_return_rtx;
3576 real_label = real_simple_return_label;
3577 }
3578 else
3579 continue;
3580
3581 pat = PATTERN (insn);
3582 jump_insn = XVECEXP (pat, 0, 0);
3583
3584 /* If we can't make the jump into a RETURN, try to redirect it to the best
3585 RETURN and go on to the next insn. */
3586 if (!reorg_redirect_jump (jump_insn, kind))
3587 {
3588 /* Make sure redirecting the jump will not invalidate the delay
3589 slot insns. */
3590 if (redirect_with_delay_slots_safe_p (jump_insn, real_label, insn))
3591 reorg_redirect_jump (jump_insn, real_label);
3592 continue;
3593 }
3594
/* See if this RETURN can accept the insns currently in its delay slot.
It can if it has at least as many slots as the current sequence and
the contents of each are valid. */
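/* (E.g., a RETURN offering two slots can absorb a SEQUENCE that filled
only one, but a single-slot RETURN cannot absorb two.) */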
3598
3599 flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
3600 slots = num_delay_slots (jump_insn);
3601 if (slots >= XVECLEN (pat, 0) - 1)
3602 {
3603 for (i = 1; i < XVECLEN (pat, 0); i++)
3604 if (! (
3605 #ifdef ANNUL_IFFALSE_SLOTS
3606 (INSN_ANNULLED_BRANCH_P (jump_insn)
3607 && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
3608 ? eligible_for_annul_false (jump_insn, i - 1,
3609 XVECEXP (pat, 0, i), flags) :
3610 #endif
3611 #ifdef ANNUL_IFTRUE_SLOTS
3612 (INSN_ANNULLED_BRANCH_P (jump_insn)
3613 && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
3614 ? eligible_for_annul_true (jump_insn, i - 1,
3615 XVECEXP (pat, 0, i), flags) :
3616 #endif
3617 eligible_for_delay (jump_insn, i - 1,
3618 XVECEXP (pat, 0, i), flags)))
3619 break;
3620 }
3621 else
3622 i = 0;
3623
3624 if (i == XVECLEN (pat, 0))
3625 continue;
3626
3627 /* We have to do something with this insn. If it is an unconditional
3628 RETURN, delete the SEQUENCE and output the individual insns,
3629 followed by the RETURN. Then set things up so we try to find
3630 insns for its delay slots, if it needs some. */
3631 if (ANY_RETURN_P (PATTERN (jump_insn)))
3632 {
3633 rtx prev = PREV_INSN (insn);
3634
3635 delete_related_insns (insn);
3636 for (i = 1; i < XVECLEN (pat, 0); i++)
3637 prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);
3638
3639 insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
3640 emit_barrier_after (insn);
3641
3642 if (slots)
3643 obstack_ptr_grow (&unfilled_slots_obstack, insn);
3644 }
3645 else
3646 /* It is probably more efficient to keep this with its current
3647 delay slot as a branch to a RETURN. */
3648 reorg_redirect_jump (jump_insn, real_label);
3649 }
3650
/* Now delete REAL_RETURN_LABEL and REAL_SIMPLE_RETURN_LABEL if we never
used them. Then try to fill any new delay slots we have created. */
3653 if (real_return_label != NULL_RTX && --LABEL_NUSES (real_return_label) == 0)
3654 delete_related_insns (real_return_label);
3655 if (real_simple_return_label != NULL_RTX
3656 && --LABEL_NUSES (real_simple_return_label) == 0)
3657 delete_related_insns (real_simple_return_label);
3658
3659 fill_simple_delay_slots (1);
3660 fill_simple_delay_slots (0);
3661 }
3662 \f
3663 /* Try to find insns to place in delay slots. */
3664
3665 void
3666 dbr_schedule (rtx first)
3667 {
3668 rtx insn, next, epilogue_insn = 0;
3669 int i;
3670 bool need_return_insns;
3671
3672 /* If the current function has no insns other than the prologue and
3673 epilogue, then do not try to fill any delay slots. */
3674 if (n_basic_blocks == NUM_FIXED_BLOCKS)
3675 return;
3676
3677 /* Find the highest INSN_UID and allocate and initialize our map from
3678 INSN_UID's to position in code. */
3679 for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
3680 {
3681 if (INSN_UID (insn) > max_uid)
3682 max_uid = INSN_UID (insn);
3683 if (NOTE_P (insn)
3684 && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
3685 epilogue_insn = insn;
3686 }
3687
3688 uid_to_ruid = XNEWVEC (int, max_uid + 1);
3689 for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
3690 uid_to_ruid[INSN_UID (insn)] = i;
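/* (uid_to_ruid maps sparse INSN_UIDs onto dense sequence numbers, so
the relative order of two insns can be compared in constant time.) */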
3691
3692 /* Initialize the list of insns that need filling. */
3693 if (unfilled_firstobj == 0)
3694 {
3695 gcc_obstack_init (&unfilled_slots_obstack);
3696 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3697 }
3698
3699 for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
3700 {
3701 rtx target;
3702
3703 if (JUMP_P (insn))
3704 INSN_ANNULLED_BRANCH_P (insn) = 0;
3705 INSN_FROM_TARGET_P (insn) = 0;
3706
3707 /* Skip vector tables. We can't get attributes for them. */
3708 if (JUMP_TABLE_DATA_P (insn))
3709 continue;
3710
3711 if (num_delay_slots (insn) > 0)
3712 obstack_ptr_grow (&unfilled_slots_obstack, insn);
3713
3714 /* Ensure all jumps go to the last of a set of consecutive labels. */
3715 if (JUMP_P (insn)
3716 && (condjump_p (insn) || condjump_in_parallel_p (insn))
3717 && !ANY_RETURN_P (JUMP_LABEL (insn))
3718 && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
3719 != JUMP_LABEL (insn)))
3720 redirect_jump (insn, target, 1);
3721 }
3722
3723 init_resource_info (epilogue_insn);
3724
3725 /* Show we haven't computed an end-of-function label yet. */
3726 function_return_label = function_simple_return_label = NULL_RTX;
3727
3728 /* Initialize the statistics for this function. */
3729 memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
3730 memset (num_filled_delays, 0, sizeof num_filled_delays);
3731
3732 /* Now do the delay slot filling. Try everything twice in case earlier
3733 changes make more slots fillable. */
3734
3735 for (reorg_pass_number = 0;
3736 reorg_pass_number < MAX_REORG_PASSES;
3737 reorg_pass_number++)
3738 {
3739 fill_simple_delay_slots (1);
3740 fill_simple_delay_slots (0);
3741 fill_eager_delay_slots ();
3742 relax_delay_slots (first);
3743 }
3744
/* If we made an end-of-function label, indicate that it is now safe
to delete it by undoing our prior adjustment to LABEL_NUSES, and
delete it if it is now unused. */
3748 if (function_return_label && --LABEL_NUSES (function_return_label) == 0)
3749 delete_related_insns (function_return_label);
3750 if (function_simple_return_label
3751 && --LABEL_NUSES (function_simple_return_label) == 0)
3752 delete_related_insns (function_simple_return_label);
3753
3754 need_return_insns = false;
3755 #ifdef HAVE_return
3756 need_return_insns |= HAVE_return && function_return_label != 0;
3757 #endif
3758 #ifdef HAVE_simple_return
3759 need_return_insns |= HAVE_simple_return && function_simple_return_label != 0;
3760 #endif
3761 if (need_return_insns)
3762 make_return_insns (first);
3763
3764 /* Delete any USE insns made by update_block; subsequent passes don't need
3765 them or know how to deal with them. */
3766 for (insn = first; insn; insn = next)
3767 {
3768 next = NEXT_INSN (insn);
3769
3770 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
3771 && INSN_P (XEXP (PATTERN (insn), 0)))
3772 next = delete_related_insns (insn);
3773 }
3774
3775 obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3776
3777 /* It is not clear why the line below is needed, but it does seem to be. */
3778 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3779
3780 if (dump_file)
3781 {
3782 int i, j, need_comma;
3783 int total_delay_slots[MAX_DELAY_HISTOGRAM + 1];
3784 int total_annul_slots[MAX_DELAY_HISTOGRAM + 1];
3785
3786 for (reorg_pass_number = 0;
3787 reorg_pass_number < MAX_REORG_PASSES;
3788 reorg_pass_number++)
3789 {
3790 fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
3791 for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
3792 {
3793 need_comma = 0;
3794 fprintf (dump_file, ";; Reorg function #%d\n", i);
3795
3796 fprintf (dump_file, ";; %d insns needing delay slots\n;; ",
3797 num_insns_needing_delays[i][reorg_pass_number]);
3798
3799 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3800 if (num_filled_delays[i][j][reorg_pass_number])
3801 {
3802 if (need_comma)
3803 fprintf (dump_file, ", ");
3804 need_comma = 1;
3805 fprintf (dump_file, "%d got %d delays",
3806 num_filled_delays[i][j][reorg_pass_number], j);
3807 }
3808 fprintf (dump_file, "\n");
3809 }
3810 }
3811 memset (total_delay_slots, 0, sizeof total_delay_slots);
3812 memset (total_annul_slots, 0, sizeof total_annul_slots);
3813 for (insn = first; insn; insn = NEXT_INSN (insn))
3814 {
3815 if (! INSN_DELETED_P (insn)
3816 && NONJUMP_INSN_P (insn)
3817 && GET_CODE (PATTERN (insn)) != USE
3818 && GET_CODE (PATTERN (insn)) != CLOBBER)
3819 {
3820 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
3821 {
3822 rtx control;
3823 j = XVECLEN (PATTERN (insn), 0) - 1;
3824 if (j > MAX_DELAY_HISTOGRAM)
3825 j = MAX_DELAY_HISTOGRAM;
3826 control = XVECEXP (PATTERN (insn), 0, 0);
3827 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
3828 total_annul_slots[j]++;
3829 else
3830 total_delay_slots[j]++;
3831 }
3832 else if (num_delay_slots (insn) > 0)
3833 total_delay_slots[0]++;
3834 }
3835 }
3836 fprintf (dump_file, ";; Reorg totals: ");
3837 need_comma = 0;
3838 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3839 {
3840 if (total_delay_slots[j])
3841 {
3842 if (need_comma)
3843 fprintf (dump_file, ", ");
3844 need_comma = 1;
3845 fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j);
3846 }
3847 }
3848 fprintf (dump_file, "\n");
3849 #if defined (ANNUL_IFTRUE_SLOTS) || defined (ANNUL_IFFALSE_SLOTS)
3850 fprintf (dump_file, ";; Reorg annuls: ");
3851 need_comma = 0;
3852 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3853 {
3854 if (total_annul_slots[j])
3855 {
3856 if (need_comma)
3857 fprintf (dump_file, ", ");
3858 need_comma = 1;
3859 fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j);
3860 }
3861 }
3862 fprintf (dump_file, "\n");
3863 #endif
3864 fprintf (dump_file, "\n");
3865 }
3866
3867 free_resource_info ();
3868 free (uid_to_ruid);
3869 crtl->dbr_scheduled_p = true;
3870 }
3871 #endif /* DELAY_SLOTS */
3872 \f
3873 static bool
3874 gate_handle_delay_slots (void)
3875 {
3876 #ifdef DELAY_SLOTS
3877 /* At -O0 dataflow info isn't updated after RA. */
3878 return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
3879 #else
3880 return 0;
3881 #endif
3882 }
3883
3884 /* Run delay slot optimization. */
3885 static unsigned int
3886 rest_of_handle_delay_slots (void)
3887 {
3888 #ifdef DELAY_SLOTS
3889 dbr_schedule (get_insns ());
3890 #endif
3891 return 0;
3892 }
3893
3894 struct rtl_opt_pass pass_delay_slots =
3895 {
3896 {
3897 RTL_PASS,
3898 "dbr", /* name */
3899 OPTGROUP_NONE, /* optinfo_flags */
3900 gate_handle_delay_slots, /* gate */
3901 rest_of_handle_delay_slots, /* execute */
3902 NULL, /* sub */
3903 NULL, /* next */
3904 0, /* static_pass_number */
3905 TV_DBR_SCHED, /* tv_id */
3906 0, /* properties_required */
3907 0, /* properties_provided */
3908 0, /* properties_destroyed */
3909 0, /* todo_flags_start */
3910 TODO_ggc_collect /* todo_flags_finish */
3911 }
3912 };
3913
3914 /* Machine dependent reorg pass. */
3915 static bool
3916 gate_handle_machine_reorg (void)
3917 {
3918 return targetm.machine_dependent_reorg != 0;
3919 }
3920
3921
3922 static unsigned int
3923 rest_of_handle_machine_reorg (void)
3924 {
3925 targetm.machine_dependent_reorg ();
3926 return 0;
3927 }
3928
3929 struct rtl_opt_pass pass_machine_reorg =
3930 {
3931 {
3932 RTL_PASS,
3933 "mach", /* name */
3934 OPTGROUP_NONE, /* optinfo_flags */
3935 gate_handle_machine_reorg, /* gate */
3936 rest_of_handle_machine_reorg, /* execute */
3937 NULL, /* sub */
3938 NULL, /* next */
3939 0, /* static_pass_number */
3940 TV_MACH_DEP, /* tv_id */
3941 0, /* properties_required */
3942 0, /* properties_provided */
3943 0, /* properties_destroyed */
3944 0, /* todo_flags_start */
3945 TODO_ggc_collect /* todo_flags_finish */
3946 }
3947 };