exp_attr.adb, [...]: Minor reformatting.
[gcc.git] / gcc / ifcvt.c
1 /* If-conversion support.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "cfghooks.h"
28 #include "df.h"
29 #include "tm_p.h"
30 #include "expmed.h"
31 #include "optabs.h"
32 #include "regs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35
36 #include "cfgrtl.h"
37 #include "cfganal.h"
38 #include "cfgcleanup.h"
39 #include "expr.h"
40 #include "output.h"
41 #include "cfgloop.h"
42 #include "tree-pass.h"
43 #include "dbgcnt.h"
44 #include "shrink-wrap.h"
45 #include "rtl-iter.h"
46 #include "ifcvt.h"
47
48 #ifndef MAX_CONDITIONAL_EXECUTE
49 #define MAX_CONDITIONAL_EXECUTE \
50 (BRANCH_COST (optimize_function_for_speed_p (cfun), false) \
51 + 1)
52 #endif
53
54 #define IFCVT_MULTIPLE_DUMPS 1
55
56 #define NULL_BLOCK ((basic_block) NULL)
57
58 /* True if after combine pass. */
59 static bool ifcvt_after_combine;
60
61 /* True if the target has the cbranchcc4 optab. */
62 static bool have_cbranchcc4;
63
64 /* # of IF-THEN or IF-THEN-ELSE blocks we looked at */
65 static int num_possible_if_blocks;
66
67 /* # of IF-THEN or IF-THEN-ELSE blocks were converted to conditional
68 execution. */
69 static int num_updated_if_blocks;
70
71 /* # of changes made. */
72 static int num_true_changes;
73
74 /* Whether conditional execution changes were made. */
75 static int cond_exec_changed_p;
76
77 /* Forward references. */
78 static int count_bb_insns (const_basic_block);
79 static bool cheap_bb_rtx_cost_p (const_basic_block, int, int);
80 static rtx_insn *first_active_insn (basic_block);
81 static rtx_insn *last_active_insn (basic_block, int);
82 static rtx_insn *find_active_insn_before (basic_block, rtx_insn *);
83 static rtx_insn *find_active_insn_after (basic_block, rtx_insn *);
84 static basic_block block_fallthru (basic_block);
85 static int cond_exec_process_insns (ce_if_block *, rtx_insn *, rtx, rtx, int,
86 int);
87 static rtx cond_exec_get_condition (rtx_insn *);
88 static rtx noce_get_condition (rtx_insn *, rtx_insn **, bool);
89 static int noce_operand_ok (const_rtx);
90 static void merge_if_block (ce_if_block *);
91 static int find_cond_trap (basic_block, edge, edge);
92 static basic_block find_if_header (basic_block, int);
93 static int block_jumps_and_fallthru_p (basic_block, basic_block);
94 static int noce_find_if_block (basic_block, edge, edge, int);
95 static int cond_exec_find_if_block (ce_if_block *);
96 static int find_if_case_1 (basic_block, edge, edge);
97 static int find_if_case_2 (basic_block, edge, edge);
98 static int dead_or_predicable (basic_block, basic_block, basic_block,
99 edge, int);
100 static void noce_emit_move_insn (rtx, rtx);
101 static rtx_insn *block_has_only_trap (basic_block);
102 \f
103 /* Count the number of non-jump active insns in BB. */
104
105 static int
106 count_bb_insns (const_basic_block bb)
107 {
108 int count = 0;
109 rtx_insn *insn = BB_HEAD (bb);
110
111 while (1)
112 {
113 if (active_insn_p (insn) && !JUMP_P (insn))
114 count++;
115
116 if (insn == BB_END (bb))
117 break;
118 insn = NEXT_INSN (insn);
119 }
120
121 return count;
122 }
123
124 /* Determine whether the total insn_rtx_cost on non-jump insns in
125 basic block BB is less than MAX_COST. This function returns
126 false if the cost of any instruction could not be estimated.
127
128 The cost of the non-jump insns in BB is scaled by REG_BR_PROB_BASE
129 as those insns are being speculated. MAX_COST is scaled with SCALE
130 plus a small fudge factor. */
131
132 static bool
133 cheap_bb_rtx_cost_p (const_basic_block bb, int scale, int max_cost)
134 {
135 int count = 0;
136 rtx_insn *insn = BB_HEAD (bb);
137 bool speed = optimize_bb_for_speed_p (bb);
138
139 /* Set scale to REG_BR_PROB_BASE to void the identical scaling
140 applied to insn_rtx_cost when optimizing for size. Only do
141 this after combine because if-conversion might interfere with
142 passes before combine.
143
144 Use optimize_function_for_speed_p instead of the pre-defined
145 variable speed to make sure it is set to same value for all
146 basic blocks in one if-conversion transformation. */
147 if (!optimize_function_for_speed_p (cfun) && ifcvt_after_combine)
148 scale = REG_BR_PROB_BASE;
149 /* Our branch probability/scaling factors are just estimates and don't
150 account for cases where we can get speculation for free and other
151 secondary benefits. So we fudge the scale factor to make speculating
152 appear a little more profitable when optimizing for performance. */
153 else
154 scale += REG_BR_PROB_BASE / 8;
155
156
157 max_cost *= scale;
158
159 while (1)
160 {
161 if (NONJUMP_INSN_P (insn))
162 {
163 int cost = insn_rtx_cost (PATTERN (insn), speed) * REG_BR_PROB_BASE;
164 if (cost == 0)
165 return false;
166
167 /* If this instruction is the load or set of a "stack" register,
168 such as a floating point register on x87, then the cost of
169 speculatively executing this insn may need to include
170 the additional cost of popping its result off of the
171 register stack. Unfortunately, correctly recognizing and
172 accounting for this additional overhead is tricky, so for
173 now we simply prohibit such speculative execution. */
174 #ifdef STACK_REGS
175 {
176 rtx set = single_set (insn);
177 if (set && STACK_REG_P (SET_DEST (set)))
178 return false;
179 }
180 #endif
181
182 count += cost;
183 if (count >= max_cost)
184 return false;
185 }
186 else if (CALL_P (insn))
187 return false;
188
189 if (insn == BB_END (bb))
190 break;
191 insn = NEXT_INSN (insn);
192 }
193
194 return true;
195 }
196
197 /* Return the first non-jump active insn in the basic block. */
198
199 static rtx_insn *
200 first_active_insn (basic_block bb)
201 {
202 rtx_insn *insn = BB_HEAD (bb);
203
204 if (LABEL_P (insn))
205 {
206 if (insn == BB_END (bb))
207 return NULL;
208 insn = NEXT_INSN (insn);
209 }
210
211 while (NOTE_P (insn) || DEBUG_INSN_P (insn))
212 {
213 if (insn == BB_END (bb))
214 return NULL;
215 insn = NEXT_INSN (insn);
216 }
217
218 if (JUMP_P (insn))
219 return NULL;
220
221 return insn;
222 }
223
224 /* Return the last non-jump active (non-jump) insn in the basic block. */
225
226 static rtx_insn *
227 last_active_insn (basic_block bb, int skip_use_p)
228 {
229 rtx_insn *insn = BB_END (bb);
230 rtx_insn *head = BB_HEAD (bb);
231
232 while (NOTE_P (insn)
233 || JUMP_P (insn)
234 || DEBUG_INSN_P (insn)
235 || (skip_use_p
236 && NONJUMP_INSN_P (insn)
237 && GET_CODE (PATTERN (insn)) == USE))
238 {
239 if (insn == head)
240 return NULL;
241 insn = PREV_INSN (insn);
242 }
243
244 if (LABEL_P (insn))
245 return NULL;
246
247 return insn;
248 }
249
250 /* Return the active insn before INSN inside basic block CURR_BB. */
251
252 static rtx_insn *
253 find_active_insn_before (basic_block curr_bb, rtx_insn *insn)
254 {
255 if (!insn || insn == BB_HEAD (curr_bb))
256 return NULL;
257
258 while ((insn = PREV_INSN (insn)) != NULL_RTX)
259 {
260 if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
261 break;
262
263 /* No other active insn all the way to the start of the basic block. */
264 if (insn == BB_HEAD (curr_bb))
265 return NULL;
266 }
267
268 return insn;
269 }
270
271 /* Return the active insn after INSN inside basic block CURR_BB. */
272
273 static rtx_insn *
274 find_active_insn_after (basic_block curr_bb, rtx_insn *insn)
275 {
276 if (!insn || insn == BB_END (curr_bb))
277 return NULL;
278
279 while ((insn = NEXT_INSN (insn)) != NULL_RTX)
280 {
281 if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
282 break;
283
284 /* No other active insn all the way to the end of the basic block. */
285 if (insn == BB_END (curr_bb))
286 return NULL;
287 }
288
289 return insn;
290 }
291
292 /* Return the basic block reached by falling though the basic block BB. */
293
294 static basic_block
295 block_fallthru (basic_block bb)
296 {
297 edge e = find_fallthru_edge (bb->succs);
298
299 return (e) ? e->dest : NULL_BLOCK;
300 }
301
302 /* Return true if RTXs A and B can be safely interchanged. */
303
304 static bool
305 rtx_interchangeable_p (const_rtx a, const_rtx b)
306 {
307 if (!rtx_equal_p (a, b))
308 return false;
309
310 if (GET_CODE (a) != MEM)
311 return true;
312
313 /* A dead type-unsafe memory reference is legal, but a live type-unsafe memory
314 reference is not. Interchanging a dead type-unsafe memory reference with
315 a live type-safe one creates a live type-unsafe memory reference, in other
316 words, it makes the program illegal.
317 We check here conservatively whether the two memory references have equal
318 memory attributes. */
319
320 return mem_attrs_eq_p (get_mem_attrs (a), get_mem_attrs (b));
321 }
322
323 \f
324 /* Go through a bunch of insns, converting them to conditional
325 execution format if possible. Return TRUE if all of the non-note
326 insns were processed. */
327
328 static int
329 cond_exec_process_insns (ce_if_block *ce_info ATTRIBUTE_UNUSED,
330 /* if block information */rtx_insn *start,
331 /* first insn to look at */rtx end,
332 /* last insn to look at */rtx test,
333 /* conditional execution test */int prob_val,
334 /* probability of branch taken. */int mod_ok)
335 {
336 int must_be_last = FALSE;
337 rtx_insn *insn;
338 rtx xtest;
339 rtx pattern;
340
341 if (!start || !end)
342 return FALSE;
343
344 for (insn = start; ; insn = NEXT_INSN (insn))
345 {
346 /* dwarf2out can't cope with conditional prologues. */
347 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)
348 return FALSE;
349
350 if (NOTE_P (insn) || DEBUG_INSN_P (insn))
351 goto insn_done;
352
353 gcc_assert (NONJUMP_INSN_P (insn) || CALL_P (insn));
354
355 /* dwarf2out can't cope with conditional unwind info. */
356 if (RTX_FRAME_RELATED_P (insn))
357 return FALSE;
358
359 /* Remove USE insns that get in the way. */
360 if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
361 {
362 /* ??? Ug. Actually unlinking the thing is problematic,
363 given what we'd have to coordinate with our callers. */
364 SET_INSN_DELETED (insn);
365 goto insn_done;
366 }
367
368 /* Last insn wasn't last? */
369 if (must_be_last)
370 return FALSE;
371
372 if (modified_in_p (test, insn))
373 {
374 if (!mod_ok)
375 return FALSE;
376 must_be_last = TRUE;
377 }
378
379 /* Now build the conditional form of the instruction. */
380 pattern = PATTERN (insn);
381 xtest = copy_rtx (test);
382
383 /* If this is already a COND_EXEC, rewrite the test to be an AND of the
384 two conditions. */
385 if (GET_CODE (pattern) == COND_EXEC)
386 {
387 if (GET_MODE (xtest) != GET_MODE (COND_EXEC_TEST (pattern)))
388 return FALSE;
389
390 xtest = gen_rtx_AND (GET_MODE (xtest), xtest,
391 COND_EXEC_TEST (pattern));
392 pattern = COND_EXEC_CODE (pattern);
393 }
394
395 pattern = gen_rtx_COND_EXEC (VOIDmode, xtest, pattern);
396
397 /* If the machine needs to modify the insn being conditionally executed,
398 say for example to force a constant integer operand into a temp
399 register, do so here. */
400 #ifdef IFCVT_MODIFY_INSN
401 IFCVT_MODIFY_INSN (ce_info, pattern, insn);
402 if (! pattern)
403 return FALSE;
404 #endif
405
406 validate_change (insn, &PATTERN (insn), pattern, 1);
407
408 if (CALL_P (insn) && prob_val >= 0)
409 validate_change (insn, &REG_NOTES (insn),
410 gen_rtx_INT_LIST ((machine_mode) REG_BR_PROB,
411 prob_val, REG_NOTES (insn)), 1);
412
413 insn_done:
414 if (insn == end)
415 break;
416 }
417
418 return TRUE;
419 }
420
421 /* Return the condition for a jump. Do not do any special processing. */
422
423 static rtx
424 cond_exec_get_condition (rtx_insn *jump)
425 {
426 rtx test_if, cond;
427
428 if (any_condjump_p (jump))
429 test_if = SET_SRC (pc_set (jump));
430 else
431 return NULL_RTX;
432 cond = XEXP (test_if, 0);
433
434 /* If this branches to JUMP_LABEL when the condition is false,
435 reverse the condition. */
436 if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF
437 && LABEL_REF_LABEL (XEXP (test_if, 2)) == JUMP_LABEL (jump))
438 {
439 enum rtx_code rev = reversed_comparison_code (cond, jump);
440 if (rev == UNKNOWN)
441 return NULL_RTX;
442
443 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
444 XEXP (cond, 1));
445 }
446
447 return cond;
448 }
449
450 /* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
451 to conditional execution. Return TRUE if we were successful at
452 converting the block. */
453
454 static int
455 cond_exec_process_if_block (ce_if_block * ce_info,
456 /* if block information */int do_multiple_p)
457 {
458 basic_block test_bb = ce_info->test_bb; /* last test block */
459 basic_block then_bb = ce_info->then_bb; /* THEN */
460 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
461 rtx test_expr; /* expression in IF_THEN_ELSE that is tested */
462 rtx_insn *then_start; /* first insn in THEN block */
463 rtx_insn *then_end; /* last insn + 1 in THEN block */
464 rtx_insn *else_start = NULL; /* first insn in ELSE block or NULL */
465 rtx_insn *else_end = NULL; /* last insn + 1 in ELSE block */
466 int max; /* max # of insns to convert. */
467 int then_mod_ok; /* whether conditional mods are ok in THEN */
468 rtx true_expr; /* test for else block insns */
469 rtx false_expr; /* test for then block insns */
470 int true_prob_val; /* probability of else block */
471 int false_prob_val; /* probability of then block */
472 rtx_insn *then_last_head = NULL; /* Last match at the head of THEN */
473 rtx_insn *else_last_head = NULL; /* Last match at the head of ELSE */
474 rtx_insn *then_first_tail = NULL; /* First match at the tail of THEN */
475 rtx_insn *else_first_tail = NULL; /* First match at the tail of ELSE */
476 int then_n_insns, else_n_insns, n_insns;
477 enum rtx_code false_code;
478 rtx note;
479
480 /* If test is comprised of && or || elements, and we've failed at handling
481 all of them together, just use the last test if it is the special case of
482 && elements without an ELSE block. */
483 if (!do_multiple_p && ce_info->num_multiple_test_blocks)
484 {
485 if (else_bb || ! ce_info->and_and_p)
486 return FALSE;
487
488 ce_info->test_bb = test_bb = ce_info->last_test_bb;
489 ce_info->num_multiple_test_blocks = 0;
490 ce_info->num_and_and_blocks = 0;
491 ce_info->num_or_or_blocks = 0;
492 }
493
494 /* Find the conditional jump to the ELSE or JOIN part, and isolate
495 the test. */
496 test_expr = cond_exec_get_condition (BB_END (test_bb));
497 if (! test_expr)
498 return FALSE;
499
500 /* If the conditional jump is more than just a conditional jump,
501 then we can not do conditional execution conversion on this block. */
502 if (! onlyjump_p (BB_END (test_bb)))
503 return FALSE;
504
505 /* Collect the bounds of where we're to search, skipping any labels, jumps
506 and notes at the beginning and end of the block. Then count the total
507 number of insns and see if it is small enough to convert. */
508 then_start = first_active_insn (then_bb);
509 then_end = last_active_insn (then_bb, TRUE);
510 then_n_insns = ce_info->num_then_insns = count_bb_insns (then_bb);
511 n_insns = then_n_insns;
512 max = MAX_CONDITIONAL_EXECUTE;
513
514 if (else_bb)
515 {
516 int n_matching;
517
518 max *= 2;
519 else_start = first_active_insn (else_bb);
520 else_end = last_active_insn (else_bb, TRUE);
521 else_n_insns = ce_info->num_else_insns = count_bb_insns (else_bb);
522 n_insns += else_n_insns;
523
524 /* Look for matching sequences at the head and tail of the two blocks,
525 and limit the range of insns to be converted if possible. */
526 n_matching = flow_find_cross_jump (then_bb, else_bb,
527 &then_first_tail, &else_first_tail,
528 NULL);
529 if (then_first_tail == BB_HEAD (then_bb))
530 then_start = then_end = NULL;
531 if (else_first_tail == BB_HEAD (else_bb))
532 else_start = else_end = NULL;
533
534 if (n_matching > 0)
535 {
536 if (then_end)
537 then_end = find_active_insn_before (then_bb, then_first_tail);
538 if (else_end)
539 else_end = find_active_insn_before (else_bb, else_first_tail);
540 n_insns -= 2 * n_matching;
541 }
542
543 if (then_start
544 && else_start
545 && then_n_insns > n_matching
546 && else_n_insns > n_matching)
547 {
548 int longest_match = MIN (then_n_insns - n_matching,
549 else_n_insns - n_matching);
550 n_matching
551 = flow_find_head_matching_sequence (then_bb, else_bb,
552 &then_last_head,
553 &else_last_head,
554 longest_match);
555
556 if (n_matching > 0)
557 {
558 rtx_insn *insn;
559
560 /* We won't pass the insns in the head sequence to
561 cond_exec_process_insns, so we need to test them here
562 to make sure that they don't clobber the condition. */
563 for (insn = BB_HEAD (then_bb);
564 insn != NEXT_INSN (then_last_head);
565 insn = NEXT_INSN (insn))
566 if (!LABEL_P (insn) && !NOTE_P (insn)
567 && !DEBUG_INSN_P (insn)
568 && modified_in_p (test_expr, insn))
569 return FALSE;
570 }
571
572 if (then_last_head == then_end)
573 then_start = then_end = NULL;
574 if (else_last_head == else_end)
575 else_start = else_end = NULL;
576
577 if (n_matching > 0)
578 {
579 if (then_start)
580 then_start = find_active_insn_after (then_bb, then_last_head);
581 if (else_start)
582 else_start = find_active_insn_after (else_bb, else_last_head);
583 n_insns -= 2 * n_matching;
584 }
585 }
586 }
587
588 if (n_insns > max)
589 return FALSE;
590
591 /* Map test_expr/test_jump into the appropriate MD tests to use on
592 the conditionally executed code. */
593
594 true_expr = test_expr;
595
596 false_code = reversed_comparison_code (true_expr, BB_END (test_bb));
597 if (false_code != UNKNOWN)
598 false_expr = gen_rtx_fmt_ee (false_code, GET_MODE (true_expr),
599 XEXP (true_expr, 0), XEXP (true_expr, 1));
600 else
601 false_expr = NULL_RTX;
602
603 #ifdef IFCVT_MODIFY_TESTS
604 /* If the machine description needs to modify the tests, such as setting a
605 conditional execution register from a comparison, it can do so here. */
606 IFCVT_MODIFY_TESTS (ce_info, true_expr, false_expr);
607
608 /* See if the conversion failed. */
609 if (!true_expr || !false_expr)
610 goto fail;
611 #endif
612
613 note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX);
614 if (note)
615 {
616 true_prob_val = XINT (note, 0);
617 false_prob_val = REG_BR_PROB_BASE - true_prob_val;
618 }
619 else
620 {
621 true_prob_val = -1;
622 false_prob_val = -1;
623 }
624
625 /* If we have && or || tests, do them here. These tests are in the adjacent
626 blocks after the first block containing the test. */
627 if (ce_info->num_multiple_test_blocks > 0)
628 {
629 basic_block bb = test_bb;
630 basic_block last_test_bb = ce_info->last_test_bb;
631
632 if (! false_expr)
633 goto fail;
634
635 do
636 {
637 rtx_insn *start, *end;
638 rtx t, f;
639 enum rtx_code f_code;
640
641 bb = block_fallthru (bb);
642 start = first_active_insn (bb);
643 end = last_active_insn (bb, TRUE);
644 if (start
645 && ! cond_exec_process_insns (ce_info, start, end, false_expr,
646 false_prob_val, FALSE))
647 goto fail;
648
649 /* If the conditional jump is more than just a conditional jump, then
650 we can not do conditional execution conversion on this block. */
651 if (! onlyjump_p (BB_END (bb)))
652 goto fail;
653
654 /* Find the conditional jump and isolate the test. */
655 t = cond_exec_get_condition (BB_END (bb));
656 if (! t)
657 goto fail;
658
659 f_code = reversed_comparison_code (t, BB_END (bb));
660 if (f_code == UNKNOWN)
661 goto fail;
662
663 f = gen_rtx_fmt_ee (f_code, GET_MODE (t), XEXP (t, 0), XEXP (t, 1));
664 if (ce_info->and_and_p)
665 {
666 t = gen_rtx_AND (GET_MODE (t), true_expr, t);
667 f = gen_rtx_IOR (GET_MODE (t), false_expr, f);
668 }
669 else
670 {
671 t = gen_rtx_IOR (GET_MODE (t), true_expr, t);
672 f = gen_rtx_AND (GET_MODE (t), false_expr, f);
673 }
674
675 /* If the machine description needs to modify the tests, such as
676 setting a conditional execution register from a comparison, it can
677 do so here. */
678 #ifdef IFCVT_MODIFY_MULTIPLE_TESTS
679 IFCVT_MODIFY_MULTIPLE_TESTS (ce_info, bb, t, f);
680
681 /* See if the conversion failed. */
682 if (!t || !f)
683 goto fail;
684 #endif
685
686 true_expr = t;
687 false_expr = f;
688 }
689 while (bb != last_test_bb);
690 }
691
692 /* For IF-THEN-ELSE blocks, we don't allow modifications of the test
693 on then THEN block. */
694 then_mod_ok = (else_bb == NULL_BLOCK);
695
696 /* Go through the THEN and ELSE blocks converting the insns if possible
697 to conditional execution. */
698
699 if (then_end
700 && (! false_expr
701 || ! cond_exec_process_insns (ce_info, then_start, then_end,
702 false_expr, false_prob_val,
703 then_mod_ok)))
704 goto fail;
705
706 if (else_bb && else_end
707 && ! cond_exec_process_insns (ce_info, else_start, else_end,
708 true_expr, true_prob_val, TRUE))
709 goto fail;
710
711 /* If we cannot apply the changes, fail. Do not go through the normal fail
712 processing, since apply_change_group will call cancel_changes. */
713 if (! apply_change_group ())
714 {
715 #ifdef IFCVT_MODIFY_CANCEL
716 /* Cancel any machine dependent changes. */
717 IFCVT_MODIFY_CANCEL (ce_info);
718 #endif
719 return FALSE;
720 }
721
722 #ifdef IFCVT_MODIFY_FINAL
723 /* Do any machine dependent final modifications. */
724 IFCVT_MODIFY_FINAL (ce_info);
725 #endif
726
727 /* Conversion succeeded. */
728 if (dump_file)
729 fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
730 n_insns, (n_insns == 1) ? " was" : "s were");
731
732 /* Merge the blocks! If we had matching sequences, make sure to delete one
733 copy at the appropriate location first: delete the copy in the THEN branch
734 for a tail sequence so that the remaining one is executed last for both
735 branches, and delete the copy in the ELSE branch for a head sequence so
736 that the remaining one is executed first for both branches. */
737 if (then_first_tail)
738 {
739 rtx_insn *from = then_first_tail;
740 if (!INSN_P (from))
741 from = find_active_insn_after (then_bb, from);
742 delete_insn_chain (from, BB_END (then_bb), false);
743 }
744 if (else_last_head)
745 delete_insn_chain (first_active_insn (else_bb), else_last_head, false);
746
747 merge_if_block (ce_info);
748 cond_exec_changed_p = TRUE;
749 return TRUE;
750
751 fail:
752 #ifdef IFCVT_MODIFY_CANCEL
753 /* Cancel any machine dependent changes. */
754 IFCVT_MODIFY_CANCEL (ce_info);
755 #endif
756
757 cancel_changes (0);
758 return FALSE;
759 }
760 \f
761 /* Used by noce_process_if_block to communicate with its subroutines.
762
763 The subroutines know that A and B may be evaluated freely. They
764 know that X is a register. They should insert new instructions
765 before cond_earliest. */
766
767 struct noce_if_info
768 {
769 /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block. */
770 basic_block test_bb, then_bb, else_bb, join_bb;
771
772 /* The jump that ends TEST_BB. */
773 rtx_insn *jump;
774
775 /* The jump condition. */
776 rtx cond;
777
778 /* New insns should be inserted before this one. */
779 rtx_insn *cond_earliest;
780
781 /* Insns in the THEN and ELSE block. There is always just this
782 one insns in those blocks. The insns are single_set insns.
783 If there was no ELSE block, INSN_B is the last insn before
784 COND_EARLIEST, or NULL_RTX. In the former case, the insn
785 operands are still valid, as if INSN_B was moved down below
786 the jump. */
787 rtx_insn *insn_a, *insn_b;
788
789 /* The SET_SRC of INSN_A and INSN_B. */
790 rtx a, b;
791
792 /* The SET_DEST of INSN_A. */
793 rtx x;
794
795 /* True if this if block is not canonical. In the canonical form of
796 if blocks, the THEN_BB is the block reached via the fallthru edge
797 from TEST_BB. For the noce transformations, we allow the symmetric
798 form as well. */
799 bool then_else_reversed;
800
801 /* True if the contents of then_bb and else_bb are a
802 simple single set instruction. */
803 bool then_simple;
804 bool else_simple;
805
806 /* The total rtx cost of the instructions in then_bb and else_bb. */
807 unsigned int then_cost;
808 unsigned int else_cost;
809
810 /* Estimated cost of the particular branch instruction. */
811 unsigned int branch_cost;
812 };
813
814 static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
815 static int noce_try_move (struct noce_if_info *);
816 static int noce_try_store_flag (struct noce_if_info *);
817 static int noce_try_addcc (struct noce_if_info *);
818 static int noce_try_store_flag_constants (struct noce_if_info *);
819 static int noce_try_store_flag_mask (struct noce_if_info *);
820 static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx,
821 rtx, rtx, rtx);
822 static int noce_try_cmove (struct noce_if_info *);
823 static int noce_try_cmove_arith (struct noce_if_info *);
824 static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx_insn **);
825 static int noce_try_minmax (struct noce_if_info *);
826 static int noce_try_abs (struct noce_if_info *);
827 static int noce_try_sign_mask (struct noce_if_info *);
828
829 /* Helper function for noce_try_store_flag*. */
830
831 static rtx
832 noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
833 int normalize)
834 {
835 rtx cond = if_info->cond;
836 int cond_complex;
837 enum rtx_code code;
838
839 cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode)
840 || ! general_operand (XEXP (cond, 1), VOIDmode));
841
842 /* If earliest == jump, or when the condition is complex, try to
843 build the store_flag insn directly. */
844
845 if (cond_complex)
846 {
847 rtx set = pc_set (if_info->jump);
848 cond = XEXP (SET_SRC (set), 0);
849 if (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
850 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump))
851 reversep = !reversep;
852 if (if_info->then_else_reversed)
853 reversep = !reversep;
854 }
855
856 if (reversep)
857 code = reversed_comparison_code (cond, if_info->jump);
858 else
859 code = GET_CODE (cond);
860
861 if ((if_info->cond_earliest == if_info->jump || cond_complex)
862 && (normalize == 0 || STORE_FLAG_VALUE == normalize))
863 {
864 rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
865 XEXP (cond, 1));
866 rtx set = gen_rtx_SET (x, src);
867
868 start_sequence ();
869 rtx_insn *insn = emit_insn (set);
870
871 if (recog_memoized (insn) >= 0)
872 {
873 rtx_insn *seq = get_insns ();
874 end_sequence ();
875 emit_insn (seq);
876
877 if_info->cond_earliest = if_info->jump;
878
879 return x;
880 }
881
882 end_sequence ();
883 }
884
885 /* Don't even try if the comparison operands or the mode of X are weird. */
886 if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
887 return NULL_RTX;
888
889 return emit_store_flag (x, code, XEXP (cond, 0),
890 XEXP (cond, 1), VOIDmode,
891 (code == LTU || code == LEU
892 || code == GEU || code == GTU), normalize);
893 }
894
895 /* Emit instruction to move an rtx, possibly into STRICT_LOW_PART.
896 X is the destination/target and Y is the value to copy. */
897
898 static void
899 noce_emit_move_insn (rtx x, rtx y)
900 {
901 machine_mode outmode;
902 rtx outer, inner;
903 int bitpos;
904
905 if (GET_CODE (x) != STRICT_LOW_PART)
906 {
907 rtx_insn *seq, *insn;
908 rtx target;
909 optab ot;
910
911 start_sequence ();
912 /* Check that the SET_SRC is reasonable before calling emit_move_insn,
913 otherwise construct a suitable SET pattern ourselves. */
914 insn = (OBJECT_P (y) || CONSTANT_P (y) || GET_CODE (y) == SUBREG)
915 ? emit_move_insn (x, y)
916 : emit_insn (gen_rtx_SET (x, y));
917 seq = get_insns ();
918 end_sequence ();
919
920 if (recog_memoized (insn) <= 0)
921 {
922 if (GET_CODE (x) == ZERO_EXTRACT)
923 {
924 rtx op = XEXP (x, 0);
925 unsigned HOST_WIDE_INT size = INTVAL (XEXP (x, 1));
926 unsigned HOST_WIDE_INT start = INTVAL (XEXP (x, 2));
927
928 /* store_bit_field expects START to be relative to
929 BYTES_BIG_ENDIAN and adjusts this value for machines with
930 BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN. In order to be able to
931 invoke store_bit_field again it is necessary to have the START
932 value from the first call. */
933 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
934 {
935 if (MEM_P (op))
936 start = BITS_PER_UNIT - start - size;
937 else
938 {
939 gcc_assert (REG_P (op));
940 start = BITS_PER_WORD - start - size;
941 }
942 }
943
944 gcc_assert (start < (MEM_P (op) ? BITS_PER_UNIT : BITS_PER_WORD));
945 store_bit_field (op, size, start, 0, 0, GET_MODE (x), y, false);
946 return;
947 }
948
949 switch (GET_RTX_CLASS (GET_CODE (y)))
950 {
951 case RTX_UNARY:
952 ot = code_to_optab (GET_CODE (y));
953 if (ot)
954 {
955 start_sequence ();
956 target = expand_unop (GET_MODE (y), ot, XEXP (y, 0), x, 0);
957 if (target != NULL_RTX)
958 {
959 if (target != x)
960 emit_move_insn (x, target);
961 seq = get_insns ();
962 }
963 end_sequence ();
964 }
965 break;
966
967 case RTX_BIN_ARITH:
968 case RTX_COMM_ARITH:
969 ot = code_to_optab (GET_CODE (y));
970 if (ot)
971 {
972 start_sequence ();
973 target = expand_binop (GET_MODE (y), ot,
974 XEXP (y, 0), XEXP (y, 1),
975 x, 0, OPTAB_DIRECT);
976 if (target != NULL_RTX)
977 {
978 if (target != x)
979 emit_move_insn (x, target);
980 seq = get_insns ();
981 }
982 end_sequence ();
983 }
984 break;
985
986 default:
987 break;
988 }
989 }
990
991 emit_insn (seq);
992 return;
993 }
994
995 outer = XEXP (x, 0);
996 inner = XEXP (outer, 0);
997 outmode = GET_MODE (outer);
998 bitpos = SUBREG_BYTE (outer) * BITS_PER_UNIT;
999 store_bit_field (inner, GET_MODE_BITSIZE (outmode), bitpos,
1000 0, 0, outmode, y, false);
1001 }
1002
1003 /* Return the CC reg if it is used in COND. */
1004
1005 static rtx
1006 cc_in_cond (rtx cond)
1007 {
1008 if (have_cbranchcc4 && cond
1009 && GET_MODE_CLASS (GET_MODE (XEXP (cond, 0))) == MODE_CC)
1010 return XEXP (cond, 0);
1011
1012 return NULL_RTX;
1013 }
1014
1015 /* Return sequence of instructions generated by if conversion. This
1016 function calls end_sequence() to end the current stream, ensures
1017 that the instructions are unshared, recognizable non-jump insns.
1018 On failure, this function returns a NULL_RTX. */
1019
1020 static rtx_insn *
1021 end_ifcvt_sequence (struct noce_if_info *if_info)
1022 {
1023 rtx_insn *insn;
1024 rtx_insn *seq = get_insns ();
1025 rtx cc = cc_in_cond (if_info->cond);
1026
1027 set_used_flags (if_info->x);
1028 set_used_flags (if_info->cond);
1029 set_used_flags (if_info->a);
1030 set_used_flags (if_info->b);
1031
1032 for (insn = seq; insn; insn = NEXT_INSN (insn))
1033 set_used_flags (insn);
1034
1035 unshare_all_rtl_in_chain (seq);
1036 end_sequence ();
1037
1038 /* Make sure that all of the instructions emitted are recognizable,
1039 and that we haven't introduced a new jump instruction.
1040 As an exercise for the reader, build a general mechanism that
1041 allows proper placement of required clobbers. */
1042 for (insn = seq; insn; insn = NEXT_INSN (insn))
1043 if (JUMP_P (insn)
1044 || recog_memoized (insn) == -1
1045 /* Make sure new generated code does not clobber CC. */
1046 || (cc && set_of (cc, insn)))
1047 return NULL;
1048
1049 return seq;
1050 }
1051
1052 /* Return true iff the then and else basic block (if it exists)
1053 consist of a single simple set instruction. */
1054
1055 static bool
1056 noce_simple_bbs (struct noce_if_info *if_info)
1057 {
1058 if (!if_info->then_simple)
1059 return false;
1060
1061 if (if_info->else_bb)
1062 return if_info->else_simple;
1063
1064 return true;
1065 }
1066
1067 /* Convert "if (a != b) x = a; else x = b" into "x = a" and
1068 "if (a == b) x = a; else x = b" into "x = b". */
1069
1070 static int
1071 noce_try_move (struct noce_if_info *if_info)
1072 {
1073 rtx cond = if_info->cond;
1074 enum rtx_code code = GET_CODE (cond);
1075 rtx y;
1076 rtx_insn *seq;
1077
1078 if (code != NE && code != EQ)
1079 return FALSE;
1080
1081 if (!noce_simple_bbs (if_info))
1082 return FALSE;
1083
1084 /* This optimization isn't valid if either A or B could be a NaN
1085 or a signed zero. */
1086 if (HONOR_NANS (if_info->x)
1087 || HONOR_SIGNED_ZEROS (if_info->x))
1088 return FALSE;
1089
1090 /* Check whether the operands of the comparison are A and in
1091 either order. */
1092 if ((rtx_equal_p (if_info->a, XEXP (cond, 0))
1093 && rtx_equal_p (if_info->b, XEXP (cond, 1)))
1094 || (rtx_equal_p (if_info->a, XEXP (cond, 1))
1095 && rtx_equal_p (if_info->b, XEXP (cond, 0))))
1096 {
1097 if (!rtx_interchangeable_p (if_info->a, if_info->b))
1098 return FALSE;
1099
1100 y = (code == EQ) ? if_info->a : if_info->b;
1101
1102 /* Avoid generating the move if the source is the destination. */
1103 if (! rtx_equal_p (if_info->x, y))
1104 {
1105 start_sequence ();
1106 noce_emit_move_insn (if_info->x, y);
1107 seq = end_ifcvt_sequence (if_info);
1108 if (!seq)
1109 return FALSE;
1110
1111 emit_insn_before_setloc (seq, if_info->jump,
1112 INSN_LOCATION (if_info->insn_a));
1113 }
1114 return TRUE;
1115 }
1116 return FALSE;
1117 }
1118
1119 /* Convert "if (test) x = 1; else x = 0".
1120
1121 Only try 0 and STORE_FLAG_VALUE here. Other combinations will be
1122 tried in noce_try_store_flag_constants after noce_try_cmove has had
1123 a go at the conversion. */
1124
1125 static int
1126 noce_try_store_flag (struct noce_if_info *if_info)
1127 {
1128 int reversep;
1129 rtx target;
1130 rtx_insn *seq;
1131
1132 if (!noce_simple_bbs (if_info))
1133 return FALSE;
1134
1135 if (CONST_INT_P (if_info->b)
1136 && INTVAL (if_info->b) == STORE_FLAG_VALUE
1137 && if_info->a == const0_rtx)
1138 reversep = 0;
1139 else if (if_info->b == const0_rtx
1140 && CONST_INT_P (if_info->a)
1141 && INTVAL (if_info->a) == STORE_FLAG_VALUE
1142 && (reversed_comparison_code (if_info->cond, if_info->jump)
1143 != UNKNOWN))
1144 reversep = 1;
1145 else
1146 return FALSE;
1147
1148 start_sequence ();
1149
1150 target = noce_emit_store_flag (if_info, if_info->x, reversep, 0);
1151 if (target)
1152 {
1153 if (target != if_info->x)
1154 noce_emit_move_insn (if_info->x, target);
1155
1156 seq = end_ifcvt_sequence (if_info);
1157 if (! seq)
1158 return FALSE;
1159
1160 emit_insn_before_setloc (seq, if_info->jump,
1161 INSN_LOCATION (if_info->insn_a));
1162 return TRUE;
1163 }
1164 else
1165 {
1166 end_sequence ();
1167 return FALSE;
1168 }
1169 }
1170
1171
1172 /* Convert "if (test) x = -A; else x = A" into
1173 x = A; if (test) x = -x if the machine can do the
1174 conditional negate form of this cheaply.
1175 Try this before noce_try_cmove that will just load the
1176 immediates into two registers and do a conditional select
1177 between them. If the target has a conditional negate or
1178 conditional invert operation we can save a potentially
1179 expensive constant synthesis. */
1180
1181 static bool
1182 noce_try_inverse_constants (struct noce_if_info *if_info)
1183 {
1184 if (!noce_simple_bbs (if_info))
1185 return false;
1186
1187 if (!CONST_INT_P (if_info->a)
1188 || !CONST_INT_P (if_info->b)
1189 || !REG_P (if_info->x))
1190 return false;
1191
1192 machine_mode mode = GET_MODE (if_info->x);
1193
1194 HOST_WIDE_INT val_a = INTVAL (if_info->a);
1195 HOST_WIDE_INT val_b = INTVAL (if_info->b);
1196
1197 rtx cond = if_info->cond;
1198
1199 rtx x = if_info->x;
1200 rtx target;
1201
1202 start_sequence ();
1203
1204 rtx_code code;
1205 if (val_b != HOST_WIDE_INT_MIN && val_a == -val_b)
1206 code = NEG;
1207 else if (val_a == ~val_b)
1208 code = NOT;
1209 else
1210 {
1211 end_sequence ();
1212 return false;
1213 }
1214
1215 rtx tmp = gen_reg_rtx (mode);
1216 noce_emit_move_insn (tmp, if_info->a);
1217
1218 target = emit_conditional_neg_or_complement (x, code, mode, cond, tmp, tmp);
1219
1220 if (target)
1221 {
1222 rtx_insn *seq = get_insns ();
1223
1224 if (!seq)
1225 {
1226 end_sequence ();
1227 return false;
1228 }
1229
1230 if (target != if_info->x)
1231 noce_emit_move_insn (if_info->x, target);
1232
1233 seq = end_ifcvt_sequence (if_info);
1234
1235 if (!seq)
1236 return false;
1237
1238 emit_insn_before_setloc (seq, if_info->jump,
1239 INSN_LOCATION (if_info->insn_a));
1240 return true;
1241 }
1242
1243 end_sequence ();
1244 return false;
1245 }
1246
1247
1248 /* Convert "if (test) x = a; else x = b", for A and B constant.
1249 Also allow A = y + c1, B = y + c2, with a common y between A
1250 and B. */
1251
1252 static int
1253 noce_try_store_flag_constants (struct noce_if_info *if_info)
1254 {
1255 rtx target;
1256 rtx_insn *seq;
1257 bool reversep;
1258 HOST_WIDE_INT itrue, ifalse, diff, tmp;
1259 int normalize;
1260 bool can_reverse;
1261 machine_mode mode = GET_MODE (if_info->x);;
1262 rtx common = NULL_RTX;
1263
1264 rtx a = if_info->a;
1265 rtx b = if_info->b;
1266
1267 /* Handle cases like x := test ? y + 3 : y + 4. */
1268 if (GET_CODE (a) == PLUS
1269 && GET_CODE (b) == PLUS
1270 && CONST_INT_P (XEXP (a, 1))
1271 && CONST_INT_P (XEXP (b, 1))
1272 && rtx_equal_p (XEXP (a, 0), XEXP (b, 0))
1273 && noce_operand_ok (XEXP (a, 0))
1274 && if_info->branch_cost >= 2)
1275 {
1276 common = XEXP (a, 0);
1277 a = XEXP (a, 1);
1278 b = XEXP (b, 1);
1279 }
1280
1281 if (!noce_simple_bbs (if_info))
1282 return FALSE;
1283
1284 if (CONST_INT_P (a)
1285 && CONST_INT_P (b))
1286 {
1287 ifalse = INTVAL (a);
1288 itrue = INTVAL (b);
1289 bool subtract_flag_p = false;
1290
1291 diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
1292 /* Make sure we can represent the difference between the two values. */
1293 if ((diff > 0)
1294 != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
1295 return FALSE;
1296
1297 diff = trunc_int_for_mode (diff, mode);
1298
1299 can_reverse = (reversed_comparison_code (if_info->cond, if_info->jump)
1300 != UNKNOWN);
1301
1302 reversep = false;
1303 if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
1304 {
1305 normalize = 0;
1306 /* We could collapse these cases but it is easier to follow the
1307 diff/STORE_FLAG_VALUE combinations when they are listed
1308 explicitly. */
1309
1310 /* test ? 3 : 4
1311 => 4 + (test != 0). */
1312 if (diff < 0 && STORE_FLAG_VALUE < 0)
1313 reversep = false;
1314 /* test ? 4 : 3
1315 => can_reverse | 4 + (test == 0)
1316 !can_reverse | 3 - (test != 0). */
1317 else if (diff > 0 && STORE_FLAG_VALUE < 0)
1318 {
1319 reversep = can_reverse;
1320 subtract_flag_p = !can_reverse;
1321 /* If we need to subtract the flag and we have PLUS-immediate
1322 A and B then it is unlikely to be beneficial to play tricks
1323 here. */
1324 if (subtract_flag_p && common)
1325 return FALSE;
1326 }
1327 /* test ? 3 : 4
1328 => can_reverse | 3 + (test == 0)
1329 !can_reverse | 4 - (test != 0). */
1330 else if (diff < 0 && STORE_FLAG_VALUE > 0)
1331 {
1332 reversep = can_reverse;
1333 subtract_flag_p = !can_reverse;
1334 /* If we need to subtract the flag and we have PLUS-immediate
1335 A and B then it is unlikely to be beneficial to play tricks
1336 here. */
1337 if (subtract_flag_p && common)
1338 return FALSE;
1339 }
1340 /* test ? 4 : 3
1341 => 4 + (test != 0). */
1342 else if (diff > 0 && STORE_FLAG_VALUE > 0)
1343 reversep = false;
1344 else
1345 gcc_unreachable ();
1346 }
1347 else if (ifalse == 0 && exact_log2 (itrue) >= 0
1348 && (STORE_FLAG_VALUE == 1
1349 || if_info->branch_cost >= 2))
1350 normalize = 1;
1351 else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse
1352 && (STORE_FLAG_VALUE == 1 || if_info->branch_cost >= 2))
1353 {
1354 normalize = 1;
1355 reversep = true;
1356 }
1357 else if (itrue == -1
1358 && (STORE_FLAG_VALUE == -1
1359 || if_info->branch_cost >= 2))
1360 normalize = -1;
1361 else if (ifalse == -1 && can_reverse
1362 && (STORE_FLAG_VALUE == -1 || if_info->branch_cost >= 2))
1363 {
1364 normalize = -1;
1365 reversep = true;
1366 }
1367 else
1368 return FALSE;
1369
1370 if (reversep)
1371 {
1372 std::swap (itrue, ifalse);
1373 diff = trunc_int_for_mode (-(unsigned HOST_WIDE_INT) diff, mode);
1374 }
1375
1376 start_sequence ();
1377
1378 /* If we have x := test ? x + 3 : x + 4 then move the original
1379 x out of the way while we store flags. */
1380 if (common && rtx_equal_p (common, if_info->x))
1381 {
1382 common = gen_reg_rtx (mode);
1383 noce_emit_move_insn (common, if_info->x);
1384 }
1385
1386 target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize);
1387 if (! target)
1388 {
1389 end_sequence ();
1390 return FALSE;
1391 }
1392
1393 /* if (test) x = 3; else x = 4;
1394 => x = 3 + (test == 0); */
1395 if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
1396 {
1397 /* Add the common part now. This may allow combine to merge this
1398 with the store flag operation earlier into some sort of conditional
1399 increment/decrement if the target allows it. */
1400 if (common)
1401 target = expand_simple_binop (mode, PLUS,
1402 target, common,
1403 target, 0, OPTAB_WIDEN);
1404
1405 /* Always use ifalse here. It should have been swapped with itrue
1406 when appropriate when reversep is true. */
1407 target = expand_simple_binop (mode, subtract_flag_p ? MINUS : PLUS,
1408 gen_int_mode (ifalse, mode), target,
1409 if_info->x, 0, OPTAB_WIDEN);
1410 }
1411 /* Other cases are not beneficial when the original A and B are PLUS
1412 expressions. */
1413 else if (common)
1414 {
1415 end_sequence ();
1416 return FALSE;
1417 }
1418 /* if (test) x = 8; else x = 0;
1419 => x = (test != 0) << 3; */
1420 else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0)
1421 {
1422 target = expand_simple_binop (mode, ASHIFT,
1423 target, GEN_INT (tmp), if_info->x, 0,
1424 OPTAB_WIDEN);
1425 }
1426
1427 /* if (test) x = -1; else x = b;
1428 => x = -(test != 0) | b; */
1429 else if (itrue == -1)
1430 {
1431 target = expand_simple_binop (mode, IOR,
1432 target, gen_int_mode (ifalse, mode),
1433 if_info->x, 0, OPTAB_WIDEN);
1434 }
1435 else
1436 {
1437 end_sequence ();
1438 return FALSE;
1439 }
1440
1441 if (! target)
1442 {
1443 end_sequence ();
1444 return FALSE;
1445 }
1446
1447 if (target != if_info->x)
1448 noce_emit_move_insn (if_info->x, target);
1449
1450 seq = end_ifcvt_sequence (if_info);
1451 if (!seq)
1452 return FALSE;
1453
1454 emit_insn_before_setloc (seq, if_info->jump,
1455 INSN_LOCATION (if_info->insn_a));
1456 return TRUE;
1457 }
1458
1459 return FALSE;
1460 }
1461
1462 /* Convert "if (test) foo++" into "foo += (test != 0)", and
1463 similarly for "foo--". */
1464
1465 static int
1466 noce_try_addcc (struct noce_if_info *if_info)
1467 {
1468 rtx target;
1469 rtx_insn *seq;
1470 int subtract, normalize;
1471
1472 if (!noce_simple_bbs (if_info))
1473 return FALSE;
1474
1475 if (GET_CODE (if_info->a) == PLUS
1476 && rtx_equal_p (XEXP (if_info->a, 0), if_info->b)
1477 && (reversed_comparison_code (if_info->cond, if_info->jump)
1478 != UNKNOWN))
1479 {
1480 rtx cond = if_info->cond;
1481 enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
1482
1483 /* First try to use addcc pattern. */
1484 if (general_operand (XEXP (cond, 0), VOIDmode)
1485 && general_operand (XEXP (cond, 1), VOIDmode))
1486 {
1487 start_sequence ();
1488 target = emit_conditional_add (if_info->x, code,
1489 XEXP (cond, 0),
1490 XEXP (cond, 1),
1491 VOIDmode,
1492 if_info->b,
1493 XEXP (if_info->a, 1),
1494 GET_MODE (if_info->x),
1495 (code == LTU || code == GEU
1496 || code == LEU || code == GTU));
1497 if (target)
1498 {
1499 if (target != if_info->x)
1500 noce_emit_move_insn (if_info->x, target);
1501
1502 seq = end_ifcvt_sequence (if_info);
1503 if (!seq)
1504 return FALSE;
1505
1506 emit_insn_before_setloc (seq, if_info->jump,
1507 INSN_LOCATION (if_info->insn_a));
1508 return TRUE;
1509 }
1510 end_sequence ();
1511 }
1512
1513 /* If that fails, construct conditional increment or decrement using
1514 setcc. */
1515 if (if_info->branch_cost >= 2
1516 && (XEXP (if_info->a, 1) == const1_rtx
1517 || XEXP (if_info->a, 1) == constm1_rtx))
1518 {
1519 start_sequence ();
1520 if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
1521 subtract = 0, normalize = 0;
1522 else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
1523 subtract = 1, normalize = 0;
1524 else
1525 subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));
1526
1527
1528 target = noce_emit_store_flag (if_info,
1529 gen_reg_rtx (GET_MODE (if_info->x)),
1530 1, normalize);
1531
1532 if (target)
1533 target = expand_simple_binop (GET_MODE (if_info->x),
1534 subtract ? MINUS : PLUS,
1535 if_info->b, target, if_info->x,
1536 0, OPTAB_WIDEN);
1537 if (target)
1538 {
1539 if (target != if_info->x)
1540 noce_emit_move_insn (if_info->x, target);
1541
1542 seq = end_ifcvt_sequence (if_info);
1543 if (!seq)
1544 return FALSE;
1545
1546 emit_insn_before_setloc (seq, if_info->jump,
1547 INSN_LOCATION (if_info->insn_a));
1548 return TRUE;
1549 }
1550 end_sequence ();
1551 }
1552 }
1553
1554 return FALSE;
1555 }
1556
1557 /* Convert "if (test) x = 0;" to "x &= -(test == 0);" */
1558
1559 static int
1560 noce_try_store_flag_mask (struct noce_if_info *if_info)
1561 {
1562 rtx target;
1563 rtx_insn *seq;
1564 int reversep;
1565
1566 if (!noce_simple_bbs (if_info))
1567 return FALSE;
1568
1569 reversep = 0;
1570 if ((if_info->branch_cost >= 2
1571 || STORE_FLAG_VALUE == -1)
1572 && ((if_info->a == const0_rtx
1573 && rtx_equal_p (if_info->b, if_info->x))
1574 || ((reversep = (reversed_comparison_code (if_info->cond,
1575 if_info->jump)
1576 != UNKNOWN))
1577 && if_info->b == const0_rtx
1578 && rtx_equal_p (if_info->a, if_info->x))))
1579 {
1580 start_sequence ();
1581 target = noce_emit_store_flag (if_info,
1582 gen_reg_rtx (GET_MODE (if_info->x)),
1583 reversep, -1);
1584 if (target)
1585 target = expand_simple_binop (GET_MODE (if_info->x), AND,
1586 if_info->x,
1587 target, if_info->x, 0,
1588 OPTAB_WIDEN);
1589
1590 if (target)
1591 {
1592 int old_cost, new_cost, insn_cost;
1593 int speed_p;
1594
1595 if (target != if_info->x)
1596 noce_emit_move_insn (if_info->x, target);
1597
1598 seq = end_ifcvt_sequence (if_info);
1599 if (!seq)
1600 return FALSE;
1601
1602 speed_p = optimize_bb_for_speed_p (BLOCK_FOR_INSN (if_info->insn_a));
1603 insn_cost = insn_rtx_cost (PATTERN (if_info->insn_a), speed_p);
1604 old_cost = COSTS_N_INSNS (if_info->branch_cost) + insn_cost;
1605 new_cost = seq_cost (seq, speed_p);
1606
1607 if (new_cost > old_cost)
1608 return FALSE;
1609
1610 emit_insn_before_setloc (seq, if_info->jump,
1611 INSN_LOCATION (if_info->insn_a));
1612 return TRUE;
1613 }
1614
1615 end_sequence ();
1616 }
1617
1618 return FALSE;
1619 }
1620
1621 /* Helper function for noce_try_cmove and noce_try_cmove_arith. */
1622
1623 static rtx
1624 noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
1625 rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue)
1626 {
1627 rtx target ATTRIBUTE_UNUSED;
1628 int unsignedp ATTRIBUTE_UNUSED;
1629
1630 /* If earliest == jump, try to build the cmove insn directly.
1631 This is helpful when combine has created some complex condition
1632 (like for alpha's cmovlbs) that we can't hope to regenerate
1633 through the normal interface. */
1634
1635 if (if_info->cond_earliest == if_info->jump)
1636 {
1637 rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
1638 rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
1639 cond, vtrue, vfalse);
1640 rtx set = gen_rtx_SET (x, if_then_else);
1641
1642 start_sequence ();
1643 rtx_insn *insn = emit_insn (set);
1644
1645 if (recog_memoized (insn) >= 0)
1646 {
1647 rtx_insn *seq = get_insns ();
1648 end_sequence ();
1649 emit_insn (seq);
1650
1651 return x;
1652 }
1653
1654 end_sequence ();
1655 }
1656
1657 /* Don't even try if the comparison operands are weird
1658 except that the target supports cbranchcc4. */
1659 if (! general_operand (cmp_a, GET_MODE (cmp_a))
1660 || ! general_operand (cmp_b, GET_MODE (cmp_b)))
1661 {
1662 if (!have_cbranchcc4
1663 || GET_MODE_CLASS (GET_MODE (cmp_a)) != MODE_CC
1664 || cmp_b != const0_rtx)
1665 return NULL_RTX;
1666 }
1667
1668 unsignedp = (code == LTU || code == GEU
1669 || code == LEU || code == GTU);
1670
1671 target = emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode,
1672 vtrue, vfalse, GET_MODE (x),
1673 unsignedp);
1674 if (target)
1675 return target;
1676
1677 /* We might be faced with a situation like:
1678
1679 x = (reg:M TARGET)
1680 vtrue = (subreg:M (reg:N VTRUE) BYTE)
1681 vfalse = (subreg:M (reg:N VFALSE) BYTE)
1682
1683 We can't do a conditional move in mode M, but it's possible that we
1684 could do a conditional move in mode N instead and take a subreg of
1685 the result.
1686
1687 If we can't create new pseudos, though, don't bother. */
1688 if (reload_completed)
1689 return NULL_RTX;
1690
1691 if (GET_CODE (vtrue) == SUBREG && GET_CODE (vfalse) == SUBREG)
1692 {
1693 rtx reg_vtrue = SUBREG_REG (vtrue);
1694 rtx reg_vfalse = SUBREG_REG (vfalse);
1695 unsigned int byte_vtrue = SUBREG_BYTE (vtrue);
1696 unsigned int byte_vfalse = SUBREG_BYTE (vfalse);
1697 rtx promoted_target;
1698
1699 if (GET_MODE (reg_vtrue) != GET_MODE (reg_vfalse)
1700 || byte_vtrue != byte_vfalse
1701 || (SUBREG_PROMOTED_VAR_P (vtrue)
1702 != SUBREG_PROMOTED_VAR_P (vfalse))
1703 || (SUBREG_PROMOTED_GET (vtrue)
1704 != SUBREG_PROMOTED_GET (vfalse)))
1705 return NULL_RTX;
1706
1707 promoted_target = gen_reg_rtx (GET_MODE (reg_vtrue));
1708
1709 target = emit_conditional_move (promoted_target, code, cmp_a, cmp_b,
1710 VOIDmode, reg_vtrue, reg_vfalse,
1711 GET_MODE (reg_vtrue), unsignedp);
1712 /* Nope, couldn't do it in that mode either. */
1713 if (!target)
1714 return NULL_RTX;
1715
1716 target = gen_rtx_SUBREG (GET_MODE (vtrue), promoted_target, byte_vtrue);
1717 SUBREG_PROMOTED_VAR_P (target) = SUBREG_PROMOTED_VAR_P (vtrue);
1718 SUBREG_PROMOTED_SET (target, SUBREG_PROMOTED_GET (vtrue));
1719 emit_move_insn (x, target);
1720 return x;
1721 }
1722 else
1723 return NULL_RTX;
1724 }
1725
1726 /* Try only simple constants and registers here. More complex cases
1727 are handled in noce_try_cmove_arith after noce_try_store_flag_arith
1728 has had a go at it. */
1729
1730 static int
1731 noce_try_cmove (struct noce_if_info *if_info)
1732 {
1733 enum rtx_code code;
1734 rtx target;
1735 rtx_insn *seq;
1736
1737 if (!noce_simple_bbs (if_info))
1738 return FALSE;
1739
1740 if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode))
1741 && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode)))
1742 {
1743 start_sequence ();
1744
1745 code = GET_CODE (if_info->cond);
1746 target = noce_emit_cmove (if_info, if_info->x, code,
1747 XEXP (if_info->cond, 0),
1748 XEXP (if_info->cond, 1),
1749 if_info->a, if_info->b);
1750
1751 if (target)
1752 {
1753 if (target != if_info->x)
1754 noce_emit_move_insn (if_info->x, target);
1755
1756 seq = end_ifcvt_sequence (if_info);
1757 if (!seq)
1758 return FALSE;
1759
1760 emit_insn_before_setloc (seq, if_info->jump,
1761 INSN_LOCATION (if_info->insn_a));
1762 return TRUE;
1763 }
1764 /* If both a and b are constants try a last-ditch transformation:
1765 if (test) x = a; else x = b;
1766 => x = (-(test != 0) & (b - a)) + a;
1767 Try this only if the target-specific expansion above has failed.
1768 The target-specific expander may want to generate sequences that
1769 we don't know about, so give them a chance before trying this
1770 approach. */
1771 else if (!targetm.have_conditional_execution ()
1772 && CONST_INT_P (if_info->a) && CONST_INT_P (if_info->b)
1773 && ((if_info->branch_cost >= 2 && STORE_FLAG_VALUE == -1)
1774 || if_info->branch_cost >= 3))
1775 {
1776 machine_mode mode = GET_MODE (if_info->x);
1777 HOST_WIDE_INT ifalse = INTVAL (if_info->a);
1778 HOST_WIDE_INT itrue = INTVAL (if_info->b);
1779 rtx target = noce_emit_store_flag (if_info, if_info->x, false, -1);
1780 if (!target)
1781 {
1782 end_sequence ();
1783 return FALSE;
1784 }
1785
1786 HOST_WIDE_INT diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
1787 /* Make sure we can represent the difference
1788 between the two values. */
1789 if ((diff > 0)
1790 != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
1791 {
1792 end_sequence ();
1793 return FALSE;
1794 }
1795
1796 diff = trunc_int_for_mode (diff, mode);
1797 target = expand_simple_binop (mode, AND,
1798 target, gen_int_mode (diff, mode),
1799 if_info->x, 0, OPTAB_WIDEN);
1800 if (target)
1801 target = expand_simple_binop (mode, PLUS,
1802 target, gen_int_mode (ifalse, mode),
1803 if_info->x, 0, OPTAB_WIDEN);
1804 if (target)
1805 {
1806 if (target != if_info->x)
1807 noce_emit_move_insn (if_info->x, target);
1808
1809 seq = end_ifcvt_sequence (if_info);
1810 if (!seq)
1811 return FALSE;
1812
1813 emit_insn_before_setloc (seq, if_info->jump,
1814 INSN_LOCATION (if_info->insn_a));
1815 return TRUE;
1816 }
1817 else
1818 {
1819 end_sequence ();
1820 return FALSE;
1821 }
1822 }
1823 else
1824 end_sequence ();
1825 }
1826
1827 return FALSE;
1828 }
1829
1830 /* Return true if X contains a conditional code mode rtx. */
1831
1832 static bool
1833 contains_ccmode_rtx_p (rtx x)
1834 {
1835 subrtx_iterator::array_type array;
1836 FOR_EACH_SUBRTX (iter, array, x, ALL)
1837 if (GET_MODE_CLASS (GET_MODE (*iter)) == MODE_CC)
1838 return true;
1839
1840 return false;
1841 }
1842
1843 /* Helper for bb_valid_for_noce_process_p. Validate that
1844 the rtx insn INSN is a single set that does not set
1845 the conditional register CC and is in general valid for
1846 if-conversion. */
1847
1848 static bool
1849 insn_valid_noce_process_p (rtx_insn *insn, rtx cc)
1850 {
1851 if (!insn
1852 || !NONJUMP_INSN_P (insn)
1853 || (cc && set_of (cc, insn)))
1854 return false;
1855
1856 rtx sset = single_set (insn);
1857
1858 /* Currently support only simple single sets in test_bb. */
1859 if (!sset
1860 || !noce_operand_ok (SET_DEST (sset))
1861 || contains_ccmode_rtx_p (SET_DEST (sset))
1862 || !noce_operand_ok (SET_SRC (sset)))
1863 return false;
1864
1865 return true;
1866 }
1867
1868
1869 /* Return true iff the registers that the insns in BB_A set do not
1870 get used in BB_B. */
1871
1872 static bool
1873 bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b)
1874 {
1875 rtx_insn *a_insn;
1876 bitmap bba_sets = BITMAP_ALLOC (&reg_obstack);
1877
1878 df_ref def;
1879 df_ref use;
1880
1881 FOR_BB_INSNS (bb_a, a_insn)
1882 {
1883 if (!active_insn_p (a_insn))
1884 continue;
1885
1886 rtx sset_a = single_set (a_insn);
1887
1888 if (!sset_a)
1889 {
1890 BITMAP_FREE (bba_sets);
1891 return false;
1892 }
1893
1894 /* Record all registers that BB_A sets. */
1895 FOR_EACH_INSN_DEF (def, a_insn)
1896 bitmap_set_bit (bba_sets, DF_REF_REGNO (def));
1897 }
1898
1899 rtx_insn *b_insn;
1900
1901 FOR_BB_INSNS (bb_b, b_insn)
1902 {
1903 if (!active_insn_p (b_insn))
1904 continue;
1905
1906 rtx sset_b = single_set (b_insn);
1907
1908 if (!sset_b)
1909 {
1910 BITMAP_FREE (bba_sets);
1911 return false;
1912 }
1913
1914 /* Make sure this is a REG and not some instance
1915 of ZERO_EXTRACT or SUBREG or other dangerous stuff. */
1916 if (!REG_P (SET_DEST (sset_b)))
1917 {
1918 BITMAP_FREE (bba_sets);
1919 return false;
1920 }
1921
1922 /* If the insn uses a reg set in BB_A return false. */
1923 FOR_EACH_INSN_USE (use, b_insn)
1924 {
1925 if (bitmap_bit_p (bba_sets, DF_REF_REGNO (use)))
1926 {
1927 BITMAP_FREE (bba_sets);
1928 return false;
1929 }
1930 }
1931
1932 }
1933
1934 BITMAP_FREE (bba_sets);
1935 return true;
1936 }
1937
1938 /* Emit copies of all the active instructions in BB except the last.
1939 This is a helper for noce_try_cmove_arith. */
1940
1941 static void
1942 noce_emit_all_but_last (basic_block bb)
1943 {
1944 rtx_insn *last = last_active_insn (bb, FALSE);
1945 rtx_insn *insn;
1946 FOR_BB_INSNS (bb, insn)
1947 {
1948 if (insn != last && active_insn_p (insn))
1949 {
1950 rtx_insn *to_emit = as_a <rtx_insn *> (copy_rtx (insn));
1951
1952 emit_insn (PATTERN (to_emit));
1953 }
1954 }
1955 }
1956
1957 /* Helper for noce_try_cmove_arith. Emit the pattern TO_EMIT and return
1958 the resulting insn or NULL if it's not a valid insn. */
1959
1960 static rtx_insn *
1961 noce_emit_insn (rtx to_emit)
1962 {
1963 gcc_assert (to_emit);
1964 rtx_insn *insn = emit_insn (to_emit);
1965
1966 if (recog_memoized (insn) < 0)
1967 return NULL;
1968
1969 return insn;
1970 }
1971
1972 /* Helper for noce_try_cmove_arith. Emit a copy of the insns up to
1973 and including the penultimate one in BB if it is not simple
1974 (as indicated by SIMPLE). Then emit LAST_INSN as the last
1975 insn in the block. The reason for that is that LAST_INSN may
1976 have been modified by the preparation in noce_try_cmove_arith. */
1977
1978 static bool
1979 noce_emit_bb (rtx last_insn, basic_block bb, bool simple)
1980 {
1981 if (bb && !simple)
1982 noce_emit_all_but_last (bb);
1983
1984 if (last_insn && !noce_emit_insn (last_insn))
1985 return false;
1986
1987 return true;
1988 }
1989
1990 /* Try more complex cases involving conditional_move. */
1991
1992 static int
1993 noce_try_cmove_arith (struct noce_if_info *if_info)
1994 {
1995 rtx a = if_info->a;
1996 rtx b = if_info->b;
1997 rtx x = if_info->x;
1998 rtx orig_a, orig_b;
1999 rtx_insn *insn_a, *insn_b;
2000 bool a_simple = if_info->then_simple;
2001 bool b_simple = if_info->else_simple;
2002 basic_block then_bb = if_info->then_bb;
2003 basic_block else_bb = if_info->else_bb;
2004 rtx target;
2005 int is_mem = 0;
2006 enum rtx_code code;
2007 rtx_insn *ifcvt_seq;
2008
2009 /* A conditional move from two memory sources is equivalent to a
2010 conditional on their addresses followed by a load. Don't do this
2011 early because it'll screw alias analysis. Note that we've
2012 already checked for no side effects. */
2013 /* ??? FIXME: Magic number 5. */
2014 if (cse_not_expected
2015 && MEM_P (a) && MEM_P (b)
2016 && MEM_ADDR_SPACE (a) == MEM_ADDR_SPACE (b)
2017 && if_info->branch_cost >= 5)
2018 {
2019 machine_mode address_mode = get_address_mode (a);
2020
2021 a = XEXP (a, 0);
2022 b = XEXP (b, 0);
2023 x = gen_reg_rtx (address_mode);
2024 is_mem = 1;
2025 }
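/* An illustrative sketch of the rewrite above, at the source level
   (P, Q and TMP are hypothetical):

       if (test) x = *p; else x = *q;

   becomes

       tmp = test ? p : q;
       x = *tmp;

   i.e. a conditional move on the addresses followed by one
   unconditional load.  Both arms performed a load already, so the
   merged load can only fault where the original program would.  */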
2026
2027 /* ??? We could handle this if we knew that a load from A or B could
2028 not trap or fault. This is also true if we've already loaded
2029 from the address along the path from ENTRY. */
2030 else if (may_trap_or_fault_p (a) || may_trap_or_fault_p (b))
2031 return FALSE;
2032
2033 /* if (test) x = a + b; else x = c - d;
2034 => y = a + b;
2035 x = c - d;
2036 if (test)
2037 x = y;
2038 */
2039
2040 code = GET_CODE (if_info->cond);
2041 insn_a = if_info->insn_a;
2042 insn_b = if_info->insn_b;
2043
2044 machine_mode x_mode = GET_MODE (x);
2045
2046 if (!can_conditionally_move_p (x_mode))
2047 return FALSE;
2048
2049 unsigned int then_cost;
2050 unsigned int else_cost;
2051 if (insn_a)
2052 then_cost = if_info->then_cost;
2053 else
2054 then_cost = 0;
2055
2056 if (insn_b)
2057 else_cost = if_info->else_cost;
2058 else
2059 else_cost = 0;
2060
2061 /* We're going to execute one of the basic blocks anyway, so
2062 bail out if the more expensive of the two blocks is unacceptable. */
2063 if (MAX (then_cost, else_cost) > COSTS_N_INSNS (if_info->branch_cost))
2064 return FALSE;
2065
2066 /* Possibly rearrange operands to make things come out more natural. */
2067 if (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)
2068 {
2069 int reversep = 0;
2070 if (rtx_equal_p (b, x))
2071 reversep = 1;
2072 else if (general_operand (b, GET_MODE (b)))
2073 reversep = 1;
2074
2075 if (reversep)
2076 {
2077 code = reversed_comparison_code (if_info->cond, if_info->jump);
2078 std::swap (a, b);
2079 std::swap (insn_a, insn_b);
2080 std::swap (a_simple, b_simple);
2081 std::swap (then_bb, else_bb);
2082 }
2083 }
2084
2085 if (then_bb && else_bb && !a_simple && !b_simple
2086 && (!bbs_ok_for_cmove_arith (then_bb, else_bb)
2087 || !bbs_ok_for_cmove_arith (else_bb, then_bb)))
2088 return FALSE;
2089
2090 start_sequence ();
2091
2092 /* If one of the blocks is empty then the corresponding B or A value
2093 came from the test block. The non-empty complex block that we will
2094 emit might clobber the register used by B or A, so move it to a pseudo
2095 first. */
2096
2097 rtx tmp_a = NULL_RTX;
2098 rtx tmp_b = NULL_RTX;
2099
2100 if (b_simple || !else_bb)
2101 tmp_b = gen_reg_rtx (x_mode);
2102
2103 if (a_simple || !then_bb)
2104 tmp_a = gen_reg_rtx (x_mode);
2105
2106 orig_a = a;
2107 orig_b = b;
2108
2109 rtx emit_a = NULL_RTX;
2110 rtx emit_b = NULL_RTX;
2111 rtx_insn *tmp_insn = NULL;
2112 bool modified_in_a = false;
2113 bool modified_in_b = false;
2114 /* If either operand is complex, load it into a register first.
2115 The best way to do this is to copy the original insn. In this
2116 way we preserve any clobbers etc that the insn may have had.
2117 This is of course not possible in the IS_MEM case. */
2118
2119 if (! general_operand (a, GET_MODE (a)) || tmp_a)
2120 {
2122 if (is_mem)
2123 {
2124 rtx reg = gen_reg_rtx (GET_MODE (a));
2125 emit_a = gen_rtx_SET (reg, a);
2126 }
2127 else
2128 {
2129 if (insn_a)
2130 {
2131 a = tmp_a ? tmp_a : gen_reg_rtx (GET_MODE (a));
2132
2133 rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
2134 rtx set = single_set (copy_of_a);
2135 SET_DEST (set) = a;
2136
2137 emit_a = PATTERN (copy_of_a);
2138 }
2139 else
2140 {
2141 rtx tmp_reg = tmp_a ? tmp_a : gen_reg_rtx (GET_MODE (a));
2142 emit_a = gen_rtx_SET (tmp_reg, a);
2143 a = tmp_reg;
2144 }
2145 }
2146 }
2147
2148 if (! general_operand (b, GET_MODE (b)) || tmp_b)
2149 {
2150 if (is_mem)
2151 {
2152 rtx reg = gen_reg_rtx (GET_MODE (b));
2153 emit_b = gen_rtx_SET (reg, b);
2154 }
2155 else
2156 {
2157 if (insn_b)
2158 {
2159 b = tmp_b ? tmp_b : gen_reg_rtx (GET_MODE (b));
2160 rtx_insn *copy_of_b = as_a <rtx_insn *> (copy_rtx (insn_b));
2161 rtx set = single_set (copy_of_b);
2162
2163 SET_DEST (set) = b;
2164 emit_b = PATTERN (copy_of_b);
2165 }
2166 else
2167 {
2168 rtx tmp_reg = tmp_b ? tmp_b : gen_reg_rtx (GET_MODE (b));
2169 emit_b = gen_rtx_SET (tmp_reg, b);
2170 b = tmp_reg;
2171 }
2172 }
2173 }
2174
2175 /* If insn to set up A clobbers any registers B depends on, try to
2176 swap insn that sets up A with the one that sets up B. If even
2177 that doesn't help, punt. */
2178
2179 modified_in_a = emit_a != NULL_RTX && modified_in_p (orig_b, emit_a);
2180 if (tmp_b && then_bb)
2181 {
2182 FOR_BB_INSNS (then_bb, tmp_insn)
2183 if (modified_in_p (orig_b, tmp_insn))
2184 {
2185 modified_in_a = true;
2186 break;
2187 }
2189 }
2190 if (emit_a && modified_in_a)
2191 {
2192 modified_in_b = emit_b != NULL_RTX && modified_in_p (orig_a, emit_b);
2193 if (tmp_b && else_bb)
2194 {
2195 FOR_BB_INSNS (else_bb, tmp_insn)
2196 if (modified_in_p (orig_a, tmp_insn))
2197 {
2198 modified_in_b = true;
2199 break;
2200 }
2202 }
2203 if (modified_in_b)
2204 goto end_seq_and_fail;
2205
2206 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2207 goto end_seq_and_fail;
2208
2209 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2210 goto end_seq_and_fail;
2211 }
2212 else
2213 {
2214 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2215 goto end_seq_and_fail;
2216
2217 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2218 goto end_seq_and_fail;
2219
2221
2222 target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0),
2223 XEXP (if_info->cond, 1), a, b);
2224
2225 if (! target)
2226 goto end_seq_and_fail;
2227
2228 /* If we're handling the memory case from above, emit the load now. */
2229 if (is_mem)
2230 {
2231 rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);
2232
2233 /* Copy over flags as appropriate. */
2234 if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
2235 MEM_VOLATILE_P (mem) = 1;
2236 if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
2237 set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
2238 set_mem_align (mem,
2239 MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));
2240
2241 gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
2242 set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));
2243
2244 noce_emit_move_insn (if_info->x, mem);
2245 }
2246 else if (target != x)
2247 noce_emit_move_insn (x, target);
2248
2249 ifcvt_seq = end_ifcvt_sequence (if_info);
2250 if (!ifcvt_seq)
2251 return FALSE;
2252
2253 emit_insn_before_setloc (ifcvt_seq, if_info->jump,
2254 INSN_LOCATION (if_info->insn_a));
2255 return TRUE;
2256
2257 end_seq_and_fail:
2258 end_sequence ();
2259 return FALSE;
2260 }
2261
2262 /* For most cases, the simplified condition we found is the best
2263 choice, but this is not the case for the min/max/abs transforms.
2264 For these we wish to know that it is A or B in the condition. */
2265
2266 static rtx
2267 noce_get_alt_condition (struct noce_if_info *if_info, rtx target,
2268 rtx_insn **earliest)
2269 {
2270 rtx cond, set;
2271 rtx_insn *insn;
2272 int reverse;
2273
2274 /* If target is already mentioned in the known condition, return it. */
2275 if (reg_mentioned_p (target, if_info->cond))
2276 {
2277 *earliest = if_info->cond_earliest;
2278 return if_info->cond;
2279 }
2280
2281 set = pc_set (if_info->jump);
2282 cond = XEXP (SET_SRC (set), 0);
2283 reverse
2284 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2285 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump);
2286 if (if_info->then_else_reversed)
2287 reverse = !reverse;
2288
2289 /* If we're looking for a constant, try to make the conditional
2290 have that constant in it. There are two reasons why it may
2291 not have the constant we want:
2292
2293 1. GCC may have needed to put the constant in a register, because
2294 the target can't compare directly against that constant. For
2295 this case, we look for a SET immediately before the comparison
2296 that puts a constant in that register.
2297
2298 2. GCC may have canonicalized the conditional, for example
2299 replacing "if x < 4" with "if x <= 3". We can undo that (or
2300 make equivalent types of changes) to get the constants we need
2301 if they're off by one in the right direction. */
2302
2303 if (CONST_INT_P (target))
2304 {
2305 enum rtx_code code = GET_CODE (if_info->cond);
2306 rtx op_a = XEXP (if_info->cond, 0);
2307 rtx op_b = XEXP (if_info->cond, 1);
2308 rtx_insn *prev_insn;
2309
2310 /* First, look to see if we put a constant in a register. */
2311 prev_insn = prev_nonnote_insn (if_info->cond_earliest);
2312 if (prev_insn
2313 && BLOCK_FOR_INSN (prev_insn)
2314 == BLOCK_FOR_INSN (if_info->cond_earliest)
2315 && INSN_P (prev_insn)
2316 && GET_CODE (PATTERN (prev_insn)) == SET)
2317 {
2318 rtx src = find_reg_equal_equiv_note (prev_insn);
2319 if (!src)
2320 src = SET_SRC (PATTERN (prev_insn));
2321 if (CONST_INT_P (src))
2322 {
2323 if (rtx_equal_p (op_a, SET_DEST (PATTERN (prev_insn))))
2324 op_a = src;
2325 else if (rtx_equal_p (op_b, SET_DEST (PATTERN (prev_insn))))
2326 op_b = src;
2327
2328 if (CONST_INT_P (op_a))
2329 {
2330 std::swap (op_a, op_b);
2331 code = swap_condition (code);
2332 }
2333 }
2334 }
2335
2336 /* Now, look to see if we can get the right constant by
2337 adjusting the conditional. */
2338 if (CONST_INT_P (op_b))
2339 {
2340 HOST_WIDE_INT desired_val = INTVAL (target);
2341 HOST_WIDE_INT actual_val = INTVAL (op_b);
2342
2343 switch (code)
2344 {
2345 case LT:
2346 if (actual_val == desired_val + 1)
2347 {
2348 code = LE;
2349 op_b = GEN_INT (desired_val);
2350 }
2351 break;
2352 case LE:
2353 if (actual_val == desired_val - 1)
2354 {
2355 code = LT;
2356 op_b = GEN_INT (desired_val);
2357 }
2358 break;
2359 case GT:
2360 if (actual_val == desired_val - 1)
2361 {
2362 code = GE;
2363 op_b = GEN_INT (desired_val);
2364 }
2365 break;
2366 case GE:
2367 if (actual_val == desired_val + 1)
2368 {
2369 code = GT;
2370 op_b = GEN_INT (desired_val);
2371 }
2372 break;
2373 default:
2374 break;
2375 }
2376 }
2377
2378 /* If we made any changes, generate a new conditional that is
2379 equivalent to what we started with, but has the right
2380 constants in it. */
2381 if (code != GET_CODE (if_info->cond)
2382 || op_a != XEXP (if_info->cond, 0)
2383 || op_b != XEXP (if_info->cond, 1))
2384 {
2385 cond = gen_rtx_fmt_ee (code, GET_MODE (cond), op_a, op_b);
2386 *earliest = if_info->cond_earliest;
2387 return cond;
2388 }
2389 }
2390
2391 cond = canonicalize_condition (if_info->jump, cond, reverse,
2392 earliest, target, have_cbranchcc4, true);
2393 if (! cond || ! reg_mentioned_p (target, cond))
2394 return NULL;
2395
2396 /* We almost certainly searched back to a different place.
2397 Need to re-verify correct lifetimes. */
2398
2399 /* X may not be mentioned in the range (cond_earliest, jump]. */
2400 for (insn = if_info->jump; insn != *earliest; insn = PREV_INSN (insn))
2401 if (INSN_P (insn) && reg_overlap_mentioned_p (if_info->x, PATTERN (insn)))
2402 return NULL;
2403
2404 /* A and B may not be modified in the range [cond_earliest, jump). */
2405 for (insn = *earliest; insn != if_info->jump; insn = NEXT_INSN (insn))
2406 if (INSN_P (insn)
2407 && (modified_in_p (if_info->a, insn)
2408 || modified_in_p (if_info->b, insn)))
2409 return NULL;
2410
2411 return cond;
2412 }
2413
2414 /* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */
2415
2416 static int
2417 noce_try_minmax (struct noce_if_info *if_info)
2418 {
2419 rtx cond, target;
2420 rtx_insn *earliest, *seq;
2421 enum rtx_code code, op;
2422 int unsignedp;
2423
2424 if (!noce_simple_bbs (if_info))
2425 return FALSE;
2426
2427 /* ??? Reject modes with NaNs or signed zeros since we don't know how
2428 they will be resolved with an SMIN/SMAX. It wouldn't be too hard
2429 to get the target to tell us... */
2430 if (HONOR_SIGNED_ZEROS (if_info->x)
2431 || HONOR_NANS (if_info->x))
2432 return FALSE;
2433
2434 cond = noce_get_alt_condition (if_info, if_info->a, &earliest);
2435 if (!cond)
2436 return FALSE;
2437
2438 /* Verify the condition is of the form we expect, and canonicalize
2439 the comparison code. */
2440 code = GET_CODE (cond);
2441 if (rtx_equal_p (XEXP (cond, 0), if_info->a))
2442 {
2443 if (! rtx_equal_p (XEXP (cond, 1), if_info->b))
2444 return FALSE;
2445 }
2446 else if (rtx_equal_p (XEXP (cond, 1), if_info->a))
2447 {
2448 if (! rtx_equal_p (XEXP (cond, 0), if_info->b))
2449 return FALSE;
2450 code = swap_condition (code);
2451 }
2452 else
2453 return FALSE;
2454
2455 /* Determine what sort of operation this is. Note that the code is for
2456 a taken branch, so the code->operation mapping appears backwards. */
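/* An illustrative example (a sketch, not from the sources): the source

       if (a < b) x = a; else x = b;   i.e. x = min (a, b)

   reaches this pass as a branch around the THEN arm:

       if (a >= b) goto else_label;
       x = a;

   so the condition code seen here is GE, which therefore maps to SMIN
   below even though "greater or equal", read naively, suggests a
   maximum.  */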
2457 switch (code)
2458 {
2459 case LT:
2460 case LE:
2461 case UNLT:
2462 case UNLE:
2463 op = SMAX;
2464 unsignedp = 0;
2465 break;
2466 case GT:
2467 case GE:
2468 case UNGT:
2469 case UNGE:
2470 op = SMIN;
2471 unsignedp = 0;
2472 break;
2473 case LTU:
2474 case LEU:
2475 op = UMAX;
2476 unsignedp = 1;
2477 break;
2478 case GTU:
2479 case GEU:
2480 op = UMIN;
2481 unsignedp = 1;
2482 break;
2483 default:
2484 return FALSE;
2485 }
2486
2487 start_sequence ();
2488
2489 target = expand_simple_binop (GET_MODE (if_info->x), op,
2490 if_info->a, if_info->b,
2491 if_info->x, unsignedp, OPTAB_WIDEN);
2492 if (! target)
2493 {
2494 end_sequence ();
2495 return FALSE;
2496 }
2497 if (target != if_info->x)
2498 noce_emit_move_insn (if_info->x, target);
2499
2500 seq = end_ifcvt_sequence (if_info);
2501 if (!seq)
2502 return FALSE;
2503
2504 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2505 if_info->cond = cond;
2506 if_info->cond_earliest = earliest;
2507
2508 return TRUE;
2509 }
2510
2511 /* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);",
2512 "if (a < 0) x = ~a; else x = a;" to "x = one_cmpl_abs(a);",
2513 etc. */
2514
2515 static int
2516 noce_try_abs (struct noce_if_info *if_info)
2517 {
2518 rtx cond, target, a, b, c;
2519 rtx_insn *earliest, *seq;
2520 int negate;
2521 bool one_cmpl = false;
2522
2523 if (!noce_simple_bbs (if_info))
2524 return FALSE;
2525
2526 /* Reject modes with signed zeros. */
2527 if (HONOR_SIGNED_ZEROS (if_info->x))
2528 return FALSE;
2529
2530 /* Recognize A and B as constituting an ABS or NABS. The canonical
2531 form is a branch around the negation, taken when the object is the
2532 first operand of a comparison against 0 that evaluates to true. */
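/* For example (an illustrative sketch):

       if (a >= 0) goto over;
       a = -a;
     over:

   Here the THEN arm is the negation and the branch around it is taken
   exactly when the operand compares >= 0, which is the ABS shape; the
   tests below also recognize NABS and the one's-complement variants.  */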
2533 a = if_info->a;
2534 b = if_info->b;
2535 if (GET_CODE (a) == NEG && rtx_equal_p (XEXP (a, 0), b))
2536 negate = 0;
2537 else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a))
2538 {
2539 std::swap (a, b);
2540 negate = 1;
2541 }
2542 else if (GET_CODE (a) == NOT && rtx_equal_p (XEXP (a, 0), b))
2543 {
2544 negate = 0;
2545 one_cmpl = true;
2546 }
2547 else if (GET_CODE (b) == NOT && rtx_equal_p (XEXP (b, 0), a))
2548 {
2549 std::swap (a, b);
2550 negate = 1;
2551 one_cmpl = true;
2552 }
2553 else
2554 return FALSE;
2555
2556 cond = noce_get_alt_condition (if_info, b, &earliest);
2557 if (!cond)
2558 return FALSE;
2559
2560 /* Verify the condition is of the form we expect. */
2561 if (rtx_equal_p (XEXP (cond, 0), b))
2562 c = XEXP (cond, 1);
2563 else if (rtx_equal_p (XEXP (cond, 1), b))
2564 {
2565 c = XEXP (cond, 0);
2566 negate = !negate;
2567 }
2568 else
2569 return FALSE;
2570
2571 /* Verify that C is zero. Search one step backward for a
2572 REG_EQUAL note or a simple source if necessary. */
2573 if (REG_P (c))
2574 {
2575 rtx set;
2576 rtx_insn *insn = prev_nonnote_insn (earliest);
2577 if (insn
2578 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (earliest)
2579 && (set = single_set (insn))
2580 && rtx_equal_p (SET_DEST (set), c))
2581 {
2582 rtx note = find_reg_equal_equiv_note (insn);
2583 if (note)
2584 c = XEXP (note, 0);
2585 else
2586 c = SET_SRC (set);
2587 }
2588 else
2589 return FALSE;
2590 }
2591 if (MEM_P (c)
2592 && GET_CODE (XEXP (c, 0)) == SYMBOL_REF
2593 && CONSTANT_POOL_ADDRESS_P (XEXP (c, 0)))
2594 c = get_pool_constant (XEXP (c, 0));
2595
2596 /* Work around funny ideas get_condition has wrt canonicalization.
2597 Note that these rtx constants are known to be CONST_INT, and
2598 therefore imply integer comparisons. */
2599 if (c == constm1_rtx && GET_CODE (cond) == GT)
2600 ;
2601 else if (c == const1_rtx && GET_CODE (cond) == LT)
2602 ;
2603 else if (c != CONST0_RTX (GET_MODE (b)))
2604 return FALSE;
2605
2606 /* Determine what sort of operation this is. */
2607 switch (GET_CODE (cond))
2608 {
2609 case LT:
2610 case LE:
2611 case UNLT:
2612 case UNLE:
2613 negate = !negate;
2614 break;
2615 case GT:
2616 case GE:
2617 case UNGT:
2618 case UNGE:
2619 break;
2620 default:
2621 return FALSE;
2622 }
2623
2624 start_sequence ();
2625 if (one_cmpl)
2626 target = expand_one_cmpl_abs_nojump (GET_MODE (if_info->x), b,
2627 if_info->x);
2628 else
2629 target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1);
2630
2631 /* ??? It's a quandary whether cmove would be better here, especially
2632 for integers. Perhaps combine will clean things up. */
2633 if (target && negate)
2634 {
2635 if (one_cmpl)
2636 target = expand_simple_unop (GET_MODE (target), NOT, target,
2637 if_info->x, 0);
2638 else
2639 target = expand_simple_unop (GET_MODE (target), NEG, target,
2640 if_info->x, 0);
2641 }
2642
2643 if (! target)
2644 {
2645 end_sequence ();
2646 return FALSE;
2647 }
2648
2649 if (target != if_info->x)
2650 noce_emit_move_insn (if_info->x, target);
2651
2652 seq = end_ifcvt_sequence (if_info);
2653 if (!seq)
2654 return FALSE;
2655
2656 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2657 if_info->cond = cond;
2658 if_info->cond_earliest = earliest;
2659
2660 return TRUE;
2661 }
2662
2663 /* Convert "if (m < 0) x = b; else x = 0;" to "x = (m >> C) & b;". */
2664
2665 static int
2666 noce_try_sign_mask (struct noce_if_info *if_info)
2667 {
2668 rtx cond, t, m, c;
2669 rtx_insn *seq;
2670 machine_mode mode;
2671 enum rtx_code code;
2672 bool t_unconditional;
2673
2674 if (!noce_simple_bbs (if_info))
2675 return FALSE;
2676
2677 cond = if_info->cond;
2678 code = GET_CODE (cond);
2679 m = XEXP (cond, 0);
2680 c = XEXP (cond, 1);
2681
2682 t = NULL_RTX;
2683 if (if_info->a == const0_rtx)
2684 {
2685 if ((code == LT && c == const0_rtx)
2686 || (code == LE && c == constm1_rtx))
2687 t = if_info->b;
2688 }
2689 else if (if_info->b == const0_rtx)
2690 {
2691 if ((code == GE && c == const0_rtx)
2692 || (code == GT && c == constm1_rtx))
2693 t = if_info->a;
2694 }
2695
2696 if (! t || side_effects_p (t))
2697 return FALSE;
2698
2699 /* We currently don't handle different modes. */
2700 mode = GET_MODE (t);
2701 if (GET_MODE (m) != mode)
2702 return FALSE;
2703
2704 /* This is only profitable if T is unconditionally executed/evaluated in the
2705 original insn sequence or T is cheap. The former happens if B is the
2706 non-zero (T) value and if INSN_B was taken from TEST_BB, or there was no
2707 INSN_B, which can happen for e.g. conditional stores to memory. For the
2708 cost computation use the block TEST_BB where the evaluation will end up
2709 after the transformation. */
2710 t_unconditional =
2711 (t == if_info->b
2712 && (if_info->insn_b == NULL_RTX
2713 || BLOCK_FOR_INSN (if_info->insn_b) == if_info->test_bb));
2714 if (!(t_unconditional
2715 || (set_src_cost (t, mode, optimize_bb_for_speed_p (if_info->test_bb))
2716 < COSTS_N_INSNS (2))))
2717 return FALSE;
2718
2719 start_sequence ();
2720 /* Use emit_store_flag to generate "m < 0 ? -1 : 0" instead of expanding
2721 "(signed) m >> 31" directly. This benefits targets with specialized
2722 insns to obtain the signmask, but still uses ashr_optab otherwise. */
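/* A sketch of the intended result for a 32-bit M (illustrative only):

       mask = -(m < 0);      (all ones when m is negative, else zero)
       x = mask & t;

   which is the "(m >> 31) & t" idiom with the shift hidden behind
   emit_store_flag, so targets with a cheaper signmask sequence can
   use it.  */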
2723 m = emit_store_flag (gen_reg_rtx (mode), LT, m, const0_rtx, mode, 0, -1);
2724 t = m ? expand_binop (mode, and_optab, m, t, NULL_RTX, 0, OPTAB_DIRECT)
2725 : NULL_RTX;
2726
2727 if (!t)
2728 {
2729 end_sequence ();
2730 return FALSE;
2731 }
2732
2733 noce_emit_move_insn (if_info->x, t);
2734
2735 seq = end_ifcvt_sequence (if_info);
2736 if (!seq)
2737 return FALSE;
2738
2739 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2740 return TRUE;
2741 }
2742
2743
2744 /* Optimize away "if (x & C) x |= C" and similar bit manipulation
2745 transformations. */
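/* A worked instance (illustrative, with C = 8, i.e. bit 3):

       if (x & 8) x |= 8;        becomes nothing, x is unchanged
       if (!(x & 8)) x |= 8;     becomes x |= 8
       if (x & 8) x ^= 8;        becomes x &= ~8

   The condition is matched below as a single-bit ZERO_EXTRACT
   compared against zero.  */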
2746
2747 static int
2748 noce_try_bitop (struct noce_if_info *if_info)
2749 {
2750 rtx cond, x, a, result;
2751 rtx_insn *seq;
2752 machine_mode mode;
2753 enum rtx_code code;
2754 int bitnum;
2755
2756 x = if_info->x;
2757 cond = if_info->cond;
2758 code = GET_CODE (cond);
2759
2760 if (!noce_simple_bbs (if_info))
2761 return FALSE;
2762
2763 /* Check for no else condition. */
2764 if (! rtx_equal_p (x, if_info->b))
2765 return FALSE;
2766
2767 /* Check for a suitable condition. */
2768 if (code != NE && code != EQ)
2769 return FALSE;
2770 if (XEXP (cond, 1) != const0_rtx)
2771 return FALSE;
2772 cond = XEXP (cond, 0);
2773
2774 /* ??? We could also handle AND here. */
2775 if (GET_CODE (cond) == ZERO_EXTRACT)
2776 {
2777 if (XEXP (cond, 1) != const1_rtx
2778 || !CONST_INT_P (XEXP (cond, 2))
2779 || ! rtx_equal_p (x, XEXP (cond, 0)))
2780 return FALSE;
2781 bitnum = INTVAL (XEXP (cond, 2));
2782 mode = GET_MODE (x);
2783 if (BITS_BIG_ENDIAN)
2784 bitnum = GET_MODE_BITSIZE (mode) - 1 - bitnum;
2785 if (bitnum < 0 || bitnum >= HOST_BITS_PER_WIDE_INT)
2786 return FALSE;
2787 }
2788 else
2789 return FALSE;
2790
2791 a = if_info->a;
2792 if (GET_CODE (a) == IOR || GET_CODE (a) == XOR)
2793 {
2794 /* Check for "if (X & C) x = x op C". */
2795 if (! rtx_equal_p (x, XEXP (a, 0))
2796 || !CONST_INT_P (XEXP (a, 1))
2797 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2798 != (unsigned HOST_WIDE_INT) 1 << bitnum)
2799 return FALSE;
2800
2801 /* if ((x & C) == 0) x |= C; is transformed to x |= C. */
2802 /* if ((x & C) != 0) x |= C; is transformed to nothing. */
2803 if (GET_CODE (a) == IOR)
2804 result = (code == NE) ? a : NULL_RTX;
2805 else if (code == NE)
2806 {
2807 /* if ((x & C) == 0) x ^= C; is transformed to x |= C. */
2808 result = gen_int_mode ((HOST_WIDE_INT) 1 << bitnum, mode);
2809 result = simplify_gen_binary (IOR, mode, x, result);
2810 }
2811 else
2812 {
2813 /* if ((x & C) != 0) x ^= C; is transformed to x &= ~C. */
2814 result = gen_int_mode (~((HOST_WIDE_INT) 1 << bitnum), mode);
2815 result = simplify_gen_binary (AND, mode, x, result);
2816 }
2817 }
2818 else if (GET_CODE (a) == AND)
2819 {
2820 /* Check for "if (X & C) x &= ~C". */
2821 if (! rtx_equal_p (x, XEXP (a, 0))
2822 || !CONST_INT_P (XEXP (a, 1))
2823 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2824 != (~((HOST_WIDE_INT) 1 << bitnum) & GET_MODE_MASK (mode)))
2825 return FALSE;
2826
2827 /* if ((x & C) == 0) x &= ~C; is transformed to nothing. */
2828 /* if ((x & C) != 0) x &= ~C; is transformed to x &= ~C. */
2829 result = (code == EQ) ? a : NULL_RTX;
2830 }
2831 else
2832 return FALSE;
2833
2834 if (result)
2835 {
2836 start_sequence ();
2837 noce_emit_move_insn (x, result);
2838 seq = end_ifcvt_sequence (if_info);
2839 if (!seq)
2840 return FALSE;
2841
2842 emit_insn_before_setloc (seq, if_info->jump,
2843 INSN_LOCATION (if_info->insn_a));
2844 }
2845 return TRUE;
2846 }
2847
2848
2849 /* Similar to get_condition, only the resulting condition must be
2850 valid at JUMP, instead of at EARLIEST.
2851
2852 If THEN_ELSE_REVERSED is true, the fallthrough does not go to the
2853 THEN block of the caller, and we have to reverse the condition. */
2854
2855 static rtx
2856 noce_get_condition (rtx_insn *jump, rtx_insn **earliest, bool then_else_reversed)
2857 {
2858 rtx cond, set, tmp;
2859 bool reverse;
2860
2861 if (! any_condjump_p (jump))
2862 return NULL_RTX;
2863
2864 set = pc_set (jump);
2865
2866 /* If this branches to JUMP_LABEL when the condition is false,
2867 reverse the condition. */
2868 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2869 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump));
2870
2871 /* We may have to reverse because the caller's if block is not canonical,
2872 i.e. the THEN block isn't the fallthrough block for the TEST block
2873 (see find_if_header). */
2874 if (then_else_reversed)
2875 reverse = !reverse;
2876
2877 /* If the condition variable is a register and is MODE_INT, accept it. */
2878
2879 cond = XEXP (SET_SRC (set), 0);
2880 tmp = XEXP (cond, 0);
2881 if (REG_P (tmp) && GET_MODE_CLASS (GET_MODE (tmp)) == MODE_INT
2882 && (GET_MODE (tmp) != BImode
2883 || !targetm.small_register_classes_for_mode_p (BImode)))
2884 {
2885 *earliest = jump;
2886
2887 if (reverse)
2888 cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
2889 GET_MODE (cond), tmp, XEXP (cond, 1));
2890 return cond;
2891 }
2892
2893 /* Otherwise, fall back on canonicalize_condition to do the dirty
2894 work of manipulating MODE_CC values and COMPARE rtx codes. */
2895 tmp = canonicalize_condition (jump, cond, reverse, earliest,
2896 NULL_RTX, have_cbranchcc4, true);
2897
2898 /* We don't handle side-effects in the condition, as that would require
2899 handling REG_INC notes and making sure no duplicate conditions are emitted. */
2900 if (tmp != NULL_RTX && side_effects_p (tmp))
2901 return NULL_RTX;
2902
2903 return tmp;
2904 }
2905
2906 /* Return true if OP is ok for if-then-else processing. */
2907
2908 static int
2909 noce_operand_ok (const_rtx op)
2910 {
2911 if (side_effects_p (op))
2912 return FALSE;
2913
2914 /* We special-case memories, so handle any of them with
2915 no address side effects. */
2916 if (MEM_P (op))
2917 return ! side_effects_p (XEXP (op, 0));
2918
2919 return ! may_trap_p (op);
2920 }
2921
2922 /* Return true if a write into MEM may trap or fault. */
2923
2924 static bool
2925 noce_mem_write_may_trap_or_fault_p (const_rtx mem)
2926 {
2927 rtx addr;
2928
2929 if (MEM_READONLY_P (mem))
2930 return true;
2931
2932 if (may_trap_or_fault_p (mem))
2933 return true;
2934
2935 addr = XEXP (mem, 0);
2936
2937 /* Call the target hook to undo the effects of -fpic etc. */
2938 addr = targetm.delegitimize_address (addr);
2939
2940 while (addr)
2941 switch (GET_CODE (addr))
2942 {
2943 case CONST:
2944 case PRE_DEC:
2945 case PRE_INC:
2946 case POST_DEC:
2947 case POST_INC:
2948 case POST_MODIFY:
2949 addr = XEXP (addr, 0);
2950 break;
2951 case LO_SUM:
2952 case PRE_MODIFY:
2953 addr = XEXP (addr, 1);
2954 break;
2955 case PLUS:
2956 if (CONST_INT_P (XEXP (addr, 1)))
2957 addr = XEXP (addr, 0);
2958 else
2959 return false;
2960 break;
2961 case LABEL_REF:
2962 return true;
2963 case SYMBOL_REF:
2964 if (SYMBOL_REF_DECL (addr)
2965 && decl_readonly_section (SYMBOL_REF_DECL (addr), 0))
2966 return true;
2967 return false;
2968 default:
2969 return false;
2970 }
2971
2972 return false;
2973 }
2974
2975 /* Return whether we can use store speculation for MEM. TOP_BB is the
2976 basic block above the conditional block where we are considering
2977 doing the speculative store. We look for whether MEM is set
2978 unconditionally later in the function. */
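/* As an illustration of what this guards against: for
   "if (cond) *p = v;" we may only emit the store unconditionally when
   some post-dominating block stores to *p anyway; otherwise we would
   introduce a write the original program never performed.  Hence the
   walk over post-dominators below.  */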
2979
2980 static bool
2981 noce_can_store_speculate_p (basic_block top_bb, const_rtx mem)
2982 {
2983 basic_block dominator;
2984
2985 for (dominator = get_immediate_dominator (CDI_POST_DOMINATORS, top_bb);
2986 dominator != NULL;
2987 dominator = get_immediate_dominator (CDI_POST_DOMINATORS, dominator))
2988 {
2989 rtx_insn *insn;
2990
2991 FOR_BB_INSNS (dominator, insn)
2992 {
2993 /* If we see something that might be a memory barrier, we
2994 have to stop looking. Even if the MEM is set later in
2995 the function, we still don't want to set it
2996 unconditionally before the barrier. */
2997 if (INSN_P (insn)
2998 && (volatile_insn_p (PATTERN (insn))
2999 || (CALL_P (insn) && (!RTL_CONST_CALL_P (insn)))))
3000 return false;
3001
3002 if (memory_must_be_modified_in_insn_p (mem, insn))
3003 return true;
3004 if (modified_in_p (XEXP (mem, 0), insn))
3005 return false;
3007 }
3008 }
3009
3010 return false;
3011 }
3012
3013 /* Return true if X contains a MEM subrtx. */
3014
3015 static bool
3016 contains_mem_rtx_p (rtx x)
3017 {
3018 subrtx_iterator::array_type array;
3019 FOR_EACH_SUBRTX (iter, array, x, ALL)
3020 if (MEM_P (*iter))
3021 return true;
3022
3023 return false;
3024 }
3025
3026 /* Return true iff basic block TEST_BB is valid for noce if-conversion.
3027 The condition used in this if-conversion is in COND.
3028 In practice, check that TEST_BB ends with a single set
3029 x := a and all previous computations
3030 in TEST_BB don't produce any values that are live after TEST_BB.
3031 In other words, all the insns in TEST_BB are there only
3032 to compute a value for x. Put the rtx cost of the insns
3033 in TEST_BB into COST. Record whether TEST_BB is a single simple
3034 set instruction in SIMPLE_P. */
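/* For instance (hypothetical pseudos), a TEST_BB of the shape

       r101 = r200 * r201;
       r102 = r101 + 4;
       x    = r102 ^ r200;

   qualifies provided r101 and r102 are dead after the block, i.e. the
   whole block exists only to compute x; the summed rtx cost of the
   three sets is what ends up in COST.  */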
3035
3036 static bool
3037 bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
3038 unsigned int *cost, bool *simple_p)
3039 {
3040 if (!test_bb)
3041 return false;
3042
3043 rtx_insn *last_insn = last_active_insn (test_bb, FALSE);
3044 rtx last_set = NULL_RTX;
3045
3046 rtx cc = cc_in_cond (cond);
3047
3048 if (!insn_valid_noce_process_p (last_insn, cc))
3049 return false;
3050 last_set = single_set (last_insn);
3051
3052 rtx x = SET_DEST (last_set);
3053 rtx_insn *first_insn = first_active_insn (test_bb);
3054 rtx first_set = single_set (first_insn);
3055
3056 if (!first_set)
3057 return false;
3058
3059 /* We have a single simple set, that's okay. */
3060 bool speed_p = optimize_bb_for_speed_p (test_bb);
3061
3062 if (first_insn == last_insn)
3063 {
3064 *simple_p = noce_operand_ok (SET_DEST (first_set));
3065 *cost = insn_rtx_cost (first_set, speed_p);
3066 return *simple_p;
3067 }
3068
3069 rtx_insn *prev_last_insn = PREV_INSN (last_insn);
3070 gcc_assert (prev_last_insn);
3071
3072 /* For now, disallow setting x multiple times in test_bb. */
3073 if (REG_P (x) && reg_set_between_p (x, first_insn, prev_last_insn))
3074 return false;
3075
3076 bitmap test_bb_temps = BITMAP_ALLOC (&reg_obstack);
3077
3078 /* The regs that are live out of test_bb. */
3079 bitmap test_bb_live_out = df_get_live_out (test_bb);
3080
3081 int potential_cost = insn_rtx_cost (last_set, speed_p);
3082 rtx_insn *insn;
3083 FOR_BB_INSNS (test_bb, insn)
3084 {
3085 if (insn != last_insn)
3086 {
3087 if (!active_insn_p (insn))
3088 continue;
3089
3090 if (!insn_valid_noce_process_p (insn, cc))
3091 goto free_bitmap_and_fail;
3092
3093 rtx sset = single_set (insn);
3094 gcc_assert (sset);
3095
3096 if (contains_mem_rtx_p (SET_SRC (sset))
3097 || !REG_P (SET_DEST (sset))
3098 || reg_overlap_mentioned_p (SET_DEST (sset), cond))
3099 goto free_bitmap_and_fail;
3100
3101 potential_cost += insn_rtx_cost (sset, speed_p);
3102 bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset)));
3103 }
3104 }
3105
3106 /* If any of the intermediate results in test_bb are live after test_bb
3107 then fail. */
3108 if (bitmap_intersect_p (test_bb_live_out, test_bb_temps))
3109 goto free_bitmap_and_fail;
3110
3111 BITMAP_FREE (test_bb_temps);
3112 *cost = potential_cost;
3113 *simple_p = false;
3114 return true;
3115
3116 free_bitmap_and_fail:
3117 BITMAP_FREE (test_bb_temps);
3118 return false;
3119 }
3120
3121 /* We have something like:
3122
3123 if (x > y)
3124 { i = a; j = b; k = c; }
3125
3126 Make it:
3127
3128 tmp_i = (x > y) ? a : i;
3129 tmp_j = (x > y) ? b : j;
3130 tmp_k = (x > y) ? c : k;
3131 i = tmp_i;
3132 j = tmp_j;
3133 k = tmp_k;
3134
3135 Subsequent passes are expected to clean up the extra moves.
3136
3137 Look for special cases such as writes to one register which are
3138 read back in another SET, as might occur in a swap idiom or
3139 similar.
3140
3141 These look like:
3142
3143 if (x > y)
3144 i = a;
3145 j = i;
3146
3147 Which we want to rewrite to:
3148
3149 tmp_i = (x > y) ? a : i;
3150 tmp_j = (x > y) ? tmp_i : j;
3151 i = tmp_i;
3152 j = tmp_j;
3153
3154 We can catch these when looking at (SET x y) by keeping a list of the
3155 registers we would have targeted before if-conversion and looking back
3156 through it for an overlap with Y. If we find one, we rewire the
3157 conditional set to use the temporary we introduced earlier.
3158
3159 IF_INFO contains the useful information about the block structure and
3160 jump instructions. */
3161
3162 static int
3163 noce_convert_multiple_sets (struct noce_if_info *if_info)
3164 {
3165 basic_block test_bb = if_info->test_bb;
3166 basic_block then_bb = if_info->then_bb;
3167 basic_block join_bb = if_info->join_bb;
3168 rtx_insn *jump = if_info->jump;
3169 rtx_insn *cond_earliest;
3170 rtx_insn *insn;
3171
3172 start_sequence ();
3173
3174 /* Decompose the condition attached to the jump. */
3175 rtx cond = noce_get_condition (jump, &cond_earliest, false);
3176 rtx x = XEXP (cond, 0);
3177 rtx y = XEXP (cond, 1);
3178 rtx_code cond_code = GET_CODE (cond);
3179
3180 /* The true targets for a conditional move. */
3181 auto_vec<rtx> targets;
3182 /* The temporaries introduced so that we do not have to consider
3183 register overlap. */
3184 auto_vec<rtx> temporaries;
3185 /* The insns we've emitted. */
3186 auto_vec<rtx_insn *> unmodified_insns;
3187 int count = 0;
3188
3189 FOR_BB_INSNS (then_bb, insn)
3190 {
3191 /* Skip over non-insns. */
3192 if (!active_insn_p (insn))
3193 continue;
3194
3195 rtx set = single_set (insn);
3196 gcc_checking_assert (set);
3197
3198 rtx target = SET_DEST (set);
3199 rtx temp = gen_reg_rtx (GET_MODE (target));
3200 rtx new_val = SET_SRC (set);
3201 rtx old_val = target;
3202
3203 /* If we were supposed to read from an earlier write in this block,
3204 we've changed the register allocation. Rewire the read. While
3205 we are looking, also try to catch a swap idiom. */
3206 for (int i = count - 1; i >= 0; --i)
3207 if (reg_overlap_mentioned_p (new_val, targets[i]))
3208 {
3209 /* Catch a "swap" style idiom. */
3210 if (find_reg_note (insn, REG_DEAD, new_val) != NULL_RTX)
3211 /* The write to targets[i] is only live until the read
3212 here. As the condition codes match, we can propagate
3213 the set to here. */
3214 new_val = SET_SRC (single_set (unmodified_insns[i]));
3215 else
3216 new_val = temporaries[i];
3217 break;
3218 }
3219
3220 /* If we had a non-canonical conditional jump (i.e. one where
3221 the fallthrough is to the "else" case) we need to reverse
3222 the conditional select. */
3223 if (if_info->then_else_reversed)
3224 std::swap (old_val, new_val);
3225
3226 /* Actually emit the conditional move. */
3227 rtx temp_dest = noce_emit_cmove (if_info, temp, cond_code,
3228 x, y, new_val, old_val);
3229
3230 /* If we failed to expand the conditional move, drop out and don't
3231 try to continue. */
3232 if (temp_dest == NULL_RTX)
3233 {
3234 end_sequence ();
3235 return FALSE;
3236 }
3237
3238 /* Bookkeeping. */
3239 count++;
3240 targets.safe_push (target);
3241 temporaries.safe_push (temp_dest);
3242 unmodified_insns.safe_push (insn);
3243 }
3244
3245 /* We must have seen some sort of insn to insert, otherwise we were
3246 given an empty BB to convert, and we can't handle that. */
3247 gcc_assert (!unmodified_insns.is_empty ());
3248
3249 /* Now fixup the assignments. */
3250 for (int i = 0; i < count; i++)
3251 noce_emit_move_insn (targets[i], temporaries[i]);
3252
3253 /* Actually emit the sequence. */
3254 rtx_insn *seq = get_insns ();
3255
3256 for (insn = seq; insn; insn = NEXT_INSN (insn))
3257 set_used_flags (insn);
3258
3259 /* Mark all our temporaries and targets as used. */
3260 for (int i = 0; i < count; i++)
3261 {
3262 set_used_flags (temporaries[i]);
3263 set_used_flags (targets[i]);
3264 }
3265
3266 set_used_flags (cond);
3267 set_used_flags (x);
3268 set_used_flags (y);
3269
3270 unshare_all_rtl_in_chain (seq);
3271 end_sequence ();
3272
3273 if (!seq)
3274 return FALSE;
3275
3276 for (insn = seq; insn; insn = NEXT_INSN (insn))
3277 if (JUMP_P (insn)
3278 || recog_memoized (insn) == -1)
3279 return FALSE;
3280
3281 emit_insn_before_setloc (seq, if_info->jump,
3282 INSN_LOCATION (unmodified_insns.last ()));
3283
3284 /* Clean up THEN_BB and the edges in and out of it. */
3285 remove_edge (find_edge (test_bb, join_bb));
3286 remove_edge (find_edge (then_bb, join_bb));
3287 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3288 delete_basic_block (then_bb);
3289 num_true_changes++;
3290
3291 /* Maybe merge blocks now that the jump is simple enough. */
3292 if (can_merge_blocks_p (test_bb, join_bb))
3293 {
3294 merge_blocks (test_bb, join_bb);
3295 num_true_changes++;
3296 }
3297
3298 num_updated_if_blocks++;
3299 return TRUE;
3300 }
3301
3302 /* Return true iff basic block TEST_BB consists only of
3303 (SET (REG) (REG)) insns suitable for conversion to a series
3304 of conditional moves. FORNOW: Use II to find the expected cost of
3305 the branch into/over TEST_BB.
3306
3307 TODO: This creates an implicit "magic number" for branch_cost.
3308 II->branch_cost now guides the maximum number of set instructions in
3309 a basic block which is considered profitable to completely
3310 if-convert. */
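/* As an illustration, a THEN block consisting only of

       i = a;
       j = b;

   (all pseudo registers) qualifies, provided the target can
   conditionally move in the relevant modes and the number of sets
   does not exceed II->branch_cost.  */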
3311
3312 static bool
3313 bb_ok_for_noce_convert_multiple_sets (basic_block test_bb,
3314 struct noce_if_info *ii)
3315 {
3316 rtx_insn *insn;
3317 unsigned count = 0;
3318
3319 FOR_BB_INSNS (test_bb, insn)
3320 {
3321 /* Skip over notes etc. */
3322 if (!active_insn_p (insn))
3323 continue;
3324
3325 /* We only handle SET insns. */
3326 rtx set = single_set (insn);
3327 if (set == NULL_RTX)
3328 return false;
3329
3330 rtx dest = SET_DEST (set);
3331 rtx src = SET_SRC (set);
3332
3333 /* We can possibly relax this, but for now only handle REG to REG
3334 moves. This avoids any issues that might come from introducing
3335 loads/stores that might violate data-race-freedom guarantees. */
3336 if (!(REG_P (src) && REG_P (dest)))
3337 return false;
3338
3339 /* Destination must be appropriate for a conditional write. */
3340 if (!noce_operand_ok (dest))
3341 return false;
3342
3343 /* We must be able to conditionally move in this mode. */
3344 if (!can_conditionally_move_p (GET_MODE (dest)))
3345 return false;
3346
3347 ++count;
3348 }
3349
3350 /* FORNOW: Our cost model is a count of the number of instructions we
3351 would if-convert. This is suboptimal, and should be improved as part
3352 of a wider rework of branch_cost. */
3353 if (count > ii->branch_cost)
3354 return FALSE;
3355
3356 return count > 0;
3357 }
3358
3359 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3360 it without using conditional execution. Return TRUE if we were successful
3361 at converting the block. */
3362
3363 static int
3364 noce_process_if_block (struct noce_if_info *if_info)
3365 {
3366 basic_block test_bb = if_info->test_bb; /* test block */
3367 basic_block then_bb = if_info->then_bb; /* THEN */
3368 basic_block else_bb = if_info->else_bb; /* ELSE or NULL */
3369 basic_block join_bb = if_info->join_bb; /* JOIN */
3370 rtx_insn *jump = if_info->jump;
3371 rtx cond = if_info->cond;
3372 rtx_insn *insn_a, *insn_b;
3373 rtx set_a, set_b;
3374 rtx orig_x, x, a, b;
3375
3376 /* We're looking for patterns of the form
3377
3378 (1) if (...) x = a; else x = b;
3379 (2) x = b; if (...) x = a;
3380 (3) if (...) x = a; // as if with an initial x = x.
3381 (4) if (...) { x = a; y = b; z = c; } // Like 3, for multiple SETS.
3382 The later patterns require jumps to be more expensive.
3383 For the if (...) x = a; else x = b; case we allow multiple insns
3384 inside the then and else blocks as long as their only effect is
3385 to calculate a value for x.
3386 ??? For future expansion, further expand the "multiple X" rules. */
3387
3388 /* First look for multiple SETS. */
3389 if (!else_bb
3390 && HAVE_conditional_move
3391 && !HAVE_cc0
3392 && bb_ok_for_noce_convert_multiple_sets (then_bb, if_info))
3393 {
3394 if (noce_convert_multiple_sets (if_info))
3395 return TRUE;
3396 }
3397
3398 if (! bb_valid_for_noce_process_p (then_bb, cond, &if_info->then_cost,
3399 &if_info->then_simple))
3400 return false;
3401
3402 if (else_bb
3403 && ! bb_valid_for_noce_process_p (else_bb, cond, &if_info->else_cost,
3404 &if_info->else_simple))
3405 return false;
3406
3407 insn_a = last_active_insn (then_bb, FALSE);
3408 set_a = single_set (insn_a);
3409 gcc_assert (set_a);
3410
3411 x = SET_DEST (set_a);
3412 a = SET_SRC (set_a);
3413
3414 /* Look for the other potential set. Make sure we've got equivalent
3415 destinations. */
3416 /* ??? This is overconservative. Storing to two different mems is
3417 as easy as conditionally computing the address. Storing to a
3418 single mem merely requires a scratch memory to use as one of the
3419 destination addresses; often the memory immediately below the
3420 stack pointer is available for this. */
3421 set_b = NULL_RTX;
3422 if (else_bb)
3423 {
3424 insn_b = last_active_insn (else_bb, FALSE);
3425 set_b = single_set (insn_b);
3426 gcc_assert (set_b);
3427
3428 if (!rtx_interchangeable_p (x, SET_DEST (set_b)))
3429 return FALSE;
3430 }
3431 else
3432 {
3433 insn_b = prev_nonnote_nondebug_insn (if_info->cond_earliest);
3434 /* We're going to be moving the evaluation of B down from above
3435 COND_EARLIEST to JUMP. Make sure the relevant data is still
3436 intact. */
3437 if (! insn_b
3438 || BLOCK_FOR_INSN (insn_b) != BLOCK_FOR_INSN (if_info->cond_earliest)
3439 || !NONJUMP_INSN_P (insn_b)
3440 || (set_b = single_set (insn_b)) == NULL_RTX
3441 || ! rtx_interchangeable_p (x, SET_DEST (set_b))
3442 || ! noce_operand_ok (SET_SRC (set_b))
3443 || reg_overlap_mentioned_p (x, SET_SRC (set_b))
3444 || modified_between_p (SET_SRC (set_b), insn_b, jump)
3445 /* Avoid extending the lifetime of hard registers on small
3446 register class machines. */
3447 || (REG_P (SET_SRC (set_b))
3448 && HARD_REGISTER_P (SET_SRC (set_b))
3449 && targetm.small_register_classes_for_mode_p
3450 (GET_MODE (SET_SRC (set_b))))
3451 /* Likewise with X. In particular this can happen when
3452 noce_get_condition looks farther back in the instruction
3453 stream than one might expect. */
3454 || reg_overlap_mentioned_p (x, cond)
3455 || reg_overlap_mentioned_p (x, a)
3456 || modified_between_p (x, insn_b, jump))
3457 {
3458 insn_b = NULL;
3459 set_b = NULL_RTX;
3460 }
3461 }
3462
3463 /* If x has side effects then only the if-then-else form is safe to
3464 convert. But even in that case we would need to restore any notes
3465 (such as REG_INC) at the end. That can be tricky if
3466 noce_emit_move_insn expands to more than one insn, so disable the
3467 optimization entirely for now if there are side effects. */
3468 if (side_effects_p (x))
3469 return FALSE;
3470
3471 b = (set_b ? SET_SRC (set_b) : x);
3472
3473 /* Only operate on register destinations, and even then avoid extending
3474 the lifetime of hard registers on small register class machines. */
3475 orig_x = x;
3476 if (!REG_P (x)
3477 || (HARD_REGISTER_P (x)
3478 && targetm.small_register_classes_for_mode_p (GET_MODE (x))))
3479 {
3480 if (GET_MODE (x) == BLKmode)
3481 return FALSE;
3482
3483 if (GET_CODE (x) == ZERO_EXTRACT
3484 && (!CONST_INT_P (XEXP (x, 1))
3485 || !CONST_INT_P (XEXP (x, 2))))
3486 return FALSE;
3487
3488 x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART
3489 ? XEXP (x, 0) : x));
3490 }
3491
3492 /* Don't operate on sources that may trap or are volatile. */
3493 if (! noce_operand_ok (a) || ! noce_operand_ok (b))
3494 return FALSE;
3495
3496 retry:
3497 /* Set up the info block for our subroutines. */
3498 if_info->insn_a = insn_a;
3499 if_info->insn_b = insn_b;
3500 if_info->x = x;
3501 if_info->a = a;
3502 if_info->b = b;
3503
3504 /* Try optimizations in some approximation of a useful order. */
3505 /* ??? Should first look to see if X is live incoming at all. If it
3506 isn't, we don't need anything but an unconditional set. */
3507
3508 /* Look and see if A and B are really the same. Avoid creating silly
3509 cmove constructs that no one will fix up later. */
3510 if (noce_simple_bbs (if_info)
3511 && rtx_interchangeable_p (a, b))
3512 {
3513 /* If we have an INSN_B, we don't have to create any new rtl. Just
3514 move the instruction that we already have. If we don't have an
3515 INSN_B, that means that A == X, and we've got a noop move. In
3516 that case don't do anything and let the code below delete INSN_A. */
3517 if (insn_b && else_bb)
3518 {
3519 rtx note;
3520
3521 if (else_bb && insn_b == BB_END (else_bb))
3522 BB_END (else_bb) = PREV_INSN (insn_b);
3523 reorder_insns (insn_b, insn_b, PREV_INSN (jump));
3524
3525 /* If there was a REG_EQUAL note, delete it since it may have been
3526 true due to this insn being after a jump. */
3527 if ((note = find_reg_note (insn_b, REG_EQUAL, NULL_RTX)) != 0)
3528 remove_note (insn_b, note);
3529
3530 insn_b = NULL;
3531 }
3532 /* If we have "x = b; if (...) x = a;", and x has side-effects, then
3533 x must be executed twice. */
3534 else if (insn_b && side_effects_p (orig_x))
3535 return FALSE;
3536
3537 x = orig_x;
3538 goto success;
3539 }
3540
3541 if (!set_b && MEM_P (orig_x))
3542 {
3543 /* Disallow the "if (...) x = a;" form (implicit "else x = x;")
3544 for optimizations if writing to x may trap or fault,
3545 i.e. it's a memory other than a static var or a stack slot,
3546 is misaligned on strict-alignment machines or is read-only. If
3547 x is a read-only memory, then the program is valid only if we
3548 avoid the store into it. If there are stores on both the
3549 THEN and ELSE arms, then we can go ahead with the conversion;
3550 either the program is broken, or the condition is always
3551 false such that the other memory is selected. */
3552 if (noce_mem_write_may_trap_or_fault_p (orig_x))
3553 return FALSE;
3554
3555 /* Avoid store speculation: given "if (...) x = a" where x is a
3556 MEM, we only want to do the store if x is always set
3557 somewhere in the function. This avoids cases like
3558 if (pthread_mutex_trylock(mutex))
3559 ++global_variable;
3560 where we only want global_variable to be changed if the mutex
3561 is held. FIXME: This should ideally be expressed directly in
3562 RTL somehow. */
3563 if (!noce_can_store_speculate_p (test_bb, orig_x))
3564 return FALSE;
3565 }
3566
3567 if (noce_try_move (if_info))
3568 goto success;
3569 if (noce_try_store_flag (if_info))
3570 goto success;
3571 if (noce_try_bitop (if_info))
3572 goto success;
3573 if (noce_try_minmax (if_info))
3574 goto success;
3575 if (noce_try_abs (if_info))
3576 goto success;
3577 if (noce_try_inverse_constants (if_info))
3578 goto success;
3579 if (!targetm.have_conditional_execution ()
3580 && noce_try_store_flag_constants (if_info))
3581 goto success;
3582 if (HAVE_conditional_move
3583 && noce_try_cmove (if_info))
3584 goto success;
3585 if (! targetm.have_conditional_execution ())
3586 {
3587 if (noce_try_addcc (if_info))
3588 goto success;
3589 if (noce_try_store_flag_mask (if_info))
3590 goto success;
3591 if (HAVE_conditional_move
3592 && noce_try_cmove_arith (if_info))
3593 goto success;
3594 if (noce_try_sign_mask (if_info))
3595 goto success;
3596 }
3597
3598 if (!else_bb && set_b)
3599 {
3600 insn_b = NULL;
3601 set_b = NULL_RTX;
3602 b = orig_x;
3603 goto retry;
3604 }
3605
3606 return FALSE;
3607
3608 success:
3609
3610 /* If we used a temporary, fix it up now. */
3611 if (orig_x != x)
3612 {
3613 rtx_insn *seq;
3614
3615 start_sequence ();
3616 noce_emit_move_insn (orig_x, x);
3617 seq = get_insns ();
3618 set_used_flags (orig_x);
3619 unshare_all_rtl_in_chain (seq);
3620 end_sequence ();
3621
3622 emit_insn_before_setloc (seq, BB_END (test_bb), INSN_LOCATION (insn_a));
3623 }
3624
3625 /* The original THEN and ELSE blocks may now be removed. The test block
3626 must now jump to the join block. If the test block and the join block
3627 can be merged, do so. */
3628 if (else_bb)
3629 {
3630 delete_basic_block (else_bb);
3631 num_true_changes++;
3632 }
3633 else
3634 remove_edge (find_edge (test_bb, join_bb));
3635
3636 remove_edge (find_edge (then_bb, join_bb));
3637 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3638 delete_basic_block (then_bb);
3639 num_true_changes++;
3640
3641 if (can_merge_blocks_p (test_bb, join_bb))
3642 {
3643 merge_blocks (test_bb, join_bb);
3644 num_true_changes++;
3645 }
3646
3647 num_updated_if_blocks++;
3648 return TRUE;
3649 }
3650
3651 /* Check whether a block is suitable for conditional move conversion.
3652 Every insn must be a simple set of a register to a constant or a
3653 register. For each assignment, store the value in the pointer map
3654 VALS, keyed by register pointer, then store the register
3655 pointer in REGS. COND is the condition we will test. */
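/* For example (an illustrative sketch), a block containing only

       i = 23;
       j = k;

   with I, J and K pseudo registers passes the checks below; memory
   references, possible traps, or reuse of an earlier destination all
   cause rejection.  */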
3656
3657 static int
3658 check_cond_move_block (basic_block bb,
3659 hash_map<rtx, rtx> *vals,
3660 vec<rtx> *regs,
3661 rtx cond)
3662 {
3663 rtx_insn *insn;
3664 rtx cc = cc_in_cond (cond);
3665
3666 /* We can only handle simple jumps at the end of the basic block.
3667 It is almost impossible to update the CFG otherwise. */
3668 insn = BB_END (bb);
3669 if (JUMP_P (insn) && !onlyjump_p (insn))
3670 return FALSE;
3671
3672 FOR_BB_INSNS (bb, insn)
3673 {
3674 rtx set, dest, src;
3675
3676 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3677 continue;
3678 set = single_set (insn);
3679 if (!set)
3680 return FALSE;
3681
3682 dest = SET_DEST (set);
3683 src = SET_SRC (set);
3684 if (!REG_P (dest)
3685 || (HARD_REGISTER_P (dest)
3686 && targetm.small_register_classes_for_mode_p (GET_MODE (dest))))
3687 return FALSE;
3688
3689 if (!CONSTANT_P (src) && !register_operand (src, VOIDmode))
3690 return FALSE;
3691
3692 if (side_effects_p (src) || side_effects_p (dest))
3693 return FALSE;
3694
3695 if (may_trap_p (src) || may_trap_p (dest))
3696 return FALSE;
3697
3698 /* Don't try to handle this if the source register was
3699 modified earlier in the block. */
3700 if ((REG_P (src)
3701 && vals->get (src))
3702 || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))
3703 && vals->get (SUBREG_REG (src))))
3704 return FALSE;
3705
3706 /* Don't try to handle this if the destination register was
3707 modified earlier in the block. */
3708 if (vals->get (dest))
3709 return FALSE;
3710
3711 /* Don't try to handle this if the condition uses the
3712 destination register. */
3713 if (reg_overlap_mentioned_p (dest, cond))
3714 return FALSE;
3715
3716 /* Don't try to handle this if the source register is modified
3717 later in the block. */
3718 if (!CONSTANT_P (src)
3719 && modified_between_p (src, insn, NEXT_INSN (BB_END (bb))))
3720 return FALSE;
3721
3722 /* Skip it if the instruction to be moved might clobber CC. */
3723 if (cc && set_of (cc, insn))
3724 return FALSE;
3725
3726 vals->put (dest, src);
3727
3728 regs->safe_push (dest);
3729 }
3730
3731 return TRUE;
3732 }
3733
3734 /* Given a basic block BB suitable for conditional move conversion,
3735 a condition COND, and pointer maps THEN_VALS and ELSE_VALS containing
3736 the register values depending on COND, emit the insns in the block as
3737 conditional moves. If ELSE_BLOCK_P is true, THEN_BB was already
3738 processed. The caller has started a sequence for the conversion.
3739 Return true if successful, false if something goes wrong. */
3740
3741 static bool
3742 cond_move_convert_if_block (struct noce_if_info *if_infop,
3743 basic_block bb, rtx cond,
3744 hash_map<rtx, rtx> *then_vals,
3745 hash_map<rtx, rtx> *else_vals,
3746 bool else_block_p)
3747 {
3748 enum rtx_code code;
3749 rtx_insn *insn;
3750 rtx cond_arg0, cond_arg1;
3751
3752 code = GET_CODE (cond);
3753 cond_arg0 = XEXP (cond, 0);
3754 cond_arg1 = XEXP (cond, 1);
3755
3756 FOR_BB_INSNS (bb, insn)
3757 {
3758 rtx set, target, dest, t, e;
3759
3760 /* ??? Maybe emit conditional debug insn? */
3761 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3762 continue;
3763 set = single_set (insn);
3764 gcc_assert (set && REG_P (SET_DEST (set)));
3765
3766 dest = SET_DEST (set);
3767
3768 rtx *then_slot = then_vals->get (dest);
3769 rtx *else_slot = else_vals->get (dest);
3770 t = then_slot ? *then_slot : NULL_RTX;
3771 e = else_slot ? *else_slot : NULL_RTX;
3772
3773 if (else_block_p)
3774 {
3775 /* If this register was set in the then block, we already
3776 handled this case there. */
3777 if (t)
3778 continue;
3779 t = dest;
3780 gcc_assert (e);
3781 }
3782 else
3783 {
3784 gcc_assert (t);
3785 if (!e)
3786 e = dest;
3787 }
3788
3789 target = noce_emit_cmove (if_infop, dest, code, cond_arg0, cond_arg1,
3790 t, e);
3791 if (!target)
3792 return false;
3793
3794 if (target != dest)
3795 noce_emit_move_insn (dest, target);
3796 }
3797
3798 return true;
3799 }
3800
3801 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3802 it using only conditional moves. Return TRUE if we were successful at
3803 converting the block. */
3804
3805 static int
3806 cond_move_process_if_block (struct noce_if_info *if_info)
3807 {
3808 basic_block test_bb = if_info->test_bb;
3809 basic_block then_bb = if_info->then_bb;
3810 basic_block else_bb = if_info->else_bb;
3811 basic_block join_bb = if_info->join_bb;
3812 rtx_insn *jump = if_info->jump;
3813 rtx cond = if_info->cond;
3814 rtx_insn *seq, *loc_insn;
3815 rtx reg;
3816 int c;
3817 vec<rtx> then_regs = vNULL;
3818 vec<rtx> else_regs = vNULL;
3819 unsigned int i;
3820 int success_p = FALSE;
3821
3822 /* Build a mapping for each block to the value used for each
3823 register. */
3824 hash_map<rtx, rtx> then_vals;
3825 hash_map<rtx, rtx> else_vals;
3826
3827 /* Make sure the blocks are suitable. */
3828 if (!check_cond_move_block (then_bb, &then_vals, &then_regs, cond)
3829 || (else_bb
3830 && !check_cond_move_block (else_bb, &else_vals, &else_regs, cond)))
3831 goto done;
3832
3833 /* Make sure the blocks can be used together. If the same register
3834 is set in both blocks, and is not set to a constant in both
3835 cases, then both blocks must set it to the same register. We
3836 have already verified that if it is set to a register, the
3837 source register does not change after the assignment. Also count
3838 the number of registers set in only one of the blocks. */
3839 c = 0;
3840 FOR_EACH_VEC_ELT (then_regs, i, reg)
3841 {
3842 rtx *then_slot = then_vals.get (reg);
3843 rtx *else_slot = else_vals.get (reg);
3844
3845 gcc_checking_assert (then_slot);
3846 if (!else_slot)
3847 ++c;
3848 else
3849 {
3850 rtx then_val = *then_slot;
3851 rtx else_val = *else_slot;
3852 if (!CONSTANT_P (then_val) && !CONSTANT_P (else_val)
3853 && !rtx_equal_p (then_val, else_val))
3854 goto done;
3855 }
3856 }
3857
3858 /* Finish off c for MAX_CONDITIONAL_EXECUTE. */
3859 FOR_EACH_VEC_ELT (else_regs, i, reg)
3860 {
3861 gcc_checking_assert (else_vals.get (reg));
3862 if (!then_vals.get (reg))
3863 ++c;
3864 }
3865
3866 /* Make sure it is reasonable to convert this block. What matters
3867 is the number of assignments currently made in only one of the
3868 branches, since if we convert we are going to always execute
3869 them. */
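/* E.g. if the THEN block sets i and j while the ELSE block sets i and
   k, only j and k are counted (c == 2); i is assigned on either path
   already, so converting it adds no unconditionally executed work.  */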
3870 if (c > MAX_CONDITIONAL_EXECUTE)
3871 goto done;
3872
3873 /* Try to emit the conditional moves. First do the then block,
3874 then do anything left in the else blocks. */
3875 start_sequence ();
3876 if (!cond_move_convert_if_block (if_info, then_bb, cond,
3877 &then_vals, &else_vals, false)
3878 || (else_bb
3879 && !cond_move_convert_if_block (if_info, else_bb, cond,
3880 &then_vals, &else_vals, true)))
3881 {
3882 end_sequence ();
3883 goto done;
3884 }
3885 seq = end_ifcvt_sequence (if_info);
3886 if (!seq)
3887 goto done;
3888
3889 loc_insn = first_active_insn (then_bb);
3890 if (!loc_insn)
3891 {
3892 loc_insn = first_active_insn (else_bb);
3893 gcc_assert (loc_insn);
3894 }
3895 emit_insn_before_setloc (seq, jump, INSN_LOCATION (loc_insn));
3896
3897 if (else_bb)
3898 {
3899 delete_basic_block (else_bb);
3900 num_true_changes++;
3901 }
3902 else
3903 remove_edge (find_edge (test_bb, join_bb));
3904
3905 remove_edge (find_edge (then_bb, join_bb));
3906 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3907 delete_basic_block (then_bb);
3908 num_true_changes++;
3909
3910 if (can_merge_blocks_p (test_bb, join_bb))
3911 {
3912 merge_blocks (test_bb, join_bb);
3913 num_true_changes++;
3914 }
3915
3916 num_updated_if_blocks++;
3917
3918 success_p = TRUE;
3919
3920 done:
3921 then_regs.release ();
3922 else_regs.release ();
3923 return success_p;
3924 }
3925
3926 \f
3927 /* Determine if a given basic block heads a simple IF-THEN-JOIN or an
3928 IF-THEN-ELSE-JOIN block.
3929
3930 If so, we'll try to convert the insns to not require the branch,
3931 using only transformations that do not require conditional execution.
3932
3933 Return TRUE if we were successful at converting the block. */
3934
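/* Editorial sketch of the shapes recognized below (block names are
   illustrative):

	IF-THEN-ELSE-JOIN:	IF-THEN-JOIN:	IF-ELSE-JOIN:

	    TEST		    TEST	    TEST
	    /  \		    |  \	    |  \
	  THEN  ELSE		    |  THEN	    |  ELSE
	    \  /		    |  /	    |  /
	    JOIN		    JOIN	    JOIN

   In the IF-ELSE-JOIN case the THEN and ELSE roles are swapped and the
   jump condition reversed, as the code below explains. */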
3935 static int
3936 noce_find_if_block (basic_block test_bb, edge then_edge, edge else_edge,
3937 int pass)
3938 {
3939 basic_block then_bb, else_bb, join_bb;
3940 bool then_else_reversed = false;
3941 rtx_insn *jump;
3942 rtx cond;
3943 rtx_insn *cond_earliest;
3944 struct noce_if_info if_info;
3945
3946 /* We should only ever get here before reload. */
3947 gcc_assert (!reload_completed);
3948
3949 /* Recognize an IF-THEN-ELSE-JOIN block. */
3950 if (single_pred_p (then_edge->dest)
3951 && single_succ_p (then_edge->dest)
3952 && single_pred_p (else_edge->dest)
3953 && single_succ_p (else_edge->dest)
3954 && single_succ (then_edge->dest) == single_succ (else_edge->dest))
3955 {
3956 then_bb = then_edge->dest;
3957 else_bb = else_edge->dest;
3958 join_bb = single_succ (then_bb);
3959 }
3960 /* Recognize an IF-THEN-JOIN block. */
3961 else if (single_pred_p (then_edge->dest)
3962 && single_succ_p (then_edge->dest)
3963 && single_succ (then_edge->dest) == else_edge->dest)
3964 {
3965 then_bb = then_edge->dest;
3966 else_bb = NULL_BLOCK;
3967 join_bb = else_edge->dest;
3968 }
3969 /* Recognize an IF-ELSE-JOIN block. We can have those because the order
3970 of basic blocks in cfglayout mode does not matter, so the fallthrough
3971 edge can go to any basic block (and not just to bb->next_bb, like in
3972 cfgrtl mode). */
3973 else if (single_pred_p (else_edge->dest)
3974 && single_succ_p (else_edge->dest)
3975 && single_succ (else_edge->dest) == then_edge->dest)
3976 {
3977 /* The noce transformations do not apply to IF-ELSE-JOIN blocks.
3978 To make this work, we have to invert the THEN and ELSE blocks
3979 and reverse the jump condition. */
3980 then_bb = else_edge->dest;
3981 else_bb = NULL_BLOCK;
3982 join_bb = single_succ (then_bb);
3983 then_else_reversed = true;
3984 }
3985 else
3986 /* Not a form we can handle. */
3987 return FALSE;
3988
3989 /* The edges of the THEN and ELSE blocks cannot have complex edges. */
3990 if (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
3991 return FALSE;
3992 if (else_bb
3993 && single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
3994 return FALSE;
3995
3996 num_possible_if_blocks++;
3997
3998 if (dump_file)
3999 {
4000 fprintf (dump_file,
4001 "\nIF-THEN%s-JOIN block found, pass %d, test %d, then %d",
4002 (else_bb) ? "-ELSE" : "",
4003 pass, test_bb->index, then_bb->index);
4004
4005 if (else_bb)
4006 fprintf (dump_file, ", else %d", else_bb->index);
4007
4008 fprintf (dump_file, ", join %d\n", join_bb->index);
4009 }
4010
4011 /* If the conditional jump is more than just a conditional
4012 jump, then we cannot do if-conversion on this block. */
4013 jump = BB_END (test_bb);
4014 if (! onlyjump_p (jump))
4015 return FALSE;
4016
4017 /* If this is not a standard conditional jump, we can't parse it. */
4018 cond = noce_get_condition (jump, &cond_earliest, then_else_reversed);
4019 if (!cond)
4020 return FALSE;
4021
4022 /* We must be comparing objects whose modes imply the size. */
4023 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
4024 return FALSE;
4025
4026 /* Initialize an IF_INFO struct to pass around. */
4027 memset (&if_info, 0, sizeof if_info);
4028 if_info.test_bb = test_bb;
4029 if_info.then_bb = then_bb;
4030 if_info.else_bb = else_bb;
4031 if_info.join_bb = join_bb;
4032 if_info.cond = cond;
4033 if_info.cond_earliest = cond_earliest;
4034 if_info.jump = jump;
4035 if_info.then_else_reversed = then_else_reversed;
4036 if_info.branch_cost = BRANCH_COST (optimize_bb_for_speed_p (test_bb),
4037 predictable_edge_p (then_edge));
4038
4039 /* Do the real work. */
4040
4041 if (noce_process_if_block (&if_info))
4042 return TRUE;
4043
4044 if (HAVE_conditional_move
4045 && cond_move_process_if_block (&if_info))
4046 return TRUE;
4047
4048 return FALSE;
4049 }
4050 \f
4051
4052 /* Merge the blocks and mark for local life update. */
4053
4054 static void
4055 merge_if_block (struct ce_if_block * ce_info)
4056 {
4057 basic_block test_bb = ce_info->test_bb; /* last test block */
4058 basic_block then_bb = ce_info->then_bb; /* THEN */
4059 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
4060 basic_block join_bb = ce_info->join_bb; /* join block */
4061 basic_block combo_bb;
4062
4063 /* All block merging is done into the lower block numbers. */
4064
4065 combo_bb = test_bb;
4066 df_set_bb_dirty (test_bb);
4067
4068 /* Merge any basic blocks to handle && and || subtests. Each of
4069 the blocks is on the fallthru path from the predecessor block. */
4070 if (ce_info->num_multiple_test_blocks > 0)
4071 {
4072 basic_block bb = test_bb;
4073 basic_block last_test_bb = ce_info->last_test_bb;
4074 basic_block fallthru = block_fallthru (bb);
4075
4076 do
4077 {
4078 bb = fallthru;
4079 fallthru = block_fallthru (bb);
4080 merge_blocks (combo_bb, bb);
4081 num_true_changes++;
4082 }
4083 while (bb != last_test_bb);
4084 }
4085
4086 /* Merge TEST block into THEN block. Normally the THEN block won't have a
4087 label, but it might if there were || tests. That label's count should be
4088 zero, and it should normally be removed. */
4089
4090 if (then_bb)
4091 {
4092 /* If THEN_BB has no successors, then there's a BARRIER after it.
4093 If COMBO_BB has more than one successor (THEN_BB), then that BARRIER
4094 is no longer needed, and in fact it is incorrect to leave it in
4095 the insn stream. */
4096 if (EDGE_COUNT (then_bb->succs) == 0
4097 && EDGE_COUNT (combo_bb->succs) > 1)
4098 {
4099 rtx_insn *end = NEXT_INSN (BB_END (then_bb));
4100 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
4101 end = NEXT_INSN (end);
4102
4103 if (end && BARRIER_P (end))
4104 delete_insn (end);
4105 }
4106 merge_blocks (combo_bb, then_bb);
4107 num_true_changes++;
4108 }
4109
4110 /* The ELSE block, if it existed, had a label. That label's count
4111 will almost always be zero, but odd things can happen when labels
4112 get their addresses taken. */
4113 if (else_bb)
4114 {
4115 /* If ELSE_BB has no successors, then there's a BARRIER after it.
4116 If COMBO_BB has more than one successor (ELSE_BB), then that BARRIER
4117 is no longer needed, and in fact it is incorrect to leave it in
4118 the insn stream. */
4119 if (EDGE_COUNT (else_bb->succs) == 0
4120 && EDGE_COUNT (combo_bb->succs) > 1)
4121 {
4122 rtx_insn *end = NEXT_INSN (BB_END (else_bb));
4123 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
4124 end = NEXT_INSN (end);
4125
4126 if (end && BARRIER_P (end))
4127 delete_insn (end);
4128 }
4129 merge_blocks (combo_bb, else_bb);
4130 num_true_changes++;
4131 }
4132
4133 /* If there was no join block reported, that means it was not adjacent
4134 to the others, and so we cannot merge them. */
4135
4136 if (! join_bb)
4137 {
4138 rtx_insn *last = BB_END (combo_bb);
4139
4140 /* The outgoing edge for the current COMBO block should already
4141 be correct. Verify this. */
4142 if (EDGE_COUNT (combo_bb->succs) == 0)
4143 gcc_assert (find_reg_note (last, REG_NORETURN, NULL)
4144 || (NONJUMP_INSN_P (last)
4145 && GET_CODE (PATTERN (last)) == TRAP_IF
4146 && (TRAP_CONDITION (PATTERN (last))
4147 == const_true_rtx)));
4148
4149 else
4150 /* There should still be something at the end of the THEN or ELSE
4151 blocks taking us to our final destination. */
4152 gcc_assert (JUMP_P (last)
4153 || (EDGE_SUCC (combo_bb, 0)->dest
4154 == EXIT_BLOCK_PTR_FOR_FN (cfun)
4155 && CALL_P (last)
4156 && SIBLING_CALL_P (last))
4157 || ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
4158 && can_throw_internal (last)));
4159 }
4160
4161 /* The JOIN block may have had quite a number of other predecessors too.
4162 Since we've already merged the TEST, THEN and ELSE blocks, we should
4163 have only one remaining edge from our if-then-else diamond. If there
4164 is more than one remaining edge, it must come from elsewhere. There
4165 may be zero incoming edges if the THEN block didn't actually join
4166 back up (as with a call to a non-return function). */
4167 else if (EDGE_COUNT (join_bb->preds) < 2
4168 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4169 {
4170 /* We can merge the JOIN cleanly, update the dataflow, and try
4171 again on this pass. */
4172 merge_blocks (combo_bb, join_bb);
4173 num_true_changes++;
4174 }
4175 else
4176 {
4177 /* We cannot merge the JOIN. */
4178
4179 /* The outgoing edge for the current COMBO block should already
4180 be correct. Verify this. */
4181 gcc_assert (single_succ_p (combo_bb)
4182 && single_succ (combo_bb) == join_bb);
4183
4184 /* Remove the jump and cruft from the end of the COMBO block. */
4185 if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4186 tidy_fallthru_edge (single_succ_edge (combo_bb));
4187 }
4188
4189 num_updated_if_blocks++;
4190 }
4191 \f
4192 /* Find a block ending in a simple IF condition and try to transform it
4193 in some way. When converting a multi-block condition, put the new code
4194 in the first such block and delete the rest. Return a pointer to this
4195 first block if some transformation was done. Return NULL otherwise. */
4196
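/* Editorial note: the strategies below are attempted in order -- the
   pre-reload "noce" transformations, conditional execution proper
   after reload, conditional traps, and finally the post-dominator
   based cases 1 and 2 -- and the first one that succeeds wins for
   this header. */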
4197 static basic_block
4198 find_if_header (basic_block test_bb, int pass)
4199 {
4200 ce_if_block ce_info;
4201 edge then_edge;
4202 edge else_edge;
4203
4204 /* The kind of block we're looking for has exactly two successors. */
4205 if (EDGE_COUNT (test_bb->succs) != 2)
4206 return NULL;
4207
4208 then_edge = EDGE_SUCC (test_bb, 0);
4209 else_edge = EDGE_SUCC (test_bb, 1);
4210
4211 if (df_get_bb_dirty (then_edge->dest))
4212 return NULL;
4213 if (df_get_bb_dirty (else_edge->dest))
4214 return NULL;
4215
4216 /* Neither edge should be abnormal. */
4217 if ((then_edge->flags & EDGE_COMPLEX)
4218 || (else_edge->flags & EDGE_COMPLEX))
4219 return NULL;
4220
4221 /* Nor exit the loop. */
4222 if ((then_edge->flags & EDGE_LOOP_EXIT)
4223 || (else_edge->flags & EDGE_LOOP_EXIT))
4224 return NULL;
4225
4226 /* The THEN edge is canonically the one that falls through. */
4227 if (then_edge->flags & EDGE_FALLTHRU)
4228 ;
4229 else if (else_edge->flags & EDGE_FALLTHRU)
4230 std::swap (then_edge, else_edge);
4231 else
4232 /* Otherwise this must be a multiway branch of some sort. */
4233 return NULL;
4234
4235 memset (&ce_info, 0, sizeof (ce_info));
4236 ce_info.test_bb = test_bb;
4237 ce_info.then_bb = then_edge->dest;
4238 ce_info.else_bb = else_edge->dest;
4239 ce_info.pass = pass;
4240
4241 #ifdef IFCVT_MACHDEP_INIT
4242 IFCVT_MACHDEP_INIT (&ce_info);
4243 #endif
4244
4245 if (!reload_completed
4246 && noce_find_if_block (test_bb, then_edge, else_edge, pass))
4247 goto success;
4248
4249 if (reload_completed
4250 && targetm.have_conditional_execution ()
4251 && cond_exec_find_if_block (&ce_info))
4252 goto success;
4253
4254 if (targetm.have_trap ()
4255 && optab_handler (ctrap_optab, word_mode) != CODE_FOR_nothing
4256 && find_cond_trap (test_bb, then_edge, else_edge))
4257 goto success;
4258
4259 if (dom_info_state (CDI_POST_DOMINATORS) >= DOM_NO_FAST_QUERY
4260 && (reload_completed || !targetm.have_conditional_execution ()))
4261 {
4262 if (find_if_case_1 (test_bb, then_edge, else_edge))
4263 goto success;
4264 if (find_if_case_2 (test_bb, then_edge, else_edge))
4265 goto success;
4266 }
4267
4268 return NULL;
4269
4270 success:
4271 if (dump_file)
4272 fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass);
4273 /* Set this so we continue looking. */
4274 cond_exec_changed_p = TRUE;
4275 return ce_info.test_bb;
4276 }
4277
4278 /* Check whether a block has two edges, one of which falls through to the
4279 next block, and the other jumps to a specific block, so that we can tell
4280 if the block is part of an && test or an || test. Return -1 if the block
4281 does not match, otherwise the number of non-note, non-jump, non-USE/CLOBBER insns. */
4282
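/* Editorial illustration (not from the sources): for a source-level
   "if (a && b) x = 1;", the block testing A jumps to the join/else
   block when A is false and falls through to the block testing B
   otherwise. That one-jump-plus-one-fallthru shape is what the
   function below detects; the insn count it returns lets the caller
   bound the total amount of predicated work. */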
4283 static int
4284 block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb)
4285 {
4286 edge cur_edge;
4287 int fallthru_p = FALSE;
4288 int jump_p = FALSE;
4289 rtx_insn *insn;
4290 rtx_insn *end;
4291 int n_insns = 0;
4292 edge_iterator ei;
4293
4294 if (!cur_bb || !target_bb)
4295 return -1;
4296
4297 /* If no edges, it neither jumps nor falls thru; report no match. */
4298 if (EDGE_COUNT (cur_bb->succs) == 0)
4299 return -1;
4300
4301 FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs)
4302 {
4303 if (cur_edge->flags & EDGE_COMPLEX)
4304 /* Anything complex isn't what we want. */
4305 return -1;
4306
4307 else if (cur_edge->flags & EDGE_FALLTHRU)
4308 fallthru_p = TRUE;
4309
4310 else if (cur_edge->dest == target_bb)
4311 jump_p = TRUE;
4312
4313 else
4314 return -1;
4315 }
4316
4317 if ((jump_p & fallthru_p) == 0)
4318 return -1;
4319
4320 /* Don't allow calls in the block, since this is used to group && and ||
4321 together for conditional execution support. ??? We should support
4322 conditional execution across calls for IA-64 some day, but
4323 for now skipping them keeps the code simpler. */
4324 end = BB_END (cur_bb);
4325 insn = BB_HEAD (cur_bb);
4326
4327 while (insn != NULL_RTX)
4328 {
4329 if (CALL_P (insn))
4330 return -1;
4331
4332 if (INSN_P (insn)
4333 && !JUMP_P (insn)
4334 && !DEBUG_INSN_P (insn)
4335 && GET_CODE (PATTERN (insn)) != USE
4336 && GET_CODE (PATTERN (insn)) != CLOBBER)
4337 n_insns++;
4338
4339 if (insn == end)
4340 break;
4341
4342 insn = NEXT_INSN (insn);
4343 }
4344
4345 return n_insns;
4346 }
4347
4348 /* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE
4349 block. If so, we'll try to convert the insns to not require the branch.
4350 Return TRUE if we were successful at converting the block. */
4351
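/* Hedged illustration of the end result, assuming an ARM-like
   predicated target (the exact insns are hypothetical):

	if (a > b) x = 1; else x = 2;

   can become the branch-free sequence

	cmp	a, b
	movgt	x, #1
	movle	x, #2

   which is what cond_exec_process_if_block emits once this function
   has recognized the block structure. */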
4352 static int
4353 cond_exec_find_if_block (struct ce_if_block * ce_info)
4354 {
4355 basic_block test_bb = ce_info->test_bb;
4356 basic_block then_bb = ce_info->then_bb;
4357 basic_block else_bb = ce_info->else_bb;
4358 basic_block join_bb = NULL_BLOCK;
4359 edge cur_edge;
4360 basic_block next;
4361 edge_iterator ei;
4362
4363 ce_info->last_test_bb = test_bb;
4364
4365 /* We should only ever get here after reload,
4366 and if we have conditional execution. */
4367 gcc_assert (reload_completed && targetm.have_conditional_execution ());
4368
4369 /* Discover if any fall through predecessors of the current test basic block
4370 were && tests (which jump to the else block) or || tests (which jump to
4371 the then block). */
4372 if (single_pred_p (test_bb)
4373 && single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
4374 {
4375 basic_block bb = single_pred (test_bb);
4376 basic_block target_bb;
4377 int max_insns = MAX_CONDITIONAL_EXECUTE;
4378 int n_insns;
4379
4380 /* Determine if the preceding block is an && or || block. */
4381 if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
4382 {
4383 ce_info->and_and_p = TRUE;
4384 target_bb = else_bb;
4385 }
4386 else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0)
4387 {
4388 ce_info->and_and_p = FALSE;
4389 target_bb = then_bb;
4390 }
4391 else
4392 target_bb = NULL_BLOCK;
4393
4394 if (target_bb && n_insns <= max_insns)
4395 {
4396 int total_insns = 0;
4397 int blocks = 0;
4398
4399 ce_info->last_test_bb = test_bb;
4400
4401 /* Found at least one && or || block, look for more. */
4402 do
4403 {
4404 ce_info->test_bb = test_bb = bb;
4405 total_insns += n_insns;
4406 blocks++;
4407
4408 if (!single_pred_p (bb))
4409 break;
4410
4411 bb = single_pred (bb);
4412 n_insns = block_jumps_and_fallthru_p (bb, target_bb);
4413 }
4414 while (n_insns >= 0 && (total_insns + n_insns) <= max_insns);
4415
4416 ce_info->num_multiple_test_blocks = blocks;
4417 ce_info->num_multiple_test_insns = total_insns;
4418
4419 if (ce_info->and_and_p)
4420 ce_info->num_and_and_blocks = blocks;
4421 else
4422 ce_info->num_or_or_blocks = blocks;
4423 }
4424 }
4425
4426 /* The THEN block of an IF-THEN combo must have exactly one predecessor,
4427 other than any || blocks which jump to the THEN block. */
4428 if ((EDGE_COUNT (then_bb->preds) - ce_info->num_or_or_blocks) != 1)
4429 return FALSE;
4430
4431 /* The edges of the THEN and ELSE blocks cannot have complex edges. */
4432 FOR_EACH_EDGE (cur_edge, ei, then_bb->preds)
4433 {
4434 if (cur_edge->flags & EDGE_COMPLEX)
4435 return FALSE;
4436 }
4437
4438 FOR_EACH_EDGE (cur_edge, ei, else_bb->preds)
4439 {
4440 if (cur_edge->flags & EDGE_COMPLEX)
4441 return FALSE;
4442 }
4443
4444 /* The THEN block of an IF-THEN combo must have zero or one successors. */
4445 if (EDGE_COUNT (then_bb->succs) > 0
4446 && (!single_succ_p (then_bb)
4447 || (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
4448 || (epilogue_completed
4449 && tablejump_p (BB_END (then_bb), NULL, NULL))))
4450 return FALSE;
4451
4452 /* If the THEN block has no successors, conditional execution can still
4453 make a conditional call. Don't do this unless the ELSE block has
4454 only one incoming edge -- the CFG manipulation is too ugly otherwise.
4455 Check whether the last insn of the THEN block is an indirect jump, which
4456 is listed as having no successors but confuses the rest of the CE
4457 code processing. ??? We should fix this in the future. */
4458 if (EDGE_COUNT (then_bb->succs) == 0)
4459 {
4460 if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4461 {
4462 rtx_insn *last_insn = BB_END (then_bb);
4463
4464 while (last_insn
4465 && NOTE_P (last_insn)
4466 && last_insn != BB_HEAD (then_bb))
4467 last_insn = PREV_INSN (last_insn);
4468
4469 if (last_insn
4470 && JUMP_P (last_insn)
4471 && ! simplejump_p (last_insn))
4472 return FALSE;
4473
4474 join_bb = else_bb;
4475 else_bb = NULL_BLOCK;
4476 }
4477 else
4478 return FALSE;
4479 }
4480
4481 /* If the THEN block's successor is the other edge out of the TEST block,
4482 then we have an IF-THEN combo without an ELSE. */
4483 else if (single_succ (then_bb) == else_bb)
4484 {
4485 join_bb = else_bb;
4486 else_bb = NULL_BLOCK;
4487 }
4488
4489 /* If the THEN and ELSE block meet in a subsequent block, and the ELSE
4490 has exactly one predecessor and one successor, and the outgoing edge
4491 is not complex, then we have an IF-THEN-ELSE combo. */
4492 else if (single_succ_p (else_bb)
4493 && single_succ (then_bb) == single_succ (else_bb)
4494 && single_pred_p (else_bb)
4495 && !(single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
4496 && !(epilogue_completed
4497 && tablejump_p (BB_END (else_bb), NULL, NULL)))
4498 join_bb = single_succ (else_bb);
4499
4500 /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
4501 else
4502 return FALSE;
4503
4504 num_possible_if_blocks++;
4505
4506 if (dump_file)
4507 {
4508 fprintf (dump_file,
4509 "\nIF-THEN%s block found, pass %d, start block %d "
4510 "[insn %d], then %d [%d]",
4511 (else_bb) ? "-ELSE" : "",
4512 ce_info->pass,
4513 test_bb->index,
4514 BB_HEAD (test_bb) ? (int)INSN_UID (BB_HEAD (test_bb)) : -1,
4515 then_bb->index,
4516 BB_HEAD (then_bb) ? (int)INSN_UID (BB_HEAD (then_bb)) : -1);
4517
4518 if (else_bb)
4519 fprintf (dump_file, ", else %d [%d]",
4520 else_bb->index,
4521 BB_HEAD (else_bb) ? (int)INSN_UID (BB_HEAD (else_bb)) : -1);
4522
4523 fprintf (dump_file, ", join %d [%d]",
4524 join_bb->index,
4525 BB_HEAD (join_bb) ? (int)INSN_UID (BB_HEAD (join_bb)) : -1);
4526
4527 if (ce_info->num_multiple_test_blocks > 0)
4528 fprintf (dump_file, ", %d %s block%s last test %d [%d]",
4529 ce_info->num_multiple_test_blocks,
4530 (ce_info->and_and_p) ? "&&" : "||",
4531 (ce_info->num_multiple_test_blocks == 1) ? "" : "s",
4532 ce_info->last_test_bb->index,
4533 ((BB_HEAD (ce_info->last_test_bb))
4534 ? (int)INSN_UID (BB_HEAD (ce_info->last_test_bb))
4535 : -1));
4536
4537 fputc ('\n', dump_file);
4538 }
4539
4540 /* Make sure the IF, THEN, and ELSE blocks are adjacent. Actually, we get the
4541 first condition for free, since we've already asserted that there's a
4542 fallthru edge from IF to THEN. Likewise for the && and || blocks, since
4543 we checked the FALLTHRU flag, those are already adjacent to the last IF
4544 block. */
4545 /* ??? As an enhancement, move the ELSE block. Have to deal with
4546 BLOCK notes, if by no other means than backing out the merge if they
4547 exist. Sticky enough I don't want to think about it now. */
4548 next = then_bb;
4549 if (else_bb && (next = next->next_bb) != else_bb)
4550 return FALSE;
4551 if ((next = next->next_bb) != join_bb
4552 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4553 {
4554 if (else_bb)
4555 join_bb = NULL;
4556 else
4557 return FALSE;
4558 }
4559
4560 /* Do the real work. */
4561
4562 ce_info->else_bb = else_bb;
4563 ce_info->join_bb = join_bb;
4564
4565 /* If we have && and || tests, first try to combine the && and ||
4566 tests into the conditional code, and if that fails, go back and handle
4567 it without the && and ||, which at present handles the && case if there
4568 was no ELSE block. */
4569 if (cond_exec_process_if_block (ce_info, TRUE))
4570 return TRUE;
4571
4572 if (ce_info->num_multiple_test_blocks)
4573 {
4574 cancel_changes (0);
4575
4576 if (cond_exec_process_if_block (ce_info, FALSE))
4577 return TRUE;
4578 }
4579
4580 return FALSE;
4581 }
4582
4583 /* Convert a branch over a trap, or a branch
4584 to a trap, into a conditional trap. */
4585
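/* Editorial example (not from the sources): a source-level

	if (x == 0)
	  __builtin_trap ();

   compiles to a branch around (or to) a block containing only a trap
   insn; on targets with a conditional trap pattern the pair becomes a
   single "trap if x == 0" insn and the trap block can be deleted. */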
4586 static int
4587 find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge)
4588 {
4589 basic_block then_bb = then_edge->dest;
4590 basic_block else_bb = else_edge->dest;
4591 basic_block other_bb, trap_bb;
4592 rtx_insn *trap, *jump;
4593 rtx cond;
4594 rtx_insn *cond_earliest;
4595 enum rtx_code code;
4596
4597 /* Locate the block with the trap instruction. */
4598 /* ??? While we look for no successors, we really ought to allow
4599 EH successors. Need to fix merge_if_block for that to work. */
4600 if ((trap = block_has_only_trap (then_bb)) != NULL)
4601 trap_bb = then_bb, other_bb = else_bb;
4602 else if ((trap = block_has_only_trap (else_bb)) != NULL)
4603 trap_bb = else_bb, other_bb = then_bb;
4604 else
4605 return FALSE;
4606
4607 if (dump_file)
4608 {
4609 fprintf (dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
4610 test_bb->index, trap_bb->index);
4611 }
4612
4613 /* If this is not a standard conditional jump, we can't parse it. */
4614 jump = BB_END (test_bb);
4615 cond = noce_get_condition (jump, &cond_earliest, false);
4616 if (! cond)
4617 return FALSE;
4618
4619 /* If the conditional jump is more than just a conditional jump, then
4620 we cannot do if-conversion on this block. */
4621 if (! onlyjump_p (jump))
4622 return FALSE;
4623
4624 /* We must be comparing objects whose modes imply the size. */
4625 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
4626 return FALSE;
4627
4628 /* Reverse the comparison code, if necessary. */
4629 code = GET_CODE (cond);
4630 if (then_bb == trap_bb)
4631 {
4632 code = reversed_comparison_code (cond, jump);
4633 if (code == UNKNOWN)
4634 return FALSE;
4635 }
4636
4637 /* Attempt to generate the conditional trap. */
4638 rtx_insn *seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
4639 copy_rtx (XEXP (cond, 1)),
4640 TRAP_CODE (PATTERN (trap)));
4641 if (seq == NULL)
4642 return FALSE;
4643
4644 /* Emit the new insns before cond_earliest. */
4645 emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATION (trap));
4646
4647 /* Delete the trap block if possible. */
4648 remove_edge (trap_bb == then_bb ? then_edge : else_edge);
4649 df_set_bb_dirty (test_bb);
4650 df_set_bb_dirty (then_bb);
4651 df_set_bb_dirty (else_bb);
4652
4653 if (EDGE_COUNT (trap_bb->preds) == 0)
4654 {
4655 delete_basic_block (trap_bb);
4656 num_true_changes++;
4657 }
4658
4659 /* Wire together the blocks again. */
4660 if (current_ir_type () == IR_RTL_CFGLAYOUT)
4661 single_succ_edge (test_bb)->flags |= EDGE_FALLTHRU;
4662 else if (trap_bb == then_bb)
4663 {
4664 rtx lab = JUMP_LABEL (jump);
4665 rtx_insn *seq = targetm.gen_jump (lab);
4666 rtx_jump_insn *newjump = emit_jump_insn_after (seq, jump);
4667 LABEL_NUSES (lab) += 1;
4668 JUMP_LABEL (newjump) = lab;
4669 emit_barrier_after (newjump);
4670 }
4671 delete_insn (jump);
4672
4673 if (can_merge_blocks_p (test_bb, other_bb))
4674 {
4675 merge_blocks (test_bb, other_bb);
4676 num_true_changes++;
4677 }
4678
4679 num_updated_if_blocks++;
4680 return TRUE;
4681 }
4682
4683 /* Subroutine of find_cond_trap: if BB contains only a trap insn,
4684 return it. */
4685
4686 static rtx_insn *
4687 block_has_only_trap (basic_block bb)
4688 {
4689 rtx_insn *trap;
4690
4691 /* We're not the exit block. */
4692 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4693 return NULL;
4694
4695 /* The block must have no successors. */
4696 if (EDGE_COUNT (bb->succs) > 0)
4697 return NULL;
4698
4699 /* The only instruction in the THEN block must be the trap. */
4700 trap = first_active_insn (bb);
4701 if (! (trap == BB_END (bb)
4702 && GET_CODE (PATTERN (trap)) == TRAP_IF
4703 && TRAP_CONDITION (PATTERN (trap)) == const_true_rtx))
4704 return NULL;
4705
4706 return trap;
4707 }
4708
4709 /* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is
4710 transformable, but not necessarily the other. There need be no
4711 JOIN block.
4712
4713 Return TRUE if we were successful at converting the block.
4714
4715 Cases we'd like to look at:
4716
4717 (1)
4718 if (test) goto over; // x not live
4719 x = a;
4720 goto label;
4721 over:
4722
4723 becomes
4724
4725 x = a;
4726 if (! test) goto label;
4727
4728 (2)
4729 if (test) goto E; // x not live
4730 x = big();
4731 goto L;
4732 E:
4733 x = b;
4734 goto M;
4735
4736 becomes
4737
4738 x = b;
4739 if (test) goto M;
4740 x = big();
4741 goto L;
4742
4743 (3) // This one's really only interesting for targets that can do
4744 // multiway branching, e.g. IA-64 BBB bundles. For other targets
4745 // it results in multiple branches on a cache line, which often
4746 // does not sit well with predictors.
4747
4748 if (test1) goto E; // predicted not taken
4749 x = a;
4750 if (test2) goto F;
4751 ...
4752 E:
4753 x = b;
4754 J:
4755
4756 becomes
4757
4758 x = a;
4759 if (test1) goto E;
4760 if (test2) goto F;
4761
4762 Notes:
4763
4764 (A) Don't do (2) if the branch is predicted against the block we're
4765 eliminating. Do it anyway if we can eliminate a branch; this requires
4766 that the sole successor of the eliminated block postdominate the other
4767 side of the if.
4768
4769 (B) With CE, on (3) we can steal from both sides of the if, creating
4770
4771 if (test1) x = a;
4772 if (!test1) x = b;
4773 if (test1) goto J;
4774 if (test2) goto F;
4775 ...
4776 J:
4777
4778 Again, this is most useful if J postdominates.
4779
4780 (C) CE substitutes for helpful life information.
4781
4782 (D) These heuristics need a lot of work. */
4783
4784 /* Tests for case 1 above. */
4785
4786 static int
4787 find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
4788 {
4789 basic_block then_bb = then_edge->dest;
4790 basic_block else_bb = else_edge->dest;
4791 basic_block new_bb;
4792 int then_bb_index, then_prob;
4793 rtx else_target = NULL_RTX;
4794
4795 /* If we are partitioning hot/cold basic blocks, we don't want to
4796 mess up unconditional or indirect jumps that cross between hot
4797 and cold sections.
4798
4799 Basic block partitioning may result in some jumps that appear to
4800 be optimizable (or blocks that appear to be mergeable), but which really
4801 must be left untouched (they are required to make it safely across
4802 partition boundaries). See the comments at the top of
4803 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4804
4805 if ((BB_END (then_bb)
4806 && JUMP_P (BB_END (then_bb))
4807 && CROSSING_JUMP_P (BB_END (then_bb)))
4808 || (BB_END (test_bb)
4809 && JUMP_P (BB_END (test_bb))
4810 && CROSSING_JUMP_P (BB_END (test_bb)))
4811 || (BB_END (else_bb)
4812 && JUMP_P (BB_END (else_bb))
4813 && CROSSING_JUMP_P (BB_END (else_bb))))
4814 return FALSE;
4815
4816 /* THEN has one successor. */
4817 if (!single_succ_p (then_bb))
4818 return FALSE;
4819
4820 /* THEN does not fall through, but is not strange either. */
4821 if (single_succ_edge (then_bb)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
4822 return FALSE;
4823
4824 /* THEN has one predecessor. */
4825 if (!single_pred_p (then_bb))
4826 return FALSE;
4827
4828 /* THEN must do something. */
4829 if (forwarder_block_p (then_bb))
4830 return FALSE;
4831
4832 num_possible_if_blocks++;
4833 if (dump_file)
4834 fprintf (dump_file,
4835 "\nIF-CASE-1 found, start %d, then %d\n",
4836 test_bb->index, then_bb->index);
4837
4838 if (then_edge->probability)
4839 then_prob = REG_BR_PROB_BASE - then_edge->probability;
4840 else
4841 then_prob = REG_BR_PROB_BASE / 2;
4842
4843 /* We're speculating from the THEN path, so we want to make sure the cost
4844 of speculation is within reason. */
4845 if (! cheap_bb_rtx_cost_p (then_bb, then_prob,
4846 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (then_edge->src),
4847 predictable_edge_p (then_edge)))))
4848 return FALSE;
4849
4850 if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4851 {
4852 rtx_insn *jump = BB_END (else_edge->src);
4853 gcc_assert (JUMP_P (jump));
4854 else_target = JUMP_LABEL (jump);
4855 }
4856
4857 /* Registers set are dead, or are predicable. */
4858 if (! dead_or_predicable (test_bb, then_bb, else_bb,
4859 single_succ_edge (then_bb), 1))
4860 return FALSE;
4861
4862 /* Conversion went ok, including moving the insns and fixing up the
4863 jump. Adjust the CFG to match. */
4864
4865 /* We can avoid creating a new basic block if then_bb is immediately
4866 followed by else_bb, i.e. deleting then_bb allows test_bb to fall
4867 through to else_bb. */
4868
4869 if (then_bb->next_bb == else_bb
4870 && then_bb->prev_bb == test_bb
4871 && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4872 {
4873 redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
4874 new_bb = 0;
4875 }
4876 else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4877 new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
4878 else_bb, else_target);
4879 else
4880 new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
4881 else_bb);
4882
4883 df_set_bb_dirty (test_bb);
4884 df_set_bb_dirty (else_bb);
4885
4886 then_bb_index = then_bb->index;
4887 delete_basic_block (then_bb);
4888
4889 /* Make rest of code believe that the newly created block is the THEN_BB
4890 block we removed. */
4891 if (new_bb)
4892 {
4893 df_bb_replace (then_bb_index, new_bb);
4894 /* This should have been done above via force_nonfallthru_and_redirect
4895 (possibly called from redirect_edge_and_branch_force). */
4896 gcc_checking_assert (BB_PARTITION (new_bb) == BB_PARTITION (test_bb));
4897 }
4898
4899 num_true_changes++;
4900 num_updated_if_blocks++;
4901
4902 return TRUE;
4903 }
4904
4905 /* Test for case 2 above. */
4906
4907 static int
4908 find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
4909 {
4910 basic_block then_bb = then_edge->dest;
4911 basic_block else_bb = else_edge->dest;
4912 edge else_succ;
4913 int then_prob, else_prob;
4914
4915 /* We do not want to speculate (empty) loop latches. */
4916 if (current_loops
4917 && else_bb->loop_father->latch == else_bb)
4918 return FALSE;
4919
4920 /* If we are partitioning hot/cold basic blocks, we don't want to
4921 mess up unconditional or indirect jumps that cross between hot
4922 and cold sections.
4923
4924 Basic block partitioning may result in some jumps that appear to
4925 be optimizable (or blocks that appear to be mergeable), but which really
4926 must be left untouched (they are required to make it safely across
4927 partition boundaries). See the comments at the top of
4928 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4929
4930 if ((BB_END (then_bb)
4931 && JUMP_P (BB_END (then_bb))
4932 && CROSSING_JUMP_P (BB_END (then_bb)))
4933 || (BB_END (test_bb)
4934 && JUMP_P (BB_END (test_bb))
4935 && CROSSING_JUMP_P (BB_END (test_bb)))
4936 || (BB_END (else_bb)
4937 && JUMP_P (BB_END (else_bb))
4938 && CROSSING_JUMP_P (BB_END (else_bb))))
4939 return FALSE;
4940
4941 /* ELSE has one successor. */
4942 if (!single_succ_p (else_bb))
4943 return FALSE;
4944 else
4945 else_succ = single_succ_edge (else_bb);
4946
4947 /* ELSE outgoing edge is not complex. */
4948 if (else_succ->flags & EDGE_COMPLEX)
4949 return FALSE;
4950
4951 /* ELSE has one predecessor. */
4952 if (!single_pred_p (else_bb))
4953 return FALSE;
4954
4955 /* THEN is not EXIT. */
4956 if (then_bb->index < NUM_FIXED_BLOCKS)
4957 return FALSE;
4958
4959 if (else_edge->probability)
4960 {
4961 else_prob = else_edge->probability;
4962 then_prob = REG_BR_PROB_BASE - else_prob;
4963 }
4964 else
4965 {
4966 else_prob = REG_BR_PROB_BASE / 2;
4967 then_prob = REG_BR_PROB_BASE / 2;
4968 }
4969
4970 /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
4971 if (else_prob > then_prob)
4972 ;
4973 else if (else_succ->dest->index < NUM_FIXED_BLOCKS
4974 || dominated_by_p (CDI_POST_DOMINATORS, then_bb,
4975 else_succ->dest))
4976 ;
4977 else
4978 return FALSE;
4979
4980 num_possible_if_blocks++;
4981 if (dump_file)
4982 fprintf (dump_file,
4983 "\nIF-CASE-2 found, start %d, else %d\n",
4984 test_bb->index, else_bb->index);
4985
4986 /* We're speculating from the ELSE path, so we want to make sure the cost
4987 of speculation is within reason. */
4988 if (! cheap_bb_rtx_cost_p (else_bb, else_prob,
4989 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (else_edge->src),
4990 predictable_edge_p (else_edge)))))
4991 return FALSE;
4992
4993 /* Registers set are dead, or are predicable. */
4994 if (! dead_or_predicable (test_bb, else_bb, then_bb, else_succ, 0))
4995 return FALSE;
4996
4997 /* Conversion went ok, including moving the insns and fixing up the
4998 jump. Adjust the CFG to match. */
4999
5000 df_set_bb_dirty (test_bb);
5001 df_set_bb_dirty (then_bb);
5002 delete_basic_block (else_bb);
5003
5004 num_true_changes++;
5005 num_updated_if_blocks++;
5006
5007 /* ??? We may now fallthru from one of THEN's successors into a join
5008 block. Rerun cleanup_cfg? Examine things manually? Wait? */
5009
5010 return TRUE;
5011 }
5012
5013 /* Used by the code above to perform the actual rtl transformations.
5014 Return TRUE if successful.
5015
5016 TEST_BB is the block containing the conditional branch. MERGE_BB
5017 is the block containing the code to manipulate. DEST_EDGE is an
5018 edge representing a jump to the join block; after the conversion,
5019 TEST_BB should be branching to its destination.
5020 REVERSEP is true if the sense of the branch should be reversed. */
5021
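/* Editorial sketch of the two strategies tried below: with conditional
   execution, the insns of MERGE_BB are predicated on the branch
   condition and hoisted; without it, they may still be hoisted
   speculatively provided they cannot trap, do not reference memory,
   and set only registers that are dead on the other path. E.g.
   (hypothetical)

	if (test) goto join;	// x dead on the taken path
	x = a;			// MERGE_BB
      join:

   can become "x = a; if (test) goto join;". */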
5022 static int
5023 dead_or_predicable (basic_block test_bb, basic_block merge_bb,
5024 basic_block other_bb, edge dest_edge, int reversep)
5025 {
5026 basic_block new_dest = dest_edge->dest;
5027 rtx_insn *head, *end, *jump;
5028 rtx_insn *earliest = NULL;
5029 rtx old_dest;
5030 bitmap merge_set = NULL;
5031 /* Number of pending changes. */
5032 int n_validated_changes = 0;
5033 rtx new_dest_label = NULL_RTX;
5034
5035 jump = BB_END (test_bb);
5036
5037 /* Find the extent of the real code in the merge block. */
5038 head = BB_HEAD (merge_bb);
5039 end = BB_END (merge_bb);
5040
5041 while (DEBUG_INSN_P (end) && end != head)
5042 end = PREV_INSN (end);
5043
5044 /* If merge_bb ends with a tablejump, predicating/moving insns
5045 into test_bb and then deleting merge_bb will result in the jumptable
5046 that follows merge_bb being removed along with merge_bb, leaving
5047 an unresolved reference to the jumptable. */
5048 if (tablejump_p (end, NULL, NULL))
5049 return FALSE;
5050
5051 if (LABEL_P (head))
5052 head = NEXT_INSN (head);
5053 while (DEBUG_INSN_P (head) && head != end)
5054 head = NEXT_INSN (head);
5055 if (NOTE_P (head))
5056 {
5057 if (head == end)
5058 {
5059 head = end = NULL;
5060 goto no_body;
5061 }
5062 head = NEXT_INSN (head);
5063 while (DEBUG_INSN_P (head) && head != end)
5064 head = NEXT_INSN (head);
5065 }
5066
5067 if (JUMP_P (end))
5068 {
5069 if (!onlyjump_p (end))
5070 return FALSE;
5071 if (head == end)
5072 {
5073 head = end = NULL;
5074 goto no_body;
5075 }
5076 end = PREV_INSN (end);
5077 while (DEBUG_INSN_P (end) && end != head)
5078 end = PREV_INSN (end);
5079 }
5080
5081 /* Don't move frame-related insn across the conditional branch. This
5082 can lead to one of the paths of the branch having wrong unwind info. */
5083 if (epilogue_completed)
5084 {
5085 rtx_insn *insn = head;
5086 while (1)
5087 {
5088 if (INSN_P (insn) && RTX_FRAME_RELATED_P (insn))
5089 return FALSE;
5090 if (insn == end)
5091 break;
5092 insn = NEXT_INSN (insn);
5093 }
5094 }
5095
5096 /* Disable handling dead code by conditional execution if the machine needs
5097 to do anything funny with the tests, etc. */
5098 #ifndef IFCVT_MODIFY_TESTS
5099 if (targetm.have_conditional_execution ())
5100 {
5101 /* In the conditional execution case, we have things easy. We know
5102 the condition is reversible. We don't have to check life info
5103 because we're going to conditionally execute the code anyway.
5104 All that's left is making sure the insns involved can actually
5105 be predicated. */
5106
5107 rtx cond;
5108
5109 cond = cond_exec_get_condition (jump);
5110 if (! cond)
5111 return FALSE;
5112
5113 rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
5114 int prob_val = (note ? XINT (note, 0) : -1);
5115
5116 if (reversep)
5117 {
5118 enum rtx_code rev = reversed_comparison_code (cond, jump);
5119 if (rev == UNKNOWN)
5120 return FALSE;
5121 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
5122 XEXP (cond, 1));
5123 if (prob_val >= 0)
5124 prob_val = REG_BR_PROB_BASE - prob_val;
5125 }
5126
5127 if (cond_exec_process_insns (NULL, head, end, cond, prob_val, 0)
5128 && verify_changes (0))
5129 n_validated_changes = num_validated_changes ();
5130 else
5131 cancel_changes (0);
5132
5133 earliest = jump;
5134 }
5135 #endif
5136
5137 /* If we allocated new pseudos (e.g. in the conditional move
5138 expander called from noce_emit_cmove), we must resize the
5139 array first. */
5140 if (max_regno < max_reg_num ())
5141 max_regno = max_reg_num ();
5142
5143 /* Try the NCE path if the CE path did not result in any changes. */
5144 if (n_validated_changes == 0)
5145 {
5146 rtx cond;
5147 rtx_insn *insn;
5148 regset live;
5149 bool success;
5150
5151 /* In the non-conditional execution case, we have to verify that there
5152 are no trapping operations, no calls, no references to memory, and
5153 that any registers modified are dead at the branch site. */
5154
5155 if (!any_condjump_p (jump))
5156 return FALSE;
5157
5158 /* Find the extent of the conditional. */
5159 cond = noce_get_condition (jump, &earliest, false);
5160 if (!cond)
5161 return FALSE;
5162
5163 live = BITMAP_ALLOC (&reg_obstack);
5164 simulate_backwards_to_point (merge_bb, live, end);
5165 success = can_move_insns_across (head, end, earliest, jump,
5166 merge_bb, live,
5167 df_get_live_in (other_bb), NULL);
5168 BITMAP_FREE (live);
5169 if (!success)
5170 return FALSE;
5171
5172 /* Collect the set of registers set in MERGE_BB. */
5173 merge_set = BITMAP_ALLOC (&reg_obstack);
5174
5175 FOR_BB_INSNS (merge_bb, insn)
5176 if (NONDEBUG_INSN_P (insn))
5177 df_simulate_find_defs (insn, merge_set);
5178
5179 /* If shrink-wrapping, disable this optimization when test_bb is
5180 the first basic block and merge_bb exits. The idea is to not
5181 move code setting up a return register as that may clobber a
5182 register used to pass function parameters, which then must be
5183 saved in callee-saved regs. A callee-saved reg requires the
5184 prologue, killing a shrink-wrap opportunity. */
5185 if ((SHRINK_WRAPPING_ENABLED && !epilogue_completed)
5186 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
5187 && single_succ_p (new_dest)
5188 && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
5189 && bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
5190 {
5191 regset return_regs;
5192 unsigned int i;
5193
5194 return_regs = BITMAP_ALLOC (&reg_obstack);
5195
5196 /* Start off with the intersection of regs used to pass
5197 params and regs used to return values. */
5198 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5199 if (FUNCTION_ARG_REGNO_P (i)
5200 && targetm.calls.function_value_regno_p (i))
5201 bitmap_set_bit (return_regs, INCOMING_REGNO (i));
5202
5203 bitmap_and_into (return_regs,
5204 df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
5205 bitmap_and_into (return_regs,
5206 df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
5207 if (!bitmap_empty_p (return_regs))
5208 {
5209 FOR_BB_INSNS_REVERSE (new_dest, insn)
5210 if (NONDEBUG_INSN_P (insn))
5211 {
5212 df_ref def;
5213
5214 /* If this insn sets any reg in return_regs, add all
5215 reg uses to the set of regs we're interested in. */
5216 FOR_EACH_INSN_DEF (def, insn)
5217 if (bitmap_bit_p (return_regs, DF_REF_REGNO (def)))
5218 {
5219 df_simulate_uses (insn, return_regs);
5220 break;
5221 }
5222 }
5223 if (bitmap_intersect_p (merge_set, return_regs))
5224 {
5225 BITMAP_FREE (return_regs);
5226 BITMAP_FREE (merge_set);
5227 return FALSE;
5228 }
5229 }
5230 BITMAP_FREE (return_regs);
5231 }
5232 }
5233
5234 no_body:
5235 /* We don't want to use normal invert_jump or redirect_jump because
5236 we don't want delete_insn called. Also, we want to do our own
5237 change group management. */
5238
5239 old_dest = JUMP_LABEL (jump);
5240 if (other_bb != new_dest)
5241 {
5242 if (!any_condjump_p (jump))
5243 goto cancel;
5244
5245 if (JUMP_P (BB_END (dest_edge->src)))
5246 new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
5247 else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
5248 new_dest_label = ret_rtx;
5249 else
5250 new_dest_label = block_label (new_dest);
5251
5252 rtx_jump_insn *jump_insn = as_a <rtx_jump_insn *> (jump);
5253 if (reversep
5254 ? ! invert_jump_1 (jump_insn, new_dest_label)
5255 : ! redirect_jump_1 (jump_insn, new_dest_label))
5256 goto cancel;
5257 }
5258
5259 if (verify_changes (n_validated_changes))
5260 confirm_change_group ();
5261 else
5262 goto cancel;
5263
5264 if (other_bb != new_dest)
5265 {
5266 redirect_jump_2 (as_a <rtx_jump_insn *> (jump), old_dest, new_dest_label,
5267 0, reversep);
5268
5269 redirect_edge_succ (BRANCH_EDGE (test_bb), new_dest);
5270 if (reversep)
5271 {
5272 std::swap (BRANCH_EDGE (test_bb)->count,
5273 FALLTHRU_EDGE (test_bb)->count);
5274 std::swap (BRANCH_EDGE (test_bb)->probability,
5275 FALLTHRU_EDGE (test_bb)->probability);
5276 update_br_prob_note (test_bb);
5277 }
5278 }
5279
5280 /* Move the insns out of MERGE_BB to before the branch. */
5281 if (head != NULL)
5282 {
5283 rtx_insn *insn;
5284
5285 if (end == BB_END (merge_bb))
5286 BB_END (merge_bb) = PREV_INSN (head);
5287
5288 /* PR 21767: when moving insns above a conditional branch, the REG_EQUAL
5289 notes being moved might become invalid. */
5290 insn = head;
5291 do
5292 {
5293 rtx note;
5294
5295 if (! INSN_P (insn))
5296 continue;
5297 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5298 if (! note)
5299 continue;
5300 remove_note (insn, note);
5301 } while (insn != end && (insn = NEXT_INSN (insn)));
5302
5303 /* PR46315: when moving insns above a conditional branch, the REG_EQUAL
5304 notes referring to the registers being set might become invalid. */
5305 if (merge_set)
5306 {
5307 unsigned i;
5308 bitmap_iterator bi;
5309
5310 EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, bi)
5311 remove_reg_equal_equiv_notes_for_regno (i);
5312
5313 BITMAP_FREE (merge_set);
5314 }
5315
5316 reorder_insns (head, end, PREV_INSN (earliest));
5317 }
5318
5319 /* Remove the jump and edge if we can. */
5320 if (other_bb == new_dest)
5321 {
5322 delete_insn (jump);
5323 remove_edge (BRANCH_EDGE (test_bb));
5324 /* ??? Can't merge blocks here, as then_bb is still in use.
5325 At minimum, the merge will get done just before bb-reorder. */
5326 }
5327
5328 return TRUE;
5329
5330 cancel:
5331 cancel_changes (0);
5332
5333 if (merge_set)
5334 BITMAP_FREE (merge_set);
5335
5336 return FALSE;
5337 }
5338 \f
5339 /* Main entry point for all if-conversion. AFTER_COMBINE is true if
5340 we are after the combine pass. */
5341
5342 static void
5343 if_convert (bool after_combine)
5344 {
5345 basic_block bb;
5346 int pass;
5347
5348 if (optimize == 1)
5349 {
5350 df_live_add_problem ();
5351 df_live_set_all_dirty ();
5352 }
5353
5354 /* Record whether we are after the combine pass. */
5355 ifcvt_after_combine = after_combine;
5356 have_cbranchcc4 = (direct_optab_handler (cbranch_optab, CCmode)
5357 != CODE_FOR_nothing);
5358 num_possible_if_blocks = 0;
5359 num_updated_if_blocks = 0;
5360 num_true_changes = 0;
5361
5362 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5363 mark_loop_exit_edges ();
5364 loop_optimizer_finalize ();
5365 free_dominance_info (CDI_DOMINATORS);
5366
5367 /* Compute postdominators. */
5368 calculate_dominance_info (CDI_POST_DOMINATORS);
5369
5370 df_set_flags (DF_LR_RUN_DCE);
5371
5372 /* Go through each of the basic blocks looking for things to convert. If we
5373 have conditional execution, we make multiple passes to allow us to handle
5374 IF-THEN{-ELSE} blocks within other IF-THEN{-ELSE} blocks. */
5375 pass = 0;
5376 do
5377 {
5378 df_analyze ();
5379 /* Only need to do DCE on the first pass. */
5380 df_clear_flags (DF_LR_RUN_DCE);
5381 cond_exec_changed_p = FALSE;
5382 pass++;
5383
5384 #ifdef IFCVT_MULTIPLE_DUMPS
5385 if (dump_file && pass > 1)
5386 fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
5387 #endif
5388
5389 FOR_EACH_BB_FN (bb, cfun)
5390 {
5391 basic_block new_bb;
5392 while (!df_get_bb_dirty (bb)
5393 && (new_bb = find_if_header (bb, pass)) != NULL)
5394 bb = new_bb;
5395 }
5396
5397 #ifdef IFCVT_MULTIPLE_DUMPS
5398 if (dump_file && cond_exec_changed_p)
5399 print_rtl_with_bb (dump_file, get_insns (), dump_flags);
5400 #endif
5401 }
5402 while (cond_exec_changed_p);
5403
5404 #ifdef IFCVT_MULTIPLE_DUMPS
5405 if (dump_file)
5406 fprintf (dump_file, "\n\n========== no more changes\n");
5407 #endif
5408
5409 free_dominance_info (CDI_POST_DOMINATORS);
5410
5411 if (dump_file)
5412 fflush (dump_file);
5413
5414 clear_aux_for_blocks ();
5415
5416 /* If we allocated new pseudos, we must resize the array for sched1. */
5417 if (max_regno < max_reg_num ())
5418 max_regno = max_reg_num ();
5419
5420 /* Write the final stats. */
5421 if (dump_file && num_possible_if_blocks > 0)
5422 {
5423 fprintf (dump_file,
5424 "\n%d possible IF blocks searched.\n",
5425 num_possible_if_blocks);
5426 fprintf (dump_file,
5427 "%d IF blocks converted.\n",
5428 num_updated_if_blocks);
5429 fprintf (dump_file,
5430 "%d true changes made.\n\n\n",
5431 num_true_changes);
5432 }
5433
5434 if (optimize == 1)
5435 df_remove_problem (df_live);
5436
5437 checking_verify_flow_info ();
5438 }
5439 \f
5440 /* If-conversion and CFG cleanup. */
5441 static unsigned int
5442 rest_of_handle_if_conversion (void)
5443 {
5444 if (flag_if_conversion)
5445 {
5446 if (dump_file)
5447 {
5448 dump_reg_info (dump_file);
5449 dump_flow_info (dump_file, dump_flags);
5450 }
5451 cleanup_cfg (CLEANUP_EXPENSIVE);
5452 if_convert (false);
5453 }
5454
5455 cleanup_cfg (0);
5456 return 0;
5457 }
5458
5459 namespace {
5460
5461 const pass_data pass_data_rtl_ifcvt =
5462 {
5463 RTL_PASS, /* type */
5464 "ce1", /* name */
5465 OPTGROUP_NONE, /* optinfo_flags */
5466 TV_IFCVT, /* tv_id */
5467 0, /* properties_required */
5468 0, /* properties_provided */
5469 0, /* properties_destroyed */
5470 0, /* todo_flags_start */
5471 TODO_df_finish, /* todo_flags_finish */
5472 };
5473
5474 class pass_rtl_ifcvt : public rtl_opt_pass
5475 {
5476 public:
5477 pass_rtl_ifcvt (gcc::context *ctxt)
5478 : rtl_opt_pass (pass_data_rtl_ifcvt, ctxt)
5479 {}
5480
5481 /* opt_pass methods: */
5482 virtual bool gate (function *)
5483 {
5484 return (optimize > 0) && dbg_cnt (if_conversion);
5485 }
5486
5487 virtual unsigned int execute (function *)
5488 {
5489 return rest_of_handle_if_conversion ();
5490 }
5491
5492 }; // class pass_rtl_ifcvt
5493
5494 } // anon namespace
5495
5496 rtl_opt_pass *
5497 make_pass_rtl_ifcvt (gcc::context *ctxt)
5498 {
5499 return new pass_rtl_ifcvt (ctxt);
5500 }
5501
5502
5503 /* Rerun if-conversion, as combine may have simplified things enough
5504 to now meet sequence length restrictions. */
5505
5506 namespace {
5507
5508 const pass_data pass_data_if_after_combine =
5509 {
5510 RTL_PASS, /* type */
5511 "ce2", /* name */
5512 OPTGROUP_NONE, /* optinfo_flags */
5513 TV_IFCVT, /* tv_id */
5514 0, /* properties_required */
5515 0, /* properties_provided */
5516 0, /* properties_destroyed */
5517 0, /* todo_flags_start */
5518 TODO_df_finish, /* todo_flags_finish */
5519 };
5520
5521 class pass_if_after_combine : public rtl_opt_pass
5522 {
5523 public:
5524 pass_if_after_combine (gcc::context *ctxt)
5525 : rtl_opt_pass (pass_data_if_after_combine, ctxt)
5526 {}
5527
5528 /* opt_pass methods: */
5529 virtual bool gate (function *)
5530 {
5531 return optimize > 0 && flag_if_conversion
5532 && dbg_cnt (if_after_combine);
5533 }
5534
5535 virtual unsigned int execute (function *)
5536 {
5537 if_convert (true);
5538 return 0;
5539 }
5540
5541 }; // class pass_if_after_combine
5542
5543 } // anon namespace
5544
5545 rtl_opt_pass *
5546 make_pass_if_after_combine (gcc::context *ctxt)
5547 {
5548 return new pass_if_after_combine (ctxt);
5549 }
5550
5551
5552 namespace {
5553
5554 const pass_data pass_data_if_after_reload =
5555 {
5556 RTL_PASS, /* type */
5557 "ce3", /* name */
5558 OPTGROUP_NONE, /* optinfo_flags */
5559 TV_IFCVT2, /* tv_id */
5560 0, /* properties_required */
5561 0, /* properties_provided */
5562 0, /* properties_destroyed */
5563 0, /* todo_flags_start */
5564 TODO_df_finish, /* todo_flags_finish */
5565 };
5566
5567 class pass_if_after_reload : public rtl_opt_pass
5568 {
5569 public:
5570 pass_if_after_reload (gcc::context *ctxt)
5571 : rtl_opt_pass (pass_data_if_after_reload, ctxt)
5572 {}
5573
5574 /* opt_pass methods: */
5575 virtual bool gate (function *)
5576 {
5577 return optimize > 0 && flag_if_conversion2
5578 && dbg_cnt (if_after_reload);
5579 }
5580
5581 virtual unsigned int execute (function *)
5582 {
5583 if_convert (true);
5584 return 0;
5585 }
5586
5587 }; // class pass_if_after_reload
5588
5589 } // anon namespace
5590
5591 rtl_opt_pass *
5592 make_pass_if_after_reload (gcc::context *ctxt)
5593 {
5594 return new pass_if_after_reload (ctxt);
5595 }