1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
/* If the target does not provide a conditional trap pattern, supply
   stubs so the generic code below still compiles; the generator must
   never be reached in that case.  */
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
151 || NEXT_INSN (insns
) == NULL_RTX
)
154 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
155 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
156 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
157 && GET_RTX_CLASS (code
) != RTX_COMPARE
158 && GET_RTX_CLASS (code
) != RTX_UNARY
)
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
194 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
242 /* Return the optab used for computing the operation given by
243 the tree code, CODE. This function is not always usable (for
244 example, it cannot give complete results for multiplication
245 or division) but probably ought to be relied on more widely
246 throughout the expander. */
248 optab_for_tree_code (enum tree_code code
, tree type
)
260 return one_cmpl_optab
;
269 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
277 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
283 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
292 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
295 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
297 case REALIGN_LOAD_EXPR
:
298 return vec_realign_load_optab
;
304 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
308 return trapv
? addv_optab
: add_optab
;
311 return trapv
? subv_optab
: sub_optab
;
314 return trapv
? smulv_optab
: smul_optab
;
317 return trapv
? negv_optab
: neg_optab
;
320 return trapv
? absv_optab
: abs_optab
;
328 /* Generate code to perform an operation specified by TERNARY_OPTAB
329 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
331 UNSIGNEDP is for the case where we have to widen the operands
332 to perform the operation. It says to use zero-extension.
334 If TARGET is nonzero, the value
335 is generated there, if it is convenient to do so.
336 In all cases an rtx is returned for the locus of the value;
337 this may or may not be TARGET. */
340 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
341 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
343 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
344 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
345 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
346 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
349 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
351 if (ternary_optab
->handlers
[(int) mode
].insn_code
== CODE_FOR_nothing
)
355 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
356 temp
= gen_reg_rtx (mode
);
360 /* In case the insn wants input operands in modes different from
361 those of the actual operands, convert the operands. It would
362 seem that we don't need to convert CONST_INTs, but we do, so
363 that they're properly zero-extended, sign-extended or truncated
366 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
367 xop0
= convert_modes (mode0
,
368 GET_MODE (op0
) != VOIDmode
373 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
374 xop1
= convert_modes (mode1
,
375 GET_MODE (op1
) != VOIDmode
380 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
381 xop2
= convert_modes (mode2
,
382 GET_MODE (op2
) != VOIDmode
387 /* Now, if insn's predicates don't allow our operands, put them into
390 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
391 && mode0
!= VOIDmode
)
392 xop0
= copy_to_mode_reg (mode0
, xop0
);
394 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
395 && mode1
!= VOIDmode
)
396 xop1
= copy_to_mode_reg (mode1
, xop1
);
398 if (! (*insn_data
[icode
].operand
[3].predicate
) (xop2
, mode2
)
399 && mode2
!= VOIDmode
)
400 xop2
= copy_to_mode_reg (mode2
, xop2
);
402 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
409 /* Like expand_binop, but return a constant rtx if the result can be
410 calculated at compile time. The arguments and return value are
411 otherwise the same as for expand_binop. */
414 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
415 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
416 enum optab_methods methods
)
418 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
419 return simplify_gen_binary (binoptab
->code
, mode
, op0
, op1
);
421 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
424 /* Like simplify_expand_binop, but always put the result in TARGET.
425 Return true if the expansion succeeded. */
428 force_expand_binop (enum machine_mode mode
, optab binoptab
,
429 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
430 enum optab_methods methods
)
432 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
433 target
, unsignedp
, methods
);
437 emit_move_insn (target
, x
);
441 /* This subroutine of expand_doubleword_shift handles the cases in which
442 the effective shift value is >= BITS_PER_WORD. The arguments and return
443 value are the same as for the parent routine, except that SUPERWORD_OP1
444 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
445 INTO_TARGET may be null if the caller has decided to calculate it. */
448 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
449 rtx outof_target
, rtx into_target
,
450 int unsignedp
, enum optab_methods methods
)
452 if (into_target
!= 0)
453 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
454 into_target
, unsignedp
, methods
))
457 if (outof_target
!= 0)
459 /* For a signed right shift, we must fill OUTOF_TARGET with copies
460 of the sign bit, otherwise we must fill it with zeros. */
461 if (binoptab
!= ashr_optab
)
462 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
464 if (!force_expand_binop (word_mode
, binoptab
,
465 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
466 outof_target
, unsignedp
, methods
))
472 /* This subroutine of expand_doubleword_shift handles the cases in which
473 the effective shift value is < BITS_PER_WORD. The arguments and return
474 value are the same as for the parent routine. */
477 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
478 rtx outof_input
, rtx into_input
, rtx op1
,
479 rtx outof_target
, rtx into_target
,
480 int unsignedp
, enum optab_methods methods
,
481 unsigned HOST_WIDE_INT shift_mask
)
483 optab reverse_unsigned_shift
, unsigned_shift
;
486 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
487 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
489 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
490 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
491 the opposite direction to BINOPTAB. */
492 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
494 carries
= outof_input
;
495 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
496 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
501 /* We must avoid shifting by BITS_PER_WORD bits since that is either
502 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
503 has unknown behavior. Do a single shift first, then shift by the
504 remainder. It's OK to use ~OP1 as the remainder if shift counts
505 are truncated to the mode size. */
506 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
507 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
508 if (shift_mask
== BITS_PER_WORD
- 1)
510 tmp
= immed_double_const (-1, -1, op1_mode
);
511 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
516 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
517 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
521 if (tmp
== 0 || carries
== 0)
523 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
524 carries
, tmp
, 0, unsignedp
, methods
);
528 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
529 so the result can go directly into INTO_TARGET if convenient. */
530 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
531 into_target
, unsignedp
, methods
);
535 /* Now OR in the bits carried over from OUTOF_INPUT. */
536 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
537 into_target
, unsignedp
, methods
))
540 /* Use a standard word_mode shift for the out-of half. */
541 if (outof_target
!= 0)
542 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
543 outof_target
, unsignedp
, methods
))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
612 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
613 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
614 input operand; the shift moves bits in the direction OUTOF_INPUT->
615 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
616 of the target. OP1 is the shift count and OP1_MODE is its mode.
617 If OP1 is constant, it will have been truncated as appropriate
618 and is known to be nonzero.
620 If SHIFT_MASK is zero, the result of word shifts is undefined when the
621 shift count is outside the range [0, BITS_PER_WORD). This routine must
622 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
624 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
625 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
626 fill with zeros or sign bits as appropriate.
628 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
629 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
630 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
631 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
634 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
635 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
636 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
637 function wants to calculate it itself.
639 Return true if the shift could be successfully synthesized. */
642 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
643 rtx outof_input
, rtx into_input
, rtx op1
,
644 rtx outof_target
, rtx into_target
,
645 int unsignedp
, enum optab_methods methods
,
646 unsigned HOST_WIDE_INT shift_mask
)
648 rtx superword_op1
, tmp
, cmp1
, cmp2
;
649 rtx subword_label
, done_label
;
650 enum rtx_code cmp_code
;
652 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
653 fill the result with sign or zero bits as appropriate. If so, the value
654 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
655 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
656 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
658 This isn't worthwhile for constant shifts since the optimizers will
659 cope better with in-range shift counts. */
660 if (shift_mask
>= BITS_PER_WORD
662 && !CONSTANT_P (op1
))
664 if (!expand_doubleword_shift (op1_mode
, binoptab
,
665 outof_input
, into_input
, op1
,
667 unsignedp
, methods
, shift_mask
))
669 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
670 outof_target
, unsignedp
, methods
))
675 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
676 is true when the effective shift value is less than BITS_PER_WORD.
677 Set SUPERWORD_OP1 to the shift count that should be used to shift
678 OUTOF_INPUT into INTO_TARGET when the condition is false. */
679 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
680 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
682 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
683 is a subword shift count. */
684 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
686 cmp2
= CONST0_RTX (op1_mode
);
692 /* Set CMP1 to OP1 - BITS_PER_WORD. */
693 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
695 cmp2
= CONST0_RTX (op1_mode
);
697 superword_op1
= cmp1
;
702 /* If we can compute the condition at compile time, pick the
703 appropriate subroutine. */
704 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
705 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
707 if (tmp
== const0_rtx
)
708 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
709 outof_target
, into_target
,
712 return expand_subword_shift (op1_mode
, binoptab
,
713 outof_input
, into_input
, op1
,
714 outof_target
, into_target
,
715 unsignedp
, methods
, shift_mask
);
718 #ifdef HAVE_conditional_move
719 /* Try using conditional moves to generate straight-line code. */
721 rtx start
= get_last_insn ();
722 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
723 cmp_code
, cmp1
, cmp2
,
724 outof_input
, into_input
,
726 outof_target
, into_target
,
727 unsignedp
, methods
, shift_mask
))
729 delete_insns_since (start
);
733 /* As a last resort, use branches to select the correct alternative. */
734 subword_label
= gen_label_rtx ();
735 done_label
= gen_label_rtx ();
737 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
738 0, 0, subword_label
);
740 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
741 outof_target
, into_target
,
745 emit_jump_insn (gen_jump (done_label
));
747 emit_label (subword_label
);
749 if (!expand_subword_shift (op1_mode
, binoptab
,
750 outof_input
, into_input
, op1
,
751 outof_target
, into_target
,
752 unsignedp
, methods
, shift_mask
))
755 emit_label (done_label
);
759 /* Subroutine of expand_binop. Perform a double word multiplication of
760 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
761 as the target's word_mode. This function return NULL_RTX if anything
762 goes wrong, in which case it may have already emitted instructions
763 which need to be deleted.
765 If we want to multiply two two-word values and have normal and widening
766 multiplies of single-word values, we can do this with three smaller
767 multiplications. Note that we do not make a REG_NO_CONFLICT block here
768 because we are not operating on one word at a time.
770 The multiplication proceeds as follows:
771 _______________________
772 [__op0_high_|__op0_low__]
773 _______________________
774 * [__op1_high_|__op1_low__]
775 _______________________________________________
776 _______________________
777 (1) [__op0_low__*__op1_low__]
778 _______________________
779 (2a) [__op0_low__*__op1_high_]
780 _______________________
781 (2b) [__op0_high_*__op1_low__]
782 _______________________
783 (3) [__op0_high_*__op1_high_]
786 This gives a 4-word result. Since we are only interested in the
787 lower 2 words, partial result (3) and the upper words of (2a) and
788 (2b) don't need to be calculated. Hence (2a) and (2b) can be
789 calculated using non-widening multiplication.
791 (1), however, needs to be calculated with an unsigned widening
792 multiplication. If this operation is not directly supported we
793 try using a signed widening multiplication and adjust the result.
794 This adjustment works as follows:
796 If both operands are positive then no adjustment is needed.
798 If the operands have different signs, for example op0_low < 0 and
799 op1_low >= 0, the instruction treats the most significant bit of
800 op0_low as a sign bit instead of a bit with significance
801 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
802 with 2**BITS_PER_WORD - op0_low, and two's complements the
803 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
806 Similarly, if both operands are negative, we need to add
807 (op0_low + op1_low) * 2**BITS_PER_WORD.
809 We use a trick to adjust quickly. We logically shift op0_low right
810 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
811 op0_high (op1_high) before it is used to calculate 2b (2a). If no
812 logical shift exists, we do an arithmetic right shift and subtract
816 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
817 bool umulp
, enum optab_methods methods
)
819 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
820 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
821 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
822 rtx product
, adjust
, product_high
, temp
;
824 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
825 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
826 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
827 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
829 /* If we're using an unsigned multiply to directly compute the product
830 of the low-order words of the operands and perform any required
831 adjustments of the operands, we begin by trying two more multiplications
832 and then computing the appropriate sum.
834 We have checked above that the required addition is provided.
835 Full-word addition will normally always succeed, especially if
836 it is provided at all, so we don't worry about its failure. The
837 multiplication may well fail, however, so we do handle that. */
841 /* ??? This could be done with emit_store_flag where available. */
842 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
843 NULL_RTX
, 1, methods
);
845 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
846 op0_high
, 0, OPTAB_DIRECT
);
849 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
850 NULL_RTX
, 0, methods
);
853 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
854 op0_high
, 0, OPTAB_DIRECT
);
861 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
862 NULL_RTX
, 0, OPTAB_DIRECT
);
866 /* OP0_HIGH should now be dead. */
870 /* ??? This could be done with emit_store_flag where available. */
871 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
872 NULL_RTX
, 1, methods
);
874 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
875 op1_high
, 0, OPTAB_DIRECT
);
878 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
879 NULL_RTX
, 0, methods
);
882 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
883 op1_high
, 0, OPTAB_DIRECT
);
890 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
891 NULL_RTX
, 0, OPTAB_DIRECT
);
895 /* OP1_HIGH should now be dead. */
897 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
898 adjust
, 0, OPTAB_DIRECT
);
900 if (target
&& !REG_P (target
))
904 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
905 target
, 1, OPTAB_DIRECT
);
907 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
908 target
, 1, OPTAB_DIRECT
);
913 product_high
= operand_subword (product
, high
, 1, mode
);
914 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
915 REG_P (product_high
) ? product_high
: adjust
,
917 emit_move_insn (product_high
, adjust
);
921 /* Wrapper around expand_binop which takes an rtx code to specify
922 the operation to perform, not an optab pointer. All other
923 arguments are the same. */
925 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
926 rtx op1
, rtx target
, int unsignedp
,
927 enum optab_methods methods
)
929 optab binop
= code_to_optab
[(int) code
];
933 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
936 /* Generate code to perform an operation specified by BINOPTAB
937 on operands OP0 and OP1, with result having machine-mode MODE.
939 UNSIGNEDP is for the case where we have to widen the operands
940 to perform the operation. It says to use zero-extension.
942 If TARGET is nonzero, the value
943 is generated there, if it is convenient to do so.
944 In all cases an rtx is returned for the locus of the value;
945 this may or may not be TARGET. */
948 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
949 rtx target
, int unsignedp
, enum optab_methods methods
)
951 enum optab_methods next_methods
952 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
953 ? OPTAB_WIDEN
: methods
);
954 enum mode_class
class;
955 enum machine_mode wider_mode
;
957 int commutative_op
= 0;
958 int shift_op
= (binoptab
->code
== ASHIFT
959 || binoptab
->code
== ASHIFTRT
960 || binoptab
->code
== LSHIFTRT
961 || binoptab
->code
== ROTATE
962 || binoptab
->code
== ROTATERT
);
963 rtx entry_last
= get_last_insn ();
966 class = GET_MODE_CLASS (mode
);
970 /* Load duplicate non-volatile operands once. */
971 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
973 op0
= force_not_mem (op0
);
978 op0
= force_not_mem (op0
);
979 op1
= force_not_mem (op1
);
983 /* If subtracting an integer constant, convert this into an addition of
984 the negated constant. */
986 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
988 op1
= negate_rtx (mode
, op1
);
989 binoptab
= add_optab
;
992 /* If we are inside an appropriately-short loop and we are optimizing,
993 force expensive constants into a register. */
994 if (CONSTANT_P (op0
) && optimize
995 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
997 if (GET_MODE (op0
) != VOIDmode
)
998 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
999 op0
= force_reg (mode
, op0
);
1002 if (CONSTANT_P (op1
) && optimize
1003 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1005 if (GET_MODE (op1
) != VOIDmode
)
1006 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1007 op1
= force_reg (mode
, op1
);
1010 /* Record where to delete back to if we backtrack. */
1011 last
= get_last_insn ();
1013 /* If operation is commutative,
1014 try to make the first operand a register.
1015 Even better, try to make it the same as the target.
1016 Also try to make the last operand a constant. */
1017 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1018 || binoptab
== smul_widen_optab
1019 || binoptab
== umul_widen_optab
1020 || binoptab
== smul_highpart_optab
1021 || binoptab
== umul_highpart_optab
)
1025 if (((target
== 0 || REG_P (target
))
1029 : rtx_equal_p (op1
, target
))
1030 || GET_CODE (op0
) == CONST_INT
)
1038 /* If we can do it with a three-operand insn, do so. */
1040 if (methods
!= OPTAB_MUST_WIDEN
1041 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1043 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1044 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1045 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1047 rtx xop0
= op0
, xop1
= op1
;
1052 temp
= gen_reg_rtx (mode
);
1054 /* If it is a commutative operator and the modes would match
1055 if we would swap the operands, we can save the conversions. */
1058 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1059 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1063 tmp
= op0
; op0
= op1
; op1
= tmp
;
1064 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1068 /* In case the insn wants input operands in modes different from
1069 those of the actual operands, convert the operands. It would
1070 seem that we don't need to convert CONST_INTs, but we do, so
1071 that they're properly zero-extended, sign-extended or truncated
1074 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1075 xop0
= convert_modes (mode0
,
1076 GET_MODE (op0
) != VOIDmode
1081 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1082 xop1
= convert_modes (mode1
,
1083 GET_MODE (op1
) != VOIDmode
1088 /* Now, if insn's predicates don't allow our operands, put them into
1091 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
1092 && mode0
!= VOIDmode
)
1093 xop0
= copy_to_mode_reg (mode0
, xop0
);
1095 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
1096 && mode1
!= VOIDmode
)
1097 xop1
= copy_to_mode_reg (mode1
, xop1
);
1099 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
1100 temp
= gen_reg_rtx (mode
);
1102 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1105 /* If PAT is composed of more than one insn, try to add an appropriate
1106 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1107 operand, call ourselves again, this time without a target. */
1108 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1109 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1111 delete_insns_since (last
);
1112 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1113 unsignedp
, methods
);
1120 delete_insns_since (last
);
1123 /* If this is a multiply, see if we can do a widening operation that
1124 takes operands of this mode and makes a wider mode. */
1126 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
1127 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1128 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1129 != CODE_FOR_nothing
))
1131 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1132 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1133 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1137 if (GET_MODE_CLASS (mode
) == MODE_INT
)
1138 return gen_lowpart (mode
, temp
);
1140 return convert_to_mode (mode
, temp
, unsignedp
);
1144 /* Look for a wider mode of the same class for which we think we
1145 can open-code the operation. Check for a widening multiply at the
1146 wider mode as well. */
1148 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1149 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1150 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1151 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1153 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1154 || (binoptab
== smul_optab
1155 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1156 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1157 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1158 != CODE_FOR_nothing
)))
1160 rtx xop0
= op0
, xop1
= op1
;
1163 /* For certain integer operations, we need not actually extend
1164 the narrow operands, as long as we will truncate
1165 the results to the same narrowness. */
1167 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1168 || binoptab
== xor_optab
1169 || binoptab
== add_optab
|| binoptab
== sub_optab
1170 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1171 && class == MODE_INT
)
1174 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1176 /* The second operand of a shift must always be extended. */
1177 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1178 no_extend
&& binoptab
!= ashl_optab
);
1180 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1181 unsignedp
, OPTAB_DIRECT
);
1184 if (class != MODE_INT
)
1187 target
= gen_reg_rtx (mode
);
1188 convert_move (target
, temp
, 0);
1192 return gen_lowpart (mode
, temp
);
1195 delete_insns_since (last
);
1199 /* These can be done a word at a time. */
1200 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1201 && class == MODE_INT
1202 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1203 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1209 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1210 won't be accurate, so use a new target. */
1211 if (target
== 0 || target
== op0
|| target
== op1
)
1212 target
= gen_reg_rtx (mode
);
1216 /* Do the actual arithmetic. */
1217 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1219 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1220 rtx x
= expand_binop (word_mode
, binoptab
,
1221 operand_subword_force (op0
, i
, mode
),
1222 operand_subword_force (op1
, i
, mode
),
1223 target_piece
, unsignedp
, next_methods
);
1228 if (target_piece
!= x
)
1229 emit_move_insn (target_piece
, x
);
1232 insns
= get_insns ();
1235 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1237 if (binoptab
->code
!= UNKNOWN
)
1239 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1240 copy_rtx (op0
), copy_rtx (op1
));
1244 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1249 /* Synthesize double word shifts from single word shifts. */
1250 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1251 || binoptab
== ashr_optab
)
1252 && class == MODE_INT
1253 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1254 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1255 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1256 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1257 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1259 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1260 enum machine_mode op1_mode
;
1262 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1263 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1264 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1266 /* Apply the truncation to constant shifts. */
1267 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1268 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1270 if (op1
== CONST0_RTX (op1_mode
))
1273 /* Make sure that this is a combination that expand_doubleword_shift
1274 can handle. See the comments there for details. */
1275 if (double_shift_mask
== 0
1276 || (shift_mask
== BITS_PER_WORD
- 1
1277 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1279 rtx insns
, equiv_value
;
1280 rtx into_target
, outof_target
;
1281 rtx into_input
, outof_input
;
1282 int left_shift
, outof_word
;
1284 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1285 won't be accurate, so use a new target. */
1286 if (target
== 0 || target
== op0
|| target
== op1
)
1287 target
= gen_reg_rtx (mode
);
1291 /* OUTOF_* is the word we are shifting bits away from, and
1292 INTO_* is the word that we are shifting bits towards, thus
1293 they differ depending on the direction of the shift and
1294 WORDS_BIG_ENDIAN. */
1296 left_shift
= binoptab
== ashl_optab
;
1297 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1299 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1300 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1302 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1303 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1305 if (expand_doubleword_shift (op1_mode
, binoptab
,
1306 outof_input
, into_input
, op1
,
1307 outof_target
, into_target
,
1308 unsignedp
, methods
, shift_mask
))
1310 insns
= get_insns ();
1313 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1314 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1321 /* Synthesize double word rotates from single word shifts. */
1322 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1323 && class == MODE_INT
1324 && GET_CODE (op1
) == CONST_INT
1325 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1326 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1327 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1329 rtx insns
, equiv_value
;
1330 rtx into_target
, outof_target
;
1331 rtx into_input
, outof_input
;
1333 int shift_count
, left_shift
, outof_word
;
1335 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1336 won't be accurate, so use a new target. Do this also if target is not
1337 a REG, first because having a register instead may open optimization
1338 opportunities, and second because if target and op0 happen to be MEMs
1339 designating the same location, we would risk clobbering it too early
1340 in the code sequence we generate below. */
1341 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1342 target
= gen_reg_rtx (mode
);
1346 shift_count
= INTVAL (op1
);
1348 /* OUTOF_* is the word we are shifting bits away from, and
1349 INTO_* is the word that we are shifting bits towards, thus
1350 they differ depending on the direction of the shift and
1351 WORDS_BIG_ENDIAN. */
1353 left_shift
= (binoptab
== rotl_optab
);
1354 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1356 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1357 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1359 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1360 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1362 if (shift_count
== BITS_PER_WORD
)
1364 /* This is just a word swap. */
1365 emit_move_insn (outof_target
, into_input
);
1366 emit_move_insn (into_target
, outof_input
);
1371 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1372 rtx first_shift_count
, second_shift_count
;
1373 optab reverse_unsigned_shift
, unsigned_shift
;
1375 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1376 ? lshr_optab
: ashl_optab
);
1378 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1379 ? ashl_optab
: lshr_optab
);
1381 if (shift_count
> BITS_PER_WORD
)
1383 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1384 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1388 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1389 second_shift_count
= GEN_INT (shift_count
);
1392 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1393 outof_input
, first_shift_count
,
1394 NULL_RTX
, unsignedp
, next_methods
);
1395 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1396 into_input
, second_shift_count
,
1397 NULL_RTX
, unsignedp
, next_methods
);
1399 if (into_temp1
!= 0 && into_temp2
!= 0)
1400 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1401 into_target
, unsignedp
, next_methods
);
1405 if (inter
!= 0 && inter
!= into_target
)
1406 emit_move_insn (into_target
, inter
);
1408 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1409 into_input
, first_shift_count
,
1410 NULL_RTX
, unsignedp
, next_methods
);
1411 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1412 outof_input
, second_shift_count
,
1413 NULL_RTX
, unsignedp
, next_methods
);
1415 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1416 inter
= expand_binop (word_mode
, ior_optab
,
1417 outof_temp1
, outof_temp2
,
1418 outof_target
, unsignedp
, next_methods
);
1420 if (inter
!= 0 && inter
!= outof_target
)
1421 emit_move_insn (outof_target
, inter
);
1424 insns
= get_insns ();
1429 if (binoptab
->code
!= UNKNOWN
)
1430 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1434 /* We can't make this a no conflict block if this is a word swap,
1435 because the word swap case fails if the input and output values
1436 are in the same register. */
1437 if (shift_count
!= BITS_PER_WORD
)
1438 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1447 /* These can be done a word at a time by propagating carries. */
1448 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1449 && class == MODE_INT
1450 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1451 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1454 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1455 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1456 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1457 rtx xop0
, xop1
, xtarget
;
1459 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1460 value is one of those, use it. Otherwise, use 1 since it is the
1461 one easiest to get. */
1462 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1463 int normalizep
= STORE_FLAG_VALUE
;
1468 /* Prepare the operands. */
1469 xop0
= force_reg (mode
, op0
);
1470 xop1
= force_reg (mode
, op1
);
1472 xtarget
= gen_reg_rtx (mode
);
1474 if (target
== 0 || !REG_P (target
))
1477 /* Indicate for flow that the entire target reg is being set. */
1479 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1481 /* Do the actual arithmetic. */
1482 for (i
= 0; i
< nwords
; i
++)
1484 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1485 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1486 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1487 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1490 /* Main add/subtract of the input operands. */
1491 x
= expand_binop (word_mode
, binoptab
,
1492 op0_piece
, op1_piece
,
1493 target_piece
, unsignedp
, next_methods
);
1499 /* Store carry from main add/subtract. */
1500 carry_out
= gen_reg_rtx (word_mode
);
1501 carry_out
= emit_store_flag_force (carry_out
,
1502 (binoptab
== add_optab
1505 word_mode
, 1, normalizep
);
1512 /* Add/subtract previous carry to main result. */
1513 newx
= expand_binop (word_mode
,
1514 normalizep
== 1 ? binoptab
: otheroptab
,
1516 NULL_RTX
, 1, next_methods
);
1520 /* Get out carry from adding/subtracting carry in. */
1521 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1522 carry_tmp
= emit_store_flag_force (carry_tmp
,
1523 (binoptab
== add_optab
1526 word_mode
, 1, normalizep
);
1528 /* Logical-ior the two poss. carry together. */
1529 carry_out
= expand_binop (word_mode
, ior_optab
,
1530 carry_out
, carry_tmp
,
1531 carry_out
, 0, next_methods
);
1535 emit_move_insn (target_piece
, newx
);
1538 carry_in
= carry_out
;
1541 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1543 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1544 || ! rtx_equal_p (target
, xtarget
))
1546 rtx temp
= emit_move_insn (target
, xtarget
);
1548 set_unique_reg_note (temp
,
1550 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1561 delete_insns_since (last
);
1564 /* Attempt to synthesize double word multiplies using a sequence of word
1565 mode multiplications. We first attempt to generate a sequence using a
1566 more efficient unsigned widening multiply, and if that fails we then
1567 try using a signed widening multiply. */
1569 if (binoptab
== smul_optab
1570 && class == MODE_INT
1571 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1572 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1573 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1575 rtx product
= NULL_RTX
;
1577 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1578 != CODE_FOR_nothing
)
1580 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1583 delete_insns_since (last
);
1586 if (product
== NULL_RTX
1587 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1588 != CODE_FOR_nothing
)
1590 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1593 delete_insns_since (last
);
1596 if (product
!= NULL_RTX
)
1598 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1600 temp
= emit_move_insn (target
? target
: product
, product
);
1601 set_unique_reg_note (temp
,
1603 gen_rtx_fmt_ee (MULT
, mode
,
1611 /* It can't be open-coded in this mode.
1612 Use a library call if one is available and caller says that's ok. */
1614 if (binoptab
->handlers
[(int) mode
].libfunc
1615 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1619 enum machine_mode op1_mode
= mode
;
1626 op1_mode
= word_mode
;
1627 /* Specify unsigned here,
1628 since negative shift counts are meaningless. */
1629 op1x
= convert_to_mode (word_mode
, op1
, 1);
1632 if (GET_MODE (op0
) != VOIDmode
1633 && GET_MODE (op0
) != mode
)
1634 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1636 /* Pass 1 for NO_QUEUE so we don't lose any increments
1637 if the libcall is cse'd or moved. */
1638 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1639 NULL_RTX
, LCT_CONST
, mode
, 2,
1640 op0
, mode
, op1x
, op1_mode
);
1642 insns
= get_insns ();
1645 target
= gen_reg_rtx (mode
);
1646 emit_libcall_block (insns
, target
, value
,
1647 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1652 delete_insns_since (last
);
1654 /* It can't be done in this mode. Can we do it in a wider mode? */
1656 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1657 || methods
== OPTAB_MUST_WIDEN
))
1659 /* Caller says, don't even try. */
1660 delete_insns_since (entry_last
);
1664 /* Compute the value of METHODS to pass to recursive calls.
1665 Don't allow widening to be tried recursively. */
1667 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1669 /* Look for a wider mode of the same class for which it appears we can do
1672 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1674 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1675 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1677 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1678 != CODE_FOR_nothing
)
1679 || (methods
== OPTAB_LIB
1680 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1682 rtx xop0
= op0
, xop1
= op1
;
1685 /* For certain integer operations, we need not actually extend
1686 the narrow operands, as long as we will truncate
1687 the results to the same narrowness. */
1689 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1690 || binoptab
== xor_optab
1691 || binoptab
== add_optab
|| binoptab
== sub_optab
1692 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1693 && class == MODE_INT
)
1696 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1697 unsignedp
, no_extend
);
1699 /* The second operand of a shift must always be extended. */
1700 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1701 no_extend
&& binoptab
!= ashl_optab
);
1703 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1704 unsignedp
, methods
);
1707 if (class != MODE_INT
)
1710 target
= gen_reg_rtx (mode
);
1711 convert_move (target
, temp
, 0);
1715 return gen_lowpart (mode
, temp
);
1718 delete_insns_since (last
);
1723 delete_insns_since (entry_last
);
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.

   NOTE(review): this body was reformatted from a garbled extraction;
   lines lost in extraction (return statements, braces) were reconstructed
   from the visible control flow — verify against the upstream file.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  /* Pick the optab matching the requested signedness for the direct try.  */
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use, so the recursive call is
     forced to widen rather than retry the direct pattern.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;

  /* As a last resort for the unsigned case, retry with the unmasked
     unsigned optab (allows its lib call to be used).  */
  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.

   NOTE(review): reformatted from a garbled extraction; dropped lines
   (declarations, aborts, returns) reconstructed from the visible
   control flow — verify against the upstream file.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* NOTE(review): the guard on this force_not_mem call was lost in
     extraction; presumably `if (flag_force_mem)` as elsewhere in this
     era of the file — confirm.  */
  if (flag_force_mem)
    op0 = force_not_mem (op0);

  /* Materialize dummy targets for any result the caller discards.  */
  if (targ0 == 0)
    targ0 = gen_reg_rtx (mode);
  if (targ1 == 0)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      /* Operand 2 of the pattern is the input; 0 and 1 are the outputs.  */
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[2].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
	  || ! (*insn_data[icode].operand[1].predicate) (targ1, mode))
	abort ();

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (unoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  /* Truncate the wide results back to the caller's mode.  */
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.

   NOTE(review): reformatted from a garbled extraction; dropped lines
   reconstructed from the visible control flow — verify against the
   upstream file.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* NOTE(review): guard lost in extraction; presumably
     `if (flag_force_mem)` — confirm.  */
  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Materialize dummy targets for any result the caller discards.  */
  if (targ0 == 0)
    targ0 = gen_reg_rtx (mode);
  if (targ1 == 0)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      /* Operands 1 and 2 of the pattern are the inputs; 0 and 3 are
	 the two outputs.  */
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
	  || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
	abort ();

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  /* Truncate the wide results back to the caller's mode.  */
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.

   NOTE(review): reformatted from a garbled extraction; dropped lines
   reconstructed — verify against the upstream file.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  if (!((targ0 != NULL_RTX) ^ (targ1 != NULL_RTX)))
    abort ();

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want: low half for
     TARG0, high half for TARG1.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();

  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.

   NOTE(review): the lines between the table lookup and the tail call
   were lost in extraction; presumably a sanity check that the code has
   an optab (abort/assert) — confirm against the upstream file.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  /* Map the rtx code to its optab via the global code_to_optab table.  */
  optab unop = code_to_optab[(int) code];
  if (unop == 0)
    abort ();

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating (clz:narrow x) as
   (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   NOTE(review): the first line of this comment and some structural
   lines were lost in extraction and have been reconstructed — verify
   against the upstream file.  */

static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      /* Zero-extend (unsignedp = true) so the extra high bits do
		 not contribute leading zeros beyond the correction term.  */
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		/* Subtract off the count of the widening bits.  */
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.

   NOTE(review): reformatted from a garbled extraction; dropped
   structural lines reconstructed — verify against the upstream
   file.  */

static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      /* Unlike widen_clz, this loop starts at MODE itself: a popcount
	 in the original mode already suffices.  */
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      /* Zero-extend so the widening bits add no extra set bits.  */
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		/* Parity is the low bit of the population count.  */
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
2147 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2148 conditions, VAL may already be a SUBREG against which we cannot generate
2149 a further SUBREG. In this case, we expect forcing the value into a
2150 register will work around the situation. */
2153 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2154 enum machine_mode imode
)
2157 ret
= lowpart_subreg (omode
, val
, imode
);
2160 val
= force_reg (imode
, val
);
2161 ret
= lowpart_subreg (omode
, val
, imode
);
2162 gcc_assert (ret
!= NULL
);
2167 /* Expand a floating point absolute value or negation operation via a
2168 logical operation on the sign bit. */
2171 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2172 rtx op0
, rtx target
)
2174 const struct real_format
*fmt
;
2175 int bitpos
, word
, nwords
, i
;
2176 enum machine_mode imode
;
2177 HOST_WIDE_INT hi
, lo
;
2180 /* The format has to have a simple sign bit. */
2181 fmt
= REAL_MODE_FORMAT (mode
);
2185 bitpos
= fmt
->signbit_rw
;
2189 /* Don't create negative zeros if the format doesn't support them. */
2190 if (code
== NEG
&& !fmt
->has_signed_zero
)
2193 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2195 imode
= int_mode_for_mode (mode
);
2196 if (imode
== BLKmode
)
2205 if (FLOAT_WORDS_BIG_ENDIAN
)
2206 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2208 word
= bitpos
/ BITS_PER_WORD
;
2209 bitpos
= bitpos
% BITS_PER_WORD
;
2210 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2213 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2216 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2220 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2226 if (target
== 0 || target
== op0
)
2227 target
= gen_reg_rtx (mode
);
2233 for (i
= 0; i
< nwords
; ++i
)
2235 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2236 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2240 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2242 immed_double_const (lo
, hi
, imode
),
2243 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2244 if (temp
!= targ_piece
)
2245 emit_move_insn (targ_piece
, temp
);
2248 emit_move_insn (targ_piece
, op0_piece
);
2251 insns
= get_insns ();
2254 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2255 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2259 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2260 gen_lowpart (imode
, op0
),
2261 immed_double_const (lo
, hi
, imode
),
2262 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2263 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2265 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2266 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2272 /* Generate code to perform an operation specified by UNOPTAB
2273 on operand OP0, with result having machine-mode MODE.
2275 UNSIGNEDP is for the case where we have to widen the operands
2276 to perform the operation. It says to use zero-extension.
2278 If TARGET is nonzero, the value
2279 is generated there, if it is convenient to do so.
2280 In all cases an rtx is returned for the locus of the value;
2281 this may or may not be TARGET. */
2284 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2287 enum mode_class
class;
2288 enum machine_mode wider_mode
;
2290 rtx last
= get_last_insn ();
2293 class = GET_MODE_CLASS (mode
);
2296 op0
= force_not_mem (op0
);
2298 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2300 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2301 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2307 temp
= gen_reg_rtx (mode
);
2309 if (GET_MODE (xop0
) != VOIDmode
2310 && GET_MODE (xop0
) != mode0
)
2311 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2313 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2315 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2316 xop0
= copy_to_mode_reg (mode0
, xop0
);
2318 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2319 temp
= gen_reg_rtx (mode
);
2321 pat
= GEN_FCN (icode
) (temp
, xop0
);
2324 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2325 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2327 delete_insns_since (last
);
2328 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2336 delete_insns_since (last
);
2339 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2341 /* Widening clz needs special treatment. */
2342 if (unoptab
== clz_optab
)
2344 temp
= widen_clz (mode
, op0
, target
);
2351 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2352 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2353 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2355 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2359 /* For certain operations, we need not actually extend
2360 the narrow operand, as long as we will truncate the
2361 results to the same narrowness. */
2363 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2364 (unoptab
== neg_optab
2365 || unoptab
== one_cmpl_optab
)
2366 && class == MODE_INT
);
2368 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2373 if (class != MODE_INT
)
2376 target
= gen_reg_rtx (mode
);
2377 convert_move (target
, temp
, 0);
2381 return gen_lowpart (mode
, temp
);
2384 delete_insns_since (last
);
2388 /* These can be done a word at a time. */
2389 if (unoptab
== one_cmpl_optab
2390 && class == MODE_INT
2391 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2392 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2397 if (target
== 0 || target
== op0
)
2398 target
= gen_reg_rtx (mode
);
2402 /* Do the actual arithmetic. */
2403 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2405 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2406 rtx x
= expand_unop (word_mode
, unoptab
,
2407 operand_subword_force (op0
, i
, mode
),
2408 target_piece
, unsignedp
);
2410 if (target_piece
!= x
)
2411 emit_move_insn (target_piece
, x
);
2414 insns
= get_insns ();
2417 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2418 gen_rtx_fmt_e (unoptab
->code
, mode
,
2423 if (unoptab
->code
== NEG
)
2425 /* Try negating floating point values by flipping the sign bit. */
2426 if (class == MODE_FLOAT
)
2428 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2433 /* If there is no negation pattern, and we have no negative zero,
2434 try subtracting from zero. */
2435 if (!HONOR_SIGNED_ZEROS (mode
))
2437 temp
= expand_binop (mode
, (unoptab
== negv_optab
2438 ? subv_optab
: sub_optab
),
2439 CONST0_RTX (mode
), op0
, target
,
2440 unsignedp
, OPTAB_DIRECT
);
2446 /* Try calculating parity (x) as popcount (x) % 2. */
2447 if (unoptab
== parity_optab
)
2449 temp
= expand_parity (mode
, op0
, target
);
2455 /* Now try a library call in this mode. */
2456 if (unoptab
->handlers
[(int) mode
].libfunc
)
2460 enum machine_mode outmode
= mode
;
2462 /* All of these functions return small values. Thus we choose to
2463 have them return something that isn't a double-word. */
2464 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2465 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2467 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2471 /* Pass 1 for NO_QUEUE so we don't lose any increments
2472 if the libcall is cse'd or moved. */
2473 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2474 NULL_RTX
, LCT_CONST
, outmode
,
2476 insns
= get_insns ();
2479 target
= gen_reg_rtx (outmode
);
2480 emit_libcall_block (insns
, target
, value
,
2481 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2486 /* It can't be done in this mode. Can we do it in a wider mode? */
2488 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2490 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2491 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2493 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2494 != CODE_FOR_nothing
)
2495 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2499 /* For certain operations, we need not actually extend
2500 the narrow operand, as long as we will truncate the
2501 results to the same narrowness. */
2503 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2504 (unoptab
== neg_optab
2505 || unoptab
== one_cmpl_optab
)
2506 && class == MODE_INT
);
2508 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2511 /* If we are generating clz using wider mode, adjust the
2513 if (unoptab
== clz_optab
&& temp
!= 0)
2514 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2515 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2516 - GET_MODE_BITSIZE (mode
)),
2517 target
, true, OPTAB_DIRECT
);
2521 if (class != MODE_INT
)
2524 target
= gen_reg_rtx (mode
);
2525 convert_move (target
, temp
, 0);
2529 return gen_lowpart (mode
, temp
);
2532 delete_insns_since (last
);
2537 /* One final attempt at implementing negation via subtraction,
2538 this time allowing widening of the operand. */
2539 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2542 temp
= expand_binop (mode
,
2543 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2544 CONST0_RTX (mode
), op0
,
2545 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2553 /* Emit code to compute the absolute value of OP0, with result to
2554 TARGET if convenient. (TARGET may be 0.) The return value says
2555 where the result actually is to be found.
2557 MODE is the mode of the operand; the mode of the result is
2558 different but can be deduced from MODE.
2563 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2564 int result_unsignedp
)
2569 result_unsignedp
= 1;
2571 /* First try to do it with a special abs instruction. */
2572 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2577 /* For floating point modes, try clearing the sign bit. */
2578 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2580 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2585 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2586 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2587 && !HONOR_SIGNED_ZEROS (mode
))
2589 rtx last
= get_last_insn ();
2591 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2593 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2599 delete_insns_since (last
);
2602 /* If this machine has expensive jumps, we can do integer absolute
2603 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2604 where W is the width of MODE. */
2606 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2608 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2609 size_int (GET_MODE_BITSIZE (mode
) - 1),
2612 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2615 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2616 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2626 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2627 int result_unsignedp
, int safe
)
2632 result_unsignedp
= 1;
2634 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2638 /* If that does not win, use conditional jump and negate. */
2640 /* It is safe to use the target if it is the same
2641 as the source if this is also a pseudo register */
2642 if (op0
== target
&& REG_P (op0
)
2643 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2646 op1
= gen_label_rtx ();
2647 if (target
== 0 || ! safe
2648 || GET_MODE (target
) != mode
2649 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2651 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2652 target
= gen_reg_rtx (mode
);
2654 emit_move_insn (target
, op0
);
2657 /* If this mode is an integer too wide to compare properly,
2658 compare word by word. Rely on CSE to optimize constant cases. */
2659 if (GET_MODE_CLASS (mode
) == MODE_INT
2660 && ! can_compare_p (GE
, mode
, ccp_jump
))
2661 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2664 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2665 NULL_RTX
, NULL_RTX
, op1
);
2667 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2670 emit_move_insn (target
, op0
);
2676 /* A subroutine of expand_copysign, perform the copysign operation using the
2677 abs and neg primitives advertised to exist on the target. The assumption
2678 is that we have a split register file, and leaving op0 in fp registers,
2679 and not playing with subregs so much, will help the register allocator. */
2682 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2683 int bitpos
, bool op0_is_abs
)
2685 enum machine_mode imode
;
2686 HOST_WIDE_INT hi
, lo
;
2695 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2702 if (target
== NULL_RTX
)
2703 target
= copy_to_reg (op0
);
2705 emit_move_insn (target
, op0
);
2708 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2710 imode
= int_mode_for_mode (mode
);
2711 if (imode
== BLKmode
)
2713 op1
= gen_lowpart (imode
, op1
);
2718 if (FLOAT_WORDS_BIG_ENDIAN
)
2719 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2721 word
= bitpos
/ BITS_PER_WORD
;
2722 bitpos
= bitpos
% BITS_PER_WORD
;
2723 op1
= operand_subword_force (op1
, word
, mode
);
2726 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2729 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2733 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2737 op1
= expand_binop (imode
, and_optab
, op1
,
2738 immed_double_const (lo
, hi
, imode
),
2739 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2741 label
= gen_label_rtx ();
2742 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2744 if (GET_CODE (op0
) == CONST_DOUBLE
)
2745 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2747 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2749 emit_move_insn (target
, op0
);
2757 /* A subroutine of expand_copysign, perform the entire copysign operation
2758 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2759 is true if op0 is known to have its sign bit clear. */
2762 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2763 int bitpos
, bool op0_is_abs
)
2765 enum machine_mode imode
;
2766 HOST_WIDE_INT hi
, lo
;
2767 int word
, nwords
, i
;
2770 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2772 imode
= int_mode_for_mode (mode
);
2773 if (imode
== BLKmode
)
2782 if (FLOAT_WORDS_BIG_ENDIAN
)
2783 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2785 word
= bitpos
/ BITS_PER_WORD
;
2786 bitpos
= bitpos
% BITS_PER_WORD
;
2787 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2790 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2793 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2797 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2801 if (target
== 0 || target
== op0
|| target
== op1
)
2802 target
= gen_reg_rtx (mode
);
2808 for (i
= 0; i
< nwords
; ++i
)
2810 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2811 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2816 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2817 immed_double_const (~lo
, ~hi
, imode
),
2818 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2820 op1
= expand_binop (imode
, and_optab
,
2821 operand_subword_force (op1
, i
, mode
),
2822 immed_double_const (lo
, hi
, imode
),
2823 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2825 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2826 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2827 if (temp
!= targ_piece
)
2828 emit_move_insn (targ_piece
, temp
);
2831 emit_move_insn (targ_piece
, op0_piece
);
2834 insns
= get_insns ();
2837 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2841 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2842 immed_double_const (lo
, hi
, imode
),
2843 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2845 op0
= gen_lowpart (imode
, op0
);
2847 op0
= expand_binop (imode
, and_optab
, op0
,
2848 immed_double_const (~lo
, ~hi
, imode
),
2849 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2851 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2852 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2853 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2859 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2860 scalar floating point mode. Return NULL if we do not know how to
2861 expand the operation inline. */
2864 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2866 enum machine_mode mode
= GET_MODE (op0
);
2867 const struct real_format
*fmt
;
2872 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2873 gcc_assert (GET_MODE (op1
) == mode
);
2875 /* First try to do it with a special instruction. */
2876 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2877 target
, 0, OPTAB_DIRECT
);
2881 fmt
= REAL_MODE_FORMAT (mode
);
2882 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2885 bitpos
= fmt
->signbit_rw
;
2890 if (GET_CODE (op0
) == CONST_DOUBLE
)
2892 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2893 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2897 if (GET_CODE (op0
) == CONST_DOUBLE
2898 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2899 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
))
2901 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2902 bitpos
, op0_is_abs
);
2907 return expand_copysign_bit (mode
, op0
, op1
, target
, bitpos
, op0_is_abs
);
2910 /* Generate an instruction whose insn-code is INSN_CODE,
2911 with two operands: an output TARGET and an input OP0.
2912 TARGET *must* be nonzero, and the output is always stored there.
2913 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2914 the value that is stored into TARGET. */
2917 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2920 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2925 /* Sign and zero extension from memory is often done specially on
2926 RISC machines, so forcing into a register here can pessimize
2928 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2929 op0
= force_not_mem (op0
);
2931 /* Now, if insn does not accept our operands, put them into pseudos. */
2933 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2934 op0
= copy_to_mode_reg (mode0
, op0
);
2936 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2937 || (flag_force_mem
&& MEM_P (temp
)))
2938 temp
= gen_reg_rtx (GET_MODE (temp
));
2940 pat
= GEN_FCN (icode
) (temp
, op0
);
2942 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2943 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2948 emit_move_insn (target
, temp
);
2951 /* Emit code to perform a series of operations on a multi-word quantity, one
2954 Such a block is preceded by a CLOBBER of the output, consists of multiple
2955 insns, each setting one word of the output, and followed by a SET copying
2956 the output to itself.
2958 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2959 note indicating that it doesn't conflict with the (also multi-word)
2960 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2963 INSNS is a block of code generated to perform the operation, not including
2964 the CLOBBER and final copy. All insns that compute intermediate values
2965 are first emitted, followed by the block as described above.
2967 TARGET, OP0, and OP1 are the output and inputs of the operations,
2968 respectively. OP1 may be zero for a unary operation.
2970 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2973 If TARGET is not a register, INSNS is simply emitted with no special
2974 processing. Likewise if anything in INSNS is not an INSN or if
2975 there is a libcall block inside INSNS.
2977 The final insn emitted is returned. */
2980 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2982 rtx prev
, next
, first
, last
, insn
;
2984 if (!REG_P (target
) || reload_in_progress
)
2985 return emit_insn (insns
);
2987 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2988 if (!NONJUMP_INSN_P (insn
)
2989 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2990 return emit_insn (insns
);
2992 /* First emit all insns that do not store into words of the output and remove
2993 these from the list. */
2994 for (insn
= insns
; insn
; insn
= next
)
2999 next
= NEXT_INSN (insn
);
3001 /* Some ports (cris) create a libcall regions at their own. We must
3002 avoid any potential nesting of LIBCALLs. */
3003 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3004 remove_note (insn
, note
);
3005 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3006 remove_note (insn
, note
);
3008 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3009 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3010 set
= PATTERN (insn
);
3011 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3013 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3014 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3016 set
= XVECEXP (PATTERN (insn
), 0, i
);
3024 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3026 if (PREV_INSN (insn
))
3027 NEXT_INSN (PREV_INSN (insn
)) = next
;
3032 PREV_INSN (next
) = PREV_INSN (insn
);
3038 prev
= get_last_insn ();
3040 /* Now write the CLOBBER of the output, followed by the setting of each
3041 of the words, followed by the final copy. */
3042 if (target
!= op0
&& target
!= op1
)
3043 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3045 for (insn
= insns
; insn
; insn
= next
)
3047 next
= NEXT_INSN (insn
);
3050 if (op1
&& REG_P (op1
))
3051 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3054 if (op0
&& REG_P (op0
))
3055 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3059 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3060 != CODE_FOR_nothing
)
3062 last
= emit_move_insn (target
, target
);
3064 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3068 last
= get_last_insn ();
3070 /* Remove any existing REG_EQUAL note from "last", or else it will
3071 be mistaken for a note referring to the full contents of the
3072 alleged libcall value when found together with the REG_RETVAL
3073 note added below. An existing note can come from an insn
3074 expansion at "last". */
3075 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3079 first
= get_insns ();
3081 first
= NEXT_INSN (prev
);
3083 /* Encapsulate the block so it gets manipulated as a unit. */
3084 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3086 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3091 /* Emit code to make a call to a constant function or a library call.
3093 INSNS is a list containing all insns emitted in the call.
3094 These insns leave the result in RESULT. Our block is to copy RESULT
3095 to TARGET, which is logically equivalent to EQUIV.
3097 We first emit any insns that set a pseudo on the assumption that these are
3098 loading constants into registers; doing so allows them to be safely cse'ed
3099 between blocks. Then we emit all the other insns in the block, followed by
3100 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3101 note with an operand of EQUIV.
3103 Moving assignments to pseudos outside of the block is done to improve
3104 the generated code, but is not required to generate correct code,
3105 hence being unable to move an assignment is not grounds for not making
3106 a libcall block. There are two reasons why it is safe to leave these
3107 insns inside the block: First, we know that these pseudos cannot be
3108 used in generated RTL outside the block since they are created for
3109 temporary purposes within the block. Second, CSE will not record the
3110 values of anything set inside a libcall block, so we know they must
3111 be dead at the end of the block.
3113 Except for the first group of insns (the ones setting pseudos), the
3114 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3117 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3119 rtx final_dest
= target
;
3120 rtx prev
, next
, first
, last
, insn
;
3122 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3123 into a MEM later. Protect the libcall block from this change. */
3124 if (! REG_P (target
) || REG_USERVAR_P (target
))
3125 target
= gen_reg_rtx (GET_MODE (target
));
3127 /* If we're using non-call exceptions, a libcall corresponding to an
3128 operation that may trap may also trap. */
3129 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3131 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3134 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3136 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3137 remove_note (insn
, note
);
3141 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3142 reg note to indicate that this call cannot throw or execute a nonlocal
3143 goto (unless there is already a REG_EH_REGION note, in which case
3145 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3148 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3151 XEXP (note
, 0) = constm1_rtx
;
3153 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3157 /* First emit all insns that set pseudos. Remove them from the list as
3158 we go. Avoid insns that set pseudos which were referenced in previous
3159 insns. These can be generated by move_by_pieces, for example,
3160 to update an address. Similarly, avoid insns that reference things
3161 set in previous insns. */
3163 for (insn
= insns
; insn
; insn
= next
)
3165 rtx set
= single_set (insn
);
3168 /* Some ports (cris) create a libcall regions at their own. We must
3169 avoid any potential nesting of LIBCALLs. */
3170 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3171 remove_note (insn
, note
);
3172 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3173 remove_note (insn
, note
);
3175 next
= NEXT_INSN (insn
);
3177 if (set
!= 0 && REG_P (SET_DEST (set
))
3178 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3180 || ((! INSN_P(insns
)
3181 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3182 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3183 && ! modified_in_p (SET_SRC (set
), insns
)
3184 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3186 if (PREV_INSN (insn
))
3187 NEXT_INSN (PREV_INSN (insn
)) = next
;
3192 PREV_INSN (next
) = PREV_INSN (insn
);
3197 /* Some ports use a loop to copy large arguments onto the stack.
3198 Don't move anything outside such a loop. */
3203 prev
= get_last_insn ();
3205 /* Write the remaining insns followed by the final copy. */
3207 for (insn
= insns
; insn
; insn
= next
)
3209 next
= NEXT_INSN (insn
);
3214 last
= emit_move_insn (target
, result
);
3215 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3216 != CODE_FOR_nothing
)
3217 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3220 /* Remove any existing REG_EQUAL note from "last", or else it will
3221 be mistaken for a note referring to the full contents of the
3222 libcall value when found together with the REG_RETVAL note added
3223 below. An existing note can come from an insn expansion at
3225 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3228 if (final_dest
!= target
)
3229 emit_move_insn (final_dest
, target
);
3232 first
= get_insns ();
3234 first
= NEXT_INSN (prev
);
3236 /* Encapsulate the block so it gets manipulated as a unit. */
3237 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3239 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3240 when the encapsulated region would not be in one basic block,
3241 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3243 bool attach_libcall_retval_notes
= true;
3244 next
= NEXT_INSN (last
);
3245 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3246 if (control_flow_insn_p (insn
))
3248 attach_libcall_retval_notes
= false;
3252 if (attach_libcall_retval_notes
)
3254 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3256 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3262 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3263 PURPOSE describes how this comparison will be used. CODE is the rtx
3264 comparison code we will be using.
3266 ??? Actually, CODE is slightly weaker than that. A target is still
3267 required to implement all of the normal bcc operations, but not
3268 required to implement all (or any) of the unordered bcc operations. */
3271 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3272 enum can_compare_purpose purpose
)
3276 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3278 if (purpose
== ccp_jump
)
3279 return bcc_gen_fctn
[(int) code
] != NULL
;
3280 else if (purpose
== ccp_store_flag
)
3281 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3283 /* There's only one cmov entry point, and it's allowed to fail. */
3286 if (purpose
== ccp_jump
3287 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3289 if (purpose
== ccp_cmov
3290 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3292 if (purpose
== ccp_store_flag
3293 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3295 mode
= GET_MODE_WIDER_MODE (mode
);
3297 while (mode
!= VOIDmode
);
3302 /* This function is called when we are going to emit a compare instruction that
3303 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3305 *PMODE is the mode of the inputs (in case they are const_int).
3306 *PUNSIGNEDP nonzero says that the operands are unsigned;
3307 this matters if they need to be widened.
3309 If they have mode BLKmode, then SIZE specifies the size of both operands.
3311 This function performs all the setup necessary so that the caller only has
3312 to emit a single comparison insn. This setup can involve doing a BLKmode
3313 comparison or emitting a library call to perform the comparison if no insn
3314 is available to handle it.
3315 The values which are passed in through pointers can be modified; the caller
3316 should perform the comparison on the modified values. */
3319 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3320 enum machine_mode
*pmode
, int *punsignedp
,
3321 enum can_compare_purpose purpose
)
3323 enum machine_mode mode
= *pmode
;
3324 rtx x
= *px
, y
= *py
;
3325 int unsignedp
= *punsignedp
;
3326 enum mode_class
class;
3328 class = GET_MODE_CLASS (mode
);
3330 /* They could both be VOIDmode if both args are immediate constants,
3331 but we should fold that at an earlier stage.
3332 With no special code here, this will call abort,
3333 reminding the programmer to implement such folding. */
3335 if (mode
!= BLKmode
&& flag_force_mem
)
3337 /* Load duplicate non-volatile operands once. */
3338 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3340 x
= force_not_mem (x
);
3345 x
= force_not_mem (x
);
3346 y
= force_not_mem (y
);
3350 /* If we are inside an appropriately-short loop and we are optimizing,
3351 force expensive constants into a register. */
3352 if (CONSTANT_P (x
) && optimize
3353 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3354 x
= force_reg (mode
, x
);
3356 if (CONSTANT_P (y
) && optimize
3357 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3358 y
= force_reg (mode
, y
);
3361 /* Abort if we have a non-canonical comparison. The RTL documentation
3362 states that canonical comparisons are required only for targets which
3364 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3368 /* Don't let both operands fail to indicate the mode. */
3369 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3370 x
= force_reg (mode
, x
);
3372 /* Handle all BLKmode compares. */
3374 if (mode
== BLKmode
)
3376 enum machine_mode cmp_mode
, result_mode
;
3377 enum insn_code cmp_code
;
3382 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3387 /* Try to use a memory block compare insn - either cmpstr
3388 or cmpmem will do. */
3389 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3390 cmp_mode
!= VOIDmode
;
3391 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3393 cmp_code
= cmpmem_optab
[cmp_mode
];
3394 if (cmp_code
== CODE_FOR_nothing
)
3395 cmp_code
= cmpstr_optab
[cmp_mode
];
3396 if (cmp_code
== CODE_FOR_nothing
)
3399 /* Must make sure the size fits the insn's mode. */
3400 if ((GET_CODE (size
) == CONST_INT
3401 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3402 || (GET_MODE_BITSIZE (GET_MODE (size
))
3403 > GET_MODE_BITSIZE (cmp_mode
)))
3406 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3407 result
= gen_reg_rtx (result_mode
);
3408 size
= convert_to_mode (cmp_mode
, size
, 1);
3409 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3413 *pmode
= result_mode
;
3417 /* Otherwise call a library function, memcmp. */
3418 libfunc
= memcmp_libfunc
;
3419 length_type
= sizetype
;
3420 result_mode
= TYPE_MODE (integer_type_node
);
3421 cmp_mode
= TYPE_MODE (length_type
);
3422 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3423 TYPE_UNSIGNED (length_type
));
3425 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3432 *pmode
= result_mode
;
3436 /* Don't allow operands to the compare to trap, as that can put the
3437 compare and branch in different basic blocks. */
3438 if (flag_non_call_exceptions
)
3441 x
= force_reg (mode
, x
);
3443 y
= force_reg (mode
, y
);
3448 if (can_compare_p (*pcomparison
, mode
, purpose
))
3451 /* Handle a lib call just for the mode we are using. */
3453 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3455 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3458 /* If we want unsigned, and this mode has a distinct unsigned
3459 comparison routine, use that. */
3460 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3461 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3463 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3464 word_mode
, 2, x
, mode
, y
, mode
);
3468 if (TARGET_LIB_INT_CMP_BIASED
)
3469 /* Integer comparison returns a result that must be compared
3470 against 1, so that even if we do an unsigned compare
3471 afterward, there is still a value that can represent the
3472 result "less than". */
3482 if (class == MODE_FLOAT
)
3483 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3489 /* Before emitting an insn with code ICODE, make sure that X, which is going
3490 to be used for operand OPNUM of the insn, is converted from mode MODE to
3491 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3492 that it is accepted by the operand predicate. Return the new value. */
3495 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3496 enum machine_mode wider_mode
, int unsignedp
)
3498 if (mode
!= wider_mode
)
3499 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3501 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3502 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3506 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3512 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3513 we can do the comparison.
3514 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3515 be NULL_RTX which indicates that only a comparison is to be generated. */
3518 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3519 enum rtx_code comparison
, int unsignedp
, rtx label
)
3521 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3522 enum mode_class
class = GET_MODE_CLASS (mode
);
3523 enum machine_mode wider_mode
= mode
;
3525 /* Try combined insns first. */
3528 enum insn_code icode
;
3529 PUT_MODE (test
, wider_mode
);
3533 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3535 if (icode
!= CODE_FOR_nothing
3536 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3538 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3539 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3540 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3545 /* Handle some compares against zero. */
3546 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3547 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3549 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3550 emit_insn (GEN_FCN (icode
) (x
));
3552 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3556 /* Handle compares for which there is a directly suitable insn. */
3558 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3559 if (icode
!= CODE_FOR_nothing
)
3561 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3562 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3563 emit_insn (GEN_FCN (icode
) (x
, y
));
3565 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3569 if (class != MODE_INT
&& class != MODE_FLOAT
3570 && class != MODE_COMPLEX_FLOAT
)
3573 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3575 while (wider_mode
!= VOIDmode
);
3580 /* Generate code to compare X with Y so that the condition codes are
3581 set and to jump to LABEL if the condition is true. If X is a
3582 constant and Y is not a constant, then the comparison is swapped to
3583 ensure that the comparison RTL has the canonical form.
3585 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3586 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3587 the proper branch condition code.
3589 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3591 MODE is the mode of the inputs (in case they are const_int).
3593 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3594 be passed unchanged to emit_cmp_insn, then potentially converted into an
3595 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3598 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3599 enum machine_mode mode
, int unsignedp
, rtx label
)
3601 rtx op0
= x
, op1
= y
;
3603 /* Swap operands and condition to ensure canonical RTL. */
3604 if (swap_commutative_operands_p (x
, y
))
3606 /* If we're not emitting a branch, this means some caller
3612 comparison
= swap_condition (comparison
);
3616 /* If OP0 is still a constant, then both X and Y must be constants. Force
3617 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3619 if (CONSTANT_P (op0
))
3620 op0
= force_reg (mode
, op0
);
3624 comparison
= unsigned_condition (comparison
);
3626 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3628 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3631 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3634 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3635 enum machine_mode mode
, int unsignedp
)
3637 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3640 /* Emit a library call comparison between floating point X and Y.
3641 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3644 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3645 enum machine_mode
*pmode
, int *punsignedp
)
3647 enum rtx_code comparison
= *pcomparison
;
3648 enum rtx_code swapped
= swap_condition (comparison
);
3649 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3652 enum machine_mode orig_mode
= GET_MODE (x
);
3653 enum machine_mode mode
;
3654 rtx value
, target
, insns
, equiv
;
3656 bool reversed_p
= false;
3658 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3660 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3663 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3666 tmp
= x
; x
= y
; y
= tmp
;
3667 comparison
= swapped
;
3671 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3672 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3674 comparison
= reversed
;
3680 if (mode
== VOIDmode
)
3683 if (mode
!= orig_mode
)
3685 x
= convert_to_mode (mode
, x
, 0);
3686 y
= convert_to_mode (mode
, y
, 0);
3689 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3690 the RTL. The allows the RTL optimizers to delete the libcall if the
3691 condition can be determined at compile-time. */
3692 if (comparison
== UNORDERED
)
3694 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3695 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3696 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3697 temp
, const_true_rtx
, equiv
);
3701 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3702 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3704 rtx true_rtx
, false_rtx
;
3709 true_rtx
= const0_rtx
;
3710 false_rtx
= const_true_rtx
;
3714 true_rtx
= const_true_rtx
;
3715 false_rtx
= const0_rtx
;
3719 true_rtx
= const1_rtx
;
3720 false_rtx
= const0_rtx
;
3724 true_rtx
= const0_rtx
;
3725 false_rtx
= constm1_rtx
;
3729 true_rtx
= constm1_rtx
;
3730 false_rtx
= const0_rtx
;
3734 true_rtx
= const0_rtx
;
3735 false_rtx
= const1_rtx
;
3741 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3742 equiv
, true_rtx
, false_rtx
);
3747 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3748 word_mode
, 2, x
, mode
, y
, mode
);
3749 insns
= get_insns ();
3752 target
= gen_reg_rtx (word_mode
);
3753 emit_libcall_block (insns
, target
, value
, equiv
);
3755 if (comparison
== UNORDERED
3756 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3757 comparison
= reversed_p
? EQ
: NE
;
3762 *pcomparison
= comparison
;
3766 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3769 emit_indirect_jump (rtx loc
)
3771 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3773 loc
= copy_to_mode_reg (Pmode
, loc
);
3775 emit_jump_insn (gen_indirect_jump (loc
));
3779 #ifdef HAVE_conditional_move
3781 /* Emit a conditional move instruction if the machine supports one for that
3782 condition and machine mode.
3784 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3785 the mode to use should they be constants. If it is VOIDmode, they cannot
3788 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3789 should be stored there. MODE is the mode to use should they be constants.
3790 If it is VOIDmode, they cannot both be constants.
3792 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3793 is not supported. */
3796 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3797 enum machine_mode cmode
, rtx op2
, rtx op3
,
3798 enum machine_mode mode
, int unsignedp
)
3800 rtx tem
, subtarget
, comparison
, insn
;
3801 enum insn_code icode
;
3802 enum rtx_code reversed
;
3804 /* If one operand is constant, make it the second one. Only do this
3805 if the other operand is not constant as well. */
3807 if (swap_commutative_operands_p (op0
, op1
))
3812 code
= swap_condition (code
);
3815 /* get_condition will prefer to generate LT and GT even if the old
3816 comparison was against zero, so undo that canonicalization here since
3817 comparisons against zero are cheaper. */
3818 if (code
== LT
&& op1
== const1_rtx
)
3819 code
= LE
, op1
= const0_rtx
;
3820 else if (code
== GT
&& op1
== constm1_rtx
)
3821 code
= GE
, op1
= const0_rtx
;
3823 if (cmode
== VOIDmode
)
3824 cmode
= GET_MODE (op0
);
3826 if (swap_commutative_operands_p (op2
, op3
)
3827 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3836 if (mode
== VOIDmode
)
3837 mode
= GET_MODE (op2
);
3839 icode
= movcc_gen_code
[mode
];
3841 if (icode
== CODE_FOR_nothing
)
3846 op2
= force_not_mem (op2
);
3847 op3
= force_not_mem (op3
);
3851 target
= gen_reg_rtx (mode
);
3855 /* If the insn doesn't accept these operands, put them in pseudos. */
3857 if (! (*insn_data
[icode
].operand
[0].predicate
)
3858 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3859 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3861 if (! (*insn_data
[icode
].operand
[2].predicate
)
3862 (op2
, insn_data
[icode
].operand
[2].mode
))
3863 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3865 if (! (*insn_data
[icode
].operand
[3].predicate
)
3866 (op3
, insn_data
[icode
].operand
[3].mode
))
3867 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3869 /* Everything should now be in the suitable form, so emit the compare insn
3870 and then the conditional move. */
3873 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3875 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3876 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3877 return NULL and let the caller figure out how best to deal with this
3879 if (GET_CODE (comparison
) != code
)
3882 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3884 /* If that failed, then give up. */
3890 if (subtarget
!= target
)
3891 convert_move (target
, subtarget
, 0);
3896 /* Return nonzero if a conditional move of mode MODE is supported.
3898 This function is for combine so it can tell whether an insn that looks
3899 like a conditional move is actually supported by the hardware. If we
3900 guess wrong we lose a bit on optimization, but that's it. */
3901 /* ??? sparc64 supports conditionally moving integers values based on fp
3902 comparisons, and vice versa. How do we handle them? */
3905 can_conditionally_move_p (enum machine_mode mode
)
3907 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3913 #endif /* HAVE_conditional_move */
3915 /* Emit a conditional addition instruction if the machine supports one for that
3916 condition and machine mode.
3918 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3919 the mode to use should they be constants. If it is VOIDmode, they cannot
3922 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3923 should be stored there. MODE is the mode to use should they be constants.
3924 If it is VOIDmode, they cannot both be constants.
3926 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3927 is not supported. */
3930 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3931 enum machine_mode cmode
, rtx op2
, rtx op3
,
3932 enum machine_mode mode
, int unsignedp
)
3934 rtx tem
, subtarget
, comparison
, insn
;
3935 enum insn_code icode
;
3936 enum rtx_code reversed
;
3938 /* If one operand is constant, make it the second one. Only do this
3939 if the other operand is not constant as well. */
3941 if (swap_commutative_operands_p (op0
, op1
))
3946 code
= swap_condition (code
);
3949 /* get_condition will prefer to generate LT and GT even if the old
3950 comparison was against zero, so undo that canonicalization here since
3951 comparisons against zero are cheaper. */
3952 if (code
== LT
&& op1
== const1_rtx
)
3953 code
= LE
, op1
= const0_rtx
;
3954 else if (code
== GT
&& op1
== constm1_rtx
)
3955 code
= GE
, op1
= const0_rtx
;
3957 if (cmode
== VOIDmode
)
3958 cmode
= GET_MODE (op0
);
3960 if (swap_commutative_operands_p (op2
, op3
)
3961 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3970 if (mode
== VOIDmode
)
3971 mode
= GET_MODE (op2
);
3973 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3975 if (icode
== CODE_FOR_nothing
)
3980 op2
= force_not_mem (op2
);
3981 op3
= force_not_mem (op3
);
3985 target
= gen_reg_rtx (mode
);
3987 /* If the insn doesn't accept these operands, put them in pseudos. */
3989 if (! (*insn_data
[icode
].operand
[0].predicate
)
3990 (target
, insn_data
[icode
].operand
[0].mode
))
3991 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3995 if (! (*insn_data
[icode
].operand
[2].predicate
)
3996 (op2
, insn_data
[icode
].operand
[2].mode
))
3997 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3999 if (! (*insn_data
[icode
].operand
[3].predicate
)
4000 (op3
, insn_data
[icode
].operand
[3].mode
))
4001 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4003 /* Everything should now be in the suitable form, so emit the compare insn
4004 and then the conditional move. */
4007 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4009 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4010 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4011 return NULL and let the caller figure out how best to deal with this
4013 if (GET_CODE (comparison
) != code
)
4016 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4018 /* If that failed, then give up. */
4024 if (subtarget
!= target
)
4025 convert_move (target
, subtarget
, 0);
4030 /* These functions attempt to generate an insn body, rather than
4031 emitting the insn, but if the gen function already emits them, we
4032 make no attempt to turn them back into naked patterns. */
4034 /* Generate and return an insn body to add Y to X. */
4037 gen_add2_insn (rtx x
, rtx y
)
4039 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4041 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4042 (x
, insn_data
[icode
].operand
[0].mode
))
4043 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4044 (x
, insn_data
[icode
].operand
[1].mode
))
4045 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4046 (y
, insn_data
[icode
].operand
[2].mode
)))
4049 return (GEN_FCN (icode
) (x
, x
, y
));
4052 /* Generate and return an insn body to add r1 and c,
4053 storing the result in r0. */
4055 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4057 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4059 if (icode
== CODE_FOR_nothing
4060 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4061 (r0
, insn_data
[icode
].operand
[0].mode
))
4062 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4063 (r1
, insn_data
[icode
].operand
[1].mode
))
4064 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4065 (c
, insn_data
[icode
].operand
[2].mode
)))
4068 return (GEN_FCN (icode
) (r0
, r1
, c
));
4072 have_add2_insn (rtx x
, rtx y
)
4076 if (GET_MODE (x
) == VOIDmode
)
4079 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4081 if (icode
== CODE_FOR_nothing
)
4084 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4085 (x
, insn_data
[icode
].operand
[0].mode
))
4086 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4087 (x
, insn_data
[icode
].operand
[1].mode
))
4088 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4089 (y
, insn_data
[icode
].operand
[2].mode
)))
4095 /* Generate and return an insn body to subtract Y from X. */
4098 gen_sub2_insn (rtx x
, rtx y
)
4100 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4102 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4103 (x
, insn_data
[icode
].operand
[0].mode
))
4104 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4105 (x
, insn_data
[icode
].operand
[1].mode
))
4106 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4107 (y
, insn_data
[icode
].operand
[2].mode
)))
4110 return (GEN_FCN (icode
) (x
, x
, y
));
4113 /* Generate and return an insn body to subtract r1 and c,
4114 storing the result in r0. */
4116 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4118 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4120 if (icode
== CODE_FOR_nothing
4121 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4122 (r0
, insn_data
[icode
].operand
[0].mode
))
4123 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4124 (r1
, insn_data
[icode
].operand
[1].mode
))
4125 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4126 (c
, insn_data
[icode
].operand
[2].mode
)))
4129 return (GEN_FCN (icode
) (r0
, r1
, c
));
4133 have_sub2_insn (rtx x
, rtx y
)
4137 if (GET_MODE (x
) == VOIDmode
)
4140 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4142 if (icode
== CODE_FOR_nothing
)
4145 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4146 (x
, insn_data
[icode
].operand
[0].mode
))
4147 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4148 (x
, insn_data
[icode
].operand
[1].mode
))
4149 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4150 (y
, insn_data
[icode
].operand
[2].mode
)))
4156 /* Generate the body of an instruction to copy Y into X.
4157 It may be a list of insns, if one insn isn't enough. */
4160 gen_move_insn (rtx x
, rtx y
)
4165 emit_move_insn_1 (x
, y
);
4171 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4172 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4173 no such operation exists, CODE_FOR_nothing will be returned. */
4176 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4180 #ifdef HAVE_ptr_extend
4182 return CODE_FOR_ptr_extend
;
4185 tab
= unsignedp
? zext_optab
: sext_optab
;
4186 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4189 /* Generate the body of an insn to extend Y (with mode MFROM)
4190 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4193 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4194 enum machine_mode mfrom
, int unsignedp
)
4196 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4197 return GEN_FCN (icode
) (x
, y
);
4200 /* can_fix_p and can_float_p say whether the target machine
4201 can directly convert a given fixed point type to
4202 a given floating point type, or vice versa.
4203 The returned value is the CODE_FOR_... value to use,
4204 or CODE_FOR_nothing if these modes cannot be directly converted.
4206 *TRUNCP_PTR is set to 1 if it is necessary to output
4207 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4209 static enum insn_code
4210 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4211 int unsignedp
, int *truncp_ptr
)
4214 enum insn_code icode
;
4216 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4217 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4218 if (icode
!= CODE_FOR_nothing
)
4224 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4225 for this to work. We need to rework the fix* and ftrunc* patterns
4226 and documentation. */
4227 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4228 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4229 if (icode
!= CODE_FOR_nothing
4230 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4237 return CODE_FOR_nothing
;
4240 static enum insn_code
4241 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4246 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4247 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4250 /* Generate code to convert FROM to floating point
4251 and store in TO. FROM must be fixed point and not VOIDmode.
4252 UNSIGNEDP nonzero means regard FROM as unsigned.
4253 Normally this is done by correcting the final value
4254 if it is negative. */
4257 expand_float (rtx to
, rtx from
, int unsignedp
)
4259 enum insn_code icode
;
4261 enum machine_mode fmode
, imode
;
4263 /* Crash now, because we won't be able to decide which mode to use. */
4264 if (GET_MODE (from
) == VOIDmode
)
4267 /* Look for an insn to do the conversion. Do it in the specified
4268 modes if possible; otherwise convert either input, output or both to
4269 wider mode. If the integer mode is wider than the mode of FROM,
4270 we can do the conversion signed even if the input is unsigned. */
4272 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4273 fmode
= GET_MODE_WIDER_MODE (fmode
))
4274 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4275 imode
= GET_MODE_WIDER_MODE (imode
))
4277 int doing_unsigned
= unsignedp
;
4279 if (fmode
!= GET_MODE (to
)
4280 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4283 icode
= can_float_p (fmode
, imode
, unsignedp
);
4284 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4285 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4287 if (icode
!= CODE_FOR_nothing
)
4289 if (imode
!= GET_MODE (from
))
4290 from
= convert_to_mode (imode
, from
, unsignedp
);
4292 if (fmode
!= GET_MODE (to
))
4293 target
= gen_reg_rtx (fmode
);
4295 emit_unop_insn (icode
, target
, from
,
4296 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4299 convert_move (to
, target
, 0);
4304 /* Unsigned integer, and no way to convert directly.
4305 Convert as signed, then conditionally adjust the result. */
4308 rtx label
= gen_label_rtx ();
4310 REAL_VALUE_TYPE offset
;
4313 from
= force_not_mem (from
);
4315 /* Look for a usable floating mode FMODE wider than the source and at
4316 least as wide as the target. Using FMODE will avoid rounding woes
4317 with unsigned values greater than the signed maximum value. */
4319 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4320 fmode
= GET_MODE_WIDER_MODE (fmode
))
4321 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4322 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4325 if (fmode
== VOIDmode
)
4327 /* There is no such mode. Pretend the target is wide enough. */
4328 fmode
= GET_MODE (to
);
4330 /* Avoid double-rounding when TO is narrower than FROM. */
4331 if ((significand_size (fmode
) + 1)
4332 < GET_MODE_BITSIZE (GET_MODE (from
)))
4335 rtx neglabel
= gen_label_rtx ();
4337 /* Don't use TARGET if it isn't a register, is a hard register,
4338 or is the wrong mode. */
4340 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4341 || GET_MODE (target
) != fmode
)
4342 target
= gen_reg_rtx (fmode
);
4344 imode
= GET_MODE (from
);
4345 do_pending_stack_adjust ();
4347 /* Test whether the sign bit is set. */
4348 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4351 /* The sign bit is not set. Convert as signed. */
4352 expand_float (target
, from
, 0);
4353 emit_jump_insn (gen_jump (label
));
4356 /* The sign bit is set.
4357 Convert to a usable (positive signed) value by shifting right
4358 one bit, while remembering if a nonzero bit was shifted
4359 out; i.e., compute (from & 1) | (from >> 1). */
4361 emit_label (neglabel
);
4362 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4363 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4364 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4366 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4368 expand_float (target
, temp
, 0);
4370 /* Multiply by 2 to undo the shift above. */
4371 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4372 target
, 0, OPTAB_LIB_WIDEN
);
4374 emit_move_insn (target
, temp
);
4376 do_pending_stack_adjust ();
4382 /* If we are about to do some arithmetic to correct for an
4383 unsigned operand, do it in a pseudo-register. */
4385 if (GET_MODE (to
) != fmode
4386 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4387 target
= gen_reg_rtx (fmode
);
4389 /* Convert as signed integer to floating. */
4390 expand_float (target
, from
, 0);
4392 /* If FROM is negative (and therefore TO is negative),
4393 correct its value by 2**bitwidth. */
4395 do_pending_stack_adjust ();
4396 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4400 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4401 temp
= expand_binop (fmode
, add_optab
, target
,
4402 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4403 target
, 0, OPTAB_LIB_WIDEN
);
4405 emit_move_insn (target
, temp
);
4407 do_pending_stack_adjust ();
4412 /* No hardware instruction available; call a library routine. */
4417 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4419 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4420 from
= convert_to_mode (SImode
, from
, unsignedp
);
4423 from
= force_not_mem (from
);
4425 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4431 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4432 GET_MODE (to
), 1, from
,
4434 insns
= get_insns ();
4437 emit_libcall_block (insns
, target
, value
,
4438 gen_rtx_FLOAT (GET_MODE (to
), from
));
4443 /* Copy result to requested destination
4444 if we have been computing in a temp location. */
4448 if (GET_MODE (target
) == GET_MODE (to
))
4449 emit_move_insn (to
, target
);
4451 convert_move (to
, target
, 0);
4455 /* Generate code to convert FROM to fixed point and store in TO. FROM
4456 must be floating point. */
4459 expand_fix (rtx to
, rtx from
, int unsignedp
)
4461 enum insn_code icode
;
4463 enum machine_mode fmode
, imode
;
4466 /* We first try to find a pair of modes, one real and one integer, at
4467 least as wide as FROM and TO, respectively, in which we can open-code
4468 this conversion. If the integer mode is wider than the mode of TO,
4469 we can do the conversion either signed or unsigned. */
4471 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4472 fmode
= GET_MODE_WIDER_MODE (fmode
))
4473 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4474 imode
= GET_MODE_WIDER_MODE (imode
))
4476 int doing_unsigned
= unsignedp
;
4478 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4479 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4480 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4482 if (icode
!= CODE_FOR_nothing
)
4484 if (fmode
!= GET_MODE (from
))
4485 from
= convert_to_mode (fmode
, from
, 0);
4489 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4490 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4494 if (imode
!= GET_MODE (to
))
4495 target
= gen_reg_rtx (imode
);
4497 emit_unop_insn (icode
, target
, from
,
4498 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4500 convert_move (to
, target
, unsignedp
);
/* For an unsigned conversion, there is one more way to do it.
   If we have a signed conversion, we generate code that compares
   the real value to the largest representable positive number.  If it
   is smaller, the conversion is done normally.  Otherwise, subtract
   one plus the highest signed number, convert, and add it back.

   We only need to check all real modes, since we know we didn't find
   anything with a wider integer mode.

   This code used to extend FP value into mode wider than the destination.
   This is not needed.  Consider, for instance conversion from SFmode
   into DImode.

   The hot path through the code is dealing with inputs smaller than 2^63
   and doing just the conversion, so there are no bits to lose.

   In the other path we know the value is positive in the range 2^63..2^64-1
   inclusive.  (As for other input, overflow happens and the result is
   undefined.)  So we know that the most significant bit set in the mantissa
   corresponds to 2^63.  The subtraction of 2^63 should not generate any
   rounding as it simply clears out that bit.  The rest is trivial.  */
4527 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4528 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4529 fmode
= GET_MODE_WIDER_MODE (fmode
))
4530 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4534 REAL_VALUE_TYPE offset
;
4535 rtx limit
, lab1
, lab2
, insn
;
4537 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4538 real_2expN (&offset
, bitsize
- 1);
4539 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4540 lab1
= gen_label_rtx ();
4541 lab2
= gen_label_rtx ();
4544 from
= force_not_mem (from
);
4546 if (fmode
!= GET_MODE (from
))
4547 from
= convert_to_mode (fmode
, from
, 0);
4549 /* See if we need to do the subtraction. */
4550 do_pending_stack_adjust ();
4551 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4554 /* If not, do the signed "fix" and branch around fixup code. */
4555 expand_fix (to
, from
, 0);
4556 emit_jump_insn (gen_jump (lab2
));
4559 /* Otherwise, subtract 2**(N-1), convert to signed number,
4560 then add 2**(N-1). Do the addition using XOR since this
4561 will often generate better code. */
4563 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4564 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4565 expand_fix (to
, target
, 0);
4566 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4568 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4570 to
, 1, OPTAB_LIB_WIDEN
);
4573 emit_move_insn (to
, target
);
4577 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4578 != CODE_FOR_nothing
)
4580 /* Make a place for a REG_NOTE and add it. */
4581 insn
= emit_move_insn (to
, to
);
4582 set_unique_reg_note (insn
,
4584 gen_rtx_fmt_e (UNSIGNED_FIX
,
4592 /* We can't do it with an insn, so use a library call. But first ensure
4593 that the mode of TO is at least as wide as SImode, since those are the
4594 only library calls we know about. */
4596 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4598 target
= gen_reg_rtx (SImode
);
4600 expand_fix (target
, from
, unsignedp
);
4608 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4609 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4614 from
= force_not_mem (from
);
4618 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4619 GET_MODE (to
), 1, from
,
4621 insns
= get_insns ();
4624 emit_libcall_block (insns
, target
, value
,
4625 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4626 GET_MODE (to
), from
));
4631 if (GET_MODE (to
) == GET_MODE (target
))
4632 emit_move_insn (to
, target
);
4634 convert_move (to
, target
, 0);
4638 /* Report whether we have an instruction to perform the operation
4639 specified by CODE on operands of mode MODE. */
4641 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4643 return (code_to_optab
[(int) code
] != 0
4644 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4645 != CODE_FOR_nothing
));
4648 /* Create a blank optab. */
4653 optab op
= ggc_alloc (sizeof (struct optab
));
4654 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4656 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4657 op
->handlers
[i
].libfunc
= 0;
4663 static convert_optab
4664 new_convert_optab (void)
4667 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4668 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4669 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4671 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4672 op
->handlers
[i
][j
].libfunc
= 0;
4677 /* Same, but fill in its code as CODE, and write it into the
4678 code_to_optab table. */
4680 init_optab (enum rtx_code code
)
4682 optab op
= new_optab ();
4684 code_to_optab
[(int) code
] = op
;
4688 /* Same, but fill in its code as CODE, and do _not_ write it into
4689 the code_to_optab table. */
4691 init_optabv (enum rtx_code code
)
4693 optab op
= new_optab ();
4698 /* Conversion optabs never go in the code_to_optab table. */
4699 static inline convert_optab
4700 init_convert_optab (enum rtx_code code
)
4702 convert_optab op
= new_convert_optab ();
4707 /* Initialize the libfunc fields of an entire group of entries in some
4708 optab. Each entry is set equal to a string consisting of a leading
4709 pair of underscores followed by a generic operation name followed by
4710 a mode name (downshifted to lowercase) followed by a single character
4711 representing the number of operands for the given operation (which is
4712 usually one of the characters '2', '3', or '4').
4714 OPTABLE is the table in which libfunc fields are to be initialized.
4715 FIRST_MODE is the first machine mode index in the given optab to
4717 LAST_MODE is the last machine mode index in the given optab to
4719 OPNAME is the generic (string) name of the operation.
4720 SUFFIX is the character which specifies the number of operands for
4721 the given generic operation.
4725 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4726 const char *opname
, int suffix
)
4729 unsigned opname_len
= strlen (opname
);
4731 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4732 mode
= (enum machine_mode
) ((int) mode
+ 1))
4734 const char *mname
= GET_MODE_NAME (mode
);
4735 unsigned mname_len
= strlen (mname
);
4736 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4743 for (q
= opname
; *q
; )
4745 for (q
= mname
; *q
; q
++)
4746 *p
++ = TOLOWER (*q
);
4750 optable
->handlers
[(int) mode
].libfunc
4751 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4755 /* Initialize the libfunc fields of an entire group of entries in some
4756 optab which correspond to all integer mode operations. The parameters
4757 have the same meaning as similarly named ones for the `init_libfuncs'
4758 routine. (See above). */
4761 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4763 int maxsize
= 2*BITS_PER_WORD
;
4764 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4765 maxsize
= LONG_LONG_TYPE_SIZE
;
4766 init_libfuncs (optable
, word_mode
,
4767 mode_for_size (maxsize
, MODE_INT
, 0),
4771 /* Initialize the libfunc fields of an entire group of entries in some
4772 optab which correspond to all real mode operations. The parameters
4773 have the same meaning as similarly named ones for the `init_libfuncs'
4774 routine. (See above). */
4777 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4779 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4782 /* Initialize the libfunc fields of an entire group of entries of an
4783 inter-mode-class conversion optab. The string formation rules are
4784 similar to the ones for init_libfuncs, above, but instead of having
4785 a mode name and an operand count these functions have two mode names
4786 and no operand count. */
4788 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4789 enum mode_class from_class
,
4790 enum mode_class to_class
)
4792 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4793 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4794 size_t opname_len
= strlen (opname
);
4795 size_t max_mname_len
= 0;
4797 enum machine_mode fmode
, tmode
;
4798 const char *fname
, *tname
;
4800 char *libfunc_name
, *suffix
;
4803 for (fmode
= first_from_mode
;
4805 fmode
= GET_MODE_WIDER_MODE (fmode
))
4806 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4808 for (tmode
= first_to_mode
;
4810 tmode
= GET_MODE_WIDER_MODE (tmode
))
4811 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4813 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4814 libfunc_name
[0] = '_';
4815 libfunc_name
[1] = '_';
4816 memcpy (&libfunc_name
[2], opname
, opname_len
);
4817 suffix
= libfunc_name
+ opname_len
+ 2;
4819 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4820 fmode
= GET_MODE_WIDER_MODE (fmode
))
4821 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4822 tmode
= GET_MODE_WIDER_MODE (tmode
))
4824 fname
= GET_MODE_NAME (fmode
);
4825 tname
= GET_MODE_NAME (tmode
);
4828 for (q
= fname
; *q
; p
++, q
++)
4830 for (q
= tname
; *q
; p
++, q
++)
4835 tab
->handlers
[tmode
][fmode
].libfunc
4836 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4841 /* Initialize the libfunc fields of an entire group of entries of an
4842 intra-mode-class conversion optab. The string formation rules are
4843 similar to the ones for init_libfunc, above. WIDENING says whether
4844 the optab goes from narrow to wide modes or vice versa. These functions
4845 have two mode names _and_ an operand count. */
4847 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4848 enum mode_class
class, bool widening
)
4850 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4851 size_t opname_len
= strlen (opname
);
4852 size_t max_mname_len
= 0;
4854 enum machine_mode nmode
, wmode
;
4855 const char *nname
, *wname
;
4857 char *libfunc_name
, *suffix
;
4860 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4861 nmode
= GET_MODE_WIDER_MODE (nmode
))
4862 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4864 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4865 libfunc_name
[0] = '_';
4866 libfunc_name
[1] = '_';
4867 memcpy (&libfunc_name
[2], opname
, opname_len
);
4868 suffix
= libfunc_name
+ opname_len
+ 2;
4870 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4871 nmode
= GET_MODE_WIDER_MODE (nmode
))
4872 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4873 wmode
= GET_MODE_WIDER_MODE (wmode
))
4875 nname
= GET_MODE_NAME (nmode
);
4876 wname
= GET_MODE_NAME (wmode
);
4879 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4881 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4887 tab
->handlers
[widening
? wmode
: nmode
]
4888 [widening
? nmode
: wmode
].libfunc
4889 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4896 init_one_libfunc (const char *name
)
4900 /* Create a FUNCTION_DECL that can be passed to
4901 targetm.encode_section_info. */
4902 /* ??? We don't have any type information except for this is
4903 a function. Pretend this is "int foo()". */
4904 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4905 build_function_type (integer_type_node
, NULL_TREE
));
4906 DECL_ARTIFICIAL (decl
) = 1;
4907 DECL_EXTERNAL (decl
) = 1;
4908 TREE_PUBLIC (decl
) = 1;
4910 symbol
= XEXP (DECL_RTL (decl
), 0);
4912 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4913 are the flags assigned by targetm.encode_section_info. */
4914 SYMBOL_REF_DECL (symbol
) = 0;
4919 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4920 MODE to NAME, which should be either 0 or a string constant. */
4922 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4925 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4927 optable
->handlers
[mode
].libfunc
= 0;
4930 /* Call this to reset the function entry for one conversion optab
4931 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4932 either 0 or a string constant. */
4934 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4935 enum machine_mode fmode
, const char *name
)
4938 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4940 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4943 /* Call this once to initialize the contents of the optabs
4944 appropriately for the current target machine. */
4951 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4953 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4954 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4956 #ifdef HAVE_conditional_move
4957 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4958 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4961 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4963 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4964 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4967 add_optab
= init_optab (PLUS
);
4968 addv_optab
= init_optabv (PLUS
);
4969 sub_optab
= init_optab (MINUS
);
4970 subv_optab
= init_optabv (MINUS
);
4971 smul_optab
= init_optab (MULT
);
4972 smulv_optab
= init_optabv (MULT
);
4973 smul_highpart_optab
= init_optab (UNKNOWN
);
4974 umul_highpart_optab
= init_optab (UNKNOWN
);
4975 smul_widen_optab
= init_optab (UNKNOWN
);
4976 umul_widen_optab
= init_optab (UNKNOWN
);
4977 sdiv_optab
= init_optab (DIV
);
4978 sdivv_optab
= init_optabv (DIV
);
4979 sdivmod_optab
= init_optab (UNKNOWN
);
4980 udiv_optab
= init_optab (UDIV
);
4981 udivmod_optab
= init_optab (UNKNOWN
);
4982 smod_optab
= init_optab (MOD
);
4983 umod_optab
= init_optab (UMOD
);
4984 fmod_optab
= init_optab (UNKNOWN
);
4985 drem_optab
= init_optab (UNKNOWN
);
4986 ftrunc_optab
= init_optab (UNKNOWN
);
4987 and_optab
= init_optab (AND
);
4988 ior_optab
= init_optab (IOR
);
4989 xor_optab
= init_optab (XOR
);
4990 ashl_optab
= init_optab (ASHIFT
);
4991 ashr_optab
= init_optab (ASHIFTRT
);
4992 lshr_optab
= init_optab (LSHIFTRT
);
4993 rotl_optab
= init_optab (ROTATE
);
4994 rotr_optab
= init_optab (ROTATERT
);
4995 smin_optab
= init_optab (SMIN
);
4996 smax_optab
= init_optab (SMAX
);
4997 umin_optab
= init_optab (UMIN
);
4998 umax_optab
= init_optab (UMAX
);
4999 pow_optab
= init_optab (UNKNOWN
);
5000 atan2_optab
= init_optab (UNKNOWN
);
5002 /* These three have codes assigned exclusively for the sake of
5004 mov_optab
= init_optab (SET
);
5005 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5006 cmp_optab
= init_optab (COMPARE
);
5008 ucmp_optab
= init_optab (UNKNOWN
);
5009 tst_optab
= init_optab (UNKNOWN
);
5011 eq_optab
= init_optab (EQ
);
5012 ne_optab
= init_optab (NE
);
5013 gt_optab
= init_optab (GT
);
5014 ge_optab
= init_optab (GE
);
5015 lt_optab
= init_optab (LT
);
5016 le_optab
= init_optab (LE
);
5017 unord_optab
= init_optab (UNORDERED
);
5019 neg_optab
= init_optab (NEG
);
5020 negv_optab
= init_optabv (NEG
);
5021 abs_optab
= init_optab (ABS
);
5022 absv_optab
= init_optabv (ABS
);
5023 addcc_optab
= init_optab (UNKNOWN
);
5024 one_cmpl_optab
= init_optab (NOT
);
5025 ffs_optab
= init_optab (FFS
);
5026 clz_optab
= init_optab (CLZ
);
5027 ctz_optab
= init_optab (CTZ
);
5028 popcount_optab
= init_optab (POPCOUNT
);
5029 parity_optab
= init_optab (PARITY
);
5030 sqrt_optab
= init_optab (SQRT
);
5031 floor_optab
= init_optab (UNKNOWN
);
5032 ceil_optab
= init_optab (UNKNOWN
);
5033 round_optab
= init_optab (UNKNOWN
);
5034 btrunc_optab
= init_optab (UNKNOWN
);
5035 nearbyint_optab
= init_optab (UNKNOWN
);
5036 rint_optab
= init_optab (UNKNOWN
);
5037 sincos_optab
= init_optab (UNKNOWN
);
5038 sin_optab
= init_optab (UNKNOWN
);
5039 asin_optab
= init_optab (UNKNOWN
);
5040 cos_optab
= init_optab (UNKNOWN
);
5041 acos_optab
= init_optab (UNKNOWN
);
5042 exp_optab
= init_optab (UNKNOWN
);
5043 exp10_optab
= init_optab (UNKNOWN
);
5044 exp2_optab
= init_optab (UNKNOWN
);
5045 expm1_optab
= init_optab (UNKNOWN
);
5046 ldexp_optab
= init_optab (UNKNOWN
);
5047 logb_optab
= init_optab (UNKNOWN
);
5048 ilogb_optab
= init_optab (UNKNOWN
);
5049 log_optab
= init_optab (UNKNOWN
);
5050 log10_optab
= init_optab (UNKNOWN
);
5051 log2_optab
= init_optab (UNKNOWN
);
5052 log1p_optab
= init_optab (UNKNOWN
);
5053 tan_optab
= init_optab (UNKNOWN
);
5054 atan_optab
= init_optab (UNKNOWN
);
5055 copysign_optab
= init_optab (UNKNOWN
);
5057 strlen_optab
= init_optab (UNKNOWN
);
5058 cbranch_optab
= init_optab (UNKNOWN
);
5059 cmov_optab
= init_optab (UNKNOWN
);
5060 cstore_optab
= init_optab (UNKNOWN
);
5061 push_optab
= init_optab (UNKNOWN
);
5063 vec_extract_optab
= init_optab (UNKNOWN
);
5064 vec_set_optab
= init_optab (UNKNOWN
);
5065 vec_init_optab
= init_optab (UNKNOWN
);
5066 vec_realign_load_optab
= init_optab (UNKNOWN
);
5067 movmisalign_optab
= init_optab (UNKNOWN
);
5069 powi_optab
= init_optab (UNKNOWN
);
5072 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5073 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5074 trunc_optab
= init_convert_optab (TRUNCATE
);
5075 sfix_optab
= init_convert_optab (FIX
);
5076 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5077 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5078 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5079 sfloat_optab
= init_convert_optab (FLOAT
);
5080 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5082 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5084 movmem_optab
[i
] = CODE_FOR_nothing
;
5085 clrmem_optab
[i
] = CODE_FOR_nothing
;
5086 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5087 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5089 #ifdef HAVE_SECONDARY_RELOADS
5090 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5094 /* Fill in the optabs with the insns we support. */
5097 /* Initialize the optabs with the names of the library functions. */
5098 init_integral_libfuncs (add_optab
, "add", '3');
5099 init_floating_libfuncs (add_optab
, "add", '3');
5100 init_integral_libfuncs (addv_optab
, "addv", '3');
5101 init_floating_libfuncs (addv_optab
, "add", '3');
5102 init_integral_libfuncs (sub_optab
, "sub", '3');
5103 init_floating_libfuncs (sub_optab
, "sub", '3');
5104 init_integral_libfuncs (subv_optab
, "subv", '3');
5105 init_floating_libfuncs (subv_optab
, "sub", '3');
5106 init_integral_libfuncs (smul_optab
, "mul", '3');
5107 init_floating_libfuncs (smul_optab
, "mul", '3');
5108 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5109 init_floating_libfuncs (smulv_optab
, "mul", '3');
5110 init_integral_libfuncs (sdiv_optab
, "div", '3');
5111 init_floating_libfuncs (sdiv_optab
, "div", '3');
5112 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5113 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5114 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5115 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5116 init_integral_libfuncs (smod_optab
, "mod", '3');
5117 init_integral_libfuncs (umod_optab
, "umod", '3');
5118 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5119 init_integral_libfuncs (and_optab
, "and", '3');
5120 init_integral_libfuncs (ior_optab
, "ior", '3');
5121 init_integral_libfuncs (xor_optab
, "xor", '3');
5122 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5123 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5124 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5125 init_integral_libfuncs (smin_optab
, "min", '3');
5126 init_floating_libfuncs (smin_optab
, "min", '3');
5127 init_integral_libfuncs (smax_optab
, "max", '3');
5128 init_floating_libfuncs (smax_optab
, "max", '3');
5129 init_integral_libfuncs (umin_optab
, "umin", '3');
5130 init_integral_libfuncs (umax_optab
, "umax", '3');
5131 init_integral_libfuncs (neg_optab
, "neg", '2');
5132 init_floating_libfuncs (neg_optab
, "neg", '2');
5133 init_integral_libfuncs (negv_optab
, "negv", '2');
5134 init_floating_libfuncs (negv_optab
, "neg", '2');
5135 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5136 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5137 init_integral_libfuncs (clz_optab
, "clz", '2');
5138 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5139 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5140 init_integral_libfuncs (parity_optab
, "parity", '2');
5142 /* Comparison libcalls for integers MUST come in pairs,
5144 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5145 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5146 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5148 /* EQ etc are floating point only. */
5149 init_floating_libfuncs (eq_optab
, "eq", '2');
5150 init_floating_libfuncs (ne_optab
, "ne", '2');
5151 init_floating_libfuncs (gt_optab
, "gt", '2');
5152 init_floating_libfuncs (ge_optab
, "ge", '2');
5153 init_floating_libfuncs (lt_optab
, "lt", '2');
5154 init_floating_libfuncs (le_optab
, "le", '2');
5155 init_floating_libfuncs (unord_optab
, "unord", '2');
5157 init_floating_libfuncs (powi_optab
, "powi", '2');
5160 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5161 MODE_INT
, MODE_FLOAT
);
5162 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5163 MODE_FLOAT
, MODE_INT
);
5164 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5165 MODE_FLOAT
, MODE_INT
);
5167 /* sext_optab is also used for FLOAT_EXTEND. */
5168 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5169 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5171 /* Use cabs for double complex abs, since systems generally have cabs.
5172 Don't define any libcall for float complex, so that cabs will be used. */
5173 if (complex_double_type_node
)
5174 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5175 = init_one_libfunc ("cabs");
5177 /* The ffs function operates on `int'. */
5178 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5179 = init_one_libfunc ("ffs");
5181 abort_libfunc
= init_one_libfunc ("abort");
5182 memcpy_libfunc
= init_one_libfunc ("memcpy");
5183 memmove_libfunc
= init_one_libfunc ("memmove");
5184 memcmp_libfunc
= init_one_libfunc ("memcmp");
5185 memset_libfunc
= init_one_libfunc ("memset");
5186 setbits_libfunc
= init_one_libfunc ("__setbits");
5188 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5189 ? "_Unwind_SjLj_Resume"
5190 : "_Unwind_Resume");
5191 #ifndef DONT_USE_BUILTIN_SETJMP
5192 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5193 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5195 setjmp_libfunc
= init_one_libfunc ("setjmp");
5196 longjmp_libfunc
= init_one_libfunc ("longjmp");
5198 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5199 unwind_sjlj_unregister_libfunc
5200 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5202 /* For function entry/exit instrumentation. */
5203 profile_function_entry_libfunc
5204 = init_one_libfunc ("__cyg_profile_func_enter");
5205 profile_function_exit_libfunc
5206 = init_one_libfunc ("__cyg_profile_func_exit");
5208 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5210 if (HAVE_conditional_trap
)
5211 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5213 /* Allow the target to add more libcalls or rename some, etc. */
5214 targetm
.init_libfuncs ();
5219 /* Print information about the current contents of the optabs on
5223 debug_optab_libfuncs (void)
5229 /* Dump the arithmetic optabs. */
5230 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5231 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5234 struct optab_handlers
*h
;
5237 h
= &o
->handlers
[j
];
5240 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5242 fprintf (stderr
, "%s\t%s:\t%s\n",
5243 GET_RTX_NAME (o
->code
),
5245 XSTR (h
->libfunc
, 0));
5249 /* Dump the conversion optabs. */
5250 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
5251 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5252 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5255 struct optab_handlers
*h
;
5257 o
= &convert_optab_table
[i
];
5258 h
= &o
->handlers
[j
][k
];
5261 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5263 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5264 GET_RTX_NAME (o
->code
),
5267 XSTR (h
->libfunc
, 0));
5275 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5276 CODE. Return 0 on failure. */
5279 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5280 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5282 enum machine_mode mode
= GET_MODE (op1
);
5283 enum insn_code icode
;
5286 if (!HAVE_conditional_trap
)
5289 if (mode
== VOIDmode
)
5292 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5293 if (icode
== CODE_FOR_nothing
)
5297 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5298 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5304 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5306 PUT_CODE (trap_rtx
, code
);
5307 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5311 insn
= get_insns ();
5318 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5319 or unsigned operation code. */
5321 static enum rtx_code
5322 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5334 code
= unsignedp
? LTU
: LT
;
5337 code
= unsignedp
? LEU
: LE
;
5340 code
= unsignedp
? GTU
: GT
;
5343 code
= unsignedp
? GEU
: GE
;
5346 case UNORDERED_EXPR
:
5377 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5378 unsigned operators. Do not generate compare instruction. */
5381 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5383 enum rtx_code rcode
;
5385 rtx rtx_op0
, rtx_op1
;
5387 if (!COMPARISON_CLASS_P (cond
))
5389 /* This is unlikely. While generating VEC_COND_EXPR,
5390 auto vectorizer ensures that condition is a relational
5396 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5397 t_op0
= TREE_OPERAND (cond
, 0);
5398 t_op1
= TREE_OPERAND (cond
, 1);
5401 /* Expand operands. */
5402 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5403 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5405 if (!(*insn_data
[icode
].operand
[4].predicate
) (rtx_op0
, GET_MODE (rtx_op0
))
5406 && GET_MODE (rtx_op0
) != VOIDmode
)
5407 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5409 if (!(*insn_data
[icode
].operand
[5].predicate
) (rtx_op1
, GET_MODE (rtx_op1
))
5410 && GET_MODE (rtx_op1
) != VOIDmode
)
5411 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5413 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5416 /* Return insn code for VEC_COND_EXPR EXPR. */
5418 static inline enum insn_code
5419 get_vcond_icode (tree expr
, enum machine_mode mode
)
5421 enum insn_code icode
= CODE_FOR_nothing
;
5423 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5424 icode
= vcondu_gen_code
[mode
];
5426 icode
= vcond_gen_code
[mode
];
5430 /* Return TRUE iff, appropriate vector insns are available
5431 for vector cond expr expr in VMODE mode. */
5434 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5436 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5441 /* Generate insns for VEC_COND_EXPR. */
5444 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5446 enum insn_code icode
;
5447 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5448 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5449 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5451 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5452 if (icode
== CODE_FOR_nothing
)
5456 target
= gen_reg_rtx (mode
);
5458 /* Get comparison rtx. First expand both cond expr operands. */
5459 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5461 cc_op0
= XEXP (comparison
, 0);
5462 cc_op1
= XEXP (comparison
, 1);
5463 /* Expand both operands and force them in reg, if required. */
5464 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5465 NULL_RTX
, VOIDmode
, 1);
5466 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5467 && mode
!= VOIDmode
)
5468 rtx_op1
= force_reg (mode
, rtx_op1
);
5470 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5471 NULL_RTX
, VOIDmode
, 1);
5472 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5473 && mode
!= VOIDmode
)
5474 rtx_op2
= force_reg (mode
, rtx_op2
);
5476 /* Emit instruction! */
5477 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5478 comparison
, cc_op0
, cc_op1
));
5482 #include "gt-optabs.h"