1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
87 /* The insn generating function can not take an rtx_code argument.
88 TRAP_RTX is used as an rtx argument. Its code is replaced with
89 the code to be used in the trap insn and all other fields are ignored. */
90 static GTY(()) rtx trap_rtx
;
92 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
93 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
95 static int expand_cmplxdiv_straight (rtx
, rtx
, rtx
, rtx
, rtx
, rtx
,
96 enum machine_mode
, int,
97 enum optab_methods
, enum mode_class
,
99 static int expand_cmplxdiv_wide (rtx
, rtx
, rtx
, rtx
, rtx
, rtx
,
100 enum machine_mode
, int, enum optab_methods
,
101 enum mode_class
, optab
);
102 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
103 enum machine_mode
*, int *,
104 enum can_compare_purpose
);
105 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
107 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
108 static rtx
ftruncify (rtx
);
109 static optab
new_optab (void);
110 static convert_optab
new_convert_optab (void);
111 static inline optab
init_optab (enum rtx_code
);
112 static inline optab
init_optabv (enum rtx_code
);
113 static inline convert_optab
init_convert_optab (enum rtx_code
);
114 static void init_libfuncs (optab
, int, int, const char *, int);
115 static void init_integral_libfuncs (optab
, const char *, int);
116 static void init_floating_libfuncs (optab
, const char *, int);
117 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, enum mode_class
);
119 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
120 enum mode_class
, bool);
121 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
122 enum rtx_code
, int, rtx
);
123 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
124 enum machine_mode
*, int *);
125 static rtx
expand_vector_binop (enum machine_mode
, optab
, rtx
, rtx
, rtx
, int,
127 static rtx
expand_vector_unop (enum machine_mode
, optab
, rtx
, rtx
, int);
128 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
129 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
131 #ifndef HAVE_conditional_trap
132 #define HAVE_conditional_trap 0
133 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
136 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
137 the result of operation CODE applied to OP0 (and OP1 if it is a binary
140 If the last insn does not set TARGET, don't do anything, but return 1.
142 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
143 don't add the REG_EQUAL note but return 0. Our caller can then try
144 again, ensuring that TARGET is not one of the operands. */
147 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
149 rtx last_insn
, insn
, set
;
154 || NEXT_INSN (insns
) == NULL_RTX
)
157 if (GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
158 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
194 if (GET_RTX_CLASS (code
) == '1')
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
242 /* Generate code to perform a straightforward complex divide. */
245 expand_cmplxdiv_straight (rtx real0
, rtx real1
, rtx imag0
, rtx imag1
,
246 rtx realr
, rtx imagr
, enum machine_mode submode
,
247 int unsignedp
, enum optab_methods methods
,
248 enum mode_class
class, optab binoptab
)
254 optab this_add_optab
= add_optab
;
255 optab this_sub_optab
= sub_optab
;
256 optab this_neg_optab
= neg_optab
;
257 optab this_mul_optab
= smul_optab
;
259 if (binoptab
== sdivv_optab
)
261 this_add_optab
= addv_optab
;
262 this_sub_optab
= subv_optab
;
263 this_neg_optab
= negv_optab
;
264 this_mul_optab
= smulv_optab
;
267 /* Don't fetch these from memory more than once. */
268 real0
= force_reg (submode
, real0
);
269 real1
= force_reg (submode
, real1
);
272 imag0
= force_reg (submode
, imag0
);
274 imag1
= force_reg (submode
, imag1
);
276 /* Divisor: c*c + d*d. */
277 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
278 NULL_RTX
, unsignedp
, methods
);
280 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
281 NULL_RTX
, unsignedp
, methods
);
283 if (temp1
== 0 || temp2
== 0)
286 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
287 NULL_RTX
, unsignedp
, methods
);
293 /* Mathematically, ((a)(c-id))/divisor. */
294 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
296 /* Calculate the dividend. */
297 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
298 NULL_RTX
, unsignedp
, methods
);
300 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
301 NULL_RTX
, unsignedp
, methods
);
303 if (real_t
== 0 || imag_t
== 0)
306 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
307 NULL_RTX
, unsignedp
);
311 /* Mathematically, ((a+ib)(c-id))/divider. */
312 /* Calculate the dividend. */
313 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
314 NULL_RTX
, unsignedp
, methods
);
316 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
317 NULL_RTX
, unsignedp
, methods
);
319 if (temp1
== 0 || temp2
== 0)
322 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
323 NULL_RTX
, unsignedp
, methods
);
325 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
326 NULL_RTX
, unsignedp
, methods
);
328 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
329 NULL_RTX
, unsignedp
, methods
);
331 if (temp1
== 0 || temp2
== 0)
334 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
335 NULL_RTX
, unsignedp
, methods
);
337 if (real_t
== 0 || imag_t
== 0)
341 if (class == MODE_COMPLEX_FLOAT
)
342 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
343 realr
, unsignedp
, methods
);
345 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
346 real_t
, divisor
, realr
, unsignedp
);
352 emit_move_insn (realr
, res
);
354 if (class == MODE_COMPLEX_FLOAT
)
355 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
356 imagr
, unsignedp
, methods
);
358 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
359 imag_t
, divisor
, imagr
, unsignedp
);
365 emit_move_insn (imagr
, res
);
370 /* Generate code to perform a wide-input-range-acceptable complex divide. */
373 expand_cmplxdiv_wide (rtx real0
, rtx real1
, rtx imag0
, rtx imag1
, rtx realr
,
374 rtx imagr
, enum machine_mode submode
, int unsignedp
,
375 enum optab_methods methods
, enum mode_class
class,
380 rtx temp1
, temp2
, lab1
, lab2
;
381 enum machine_mode mode
;
383 optab this_add_optab
= add_optab
;
384 optab this_sub_optab
= sub_optab
;
385 optab this_neg_optab
= neg_optab
;
386 optab this_mul_optab
= smul_optab
;
388 if (binoptab
== sdivv_optab
)
390 this_add_optab
= addv_optab
;
391 this_sub_optab
= subv_optab
;
392 this_neg_optab
= negv_optab
;
393 this_mul_optab
= smulv_optab
;
396 /* Don't fetch these from memory more than once. */
397 real0
= force_reg (submode
, real0
);
398 real1
= force_reg (submode
, real1
);
401 imag0
= force_reg (submode
, imag0
);
403 imag1
= force_reg (submode
, imag1
);
405 /* XXX What's an "unsigned" complex number? */
413 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
414 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
417 if (temp1
== 0 || temp2
== 0)
420 mode
= GET_MODE (temp1
);
421 lab1
= gen_label_rtx ();
422 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
423 mode
, unsignedp
, lab1
);
425 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
427 if (class == MODE_COMPLEX_FLOAT
)
428 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
429 NULL_RTX
, unsignedp
, methods
);
431 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
432 imag1
, real1
, NULL_RTX
, unsignedp
);
437 /* Calculate divisor. */
439 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
440 NULL_RTX
, unsignedp
, methods
);
445 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
446 NULL_RTX
, unsignedp
, methods
);
451 /* Calculate dividend. */
457 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
459 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
460 NULL_RTX
, unsignedp
, methods
);
465 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
466 NULL_RTX
, unsignedp
);
468 if (real_t
== 0 || imag_t
== 0)
473 /* Compute (a+ib)/(c+id) as
474 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
476 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
477 NULL_RTX
, unsignedp
, methods
);
482 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
483 NULL_RTX
, unsignedp
, methods
);
485 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
486 NULL_RTX
, unsignedp
, methods
);
491 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
492 NULL_RTX
, unsignedp
, methods
);
494 if (real_t
== 0 || imag_t
== 0)
498 if (class == MODE_COMPLEX_FLOAT
)
499 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
500 realr
, unsignedp
, methods
);
502 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
503 real_t
, divisor
, realr
, unsignedp
);
509 emit_move_insn (realr
, res
);
511 if (class == MODE_COMPLEX_FLOAT
)
512 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
513 imagr
, unsignedp
, methods
);
515 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
516 imag_t
, divisor
, imagr
, unsignedp
);
522 emit_move_insn (imagr
, res
);
524 lab2
= gen_label_rtx ();
525 emit_jump_insn (gen_jump (lab2
));
530 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
532 if (class == MODE_COMPLEX_FLOAT
)
533 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
534 NULL_RTX
, unsignedp
, methods
);
536 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
537 real1
, imag1
, NULL_RTX
, unsignedp
);
542 /* Calculate divisor. */
544 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
545 NULL_RTX
, unsignedp
, methods
);
550 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
551 NULL_RTX
, unsignedp
, methods
);
556 /* Calculate dividend. */
560 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
562 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
563 NULL_RTX
, unsignedp
, methods
);
565 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
566 NULL_RTX
, unsignedp
);
568 if (real_t
== 0 || imag_t
== 0)
573 /* Compute (a+ib)/(c+id) as
574 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
576 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
577 NULL_RTX
, unsignedp
, methods
);
582 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
583 NULL_RTX
, unsignedp
, methods
);
585 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
586 NULL_RTX
, unsignedp
, methods
);
591 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
592 NULL_RTX
, unsignedp
, methods
);
594 if (real_t
== 0 || imag_t
== 0)
598 if (class == MODE_COMPLEX_FLOAT
)
599 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
600 realr
, unsignedp
, methods
);
602 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
603 real_t
, divisor
, realr
, unsignedp
);
609 emit_move_insn (realr
, res
);
611 if (class == MODE_COMPLEX_FLOAT
)
612 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
613 imagr
, unsignedp
, methods
);
615 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
616 imag_t
, divisor
, imagr
, unsignedp
);
622 emit_move_insn (imagr
, res
);
629 /* Wrapper around expand_binop which takes an rtx code to specify
630 the operation to perform, not an optab pointer. All other
631 arguments are the same. */
633 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
634 rtx op1
, rtx target
, int unsignedp
,
635 enum optab_methods methods
)
637 optab binop
= code_to_optab
[(int) code
];
641 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
644 /* Generate code to perform an operation specified by BINOPTAB
645 on operands OP0 and OP1, with result having machine-mode MODE.
647 UNSIGNEDP is for the case where we have to widen the operands
648 to perform the operation. It says to use zero-extension.
650 If TARGET is nonzero, the value
651 is generated there, if it is convenient to do so.
652 In all cases an rtx is returned for the locus of the value;
653 this may or may not be TARGET. */
656 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
657 rtx target
, int unsignedp
, enum optab_methods methods
)
659 enum optab_methods next_methods
660 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
661 ? OPTAB_WIDEN
: methods
);
662 enum mode_class
class;
663 enum machine_mode wider_mode
;
665 int commutative_op
= 0;
666 int shift_op
= (binoptab
->code
== ASHIFT
667 || binoptab
->code
== ASHIFTRT
668 || binoptab
->code
== LSHIFTRT
669 || binoptab
->code
== ROTATE
670 || binoptab
->code
== ROTATERT
);
671 rtx entry_last
= get_last_insn ();
674 class = GET_MODE_CLASS (mode
);
676 op0
= protect_from_queue (op0
, 0);
677 op1
= protect_from_queue (op1
, 0);
679 target
= protect_from_queue (target
, 1);
683 /* Load duplicate non-volatile operands once. */
684 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
686 op0
= force_not_mem (op0
);
691 op0
= force_not_mem (op0
);
692 op1
= force_not_mem (op1
);
696 /* If subtracting an integer constant, convert this into an addition of
697 the negated constant. */
699 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
701 op1
= negate_rtx (mode
, op1
);
702 binoptab
= add_optab
;
705 /* If we are inside an appropriately-short loop and one operand is an
706 expensive constant, force it into a register. */
707 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
708 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
709 op0
= force_reg (mode
, op0
);
711 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
712 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
713 op1
= force_reg (mode
, op1
);
715 /* Record where to delete back to if we backtrack. */
716 last
= get_last_insn ();
718 /* If operation is commutative,
719 try to make the first operand a register.
720 Even better, try to make it the same as the target.
721 Also try to make the last operand a constant. */
722 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
723 || binoptab
== smul_widen_optab
724 || binoptab
== umul_widen_optab
725 || binoptab
== smul_highpart_optab
726 || binoptab
== umul_highpart_optab
)
730 if (((target
== 0 || GET_CODE (target
) == REG
)
731 ? ((GET_CODE (op1
) == REG
732 && GET_CODE (op0
) != REG
)
734 : rtx_equal_p (op1
, target
))
735 || GET_CODE (op0
) == CONST_INT
)
743 /* If we can do it with a three-operand insn, do so. */
745 if (methods
!= OPTAB_MUST_WIDEN
746 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
748 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
749 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
750 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
752 rtx xop0
= op0
, xop1
= op1
;
757 temp
= gen_reg_rtx (mode
);
759 /* If it is a commutative operator and the modes would match
760 if we would swap the operands, we can save the conversions. */
763 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
764 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
768 tmp
= op0
; op0
= op1
; op1
= tmp
;
769 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
773 /* In case the insn wants input operands in modes different from
774 those of the actual operands, convert the operands. It would
775 seem that we don't need to convert CONST_INTs, but we do, so
776 that they're properly zero-extended, sign-extended or truncated
779 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
780 xop0
= convert_modes (mode0
,
781 GET_MODE (op0
) != VOIDmode
786 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
787 xop1
= convert_modes (mode1
,
788 GET_MODE (op1
) != VOIDmode
793 /* Now, if insn's predicates don't allow our operands, put them into
796 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
797 && mode0
!= VOIDmode
)
798 xop0
= copy_to_mode_reg (mode0
, xop0
);
800 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
801 && mode1
!= VOIDmode
)
802 xop1
= copy_to_mode_reg (mode1
, xop1
);
804 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
805 temp
= gen_reg_rtx (mode
);
807 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
810 /* If PAT is composed of more than one insn, try to add an appropriate
811 REG_EQUAL note to it. If we can't because TEMP conflicts with an
812 operand, call ourselves again, this time without a target. */
813 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
814 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
816 delete_insns_since (last
);
817 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
825 delete_insns_since (last
);
828 /* If this is a multiply, see if we can do a widening operation that
829 takes operands of this mode and makes a wider mode. */
831 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
832 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
833 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
834 != CODE_FOR_nothing
))
836 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
837 unsignedp
? umul_widen_optab
: smul_widen_optab
,
838 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
842 if (GET_MODE_CLASS (mode
) == MODE_INT
)
843 return gen_lowpart (mode
, temp
);
845 return convert_to_mode (mode
, temp
, unsignedp
);
849 /* Look for a wider mode of the same class for which we think we
850 can open-code the operation. Check for a widening multiply at the
851 wider mode as well. */
853 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
854 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
855 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
856 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
858 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
859 || (binoptab
== smul_optab
860 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
861 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
862 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
863 != CODE_FOR_nothing
)))
865 rtx xop0
= op0
, xop1
= op1
;
868 /* For certain integer operations, we need not actually extend
869 the narrow operands, as long as we will truncate
870 the results to the same narrowness. */
872 if ((binoptab
== ior_optab
|| binoptab
== and_optab
873 || binoptab
== xor_optab
874 || binoptab
== add_optab
|| binoptab
== sub_optab
875 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
876 && class == MODE_INT
)
879 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
881 /* The second operand of a shift must always be extended. */
882 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
883 no_extend
&& binoptab
!= ashl_optab
);
885 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
886 unsignedp
, OPTAB_DIRECT
);
889 if (class != MODE_INT
)
892 target
= gen_reg_rtx (mode
);
893 convert_move (target
, temp
, 0);
897 return gen_lowpart (mode
, temp
);
900 delete_insns_since (last
);
904 /* These can be done a word at a time. */
905 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
907 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
908 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
914 /* If TARGET is the same as one of the operands, the REG_EQUAL note
915 won't be accurate, so use a new target. */
916 if (target
== 0 || target
== op0
|| target
== op1
)
917 target
= gen_reg_rtx (mode
);
921 /* Do the actual arithmetic. */
922 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
924 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
925 rtx x
= expand_binop (word_mode
, binoptab
,
926 operand_subword_force (op0
, i
, mode
),
927 operand_subword_force (op1
, i
, mode
),
928 target_piece
, unsignedp
, next_methods
);
933 if (target_piece
!= x
)
934 emit_move_insn (target_piece
, x
);
937 insns
= get_insns ();
940 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
942 if (binoptab
->code
!= UNKNOWN
)
944 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
945 copy_rtx (op0
), copy_rtx (op1
));
949 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
954 /* Synthesize double word shifts from single word shifts. */
955 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
956 || binoptab
== ashr_optab
)
958 && GET_CODE (op1
) == CONST_INT
959 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
960 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
961 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
962 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
964 rtx insns
, inter
, equiv_value
;
965 rtx into_target
, outof_target
;
966 rtx into_input
, outof_input
;
967 int shift_count
, left_shift
, outof_word
;
969 /* If TARGET is the same as one of the operands, the REG_EQUAL note
970 won't be accurate, so use a new target. */
971 if (target
== 0 || target
== op0
|| target
== op1
)
972 target
= gen_reg_rtx (mode
);
976 shift_count
= INTVAL (op1
);
978 /* OUTOF_* is the word we are shifting bits away from, and
979 INTO_* is the word that we are shifting bits towards, thus
980 they differ depending on the direction of the shift and
983 left_shift
= binoptab
== ashl_optab
;
984 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
986 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
987 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
989 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
990 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
992 if (shift_count
>= BITS_PER_WORD
)
994 inter
= expand_binop (word_mode
, binoptab
,
996 GEN_INT (shift_count
- BITS_PER_WORD
),
997 into_target
, unsignedp
, next_methods
);
999 if (inter
!= 0 && inter
!= into_target
)
1000 emit_move_insn (into_target
, inter
);
1002 /* For a signed right shift, we must fill the word we are shifting
1003 out of with copies of the sign bit. Otherwise it is zeroed. */
1004 if (inter
!= 0 && binoptab
!= ashr_optab
)
1005 inter
= CONST0_RTX (word_mode
);
1006 else if (inter
!= 0)
1007 inter
= expand_binop (word_mode
, binoptab
,
1009 GEN_INT (BITS_PER_WORD
- 1),
1010 outof_target
, unsignedp
, next_methods
);
1012 if (inter
!= 0 && inter
!= outof_target
)
1013 emit_move_insn (outof_target
, inter
);
1018 optab reverse_unsigned_shift
, unsigned_shift
;
1020 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1021 we must do a logical shift in the opposite direction of the
1024 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1026 /* For a shift of less than BITS_PER_WORD, to compute the word
1027 shifted towards, we need to unsigned shift the orig value of
1030 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1032 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1034 GEN_INT (BITS_PER_WORD
- shift_count
),
1035 0, unsignedp
, next_methods
);
1040 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1041 op1
, 0, unsignedp
, next_methods
);
1044 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1045 into_target
, unsignedp
, next_methods
);
1047 if (inter
!= 0 && inter
!= into_target
)
1048 emit_move_insn (into_target
, inter
);
1051 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1052 op1
, outof_target
, unsignedp
, next_methods
);
1054 if (inter
!= 0 && inter
!= outof_target
)
1055 emit_move_insn (outof_target
, inter
);
1058 insns
= get_insns ();
1063 if (binoptab
->code
!= UNKNOWN
)
1064 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1068 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1073 /* Synthesize double word rotates from single word shifts. */
1074 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1075 && class == MODE_INT
1076 && GET_CODE (op1
) == CONST_INT
1077 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1078 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1079 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1081 rtx insns
, equiv_value
;
1082 rtx into_target
, outof_target
;
1083 rtx into_input
, outof_input
;
1085 int shift_count
, left_shift
, outof_word
;
1087 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1088 won't be accurate, so use a new target. */
1089 if (target
== 0 || target
== op0
|| target
== op1
)
1090 target
= gen_reg_rtx (mode
);
1094 shift_count
= INTVAL (op1
);
1096 /* OUTOF_* is the word we are shifting bits away from, and
1097 INTO_* is the word that we are shifting bits towards, thus
1098 they differ depending on the direction of the shift and
1099 WORDS_BIG_ENDIAN. */
1101 left_shift
= (binoptab
== rotl_optab
);
1102 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1104 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1105 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1107 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1108 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1110 if (shift_count
== BITS_PER_WORD
)
1112 /* This is just a word swap. */
1113 emit_move_insn (outof_target
, into_input
);
1114 emit_move_insn (into_target
, outof_input
);
1119 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1120 rtx first_shift_count
, second_shift_count
;
1121 optab reverse_unsigned_shift
, unsigned_shift
;
1123 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1124 ? lshr_optab
: ashl_optab
);
1126 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1127 ? ashl_optab
: lshr_optab
);
1129 if (shift_count
> BITS_PER_WORD
)
1131 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1132 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1136 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1137 second_shift_count
= GEN_INT (shift_count
);
1140 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1141 outof_input
, first_shift_count
,
1142 NULL_RTX
, unsignedp
, next_methods
);
1143 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1144 into_input
, second_shift_count
,
1145 NULL_RTX
, unsignedp
, next_methods
);
1147 if (into_temp1
!= 0 && into_temp2
!= 0)
1148 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1149 into_target
, unsignedp
, next_methods
);
1153 if (inter
!= 0 && inter
!= into_target
)
1154 emit_move_insn (into_target
, inter
);
1156 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1157 into_input
, first_shift_count
,
1158 NULL_RTX
, unsignedp
, next_methods
);
1159 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1160 outof_input
, second_shift_count
,
1161 NULL_RTX
, unsignedp
, next_methods
);
1163 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1164 inter
= expand_binop (word_mode
, ior_optab
,
1165 outof_temp1
, outof_temp2
,
1166 outof_target
, unsignedp
, next_methods
);
1168 if (inter
!= 0 && inter
!= outof_target
)
1169 emit_move_insn (outof_target
, inter
);
1172 insns
= get_insns ();
1177 if (binoptab
->code
!= UNKNOWN
)
1178 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1182 /* We can't make this a no conflict block if this is a word swap,
1183 because the word swap case fails if the input and output values
1184 are in the same register. */
1185 if (shift_count
!= BITS_PER_WORD
)
1186 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1195 /* These can be done a word at a time by propagating carries. */
1196 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1197 && class == MODE_INT
1198 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1199 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1202 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1203 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1204 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1205 rtx xop0
, xop1
, xtarget
;
1207 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1208 value is one of those, use it. Otherwise, use 1 since it is the
1209 one easiest to get. */
1210 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1211 int normalizep
= STORE_FLAG_VALUE
;
1216 /* Prepare the operands. */
1217 xop0
= force_reg (mode
, op0
);
1218 xop1
= force_reg (mode
, op1
);
1220 xtarget
= gen_reg_rtx (mode
);
1222 if (target
== 0 || GET_CODE (target
) != REG
)
1225 /* Indicate for flow that the entire target reg is being set. */
1226 if (GET_CODE (target
) == REG
)
1227 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1229 /* Do the actual arithmetic. */
1230 for (i
= 0; i
< nwords
; i
++)
1232 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1233 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1234 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1235 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1238 /* Main add/subtract of the input operands. */
1239 x
= expand_binop (word_mode
, binoptab
,
1240 op0_piece
, op1_piece
,
1241 target_piece
, unsignedp
, next_methods
);
1247 /* Store carry from main add/subtract. */
1248 carry_out
= gen_reg_rtx (word_mode
);
1249 carry_out
= emit_store_flag_force (carry_out
,
1250 (binoptab
== add_optab
1253 word_mode
, 1, normalizep
);
1260 /* Add/subtract previous carry to main result. */
1261 newx
= expand_binop (word_mode
,
1262 normalizep
== 1 ? binoptab
: otheroptab
,
1264 NULL_RTX
, 1, next_methods
);
1268 /* Get out carry from adding/subtracting carry in. */
1269 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1270 carry_tmp
= emit_store_flag_force (carry_tmp
,
1271 (binoptab
== add_optab
1274 word_mode
, 1, normalizep
);
1276 /* Logical-ior the two poss. carry together. */
1277 carry_out
= expand_binop (word_mode
, ior_optab
,
1278 carry_out
, carry_tmp
,
1279 carry_out
, 0, next_methods
);
1283 emit_move_insn (target_piece
, newx
);
1286 carry_in
= carry_out
;
1289 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1291 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1292 || ! rtx_equal_p (target
, xtarget
))
1294 rtx temp
= emit_move_insn (target
, xtarget
);
1296 set_unique_reg_note (temp
,
1298 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1309 delete_insns_since (last
);
1312 /* If we want to multiply two two-word values and have normal and widening
1313 multiplies of single-word values, we can do this with three smaller
1314 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1315 because we are not operating on one word at a time.
1317 The multiplication proceeds as follows:
1318 _______________________
1319 [__op0_high_|__op0_low__]
1320 _______________________
1321 * [__op1_high_|__op1_low__]
1322 _______________________________________________
1323 _______________________
1324 (1) [__op0_low__*__op1_low__]
1325 _______________________
1326 (2a) [__op0_low__*__op1_high_]
1327 _______________________
1328 (2b) [__op0_high_*__op1_low__]
1329 _______________________
1330 (3) [__op0_high_*__op1_high_]
1333 This gives a 4-word result. Since we are only interested in the
1334 lower 2 words, partial result (3) and the upper words of (2a) and
1335 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1336 calculated using non-widening multiplication.
1338 (1), however, needs to be calculated with an unsigned widening
1339 multiplication. If this operation is not directly supported we
1340 try using a signed widening multiplication and adjust the result.
1341 This adjustment works as follows:
1343 If both operands are positive then no adjustment is needed.
1345 If the operands have different signs, for example op0_low < 0 and
1346 op1_low >= 0, the instruction treats the most significant bit of
1347 op0_low as a sign bit instead of a bit with significance
1348 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1349 with 2**BITS_PER_WORD - op0_low, and two's complements the
1350 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1353 Similarly, if both operands are negative, we need to add
1354 (op0_low + op1_low) * 2**BITS_PER_WORD.
1356 We use a trick to adjust quickly. We logically shift op0_low right
1357 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1358 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1359 logical shift exists, we do an arithmetic right shift and subtract
1362 if (binoptab
== smul_optab
1363 && class == MODE_INT
1364 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1365 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1366 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1367 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1368 != CODE_FOR_nothing
)
1369 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1370 != CODE_FOR_nothing
)))
1372 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1373 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1374 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1375 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1376 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1377 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1379 rtx op0_xhigh
= NULL_RTX
;
1380 rtx op1_xhigh
= NULL_RTX
;
1382 /* If the target is the same as one of the inputs, don't use it. This
1383 prevents problems with the REG_EQUAL note. */
1384 if (target
== op0
|| target
== op1
1385 || (target
!= 0 && GET_CODE (target
) != REG
))
1388 /* Multiply the two lower words to get a double-word product.
1389 If unsigned widening multiplication is available, use that;
1390 otherwise use the signed form and compensate. */
1392 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1394 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1395 target
, 1, OPTAB_DIRECT
);
1397 /* If we didn't succeed, delete everything we did so far. */
1399 delete_insns_since (last
);
1401 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1405 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1406 != CODE_FOR_nothing
)
1408 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1409 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1410 target
, 1, OPTAB_DIRECT
);
1411 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1412 NULL_RTX
, 1, next_methods
);
1414 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1415 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1418 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1419 NULL_RTX
, 0, next_methods
);
1421 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1422 op0_xhigh
, op0_xhigh
, 0,
1426 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1427 NULL_RTX
, 1, next_methods
);
1429 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1430 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1433 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1434 NULL_RTX
, 0, next_methods
);
1436 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1437 op1_xhigh
, op1_xhigh
, 0,
1442 /* If we have been able to directly compute the product of the
1443 low-order words of the operands and perform any required adjustments
1444 of the operands, we proceed by trying two more multiplications
1445 and then computing the appropriate sum.
1447 We have checked above that the required addition is provided.
1448 Full-word addition will normally always succeed, especially if
1449 it is provided at all, so we don't worry about its failure. The
1450 multiplication may well fail, however, so we do handle that. */
1452 if (product
&& op0_xhigh
&& op1_xhigh
)
1454 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1455 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1456 NULL_RTX
, 0, OPTAB_DIRECT
);
1458 if (!REG_P (product_high
))
1459 product_high
= force_reg (word_mode
, product_high
);
1462 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1463 product_high
, 0, next_methods
);
1465 if (temp
!= 0 && temp
!= product_high
)
1466 emit_move_insn (product_high
, temp
);
1469 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1470 NULL_RTX
, 0, OPTAB_DIRECT
);
1473 temp
= expand_binop (word_mode
, add_optab
, temp
,
1474 product_high
, product_high
,
1477 if (temp
!= 0 && temp
!= product_high
)
1478 emit_move_insn (product_high
, temp
);
1480 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1484 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1486 temp
= emit_move_insn (product
, product
);
1487 set_unique_reg_note (temp
,
1489 gen_rtx_fmt_ee (MULT
, mode
,
1498 /* If we get here, we couldn't do it for some reason even though we
1499 originally thought we could. Delete anything we've emitted in
1502 delete_insns_since (last
);
1505 /* Open-code the vector operations if we have no hardware support
1507 if (class == MODE_VECTOR_INT
|| class == MODE_VECTOR_FLOAT
)
1508 return expand_vector_binop (mode
, binoptab
, op0
, op1
, target
,
1509 unsignedp
, methods
);
1511 /* We need to open-code the complex type operations: '+, -, * and /' */
1513 /* At this point we allow operations between two similar complex
1514 numbers, and also if one of the operands is not a complex number
1515 but rather of MODE_FLOAT or MODE_INT. However, the caller
1516 must make sure that the MODE of the non-complex operand matches
1517 the SUBMODE of the complex operand. */
1519 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1521 rtx real0
= 0, imag0
= 0;
1522 rtx real1
= 0, imag1
= 0;
1523 rtx realr
, imagr
, res
;
1528 /* Find the correct mode for the real and imaginary parts. */
1529 enum machine_mode submode
= GET_MODE_INNER(mode
);
1531 if (submode
== BLKmode
)
1535 target
= gen_reg_rtx (mode
);
1539 realr
= gen_realpart (submode
, target
);
1540 imagr
= gen_imagpart (submode
, target
);
1542 if (GET_MODE (op0
) == mode
)
1544 real0
= gen_realpart (submode
, op0
);
1545 imag0
= gen_imagpart (submode
, op0
);
1550 if (GET_MODE (op1
) == mode
)
1552 real1
= gen_realpart (submode
, op1
);
1553 imag1
= gen_imagpart (submode
, op1
);
1558 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0 || imag1
!= 0))
1561 switch (binoptab
->code
)
1564 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1566 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1567 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1568 realr
, unsignedp
, methods
);
1572 else if (res
!= realr
)
1573 emit_move_insn (realr
, res
);
1575 if (imag0
!= 0 && imag1
!= 0)
1576 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1577 imagr
, unsignedp
, methods
);
1578 else if (imag0
!= 0)
1580 else if (binoptab
->code
== MINUS
)
1581 res
= expand_unop (submode
,
1582 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1583 imag1
, imagr
, unsignedp
);
1589 else if (res
!= imagr
)
1590 emit_move_insn (imagr
, res
);
1596 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1598 if (imag0
!= 0 && imag1
!= 0)
1602 /* Don't fetch these from memory more than once. */
1603 real0
= force_reg (submode
, real0
);
1604 real1
= force_reg (submode
, real1
);
1605 imag0
= force_reg (submode
, imag0
);
1606 imag1
= force_reg (submode
, imag1
);
1608 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1609 unsignedp
, methods
);
1611 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1612 unsignedp
, methods
);
1614 if (temp1
== 0 || temp2
== 0)
1619 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1620 temp1
, temp2
, realr
, unsignedp
, methods
));
1624 else if (res
!= realr
)
1625 emit_move_insn (realr
, res
);
1627 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1628 NULL_RTX
, unsignedp
, methods
);
1630 /* Avoid expanding redundant multiplication for the common
1631 case of squaring a complex number. */
1632 if (rtx_equal_p (real0
, real1
) && rtx_equal_p (imag0
, imag1
))
1635 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1636 NULL_RTX
, unsignedp
, methods
);
1638 if (temp1
== 0 || temp2
== 0)
1643 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1644 temp1
, temp2
, imagr
, unsignedp
, methods
));
1648 else if (res
!= imagr
)
1649 emit_move_insn (imagr
, res
);
1655 /* Don't fetch these from memory more than once. */
1656 real0
= force_reg (submode
, real0
);
1657 real1
= force_reg (submode
, real1
);
1659 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1660 realr
, unsignedp
, methods
);
1663 else if (res
!= realr
)
1664 emit_move_insn (realr
, res
);
1667 res
= expand_binop (submode
, binoptab
,
1668 real1
, imag0
, imagr
, unsignedp
, methods
);
1670 res
= expand_binop (submode
, binoptab
,
1671 real0
, imag1
, imagr
, unsignedp
, methods
);
1675 else if (res
!= imagr
)
1676 emit_move_insn (imagr
, res
);
1683 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1687 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1689 /* Don't fetch these from memory more than once. */
1690 real1
= force_reg (submode
, real1
);
1692 /* Simply divide the real and imaginary parts by `c' */
1693 if (class == MODE_COMPLEX_FLOAT
)
1694 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1695 realr
, unsignedp
, methods
);
1697 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1698 real0
, real1
, realr
, unsignedp
);
1702 else if (res
!= realr
)
1703 emit_move_insn (realr
, res
);
1705 if (class == MODE_COMPLEX_FLOAT
)
1706 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1707 imagr
, unsignedp
, methods
);
1709 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1710 imag0
, real1
, imagr
, unsignedp
);
1714 else if (res
!= imagr
)
1715 emit_move_insn (imagr
, res
);
1721 switch (flag_complex_divide_method
)
1724 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1725 realr
, imagr
, submode
,
1731 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1732 realr
, imagr
, submode
,
1752 if (binoptab
->code
!= UNKNOWN
)
1754 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1755 copy_rtx (op0
), copy_rtx (op1
));
1759 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1765 /* It can't be open-coded in this mode.
1766 Use a library call if one is available and caller says that's ok. */
1768 if (binoptab
->handlers
[(int) mode
].libfunc
1769 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1773 enum machine_mode op1_mode
= mode
;
1780 op1_mode
= word_mode
;
1781 /* Specify unsigned here,
1782 since negative shift counts are meaningless. */
1783 op1x
= convert_to_mode (word_mode
, op1
, 1);
1786 if (GET_MODE (op0
) != VOIDmode
1787 && GET_MODE (op0
) != mode
)
1788 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1790 /* Pass 1 for NO_QUEUE so we don't lose any increments
1791 if the libcall is cse'd or moved. */
1792 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1793 NULL_RTX
, LCT_CONST
, mode
, 2,
1794 op0
, mode
, op1x
, op1_mode
);
1796 insns
= get_insns ();
1799 target
= gen_reg_rtx (mode
);
1800 emit_libcall_block (insns
, target
, value
,
1801 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1806 delete_insns_since (last
);
1808 /* It can't be done in this mode. Can we do it in a wider mode? */
1810 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1811 || methods
== OPTAB_MUST_WIDEN
))
1813 /* Caller says, don't even try. */
1814 delete_insns_since (entry_last
);
1818 /* Compute the value of METHODS to pass to recursive calls.
1819 Don't allow widening to be tried recursively. */
1821 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1823 /* Look for a wider mode of the same class for which it appears we can do
1826 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1828 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1829 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1831 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1832 != CODE_FOR_nothing
)
1833 || (methods
== OPTAB_LIB
1834 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1836 rtx xop0
= op0
, xop1
= op1
;
1839 /* For certain integer operations, we need not actually extend
1840 the narrow operands, as long as we will truncate
1841 the results to the same narrowness. */
1843 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1844 || binoptab
== xor_optab
1845 || binoptab
== add_optab
|| binoptab
== sub_optab
1846 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1847 && class == MODE_INT
)
1850 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1851 unsignedp
, no_extend
);
1853 /* The second operand of a shift must always be extended. */
1854 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1855 no_extend
&& binoptab
!= ashl_optab
);
1857 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1858 unsignedp
, methods
);
1861 if (class != MODE_INT
)
1864 target
= gen_reg_rtx (mode
);
1865 convert_move (target
, temp
, 0);
1869 return gen_lowpart (mode
, temp
);
1872 delete_insns_since (last
);
1877 delete_insns_since (entry_last
);
1881 /* Like expand_binop, but for open-coding vectors binops. */
/* NOTE(review): this chunk is a line-mangled extraction of GCC optabs.c:
   statements are split across physical lines, the original file's line
   numbers are fused into the text, and some original lines are missing
   (the embedded numbers jump, e.g. 1907 -> 1911, 1917 -> 1923).  All
   tokens below are preserved byte-for-byte; only comments were added.  */
/* expand_vector_binop: open-code the vector operation BINOPTAB on
   operands OP0 and OP1 of vector mode MODE, one element (or wider
   sub-vector) at a time, storing the pieces into TARGET.  The return
   statement is in a missing region -- presumably returns TARGET;
   TODO confirm against the full source.  */
1884 expand_vector_binop (enum machine_mode mode
, optab binoptab
, rtx op0
,
1885 rtx op1
, rtx target
, int unsignedp
,
1886 enum optab_methods methods
)
1888 enum machine_mode submode
, tmode
;
1889 int size
, elts
, subsize
, subbitsize
, i
;
1890 rtx t
, a
, b
, res
, seq
;
1891 enum mode_class
class;
1893 class = GET_MODE_CLASS (mode
);
1895 size
= GET_MODE_SIZE (mode
);
1896 submode
= GET_MODE_INNER (mode
);
1898 /* Search for the widest vector mode with the same inner mode that is
1899 still narrower than MODE and that allows to open-code this operator.
1900 Note, if we find such a mode and the handler later decides it can't
1901 do the expansion, we'll be called recursively with the narrower mode. */
1902 for (tmode
= GET_CLASS_NARROWEST_MODE (class);
1903 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
1904 tmode
= GET_MODE_WIDER_MODE (tmode
))
1906 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
1907 && binoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
/* Dispatch on the rtx code of the optab; the case labels themselves
   are in a missing region of the extraction.  */
1911 switch (binoptab
->code
)
1916 tmode
= int_mode_for_mode (mode
);
1917 if (tmode
!= BLKmode
)
1923 subsize
= GET_MODE_SIZE (submode
);
1924 subbitsize
= GET_MODE_BITSIZE (submode
);
1925 elts
= size
/ subsize
;
1927 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1928 but that we operate on more than one element at a time. */
1929 if (subsize
== GET_MODE_UNIT_SIZE (mode
) && methods
== OPTAB_DIRECT
)
1934 /* Errors can leave us with a const0_rtx as operand. */
1935 if (GET_MODE (op0
) != mode
)
1936 op0
= copy_to_mode_reg (mode
, op0
)
;
1937 if (GET_MODE (op1
) != mode
)
1938 op1
= copy_to_mode_reg (mode
, op1
);
1941 target
= gen_reg_rtx (mode
);
/* Main element loop: extract the I-th element of each operand,
   apply the operation, and store the result back into TARGET.  */
1943 for (i
= 0; i
< elts
; ++i
)
1945 /* If this is part of a register, and not the first item in the
1946 word, we can't store using a SUBREG - that would clobber
1948 And storing with a SUBREG is only possible for the least
1949 significant part, hence we can't do it for big endian
1950 (unless we want to permute the evaluation order. */
1951 if (GET_CODE (target
) == REG
1952 && (BYTES_BIG_ENDIAN
1953 ? subsize
< UNITS_PER_WORD
1954 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
1957 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
1958 if (CONSTANT_P (op0
))
1959 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
1961 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
1962 NULL_RTX
, submode
, submode
, size
);
1963 if (CONSTANT_P (op1
))
1964 b
= simplify_gen_subreg (submode
, op1
, mode
, i
* subsize
);
1966 b
= extract_bit_field (op1
, subbitsize
, i
* subbitsize
, unsignedp
,
1967 NULL_RTX
, submode
, submode
, size
);
/* Division gets special handling: float vector division expands via
   expand_binop, integer division via expand_divmod.  */
1969 if (binoptab
->code
== DIV
)
1971 if (class == MODE_VECTOR_FLOAT
)
1972 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1973 unsignedp
, methods
);
1975 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1976 a
, b
, t
, unsignedp
);
1979 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1980 unsignedp
, methods
);
1986 emit_move_insn (t
, res
);
/* The tail of this call (and the function epilogue, orig. lines
   1989-2003) is missing from this extraction.  */
1988 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2004 /* Like expand_unop but for open-coding vector unops. */
/* NOTE(review): line-mangled extraction of GCC optabs.c; statements are
   split across lines and some original lines are missing (embedded
   numbers jump, e.g. 2037 -> 2042).  Tokens preserved byte-for-byte;
   only comments added.  */
/* expand_vector_unop: open-code the unary vector operation UNOPTAB on
   OP0 of vector mode MODE element by element, storing into TARGET.
   The return is in a missing region -- presumably returns TARGET;
   TODO confirm against the full source.  */
2007 expand_vector_unop (enum machine_mode mode
, optab unoptab
, rtx op0
,
2008 rtx target
, int unsignedp
)
2010 enum machine_mode submode
, tmode
;
2011 int size
, elts
, subsize
, subbitsize
, i
;
2014 size
= GET_MODE_SIZE (mode
);
2015 submode
= GET_MODE_INNER (mode
);
2017 /* Search for the widest vector mode with the same inner mode that is
2018 still narrower than MODE and that allows to open-code this operator.
2019 Note, if we find such a mode and the handler later decides it can't
2020 do the expansion, we'll be called recursively with the narrower mode. */
2021 for (tmode
= GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode
));
2022 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
2023 tmode
= GET_MODE_WIDER_MODE (tmode
))
2025 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
2026 && unoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
2029 /* If there is no negate operation, try doing a subtract from zero. */
2030 if (unoptab
== neg_optab
&& GET_MODE_CLASS (submode
) == MODE_INT
2031 /* Avoid infinite recursion when an
2032 error has left us with the wrong mode. */
2033 && GET_MODE (op0
) == mode
)
2036 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2037 target
, unsignedp
, OPTAB_DIRECT
);
2042 if (unoptab
== one_cmpl_optab
)
2044 tmode
= int_mode_for_mode (mode
);
2045 if (tmode
!= BLKmode
)
2049 subsize
= GET_MODE_SIZE (submode
);
2050 subbitsize
= GET_MODE_BITSIZE (submode
);
2051 elts
= size
/ subsize
;
2053 /* Errors can leave us with a const0_rtx as operand. */
2054 if (GET_MODE (op0
) != mode
)
2055 op0
= copy_to_mode_reg (mode
, op0
);
2058 target
= gen_reg_rtx (mode
);
/* Element loop: extract each element, apply the unop, store back.  */
2062 for (i
= 0; i
< elts
; ++i
)
2064 /* If this is part of a register, and not the first item in the
2065 word, we can't store using a SUBREG - that would clobber
2067 And storing with a SUBREG is only possible for the least
2068 significant part, hence we can't do it for big endian
2069 (unless we want to permute the evaluation order. */
2070 if (GET_CODE (target
) == REG
2071 && (BYTES_BIG_ENDIAN
2072 ? subsize
< UNITS_PER_WORD
2073 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
2076 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
2077 if (CONSTANT_P (op0
))
2078 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
2080 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
2081 t
, submode
, submode
, size
);
2083 res
= expand_unop (submode
, unoptab
, a
, t
, unsignedp
);
2086 emit_move_insn (t
, res
);
/* The tail of this call (and the function epilogue, orig. lines
   2089-2098) is missing from this extraction.  */
2088 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2099 /* Expand a binary operator which has both signed and unsigned forms.
2100 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2103 If we widen unsigned operands, we may use a signed wider operation instead
2104 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): line-mangled extraction; some original lines missing
   (e.g. orig. 2101-2102 of the comment above, and the early returns
   after each expand_binop attempt).  Tokens preserved byte-for-byte.
   Strategy visible here: try direct, then signed widening via a fake
   optab, then unsigned widening, then lib calls, in that order.  */
2107 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2108 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2109 enum optab_methods methods
)
2112 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2113 struct optab wide_soptab
;
2115 /* Do it without widening, if possible. */
2116 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2117 unsignedp
, OPTAB_DIRECT
);
2118 if (temp
|| methods
== OPTAB_DIRECT
)
2121 /* Try widening to a signed int. Make a fake signed optab that
2122 hides any signed insn for direct use. */
2123 wide_soptab
= *soptab
;
/* Blank out this mode's direct handler and libfunc in the copy so
   only wider modes of the signed optab can be used.  */
2124 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2125 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2127 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2128 unsignedp
, OPTAB_WIDEN
);
2130 /* For unsigned operands, try widening to an unsigned int. */
2131 if (temp
== 0 && unsignedp
)
2132 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2133 unsignedp
, OPTAB_WIDEN
);
2134 if (temp
|| methods
== OPTAB_WIDEN
)
2137 /* Use the right width lib call if that exists. */
2138 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2139 if (temp
|| methods
== OPTAB_LIB
)
2142 /* Must widen and use a lib call, use either signed or unsigned. */
2143 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2144 unsignedp
, methods
);
/* Final fallback (orig. lines 2145-2147 missing here): retry with the
   unsigned optab directly.  */
2148 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2149 unsignedp
, methods
);
2153 /* Generate code to perform an operation specified by BINOPTAB
2154 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2155 We assume that the order of the operands for the instruction
2156 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2157 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2159 Either TARG0 or TARG1 may be zero, but what that means is that
2160 the result is not actually wanted. We will generate it into
2161 a dummy pseudo-reg and discard it. They may not both be zero.
2163 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): line-mangled extraction of GCC optabs.c; tokens are
   preserved byte-for-byte and some original lines are absent (e.g. the
   tail of the parameter list after targ1, orig. 2167, and the
   `return 1;` / `return 0;` statements).  Only comments were added.  */
2166 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2169 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2170 enum mode_class
class;
2171 enum machine_mode wider_mode
;
2172 rtx entry_last
= get_last_insn ();
2175 class = GET_MODE_CLASS (mode
);
/* Resolve any pending postincrements in the operands before use.  */
2177 op0
= protect_from_queue (op0
, 0);
2178 op1
= protect_from_queue (op1
, 0);
2182 op0
= force_not_mem (op0
);
2183 op1
= force_not_mem (op1
);
2186 /* If we are inside an appropriately-short loop and one operand is an
2187 expensive constant, force it into a register. */
2188 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2189 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2190 op0
= force_reg (mode
, op0
);
2192 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2193 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2194 op1
= force_reg (mode
, op1
);
/* A zero TARG0/TARG1 means the result is unwanted: substitute a fresh
   pseudo (per the head comment above).  */
2197 targ0
= protect_from_queue (targ0
, 1);
2199 targ0
= gen_reg_rtx (mode
);
2201 targ1
= protect_from_queue (targ1
, 1);
2203 targ1
= gen_reg_rtx (mode
);
2205 /* Record where to go back to if we fail. */
2206 last
= get_last_insn ();
2208 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2210 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2211 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2212 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2214 rtx xop0
= op0
, xop1
= op1
;
2216 /* In case the insn wants input operands in modes different from
2217 those of the actual operands, convert the operands. It would
2218 seem that we don't need to convert CONST_INTs, but we do, so
2219 that they're properly zero-extended, sign-extended or truncated
2222 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2223 xop0
= convert_modes (mode0
,
2224 GET_MODE (op0
) != VOIDmode
2229 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2230 xop1
= convert_modes (mode1
,
2231 GET_MODE (op1
) != VOIDmode
2236 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2237 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2238 xop0
= copy_to_mode_reg (mode0
, xop0
);
2240 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2241 xop1
= copy_to_mode_reg (mode1
, xop1
);
2243 /* We could handle this, but we should always be called with a pseudo
2244 for our targets and all insns should take them as outputs. */
2245 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2246 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
/* Generate the two-output insn; emission and the success return are
   in a missing region (orig. 2250-2255).  */
2249 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2256 delete_insns_since (last
);
2259 /* It can't be done in this mode. Can we do it in a wider mode? */
2261 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2263 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2264 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2266 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2267 != CODE_FOR_nothing
)
2269 rtx t0
= gen_reg_rtx (wider_mode
);
2270 rtx t1
= gen_reg_rtx (wider_mode
);
2271 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2272 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
/* Recurse in the wider mode, then narrow both results back.  */
2274 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2277 convert_move (targ0
, t0
, unsignedp
);
2278 convert_move (targ1
, t1
, unsignedp
);
2282 delete_insns_since (last
);
/* Total failure: discard everything emitted since entry.  */
2287 delete_insns_since (entry_last
);
2291 /* Wrapper around expand_unop which takes an rtx code to specify
2292 the operation to perform, not an optab pointer. All other
2293 arguments are the same. */
/* NOTE(review): maps CODE to its optab via the code_to_optab table and
   delegates to expand_unop.  Original lines 2299-2301 are missing from
   this extraction -- presumably a sanity check on a null optab; TODO
   confirm against the full source.  Tokens preserved byte-for-byte.  */
2295 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2296 rtx target
, int unsignedp
)
2298 optab unop
= code_to_optab
[(int) code
];
2302 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2308 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* NOTE(review): line-mangled extraction; the first half of the head
   comment (orig. lines before 2308) and the success/failure returns
   are missing.  Tokens preserved byte-for-byte.
   widen_clz: compute clz of OP0 in MODE by zero-extending into the
   first wider mode whose clz insn exists, doing the wide clz, and
   subtracting the bit-width difference (per the formula above).  */
2310 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2312 enum mode_class
class = GET_MODE_CLASS (mode
);
2313 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2315 enum machine_mode wider_mode
;
2316 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2317 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2319 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2320 != CODE_FOR_nothing
)
2322 rtx xop0
, temp
, last
;
/* Remember the insn stream position so a failed attempt can be
   rolled back below.  */
2324 last
= get_last_insn ();
2327 target
= gen_reg_rtx (mode
);
/* Zero-extend the operand (unsignedp=true, no_extend=false).  */
2328 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2329 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2331 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2332 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2333 - GET_MODE_BITSIZE (mode
)),
2334 target
, true, OPTAB_DIRECT
);
2336 delete_insns_since (last
);
2345 /* Try calculating (parity x) as (and (popcount x) 1), where
2346 popcount can also be done in a wider mode. */
/* NOTE(review): line-mangled extraction; the returns and some interior
   lines (e.g. orig. 2368-2369, the tail of the expand_unop call) are
   missing.  Tokens preserved byte-for-byte.
   Note the loop starts at MODE itself (wider_mode = mode), unlike
   widen_clz which starts at the first wider mode.  */
2348 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2350 enum mode_class
class = GET_MODE_CLASS (mode
);
2351 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2353 enum machine_mode wider_mode
;
2354 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2355 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2357 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2358 != CODE_FOR_nothing
)
2360 rtx xop0
, temp
, last
;
/* Remember the insn stream position for rollback on failure.  */
2362 last
= get_last_insn ();
2365 target
= gen_reg_rtx (mode
);
/* Zero-extend the operand into the mode with a popcount insn.  */
2366 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2367 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
/* Mask the popcount down to its low bit: parity = popcount & 1.  */
2370 temp
= expand_binop (wider_mode
, and_optab
, temp
, GEN_INT (1),
2371 target
, true, OPTAB_DIRECT
);
2373 delete_insns_since (last
);
2382 /* Generate code to perform an operation specified by UNOPTAB
2383 on operand OP0, with result having machine-mode MODE.
2385 UNSIGNEDP is for the case where we have to widen the operands
2386 to perform the operation. It says to use zero-extension.
2388 If TARGET is nonzero, the value
2389 is generated there, if it is convenient to do so.
2390 In all cases an rtx is returned for the locus of the value;
2391 this may or may not be TARGET. */
2394 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2397 enum mode_class
class;
2398 enum machine_mode wider_mode
;
2400 rtx last
= get_last_insn ();
2403 class = GET_MODE_CLASS (mode
);
2405 op0
= protect_from_queue (op0
, 0);
2409 op0
= force_not_mem (op0
);
2413 target
= protect_from_queue (target
, 1);
2415 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2417 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2418 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2424 temp
= gen_reg_rtx (mode
);
2426 if (GET_MODE (xop0
) != VOIDmode
2427 && GET_MODE (xop0
) != mode0
)
2428 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2430 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2432 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2433 xop0
= copy_to_mode_reg (mode0
, xop0
);
2435 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2436 temp
= gen_reg_rtx (mode
);
2438 pat
= GEN_FCN (icode
) (temp
, xop0
);
2441 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2442 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2444 delete_insns_since (last
);
2445 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2453 delete_insns_since (last
);
2456 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2458 /* Widening clz needs special treatment. */
2459 if (unoptab
== clz_optab
)
2461 temp
= widen_clz (mode
, op0
, target
);
2468 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2469 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2470 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2472 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2476 /* For certain operations, we need not actually extend
2477 the narrow operand, as long as we will truncate the
2478 results to the same narrowness. */
2480 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2481 (unoptab
== neg_optab
2482 || unoptab
== one_cmpl_optab
)
2483 && class == MODE_INT
);
2485 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2490 if (class != MODE_INT
)
2493 target
= gen_reg_rtx (mode
);
2494 convert_move (target
, temp
, 0);
2498 return gen_lowpart (mode
, temp
);
2501 delete_insns_since (last
);
2505 /* These can be done a word at a time. */
2506 if (unoptab
== one_cmpl_optab
2507 && class == MODE_INT
2508 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2509 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2514 if (target
== 0 || target
== op0
)
2515 target
= gen_reg_rtx (mode
);
2519 /* Do the actual arithmetic. */
2520 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2522 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2523 rtx x
= expand_unop (word_mode
, unoptab
,
2524 operand_subword_force (op0
, i
, mode
),
2525 target_piece
, unsignedp
);
2527 if (target_piece
!= x
)
2528 emit_move_insn (target_piece
, x
);
2531 insns
= get_insns ();
2534 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2535 gen_rtx_fmt_e (unoptab
->code
, mode
,
2540 /* Open-code the complex negation operation. */
2541 else if (unoptab
->code
== NEG
2542 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2548 /* Find the correct mode for the real and imaginary parts. */
2549 enum machine_mode submode
= GET_MODE_INNER (mode
);
2551 if (submode
== BLKmode
)
2555 target
= gen_reg_rtx (mode
);
2559 target_piece
= gen_imagpart (submode
, target
);
2560 x
= expand_unop (submode
, unoptab
,
2561 gen_imagpart (submode
, op0
),
2562 target_piece
, unsignedp
);
2563 if (target_piece
!= x
)
2564 emit_move_insn (target_piece
, x
);
2566 target_piece
= gen_realpart (submode
, target
);
2567 x
= expand_unop (submode
, unoptab
,
2568 gen_realpart (submode
, op0
),
2569 target_piece
, unsignedp
);
2570 if (target_piece
!= x
)
2571 emit_move_insn (target_piece
, x
);
2576 emit_no_conflict_block (seq
, target
, op0
, 0,
2577 gen_rtx_fmt_e (unoptab
->code
, mode
,
2582 /* Try negating floating point values by flipping the sign bit. */
2583 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2584 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2586 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2587 enum machine_mode imode
= int_mode_for_mode (mode
);
2588 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2590 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2592 HOST_WIDE_INT hi
, lo
;
2593 rtx last
= get_last_insn ();
2595 /* Handle targets with different FP word orders. */
2596 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2598 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2599 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2600 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2603 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2606 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2610 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2613 temp
= expand_binop (imode
, xor_optab
,
2614 gen_lowpart (imode
, op0
),
2615 immed_double_const (lo
, hi
, imode
),
2616 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2621 target
= gen_reg_rtx (mode
);
2622 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2623 set_unique_reg_note (insn
, REG_EQUAL
,
2624 gen_rtx_fmt_e (NEG
, mode
,
2628 delete_insns_since (last
);
2632 /* Try calculating parity (x) as popcount (x) % 2. */
2633 if (unoptab
== parity_optab
)
2635 temp
= expand_parity (mode
, op0
, target
);
2641 /* Now try a library call in this mode. */
2642 if (unoptab
->handlers
[(int) mode
].libfunc
)
2646 enum machine_mode outmode
= mode
;
2648 /* All of these functions return small values. Thus we choose to
2649 have them return something that isn't a double-word. */
2650 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2651 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2653 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2657 /* Pass 1 for NO_QUEUE so we don't lose any increments
2658 if the libcall is cse'd or moved. */
2659 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2660 NULL_RTX
, LCT_CONST
, outmode
,
2662 insns
= get_insns ();
2665 target
= gen_reg_rtx (outmode
);
2666 emit_libcall_block (insns
, target
, value
,
2667 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2672 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2673 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2675 /* It can't be done in this mode. Can we do it in a wider mode? */
2677 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2679 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2680 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2682 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2683 != CODE_FOR_nothing
)
2684 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2688 /* For certain operations, we need not actually extend
2689 the narrow operand, as long as we will truncate the
2690 results to the same narrowness. */
2692 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2693 (unoptab
== neg_optab
2694 || unoptab
== one_cmpl_optab
)
2695 && class == MODE_INT
);
2697 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2700 /* If we are generating clz using wider mode, adjust the
2702 if (unoptab
== clz_optab
&& temp
!= 0)
2703 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2704 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2705 - GET_MODE_BITSIZE (mode
)),
2706 target
, true, OPTAB_DIRECT
);
2710 if (class != MODE_INT
)
2713 target
= gen_reg_rtx (mode
);
2714 convert_move (target
, temp
, 0);
2718 return gen_lowpart (mode
, temp
);
2721 delete_insns_since (last
);
2726 /* If there is no negate operation, try doing a subtract from zero.
2727 The US Software GOFAST library needs this. */
2728 if (unoptab
->code
== NEG
)
2731 temp
= expand_binop (mode
,
2732 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2733 CONST0_RTX (mode
), op0
,
2734 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2742 /* Emit code to compute the absolute value of OP0, with result to
2743 TARGET if convenient. (TARGET may be 0.) The return value says
2744 where the result actually is to be found.
2746 MODE is the mode of the operand; the mode of the result is
2747 different but can be deduced from MODE.
2752 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2753 int result_unsignedp
)
2758 result_unsignedp
= 1;
2760 /* First try to do it with a special abs instruction. */
2761 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2766 /* For floating point modes, try clearing the sign bit. */
2767 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2768 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2770 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2771 enum machine_mode imode
= int_mode_for_mode (mode
);
2772 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2774 if (imode
!= BLKmode
&& bitpos
>= 0)
2776 HOST_WIDE_INT hi
, lo
;
2777 rtx last
= get_last_insn ();
2779 /* Handle targets with different FP word orders. */
2780 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2782 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2783 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2784 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2787 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2790 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2794 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2797 temp
= expand_binop (imode
, and_optab
,
2798 gen_lowpart (imode
, op0
),
2799 immed_double_const (~lo
, ~hi
, imode
),
2800 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2805 target
= gen_reg_rtx (mode
);
2806 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2807 set_unique_reg_note (insn
, REG_EQUAL
,
2808 gen_rtx_fmt_e (ABS
, mode
,
2812 delete_insns_since (last
);
2816 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2817 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2819 rtx last
= get_last_insn ();
2821 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2823 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2829 delete_insns_since (last
);
2832 /* If this machine has expensive jumps, we can do integer absolute
2833 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2834 where W is the width of MODE. */
2836 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2838 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2839 size_int (GET_MODE_BITSIZE (mode
) - 1),
2842 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2845 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2846 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2856 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2857 int result_unsignedp
, int safe
)
2862 result_unsignedp
= 1;
2864 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2868 /* If that does not win, use conditional jump and negate. */
2870 /* It is safe to use the target if it is the same
2871 as the source if this is also a pseudo register */
2872 if (op0
== target
&& GET_CODE (op0
) == REG
2873 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2876 op1
= gen_label_rtx ();
2877 if (target
== 0 || ! safe
2878 || GET_MODE (target
) != mode
2879 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2880 || (GET_CODE (target
) == REG
2881 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2882 target
= gen_reg_rtx (mode
);
2884 emit_move_insn (target
, op0
);
2887 /* If this mode is an integer too wide to compare properly,
2888 compare word by word. Rely on CSE to optimize constant cases. */
2889 if (GET_MODE_CLASS (mode
) == MODE_INT
2890 && ! can_compare_p (GE
, mode
, ccp_jump
))
2891 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2894 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2895 NULL_RTX
, NULL_RTX
, op1
);
2897 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2900 emit_move_insn (target
, op0
);
2906 /* Emit code to compute the absolute value of OP0, with result to
2907 TARGET if convenient. (TARGET may be 0.) The return value says
2908 where the result actually is to be found.
2910 MODE is the mode of the operand; the mode of the result is
2911 different but can be deduced from MODE.
2913 UNSIGNEDP is relevant for complex integer modes. */
2916 expand_complex_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2919 enum mode_class
class = GET_MODE_CLASS (mode
);
2920 enum machine_mode wider_mode
;
2922 rtx entry_last
= get_last_insn ();
2925 optab this_abs_optab
;
2927 /* Find the correct mode for the real and imaginary parts. */
2928 enum machine_mode submode
= GET_MODE_INNER (mode
);
2930 if (submode
== BLKmode
)
2933 op0
= protect_from_queue (op0
, 0);
2937 op0
= force_not_mem (op0
);
2940 last
= get_last_insn ();
2943 target
= protect_from_queue (target
, 1);
2945 this_abs_optab
= ! unsignedp
&& flag_trapv
2946 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2947 ? absv_optab
: abs_optab
;
2949 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2951 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2952 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2958 temp
= gen_reg_rtx (submode
);
2960 if (GET_MODE (xop0
) != VOIDmode
2961 && GET_MODE (xop0
) != mode0
)
2962 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2964 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2966 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2967 xop0
= copy_to_mode_reg (mode0
, xop0
);
2969 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2970 temp
= gen_reg_rtx (submode
);
2972 pat
= GEN_FCN (icode
) (temp
, xop0
);
2975 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2976 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2979 delete_insns_since (last
);
2980 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2989 delete_insns_since (last
);
2992 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2994 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2995 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2997 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2998 != CODE_FOR_nothing
)
3002 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3003 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3007 if (class != MODE_COMPLEX_INT
)
3010 target
= gen_reg_rtx (submode
);
3011 convert_move (target
, temp
, 0);
3015 return gen_lowpart (submode
, temp
);
3018 delete_insns_since (last
);
3022 /* Open-code the complex absolute-value operation
3023 if we can open-code sqrt. Otherwise it's not worth while. */
3024 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
3027 rtx real
, imag
, total
;
3029 real
= gen_realpart (submode
, op0
);
3030 imag
= gen_imagpart (submode
, op0
);
3032 /* Square both parts. */
3033 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
3034 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
3036 /* Sum the parts. */
3037 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
3038 0, OPTAB_LIB_WIDEN
);
3040 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3041 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
3043 delete_insns_since (last
);
3048 /* Now try a library call in this mode. */
3049 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
3056 /* Pass 1 for NO_QUEUE so we don't lose any increments
3057 if the libcall is cse'd or moved. */
3058 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
3059 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
3060 insns
= get_insns ();
3063 target
= gen_reg_rtx (submode
);
3064 emit_libcall_block (insns
, target
, value
,
3065 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
3070 /* It can't be done in this mode. Can we do it in a wider mode? */
3072 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3073 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3075 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3076 != CODE_FOR_nothing
)
3077 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
3081 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3083 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3087 if (class != MODE_COMPLEX_INT
)
3090 target
= gen_reg_rtx (submode
);
3091 convert_move (target
, temp
, 0);
3095 return gen_lowpart (submode
, temp
);
3098 delete_insns_since (last
);
3102 delete_insns_since (entry_last
);
3106 /* Generate an instruction whose insn-code is INSN_CODE,
3107 with two operands: an output TARGET and an input OP0.
3108 TARGET *must* be nonzero, and the output is always stored there.
3109 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3110 the value that is stored into TARGET. */
3113 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
3116 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3119 temp
= target
= protect_from_queue (target
, 1);
3121 op0
= protect_from_queue (op0
, 0);
3123 /* Sign and zero extension from memory is often done specially on
3124 RISC machines, so forcing into a register here can pessimize
3126 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
3127 op0
= force_not_mem (op0
);
3129 /* Now, if insn does not accept our operands, put them into pseudos. */
3131 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
3132 op0
= copy_to_mode_reg (mode0
, op0
);
3134 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
3135 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
3136 temp
= gen_reg_rtx (GET_MODE (temp
));
3138 pat
= GEN_FCN (icode
) (temp
, op0
);
3140 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3141 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3146 emit_move_insn (target
, temp
);
3149 /* Emit code to perform a series of operations on a multi-word quantity, one
3152 Such a block is preceded by a CLOBBER of the output, consists of multiple
3153 insns, each setting one word of the output, and followed by a SET copying
3154 the output to itself.
3156 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3157 note indicating that it doesn't conflict with the (also multi-word)
3158 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3161 INSNS is a block of code generated to perform the operation, not including
3162 the CLOBBER and final copy. All insns that compute intermediate values
3163 are first emitted, followed by the block as described above.
3165 TARGET, OP0, and OP1 are the output and inputs of the operations,
3166 respectively. OP1 may be zero for a unary operation.
3168 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3171 If TARGET is not a register, INSNS is simply emitted with no special
3172 processing. Likewise if anything in INSNS is not an INSN or if
3173 there is a libcall block inside INSNS.
3175 The final insn emitted is returned. */
3178 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3180 rtx prev
, next
, first
, last
, insn
;
3182 if (GET_CODE (target
) != REG
|| reload_in_progress
)
3183 return emit_insn (insns
);
3185 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3186 if (GET_CODE (insn
) != INSN
3187 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3188 return emit_insn (insns
);
3190 /* First emit all insns that do not store into words of the output and remove
3191 these from the list. */
3192 for (insn
= insns
; insn
; insn
= next
)
3197 next
= NEXT_INSN (insn
);
3199 /* Some ports (cris) create a libcall regions at their own. We must
3200 avoid any potential nesting of LIBCALLs. */
3201 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3202 remove_note (insn
, note
);
3203 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3204 remove_note (insn
, note
);
3206 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3207 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3208 set
= PATTERN (insn
);
3209 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3211 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3212 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3214 set
= XVECEXP (PATTERN (insn
), 0, i
);
3222 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3224 if (PREV_INSN (insn
))
3225 NEXT_INSN (PREV_INSN (insn
)) = next
;
3230 PREV_INSN (next
) = PREV_INSN (insn
);
3236 prev
= get_last_insn ();
3238 /* Now write the CLOBBER of the output, followed by the setting of each
3239 of the words, followed by the final copy. */
3240 if (target
!= op0
&& target
!= op1
)
3241 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3243 for (insn
= insns
; insn
; insn
= next
)
3245 next
= NEXT_INSN (insn
);
3248 if (op1
&& GET_CODE (op1
) == REG
)
3249 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3252 if (op0
&& GET_CODE (op0
) == REG
)
3253 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3257 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3258 != CODE_FOR_nothing
)
3260 last
= emit_move_insn (target
, target
);
3262 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3266 last
= get_last_insn ();
3268 /* Remove any existing REG_EQUAL note from "last", or else it will
3269 be mistaken for a note referring to the full contents of the
3270 alleged libcall value when found together with the REG_RETVAL
3271 note added below. An existing note can come from an insn
3272 expansion at "last". */
3273 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3277 first
= get_insns ();
3279 first
= NEXT_INSN (prev
);
3281 /* Encapsulate the block so it gets manipulated as a unit. */
3282 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3284 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3289 /* Emit code to make a call to a constant function or a library call.
3291 INSNS is a list containing all insns emitted in the call.
3292 These insns leave the result in RESULT. Our block is to copy RESULT
3293 to TARGET, which is logically equivalent to EQUIV.
3295 We first emit any insns that set a pseudo on the assumption that these are
3296 loading constants into registers; doing so allows them to be safely cse'ed
3297 between blocks. Then we emit all the other insns in the block, followed by
3298 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3299 note with an operand of EQUIV.
3301 Moving assignments to pseudos outside of the block is done to improve
3302 the generated code, but is not required to generate correct code,
3303 hence being unable to move an assignment is not grounds for not making
3304 a libcall block. There are two reasons why it is safe to leave these
3305 insns inside the block: First, we know that these pseudos cannot be
3306 used in generated RTL outside the block since they are created for
3307 temporary purposes within the block. Second, CSE will not record the
3308 values of anything set inside a libcall block, so we know they must
3309 be dead at the end of the block.
3311 Except for the first group of insns (the ones setting pseudos), the
3312 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3315 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3317 rtx final_dest
= target
;
3318 rtx prev
, next
, first
, last
, insn
;
3320 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3321 into a MEM later. Protect the libcall block from this change. */
3322 if (! REG_P (target
) || REG_USERVAR_P (target
))
3323 target
= gen_reg_rtx (GET_MODE (target
));
3325 /* If we're using non-call exceptions, a libcall corresponding to an
3326 operation that may trap may also trap. */
3327 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3329 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3330 if (GET_CODE (insn
) == CALL_INSN
)
3332 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3334 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3335 remove_note (insn
, note
);
3339 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3340 reg note to indicate that this call cannot throw or execute a nonlocal
3341 goto (unless there is already a REG_EH_REGION note, in which case
3343 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3344 if (GET_CODE (insn
) == CALL_INSN
)
3346 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3349 XEXP (note
, 0) = GEN_INT (-1);
3351 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
3355 /* First emit all insns that set pseudos. Remove them from the list as
3356 we go. Avoid insns that set pseudos which were referenced in previous
3357 insns. These can be generated by move_by_pieces, for example,
3358 to update an address. Similarly, avoid insns that reference things
3359 set in previous insns. */
3361 for (insn
= insns
; insn
; insn
= next
)
3363 rtx set
= single_set (insn
);
3366 /* Some ports (cris) create a libcall regions at their own. We must
3367 avoid any potential nesting of LIBCALLs. */
3368 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3369 remove_note (insn
, note
);
3370 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3371 remove_note (insn
, note
);
3373 next
= NEXT_INSN (insn
);
3375 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3376 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3378 || ((! INSN_P(insns
)
3379 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3380 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3381 && ! modified_in_p (SET_SRC (set
), insns
)
3382 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3384 if (PREV_INSN (insn
))
3385 NEXT_INSN (PREV_INSN (insn
)) = next
;
3390 PREV_INSN (next
) = PREV_INSN (insn
);
3395 /* Some ports use a loop to copy large arguments onto the stack.
3396 Don't move anything outside such a loop. */
3397 if (GET_CODE (insn
) == CODE_LABEL
)
3401 prev
= get_last_insn ();
3403 /* Write the remaining insns followed by the final copy. */
3405 for (insn
= insns
; insn
; insn
= next
)
3407 next
= NEXT_INSN (insn
);
3412 last
= emit_move_insn (target
, result
);
3413 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3414 != CODE_FOR_nothing
)
3415 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3418 /* Remove any existing REG_EQUAL note from "last", or else it will
3419 be mistaken for a note referring to the full contents of the
3420 libcall value when found together with the REG_RETVAL note added
3421 below. An existing note can come from an insn expansion at
3423 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3426 if (final_dest
!= target
)
3427 emit_move_insn (final_dest
, target
);
3430 first
= get_insns ();
3432 first
= NEXT_INSN (prev
);
3434 /* Encapsulate the block so it gets manipulated as a unit. */
3435 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3437 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3438 when the encapsulated region would not be in one basic block,
3439 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3441 bool attach_libcall_retval_notes
= true;
3442 next
= NEXT_INSN (last
);
3443 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3444 if (control_flow_insn_p (insn
))
3446 attach_libcall_retval_notes
= false;
3450 if (attach_libcall_retval_notes
)
3452 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3454 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3460 /* Generate code to store zero in X. */
3463 emit_clr_insn (rtx x
)
3465 emit_move_insn (x
, const0_rtx
);
3468 /* Generate code to store 1 in X
3469 assuming it contains zero beforehand. */
3472 emit_0_to_1_insn (rtx x
)
3474 emit_move_insn (x
, const1_rtx
);
3477 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3478 PURPOSE describes how this comparison will be used. CODE is the rtx
3479 comparison code we will be using.
3481 ??? Actually, CODE is slightly weaker than that. A target is still
3482 required to implement all of the normal bcc operations, but not
3483 required to implement all (or any) of the unordered bcc operations. */
3486 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3487 enum can_compare_purpose purpose
)
3491 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3493 if (purpose
== ccp_jump
)
3494 return bcc_gen_fctn
[(int) code
] != NULL
;
3495 else if (purpose
== ccp_store_flag
)
3496 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3498 /* There's only one cmov entry point, and it's allowed to fail. */
3501 if (purpose
== ccp_jump
3502 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3504 if (purpose
== ccp_cmov
3505 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3507 if (purpose
== ccp_store_flag
3508 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3511 mode
= GET_MODE_WIDER_MODE (mode
);
3513 while (mode
!= VOIDmode
);
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.

   NOTE(review): this excerpt has lost a number of physical lines (the
   return type, braces, `return'/`continue' statements, an `#else'/`#endif'
   pair, and several local declarations); the comments below flag the
   visible gaps.  Only the surviving tokens are reproduced here.  */

prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
  /* Snapshot the in/out parameters into locals.  */
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* They could both be VOIDmode if both args are immediate constants,
     but we should fold that at an earlier stage.
     With no special code here, this will call abort,
     reminding the programmer to implement such folding.  */

  /* Under -fforce-mem, pull comparison operands out of memory.  */
  if (mode != BLKmode && flag_force_mem)
    /* Load duplicate non-volatile operands once.  */
    if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
      x = force_not_mem (x);
    /* NOTE(review): the statement sharing the loaded X with Y and the
       `else' arm that introduces the two independent loads below are
       elided in this excerpt.  */
    x = force_not_mem (x);
    y = force_not_mem (y);

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (x) && preserve_subexpressions_p ()
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && preserve_subexpressions_p ()
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Abort if we have a non-canonical comparison.  The RTL documentation
     states that canonical comparisons are required only for targets which
     have cc0.  (NOTE(review): the rest of this comment and the abort call
     guarded by this test are elided.)  */
  if (CONSTANT_P (x) && ! CONSTANT_P (y))

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      /* NOTE(review): the declarations of `result' and `opalign' are on
	 elided lines; the initializer below belongs to `opalign' (the
	 common alignment of X and Y, in bytes).  */
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      /* Strip pending-queue wrappers before emitting anything.  */
      x = protect_from_queue (x, 0);
      y = protect_from_queue (y, 0);
      size = protect_from_queue (size, 0);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	  /* NOTE(review): the `continue' for a missing pattern is elided.  */

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	  /* NOTE(review): the `continue' for an over-wide size is elided.  */

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  /* NOTE(review): the stores of RESULT/const0_rtx into *PX/*PY and
	     the early return are elided here.  */
	  *pmode = result_mode;

      /* Otherwise call a library function, memcmp if we've got it,
	 bcmp otherwise.  (NOTE(review): the `#else'/`#endif' framing of
	 this conditional is elided.)  */
#ifdef TARGET_MEM_FUNCTIONS
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      libfunc = bcmp_libfunc;
      length_type = integer_type_node;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TREE_UNSIGNED (length_type));

      /* NOTE(review): the remaining arguments of this libcall are on
	 elided lines.  */
      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
      *pmode = result_mode;

  /* Fast path: the target can handle MODE directly for PURPOSE.  */
  if (can_compare_p (*pcomparison, mode, purpose))

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
	libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					word_mode, 2, x, mode, y, mode);

      /* Integer comparison returns a result that must be compared against 1,
	 so that even if we do an unsigned compare afterward,
	 there is still a value that can represent the result "less than".  */

  /* Floating-point compares with no direct insn go through the float
     libcall machinery instead.  */
  if (class == MODE_FLOAT)
    prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3699 /* Before emitting an insn with code ICODE, make sure that X, which is going
3700 to be used for operand OPNUM of the insn, is converted from mode MODE to
3701 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3702 that it is accepted by the operand predicate. Return the new value. */
3705 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3706 enum machine_mode wider_mode
, int unsignedp
)
3708 x
= protect_from_queue (x
, 0);
3710 if (mode
!= wider_mode
)
3711 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3713 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3714 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3718 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.

   NOTE(review): this excerpt has lost several physical lines (the return
   type, braces, the `do' opening and `if (label)' guards of the widening
   loop, and its `return'/`break' statements).  Only the surviving tokens
   appear below.  */

emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
  /* Comparison rtx whose mode is retargeted on each loop iteration.  */
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      /* Prefer a combined compare-and-branch pattern when one exists.  */
      icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

      if (icode != CODE_FOR_nothing
	  && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
	  x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	  emit_jump_insn (GEN_FCN (icode) (test, x, y, label));

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  /* NOTE(review): the `if (label)' guard before this jump is
	     elided.  */
	  emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      /* Only integral/floating classes are retried in wider modes;
	 other classes stop here (the `break' is elided).  */
      if (class != MODE_INT && class != MODE_FLOAT
	  && class != MODE_COMPLEX_FLOAT)

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
  while (wider_mode != VOIDmode);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.

   NOTE(review): some physical lines are elided in this excerpt (the return
   type, braces, the operand-swap statements, the guard on the unsigned
   conversion, and the final argument of the prepare_cmp_insn call).  */

emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
      /* If we're not emitting a branch, this means some caller
	 is out of sync.  (NOTE(review): the check and the actual
	 operand swap are elided here.)  */
      comparison = swap_condition (comparison);

  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical
     RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  /* Select the unsigned variant of the condition code (the guard on
     UNSIGNEDP is on an elided line).  */
    comparison = unsigned_condition (comparison);

  /* NOTE(review): the final `purpose' argument of this call is elided.  */
  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3844 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3847 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3848 enum machine_mode mode
, int unsignedp
)
3850 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).

   NOTE(review): several physical lines are missing from this excerpt
   (the return type, braces, `break' statements, the declaration of
   `libfunc' and `tmp', the `switch'/`case' framing around the true/false
   value table, and the final updates of *PX, *PY, *PMODE and
   *PUNSIGNEDP).  Only the surviving tokens appear below.  */

prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
		       enum machine_mode *pmode, int *punsignedp)
  enum rtx_code comparison = *pcomparison;
  /* The swapped code lets us accept a libfunc for the reversed operand
     order.  */
  enum rtx_code swapped = swap_condition (comparison);
  rtx x = protect_from_queue (*px, 0);
  rtx y = protect_from_queue (*py, 0);
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;

  /* Search for a comparison libfunc, widening the mode as needed; if only
     the swapped comparison has one, exchange X and Y.  */
  for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;

  /* No libfunc in any mode (the abort is on an elided line).  */
  if (mode == VOIDmode)

  if (mode != orig_mode)
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  The allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
      /* UNORDERED(x, y) is built as NE(x, x) ? true : NE(y, y).  */
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
				    temp, const_true_rtx, equiv);

      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	  rtx true_rtx, false_rtx;

	  /* NOTE(review): the `switch (comparison)' framing and the
	     `case'/`break' lines around these assignment pairs are
	     elided; each pair gives the libcall's result when the
	     comparison is true resp. false.  */
	      true_rtx = const0_rtx;
	      false_rtx = const_true_rtx;

	      true_rtx = const_true_rtx;
	      false_rtx = const0_rtx;

	      true_rtx = const1_rtx;
	      false_rtx = const0_rtx;

	      true_rtx = const0_rtx;
	      false_rtx = constm1_rtx;

	      true_rtx = constm1_rtx;
	      false_rtx = const0_rtx;

	      true_rtx = const0_rtx;
	      false_rtx = const1_rtx;

	  equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
					equiv, true_rtx, false_rtx);

  /* Emit the libcall inside a sequence so it can be wrapped in a
     libcall block (the start/end_sequence lines are elided).  */
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  /* For boolean-style results the caller must compare against zero
     with NE (the assignment is on an elided line).  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))

  *pcomparison = comparison;
3970 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3973 emit_indirect_jump (rtx loc
)
3975 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3977 loc
= copy_to_mode_reg (Pmode
, loc
);
3979 emit_jump_insn (gen_indirect_jump (loc
));
3983 #ifdef HAVE_conditional_move
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.

   NOTE(review): this excerpt has lost a number of physical lines (the
   return type, braces, the operand-swap temporaries, several guards,
   the `return' statements, and the final `return target').  Only the
   surviving tokens appear below.  */

emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode cmode, rtx op2, rtx op3,
		       enum machine_mode mode, int unsignedp)
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
      /* NOTE(review): the swap of OP0/OP1 via TEM is elided here.  */
      code = swap_condition (code);

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* Canonicalize OP2/OP3 too, when the reversed comparison code is
     available (the rest of this condition and the swap are elided).  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  /* Fail when the target has no conditional-move pattern for MODE
     (the return is on an elided line).  */
  if (icode == CODE_FOR_nothing)

      /* Under -fforce-mem, pull the move operands out of memory.  */
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);

    /* NOTE(review): the `if (target)'/`else' framing of these two
       lines is elided.  */
    target = protect_from_queue (target, 1);
    target = gen_reg_rtx (mode);

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  /* NOTE(review): the `comparison' left-hand side of this assignment is
     on an elided line.  */
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  /* Copy the result into the real target when the pattern used a
     pseudo of a different mode.  */
  if (subtarget != target)
    convert_move (target, subtarget, 0);
4107 /* Return nonzero if a conditional move of mode MODE is supported.
4109 This function is for combine so it can tell whether an insn that looks
4110 like a conditional move is actually supported by the hardware. If we
4111 guess wrong we lose a bit on optimization, but that's it. */
4112 /* ??? sparc64 supports conditionally moving integers values based on fp
4113 comparisons, and vice versa. How do we handle them? */
4116 can_conditionally_move_p (enum machine_mode mode
)
4118 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4124 #endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.

   NOTE(review): like emit_conditional_move above, this excerpt has lost a
   number of physical lines (return type, braces, swap temporaries, guards,
   `return' statements, and the final `return target').  Only the surviving
   tokens appear below.  */

emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
      /* NOTE(review): the swap of OP0/OP1 via TEM is elided here.  */
      code = swap_condition (code);

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* Canonicalize OP2/OP3 when the reversed comparison is available
     (the rest of this condition and the swap are elided).  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  /* Conditional add uses the addcc optab, not movcc_gen_code.  */
  icode = addcc_optab->handlers[(int) mode].insn_code;

  /* Fail if the target has no add-on-condition pattern for MODE
     (the return is on an elided line).  */
  if (icode == CODE_FOR_nothing)

      /* Under -fforce-mem, pull the operands out of memory.  */
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);

    /* NOTE(review): the `if (target)'/`else' framing of these two
       lines is elided.  */
    target = protect_from_queue (target, 1);
    target = gen_reg_rtx (mode);

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  /* NOTE(review): the `comparison' left-hand side of this assignment is
     on an elided line.  */
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  /* Copy the result into the real target when the pattern used a
     pseudo of a different mode.  */
  if (subtarget != target)
    convert_move (target, subtarget, 0);
4248 /* These functions attempt to generate an insn body, rather than
4249 emitting the insn, but if the gen function already emits them, we
4250 make no attempt to turn them back into naked patterns.
4252 They do not protect from queued increments,
4253 because they may be used 1) in protect_from_queue itself
4254 and 2) in other passes where there is no queue. */
4256 /* Generate and return an insn body to add Y to X. */
4259 gen_add2_insn (rtx x
, rtx y
)
4261 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4263 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4264 (x
, insn_data
[icode
].operand
[0].mode
))
4265 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4266 (x
, insn_data
[icode
].operand
[1].mode
))
4267 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4268 (y
, insn_data
[icode
].operand
[2].mode
)))
4271 return (GEN_FCN (icode
) (x
, x
, y
));
4274 /* Generate and return an insn body to add r1 and c,
4275 storing the result in r0. */
4277 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4279 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4281 if (icode
== CODE_FOR_nothing
4282 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4283 (r0
, insn_data
[icode
].operand
[0].mode
))
4284 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4285 (r1
, insn_data
[icode
].operand
[1].mode
))
4286 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4287 (c
, insn_data
[icode
].operand
[2].mode
)))
4290 return (GEN_FCN (icode
) (r0
, r1
, c
));
4294 have_add2_insn (rtx x
, rtx y
)
4298 if (GET_MODE (x
) == VOIDmode
)
4301 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4303 if (icode
== CODE_FOR_nothing
)
4306 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4307 (x
, insn_data
[icode
].operand
[0].mode
))
4308 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4309 (x
, insn_data
[icode
].operand
[1].mode
))
4310 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4311 (y
, insn_data
[icode
].operand
[2].mode
)))
4317 /* Generate and return an insn body to subtract Y from X. */
4320 gen_sub2_insn (rtx x
, rtx y
)
4322 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4324 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4325 (x
, insn_data
[icode
].operand
[0].mode
))
4326 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4327 (x
, insn_data
[icode
].operand
[1].mode
))
4328 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4329 (y
, insn_data
[icode
].operand
[2].mode
)))
4332 return (GEN_FCN (icode
) (x
, x
, y
));
4335 /* Generate and return an insn body to subtract r1 and c,
4336 storing the result in r0. */
4338 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4340 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4342 if (icode
== CODE_FOR_nothing
4343 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4344 (r0
, insn_data
[icode
].operand
[0].mode
))
4345 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4346 (r1
, insn_data
[icode
].operand
[1].mode
))
4347 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4348 (c
, insn_data
[icode
].operand
[2].mode
)))
4351 return (GEN_FCN (icode
) (r0
, r1
, c
));
4355 have_sub2_insn (rtx x
, rtx y
)
4359 if (GET_MODE (x
) == VOIDmode
)
4362 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4364 if (icode
== CODE_FOR_nothing
)
4367 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4368 (x
, insn_data
[icode
].operand
[0].mode
))
4369 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4370 (x
, insn_data
[icode
].operand
[1].mode
))
4371 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4372 (y
, insn_data
[icode
].operand
[2].mode
)))
4378 /* Generate the body of an instruction to copy Y into X.
4379 It may be a list of insns, if one insn isn't enough. */
4382 gen_move_insn (rtx x
, rtx y
)
4387 emit_move_insn_1 (x
, y
);
4393 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4394 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4395 no such operation exists, CODE_FOR_nothing will be returned. */
4398 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4402 #ifdef HAVE_ptr_extend
4404 return CODE_FOR_ptr_extend
;
4407 tab
= unsignedp
? zext_optab
: sext_optab
;
4408 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4411 /* Generate the body of an insn to extend Y (with mode MFROM)
4412 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4415 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4416 enum machine_mode mfrom
, int unsignedp
)
4418 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4419 return GEN_FCN (icode
) (x
, y
);
4422 /* can_fix_p and can_float_p say whether the target machine
4423 can directly convert a given fixed point type to
4424 a given floating point type, or vice versa.
4425 The returned value is the CODE_FOR_... value to use,
4426 or CODE_FOR_nothing if these modes cannot be directly converted.
4428 *TRUNCP_PTR is set to 1 if it is necessary to output
4429 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4431 static enum insn_code
4432 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4433 int unsignedp
, int *truncp_ptr
)
4436 enum insn_code icode
;
4438 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4439 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4440 if (icode
!= CODE_FOR_nothing
)
4446 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4447 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4448 if (icode
!= CODE_FOR_nothing
4449 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4456 return CODE_FOR_nothing
;
4459 static enum insn_code
4460 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4465 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4466 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.

   NOTE(review): this excerpt has lost many physical lines (the return
   type, braces, `abort'/`continue'/`break'/`goto' statements, several
   guards and labels, and some declarations such as `target', `temp'
   and `temp1').  Only the surviving tokens are reproduced below.  */

expand_float (rtx to, rtx from, int unsignedp)
  enum insn_code icode;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
	int doing_unsigned = unsignedp;

	/* Reject a wider FMODE whose significand still cannot hold all
	   bits of FROM (the `continue' is elided).  */
	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))

	icode = can_float_p (fmode, imode, unsignedp);
	/* A widened integer never sets the sign bit, so the signed
	   pattern can be reused for unsigned input.  */
	if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
	  icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	    to = protect_from_queue (to, 1);
	    from = protect_from_queue (from, 0);

	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    /* NOTE(review): the `if (target != to)' guard is elided.  */
	      convert_move (to, target, 0);

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
      rtx label = gen_label_rtx ();
      REAL_VALUE_TYPE offset;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      /* Under -fforce-mem (guard elided), pull FROM out of memory.  */
	from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)

      if (fmode == VOIDmode)
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = GET_MODE (to);

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_BITSIZE (GET_MODE (from)))
	      rtx neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (GET_CODE (target) != REG
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = GET_MODE (from);
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (gen_jump (label));

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      /* NOTE(review): the `if (temp != target)' guard is elided.  */
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),

      /* Add 2**bitwidth(FROM) to compensate for treating a negative
	 signed value as unsigned.  */
      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
			   CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      /* NOTE(review): the `if (temp != target)' guard is elided.  */
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();

  /* No hardware instruction available; call a library routine.  */
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      /* Libcalls take at least SImode arguments.  */
      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      /* Under -fforce-mem (guard elided), pull FROM out of memory.  */
	from = force_not_mem (from);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;

      /* Emit the libcall inside a sequence for emit_libcall_block
	 (the start/end_sequence lines are elided).  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
      insns = get_insns ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_FLOAT (GET_MODE (to), from));

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      /* NOTE(review): the `else' for the mismatched-mode case is
	 elided.  */
	convert_move (to, target, 0);
4685 /* expand_fix: generate code to convert FROM to fixed point
4686 and store in TO. FROM must be floating point. */
4691 rtx temp
= gen_reg_rtx (GET_MODE (x
));
4692 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
4696 expand_fix (rtx to
, rtx from
, int unsignedp
)
4698 enum insn_code icode
;
4700 enum machine_mode fmode
, imode
;
4703 /* We first try to find a pair of modes, one real and one integer, at
4704 least as wide as FROM and TO, respectively, in which we can open-code
4705 this conversion. If the integer mode is wider than the mode of TO,
4706 we can do the conversion either signed or unsigned. */
4708 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4709 fmode
= GET_MODE_WIDER_MODE (fmode
))
4710 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4711 imode
= GET_MODE_WIDER_MODE (imode
))
4713 int doing_unsigned
= unsignedp
;
4715 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4716 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4717 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4719 if (icode
!= CODE_FOR_nothing
)
4721 to
= protect_from_queue (to
, 1);
4722 from
= protect_from_queue (from
, 0);
4724 if (fmode
!= GET_MODE (from
))
4725 from
= convert_to_mode (fmode
, from
, 0);
4728 from
= ftruncify (from
);
4730 if (imode
!= GET_MODE (to
))
4731 target
= gen_reg_rtx (imode
);
4733 emit_unop_insn (icode
, target
, from
,
4734 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4736 convert_move (to
, target
, unsignedp
);
4741 /* For an unsigned conversion, there is one more way to do it.
4742 If we have a signed conversion, we generate code that compares
4743 the real value to the largest representable positive number. If if
4744 is smaller, the conversion is done normally. Otherwise, subtract
4745 one plus the highest signed number, convert, and add it back.
4747 We only need to check all real modes, since we know we didn't find
4748 anything with a wider integer mode.
4750 This code used to extend FP value into mode wider than the destination.
4751 This is not needed. Consider, for instance conversion from SFmode
4754 The hot path trought the code is dealing with inputs smaller than 2^63
4755 and doing just the conversion, so there is no bits to lose.
4757 In the other path we know the value is positive in the range 2^63..2^64-1
4758 inclusive. (as for other imput overflow happens and result is undefined)
4759 So we know that the most important bit set in mantissa corresponds to
4760 2^63. The subtraction of 2^63 should not generate any rounding as it
4761 simply clears out that bit. The rest is trivial. */
4763 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4764 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4765 fmode
= GET_MODE_WIDER_MODE (fmode
))
4766 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4770 REAL_VALUE_TYPE offset
;
4771 rtx limit
, lab1
, lab2
, insn
;
4773 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4774 real_2expN (&offset
, bitsize
- 1);
4775 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4776 lab1
= gen_label_rtx ();
4777 lab2
= gen_label_rtx ();
4780 to
= protect_from_queue (to
, 1);
4781 from
= protect_from_queue (from
, 0);
4784 from
= force_not_mem (from
);
4786 if (fmode
!= GET_MODE (from
))
4787 from
= convert_to_mode (fmode
, from
, 0);
4789 /* See if we need to do the subtraction. */
4790 do_pending_stack_adjust ();
4791 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4794 /* If not, do the signed "fix" and branch around fixup code. */
4795 expand_fix (to
, from
, 0);
4796 emit_jump_insn (gen_jump (lab2
));
4799 /* Otherwise, subtract 2**(N-1), convert to signed number,
4800 then add 2**(N-1). Do the addition using XOR since this
4801 will often generate better code. */
4803 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4804 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4805 expand_fix (to
, target
, 0);
4806 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4808 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4810 to
, 1, OPTAB_LIB_WIDEN
);
4813 emit_move_insn (to
, target
);
4817 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4818 != CODE_FOR_nothing
)
4820 /* Make a place for a REG_NOTE and add it. */
4821 insn
= emit_move_insn (to
, to
);
4822 set_unique_reg_note (insn
,
4824 gen_rtx_fmt_e (UNSIGNED_FIX
,
4832 /* We can't do it with an insn, so use a library call. But first ensure
4833 that the mode of TO is at least as wide as SImode, since those are the
4834 only library calls we know about. */
4836 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4838 target
= gen_reg_rtx (SImode
);
4840 expand_fix (target
, from
, unsignedp
);
4848 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4849 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4853 to
= protect_from_queue (to
, 1);
4854 from
= protect_from_queue (from
, 0);
4857 from
= force_not_mem (from
);
4861 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4862 GET_MODE (to
), 1, from
,
4864 insns
= get_insns ();
4867 emit_libcall_block (insns
, target
, value
,
4868 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4869 GET_MODE (to
), from
));
4874 if (GET_MODE (to
) == GET_MODE (target
))
4875 emit_move_insn (to
, target
);
4877 convert_move (to
, target
, 0);
4881 /* Report whether we have an instruction to perform the operation
4882 specified by CODE on operands of mode MODE. */
4884 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4886 return (code_to_optab
[(int) code
] != 0
4887 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4888 != CODE_FOR_nothing
));
4891 /* Create a blank optab. */
4896 optab op
= ggc_alloc (sizeof (struct optab
));
4897 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4899 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4900 op
->handlers
[i
].libfunc
= 0;
4906 static convert_optab
4907 new_convert_optab (void)
4910 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4911 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4912 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4914 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4915 op
->handlers
[i
][j
].libfunc
= 0;
4920 /* Same, but fill in its code as CODE, and write it into the
4921 code_to_optab table. */
4923 init_optab (enum rtx_code code
)
4925 optab op
= new_optab ();
4927 code_to_optab
[(int) code
] = op
;
4931 /* Same, but fill in its code as CODE, and do _not_ write it into
4932 the code_to_optab table. */
4934 init_optabv (enum rtx_code code
)
4936 optab op
= new_optab ();
4941 /* Conversion optabs never go in the code_to_optab table. */
4942 static inline convert_optab
4943 init_convert_optab (enum rtx_code code
)
4945 convert_optab op
= new_convert_optab ();
4950 /* Initialize the libfunc fields of an entire group of entries in some
4951 optab. Each entry is set equal to a string consisting of a leading
4952 pair of underscores followed by a generic operation name followed by
4953 a mode name (downshifted to lowercase) followed by a single character
4954 representing the number of operands for the given operation (which is
4955 usually one of the characters '2', '3', or '4').
4957 OPTABLE is the table in which libfunc fields are to be initialized.
4958 FIRST_MODE is the first machine mode index in the given optab to
4960 LAST_MODE is the last machine mode index in the given optab to
4962 OPNAME is the generic (string) name of the operation.
4963 SUFFIX is the character which specifies the number of operands for
4964 the given generic operation.
4968 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4969 const char *opname
, int suffix
)
4972 unsigned opname_len
= strlen (opname
);
4974 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4975 mode
= (enum machine_mode
) ((int) mode
+ 1))
4977 const char *mname
= GET_MODE_NAME (mode
);
4978 unsigned mname_len
= strlen (mname
);
4979 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4986 for (q
= opname
; *q
; )
4988 for (q
= mname
; *q
; q
++)
4989 *p
++ = TOLOWER (*q
);
4993 optable
->handlers
[(int) mode
].libfunc
4994 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4998 /* Initialize the libfunc fields of an entire group of entries in some
4999 optab which correspond to all integer mode operations. The parameters
5000 have the same meaning as similarly named ones for the `init_libfuncs'
5001 routine. (See above). */
5004 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
5006 int maxsize
= 2*BITS_PER_WORD
;
5007 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5008 maxsize
= LONG_LONG_TYPE_SIZE
;
5009 init_libfuncs (optable
, word_mode
,
5010 mode_for_size (maxsize
, MODE_INT
, 0),
5014 /* Initialize the libfunc fields of an entire group of entries in some
5015 optab which correspond to all real mode operations. The parameters
5016 have the same meaning as similarly named ones for the `init_libfuncs'
5017 routine. (See above). */
5020 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
5022 enum machine_mode fmode
, dmode
, lmode
;
5024 fmode
= float_type_node
? TYPE_MODE (float_type_node
) : VOIDmode
;
5025 dmode
= double_type_node
? TYPE_MODE (double_type_node
) : VOIDmode
;
5026 lmode
= long_double_type_node
? TYPE_MODE (long_double_type_node
) : VOIDmode
;
5028 if (fmode
!= VOIDmode
)
5029 init_libfuncs (optable
, fmode
, fmode
, opname
, suffix
);
5030 if (dmode
!= fmode
&& dmode
!= VOIDmode
)
5031 init_libfuncs (optable
, dmode
, dmode
, opname
, suffix
);
5032 if (lmode
!= dmode
&& lmode
!= VOIDmode
)
5033 init_libfuncs (optable
, lmode
, lmode
, opname
, suffix
);
5036 /* Initialize the libfunc fields of an entire group of entries of an
5037 inter-mode-class conversion optab. The string formation rules are
5038 similar to the ones for init_libfuncs, above, but instead of having
5039 a mode name and an operand count these functions have two mode names
5040 and no operand count. */
5042 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5043 enum mode_class from_class
,
5044 enum mode_class to_class
)
5046 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
5047 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
5048 size_t opname_len
= strlen (opname
);
5049 size_t max_mname_len
= 0;
5051 enum machine_mode fmode
, tmode
;
5052 const char *fname
, *tname
;
5054 char *libfunc_name
, *suffix
;
5057 for (fmode
= first_from_mode
;
5059 fmode
= GET_MODE_WIDER_MODE (fmode
))
5060 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
5062 for (tmode
= first_to_mode
;
5064 tmode
= GET_MODE_WIDER_MODE (tmode
))
5065 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
5067 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5068 libfunc_name
[0] = '_';
5069 libfunc_name
[1] = '_';
5070 memcpy (&libfunc_name
[2], opname
, opname_len
);
5071 suffix
= libfunc_name
+ opname_len
+ 2;
5073 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
5074 fmode
= GET_MODE_WIDER_MODE (fmode
))
5075 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
5076 tmode
= GET_MODE_WIDER_MODE (tmode
))
5078 fname
= GET_MODE_NAME (fmode
);
5079 tname
= GET_MODE_NAME (tmode
);
5082 for (q
= fname
; *q
; p
++, q
++)
5084 for (q
= tname
; *q
; p
++, q
++)
5089 tab
->handlers
[tmode
][fmode
].libfunc
5090 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5095 /* Initialize the libfunc fields of an entire group of entries of an
5096 intra-mode-class conversion optab. The string formation rules are
5097 similar to the ones for init_libfunc, above. WIDENING says whether
5098 the optab goes from narrow to wide modes or vice versa. These functions
5099 have two mode names _and_ an operand count. */
5101 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5102 enum mode_class
class, bool widening
)
5104 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
5105 size_t opname_len
= strlen (opname
);
5106 size_t max_mname_len
= 0;
5108 enum machine_mode nmode
, wmode
;
5109 const char *nname
, *wname
;
5111 char *libfunc_name
, *suffix
;
5114 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5115 nmode
= GET_MODE_WIDER_MODE (nmode
))
5116 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
5118 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5119 libfunc_name
[0] = '_';
5120 libfunc_name
[1] = '_';
5121 memcpy (&libfunc_name
[2], opname
, opname_len
);
5122 suffix
= libfunc_name
+ opname_len
+ 2;
5124 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5125 nmode
= GET_MODE_WIDER_MODE (nmode
))
5126 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
5127 wmode
= GET_MODE_WIDER_MODE (wmode
))
5129 nname
= GET_MODE_NAME (nmode
);
5130 wname
= GET_MODE_NAME (wmode
);
5133 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
5135 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
5141 tab
->handlers
[widening
? wmode
: nmode
]
5142 [widening
? nmode
: wmode
].libfunc
5143 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5150 init_one_libfunc (const char *name
)
5154 /* Create a FUNCTION_DECL that can be passed to
5155 targetm.encode_section_info. */
5156 /* ??? We don't have any type information except for this is
5157 a function. Pretend this is "int foo()". */
5158 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5159 build_function_type (integer_type_node
, NULL_TREE
));
5160 DECL_ARTIFICIAL (decl
) = 1;
5161 DECL_EXTERNAL (decl
) = 1;
5162 TREE_PUBLIC (decl
) = 1;
5164 symbol
= XEXP (DECL_RTL (decl
), 0);
5166 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5167 are the flags assigned by targetm.encode_section_info. */
5168 SYMBOL_REF_DECL (symbol
) = 0;
5173 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5174 MODE to NAME, which should be either 0 or a string constant. */
5176 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
5179 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
5181 optable
->handlers
[mode
].libfunc
= 0;
5184 /* Call this to reset the function entry for one conversion optab
5185 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5186 either 0 or a string constant. */
5188 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
5189 enum machine_mode fmode
, const char *name
)
5192 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
5194 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
5197 /* Call this once to initialize the contents of the optabs
5198 appropriately for the current target machine. */
5205 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5207 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5208 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5210 #ifdef HAVE_conditional_move
5211 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5212 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5215 add_optab
= init_optab (PLUS
);
5216 addv_optab
= init_optabv (PLUS
);
5217 sub_optab
= init_optab (MINUS
);
5218 subv_optab
= init_optabv (MINUS
);
5219 smul_optab
= init_optab (MULT
);
5220 smulv_optab
= init_optabv (MULT
);
5221 smul_highpart_optab
= init_optab (UNKNOWN
);
5222 umul_highpart_optab
= init_optab (UNKNOWN
);
5223 smul_widen_optab
= init_optab (UNKNOWN
);
5224 umul_widen_optab
= init_optab (UNKNOWN
);
5225 sdiv_optab
= init_optab (DIV
);
5226 sdivv_optab
= init_optabv (DIV
);
5227 sdivmod_optab
= init_optab (UNKNOWN
);
5228 udiv_optab
= init_optab (UDIV
);
5229 udivmod_optab
= init_optab (UNKNOWN
);
5230 smod_optab
= init_optab (MOD
);
5231 umod_optab
= init_optab (UMOD
);
5232 ftrunc_optab
= init_optab (UNKNOWN
);
5233 and_optab
= init_optab (AND
);
5234 ior_optab
= init_optab (IOR
);
5235 xor_optab
= init_optab (XOR
);
5236 ashl_optab
= init_optab (ASHIFT
);
5237 ashr_optab
= init_optab (ASHIFTRT
);
5238 lshr_optab
= init_optab (LSHIFTRT
);
5239 rotl_optab
= init_optab (ROTATE
);
5240 rotr_optab
= init_optab (ROTATERT
);
5241 smin_optab
= init_optab (SMIN
);
5242 smax_optab
= init_optab (SMAX
);
5243 umin_optab
= init_optab (UMIN
);
5244 umax_optab
= init_optab (UMAX
);
5245 pow_optab
= init_optab (UNKNOWN
);
5246 atan2_optab
= init_optab (UNKNOWN
);
5248 /* These three have codes assigned exclusively for the sake of
5250 mov_optab
= init_optab (SET
);
5251 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5252 cmp_optab
= init_optab (COMPARE
);
5254 ucmp_optab
= init_optab (UNKNOWN
);
5255 tst_optab
= init_optab (UNKNOWN
);
5257 eq_optab
= init_optab (EQ
);
5258 ne_optab
= init_optab (NE
);
5259 gt_optab
= init_optab (GT
);
5260 ge_optab
= init_optab (GE
);
5261 lt_optab
= init_optab (LT
);
5262 le_optab
= init_optab (LE
);
5263 unord_optab
= init_optab (UNORDERED
);
5265 neg_optab
= init_optab (NEG
);
5266 negv_optab
= init_optabv (NEG
);
5267 abs_optab
= init_optab (ABS
);
5268 absv_optab
= init_optabv (ABS
);
5269 addcc_optab
= init_optab (UNKNOWN
);
5270 one_cmpl_optab
= init_optab (NOT
);
5271 ffs_optab
= init_optab (FFS
);
5272 clz_optab
= init_optab (CLZ
);
5273 ctz_optab
= init_optab (CTZ
);
5274 popcount_optab
= init_optab (POPCOUNT
);
5275 parity_optab
= init_optab (PARITY
);
5276 sqrt_optab
= init_optab (SQRT
);
5277 floor_optab
= init_optab (UNKNOWN
);
5278 ceil_optab
= init_optab (UNKNOWN
);
5279 round_optab
= init_optab (UNKNOWN
);
5280 btrunc_optab
= init_optab (UNKNOWN
);
5281 nearbyint_optab
= init_optab (UNKNOWN
);
5282 sin_optab
= init_optab (UNKNOWN
);
5283 cos_optab
= init_optab (UNKNOWN
);
5284 exp_optab
= init_optab (UNKNOWN
);
5285 log_optab
= init_optab (UNKNOWN
);
5286 tan_optab
= init_optab (UNKNOWN
);
5287 atan_optab
= init_optab (UNKNOWN
);
5288 strlen_optab
= init_optab (UNKNOWN
);
5289 cbranch_optab
= init_optab (UNKNOWN
);
5290 cmov_optab
= init_optab (UNKNOWN
);
5291 cstore_optab
= init_optab (UNKNOWN
);
5292 push_optab
= init_optab (UNKNOWN
);
5295 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5296 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5297 trunc_optab
= init_convert_optab (TRUNCATE
);
5298 sfix_optab
= init_convert_optab (FIX
);
5299 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5300 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5301 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5302 sfloat_optab
= init_convert_optab (FLOAT
);
5303 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5305 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5307 movstr_optab
[i
] = CODE_FOR_nothing
;
5308 clrstr_optab
[i
] = CODE_FOR_nothing
;
5309 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5310 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5312 #ifdef HAVE_SECONDARY_RELOADS
5313 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5317 /* Fill in the optabs with the insns we support. */
5320 /* Initialize the optabs with the names of the library functions. */
5321 init_integral_libfuncs (add_optab
, "add", '3');
5322 init_floating_libfuncs (add_optab
, "add", '3');
5323 init_integral_libfuncs (addv_optab
, "addv", '3');
5324 init_floating_libfuncs (addv_optab
, "add", '3');
5325 init_integral_libfuncs (sub_optab
, "sub", '3');
5326 init_floating_libfuncs (sub_optab
, "sub", '3');
5327 init_integral_libfuncs (subv_optab
, "subv", '3');
5328 init_floating_libfuncs (subv_optab
, "sub", '3');
5329 init_integral_libfuncs (smul_optab
, "mul", '3');
5330 init_floating_libfuncs (smul_optab
, "mul", '3');
5331 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5332 init_floating_libfuncs (smulv_optab
, "mul", '3');
5333 init_integral_libfuncs (sdiv_optab
, "div", '3');
5334 init_floating_libfuncs (sdiv_optab
, "div", '3');
5335 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5336 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5337 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5338 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5339 init_integral_libfuncs (smod_optab
, "mod", '3');
5340 init_integral_libfuncs (umod_optab
, "umod", '3');
5341 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5342 init_integral_libfuncs (and_optab
, "and", '3');
5343 init_integral_libfuncs (ior_optab
, "ior", '3');
5344 init_integral_libfuncs (xor_optab
, "xor", '3');
5345 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5346 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5347 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5348 init_integral_libfuncs (smin_optab
, "min", '3');
5349 init_floating_libfuncs (smin_optab
, "min", '3');
5350 init_integral_libfuncs (smax_optab
, "max", '3');
5351 init_floating_libfuncs (smax_optab
, "max", '3');
5352 init_integral_libfuncs (umin_optab
, "umin", '3');
5353 init_integral_libfuncs (umax_optab
, "umax", '3');
5354 init_integral_libfuncs (neg_optab
, "neg", '2');
5355 init_floating_libfuncs (neg_optab
, "neg", '2');
5356 init_integral_libfuncs (negv_optab
, "negv", '2');
5357 init_floating_libfuncs (negv_optab
, "neg", '2');
5358 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5359 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5360 init_integral_libfuncs (clz_optab
, "clz", '2');
5361 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5362 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5363 init_integral_libfuncs (parity_optab
, "parity", '2');
5365 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5366 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5367 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5368 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5370 /* EQ etc are floating point only. */
5371 init_floating_libfuncs (eq_optab
, "eq", '2');
5372 init_floating_libfuncs (ne_optab
, "ne", '2');
5373 init_floating_libfuncs (gt_optab
, "gt", '2');
5374 init_floating_libfuncs (ge_optab
, "ge", '2');
5375 init_floating_libfuncs (lt_optab
, "lt", '2');
5376 init_floating_libfuncs (le_optab
, "le", '2');
5377 init_floating_libfuncs (unord_optab
, "unord", '2');
5380 init_interclass_conv_libfuncs (sfloat_optab
, "float", MODE_INT
, MODE_FLOAT
);
5381 init_interclass_conv_libfuncs (sfix_optab
, "fix", MODE_FLOAT
, MODE_INT
);
5382 init_interclass_conv_libfuncs (ufix_optab
, "fixuns", MODE_FLOAT
, MODE_INT
);
5384 /* sext_optab is also used for FLOAT_EXTEND. */
5385 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5386 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5388 /* Use cabs for double complex abs, since systems generally have cabs.
5389 Don't define any libcall for float complex, so that cabs will be used. */
5390 if (complex_double_type_node
)
5391 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5392 = init_one_libfunc ("cabs");
5394 /* The ffs function op[1erates on `int'. */
5395 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5396 = init_one_libfunc ("ffs");
5398 abort_libfunc
= init_one_libfunc ("abort");
5399 memcpy_libfunc
= init_one_libfunc ("memcpy");
5400 memmove_libfunc
= init_one_libfunc ("memmove");
5401 bcopy_libfunc
= init_one_libfunc ("bcopy");
5402 memcmp_libfunc
= init_one_libfunc ("memcmp");
5403 bcmp_libfunc
= init_one_libfunc ("__gcc_bcmp");
5404 memset_libfunc
= init_one_libfunc ("memset");
5405 bzero_libfunc
= init_one_libfunc ("bzero");
5406 setbits_libfunc
= init_one_libfunc ("__setbits");
5408 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5409 ? "_Unwind_SjLj_Resume"
5410 : "_Unwind_Resume");
5411 #ifndef DONT_USE_BUILTIN_SETJMP
5412 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5413 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5415 setjmp_libfunc
= init_one_libfunc ("setjmp");
5416 longjmp_libfunc
= init_one_libfunc ("longjmp");
5418 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5419 unwind_sjlj_unregister_libfunc
5420 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5422 /* For function entry/exit instrumentation. */
5423 profile_function_entry_libfunc
5424 = init_one_libfunc ("__cyg_profile_func_enter");
5425 profile_function_exit_libfunc
5426 = init_one_libfunc ("__cyg_profile_func_exit");
5428 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5429 gcov_init_libfunc
= init_one_libfunc ("__gcov_init");
5431 if (HAVE_conditional_trap
)
5432 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5434 /* Allow the target to add more libcalls or rename some, etc. */
5435 targetm
.init_libfuncs ();
5438 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5439 CODE. Return 0 on failure. */
5442 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5443 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5445 enum machine_mode mode
= GET_MODE (op1
);
5446 enum insn_code icode
;
5449 if (!HAVE_conditional_trap
)
5452 if (mode
== VOIDmode
)
5455 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5456 if (icode
== CODE_FOR_nothing
)
5460 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5461 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5467 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5469 PUT_CODE (trap_rtx
, code
);
5470 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5474 insn
= get_insns ();
5481 #include "gt-optabs.h"