1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this chunk is a line-mangled extraction of GCC's optabs.c;
   the leading integers on many lines are the embedded original source line
   numbers and the numbering skips, i.e. lines are missing.  Restore from
   upstream GCC before making functional edits.  Code below is unchanged.  */
/* Global dispatch tables: optab_table/libfunc_table/convert_optab_table map
   operations (and mode conversions) to insn patterns or library calls;
   code_to_optab, bcc_gen_fctn, setcc_gen_code and movcc_gen_code are all
   indexed by rtx code (movcc_gen_code by machine mode) -- see the original
   comments retained below.  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* NOTE(review): the matching #endif for HAVE_conditional_move is not visible
   in this extraction -- presumably on one of the missing lines; verify.  */
87 /* The insn generating function can not take an rtx_code argument.
88 TRAP_RTX is used as an rtx argument. Its code is replaced with
89 the code to be used in the trap insn and all other fields are ignored. */
90 static GTY(()) rtx trap_rtx
;
/* Forward declarations for the static helpers defined later in optabs.c,
   followed by the fallback definitions of HAVE_conditional_trap /
   gen_conditional_trap for targets without a conditional-trap pattern.
   NOTE(review): several prototypes here are visibly truncated by the
   extraction (e.g. widen_operand's and can_fix_p's parameter lists end at
   "int," with no closing parenthesis) -- the missing text is on dropped
   original lines; do not edit without the upstream file.  */
92 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
93 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
95 static int expand_cmplxdiv_straight (rtx
, rtx
, rtx
, rtx
, rtx
, rtx
,
96 enum machine_mode
, int,
97 enum optab_methods
, enum mode_class
,
99 static int expand_cmplxdiv_wide (rtx
, rtx
, rtx
, rtx
, rtx
, rtx
,
100 enum machine_mode
, int, enum optab_methods
,
101 enum mode_class
, optab
);
102 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
103 enum machine_mode
*, int *,
104 enum can_compare_purpose
);
105 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
107 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
108 static optab
new_optab (void);
109 static convert_optab
new_convert_optab (void);
110 static inline optab
init_optab (enum rtx_code
);
111 static inline optab
init_optabv (enum rtx_code
);
112 static inline convert_optab
init_convert_optab (enum rtx_code
);
113 static void init_libfuncs (optab
, int, int, const char *, int);
114 static void init_integral_libfuncs (optab
, const char *, int);
115 static void init_floating_libfuncs (optab
, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
117 enum mode_class
, enum mode_class
);
118 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
119 enum mode_class
, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
121 enum rtx_code
, int, rtx
);
122 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
123 enum machine_mode
*, int *);
124 static rtx
expand_vector_binop (enum machine_mode
, optab
, rtx
, rtx
, rtx
, int,
126 static rtx
expand_vector_unop (enum machine_mode
, optab
, rtx
, rtx
, int);
127 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
128 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
/* Fallbacks so references compile on targets lacking a conditional trap
   pattern; the gen_ macro aborts because it must never be reached.  */
130 #ifndef HAVE_conditional_trap
131 #define HAVE_conditional_trap 0
132 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
/* NOTE(review): incomplete extraction of add_equal_note -- the embedded
   original line numbers skip (147, 149-152, 154-155, 158-159, 161-162,
   166-167, 169-171, 176-177, 182, 185, 187-188, 190-192, 195, 197, 199+
   are missing), so braces and the early-return bodies are absent here.
   Code is left byte-identical; consult upstream before editing.  */
135 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
136 the result of operation CODE applied to OP0 (and OP1 if it is a binary
139 If the last insn does not set TARGET, don't do anything, but return 1.
141 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
142 don't add the REG_EQUAL note but return 0. Our caller can then try
143 again, ensuring that TARGET is not one of the operands. */
146 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
148 rtx last_insn
, insn
, set
;
/* Bail out on a degenerate (single-insn or empty) sequence.  */
153 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only unary ('1'), binary ('2'), commutative ('c') and comparison ('<')
   codes can be expressed as a REG_EQUAL note.  */
156 if (GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
157 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
160 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Walk to the last insn of the sequence.  */
163 for (last_insn
= insns
;
164 NEXT_INSN (last_insn
) != NULL_RTX
;
165 last_insn
= NEXT_INSN (last_insn
))
168 set
= single_set (last_insn
)
172 if (! rtx_equal_p (SET_DEST (set
), target
)
173 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
174 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
175 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
178 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
179 besides the last insn. */
180 if (reg_overlap_mentioned_p (target
, op0
)
181 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan backwards from the last insn for an earlier set of TARGET.  */
183 insn
= PREV_INSN (last_insn
);
184 while (insn
!= NULL_RTX
)
186 if (reg_set_p (target
, insn
))
189 insn
= PREV_INSN (insn
);
/* Build the note: unary codes take one operand, all others two.  */
193 if (GET_RTX_CLASS (code
) == '1')
194 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
196 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
198 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
/* NOTE(review): incomplete extraction of widen_operand -- the "static rtx"
   return-type line, the local declaration of `result`, the `return` of the
   final path and several condition lines (orig. 208-209, 212-214, 217-218,
   222, 228, 231, 233-234, 238-240) are missing.  Code left byte-identical.  */
203 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
204 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
205 not actually do a sign-extend or zero-extend, but can leave the
206 higher-order bits of the result rtx undefined, for example, in the case
207 of logical operations, but not right shifts. */
210 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
211 int unsignedp
, int no_extend
)
215 /* If we don't have to extend and this is a constant, return it. */
216 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
219 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
220 extend since it will be more efficient to do so unless the signedness of
221 a promoted object differs from our extension. */
223 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
224 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
225 return convert_modes (mode
, oldmode
, op
, unsignedp
);
227 /* If MODE is no wider than a single word, we return a paradoxical
229 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
230 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
232 /* Otherwise, get an object of MODE, clobber it, and set the low-order
235 result
= gen_reg_rtx (mode
);
/* The CLOBBER tells dataflow the whole reg is set, not just the low part.  */
236 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
237 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
/* NOTE(review): incomplete extraction of expand_cmplxdiv_straight -- local
   declarations (res, divisor, real_t, imag_t), braces, the `return 0` error
   paths after the null checks, and the final `return 1` are all on missing
   original lines.  Code left byte-identical; restore from upstream GCC
   before editing.  For (a+ib)/(c+id) this computes the textbook formula
   ((ac+bd) + i(bc-ad)) / (cc+dd), with the imaginary-input-zero shortcut.  */
241 /* Generate code to perform a straightforward complex divide. */
244 expand_cmplxdiv_straight (rtx real0
, rtx real1
, rtx imag0
, rtx imag1
,
245 rtx realr
, rtx imagr
, enum machine_mode submode
,
246 int unsignedp
, enum optab_methods methods
,
247 enum mode_class
class, optab binoptab
)
/* Select the trapping-on-overflow optab variants for sdivv_optab.  */
253 optab this_add_optab
= add_optab
;
254 optab this_sub_optab
= sub_optab
;
255 optab this_neg_optab
= neg_optab
;
256 optab this_mul_optab
= smul_optab
;
258 if (binoptab
== sdivv_optab
)
260 this_add_optab
= addv_optab
;
261 this_sub_optab
= subv_optab
;
262 this_neg_optab
= negv_optab
;
263 this_mul_optab
= smulv_optab
;
266 /* Don't fetch these from memory more than once. */
267 real0
= force_reg (submode
, real0
);
268 real1
= force_reg (submode
, real1
);
271 imag0
= force_reg (submode
, imag0
);
273 imag1
= force_reg (submode
, imag1
);
275 /* Divisor: c*c + d*d. */
276 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
277 NULL_RTX
, unsignedp
, methods
);
279 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
280 NULL_RTX
, unsignedp
, methods
);
282 if (temp1
== 0 || temp2
== 0)
285 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
286 NULL_RTX
, unsignedp
, methods
);
/* Pure-real numerator case (imag0 absent/zero, per orig. comment below).  */
292 /* Mathematically, ((a)(c-id))/divisor. */
293 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
295 /* Calculate the dividend. */
296 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
297 NULL_RTX
, unsignedp
, methods
);
299 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
300 NULL_RTX
, unsignedp
, methods
);
302 if (real_t
== 0 || imag_t
== 0)
305 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
306 NULL_RTX
, unsignedp
);
/* General case: full complex numerator.  */
310 /* Mathematically, ((a+ib)(c-id))/divider. */
311 /* Calculate the dividend. */
312 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
313 NULL_RTX
, unsignedp
, methods
);
315 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
316 NULL_RTX
, unsignedp
, methods
);
318 if (temp1
== 0 || temp2
== 0)
321 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
322 NULL_RTX
, unsignedp
, methods
);
324 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
325 NULL_RTX
, unsignedp
, methods
);
327 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
328 NULL_RTX
, unsignedp
, methods
);
330 if (temp1
== 0 || temp2
== 0)
333 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
334 NULL_RTX
, unsignedp
, methods
);
336 if (real_t
== 0 || imag_t
== 0)
/* Final divides: float modes use the division optab directly, integer
   modes go through expand_divmod with truncating division.  */
340 if (class == MODE_COMPLEX_FLOAT
)
341 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
342 realr
, unsignedp
, methods
);
344 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
345 real_t
, divisor
, realr
, unsignedp
);
351 emit_move_insn (realr
, res
);
353 if (class == MODE_COMPLEX_FLOAT
)
354 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
355 imagr
, unsignedp
, methods
);
357 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
358 imag_t
, divisor
, imagr
, unsignedp
);
364 emit_move_insn (imagr
, res
);
/* NOTE(review): incomplete extraction of expand_cmplxdiv_wide -- the final
   `optab binoptab` parameter continuation, local declarations (res, ratio,
   divisor, real_t, imag_t), braces, the error-return bodies, the emitted
   label placements (emit_label for lab1/lab2) and `return 1` are all on
   missing original lines.  Code left byte-identical; restore from upstream
   GCC before editing.  This is Smith's algorithm: scale by the ratio of the
   smaller to the larger of |c|,|d| to avoid overflow/underflow in cc+dd,
   branching on |c| < |d| at lab1 and rejoining at lab2.  */
369 /* Generate code to perform a wide-input-range-acceptable complex divide. */
372 expand_cmplxdiv_wide (rtx real0
, rtx real1
, rtx imag0
, rtx imag1
, rtx realr
,
373 rtx imagr
, enum machine_mode submode
, int unsignedp
,
374 enum optab_methods methods
, enum mode_class
class,
379 rtx temp1
, temp2
, lab1
, lab2
;
380 enum machine_mode mode
;
/* Select the trapping-on-overflow optab variants for sdivv_optab.  */
382 optab this_add_optab
= add_optab
;
383 optab this_sub_optab
= sub_optab
;
384 optab this_neg_optab
= neg_optab
;
385 optab this_mul_optab
= smul_optab
;
387 if (binoptab
== sdivv_optab
)
389 this_add_optab
= addv_optab
;
390 this_sub_optab
= subv_optab
;
391 this_neg_optab
= negv_optab
;
392 this_mul_optab
= smulv_optab
;
395 /* Don't fetch these from memory more than once. */
396 real0
= force_reg (submode
, real0
);
397 real1
= force_reg (submode
, real1
);
400 imag0
= force_reg (submode
, imag0
);
402 imag1
= force_reg (submode
, imag1
);
404 /* XXX What's an "unsigned" complex number? */
/* Compare |c| and |d| to pick the scaling ratio.  */
412 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
413 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
416 if (temp1
== 0 || temp2
== 0)
419 mode
= GET_MODE (temp1
);
420 lab1
= gen_label_rtx ();
421 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
422 mode
, unsignedp
, lab1
);
424 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
426 if (class == MODE_COMPLEX_FLOAT
)
427 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
428 NULL_RTX
, unsignedp
, methods
);
430 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
431 imag1
, real1
, NULL_RTX
, unsignedp
);
436 /* Calculate divisor. */
438 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
439 NULL_RTX
, unsignedp
, methods
);
444 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
445 NULL_RTX
, unsignedp
, methods
);
450 /* Calculate dividend. */
/* Pure-real numerator shortcut.  */
456 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
458 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
459 NULL_RTX
, unsignedp
, methods
);
464 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
465 NULL_RTX
, unsignedp
);
467 if (real_t
== 0 || imag_t
== 0)
472 /* Compute (a+ib)/(c+id) as
473 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
475 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
476 NULL_RTX
, unsignedp
, methods
);
481 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
482 NULL_RTX
, unsignedp
, methods
);
484 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
485 NULL_RTX
, unsignedp
, methods
);
490 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
491 NULL_RTX
, unsignedp
, methods
);
493 if (real_t
== 0 || imag_t
== 0)
497 if (class == MODE_COMPLEX_FLOAT
)
498 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
499 realr
, unsignedp
, methods
);
501 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
502 real_t
, divisor
, realr
, unsignedp
);
508 emit_move_insn (realr
, res
);
510 if (class == MODE_COMPLEX_FLOAT
)
511 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
512 imagr
, unsignedp
, methods
);
514 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
515 imag_t
, divisor
, imagr
, unsignedp
);
521 emit_move_insn (imagr
, res
);
/* Jump over the |d| > |c| arm; its target (lab1) is emitted on a missing
   original line before the code below.  */
523 lab2
= gen_label_rtx ();
524 emit_jump_insn (gen_jump (lab2
));
529 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
531 if (class == MODE_COMPLEX_FLOAT
)
532 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
533 NULL_RTX
, unsignedp
, methods
);
535 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
536 real1
, imag1
, NULL_RTX
, unsignedp
);
541 /* Calculate divisor. */
543 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
544 NULL_RTX
, unsignedp
, methods
);
549 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
550 NULL_RTX
, unsignedp
, methods
);
555 /* Calculate dividend. */
559 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
561 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
562 NULL_RTX
, unsignedp
, methods
);
564 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
565 NULL_RTX
, unsignedp
);
567 if (real_t
== 0 || imag_t
== 0)
572 /* Compute (a+ib)/(c+id) as
573 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
575 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
576 NULL_RTX
, unsignedp
, methods
);
581 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
582 NULL_RTX
, unsignedp
, methods
);
584 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
585 NULL_RTX
, unsignedp
, methods
);
590 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
591 NULL_RTX
, unsignedp
, methods
);
593 if (real_t
== 0 || imag_t
== 0)
597 if (class == MODE_COMPLEX_FLOAT
)
598 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
599 realr
, unsignedp
, methods
);
601 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
602 real_t
, divisor
, realr
, unsignedp
);
608 emit_move_insn (realr
, res
);
610 if (class == MODE_COMPLEX_FLOAT
)
611 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
612 imagr
, unsignedp
, methods
);
614 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
615 imag_t
, divisor
, imagr
, unsignedp
);
621 emit_move_insn (imagr
, res
);
/* NOTE(review): incomplete extraction of expand_simple_binop -- the `rtx`
   return-type line (orig. 631), braces, and the abort-if-no-optab check
   (orig. 637-639) are on missing lines.  Code left byte-identical.
   Maps CODE to its optab via the code_to_optab table defined above, then
   delegates to expand_binop with identical arguments.  */
628 /* Wrapper around expand_binop which takes an rtx code to specify
629 the operation to perform, not an optab pointer. All other
630 arguments are the same. */
632 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
633 rtx op1
, rtx target
, int unsignedp
,
634 enum optab_methods methods
)
636 optab binop
= code_to_optab
[(int) code
];
640 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
643 /* Generate code to perform an operation specified by BINOPTAB
644 on operands OP0 and OP1, with result having machine-mode MODE.
646 UNSIGNEDP is for the case where we have to widen the operands
647 to perform the operation. It says to use zero-extension.
649 If TARGET is nonzero, the value
650 is generated there, if it is convenient to do so.
651 In all cases an rtx is returned for the locus of the value;
652 this may or may not be TARGET. */
655 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
656 rtx target
, int unsignedp
, enum optab_methods methods
)
658 enum optab_methods next_methods
659 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
660 ? OPTAB_WIDEN
: methods
);
661 enum mode_class
class;
662 enum machine_mode wider_mode
;
664 int commutative_op
= 0;
665 int shift_op
= (binoptab
->code
== ASHIFT
666 || binoptab
->code
== ASHIFTRT
667 || binoptab
->code
== LSHIFTRT
668 || binoptab
->code
== ROTATE
669 || binoptab
->code
== ROTATERT
);
670 rtx entry_last
= get_last_insn ();
673 class = GET_MODE_CLASS (mode
);
675 op0
= protect_from_queue (op0
, 0);
676 op1
= protect_from_queue (op1
, 0);
678 target
= protect_from_queue (target
, 1);
682 /* Load duplicate non-volatile operands once. */
683 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
685 op0
= force_not_mem (op0
);
690 op0
= force_not_mem (op0
);
691 op1
= force_not_mem (op1
);
695 /* If subtracting an integer constant, convert this into an addition of
696 the negated constant. */
698 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
700 op1
= negate_rtx (mode
, op1
);
701 binoptab
= add_optab
;
704 /* If we are inside an appropriately-short loop and one operand is an
705 expensive constant, force it into a register. */
706 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
707 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
708 op0
= force_reg (mode
, op0
);
710 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
711 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
712 op1
= force_reg (mode
, op1
);
714 /* Record where to delete back to if we backtrack. */
715 last
= get_last_insn ();
717 /* If operation is commutative,
718 try to make the first operand a register.
719 Even better, try to make it the same as the target.
720 Also try to make the last operand a constant. */
721 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
722 || binoptab
== smul_widen_optab
723 || binoptab
== umul_widen_optab
724 || binoptab
== smul_highpart_optab
725 || binoptab
== umul_highpart_optab
)
729 if (((target
== 0 || GET_CODE (target
) == REG
)
730 ? ((GET_CODE (op1
) == REG
731 && GET_CODE (op0
) != REG
)
733 : rtx_equal_p (op1
, target
))
734 || GET_CODE (op0
) == CONST_INT
)
742 /* If we can do it with a three-operand insn, do so. */
744 if (methods
!= OPTAB_MUST_WIDEN
745 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
747 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
748 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
749 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
751 rtx xop0
= op0
, xop1
= op1
;
756 temp
= gen_reg_rtx (mode
);
758 /* If it is a commutative operator and the modes would match
759 if we would swap the operands, we can save the conversions. */
762 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
763 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
767 tmp
= op0
; op0
= op1
; op1
= tmp
;
768 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
772 /* In case the insn wants input operands in modes different from
773 those of the actual operands, convert the operands. It would
774 seem that we don't need to convert CONST_INTs, but we do, so
775 that they're properly zero-extended, sign-extended or truncated
778 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
779 xop0
= convert_modes (mode0
,
780 GET_MODE (op0
) != VOIDmode
785 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
786 xop1
= convert_modes (mode1
,
787 GET_MODE (op1
) != VOIDmode
792 /* Now, if insn's predicates don't allow our operands, put them into
795 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
796 && mode0
!= VOIDmode
)
797 xop0
= copy_to_mode_reg (mode0
, xop0
);
799 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
800 && mode1
!= VOIDmode
)
801 xop1
= copy_to_mode_reg (mode1
, xop1
);
803 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
804 temp
= gen_reg_rtx (mode
);
806 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
809 /* If PAT is composed of more than one insn, try to add an appropriate
810 REG_EQUAL note to it. If we can't because TEMP conflicts with an
811 operand, call ourselves again, this time without a target. */
812 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
813 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
815 delete_insns_since (last
);
816 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
824 delete_insns_since (last
);
827 /* If this is a multiply, see if we can do a widening operation that
828 takes operands of this mode and makes a wider mode. */
830 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
831 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
832 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
833 != CODE_FOR_nothing
))
835 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
836 unsignedp
? umul_widen_optab
: smul_widen_optab
,
837 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
841 if (GET_MODE_CLASS (mode
) == MODE_INT
)
842 return gen_lowpart (mode
, temp
);
844 return convert_to_mode (mode
, temp
, unsignedp
);
848 /* Look for a wider mode of the same class for which we think we
849 can open-code the operation. Check for a widening multiply at the
850 wider mode as well. */
852 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
853 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
854 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
855 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
857 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
858 || (binoptab
== smul_optab
859 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
860 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
861 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
862 != CODE_FOR_nothing
)))
864 rtx xop0
= op0
, xop1
= op1
;
867 /* For certain integer operations, we need not actually extend
868 the narrow operands, as long as we will truncate
869 the results to the same narrowness. */
871 if ((binoptab
== ior_optab
|| binoptab
== and_optab
872 || binoptab
== xor_optab
873 || binoptab
== add_optab
|| binoptab
== sub_optab
874 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
875 && class == MODE_INT
)
878 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
880 /* The second operand of a shift must always be extended. */
881 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
882 no_extend
&& binoptab
!= ashl_optab
);
884 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
885 unsignedp
, OPTAB_DIRECT
);
888 if (class != MODE_INT
)
891 target
= gen_reg_rtx (mode
);
892 convert_move (target
, temp
, 0);
896 return gen_lowpart (mode
, temp
);
899 delete_insns_since (last
);
903 /* These can be done a word at a time. */
904 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
906 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
907 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
913 /* If TARGET is the same as one of the operands, the REG_EQUAL note
914 won't be accurate, so use a new target. */
915 if (target
== 0 || target
== op0
|| target
== op1
)
916 target
= gen_reg_rtx (mode
);
920 /* Do the actual arithmetic. */
921 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
923 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
924 rtx x
= expand_binop (word_mode
, binoptab
,
925 operand_subword_force (op0
, i
, mode
),
926 operand_subword_force (op1
, i
, mode
),
927 target_piece
, unsignedp
, next_methods
);
932 if (target_piece
!= x
)
933 emit_move_insn (target_piece
, x
);
936 insns
= get_insns ();
939 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
941 if (binoptab
->code
!= UNKNOWN
)
943 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
944 copy_rtx (op0
), copy_rtx (op1
));
948 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
953 /* Synthesize double word shifts from single word shifts. */
954 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
955 || binoptab
== ashr_optab
)
957 && GET_CODE (op1
) == CONST_INT
958 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
959 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
960 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
961 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
963 rtx insns
, inter
, equiv_value
;
964 rtx into_target
, outof_target
;
965 rtx into_input
, outof_input
;
966 int shift_count
, left_shift
, outof_word
;
968 /* If TARGET is the same as one of the operands, the REG_EQUAL note
969 won't be accurate, so use a new target. */
970 if (target
== 0 || target
== op0
|| target
== op1
)
971 target
= gen_reg_rtx (mode
);
975 shift_count
= INTVAL (op1
);
977 /* OUTOF_* is the word we are shifting bits away from, and
978 INTO_* is the word that we are shifting bits towards, thus
979 they differ depending on the direction of the shift and
982 left_shift
= binoptab
== ashl_optab
;
983 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
985 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
986 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
988 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
989 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
991 if (shift_count
>= BITS_PER_WORD
)
993 inter
= expand_binop (word_mode
, binoptab
,
995 GEN_INT (shift_count
- BITS_PER_WORD
),
996 into_target
, unsignedp
, next_methods
);
998 if (inter
!= 0 && inter
!= into_target
)
999 emit_move_insn (into_target
, inter
);
1001 /* For a signed right shift, we must fill the word we are shifting
1002 out of with copies of the sign bit. Otherwise it is zeroed. */
1003 if (inter
!= 0 && binoptab
!= ashr_optab
)
1004 inter
= CONST0_RTX (word_mode
);
1005 else if (inter
!= 0)
1006 inter
= expand_binop (word_mode
, binoptab
,
1008 GEN_INT (BITS_PER_WORD
- 1),
1009 outof_target
, unsignedp
, next_methods
);
1011 if (inter
!= 0 && inter
!= outof_target
)
1012 emit_move_insn (outof_target
, inter
);
1017 optab reverse_unsigned_shift
, unsigned_shift
;
1019 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1020 we must do a logical shift in the opposite direction of the
1023 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1025 /* For a shift of less than BITS_PER_WORD, to compute the word
1026 shifted towards, we need to unsigned shift the orig value of
1029 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1031 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1033 GEN_INT (BITS_PER_WORD
- shift_count
),
1034 0, unsignedp
, next_methods
);
1039 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1040 op1
, 0, unsignedp
, next_methods
);
1043 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1044 into_target
, unsignedp
, next_methods
);
1046 if (inter
!= 0 && inter
!= into_target
)
1047 emit_move_insn (into_target
, inter
);
1050 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1051 op1
, outof_target
, unsignedp
, next_methods
);
1053 if (inter
!= 0 && inter
!= outof_target
)
1054 emit_move_insn (outof_target
, inter
);
1057 insns
= get_insns ();
1062 if (binoptab
->code
!= UNKNOWN
)
1063 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1067 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1072 /* Synthesize double word rotates from single word shifts. */
1073 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1074 && class == MODE_INT
1075 && GET_CODE (op1
) == CONST_INT
1076 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1077 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1078 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1080 rtx insns
, equiv_value
;
1081 rtx into_target
, outof_target
;
1082 rtx into_input
, outof_input
;
1084 int shift_count
, left_shift
, outof_word
;
1086 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1087 won't be accurate, so use a new target. */
1088 if (target
== 0 || target
== op0
|| target
== op1
)
1089 target
= gen_reg_rtx (mode
);
1093 shift_count
= INTVAL (op1
);
1095 /* OUTOF_* is the word we are shifting bits away from, and
1096 INTO_* is the word that we are shifting bits towards, thus
1097 they differ depending on the direction of the shift and
1098 WORDS_BIG_ENDIAN. */
1100 left_shift
= (binoptab
== rotl_optab
);
1101 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1103 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1104 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1106 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1107 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1109 if (shift_count
== BITS_PER_WORD
)
1111 /* This is just a word swap. */
1112 emit_move_insn (outof_target
, into_input
);
1113 emit_move_insn (into_target
, outof_input
);
1118 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1119 rtx first_shift_count
, second_shift_count
;
1120 optab reverse_unsigned_shift
, unsigned_shift
;
1122 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1123 ? lshr_optab
: ashl_optab
);
1125 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1126 ? ashl_optab
: lshr_optab
);
1128 if (shift_count
> BITS_PER_WORD
)
1130 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1131 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1135 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1136 second_shift_count
= GEN_INT (shift_count
);
1139 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1140 outof_input
, first_shift_count
,
1141 NULL_RTX
, unsignedp
, next_methods
);
1142 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1143 into_input
, second_shift_count
,
1144 NULL_RTX
, unsignedp
, next_methods
);
1146 if (into_temp1
!= 0 && into_temp2
!= 0)
1147 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1148 into_target
, unsignedp
, next_methods
);
1152 if (inter
!= 0 && inter
!= into_target
)
1153 emit_move_insn (into_target
, inter
);
1155 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1156 into_input
, first_shift_count
,
1157 NULL_RTX
, unsignedp
, next_methods
);
1158 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1159 outof_input
, second_shift_count
,
1160 NULL_RTX
, unsignedp
, next_methods
);
1162 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1163 inter
= expand_binop (word_mode
, ior_optab
,
1164 outof_temp1
, outof_temp2
,
1165 outof_target
, unsignedp
, next_methods
);
1167 if (inter
!= 0 && inter
!= outof_target
)
1168 emit_move_insn (outof_target
, inter
);
1171 insns
= get_insns ();
1176 if (binoptab
->code
!= UNKNOWN
)
1177 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1181 /* We can't make this a no conflict block if this is a word swap,
1182 because the word swap case fails if the input and output values
1183 are in the same register. */
1184 if (shift_count
!= BITS_PER_WORD
)
1185 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1194 /* These can be done a word at a time by propagating carries. */
1195 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1196 && class == MODE_INT
1197 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1198 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1201 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1202 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1203 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1204 rtx xop0
, xop1
, xtarget
;
1206 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1207 value is one of those, use it. Otherwise, use 1 since it is the
1208 one easiest to get. */
1209 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1210 int normalizep
= STORE_FLAG_VALUE
;
1215 /* Prepare the operands. */
1216 xop0
= force_reg (mode
, op0
);
1217 xop1
= force_reg (mode
, op1
);
1219 xtarget
= gen_reg_rtx (mode
);
1221 if (target
== 0 || GET_CODE (target
) != REG
)
1224 /* Indicate for flow that the entire target reg is being set. */
1225 if (GET_CODE (target
) == REG
)
1226 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1228 /* Do the actual arithmetic. */
1229 for (i
= 0; i
< nwords
; i
++)
1231 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1232 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1233 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1234 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1237 /* Main add/subtract of the input operands. */
1238 x
= expand_binop (word_mode
, binoptab
,
1239 op0_piece
, op1_piece
,
1240 target_piece
, unsignedp
, next_methods
);
1246 /* Store carry from main add/subtract. */
1247 carry_out
= gen_reg_rtx (word_mode
);
1248 carry_out
= emit_store_flag_force (carry_out
,
1249 (binoptab
== add_optab
1252 word_mode
, 1, normalizep
);
1259 /* Add/subtract previous carry to main result. */
1260 newx
= expand_binop (word_mode
,
1261 normalizep
== 1 ? binoptab
: otheroptab
,
1263 NULL_RTX
, 1, next_methods
);
1267 /* Get out carry from adding/subtracting carry in. */
1268 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1269 carry_tmp
= emit_store_flag_force (carry_tmp
,
1270 (binoptab
== add_optab
1273 word_mode
, 1, normalizep
);
1275 /* Logical-ior the two poss. carry together. */
1276 carry_out
= expand_binop (word_mode
, ior_optab
,
1277 carry_out
, carry_tmp
,
1278 carry_out
, 0, next_methods
);
1282 emit_move_insn (target_piece
, newx
);
1285 carry_in
= carry_out
;
1288 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1290 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1291 || ! rtx_equal_p (target
, xtarget
))
1293 rtx temp
= emit_move_insn (target
, xtarget
);
1295 set_unique_reg_note (temp
,
1297 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1308 delete_insns_since (last
);
1311 /* If we want to multiply two two-word values and have normal and widening
1312 multiplies of single-word values, we can do this with three smaller
1313 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1314 because we are not operating on one word at a time.
1316 The multiplication proceeds as follows:
1317 _______________________
1318 [__op0_high_|__op0_low__]
1319 _______________________
1320 * [__op1_high_|__op1_low__]
1321 _______________________________________________
1322 _______________________
1323 (1) [__op0_low__*__op1_low__]
1324 _______________________
1325 (2a) [__op0_low__*__op1_high_]
1326 _______________________
1327 (2b) [__op0_high_*__op1_low__]
1328 _______________________
1329 (3) [__op0_high_*__op1_high_]
1332 This gives a 4-word result. Since we are only interested in the
1333 lower 2 words, partial result (3) and the upper words of (2a) and
1334 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1335 calculated using non-widening multiplication.
1337 (1), however, needs to be calculated with an unsigned widening
1338 multiplication. If this operation is not directly supported we
1339 try using a signed widening multiplication and adjust the result.
1340 This adjustment works as follows:
1342 If both operands are positive then no adjustment is needed.
1344 If the operands have different signs, for example op0_low < 0 and
1345 op1_low >= 0, the instruction treats the most significant bit of
1346 op0_low as a sign bit instead of a bit with significance
1347 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1348 with 2**BITS_PER_WORD - op0_low, and two's complements the
1349 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1352 Similarly, if both operands are negative, we need to add
1353 (op0_low + op1_low) * 2**BITS_PER_WORD.
1355 We use a trick to adjust quickly. We logically shift op0_low right
1356 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1357 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1358 logical shift exists, we do an arithmetic right shift and subtract
1361 if (binoptab
== smul_optab
1362 && class == MODE_INT
1363 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1364 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1365 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1366 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1367 != CODE_FOR_nothing
)
1368 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1369 != CODE_FOR_nothing
)))
1371 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1372 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1373 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1374 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1375 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1376 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1378 rtx op0_xhigh
= NULL_RTX
;
1379 rtx op1_xhigh
= NULL_RTX
;
1381 /* If the target is the same as one of the inputs, don't use it. This
1382 prevents problems with the REG_EQUAL note. */
1383 if (target
== op0
|| target
== op1
1384 || (target
!= 0 && GET_CODE (target
) != REG
))
1387 /* Multiply the two lower words to get a double-word product.
1388 If unsigned widening multiplication is available, use that;
1389 otherwise use the signed form and compensate. */
1391 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1393 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1394 target
, 1, OPTAB_DIRECT
);
1396 /* If we didn't succeed, delete everything we did so far. */
1398 delete_insns_since (last
);
1400 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1404 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1405 != CODE_FOR_nothing
)
1407 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1408 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1409 target
, 1, OPTAB_DIRECT
);
1410 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1411 NULL_RTX
, 1, next_methods
);
1413 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1414 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1417 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1418 NULL_RTX
, 0, next_methods
);
1420 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1421 op0_xhigh
, op0_xhigh
, 0,
1425 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1426 NULL_RTX
, 1, next_methods
);
1428 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1429 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1432 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1433 NULL_RTX
, 0, next_methods
);
1435 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1436 op1_xhigh
, op1_xhigh
, 0,
1441 /* If we have been able to directly compute the product of the
1442 low-order words of the operands and perform any required adjustments
1443 of the operands, we proceed by trying two more multiplications
1444 and then computing the appropriate sum.
1446 We have checked above that the required addition is provided.
1447 Full-word addition will normally always succeed, especially if
1448 it is provided at all, so we don't worry about its failure. The
1449 multiplication may well fail, however, so we do handle that. */
1451 if (product
&& op0_xhigh
&& op1_xhigh
)
1453 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1454 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1455 NULL_RTX
, 0, OPTAB_DIRECT
);
1457 if (!REG_P (product_high
))
1458 product_high
= force_reg (word_mode
, product_high
);
1461 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1462 product_high
, 0, next_methods
);
1464 if (temp
!= 0 && temp
!= product_high
)
1465 emit_move_insn (product_high
, temp
);
1468 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1469 NULL_RTX
, 0, OPTAB_DIRECT
);
1472 temp
= expand_binop (word_mode
, add_optab
, temp
,
1473 product_high
, product_high
,
1476 if (temp
!= 0 && temp
!= product_high
)
1477 emit_move_insn (product_high
, temp
);
1479 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1483 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1485 temp
= emit_move_insn (product
, product
);
1486 set_unique_reg_note (temp
,
1488 gen_rtx_fmt_ee (MULT
, mode
,
1497 /* If we get here, we couldn't do it for some reason even though we
1498 originally thought we could. Delete anything we've emitted in
1501 delete_insns_since (last
);
1504 /* Open-code the vector operations if we have no hardware support
1506 if (class == MODE_VECTOR_INT
|| class == MODE_VECTOR_FLOAT
)
1507 return expand_vector_binop (mode
, binoptab
, op0
, op1
, target
,
1508 unsignedp
, methods
);
1510 /* We need to open-code the complex type operations: '+, -, * and /' */
1512 /* At this point we allow operations between two similar complex
1513 numbers, and also if one of the operands is not a complex number
1514 but rather of MODE_FLOAT or MODE_INT. However, the caller
1515 must make sure that the MODE of the non-complex operand matches
1516 the SUBMODE of the complex operand. */
1518 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1520 rtx real0
= 0, imag0
= 0;
1521 rtx real1
= 0, imag1
= 0;
1522 rtx realr
, imagr
, res
;
1526 /* Find the correct mode for the real and imaginary parts. */
1527 enum machine_mode submode
= GET_MODE_INNER (mode
);
1529 if (submode
== BLKmode
)
1534 if (GET_MODE (op0
) == mode
)
1536 real0
= gen_realpart (submode
, op0
);
1537 imag0
= gen_imagpart (submode
, op0
);
1542 if (GET_MODE (op1
) == mode
)
1544 real1
= gen_realpart (submode
, op1
);
1545 imag1
= gen_imagpart (submode
, op1
);
1550 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0 || imag1
!= 0))
1553 result
= gen_reg_rtx (mode
);
1554 realr
= gen_realpart (submode
, result
);
1555 imagr
= gen_imagpart (submode
, result
);
1557 switch (binoptab
->code
)
1560 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1562 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1563 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1564 realr
, unsignedp
, methods
);
1568 else if (res
!= realr
)
1569 emit_move_insn (realr
, res
);
1571 if (imag0
!= 0 && imag1
!= 0)
1572 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1573 imagr
, unsignedp
, methods
);
1574 else if (imag0
!= 0)
1576 else if (binoptab
->code
== MINUS
)
1577 res
= expand_unop (submode
,
1578 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1579 imag1
, imagr
, unsignedp
);
1585 else if (res
!= imagr
)
1586 emit_move_insn (imagr
, res
);
1592 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1594 if (imag0
!= 0 && imag1
!= 0)
1598 /* Don't fetch these from memory more than once. */
1599 real0
= force_reg (submode
, real0
);
1600 real1
= force_reg (submode
, real1
);
1601 imag0
= force_reg (submode
, imag0
);
1602 imag1
= force_reg (submode
, imag1
);
1604 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1605 unsignedp
, methods
);
1607 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1608 unsignedp
, methods
);
1610 if (temp1
== 0 || temp2
== 0)
1615 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1616 temp1
, temp2
, realr
, unsignedp
, methods
));
1620 else if (res
!= realr
)
1621 emit_move_insn (realr
, res
);
1623 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1624 NULL_RTX
, unsignedp
, methods
);
1626 /* Avoid expanding redundant multiplication for the common
1627 case of squaring a complex number. */
1628 if (rtx_equal_p (real0
, real1
) && rtx_equal_p (imag0
, imag1
))
1631 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1632 NULL_RTX
, unsignedp
, methods
);
1634 if (temp1
== 0 || temp2
== 0)
1639 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1640 temp1
, temp2
, imagr
, unsignedp
, methods
));
1644 else if (res
!= imagr
)
1645 emit_move_insn (imagr
, res
);
1651 /* Don't fetch these from memory more than once. */
1652 real0
= force_reg (submode
, real0
);
1653 real1
= force_reg (submode
, real1
);
1655 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1656 realr
, unsignedp
, methods
);
1659 else if (res
!= realr
)
1660 emit_move_insn (realr
, res
);
1663 res
= expand_binop (submode
, binoptab
,
1664 real1
, imag0
, imagr
, unsignedp
, methods
);
1666 res
= expand_binop (submode
, binoptab
,
1667 real0
, imag1
, imagr
, unsignedp
, methods
);
1671 else if (res
!= imagr
)
1672 emit_move_insn (imagr
, res
);
1679 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1683 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1685 /* Don't fetch these from memory more than once. */
1686 real1
= force_reg (submode
, real1
);
1688 /* Simply divide the real and imaginary parts by `c' */
1689 if (class == MODE_COMPLEX_FLOAT
)
1690 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1691 realr
, unsignedp
, methods
);
1693 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1694 real0
, real1
, realr
, unsignedp
);
1698 else if (res
!= realr
)
1699 emit_move_insn (realr
, res
);
1701 if (class == MODE_COMPLEX_FLOAT
)
1702 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1703 imagr
, unsignedp
, methods
);
1705 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1706 imag0
, real1
, imagr
, unsignedp
);
1710 else if (res
!= imagr
)
1711 emit_move_insn (imagr
, res
);
1717 switch (flag_complex_divide_method
)
1720 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1721 realr
, imagr
, submode
,
1727 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1728 realr
, imagr
, submode
,
1748 rtx equiv
= gen_rtx_fmt_ee (binoptab
->code
, mode
,
1749 copy_rtx (op0
), copy_rtx (op1
));
1750 emit_no_conflict_block (seq
, result
, op0
, op1
, equiv
);
1755 /* It can't be open-coded in this mode.
1756 Use a library call if one is available and caller says that's ok. */
1758 if (binoptab
->handlers
[(int) mode
].libfunc
1759 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1763 enum machine_mode op1_mode
= mode
;
1770 op1_mode
= word_mode
;
1771 /* Specify unsigned here,
1772 since negative shift counts are meaningless. */
1773 op1x
= convert_to_mode (word_mode
, op1
, 1);
1776 if (GET_MODE (op0
) != VOIDmode
1777 && GET_MODE (op0
) != mode
)
1778 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1780 /* Pass 1 for NO_QUEUE so we don't lose any increments
1781 if the libcall is cse'd or moved. */
1782 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1783 NULL_RTX
, LCT_CONST
, mode
, 2,
1784 op0
, mode
, op1x
, op1_mode
);
1786 insns
= get_insns ();
1789 target
= gen_reg_rtx (mode
);
1790 emit_libcall_block (insns
, target
, value
,
1791 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1796 delete_insns_since (last
);
1798 /* It can't be done in this mode. Can we do it in a wider mode? */
1800 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1801 || methods
== OPTAB_MUST_WIDEN
))
1803 /* Caller says, don't even try. */
1804 delete_insns_since (entry_last
);
1808 /* Compute the value of METHODS to pass to recursive calls.
1809 Don't allow widening to be tried recursively. */
1811 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1813 /* Look for a wider mode of the same class for which it appears we can do
1816 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1818 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1819 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1821 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1822 != CODE_FOR_nothing
)
1823 || (methods
== OPTAB_LIB
1824 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1826 rtx xop0
= op0
, xop1
= op1
;
1829 /* For certain integer operations, we need not actually extend
1830 the narrow operands, as long as we will truncate
1831 the results to the same narrowness. */
1833 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1834 || binoptab
== xor_optab
1835 || binoptab
== add_optab
|| binoptab
== sub_optab
1836 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1837 && class == MODE_INT
)
1840 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1841 unsignedp
, no_extend
);
1843 /* The second operand of a shift must always be extended. */
1844 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1845 no_extend
&& binoptab
!= ashl_optab
);
1847 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1848 unsignedp
, methods
);
1851 if (class != MODE_INT
)
1854 target
= gen_reg_rtx (mode
);
1855 convert_move (target
, temp
, 0);
1859 return gen_lowpart (mode
, temp
);
1862 delete_insns_since (last
);
1867 delete_insns_since (entry_last
);
1871 /* Like expand_binop, but for open-coding vectors binops. */
1874 expand_vector_binop (enum machine_mode mode
, optab binoptab
, rtx op0
,
1875 rtx op1
, rtx target
, int unsignedp
,
1876 enum optab_methods methods
)
1878 enum machine_mode submode
, tmode
;
1879 int size
, elts
, subsize
, subbitsize
, i
;
1880 rtx t
, a
, b
, res
, seq
;
1881 enum mode_class
class;
1883 class = GET_MODE_CLASS (mode
);
1885 size
= GET_MODE_SIZE (mode
);
1886 submode
= GET_MODE_INNER (mode
);
1888 /* Search for the widest vector mode with the same inner mode that is
1889 still narrower than MODE and that allows to open-code this operator.
1890 Note, if we find such a mode and the handler later decides it can't
1891 do the expansion, we'll be called recursively with the narrower mode. */
1892 for (tmode
= GET_CLASS_NARROWEST_MODE (class);
1893 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
1894 tmode
= GET_MODE_WIDER_MODE (tmode
))
1896 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
1897 && binoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
1901 switch (binoptab
->code
)
1906 tmode
= int_mode_for_mode (mode
);
1907 if (tmode
!= BLKmode
)
1913 subsize
= GET_MODE_SIZE (submode
);
1914 subbitsize
= GET_MODE_BITSIZE (submode
);
1915 elts
= size
/ subsize
;
1917 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1918 but that we operate on more than one element at a time. */
1919 if (subsize
== GET_MODE_UNIT_SIZE (mode
) && methods
== OPTAB_DIRECT
)
1924 /* Errors can leave us with a const0_rtx as operand. */
1925 if (GET_MODE (op0
) != mode
)
1926 op0
= copy_to_mode_reg (mode
, op0
);
1927 if (GET_MODE (op1
) != mode
)
1928 op1
= copy_to_mode_reg (mode
, op1
);
1931 target
= gen_reg_rtx (mode
);
1933 for (i
= 0; i
< elts
; ++i
)
1935 /* If this is part of a register, and not the first item in the
1936 word, we can't store using a SUBREG - that would clobber
1938 And storing with a SUBREG is only possible for the least
1939 significant part, hence we can't do it for big endian
1940 (unless we want to permute the evaluation order. */
1941 if (GET_CODE (target
) == REG
1942 && (BYTES_BIG_ENDIAN
1943 ? subsize
< UNITS_PER_WORD
1944 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
1947 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
1948 if (CONSTANT_P (op0
))
1949 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
1951 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
1952 NULL_RTX
, submode
, submode
, size
);
1953 if (CONSTANT_P (op1
))
1954 b
= simplify_gen_subreg (submode
, op1
, mode
, i
* subsize
);
1956 b
= extract_bit_field (op1
, subbitsize
, i
* subbitsize
, unsignedp
,
1957 NULL_RTX
, submode
, submode
, size
);
1959 if (binoptab
->code
== DIV
)
1961 if (class == MODE_VECTOR_FLOAT
)
1962 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1963 unsignedp
, methods
);
1965 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1966 a
, b
, t
, unsignedp
);
1969 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1970 unsignedp
, methods
);
1976 emit_move_insn (t
, res
);
1978 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
1994 /* Like expand_unop but for open-coding vector unops. */
1997 expand_vector_unop (enum machine_mode mode
, optab unoptab
, rtx op0
,
1998 rtx target
, int unsignedp
)
2000 enum machine_mode submode
, tmode
;
2001 int size
, elts
, subsize
, subbitsize
, i
;
2004 size
= GET_MODE_SIZE (mode
);
2005 submode
= GET_MODE_INNER (mode
);
2007 /* Search for the widest vector mode with the same inner mode that is
2008 still narrower than MODE and that allows to open-code this operator.
2009 Note, if we find such a mode and the handler later decides it can't
2010 do the expansion, we'll be called recursively with the narrower mode. */
2011 for (tmode
= GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode
));
2012 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
2013 tmode
= GET_MODE_WIDER_MODE (tmode
))
2015 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
2016 && unoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
2019 /* If there is no negate operation, try doing a subtract from zero. */
2020 if (unoptab
== neg_optab
&& GET_MODE_CLASS (submode
) == MODE_INT
2021 /* Avoid infinite recursion when an
2022 error has left us with the wrong mode. */
2023 && GET_MODE (op0
) == mode
)
2026 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2027 target
, unsignedp
, OPTAB_DIRECT
);
2032 if (unoptab
== one_cmpl_optab
)
2034 tmode
= int_mode_for_mode (mode
);
2035 if (tmode
!= BLKmode
)
2039 subsize
= GET_MODE_SIZE (submode
);
2040 subbitsize
= GET_MODE_BITSIZE (submode
);
2041 elts
= size
/ subsize
;
2043 /* Errors can leave us with a const0_rtx as operand. */
2044 if (GET_MODE (op0
) != mode
)
2045 op0
= copy_to_mode_reg (mode
, op0
);
2048 target
= gen_reg_rtx (mode
);
2052 for (i
= 0; i
< elts
; ++i
)
2054 /* If this is part of a register, and not the first item in the
2055 word, we can't store using a SUBREG - that would clobber
2057 And storing with a SUBREG is only possible for the least
2058 significant part, hence we can't do it for big endian
2059 (unless we want to permute the evaluation order. */
2060 if (GET_CODE (target
) == REG
2061 && (BYTES_BIG_ENDIAN
2062 ? subsize
< UNITS_PER_WORD
2063 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
2066 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
2067 if (CONSTANT_P (op0
))
2068 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
2070 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
2071 t
, submode
, submode
, size
);
2073 res
= expand_unop (submode
, unoptab
, a
, t
, unsignedp
);
2076 emit_move_insn (t
, res
);
2078 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2089 /* Expand a binary operator which has both signed and unsigned forms.
2090 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2093 If we widen unsigned operands, we may use a signed wider operation instead
2094 of an unsigned wider operation, since the result would be the same. */
2097 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2098 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2099 enum optab_methods methods
)
2102 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2103 struct optab wide_soptab
;
2105 /* Do it without widening, if possible. */
2106 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2107 unsignedp
, OPTAB_DIRECT
);
2108 if (temp
|| methods
== OPTAB_DIRECT
)
2111 /* Try widening to a signed int. Make a fake signed optab that
2112 hides any signed insn for direct use. */
2113 wide_soptab
= *soptab
;
2114 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2115 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2117 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2118 unsignedp
, OPTAB_WIDEN
);
2120 /* For unsigned operands, try widening to an unsigned int. */
2121 if (temp
== 0 && unsignedp
)
2122 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2123 unsignedp
, OPTAB_WIDEN
);
2124 if (temp
|| methods
== OPTAB_WIDEN
)
2127 /* Use the right width lib call if that exists. */
2128 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2129 if (temp
|| methods
== OPTAB_LIB
)
2132 /* Must widen and use a lib call, use either signed or unsigned. */
2133 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2134 unsignedp
, methods
);
2138 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2139 unsignedp
, methods
);
2143 /* Generate code to perform an operation specified by BINOPTAB
2144 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2145 We assume that the order of the operands for the instruction
2146 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2147 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2149 Either TARG0 or TARG1 may be zero, but what that means is that
2150 the result is not actually wanted. We will generate it into
2151 a dummy pseudo-reg and discard it. They may not both be zero.
2153 Returns 1 if this operation can be performed; 0 if not. */
2156 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2159 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2160 enum mode_class
class;
2161 enum machine_mode wider_mode
;
2162 rtx entry_last
= get_last_insn ();
2165 class = GET_MODE_CLASS (mode
);
2167 op0
= protect_from_queue (op0
, 0);
2168 op1
= protect_from_queue (op1
, 0);
2172 op0
= force_not_mem (op0
);
2173 op1
= force_not_mem (op1
);
2176 /* If we are inside an appropriately-short loop and one operand is an
2177 expensive constant, force it into a register. */
2178 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2179 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2180 op0
= force_reg (mode
, op0
);
2182 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2183 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2184 op1
= force_reg (mode
, op1
);
2187 targ0
= protect_from_queue (targ0
, 1);
2189 targ0
= gen_reg_rtx (mode
);
2191 targ1
= protect_from_queue (targ1
, 1);
2193 targ1
= gen_reg_rtx (mode
);
2195 /* Record where to go back to if we fail. */
2196 last
= get_last_insn ();
2198 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2200 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2201 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2202 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2204 rtx xop0
= op0
, xop1
= op1
;
2206 /* In case the insn wants input operands in modes different from
2207 those of the actual operands, convert the operands. It would
2208 seem that we don't need to convert CONST_INTs, but we do, so
2209 that they're properly zero-extended, sign-extended or truncated
2212 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2213 xop0
= convert_modes (mode0
,
2214 GET_MODE (op0
) != VOIDmode
2219 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2220 xop1
= convert_modes (mode1
,
2221 GET_MODE (op1
) != VOIDmode
2226 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2227 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2228 xop0
= copy_to_mode_reg (mode0
, xop0
);
2230 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2231 xop1
= copy_to_mode_reg (mode1
, xop1
);
2233 /* We could handle this, but we should always be called with a pseudo
2234 for our targets and all insns should take them as outputs. */
2235 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2236 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
2239 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2246 delete_insns_since (last
);
2249 /* It can't be done in this mode. Can we do it in a wider mode? */
2251 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2253 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2254 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2256 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2257 != CODE_FOR_nothing
)
2259 rtx t0
= gen_reg_rtx (wider_mode
);
2260 rtx t1
= gen_reg_rtx (wider_mode
);
2261 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2262 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2264 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2267 convert_move (targ0
, t0
, unsignedp
);
2268 convert_move (targ1
, t1
, unsignedp
);
2272 delete_insns_since (last
);
2277 delete_insns_since (entry_last
);
2281 /* Wrapper around expand_unop which takes an rtx code to specify
2282 the operation to perform, not an optab pointer. All other
2283 arguments are the same. */
2285 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2286 rtx target
, int unsignedp
)
2288 optab unop
= code_to_optab
[(int) code
];
2292 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2298 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2300 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2302 enum mode_class
class = GET_MODE_CLASS (mode
);
2303 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2305 enum machine_mode wider_mode
;
2306 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2307 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2309 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2310 != CODE_FOR_nothing
)
2312 rtx xop0
, temp
, last
;
2314 last
= get_last_insn ();
2317 target
= gen_reg_rtx (mode
);
2318 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2319 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2321 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2322 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2323 - GET_MODE_BITSIZE (mode
)),
2324 target
, true, OPTAB_DIRECT
);
2326 delete_insns_since (last
);
2335 /* Try calculating (parity x) as (and (popcount x) 1), where
2336 popcount can also be done in a wider mode. */
2338 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2340 enum mode_class
class = GET_MODE_CLASS (mode
);
2341 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2343 enum machine_mode wider_mode
;
2344 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2345 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2347 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2348 != CODE_FOR_nothing
)
2350 rtx xop0
, temp
, last
;
2352 last
= get_last_insn ();
2355 target
= gen_reg_rtx (mode
);
2356 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2357 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2360 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2361 target
, true, OPTAB_DIRECT
);
2363 delete_insns_since (last
);
2372 /* Generate code to perform an operation specified by UNOPTAB
2373 on operand OP0, with result having machine-mode MODE.
2375 UNSIGNEDP is for the case where we have to widen the operands
2376 to perform the operation. It says to use zero-extension.
2378 If TARGET is nonzero, the value
2379 is generated there, if it is convenient to do so.
2380 In all cases an rtx is returned for the locus of the value;
2381 this may or may not be TARGET. */
2384 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2387 enum mode_class
class;
2388 enum machine_mode wider_mode
;
2390 rtx last
= get_last_insn ();
2393 class = GET_MODE_CLASS (mode
);
2395 op0
= protect_from_queue (op0
, 0);
2399 op0
= force_not_mem (op0
);
2403 target
= protect_from_queue (target
, 1);
2405 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2407 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2408 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2414 temp
= gen_reg_rtx (mode
);
2416 if (GET_MODE (xop0
) != VOIDmode
2417 && GET_MODE (xop0
) != mode0
)
2418 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2420 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2422 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2423 xop0
= copy_to_mode_reg (mode0
, xop0
);
2425 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2426 temp
= gen_reg_rtx (mode
);
2428 pat
= GEN_FCN (icode
) (temp
, xop0
);
2431 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2432 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2434 delete_insns_since (last
);
2435 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2443 delete_insns_since (last
);
2446 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2448 /* Widening clz needs special treatment. */
2449 if (unoptab
== clz_optab
)
2451 temp
= widen_clz (mode
, op0
, target
);
2458 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2459 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2460 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2462 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2466 /* For certain operations, we need not actually extend
2467 the narrow operand, as long as we will truncate the
2468 results to the same narrowness. */
2470 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2471 (unoptab
== neg_optab
2472 || unoptab
== one_cmpl_optab
)
2473 && class == MODE_INT
);
2475 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2480 if (class != MODE_INT
)
2483 target
= gen_reg_rtx (mode
);
2484 convert_move (target
, temp
, 0);
2488 return gen_lowpart (mode
, temp
);
2491 delete_insns_since (last
);
2495 /* These can be done a word at a time. */
2496 if (unoptab
== one_cmpl_optab
2497 && class == MODE_INT
2498 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2499 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2504 if (target
== 0 || target
== op0
)
2505 target
= gen_reg_rtx (mode
);
2509 /* Do the actual arithmetic. */
2510 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2512 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2513 rtx x
= expand_unop (word_mode
, unoptab
,
2514 operand_subword_force (op0
, i
, mode
),
2515 target_piece
, unsignedp
);
2517 if (target_piece
!= x
)
2518 emit_move_insn (target_piece
, x
);
2521 insns
= get_insns ();
2524 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2525 gen_rtx_fmt_e (unoptab
->code
, mode
,
2530 /* Open-code the complex negation operation. */
2531 else if (unoptab
->code
== NEG
2532 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2538 /* Find the correct mode for the real and imaginary parts. */
2539 enum machine_mode submode
= GET_MODE_INNER (mode
);
2541 if (submode
== BLKmode
)
2545 target
= gen_reg_rtx (mode
);
2549 target_piece
= gen_imagpart (submode
, target
);
2550 x
= expand_unop (submode
, unoptab
,
2551 gen_imagpart (submode
, op0
),
2552 target_piece
, unsignedp
);
2553 if (target_piece
!= x
)
2554 emit_move_insn (target_piece
, x
);
2556 target_piece
= gen_realpart (submode
, target
);
2557 x
= expand_unop (submode
, unoptab
,
2558 gen_realpart (submode
, op0
),
2559 target_piece
, unsignedp
);
2560 if (target_piece
!= x
)
2561 emit_move_insn (target_piece
, x
);
2566 emit_no_conflict_block (seq
, target
, op0
, 0,
2567 gen_rtx_fmt_e (unoptab
->code
, mode
,
2572 /* Try negating floating point values by flipping the sign bit. */
2573 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2574 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2576 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2577 enum machine_mode imode
= int_mode_for_mode (mode
);
2578 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2580 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2582 HOST_WIDE_INT hi
, lo
;
2583 rtx last
= get_last_insn ();
2585 /* Handle targets with different FP word orders. */
2586 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2588 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2589 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2590 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2593 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2596 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2600 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2603 temp
= expand_binop (imode
, xor_optab
,
2604 gen_lowpart (imode
, op0
),
2605 immed_double_const (lo
, hi
, imode
),
2606 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2611 target
= gen_reg_rtx (mode
);
2612 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2613 set_unique_reg_note (insn
, REG_EQUAL
,
2614 gen_rtx_fmt_e (NEG
, mode
,
2618 delete_insns_since (last
);
2622 /* Try calculating parity (x) as popcount (x) % 2. */
2623 if (unoptab
== parity_optab
)
2625 temp
= expand_parity (mode
, op0
, target
);
2631 /* Now try a library call in this mode. */
2632 if (unoptab
->handlers
[(int) mode
].libfunc
)
2636 enum machine_mode outmode
= mode
;
2638 /* All of these functions return small values. Thus we choose to
2639 have them return something that isn't a double-word. */
2640 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2641 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2643 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2647 /* Pass 1 for NO_QUEUE so we don't lose any increments
2648 if the libcall is cse'd or moved. */
2649 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2650 NULL_RTX
, LCT_CONST
, outmode
,
2652 insns
= get_insns ();
2655 target
= gen_reg_rtx (outmode
);
2656 emit_libcall_block (insns
, target
, value
,
2657 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2662 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2663 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2665 /* It can't be done in this mode. Can we do it in a wider mode? */
2667 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2669 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2670 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2672 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2673 != CODE_FOR_nothing
)
2674 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2678 /* For certain operations, we need not actually extend
2679 the narrow operand, as long as we will truncate the
2680 results to the same narrowness. */
2682 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2683 (unoptab
== neg_optab
2684 || unoptab
== one_cmpl_optab
)
2685 && class == MODE_INT
);
2687 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2690 /* If we are generating clz using wider mode, adjust the
2692 if (unoptab
== clz_optab
&& temp
!= 0)
2693 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2694 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2695 - GET_MODE_BITSIZE (mode
)),
2696 target
, true, OPTAB_DIRECT
);
2700 if (class != MODE_INT
)
2703 target
= gen_reg_rtx (mode
);
2704 convert_move (target
, temp
, 0);
2708 return gen_lowpart (mode
, temp
);
2711 delete_insns_since (last
);
2716 /* If there is no negate operation, try doing a subtract from zero.
2717 The US Software GOFAST library needs this. */
2718 if (unoptab
->code
== NEG
)
2721 temp
= expand_binop (mode
,
2722 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2723 CONST0_RTX (mode
), op0
,
2724 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2732 /* Emit code to compute the absolute value of OP0, with result to
2733 TARGET if convenient. (TARGET may be 0.) The return value says
2734 where the result actually is to be found.
2736 MODE is the mode of the operand; the mode of the result is
2737 different but can be deduced from MODE.
2742 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2743 int result_unsignedp
)
2748 result_unsignedp
= 1;
2750 /* First try to do it with a special abs instruction. */
2751 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2756 /* For floating point modes, try clearing the sign bit. */
2757 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2758 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2760 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2761 enum machine_mode imode
= int_mode_for_mode (mode
);
2762 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2764 if (imode
!= BLKmode
&& bitpos
>= 0)
2766 HOST_WIDE_INT hi
, lo
;
2767 rtx last
= get_last_insn ();
2769 /* Handle targets with different FP word orders. */
2770 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2772 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2773 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2774 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2777 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2780 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2784 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2787 temp
= expand_binop (imode
, and_optab
,
2788 gen_lowpart (imode
, op0
),
2789 immed_double_const (~lo
, ~hi
, imode
),
2790 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2795 target
= gen_reg_rtx (mode
);
2796 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2797 set_unique_reg_note (insn
, REG_EQUAL
,
2798 gen_rtx_fmt_e (ABS
, mode
,
2802 delete_insns_since (last
);
2806 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2807 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2809 rtx last
= get_last_insn ();
2811 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2813 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2819 delete_insns_since (last
);
2822 /* If this machine has expensive jumps, we can do integer absolute
2823 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2824 where W is the width of MODE. */
2826 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2828 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2829 size_int (GET_MODE_BITSIZE (mode
) - 1),
2832 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2835 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2836 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2846 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2847 int result_unsignedp
, int safe
)
2852 result_unsignedp
= 1;
2854 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2858 /* If that does not win, use conditional jump and negate. */
2860 /* It is safe to use the target if it is the same
2861 as the source if this is also a pseudo register */
2862 if (op0
== target
&& GET_CODE (op0
) == REG
2863 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2866 op1
= gen_label_rtx ();
2867 if (target
== 0 || ! safe
2868 || GET_MODE (target
) != mode
2869 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2870 || (GET_CODE (target
) == REG
2871 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2872 target
= gen_reg_rtx (mode
);
2874 emit_move_insn (target
, op0
);
2877 /* If this mode is an integer too wide to compare properly,
2878 compare word by word. Rely on CSE to optimize constant cases. */
2879 if (GET_MODE_CLASS (mode
) == MODE_INT
2880 && ! can_compare_p (GE
, mode
, ccp_jump
))
2881 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2884 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2885 NULL_RTX
, NULL_RTX
, op1
);
2887 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2890 emit_move_insn (target
, op0
);
2896 /* Emit code to compute the absolute value of OP0, with result to
2897 TARGET if convenient. (TARGET may be 0.) The return value says
2898 where the result actually is to be found.
2900 MODE is the mode of the operand; the mode of the result is
2901 different but can be deduced from MODE.
2903 UNSIGNEDP is relevant for complex integer modes. */
2906 expand_complex_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2909 enum mode_class
class = GET_MODE_CLASS (mode
);
2910 enum machine_mode wider_mode
;
2912 rtx entry_last
= get_last_insn ();
2915 optab this_abs_optab
;
2917 /* Find the correct mode for the real and imaginary parts. */
2918 enum machine_mode submode
= GET_MODE_INNER (mode
);
2920 if (submode
== BLKmode
)
2923 op0
= protect_from_queue (op0
, 0);
2927 op0
= force_not_mem (op0
);
2930 last
= get_last_insn ();
2933 target
= protect_from_queue (target
, 1);
2935 this_abs_optab
= ! unsignedp
&& flag_trapv
2936 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2937 ? absv_optab
: abs_optab
;
2939 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2941 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2942 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2948 temp
= gen_reg_rtx (submode
);
2950 if (GET_MODE (xop0
) != VOIDmode
2951 && GET_MODE (xop0
) != mode0
)
2952 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2954 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2956 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2957 xop0
= copy_to_mode_reg (mode0
, xop0
);
2959 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2960 temp
= gen_reg_rtx (submode
);
2962 pat
= GEN_FCN (icode
) (temp
, xop0
);
2965 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2966 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2969 delete_insns_since (last
);
2970 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2979 delete_insns_since (last
);
2982 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2984 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2985 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2987 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2988 != CODE_FOR_nothing
)
2992 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2993 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2997 if (class != MODE_COMPLEX_INT
)
3000 target
= gen_reg_rtx (submode
);
3001 convert_move (target
, temp
, 0);
3005 return gen_lowpart (submode
, temp
);
3008 delete_insns_since (last
);
3012 /* Open-code the complex absolute-value operation
3013 if we can open-code sqrt. Otherwise it's not worth while. */
3014 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
3017 rtx real
, imag
, total
;
3019 real
= gen_realpart (submode
, op0
);
3020 imag
= gen_imagpart (submode
, op0
);
3022 /* Square both parts. */
3023 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
3024 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
3026 /* Sum the parts. */
3027 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
3028 0, OPTAB_LIB_WIDEN
);
3030 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3031 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
3033 delete_insns_since (last
);
3038 /* Now try a library call in this mode. */
3039 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
3046 /* Pass 1 for NO_QUEUE so we don't lose any increments
3047 if the libcall is cse'd or moved. */
3048 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
3049 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
3050 insns
= get_insns ();
3053 target
= gen_reg_rtx (submode
);
3054 emit_libcall_block (insns
, target
, value
,
3055 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
3060 /* It can't be done in this mode. Can we do it in a wider mode? */
3062 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3063 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3065 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3066 != CODE_FOR_nothing
)
3067 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
3071 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3073 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3077 if (class != MODE_COMPLEX_INT
)
3080 target
= gen_reg_rtx (submode
);
3081 convert_move (target
, temp
, 0);
3085 return gen_lowpart (submode
, temp
);
3088 delete_insns_since (last
);
3092 delete_insns_since (entry_last
);
3096 /* Generate an instruction whose insn-code is INSN_CODE,
3097 with two operands: an output TARGET and an input OP0.
3098 TARGET *must* be nonzero, and the output is always stored there.
3099 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3100 the value that is stored into TARGET. */
3103 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
3106 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3109 temp
= target
= protect_from_queue (target
, 1);
3111 op0
= protect_from_queue (op0
, 0);
3113 /* Sign and zero extension from memory is often done specially on
3114 RISC machines, so forcing into a register here can pessimize
3116 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
3117 op0
= force_not_mem (op0
);
3119 /* Now, if insn does not accept our operands, put them into pseudos. */
3121 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
3122 op0
= copy_to_mode_reg (mode0
, op0
);
3124 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
3125 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
3126 temp
= gen_reg_rtx (GET_MODE (temp
));
3128 pat
= GEN_FCN (icode
) (temp
, op0
);
3130 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3131 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3136 emit_move_insn (target
, temp
);
3139 /* Emit code to perform a series of operations on a multi-word quantity, one
3142 Such a block is preceded by a CLOBBER of the output, consists of multiple
3143 insns, each setting one word of the output, and followed by a SET copying
3144 the output to itself.
3146 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3147 note indicating that it doesn't conflict with the (also multi-word)
3148 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3151 INSNS is a block of code generated to perform the operation, not including
3152 the CLOBBER and final copy. All insns that compute intermediate values
3153 are first emitted, followed by the block as described above.
3155 TARGET, OP0, and OP1 are the output and inputs of the operations,
3156 respectively. OP1 may be zero for a unary operation.
3158 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3161 If TARGET is not a register, INSNS is simply emitted with no special
3162 processing. Likewise if anything in INSNS is not an INSN or if
3163 there is a libcall block inside INSNS.
3165 The final insn emitted is returned. */
3168 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3170 rtx prev
, next
, first
, last
, insn
;
3172 if (GET_CODE (target
) != REG
|| reload_in_progress
)
3173 return emit_insn (insns
);
3175 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3176 if (GET_CODE (insn
) != INSN
3177 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3178 return emit_insn (insns
);
3180 /* First emit all insns that do not store into words of the output and remove
3181 these from the list. */
3182 for (insn
= insns
; insn
; insn
= next
)
3187 next
= NEXT_INSN (insn
);
3189 /* Some ports (cris) create a libcall regions at their own. We must
3190 avoid any potential nesting of LIBCALLs. */
3191 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3192 remove_note (insn
, note
);
3193 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3194 remove_note (insn
, note
);
3196 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3197 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3198 set
= PATTERN (insn
);
3199 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3201 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3202 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3204 set
= XVECEXP (PATTERN (insn
), 0, i
);
3212 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3214 if (PREV_INSN (insn
))
3215 NEXT_INSN (PREV_INSN (insn
)) = next
;
3220 PREV_INSN (next
) = PREV_INSN (insn
);
3226 prev
= get_last_insn ();
3228 /* Now write the CLOBBER of the output, followed by the setting of each
3229 of the words, followed by the final copy. */
3230 if (target
!= op0
&& target
!= op1
)
3231 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3233 for (insn
= insns
; insn
; insn
= next
)
3235 next
= NEXT_INSN (insn
);
3238 if (op1
&& GET_CODE (op1
) == REG
)
3239 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3242 if (op0
&& GET_CODE (op0
) == REG
)
3243 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3247 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3248 != CODE_FOR_nothing
)
3250 last
= emit_move_insn (target
, target
);
3252 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3256 last
= get_last_insn ();
3258 /* Remove any existing REG_EQUAL note from "last", or else it will
3259 be mistaken for a note referring to the full contents of the
3260 alleged libcall value when found together with the REG_RETVAL
3261 note added below. An existing note can come from an insn
3262 expansion at "last". */
3263 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3267 first
= get_insns ();
3269 first
= NEXT_INSN (prev
);
3271 /* Encapsulate the block so it gets manipulated as a unit. */
3272 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3274 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3279 /* Emit code to make a call to a constant function or a library call.
3281 INSNS is a list containing all insns emitted in the call.
3282 These insns leave the result in RESULT. Our block is to copy RESULT
3283 to TARGET, which is logically equivalent to EQUIV.
3285 We first emit any insns that set a pseudo on the assumption that these are
3286 loading constants into registers; doing so allows them to be safely cse'ed
3287 between blocks. Then we emit all the other insns in the block, followed by
3288 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3289 note with an operand of EQUIV.
3291 Moving assignments to pseudos outside of the block is done to improve
3292 the generated code, but is not required to generate correct code,
3293 hence being unable to move an assignment is not grounds for not making
3294 a libcall block. There are two reasons why it is safe to leave these
3295 insns inside the block: First, we know that these pseudos cannot be
3296 used in generated RTL outside the block since they are created for
3297 temporary purposes within the block. Second, CSE will not record the
3298 values of anything set inside a libcall block, so we know they must
3299 be dead at the end of the block.
3301 Except for the first group of insns (the ones setting pseudos), the
3302 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3305 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3307 rtx final_dest
= target
;
3308 rtx prev
, next
, first
, last
, insn
;
3310 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3311 into a MEM later. Protect the libcall block from this change. */
3312 if (! REG_P (target
) || REG_USERVAR_P (target
))
3313 target
= gen_reg_rtx (GET_MODE (target
));
3315 /* If we're using non-call exceptions, a libcall corresponding to an
3316 operation that may trap may also trap. */
3317 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3319 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3320 if (GET_CODE (insn
) == CALL_INSN
)
3322 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3324 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3325 remove_note (insn
, note
);
3329 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3330 reg note to indicate that this call cannot throw or execute a nonlocal
3331 goto (unless there is already a REG_EH_REGION note, in which case
3333 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3334 if (GET_CODE (insn
) == CALL_INSN
)
3336 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3339 XEXP (note
, 0) = constm1_rtx
;
3341 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3345 /* First emit all insns that set pseudos. Remove them from the list as
3346 we go. Avoid insns that set pseudos which were referenced in previous
3347 insns. These can be generated by move_by_pieces, for example,
3348 to update an address. Similarly, avoid insns that reference things
3349 set in previous insns. */
3351 for (insn
= insns
; insn
; insn
= next
)
3353 rtx set
= single_set (insn
);
3356 /* Some ports (cris) create a libcall regions at their own. We must
3357 avoid any potential nesting of LIBCALLs. */
3358 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3359 remove_note (insn
, note
);
3360 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3361 remove_note (insn
, note
);
3363 next
= NEXT_INSN (insn
);
3365 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3366 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3368 || ((! INSN_P(insns
)
3369 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3370 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3371 && ! modified_in_p (SET_SRC (set
), insns
)
3372 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3374 if (PREV_INSN (insn
))
3375 NEXT_INSN (PREV_INSN (insn
)) = next
;
3380 PREV_INSN (next
) = PREV_INSN (insn
);
3385 /* Some ports use a loop to copy large arguments onto the stack.
3386 Don't move anything outside such a loop. */
3387 if (GET_CODE (insn
) == CODE_LABEL
)
3391 prev
= get_last_insn ();
3393 /* Write the remaining insns followed by the final copy. */
3395 for (insn
= insns
; insn
; insn
= next
)
3397 next
= NEXT_INSN (insn
);
3402 last
= emit_move_insn (target
, result
);
3403 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3404 != CODE_FOR_nothing
)
3405 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3408 /* Remove any existing REG_EQUAL note from "last", or else it will
3409 be mistaken for a note referring to the full contents of the
3410 libcall value when found together with the REG_RETVAL note added
3411 below. An existing note can come from an insn expansion at
3413 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3416 if (final_dest
!= target
)
3417 emit_move_insn (final_dest
, target
);
3420 first
= get_insns ();
3422 first
= NEXT_INSN (prev
);
3424 /* Encapsulate the block so it gets manipulated as a unit. */
3425 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3427 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3428 when the encapsulated region would not be in one basic block,
3429 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3431 bool attach_libcall_retval_notes
= true;
3432 next
= NEXT_INSN (last
);
3433 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3434 if (control_flow_insn_p (insn
))
3436 attach_libcall_retval_notes
= false;
3440 if (attach_libcall_retval_notes
)
3442 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3444 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3450 /* Generate code to store zero in X. */
3453 emit_clr_insn (rtx x
)
3455 emit_move_insn (x
, const0_rtx
);
3458 /* Generate code to store 1 in X
3459 assuming it contains zero beforehand. */
3462 emit_0_to_1_insn (rtx x
)
3464 emit_move_insn (x
, const1_rtx
);
3467 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3468 PURPOSE describes how this comparison will be used. CODE is the rtx
3469 comparison code we will be using.
3471 ??? Actually, CODE is slightly weaker than that. A target is still
3472 required to implement all of the normal bcc operations, but not
3473 required to implement all (or any) of the unordered bcc operations. */
3476 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3477 enum can_compare_purpose purpose
)
3481 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3483 if (purpose
== ccp_jump
)
3484 return bcc_gen_fctn
[(int) code
] != NULL
;
3485 else if (purpose
== ccp_store_flag
)
3486 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3488 /* There's only one cmov entry point, and it's allowed to fail. */
3491 if (purpose
== ccp_jump
3492 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3494 if (purpose
== ccp_cmov
3495 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3497 if (purpose
== ccp_store_flag
3498 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3501 mode
= GET_MODE_WIDER_MODE (mode
);
3503 while (mode
!= VOIDmode
);
3508 /* This function is called when we are going to emit a compare instruction that
3509 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3511 *PMODE is the mode of the inputs (in case they are const_int).
3512 *PUNSIGNEDP nonzero says that the operands are unsigned;
3513 this matters if they need to be widened.
3515 If they have mode BLKmode, then SIZE specifies the size of both operands.
3517 This function performs all the setup necessary so that the caller only has
3518 to emit a single comparison insn. This setup can involve doing a BLKmode
3519 comparison or emitting a library call to perform the comparison if no insn
3520 is available to handle it.
3521 The values which are passed in through pointers can be modified; the caller
3522 should perform the comparison on the modified values. */
3525 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3526 enum machine_mode
*pmode
, int *punsignedp
,
3527 enum can_compare_purpose purpose
)
3529 enum machine_mode mode
= *pmode
;
3530 rtx x
= *px
, y
= *py
;
3531 int unsignedp
= *punsignedp
;
3532 enum mode_class
class;
3534 class = GET_MODE_CLASS (mode
);
3536 /* They could both be VOIDmode if both args are immediate constants,
3537 but we should fold that at an earlier stage.
3538 With no special code here, this will call abort,
3539 reminding the programmer to implement such folding. */
3541 if (mode
!= BLKmode
&& flag_force_mem
)
3543 /* Load duplicate non-volatile operands once. */
3544 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3546 x
= force_not_mem (x
);
3551 x
= force_not_mem (x
);
3552 y
= force_not_mem (y
);
3556 /* If we are inside an appropriately-short loop and one operand is an
3557 expensive constant, force it into a register. */
3558 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3559 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3560 x
= force_reg (mode
, x
);
3562 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3563 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3564 y
= force_reg (mode
, y
);
3567 /* Abort if we have a non-canonical comparison. The RTL documentation
3568 states that canonical comparisons are required only for targets which
3570 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3574 /* Don't let both operands fail to indicate the mode. */
3575 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3576 x
= force_reg (mode
, x
);
3578 /* Handle all BLKmode compares. */
3580 if (mode
== BLKmode
)
3582 enum machine_mode cmp_mode
, result_mode
;
3583 enum insn_code cmp_code
;
3588 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3594 x
= protect_from_queue (x
, 0);
3595 y
= protect_from_queue (y
, 0);
3596 size
= protect_from_queue (size
, 0);
3598 /* Try to use a memory block compare insn - either cmpstr
3599 or cmpmem will do. */
3600 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3601 cmp_mode
!= VOIDmode
;
3602 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3604 cmp_code
= cmpmem_optab
[cmp_mode
];
3605 if (cmp_code
== CODE_FOR_nothing
)
3606 cmp_code
= cmpstr_optab
[cmp_mode
];
3607 if (cmp_code
== CODE_FOR_nothing
)
3610 /* Must make sure the size fits the insn's mode. */
3611 if ((GET_CODE (size
) == CONST_INT
3612 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3613 || (GET_MODE_BITSIZE (GET_MODE (size
))
3614 > GET_MODE_BITSIZE (cmp_mode
)))
3617 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3618 result
= gen_reg_rtx (result_mode
);
3619 size
= convert_to_mode (cmp_mode
, size
, 1);
3620 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3624 *pmode
= result_mode
;
3628 /* Otherwise call a library function, memcmp if we've got it,
3630 #ifdef TARGET_MEM_FUNCTIONS
3631 libfunc
= memcmp_libfunc
;
3632 length_type
= sizetype
;
3634 libfunc
= bcmp_libfunc
;
3635 length_type
= integer_type_node
;
3637 result_mode
= TYPE_MODE (integer_type_node
);
3638 cmp_mode
= TYPE_MODE (length_type
);
3639 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3640 TREE_UNSIGNED (length_type
));
3642 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3649 *pmode
= result_mode
;
3653 /* Don't allow operands to the compare to trap, as that can put the
3654 compare and branch in different basic blocks. */
3655 if (flag_non_call_exceptions
)
3658 x
= force_reg (mode
, x
);
3660 y
= force_reg (mode
, y
);
3665 if (can_compare_p (*pcomparison
, mode
, purpose
))
3668 /* Handle a lib call just for the mode we are using. */
3670 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3672 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3675 /* If we want unsigned, and this mode has a distinct unsigned
3676 comparison routine, use that. */
3677 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3678 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3680 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3681 word_mode
, 2, x
, mode
, y
, mode
);
3683 /* Integer comparison returns a result that must be compared against 1,
3684 so that even if we do an unsigned compare afterward,
3685 there is still a value that can represent the result "less than". */
3692 if (class == MODE_FLOAT
)
3693 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3699 /* Before emitting an insn with code ICODE, make sure that X, which is going
3700 to be used for operand OPNUM of the insn, is converted from mode MODE to
3701 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3702 that it is accepted by the operand predicate. Return the new value. */
3705 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3706 enum machine_mode wider_mode
, int unsignedp
)
3708 x
= protect_from_queue (x
, 0);
3710 if (mode
!= wider_mode
)
3711 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3713 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3714 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3718 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3724 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3725 we can do the comparison.
3726 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3727 be NULL_RTX which indicates that only a comparison is to be generated. */
3730 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3731 enum rtx_code comparison
, int unsignedp
, rtx label
)
3733 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3734 enum mode_class
class = GET_MODE_CLASS (mode
);
3735 enum machine_mode wider_mode
= mode
;
3737 /* Try combined insns first. */
3740 enum insn_code icode
;
3741 PUT_MODE (test
, wider_mode
);
3745 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3747 if (icode
!= CODE_FOR_nothing
3748 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3750 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3751 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3752 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3757 /* Handle some compares against zero. */
3758 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3759 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3761 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3762 emit_insn (GEN_FCN (icode
) (x
));
3764 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3768 /* Handle compares for which there is a directly suitable insn. */
3770 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3771 if (icode
!= CODE_FOR_nothing
)
3773 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3774 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3775 emit_insn (GEN_FCN (icode
) (x
, y
));
3777 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3781 if (class != MODE_INT
&& class != MODE_FLOAT
3782 && class != MODE_COMPLEX_FLOAT
)
3785 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3787 while (wider_mode
!= VOIDmode
);
3792 /* Generate code to compare X with Y so that the condition codes are
3793 set and to jump to LABEL if the condition is true. If X is a
3794 constant and Y is not a constant, then the comparison is swapped to
3795 ensure that the comparison RTL has the canonical form.
3797 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3798 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3799 the proper branch condition code.
3801 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3803 MODE is the mode of the inputs (in case they are const_int).
3805 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3806 be passed unchanged to emit_cmp_insn, then potentially converted into an
3807 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3810 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3811 enum machine_mode mode
, int unsignedp
, rtx label
)
3813 rtx op0
= x
, op1
= y
;
3815 /* Swap operands and condition to ensure canonical RTL. */
3816 if (swap_commutative_operands_p (x
, y
))
3818 /* If we're not emitting a branch, this means some caller
3824 comparison
= swap_condition (comparison
);
3828 /* If OP0 is still a constant, then both X and Y must be constants. Force
3829 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3831 if (CONSTANT_P (op0
))
3832 op0
= force_reg (mode
, op0
);
3837 comparison
= unsigned_condition (comparison
);
3839 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3841 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3844 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3847 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3848 enum machine_mode mode
, int unsignedp
)
3850 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3853 /* Emit a library call comparison between floating point X and Y.
3854 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3857 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3858 enum machine_mode
*pmode
, int *punsignedp
)
3860 enum rtx_code comparison
= *pcomparison
;
3861 enum rtx_code swapped
= swap_condition (comparison
);
3862 rtx x
= protect_from_queue (*px
, 0);
3863 rtx y
= protect_from_queue (*py
, 0);
3864 enum machine_mode orig_mode
= GET_MODE (x
);
3865 enum machine_mode mode
;
3866 rtx value
, target
, insns
, equiv
;
3869 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3871 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3874 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3877 tmp
= x
; x
= y
; y
= tmp
;
3878 comparison
= swapped
;
3883 if (mode
== VOIDmode
)
3886 if (mode
!= orig_mode
)
3888 x
= convert_to_mode (mode
, x
, 0);
3889 y
= convert_to_mode (mode
, y
, 0);
3892 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3893 the RTL. The allows the RTL optimizers to delete the libcall if the
3894 condition can be determined at compile-time. */
3895 if (comparison
== UNORDERED
)
3897 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3898 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3899 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3900 temp
, const_true_rtx
, equiv
);
3904 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3905 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3907 rtx true_rtx
, false_rtx
;
3912 true_rtx
= const0_rtx
;
3913 false_rtx
= const_true_rtx
;
3917 true_rtx
= const_true_rtx
;
3918 false_rtx
= const0_rtx
;
3922 true_rtx
= const1_rtx
;
3923 false_rtx
= const0_rtx
;
3927 true_rtx
= const0_rtx
;
3928 false_rtx
= constm1_rtx
;
3932 true_rtx
= constm1_rtx
;
3933 false_rtx
= const0_rtx
;
3937 true_rtx
= const0_rtx
;
3938 false_rtx
= const1_rtx
;
3944 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3945 equiv
, true_rtx
, false_rtx
);
3950 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3951 word_mode
, 2, x
, mode
, y
, mode
);
3952 insns
= get_insns ();
3955 target
= gen_reg_rtx (word_mode
);
3956 emit_libcall_block (insns
, target
, value
, equiv
);
3959 if (comparison
== UNORDERED
3960 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3966 *pcomparison
= comparison
;
3970 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3973 emit_indirect_jump (rtx loc
)
3975 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3977 loc
= copy_to_mode_reg (Pmode
, loc
);
3979 emit_jump_insn (gen_indirect_jump (loc
));
3983 #ifdef HAVE_conditional_move
3985 /* Emit a conditional move instruction if the machine supports one for that
3986 condition and machine mode.
3988 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3989 the mode to use should they be constants. If it is VOIDmode, they cannot
3992 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3993 should be stored there. MODE is the mode to use should they be constants.
3994 If it is VOIDmode, they cannot both be constants.
3996 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3997 is not supported. */
4000 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4001 enum machine_mode cmode
, rtx op2
, rtx op3
,
4002 enum machine_mode mode
, int unsignedp
)
4004 rtx tem
, subtarget
, comparison
, insn
;
4005 enum insn_code icode
;
4006 enum rtx_code reversed
;
4008 /* If one operand is constant, make it the second one. Only do this
4009 if the other operand is not constant as well. */
4011 if (swap_commutative_operands_p (op0
, op1
))
4016 code
= swap_condition (code
);
4019 /* get_condition will prefer to generate LT and GT even if the old
4020 comparison was against zero, so undo that canonicalization here since
4021 comparisons against zero are cheaper. */
4022 if (code
== LT
&& op1
== const1_rtx
)
4023 code
= LE
, op1
= const0_rtx
;
4024 else if (code
== GT
&& op1
== constm1_rtx
)
4025 code
= GE
, op1
= const0_rtx
;
4027 if (cmode
== VOIDmode
)
4028 cmode
= GET_MODE (op0
);
4030 if (swap_commutative_operands_p (op2
, op3
)
4031 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4040 if (mode
== VOIDmode
)
4041 mode
= GET_MODE (op2
);
4043 icode
= movcc_gen_code
[mode
];
4045 if (icode
== CODE_FOR_nothing
)
4050 op2
= force_not_mem (op2
);
4051 op3
= force_not_mem (op3
);
4055 target
= protect_from_queue (target
, 1);
4057 target
= gen_reg_rtx (mode
);
4063 op2
= protect_from_queue (op2
, 0);
4064 op3
= protect_from_queue (op3
, 0);
4066 /* If the insn doesn't accept these operands, put them in pseudos. */
4068 if (! (*insn_data
[icode
].operand
[0].predicate
)
4069 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4070 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4072 if (! (*insn_data
[icode
].operand
[2].predicate
)
4073 (op2
, insn_data
[icode
].operand
[2].mode
))
4074 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4076 if (! (*insn_data
[icode
].operand
[3].predicate
)
4077 (op3
, insn_data
[icode
].operand
[3].mode
))
4078 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4080 /* Everything should now be in the suitable form, so emit the compare insn
4081 and then the conditional move. */
4084 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4086 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4087 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4088 return NULL and let the caller figure out how best to deal with this
4090 if (GET_CODE (comparison
) != code
)
4093 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4095 /* If that failed, then give up. */
4101 if (subtarget
!= target
)
4102 convert_move (target
, subtarget
, 0);
4107 /* Return nonzero if a conditional move of mode MODE is supported.
4109 This function is for combine so it can tell whether an insn that looks
4110 like a conditional move is actually supported by the hardware. If we
4111 guess wrong we lose a bit on optimization, but that's it. */
4112 /* ??? sparc64 supports conditionally moving integers values based on fp
4113 comparisons, and vice versa. How do we handle them? */
4116 can_conditionally_move_p (enum machine_mode mode
)
4118 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4124 #endif /* HAVE_conditional_move */
4126 /* Emit a conditional addition instruction if the machine supports one for that
4127 condition and machine mode.
4129 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4130 the mode to use should they be constants. If it is VOIDmode, they cannot
4133 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4134 should be stored there. MODE is the mode to use should they be constants.
4135 If it is VOIDmode, they cannot both be constants.
4137 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4138 is not supported. */
4141 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4142 enum machine_mode cmode
, rtx op2
, rtx op3
,
4143 enum machine_mode mode
, int unsignedp
)
4145 rtx tem
, subtarget
, comparison
, insn
;
4146 enum insn_code icode
;
4147 enum rtx_code reversed
;
4149 /* If one operand is constant, make it the second one. Only do this
4150 if the other operand is not constant as well. */
4152 if (swap_commutative_operands_p (op0
, op1
))
4157 code
= swap_condition (code
);
4160 /* get_condition will prefer to generate LT and GT even if the old
4161 comparison was against zero, so undo that canonicalization here since
4162 comparisons against zero are cheaper. */
4163 if (code
== LT
&& op1
== const1_rtx
)
4164 code
= LE
, op1
= const0_rtx
;
4165 else if (code
== GT
&& op1
== constm1_rtx
)
4166 code
= GE
, op1
= const0_rtx
;
4168 if (cmode
== VOIDmode
)
4169 cmode
= GET_MODE (op0
);
4171 if (swap_commutative_operands_p (op2
, op3
)
4172 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4181 if (mode
== VOIDmode
)
4182 mode
= GET_MODE (op2
);
4184 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4186 if (icode
== CODE_FOR_nothing
)
4191 op2
= force_not_mem (op2
);
4192 op3
= force_not_mem (op3
);
4196 target
= protect_from_queue (target
, 1);
4198 target
= gen_reg_rtx (mode
);
4204 op2
= protect_from_queue (op2
, 0);
4205 op3
= protect_from_queue (op3
, 0);
4207 /* If the insn doesn't accept these operands, put them in pseudos. */
4209 if (! (*insn_data
[icode
].operand
[0].predicate
)
4210 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4211 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4213 if (! (*insn_data
[icode
].operand
[2].predicate
)
4214 (op2
, insn_data
[icode
].operand
[2].mode
))
4215 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4217 if (! (*insn_data
[icode
].operand
[3].predicate
)
4218 (op3
, insn_data
[icode
].operand
[3].mode
))
4219 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4221 /* Everything should now be in the suitable form, so emit the compare insn
4222 and then the conditional move. */
4225 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4227 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4228 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4229 return NULL and let the caller figure out how best to deal with this
4231 if (GET_CODE (comparison
) != code
)
4234 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4236 /* If that failed, then give up. */
4242 if (subtarget
!= target
)
4243 convert_move (target
, subtarget
, 0);
4248 /* These functions attempt to generate an insn body, rather than
4249 emitting the insn, but if the gen function already emits them, we
4250 make no attempt to turn them back into naked patterns.
4252 They do not protect from queued increments,
4253 because they may be used 1) in protect_from_queue itself
4254 and 2) in other passes where there is no queue. */
4256 /* Generate and return an insn body to add Y to X. */
4259 gen_add2_insn (rtx x
, rtx y
)
4261 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4263 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4264 (x
, insn_data
[icode
].operand
[0].mode
))
4265 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4266 (x
, insn_data
[icode
].operand
[1].mode
))
4267 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4268 (y
, insn_data
[icode
].operand
[2].mode
)))
4271 return (GEN_FCN (icode
) (x
, x
, y
));
4274 /* Generate and return an insn body to add r1 and c,
4275 storing the result in r0. */
4277 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4279 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4281 if (icode
== CODE_FOR_nothing
4282 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4283 (r0
, insn_data
[icode
].operand
[0].mode
))
4284 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4285 (r1
, insn_data
[icode
].operand
[1].mode
))
4286 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4287 (c
, insn_data
[icode
].operand
[2].mode
)))
4290 return (GEN_FCN (icode
) (r0
, r1
, c
));
4294 have_add2_insn (rtx x
, rtx y
)
4298 if (GET_MODE (x
) == VOIDmode
)
4301 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4303 if (icode
== CODE_FOR_nothing
)
4306 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4307 (x
, insn_data
[icode
].operand
[0].mode
))
4308 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4309 (x
, insn_data
[icode
].operand
[1].mode
))
4310 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4311 (y
, insn_data
[icode
].operand
[2].mode
)))
4317 /* Generate and return an insn body to subtract Y from X. */
4320 gen_sub2_insn (rtx x
, rtx y
)
4322 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4324 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4325 (x
, insn_data
[icode
].operand
[0].mode
))
4326 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4327 (x
, insn_data
[icode
].operand
[1].mode
))
4328 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4329 (y
, insn_data
[icode
].operand
[2].mode
)))
4332 return (GEN_FCN (icode
) (x
, x
, y
));
4335 /* Generate and return an insn body to subtract r1 and c,
4336 storing the result in r0. */
4338 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4340 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4342 if (icode
== CODE_FOR_nothing
4343 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4344 (r0
, insn_data
[icode
].operand
[0].mode
))
4345 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4346 (r1
, insn_data
[icode
].operand
[1].mode
))
4347 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4348 (c
, insn_data
[icode
].operand
[2].mode
)))
4351 return (GEN_FCN (icode
) (r0
, r1
, c
));
4355 have_sub2_insn (rtx x
, rtx y
)
4359 if (GET_MODE (x
) == VOIDmode
)
4362 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4364 if (icode
== CODE_FOR_nothing
)
4367 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4368 (x
, insn_data
[icode
].operand
[0].mode
))
4369 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4370 (x
, insn_data
[icode
].operand
[1].mode
))
4371 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4372 (y
, insn_data
[icode
].operand
[2].mode
)))
4378 /* Generate the body of an instruction to copy Y into X.
4379 It may be a list of insns, if one insn isn't enough. */
4382 gen_move_insn (rtx x
, rtx y
)
4387 emit_move_insn_1 (x
, y
);
4393 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4394 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4395 no such operation exists, CODE_FOR_nothing will be returned. */
4398 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4402 #ifdef HAVE_ptr_extend
4404 return CODE_FOR_ptr_extend
;
4407 tab
= unsignedp
? zext_optab
: sext_optab
;
4408 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4411 /* Generate the body of an insn to extend Y (with mode MFROM)
4412 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4415 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4416 enum machine_mode mfrom
, int unsignedp
)
4418 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4419 return GEN_FCN (icode
) (x
, y
);
4422 /* can_fix_p and can_float_p say whether the target machine
4423 can directly convert a given fixed point type to
4424 a given floating point type, or vice versa.
4425 The returned value is the CODE_FOR_... value to use,
4426 or CODE_FOR_nothing if these modes cannot be directly converted.
4428 *TRUNCP_PTR is set to 1 if it is necessary to output
4429 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4431 static enum insn_code
4432 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4433 int unsignedp
, int *truncp_ptr
)
4436 enum insn_code icode
;
4438 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4439 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4440 if (icode
!= CODE_FOR_nothing
)
4446 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4447 for this to work. We need to rework the fix* and ftrunc* patterns
4448 and documentation. */
4449 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4450 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4451 if (icode
!= CODE_FOR_nothing
4452 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4459 return CODE_FOR_nothing
;
4462 static enum insn_code
4463 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4468 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4469 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4472 /* Generate code to convert FROM to floating point
4473 and store in TO. FROM must be fixed point and not VOIDmode.
4474 UNSIGNEDP nonzero means regard FROM as unsigned.
4475 Normally this is done by correcting the final value
4476 if it is negative. */
4479 expand_float (rtx to
, rtx from
, int unsignedp
)
4481 enum insn_code icode
;
4483 enum machine_mode fmode
, imode
;
4485 /* Crash now, because we won't be able to decide which mode to use. */
4486 if (GET_MODE (from
) == VOIDmode
)
4489 /* Look for an insn to do the conversion. Do it in the specified
4490 modes if possible; otherwise convert either input, output or both to
4491 wider mode. If the integer mode is wider than the mode of FROM,
4492 we can do the conversion signed even if the input is unsigned. */
4494 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4495 fmode
= GET_MODE_WIDER_MODE (fmode
))
4496 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4497 imode
= GET_MODE_WIDER_MODE (imode
))
4499 int doing_unsigned
= unsignedp
;
4501 if (fmode
!= GET_MODE (to
)
4502 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4505 icode
= can_float_p (fmode
, imode
, unsignedp
);
4506 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4507 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4509 if (icode
!= CODE_FOR_nothing
)
4511 to
= protect_from_queue (to
, 1);
4512 from
= protect_from_queue (from
, 0);
4514 if (imode
!= GET_MODE (from
))
4515 from
= convert_to_mode (imode
, from
, unsignedp
);
4517 if (fmode
!= GET_MODE (to
))
4518 target
= gen_reg_rtx (fmode
);
4520 emit_unop_insn (icode
, target
, from
,
4521 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4524 convert_move (to
, target
, 0);
4529 /* Unsigned integer, and no way to convert directly.
4530 Convert as signed, then conditionally adjust the result. */
4533 rtx label
= gen_label_rtx ();
4535 REAL_VALUE_TYPE offset
;
4539 to
= protect_from_queue (to
, 1);
4540 from
= protect_from_queue (from
, 0);
4543 from
= force_not_mem (from
);
4545 /* Look for a usable floating mode FMODE wider than the source and at
4546 least as wide as the target. Using FMODE will avoid rounding woes
4547 with unsigned values greater than the signed maximum value. */
4549 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4550 fmode
= GET_MODE_WIDER_MODE (fmode
))
4551 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4552 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4555 if (fmode
== VOIDmode
)
4557 /* There is no such mode. Pretend the target is wide enough. */
4558 fmode
= GET_MODE (to
);
4560 /* Avoid double-rounding when TO is narrower than FROM. */
4561 if ((significand_size (fmode
) + 1)
4562 < GET_MODE_BITSIZE (GET_MODE (from
)))
4565 rtx neglabel
= gen_label_rtx ();
4567 /* Don't use TARGET if it isn't a register, is a hard register,
4568 or is the wrong mode. */
4569 if (GET_CODE (target
) != REG
4570 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4571 || GET_MODE (target
) != fmode
)
4572 target
= gen_reg_rtx (fmode
);
4574 imode
= GET_MODE (from
);
4575 do_pending_stack_adjust ();
4577 /* Test whether the sign bit is set. */
4578 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4581 /* The sign bit is not set. Convert as signed. */
4582 expand_float (target
, from
, 0);
4583 emit_jump_insn (gen_jump (label
));
4586 /* The sign bit is set.
4587 Convert to a usable (positive signed) value by shifting right
4588 one bit, while remembering if a nonzero bit was shifted
4589 out; i.e., compute (from & 1) | (from >> 1). */
4591 emit_label (neglabel
);
4592 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4593 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4594 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4596 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4598 expand_float (target
, temp
, 0);
4600 /* Multiply by 2 to undo the shift above. */
4601 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4602 target
, 0, OPTAB_LIB_WIDEN
);
4604 emit_move_insn (target
, temp
);
4606 do_pending_stack_adjust ();
4612 /* If we are about to do some arithmetic to correct for an
4613 unsigned operand, do it in a pseudo-register. */
4615 if (GET_MODE (to
) != fmode
4616 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4617 target
= gen_reg_rtx (fmode
);
4619 /* Convert as signed integer to floating. */
4620 expand_float (target
, from
, 0);
4622 /* If FROM is negative (and therefore TO is negative),
4623 correct its value by 2**bitwidth. */
4625 do_pending_stack_adjust ();
4626 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4630 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4631 temp
= expand_binop (fmode
, add_optab
, target
,
4632 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4633 target
, 0, OPTAB_LIB_WIDEN
);
4635 emit_move_insn (target
, temp
);
4637 do_pending_stack_adjust ();
4642 /* No hardware instruction available; call a library routine. */
4647 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4649 to
= protect_from_queue (to
, 1);
4650 from
= protect_from_queue (from
, 0);
4652 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4653 from
= convert_to_mode (SImode
, from
, unsignedp
);
4656 from
= force_not_mem (from
);
4658 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4664 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4665 GET_MODE (to
), 1, from
,
4667 insns
= get_insns ();
4670 emit_libcall_block (insns
, target
, value
,
4671 gen_rtx_FLOAT (GET_MODE (to
), from
));
4676 /* Copy result to requested destination
4677 if we have been computing in a temp location. */
4681 if (GET_MODE (target
) == GET_MODE (to
))
4682 emit_move_insn (to
, target
);
4684 convert_move (to
, target
, 0);
4688 /* Generate code to convert FROM to fixed point and store in TO. FROM
4689 must be floating point. */
4692 expand_fix (rtx to
, rtx from
, int unsignedp
)
4694 enum insn_code icode
;
4696 enum machine_mode fmode
, imode
;
4699 /* We first try to find a pair of modes, one real and one integer, at
4700 least as wide as FROM and TO, respectively, in which we can open-code
4701 this conversion. If the integer mode is wider than the mode of TO,
4702 we can do the conversion either signed or unsigned. */
4704 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4705 fmode
= GET_MODE_WIDER_MODE (fmode
))
4706 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4707 imode
= GET_MODE_WIDER_MODE (imode
))
4709 int doing_unsigned
= unsignedp
;
4711 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4712 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4713 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4715 if (icode
!= CODE_FOR_nothing
)
4717 to
= protect_from_queue (to
, 1);
4718 from
= protect_from_queue (from
, 0);
4720 if (fmode
!= GET_MODE (from
))
4721 from
= convert_to_mode (fmode
, from
, 0);
4725 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4726 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4730 if (imode
!= GET_MODE (to
))
4731 target
= gen_reg_rtx (imode
);
4733 emit_unop_insn (icode
, target
, from
,
4734 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4736 convert_move (to
, target
, unsignedp
);
4741 /* For an unsigned conversion, there is one more way to do it.
4742 If we have a signed conversion, we generate code that compares
4743 the real value to the largest representable positive number. If if
4744 is smaller, the conversion is done normally. Otherwise, subtract
4745 one plus the highest signed number, convert, and add it back.
4747 We only need to check all real modes, since we know we didn't find
4748 anything with a wider integer mode.
4750 This code used to extend FP value into mode wider than the destination.
4751 This is not needed. Consider, for instance conversion from SFmode
4754 The hot path trought the code is dealing with inputs smaller than 2^63
4755 and doing just the conversion, so there is no bits to lose.
4757 In the other path we know the value is positive in the range 2^63..2^64-1
4758 inclusive. (as for other imput overflow happens and result is undefined)
4759 So we know that the most important bit set in mantissa corresponds to
4760 2^63. The subtraction of 2^63 should not generate any rounding as it
4761 simply clears out that bit. The rest is trivial. */
4763 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4764 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4765 fmode
= GET_MODE_WIDER_MODE (fmode
))
4766 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4770 REAL_VALUE_TYPE offset
;
4771 rtx limit
, lab1
, lab2
, insn
;
4773 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4774 real_2expN (&offset
, bitsize
- 1);
4775 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4776 lab1
= gen_label_rtx ();
4777 lab2
= gen_label_rtx ();
4780 to
= protect_from_queue (to
, 1);
4781 from
= protect_from_queue (from
, 0);
4784 from
= force_not_mem (from
);
4786 if (fmode
!= GET_MODE (from
))
4787 from
= convert_to_mode (fmode
, from
, 0);
4789 /* See if we need to do the subtraction. */
4790 do_pending_stack_adjust ();
4791 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4794 /* If not, do the signed "fix" and branch around fixup code. */
4795 expand_fix (to
, from
, 0);
4796 emit_jump_insn (gen_jump (lab2
));
4799 /* Otherwise, subtract 2**(N-1), convert to signed number,
4800 then add 2**(N-1). Do the addition using XOR since this
4801 will often generate better code. */
4803 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4804 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4805 expand_fix (to
, target
, 0);
4806 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4808 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4810 to
, 1, OPTAB_LIB_WIDEN
);
4813 emit_move_insn (to
, target
);
4817 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4818 != CODE_FOR_nothing
)
4820 /* Make a place for a REG_NOTE and add it. */
4821 insn
= emit_move_insn (to
, to
);
4822 set_unique_reg_note (insn
,
4824 gen_rtx_fmt_e (UNSIGNED_FIX
,
4832 /* We can't do it with an insn, so use a library call. But first ensure
4833 that the mode of TO is at least as wide as SImode, since those are the
4834 only library calls we know about. */
4836 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4838 target
= gen_reg_rtx (SImode
);
4840 expand_fix (target
, from
, unsignedp
);
4848 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4849 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4853 to
= protect_from_queue (to
, 1);
4854 from
= protect_from_queue (from
, 0);
4857 from
= force_not_mem (from
);
4861 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4862 GET_MODE (to
), 1, from
,
4864 insns
= get_insns ();
4867 emit_libcall_block (insns
, target
, value
,
4868 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4869 GET_MODE (to
), from
));
4874 if (GET_MODE (to
) == GET_MODE (target
))
4875 emit_move_insn (to
, target
);
4877 convert_move (to
, target
, 0);
4881 /* Report whether we have an instruction to perform the operation
4882 specified by CODE on operands of mode MODE. */
4884 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4886 return (code_to_optab
[(int) code
] != 0
4887 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4888 != CODE_FOR_nothing
));
4891 /* Create a blank optab. */
4896 optab op
= ggc_alloc (sizeof (struct optab
));
4897 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4899 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4900 op
->handlers
[i
].libfunc
= 0;
4906 static convert_optab
4907 new_convert_optab (void)
4910 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4911 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4912 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4914 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4915 op
->handlers
[i
][j
].libfunc
= 0;
4920 /* Same, but fill in its code as CODE, and write it into the
4921 code_to_optab table. */
4923 init_optab (enum rtx_code code
)
4925 optab op
= new_optab ();
4927 code_to_optab
[(int) code
] = op
;
4931 /* Same, but fill in its code as CODE, and do _not_ write it into
4932 the code_to_optab table. */
4934 init_optabv (enum rtx_code code
)
4936 optab op
= new_optab ();
4941 /* Conversion optabs never go in the code_to_optab table. */
4942 static inline convert_optab
4943 init_convert_optab (enum rtx_code code
)
4945 convert_optab op
= new_convert_optab ();
4950 /* Initialize the libfunc fields of an entire group of entries in some
4951 optab. Each entry is set equal to a string consisting of a leading
4952 pair of underscores followed by a generic operation name followed by
4953 a mode name (downshifted to lowercase) followed by a single character
4954 representing the number of operands for the given operation (which is
4955 usually one of the characters '2', '3', or '4').
4957 OPTABLE is the table in which libfunc fields are to be initialized.
4958 FIRST_MODE is the first machine mode index in the given optab to
4960 LAST_MODE is the last machine mode index in the given optab to
4962 OPNAME is the generic (string) name of the operation.
4963 SUFFIX is the character which specifies the number of operands for
4964 the given generic operation.
4968 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4969 const char *opname
, int suffix
)
4972 unsigned opname_len
= strlen (opname
);
4974 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4975 mode
= (enum machine_mode
) ((int) mode
+ 1))
4977 const char *mname
= GET_MODE_NAME (mode
);
4978 unsigned mname_len
= strlen (mname
);
4979 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4986 for (q
= opname
; *q
; )
4988 for (q
= mname
; *q
; q
++)
4989 *p
++ = TOLOWER (*q
);
4993 optable
->handlers
[(int) mode
].libfunc
4994 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4998 /* Initialize the libfunc fields of an entire group of entries in some
4999 optab which correspond to all integer mode operations. The parameters
5000 have the same meaning as similarly named ones for the `init_libfuncs'
5001 routine. (See above). */
5004 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
5006 int maxsize
= 2*BITS_PER_WORD
;
5007 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5008 maxsize
= LONG_LONG_TYPE_SIZE
;
5009 init_libfuncs (optable
, word_mode
,
5010 mode_for_size (maxsize
, MODE_INT
, 0),
5014 /* Initialize the libfunc fields of an entire group of entries in some
5015 optab which correspond to all real mode operations. The parameters
5016 have the same meaning as similarly named ones for the `init_libfuncs'
5017 routine. (See above). */
5020 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
5022 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
5025 /* Initialize the libfunc fields of an entire group of entries of an
5026 inter-mode-class conversion optab. The string formation rules are
5027 similar to the ones for init_libfuncs, above, but instead of having
5028 a mode name and an operand count these functions have two mode names
5029 and no operand count. */
5031 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5032 enum mode_class from_class
,
5033 enum mode_class to_class
)
5035 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
5036 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
5037 size_t opname_len
= strlen (opname
);
5038 size_t max_mname_len
= 0;
5040 enum machine_mode fmode
, tmode
;
5041 const char *fname
, *tname
;
5043 char *libfunc_name
, *suffix
;
5046 for (fmode
= first_from_mode
;
5048 fmode
= GET_MODE_WIDER_MODE (fmode
))
5049 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
5051 for (tmode
= first_to_mode
;
5053 tmode
= GET_MODE_WIDER_MODE (tmode
))
5054 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
5056 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5057 libfunc_name
[0] = '_';
5058 libfunc_name
[1] = '_';
5059 memcpy (&libfunc_name
[2], opname
, opname_len
);
5060 suffix
= libfunc_name
+ opname_len
+ 2;
5062 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
5063 fmode
= GET_MODE_WIDER_MODE (fmode
))
5064 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
5065 tmode
= GET_MODE_WIDER_MODE (tmode
))
5067 fname
= GET_MODE_NAME (fmode
);
5068 tname
= GET_MODE_NAME (tmode
);
5071 for (q
= fname
; *q
; p
++, q
++)
5073 for (q
= tname
; *q
; p
++, q
++)
5078 tab
->handlers
[tmode
][fmode
].libfunc
5079 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5084 /* Initialize the libfunc fields of an entire group of entries of an
5085 intra-mode-class conversion optab. The string formation rules are
5086 similar to the ones for init_libfunc, above. WIDENING says whether
5087 the optab goes from narrow to wide modes or vice versa. These functions
5088 have two mode names _and_ an operand count. */
5090 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5091 enum mode_class
class, bool widening
)
5093 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
5094 size_t opname_len
= strlen (opname
);
5095 size_t max_mname_len
= 0;
5097 enum machine_mode nmode
, wmode
;
5098 const char *nname
, *wname
;
5100 char *libfunc_name
, *suffix
;
5103 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5104 nmode
= GET_MODE_WIDER_MODE (nmode
))
5105 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
5107 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5108 libfunc_name
[0] = '_';
5109 libfunc_name
[1] = '_';
5110 memcpy (&libfunc_name
[2], opname
, opname_len
);
5111 suffix
= libfunc_name
+ opname_len
+ 2;
5113 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5114 nmode
= GET_MODE_WIDER_MODE (nmode
))
5115 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
5116 wmode
= GET_MODE_WIDER_MODE (wmode
))
5118 nname
= GET_MODE_NAME (nmode
);
5119 wname
= GET_MODE_NAME (wmode
);
5122 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
5124 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
5130 tab
->handlers
[widening
? wmode
: nmode
]
5131 [widening
? nmode
: wmode
].libfunc
5132 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5139 init_one_libfunc (const char *name
)
5143 /* Create a FUNCTION_DECL that can be passed to
5144 targetm.encode_section_info. */
5145 /* ??? We don't have any type information except for this is
5146 a function. Pretend this is "int foo()". */
5147 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5148 build_function_type (integer_type_node
, NULL_TREE
));
5149 DECL_ARTIFICIAL (decl
) = 1;
5150 DECL_EXTERNAL (decl
) = 1;
5151 TREE_PUBLIC (decl
) = 1;
5153 symbol
= XEXP (DECL_RTL (decl
), 0);
5155 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5156 are the flags assigned by targetm.encode_section_info. */
5157 SYMBOL_REF_DECL (symbol
) = 0;
5162 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5163 MODE to NAME, which should be either 0 or a string constant. */
5165 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
5168 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
5170 optable
->handlers
[mode
].libfunc
= 0;
5173 /* Call this to reset the function entry for one conversion optab
5174 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5175 either 0 or a string constant. */
5177 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
5178 enum machine_mode fmode
, const char *name
)
5181 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
5183 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
5186 /* Call this once to initialize the contents of the optabs
5187 appropriately for the current target machine. */
5194 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5196 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5197 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5199 #ifdef HAVE_conditional_move
5200 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5201 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5204 add_optab
= init_optab (PLUS
);
5205 addv_optab
= init_optabv (PLUS
);
5206 sub_optab
= init_optab (MINUS
);
5207 subv_optab
= init_optabv (MINUS
);
5208 smul_optab
= init_optab (MULT
);
5209 smulv_optab
= init_optabv (MULT
);
5210 smul_highpart_optab
= init_optab (UNKNOWN
);
5211 umul_highpart_optab
= init_optab (UNKNOWN
);
5212 smul_widen_optab
= init_optab (UNKNOWN
);
5213 umul_widen_optab
= init_optab (UNKNOWN
);
5214 sdiv_optab
= init_optab (DIV
);
5215 sdivv_optab
= init_optabv (DIV
);
5216 sdivmod_optab
= init_optab (UNKNOWN
);
5217 udiv_optab
= init_optab (UDIV
);
5218 udivmod_optab
= init_optab (UNKNOWN
);
5219 smod_optab
= init_optab (MOD
);
5220 umod_optab
= init_optab (UMOD
);
5221 ftrunc_optab
= init_optab (UNKNOWN
);
5222 and_optab
= init_optab (AND
);
5223 ior_optab
= init_optab (IOR
);
5224 xor_optab
= init_optab (XOR
);
5225 ashl_optab
= init_optab (ASHIFT
);
5226 ashr_optab
= init_optab (ASHIFTRT
);
5227 lshr_optab
= init_optab (LSHIFTRT
);
5228 rotl_optab
= init_optab (ROTATE
);
5229 rotr_optab
= init_optab (ROTATERT
);
5230 smin_optab
= init_optab (SMIN
);
5231 smax_optab
= init_optab (SMAX
);
5232 umin_optab
= init_optab (UMIN
);
5233 umax_optab
= init_optab (UMAX
);
5234 pow_optab
= init_optab (UNKNOWN
);
5235 atan2_optab
= init_optab (UNKNOWN
);
5237 /* These three have codes assigned exclusively for the sake of
5239 mov_optab
= init_optab (SET
);
5240 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5241 cmp_optab
= init_optab (COMPARE
);
5243 ucmp_optab
= init_optab (UNKNOWN
);
5244 tst_optab
= init_optab (UNKNOWN
);
5246 eq_optab
= init_optab (EQ
);
5247 ne_optab
= init_optab (NE
);
5248 gt_optab
= init_optab (GT
);
5249 ge_optab
= init_optab (GE
);
5250 lt_optab
= init_optab (LT
);
5251 le_optab
= init_optab (LE
);
5252 unord_optab
= init_optab (UNORDERED
);
5254 neg_optab
= init_optab (NEG
);
5255 negv_optab
= init_optabv (NEG
);
5256 abs_optab
= init_optab (ABS
);
5257 absv_optab
= init_optabv (ABS
);
5258 addcc_optab
= init_optab (UNKNOWN
);
5259 one_cmpl_optab
= init_optab (NOT
);
5260 ffs_optab
= init_optab (FFS
);
5261 clz_optab
= init_optab (CLZ
);
5262 ctz_optab
= init_optab (CTZ
);
5263 popcount_optab
= init_optab (POPCOUNT
);
5264 parity_optab
= init_optab (PARITY
);
5265 sqrt_optab
= init_optab (SQRT
);
5266 floor_optab
= init_optab (UNKNOWN
);
5267 ceil_optab
= init_optab (UNKNOWN
);
5268 round_optab
= init_optab (UNKNOWN
);
5269 btrunc_optab
= init_optab (UNKNOWN
);
5270 nearbyint_optab
= init_optab (UNKNOWN
);
5271 sin_optab
= init_optab (UNKNOWN
);
5272 cos_optab
= init_optab (UNKNOWN
);
5273 exp_optab
= init_optab (UNKNOWN
);
5274 log_optab
= init_optab (UNKNOWN
);
5275 log10_optab
= init_optab (UNKNOWN
);
5276 log2_optab
= init_optab (UNKNOWN
);
5277 tan_optab
= init_optab (UNKNOWN
);
5278 atan_optab
= init_optab (UNKNOWN
);
5279 strlen_optab
= init_optab (UNKNOWN
);
5280 cbranch_optab
= init_optab (UNKNOWN
);
5281 cmov_optab
= init_optab (UNKNOWN
);
5282 cstore_optab
= init_optab (UNKNOWN
);
5283 push_optab
= init_optab (UNKNOWN
);
5285 vec_extract_optab
= init_optab (UNKNOWN
);
5286 vec_set_optab
= init_optab (UNKNOWN
);
5287 vec_init_optab
= init_optab (UNKNOWN
);
5289 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5290 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5291 trunc_optab
= init_convert_optab (TRUNCATE
);
5292 sfix_optab
= init_convert_optab (FIX
);
5293 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5294 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5295 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5296 sfloat_optab
= init_convert_optab (FLOAT
);
5297 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5299 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5301 movstr_optab
[i
] = CODE_FOR_nothing
;
5302 clrstr_optab
[i
] = CODE_FOR_nothing
;
5303 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5304 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5306 #ifdef HAVE_SECONDARY_RELOADS
5307 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5311 /* Fill in the optabs with the insns we support. */
5314 /* Initialize the optabs with the names of the library functions. */
5315 init_integral_libfuncs (add_optab
, "add", '3');
5316 init_floating_libfuncs (add_optab
, "add", '3');
5317 init_integral_libfuncs (addv_optab
, "addv", '3');
5318 init_floating_libfuncs (addv_optab
, "add", '3');
5319 init_integral_libfuncs (sub_optab
, "sub", '3');
5320 init_floating_libfuncs (sub_optab
, "sub", '3');
5321 init_integral_libfuncs (subv_optab
, "subv", '3');
5322 init_floating_libfuncs (subv_optab
, "sub", '3');
5323 init_integral_libfuncs (smul_optab
, "mul", '3');
5324 init_floating_libfuncs (smul_optab
, "mul", '3');
5325 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5326 init_floating_libfuncs (smulv_optab
, "mul", '3');
5327 init_integral_libfuncs (sdiv_optab
, "div", '3');
5328 init_floating_libfuncs (sdiv_optab
, "div", '3');
5329 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5330 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5331 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5332 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5333 init_integral_libfuncs (smod_optab
, "mod", '3');
5334 init_integral_libfuncs (umod_optab
, "umod", '3');
5335 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5336 init_integral_libfuncs (and_optab
, "and", '3');
5337 init_integral_libfuncs (ior_optab
, "ior", '3');
5338 init_integral_libfuncs (xor_optab
, "xor", '3');
5339 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5340 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5341 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5342 init_integral_libfuncs (smin_optab
, "min", '3');
5343 init_floating_libfuncs (smin_optab
, "min", '3');
5344 init_integral_libfuncs (smax_optab
, "max", '3');
5345 init_floating_libfuncs (smax_optab
, "max", '3');
5346 init_integral_libfuncs (umin_optab
, "umin", '3');
5347 init_integral_libfuncs (umax_optab
, "umax", '3');
5348 init_integral_libfuncs (neg_optab
, "neg", '2');
5349 init_floating_libfuncs (neg_optab
, "neg", '2');
5350 init_integral_libfuncs (negv_optab
, "negv", '2');
5351 init_floating_libfuncs (negv_optab
, "neg", '2');
5352 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5353 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5354 init_integral_libfuncs (clz_optab
, "clz", '2');
5355 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5356 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5357 init_integral_libfuncs (parity_optab
, "parity", '2');
5359 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5360 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5361 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5362 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5364 /* EQ etc are floating point only. */
5365 init_floating_libfuncs (eq_optab
, "eq", '2');
5366 init_floating_libfuncs (ne_optab
, "ne", '2');
5367 init_floating_libfuncs (gt_optab
, "gt", '2');
5368 init_floating_libfuncs (ge_optab
, "ge", '2');
5369 init_floating_libfuncs (lt_optab
, "lt", '2');
5370 init_floating_libfuncs (le_optab
, "le", '2');
5371 init_floating_libfuncs (unord_optab
, "unord", '2');
5374 init_interclass_conv_libfuncs (sfloat_optab
, "float", MODE_INT
, MODE_FLOAT
);
5375 init_interclass_conv_libfuncs (sfix_optab
, "fix", MODE_FLOAT
, MODE_INT
);
5376 init_interclass_conv_libfuncs (ufix_optab
, "fixuns", MODE_FLOAT
, MODE_INT
);
5378 /* sext_optab is also used for FLOAT_EXTEND. */
5379 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5380 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5382 /* Use cabs for double complex abs, since systems generally have cabs.
5383 Don't define any libcall for float complex, so that cabs will be used. */
5384 if (complex_double_type_node
)
5385 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5386 = init_one_libfunc ("cabs");
5388 /* The ffs function operates on `int'. */
5389 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5390 = init_one_libfunc ("ffs");
5392 abort_libfunc
= init_one_libfunc ("abort");
5393 memcpy_libfunc
= init_one_libfunc ("memcpy");
5394 memmove_libfunc
= init_one_libfunc ("memmove");
5395 bcopy_libfunc
= init_one_libfunc ("bcopy");
5396 memcmp_libfunc
= init_one_libfunc ("memcmp");
5397 bcmp_libfunc
= init_one_libfunc ("__gcc_bcmp");
5398 memset_libfunc
= init_one_libfunc ("memset");
5399 bzero_libfunc
= init_one_libfunc ("bzero");
5400 setbits_libfunc
= init_one_libfunc ("__setbits");
5402 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5403 ? "_Unwind_SjLj_Resume"
5404 : "_Unwind_Resume");
5405 #ifndef DONT_USE_BUILTIN_SETJMP
5406 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5407 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5409 setjmp_libfunc
= init_one_libfunc ("setjmp");
5410 longjmp_libfunc
= init_one_libfunc ("longjmp");
5412 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5413 unwind_sjlj_unregister_libfunc
5414 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5416 /* For function entry/exit instrumentation. */
5417 profile_function_entry_libfunc
5418 = init_one_libfunc ("__cyg_profile_func_enter");
5419 profile_function_exit_libfunc
5420 = init_one_libfunc ("__cyg_profile_func_exit");
5422 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5423 gcov_init_libfunc
= init_one_libfunc ("__gcov_init");
5425 if (HAVE_conditional_trap
)
5426 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5428 /* Allow the target to add more libcalls or rename some, etc. */
5429 targetm
.init_libfuncs ();
5432 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5433 CODE. Return 0 on failure. */
5436 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5437 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5439 enum machine_mode mode
= GET_MODE (op1
);
5440 enum insn_code icode
;
5443 if (!HAVE_conditional_trap
)
5446 if (mode
== VOIDmode
)
5449 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5450 if (icode
== CODE_FOR_nothing
)
5454 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5455 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5461 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5463 PUT_CODE (trap_rtx
, code
);
5464 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5468 insn
= get_insns ();
5475 #include "gt-optabs.h"