1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 optab optab_table[OTI_MAX];
58
59 rtx libfunc_table[LTI_MAX];
60
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
63
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
66
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
69
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
71
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
75
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
77
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
83
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
86
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
89
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
92
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
97
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
127
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
132 \f
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
136
137 If the last insn does not set TARGET, don't do anything, but return 1.
138
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
142
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
145 {
146 rtx last_insn, insn, set;
147 rtx note;
148
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
150
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
157
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
160
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
164 ;
165
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
169
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
175
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
180 {
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
183 {
184 if (reg_set_p (target, insn))
185 return 0;
186
187 insn = PREV_INSN (insn);
188 }
189 }
190
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
195
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
197
198 return 1;
199 }
200 \f
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
206
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
210 {
211 rtx result;
212
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
216
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
224
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
229
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
232
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
237 }
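
/* Editorial sketch (not part of optabs.c): why the NO_EXTEND case handled by
   widen_operand above is safe for logical operations but not for right
   shifts.  When an 8-bit value is widened to a 32-bit register without a
   real zero- or sign-extension, the high 24 bits are garbage; AND followed
   by truncation back to 8 bits still yields the right low byte, whereas a
   right shift pulls the garbage bits down into the result.  All names here
   are hypothetical.  */
#include <stdint.h>

static uint8_t
sketch_and_with_garbage_high_bits (uint8_t a, uint8_t b, uint32_t garbage)
{
  uint32_t wa = (garbage << 8) | a;   /* "widened" A, high bits undefined */
  uint32_t wb = (garbage << 8) | b;   /* "widened" B, high bits undefined */
  return (uint8_t) (wa & wb);         /* low byte is still A & B */
}

static uint8_t
sketch_shr_needs_real_extension (uint8_t a, uint32_t garbage)
{
  uint32_t wa = (garbage << 8) | a;   /* garbage high bits */
  return (uint8_t) (wa >> 4);         /* WRONG: garbage shifts into bits 4..7 */
}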
238 \f
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
246 {
247 bool trapv;
248 switch (code)
249 {
250 case BIT_AND_EXPR:
251 return and_optab;
252
253 case BIT_IOR_EXPR:
254 return ior_optab;
255
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
258
259 case BIT_XOR_EXPR:
260 return xor_optab;
261
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
267
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
275
276 case LSHIFT_EXPR:
277 return ashl_optab;
278
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
281
282 case LROTATE_EXPR:
283 return rotl_optab;
284
285 case RROTATE_EXPR:
286 return rotr_optab;
287
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
290
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
293
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
296
297 case WIDEN_SUM_EXPR:
298 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
299
300 case DOT_PROD_EXPR:
301 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
302
303 case REDUC_MAX_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
305
306 case REDUC_MIN_EXPR:
307 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
308
309 case REDUC_PLUS_EXPR:
310 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
311
312 case VEC_LSHIFT_EXPR:
313 return vec_shl_optab;
314
315 case VEC_RSHIFT_EXPR:
316 return vec_shr_optab;
317
318 default:
319 break;
320 }
321
322 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
323 switch (code)
324 {
325 case PLUS_EXPR:
326 return trapv ? addv_optab : add_optab;
327
328 case MINUS_EXPR:
329 return trapv ? subv_optab : sub_optab;
330
331 case MULT_EXPR:
332 return trapv ? smulv_optab : smul_optab;
333
334 case NEGATE_EXPR:
335 return trapv ? negv_optab : neg_optab;
336
337 case ABS_EXPR:
338 return trapv ? absv_optab : abs_optab;
339
340 default:
341 return NULL;
342 }
343 }
344 \f
345
346 /* Expand vector widening operations.
347
348 There are two different classes of operations handled here:
349 1) Operations whose result is wider than all the arguments to the operation.
350 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
351 In this case OP0 and optionally OP1 would be initialized,
352 but WIDE_OP wouldn't (not relevant for this case).
353 2) Operations whose result is of the same size as the last argument to the
354 operation, but wider than all the other arguments to the operation.
355 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
356 In this case WIDE_OP, OP0 and optionally OP1 would be initialized.
357
358 E.g., when called to expand the following operations, this is how
359 the arguments will be initialized:
360 nops OP0 OP1 WIDE_OP
361 widening-sum 2 oprnd0 - oprnd1
362 widening-dot-product 3 oprnd0 oprnd1 oprnd2
363 widening-mult 2 oprnd0 oprnd1 -
364 type-promotion (vec-unpack) 1 oprnd0 - - */
365
366 rtx
367 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
368 int unsignedp)
369 {
370 tree oprnd0, oprnd1, oprnd2;
371 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
372 optab widen_pattern_optab;
373 int icode;
374 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
375 rtx temp;
376 rtx pat;
377 rtx xop0, xop1, wxop;
378 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
379
380 oprnd0 = TREE_OPERAND (exp, 0);
381 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
382 widen_pattern_optab =
383 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
384 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
385 gcc_assert (icode != CODE_FOR_nothing);
386 xmode0 = insn_data[icode].operand[1].mode;
387
388 if (nops >= 2)
389 {
390 oprnd1 = TREE_OPERAND (exp, 1);
391 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
392 xmode1 = insn_data[icode].operand[2].mode;
393 }
394
395 /* The last operand is of a wider mode than the rest of the operands. */
396 if (nops == 2)
397 {
398 wmode = tmode1;
399 wxmode = xmode1;
400 }
401 else if (nops == 3)
402 {
403 gcc_assert (tmode1 == tmode0);
404 gcc_assert (op1);
405 oprnd2 = TREE_OPERAND (exp, 2);
406 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
407 wxmode = insn_data[icode].operand[3].mode;
408 }
409
410 if (!wide_op)
411 wmode = wxmode = insn_data[icode].operand[0].mode;
412
413 if (!target
414 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
415 temp = gen_reg_rtx (wmode);
416 else
417 temp = target;
418
419 xop0 = op0;
420 xop1 = op1;
421 wxop = wide_op;
422
423 /* In case the insn wants input operands in modes different from
424 those of the actual operands, convert the operands. It would
425 seem that we don't need to convert CONST_INTs, but we do, so
426 that they're properly zero-extended, sign-extended or truncated
427 for their mode. */
428
429 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
430 xop0 = convert_modes (xmode0,
431 GET_MODE (op0) != VOIDmode
432 ? GET_MODE (op0)
433 : tmode0,
434 xop0, unsignedp);
435
436 if (op1)
437 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
438 xop1 = convert_modes (xmode1,
439 GET_MODE (op1) != VOIDmode
440 ? GET_MODE (op1)
441 : tmode1,
442 xop1, unsignedp);
443
444 if (wide_op)
445 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
446 wxop = convert_modes (wxmode,
447 GET_MODE (wide_op) != VOIDmode
448 ? GET_MODE (wide_op)
449 : wmode,
450 wxop, unsignedp);
451
452 /* Now, if insn's predicates don't allow our operands, put them into
453 pseudo regs. */
454
455 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
456 && xmode0 != VOIDmode)
457 xop0 = copy_to_mode_reg (xmode0, xop0);
458
459 if (op1)
460 {
461 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
462 && xmode1 != VOIDmode)
463 xop1 = copy_to_mode_reg (xmode1, xop1);
464
465 if (wide_op)
466 {
467 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
468 && wxmode != VOIDmode)
469 wxop = copy_to_mode_reg (wxmode, wxop);
470
471 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
472 }
473 else
474 pat = GEN_FCN (icode) (temp, xop0, xop1);
475 }
476 else
477 {
478 if (wide_op)
479 {
480 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
481 && wxmode != VOIDmode)
482 wxop = copy_to_mode_reg (wxmode, wxop);
483
484 pat = GEN_FCN (icode) (temp, xop0, wxop);
485 }
486 else
487 pat = GEN_FCN (icode) (temp, xop0);
488 }
489
490 emit_insn (pat);
491 return temp;
492 }
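
/* Editorial sketch (not part of optabs.c): scalar semantics of the operand
   layouts tabulated in the comment above expand_widen_pattern_expr, assuming
   16-bit elements widening to 32 bits and the unsigned variants
   (usum_widen_optab, udot_prod_optab).  The vector forms apply the same
   computation element-wise, accumulating into the wider WIDE_OP operand.
   Function names are hypothetical.  */
#include <stdint.h>

static uint32_t
sketch_widen_sum (uint16_t oprnd0, uint32_t oprnd1 /* WIDE_OP */)
{
  /* WIDEN_SUM_EXPR: widen the narrow operand and add it to the
     wide accumulator.  */
  return (uint32_t) oprnd0 + oprnd1;
}

static uint32_t
sketch_widen_dot_prod (uint16_t oprnd0, uint16_t oprnd1,
                       uint32_t oprnd2 /* WIDE_OP */)
{
  /* DOT_PROD_EXPR: widening multiply of the two narrow operands,
     added to the wide accumulator.  */
  return (uint32_t) oprnd0 * (uint32_t) oprnd1 + oprnd2;
}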
493
494 /* Generate code to perform an operation specified by TERNARY_OPTAB
495 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
496
497 UNSIGNEDP is for the case where we have to widen the operands
498 to perform the operation. It says to use zero-extension.
499
500 If TARGET is nonzero, the value
501 is generated there, if it is convenient to do so.
502 In all cases an rtx is returned for the locus of the value;
503 this may or may not be TARGET. */
504
505 rtx
506 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
507 rtx op1, rtx op2, rtx target, int unsignedp)
508 {
509 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
510 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
511 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
512 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
513 rtx temp;
514 rtx pat;
515 rtx xop0 = op0, xop1 = op1, xop2 = op2;
516
517 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
518 != CODE_FOR_nothing);
519
520 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
521 temp = gen_reg_rtx (mode);
522 else
523 temp = target;
524
525 /* In case the insn wants input operands in modes different from
526 those of the actual operands, convert the operands. It would
527 seem that we don't need to convert CONST_INTs, but we do, so
528 that they're properly zero-extended, sign-extended or truncated
529 for their mode. */
530
531 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
532 xop0 = convert_modes (mode0,
533 GET_MODE (op0) != VOIDmode
534 ? GET_MODE (op0)
535 : mode,
536 xop0, unsignedp);
537
538 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
539 xop1 = convert_modes (mode1,
540 GET_MODE (op1) != VOIDmode
541 ? GET_MODE (op1)
542 : mode,
543 xop1, unsignedp);
544
545 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
546 xop2 = convert_modes (mode2,
547 GET_MODE (op2) != VOIDmode
548 ? GET_MODE (op2)
549 : mode,
550 xop2, unsignedp);
551
552 /* Now, if insn's predicates don't allow our operands, put them into
553 pseudo regs. */
554
555 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
556 && mode0 != VOIDmode)
557 xop0 = copy_to_mode_reg (mode0, xop0);
558
559 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
560 && mode1 != VOIDmode)
561 xop1 = copy_to_mode_reg (mode1, xop1);
562
563 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
564 && mode2 != VOIDmode)
565 xop2 = copy_to_mode_reg (mode2, xop2);
566
567 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
568
569 emit_insn (pat);
570 return temp;
571 }
572
573
574 /* Like expand_binop, but return a constant rtx if the result can be
575 calculated at compile time. The arguments and return value are
576 otherwise the same as for expand_binop. */
577
578 static rtx
579 simplify_expand_binop (enum machine_mode mode, optab binoptab,
580 rtx op0, rtx op1, rtx target, int unsignedp,
581 enum optab_methods methods)
582 {
583 if (CONSTANT_P (op0) && CONSTANT_P (op1))
584 {
585 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
586
587 if (x)
588 return x;
589 }
590
591 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
592 }
593
594 /* Like simplify_expand_binop, but always put the result in TARGET.
595 Return true if the expansion succeeded. */
596
597 bool
598 force_expand_binop (enum machine_mode mode, optab binoptab,
599 rtx op0, rtx op1, rtx target, int unsignedp,
600 enum optab_methods methods)
601 {
602 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
603 target, unsignedp, methods);
604 if (x == 0)
605 return false;
606 if (x != target)
607 emit_move_insn (target, x);
608 return true;
609 }
610
611 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
612
613 rtx
614 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
615 {
616 enum insn_code icode;
617 rtx rtx_op1, rtx_op2;
618 enum machine_mode mode1;
619 enum machine_mode mode2;
620 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
621 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
622 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
623 optab shift_optab;
624 rtx pat;
625
626 switch (TREE_CODE (vec_shift_expr))
627 {
628 case VEC_RSHIFT_EXPR:
629 shift_optab = vec_shr_optab;
630 break;
631 case VEC_LSHIFT_EXPR:
632 shift_optab = vec_shl_optab;
633 break;
634 default:
635 gcc_unreachable ();
636 }
637
638 icode = (int) shift_optab->handlers[(int) mode].insn_code;
639 gcc_assert (icode != CODE_FOR_nothing);
640
641 mode1 = insn_data[icode].operand[1].mode;
642 mode2 = insn_data[icode].operand[2].mode;
643
644 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
645 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
646 && mode1 != VOIDmode)
647 rtx_op1 = force_reg (mode1, rtx_op1);
648
649 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
650 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
651 && mode2 != VOIDmode)
652 rtx_op2 = force_reg (mode2, rtx_op2);
653
654 if (!target
655 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
656 target = gen_reg_rtx (mode);
657
658 /* Emit instruction */
659 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
660 gcc_assert (pat);
661 emit_insn (pat);
662
663 return target;
664 }
665
666 /* This subroutine of expand_doubleword_shift handles the cases in which
667 the effective shift value is >= BITS_PER_WORD. The arguments and return
668 value are the same as for the parent routine, except that SUPERWORD_OP1
669 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
670 INTO_TARGET may be null if the caller has decided to calculate it. */
671
672 static bool
673 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
674 rtx outof_target, rtx into_target,
675 int unsignedp, enum optab_methods methods)
676 {
677 if (into_target != 0)
678 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
679 into_target, unsignedp, methods))
680 return false;
681
682 if (outof_target != 0)
683 {
684 /* For a signed right shift, we must fill OUTOF_TARGET with copies
685 of the sign bit, otherwise we must fill it with zeros. */
686 if (binoptab != ashr_optab)
687 emit_move_insn (outof_target, CONST0_RTX (word_mode));
688 else
689 if (!force_expand_binop (word_mode, binoptab,
690 outof_input, GEN_INT (BITS_PER_WORD - 1),
691 outof_target, unsignedp, methods))
692 return false;
693 }
694 return true;
695 }
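
/* Editorial sketch (not part of optabs.c): the superword case handled by
   expand_superword_shift above, written out for a 64-bit logical right shift
   built from 32-bit words, with shift count 32 <= n < 64.  For a right shift
   the "outof" half is the high input word and the "into" half is the low
   word; an arithmetic right shift would fill the high result word with
   copies of the sign bit instead of zeros.  Names are hypothetical.  */
#include <stdint.h>

static uint64_t
sketch_superword_lshr (uint32_t outof_input /* high word */,
                       uint32_t into_input  /* low word */,
                       unsigned int n       /* 32 <= n < 64 */)
{
  uint32_t superword_op1 = n - 32;                 /* count within one word */
  uint32_t into_target   = outof_input >> superword_op1;
  uint32_t outof_target  = 0;                      /* zero-fill; ashr would
                                                      sign-fill instead */
  (void) into_input;                               /* shifted out entirely */
  return ((uint64_t) outof_target << 32) | into_target;
}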
696
697 /* This subroutine of expand_doubleword_shift handles the cases in which
698 the effective shift value is < BITS_PER_WORD. The arguments and return
699 value are the same as for the parent routine. */
700
701 static bool
702 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
703 rtx outof_input, rtx into_input, rtx op1,
704 rtx outof_target, rtx into_target,
705 int unsignedp, enum optab_methods methods,
706 unsigned HOST_WIDE_INT shift_mask)
707 {
708 optab reverse_unsigned_shift, unsigned_shift;
709 rtx tmp, carries;
710
711 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
712 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
713
714 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
715 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
716 the opposite direction to BINOPTAB. */
717 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
718 {
719 carries = outof_input;
720 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
721 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
722 0, true, methods);
723 }
724 else
725 {
726 /* We must avoid shifting by BITS_PER_WORD bits since that is either
727 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
728 has unknown behavior. Do a single shift first, then shift by the
729 remainder. It's OK to use ~OP1 as the remainder if shift counts
730 are truncated to the mode size. */
731 carries = expand_binop (word_mode, reverse_unsigned_shift,
732 outof_input, const1_rtx, 0, unsignedp, methods);
733 if (shift_mask == BITS_PER_WORD - 1)
734 {
735 tmp = immed_double_const (-1, -1, op1_mode);
736 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
737 0, true, methods);
738 }
739 else
740 {
741 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
742 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
743 0, true, methods);
744 }
745 }
746 if (tmp == 0 || carries == 0)
747 return false;
748 carries = expand_binop (word_mode, reverse_unsigned_shift,
749 carries, tmp, 0, unsignedp, methods);
750 if (carries == 0)
751 return false;
752
753 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
754 so the result can go directly into INTO_TARGET if convenient. */
755 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
756 into_target, unsignedp, methods);
757 if (tmp == 0)
758 return false;
759
760 /* Now OR in the bits carried over from OUTOF_INPUT. */
761 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
762 into_target, unsignedp, methods))
763 return false;
764
765 /* Use a standard word_mode shift for the out-of half. */
766 if (outof_target != 0)
767 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
768 outof_target, unsignedp, methods))
769 return false;
770
771 return true;
772 }
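
/* Editorial sketch (not part of optabs.c): the subword case handled by
   expand_subword_shift above, written out for a 64-bit logical right shift
   built from 32-bit words, with 0 < n < 32.  The low bits of the result come
   partly from the high input word, shifted by (BITS_PER_WORD - n) in the
   opposite direction, exactly as described in the routine's comments; the
   real routine takes extra care with variable counts so that it never emits
   a word-mode shift by BITS_PER_WORD itself.  Names are hypothetical.  */
#include <stdint.h>

static uint64_t
sketch_subword_lshr (uint32_t outof_input /* high word */,
                     uint32_t into_input  /* low word */,
                     unsigned int n       /* 0 < n < 32 */)
{
  uint32_t carries      = outof_input << (32 - n); /* bits crossing the
                                                      word boundary */
  uint32_t into_target  = (into_input >> n) | carries;
  uint32_t outof_target = outof_input >> n;        /* ordinary word shift */
  return ((uint64_t) outof_target << 32) | into_target;
}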
773
774
775 #ifdef HAVE_conditional_move
776 /* Try implementing expand_doubleword_shift using conditional moves.
777 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
778 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
779 are the shift counts to use in the former and latter case. All other
780 arguments are the same as the parent routine. */
781
782 static bool
783 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
784 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
785 rtx outof_input, rtx into_input,
786 rtx subword_op1, rtx superword_op1,
787 rtx outof_target, rtx into_target,
788 int unsignedp, enum optab_methods methods,
789 unsigned HOST_WIDE_INT shift_mask)
790 {
791 rtx outof_superword, into_superword;
792
793 /* Put the superword version of the output into OUTOF_SUPERWORD and
794 INTO_SUPERWORD. */
795 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
796 if (outof_target != 0 && subword_op1 == superword_op1)
797 {
798 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
799 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
800 into_superword = outof_target;
801 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
802 outof_superword, 0, unsignedp, methods))
803 return false;
804 }
805 else
806 {
807 into_superword = gen_reg_rtx (word_mode);
808 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
809 outof_superword, into_superword,
810 unsignedp, methods))
811 return false;
812 }
813
814 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, subword_op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
819 return false;
820
821 /* Select between them. Do the INTO half first because INTO_SUPERWORD
822 might be the current value of OUTOF_TARGET. */
823 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
824 into_target, into_superword, word_mode, false))
825 return false;
826
827 if (outof_target != 0)
828 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
829 outof_target, outof_superword,
830 word_mode, false))
831 return false;
832
833 return true;
834 }
835 #endif
836
837 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
838 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
839 input operand; the shift moves bits in the direction OUTOF_INPUT->
840 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
841 of the target. OP1 is the shift count and OP1_MODE is its mode.
842 If OP1 is constant, it will have been truncated as appropriate
843 and is known to be nonzero.
844
845 If SHIFT_MASK is zero, the result of word shifts is undefined when the
846 shift count is outside the range [0, BITS_PER_WORD). This routine must
847 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
848
849 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
850 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
851 fill with zeros or sign bits as appropriate.
852
853 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
854 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
855 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
856 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
857 are undefined.
858
859 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
860 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
861 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
862 function wants to calculate it itself.
863
864 Return true if the shift could be successfully synthesized. */
865
866 static bool
867 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
868 rtx outof_input, rtx into_input, rtx op1,
869 rtx outof_target, rtx into_target,
870 int unsignedp, enum optab_methods methods,
871 unsigned HOST_WIDE_INT shift_mask)
872 {
873 rtx superword_op1, tmp, cmp1, cmp2;
874 rtx subword_label, done_label;
875 enum rtx_code cmp_code;
876
877 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
878 fill the result with sign or zero bits as appropriate. If so, the value
879 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
880 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
881 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
882
883 This isn't worthwhile for constant shifts since the optimizers will
884 cope better with in-range shift counts. */
885 if (shift_mask >= BITS_PER_WORD
886 && outof_target != 0
887 && !CONSTANT_P (op1))
888 {
889 if (!expand_doubleword_shift (op1_mode, binoptab,
890 outof_input, into_input, op1,
891 0, into_target,
892 unsignedp, methods, shift_mask))
893 return false;
894 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
895 outof_target, unsignedp, methods))
896 return false;
897 return true;
898 }
899
900 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
901 is true when the effective shift value is less than BITS_PER_WORD.
902 Set SUPERWORD_OP1 to the shift count that should be used to shift
903 OUTOF_INPUT into INTO_TARGET when the condition is false. */
904 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
905 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
906 {
907 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
908 is a subword shift count. */
909 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
910 0, true, methods);
911 cmp2 = CONST0_RTX (op1_mode);
912 cmp_code = EQ;
913 superword_op1 = op1;
914 }
915 else
916 {
917 /* Set CMP1 to OP1 - BITS_PER_WORD. */
918 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
919 0, true, methods);
920 cmp2 = CONST0_RTX (op1_mode);
921 cmp_code = LT;
922 superword_op1 = cmp1;
923 }
924 if (cmp1 == 0)
925 return false;
926
927 /* If we can compute the condition at compile time, pick the
928 appropriate subroutine. */
929 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
930 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
931 {
932 if (tmp == const0_rtx)
933 return expand_superword_shift (binoptab, outof_input, superword_op1,
934 outof_target, into_target,
935 unsignedp, methods);
936 else
937 return expand_subword_shift (op1_mode, binoptab,
938 outof_input, into_input, op1,
939 outof_target, into_target,
940 unsignedp, methods, shift_mask);
941 }
942
943 #ifdef HAVE_conditional_move
944 /* Try using conditional moves to generate straight-line code. */
945 {
946 rtx start = get_last_insn ();
947 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
948 cmp_code, cmp1, cmp2,
949 outof_input, into_input,
950 op1, superword_op1,
951 outof_target, into_target,
952 unsignedp, methods, shift_mask))
953 return true;
954 delete_insns_since (start);
955 }
956 #endif
957
958 /* As a last resort, use branches to select the correct alternative. */
959 subword_label = gen_label_rtx ();
960 done_label = gen_label_rtx ();
961
962 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
963 0, 0, subword_label);
964
965 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
966 outof_target, into_target,
967 unsignedp, methods))
968 return false;
969
970 emit_jump_insn (gen_jump (done_label));
971 emit_barrier ();
972 emit_label (subword_label);
973
974 if (!expand_subword_shift (op1_mode, binoptab,
975 outof_input, into_input, op1,
976 outof_target, into_target,
977 unsignedp, methods, shift_mask))
978 return false;
979
980 emit_label (done_label);
981 return true;
982 }
983 \f
984 /* Subroutine of expand_binop. Perform a double word multiplication of
985 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
986 as the target's word_mode. This function returns NULL_RTX if anything
987 goes wrong, in which case it may have already emitted instructions
988 which need to be deleted.
989
990 If we want to multiply two two-word values and have normal and widening
991 multiplies of single-word values, we can do this with three smaller
992 multiplications. Note that we do not make a REG_NO_CONFLICT block here
993 because we are not operating on one word at a time.
994
995 The multiplication proceeds as follows:
996 _______________________
997 [__op0_high_|__op0_low__]
998 _______________________
999 * [__op1_high_|__op1_low__]
1000 _______________________________________________
1001 _______________________
1002 (1) [__op0_low__*__op1_low__]
1003 _______________________
1004 (2a) [__op0_low__*__op1_high_]
1005 _______________________
1006 (2b) [__op0_high_*__op1_low__]
1007 _______________________
1008 (3) [__op0_high_*__op1_high_]
1009
1010
1011 This gives a 4-word result. Since we are only interested in the
1012 lower 2 words, partial result (3) and the upper words of (2a) and
1013 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1014 calculated using non-widening multiplication.
1015
1016 (1), however, needs to be calculated with an unsigned widening
1017 multiplication. If this operation is not directly supported we
1018 try using a signed widening multiplication and adjust the result.
1019 This adjustment works as follows:
1020
1021 If both operands are positive then no adjustment is needed.
1022
1023 If the operands have different signs, for example op0_low < 0 and
1024 op1_low >= 0, the instruction treats the most significant bit of
1025 op0_low as a sign bit instead of a bit with significance
1026 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1027 with 2**BITS_PER_WORD - op0_low, and two's complements the
1028 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1029 the result.
1030
1031 Similarly, if both operands are negative, we need to add
1032 (op0_low + op1_low) * 2**BITS_PER_WORD.
1033
1034 We use a trick to adjust quickly. We logically shift op0_low right
1035 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1036 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1037 logical shift exists, we do an arithmetic right shift and subtract
1038 the 0 or -1. */
1039
1040 static rtx
1041 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1042 bool umulp, enum optab_methods methods)
1043 {
1044 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1045 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1046 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1047 rtx product, adjust, product_high, temp;
1048
1049 rtx op0_high = operand_subword_force (op0, high, mode);
1050 rtx op0_low = operand_subword_force (op0, low, mode);
1051 rtx op1_high = operand_subword_force (op1, high, mode);
1052 rtx op1_low = operand_subword_force (op1, low, mode);
1053
1054 /* If we're using an unsigned multiply to directly compute the product
1055 of the low-order words of the operands and perform any required
1056 adjustments of the operands, we begin by trying two more multiplications
1057 and then computing the appropriate sum.
1058
1059 We have checked above that the required addition is provided.
1060 Full-word addition will normally always succeed, especially if
1061 it is provided at all, so we don't worry about its failure. The
1062 multiplication may well fail, however, so we do handle that. */
1063
1064 if (!umulp)
1065 {
1066 /* ??? This could be done with emit_store_flag where available. */
1067 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1068 NULL_RTX, 1, methods);
1069 if (temp)
1070 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1071 NULL_RTX, 0, OPTAB_DIRECT);
1072 else
1073 {
1074 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1075 NULL_RTX, 0, methods);
1076 if (!temp)
1077 return NULL_RTX;
1078 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1079 NULL_RTX, 0, OPTAB_DIRECT);
1080 }
1081
1082 if (!op0_high)
1083 return NULL_RTX;
1084 }
1085
1086 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1087 NULL_RTX, 0, OPTAB_DIRECT);
1088 if (!adjust)
1089 return NULL_RTX;
1090
1091 /* OP0_HIGH should now be dead. */
1092
1093 if (!umulp)
1094 {
1095 /* ??? This could be done with emit_store_flag where available. */
1096 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1097 NULL_RTX, 1, methods);
1098 if (temp)
1099 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1100 NULL_RTX, 0, OPTAB_DIRECT);
1101 else
1102 {
1103 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1104 NULL_RTX, 0, methods);
1105 if (!temp)
1106 return NULL_RTX;
1107 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1108 NULL_RTX, 0, OPTAB_DIRECT);
1109 }
1110
1111 if (!op1_high)
1112 return NULL_RTX;
1113 }
1114
1115 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1116 NULL_RTX, 0, OPTAB_DIRECT);
1117 if (!temp)
1118 return NULL_RTX;
1119
1120 /* OP1_HIGH should now be dead. */
1121
1122 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1123 adjust, 0, OPTAB_DIRECT);
1124
1125 if (target && !REG_P (target))
1126 target = NULL_RTX;
1127
1128 if (umulp)
1129 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1130 target, 1, OPTAB_DIRECT);
1131 else
1132 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1133 target, 1, OPTAB_DIRECT);
1134
1135 if (!product)
1136 return NULL_RTX;
1137
1138 product_high = operand_subword (product, high, 1, mode);
1139 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1140 REG_P (product_high) ? product_high : adjust,
1141 0, OPTAB_DIRECT);
1142 emit_move_insn (product_high, adjust);
1143 return product;
1144 }
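
/* Editorial sketch (not part of optabs.c): the three-multiplication scheme
   described in the comment above expand_doubleword_mult, computing the low
   64 bits of a 64x64-bit product from 32-bit words.  This shows only the
   case where an unsigned widening multiply is available (umulp); the
   signed-widening fallback with its shift-based adjustment of the high
   words is omitted.  Names are hypothetical.  */
#include <stdint.h>

static uint64_t
sketch_doubleword_mult (uint64_t op0, uint64_t op1)
{
  uint32_t op0_low  = (uint32_t) op0, op0_high = (uint32_t) (op0 >> 32);
  uint32_t op1_low  = (uint32_t) op1, op1_high = (uint32_t) (op1 >> 32);

  /* (2a) + (2b): cross products; only their low words matter.  */
  uint32_t adjust = op0_high * op1_low + op1_high * op0_low;

  /* (1): unsigned widening multiply of the low words.  */
  uint64_t product = (uint64_t) op0_low * op1_low;

  /* Fold the adjustment into the high word of the product; partial
     result (3) never affects the low 64 bits.  */
  return product + ((uint64_t) adjust << 32);
}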
1145 \f
1146 /* Wrapper around expand_binop which takes an rtx code to specify
1147 the operation to perform, not an optab pointer. All other
1148 arguments are the same. */
1149 rtx
1150 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1151 rtx op1, rtx target, int unsignedp,
1152 enum optab_methods methods)
1153 {
1154 optab binop = code_to_optab[(int) code];
1155 gcc_assert (binop);
1156
1157 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1158 }
1159
1160 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1161 binop. Order them according to commutative_operand_precedence and, if
1162 possible, try to put TARGET or a pseudo first. */
1163 static bool
1164 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1165 {
1166 int op0_prec = commutative_operand_precedence (op0);
1167 int op1_prec = commutative_operand_precedence (op1);
1168
1169 if (op0_prec < op1_prec)
1170 return true;
1171
1172 if (op0_prec > op1_prec)
1173 return false;
1174
1175 /* With equal precedence, both orders are ok, but it is better if the
1176 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1177 if (target == 0 || REG_P (target))
1178 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1179 else
1180 return rtx_equal_p (op1, target);
1181 }
1182
1183
1184 /* Generate code to perform an operation specified by BINOPTAB
1185 on operands OP0 and OP1, with result having machine-mode MODE.
1186
1187 UNSIGNEDP is for the case where we have to widen the operands
1188 to perform the operation. It says to use zero-extension.
1189
1190 If TARGET is nonzero, the value
1191 is generated there, if it is convenient to do so.
1192 In all cases an rtx is returned for the locus of the value;
1193 this may or may not be TARGET. */
1194
1195 rtx
1196 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1197 rtx target, int unsignedp, enum optab_methods methods)
1198 {
1199 enum optab_methods next_methods
1200 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1201 ? OPTAB_WIDEN : methods);
1202 enum mode_class class;
1203 enum machine_mode wider_mode;
1204 rtx temp;
1205 int commutative_op = 0;
1206 int shift_op = (binoptab->code == ASHIFT
1207 || binoptab->code == ASHIFTRT
1208 || binoptab->code == LSHIFTRT
1209 || binoptab->code == ROTATE
1210 || binoptab->code == ROTATERT);
1211 rtx entry_last = get_last_insn ();
1212 rtx last;
1213 bool first_pass_p = true;
1214
1215 class = GET_MODE_CLASS (mode);
1216
1217 /* If subtracting an integer constant, convert this into an addition of
1218 the negated constant. */
1219
1220 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1221 {
1222 op1 = negate_rtx (mode, op1);
1223 binoptab = add_optab;
1224 }
1225
1226 /* If we are inside an appropriately-short loop and we are optimizing,
1227 force expensive constants into a register. */
1228 if (CONSTANT_P (op0) && optimize
1229 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1230 {
1231 if (GET_MODE (op0) != VOIDmode)
1232 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1233 op0 = force_reg (mode, op0);
1234 }
1235
1236 if (CONSTANT_P (op1) && optimize
1237 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1238 {
1239 if (GET_MODE (op1) != VOIDmode)
1240 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1241 op1 = force_reg (mode, op1);
1242 }
1243
1244 /* Record where to delete back to if we backtrack. */
1245 last = get_last_insn ();
1246
1247 /* If operation is commutative,
1248 try to make the first operand a register.
1249 Even better, try to make it the same as the target.
1250 Also try to make the last operand a constant. */
1251 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1252 || binoptab == smul_widen_optab
1253 || binoptab == umul_widen_optab
1254 || binoptab == smul_highpart_optab
1255 || binoptab == umul_highpart_optab)
1256 {
1257 commutative_op = 1;
1258
1259 if (swap_commutative_operands_with_target (target, op0, op1))
1260 {
1261 temp = op1;
1262 op1 = op0;
1263 op0 = temp;
1264 }
1265 }
1266
1267 retry:
1268
1269 /* If we can do it with a three-operand insn, do so. */
1270
1271 if (methods != OPTAB_MUST_WIDEN
1272 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1273 {
1274 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1275 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1276 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1277 rtx pat;
1278 rtx xop0 = op0, xop1 = op1;
1279
1280 if (target)
1281 temp = target;
1282 else
1283 temp = gen_reg_rtx (mode);
1284
1285 /* If it is a commutative operator and the modes would match
1286 if we would swap the operands, we can save the conversions. */
1287 if (commutative_op)
1288 {
1289 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1290 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1291 {
1292 rtx tmp;
1293
1294 tmp = op0; op0 = op1; op1 = tmp;
1295 tmp = xop0; xop0 = xop1; xop1 = tmp;
1296 }
1297 }
1298
1299 /* In case the insn wants input operands in modes different from
1300 those of the actual operands, convert the operands. It would
1301 seem that we don't need to convert CONST_INTs, but we do, so
1302 that they're properly zero-extended, sign-extended or truncated
1303 for their mode. */
1304
1305 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1306 xop0 = convert_modes (mode0,
1307 GET_MODE (op0) != VOIDmode
1308 ? GET_MODE (op0)
1309 : mode,
1310 xop0, unsignedp);
1311
1312 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1313 xop1 = convert_modes (mode1,
1314 GET_MODE (op1) != VOIDmode
1315 ? GET_MODE (op1)
1316 : mode,
1317 xop1, unsignedp);
1318
1319 /* Now, if insn's predicates don't allow our operands, put them into
1320 pseudo regs. */
1321
1322 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1323 && mode0 != VOIDmode)
1324 xop0 = copy_to_mode_reg (mode0, xop0);
1325
1326 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1327 && mode1 != VOIDmode)
1328 xop1 = copy_to_mode_reg (mode1, xop1);
1329
1330 if (!insn_data[icode].operand[0].predicate (temp, mode))
1331 temp = gen_reg_rtx (mode);
1332
1333 pat = GEN_FCN (icode) (temp, xop0, xop1);
1334 if (pat)
1335 {
1336 /* If PAT is composed of more than one insn, try to add an appropriate
1337 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1338 operand, call ourselves again, this time without a target. */
1339 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1340 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1341 {
1342 delete_insns_since (last);
1343 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1344 unsignedp, methods);
1345 }
1346
1347 emit_insn (pat);
1348 return temp;
1349 }
1350 else
1351 delete_insns_since (last);
1352 }
1353
1354 /* If we were trying to rotate by a constant value, and that didn't
1355 work, try rotating the other direction before falling back to
1356 shifts and bitwise-or. */
1357 if (first_pass_p
1358 && (binoptab == rotl_optab || binoptab == rotr_optab)
1359 && class == MODE_INT
1360 && GET_CODE (op1) == CONST_INT
1361 && INTVAL (op1) > 0
1362 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1363 {
1364 first_pass_p = false;
1365 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1366 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1367 goto retry;
1368 }
1369
1370 /* If this is a multiply, see if we can do a widening operation that
1371 takes operands of this mode and makes a wider mode. */
1372
1373 if (binoptab == smul_optab
1374 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1375 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1376 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1377 != CODE_FOR_nothing))
1378 {
1379 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1380 unsignedp ? umul_widen_optab : smul_widen_optab,
1381 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1382
1383 if (temp != 0)
1384 {
1385 if (GET_MODE_CLASS (mode) == MODE_INT
1386 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1387 GET_MODE_BITSIZE (GET_MODE (temp))))
1388 return gen_lowpart (mode, temp);
1389 else
1390 return convert_to_mode (mode, temp, unsignedp);
1391 }
1392 }
1393
1394 /* Look for a wider mode of the same class for which we think we
1395 can open-code the operation. Check for a widening multiply at the
1396 wider mode as well. */
1397
1398 if (CLASS_HAS_WIDER_MODES_P (class)
1399 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1400 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1401 wider_mode != VOIDmode;
1402 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1403 {
1404 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1405 || (binoptab == smul_optab
1406 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1407 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1408 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1409 != CODE_FOR_nothing)))
1410 {
1411 rtx xop0 = op0, xop1 = op1;
1412 int no_extend = 0;
1413
1414 /* For certain integer operations, we need not actually extend
1415 the narrow operands, as long as we will truncate
1416 the results to the same narrowness. */
1417
1418 if ((binoptab == ior_optab || binoptab == and_optab
1419 || binoptab == xor_optab
1420 || binoptab == add_optab || binoptab == sub_optab
1421 || binoptab == smul_optab || binoptab == ashl_optab)
1422 && class == MODE_INT)
1423 no_extend = 1;
1424
1425 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1426
1427 /* The second operand of a shift must always be extended. */
1428 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1429 no_extend && binoptab != ashl_optab);
1430
1431 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1432 unsignedp, OPTAB_DIRECT);
1433 if (temp)
1434 {
1435 if (class != MODE_INT
1436 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1437 GET_MODE_BITSIZE (wider_mode)))
1438 {
1439 if (target == 0)
1440 target = gen_reg_rtx (mode);
1441 convert_move (target, temp, 0);
1442 return target;
1443 }
1444 else
1445 return gen_lowpart (mode, temp);
1446 }
1447 else
1448 delete_insns_since (last);
1449 }
1450 }
1451
1452 /* These can be done a word at a time. */
1453 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1454 && class == MODE_INT
1455 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1456 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1457 {
1458 int i;
1459 rtx insns;
1460 rtx equiv_value;
1461
1462 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1463 won't be accurate, so use a new target. */
1464 if (target == 0 || target == op0 || target == op1)
1465 target = gen_reg_rtx (mode);
1466
1467 start_sequence ();
1468
1469 /* Do the actual arithmetic. */
1470 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1471 {
1472 rtx target_piece = operand_subword (target, i, 1, mode);
1473 rtx x = expand_binop (word_mode, binoptab,
1474 operand_subword_force (op0, i, mode),
1475 operand_subword_force (op1, i, mode),
1476 target_piece, unsignedp, next_methods);
1477
1478 if (x == 0)
1479 break;
1480
1481 if (target_piece != x)
1482 emit_move_insn (target_piece, x);
1483 }
1484
1485 insns = get_insns ();
1486 end_sequence ();
1487
1488 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1489 {
1490 if (binoptab->code != UNKNOWN)
1491 equiv_value
1492 = gen_rtx_fmt_ee (binoptab->code, mode,
1493 copy_rtx (op0), copy_rtx (op1));
1494 else
1495 equiv_value = 0;
1496
1497 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1498 return target;
1499 }
1500 }
1501
1502 /* Synthesize double word shifts from single word shifts. */
1503 if ((binoptab == lshr_optab || binoptab == ashl_optab
1504 || binoptab == ashr_optab)
1505 && class == MODE_INT
1506 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1507 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1508 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1509 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1510 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1511 {
1512 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1513 enum machine_mode op1_mode;
1514
1515 double_shift_mask = targetm.shift_truncation_mask (mode);
1516 shift_mask = targetm.shift_truncation_mask (word_mode);
1517 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1518
1519 /* Apply the truncation to constant shifts. */
1520 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1521 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1522
1523 if (op1 == CONST0_RTX (op1_mode))
1524 return op0;
1525
1526 /* Make sure that this is a combination that expand_doubleword_shift
1527 can handle. See the comments there for details. */
1528 if (double_shift_mask == 0
1529 || (shift_mask == BITS_PER_WORD - 1
1530 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1531 {
1532 rtx insns, equiv_value;
1533 rtx into_target, outof_target;
1534 rtx into_input, outof_input;
1535 int left_shift, outof_word;
1536
1537 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1538 won't be accurate, so use a new target. */
1539 if (target == 0 || target == op0 || target == op1)
1540 target = gen_reg_rtx (mode);
1541
1542 start_sequence ();
1543
1544 /* OUTOF_* is the word we are shifting bits away from, and
1545 INTO_* is the word that we are shifting bits towards, thus
1546 they differ depending on the direction of the shift and
1547 WORDS_BIG_ENDIAN. */
1548
1549 left_shift = binoptab == ashl_optab;
1550 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1551
1552 outof_target = operand_subword (target, outof_word, 1, mode);
1553 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1554
1555 outof_input = operand_subword_force (op0, outof_word, mode);
1556 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1557
1558 if (expand_doubleword_shift (op1_mode, binoptab,
1559 outof_input, into_input, op1,
1560 outof_target, into_target,
1561 unsignedp, methods, shift_mask))
1562 {
1563 insns = get_insns ();
1564 end_sequence ();
1565
1566 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1567 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1568 return target;
1569 }
1570 end_sequence ();
1571 }
1572 }
1573
1574 /* Synthesize double word rotates from single word shifts. */
1575 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1576 && class == MODE_INT
1577 && GET_CODE (op1) == CONST_INT
1578 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1579 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1580 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1581 {
1582 rtx insns;
1583 rtx into_target, outof_target;
1584 rtx into_input, outof_input;
1585 rtx inter;
1586 int shift_count, left_shift, outof_word;
1587
1588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1589 won't be accurate, so use a new target. Do this also if target is not
1590 a REG, first because having a register instead may open optimization
1591 opportunities, and second because if target and op0 happen to be MEMs
1592 designating the same location, we would risk clobbering it too early
1593 in the code sequence we generate below. */
1594 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1595 target = gen_reg_rtx (mode);
1596
1597 start_sequence ();
1598
1599 shift_count = INTVAL (op1);
1600
1601 /* OUTOF_* is the word we are shifting bits away from, and
1602 INTO_* is the word that we are shifting bits towards, thus
1603 they differ depending on the direction of the shift and
1604 WORDS_BIG_ENDIAN. */
1605
1606 left_shift = (binoptab == rotl_optab);
1607 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1608
1609 outof_target = operand_subword (target, outof_word, 1, mode);
1610 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1611
1612 outof_input = operand_subword_force (op0, outof_word, mode);
1613 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1614
1615 if (shift_count == BITS_PER_WORD)
1616 {
1617 /* This is just a word swap. */
1618 emit_move_insn (outof_target, into_input);
1619 emit_move_insn (into_target, outof_input);
1620 inter = const0_rtx;
1621 }
1622 else
1623 {
1624 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1625 rtx first_shift_count, second_shift_count;
1626 optab reverse_unsigned_shift, unsigned_shift;
1627
1628 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1629 ? lshr_optab : ashl_optab);
1630
1631 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1632 ? ashl_optab : lshr_optab);
1633
1634 if (shift_count > BITS_PER_WORD)
1635 {
1636 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1637 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1638 }
1639 else
1640 {
1641 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1642 second_shift_count = GEN_INT (shift_count);
1643 }
1644
1645 into_temp1 = expand_binop (word_mode, unsigned_shift,
1646 outof_input, first_shift_count,
1647 NULL_RTX, unsignedp, next_methods);
1648 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1649 into_input, second_shift_count,
1650 NULL_RTX, unsignedp, next_methods);
1651
1652 if (into_temp1 != 0 && into_temp2 != 0)
1653 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1654 into_target, unsignedp, next_methods);
1655 else
1656 inter = 0;
1657
1658 if (inter != 0 && inter != into_target)
1659 emit_move_insn (into_target, inter);
1660
1661 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1662 into_input, first_shift_count,
1663 NULL_RTX, unsignedp, next_methods);
1664 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1665 outof_input, second_shift_count,
1666 NULL_RTX, unsignedp, next_methods);
1667
1668 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1669 inter = expand_binop (word_mode, ior_optab,
1670 outof_temp1, outof_temp2,
1671 outof_target, unsignedp, next_methods);
1672
1673 if (inter != 0 && inter != outof_target)
1674 emit_move_insn (outof_target, inter);
1675 }
1676
1677 insns = get_insns ();
1678 end_sequence ();
1679
1680 if (inter != 0)
1681 {
1682 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1683 block to help the register allocator a bit. But a multi-word
1684 rotate will need all the input bits when setting the output
1685 bits, so there clearly is a conflict between the input and
1686 output registers. So we can't use a no-conflict block here. */
1687 emit_insn (insns);
1688 return target;
1689 }
1690 }
1691
1692 /* These can be done a word at a time by propagating carries. */
1693 if ((binoptab == add_optab || binoptab == sub_optab)
1694 && class == MODE_INT
1695 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1696 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1697 {
1698 unsigned int i;
1699 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1700 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1701 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1702 rtx xop0, xop1, xtarget;
1703
1704 /* We can handle either a 1 or -1 value for the carry. If
1705 STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since it is the
1706 one easiest to get. */
1707 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1708 int normalizep = STORE_FLAG_VALUE;
1709 #else
1710 int normalizep = 1;
1711 #endif
1712
1713 /* Prepare the operands. */
1714 xop0 = force_reg (mode, op0);
1715 xop1 = force_reg (mode, op1);
1716
1717 xtarget = gen_reg_rtx (mode);
1718
1719 if (target == 0 || !REG_P (target))
1720 target = xtarget;
1721
1722 /* Indicate for flow that the entire target reg is being set. */
1723 if (REG_P (target))
1724 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1725
1726 /* Do the actual arithmetic. */
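/* Sketch of the word-at-a-time scheme, described here for addition
   (subtraction mirrors it with borrows): add the two input words, detect
   a carry out by checking that the unsigned word sum is less than the
   first addend, then fold in the carry from the previous word and ior
   together the two possible carries. For example, with 32-bit words,
   0xffffffff + 1 gives a word sum of 0, which is less than 0xffffffff,
   so a carry of 1 propagates to the next word. */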
1727 for (i = 0; i < nwords; i++)
1728 {
1729 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1730 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1731 rtx op0_piece = operand_subword_force (xop0, index, mode);
1732 rtx op1_piece = operand_subword_force (xop1, index, mode);
1733 rtx x;
1734
1735 /* Main add/subtract of the input operands. */
1736 x = expand_binop (word_mode, binoptab,
1737 op0_piece, op1_piece,
1738 target_piece, unsignedp, next_methods);
1739 if (x == 0)
1740 break;
1741
1742 if (i + 1 < nwords)
1743 {
1744 /* Store carry from main add/subtract. */
1745 carry_out = gen_reg_rtx (word_mode);
1746 carry_out = emit_store_flag_force (carry_out,
1747 (binoptab == add_optab
1748 ? LT : GT),
1749 x, op0_piece,
1750 word_mode, 1, normalizep);
1751 }
1752
1753 if (i > 0)
1754 {
1755 rtx newx;
1756
1757 /* Add/subtract previous carry to main result. */
1758 newx = expand_binop (word_mode,
1759 normalizep == 1 ? binoptab : otheroptab,
1760 x, carry_in,
1761 NULL_RTX, 1, next_methods);
1762
1763 if (i + 1 < nwords)
1764 {
1765 /* Get out carry from adding/subtracting carry in. */
1766 rtx carry_tmp = gen_reg_rtx (word_mode);
1767 carry_tmp = emit_store_flag_force (carry_tmp,
1768 (binoptab == add_optab
1769 ? LT : GT),
1770 newx, x,
1771 word_mode, 1, normalizep);
1772
1773 /* Logical-ior the two possible carries together. */
1774 carry_out = expand_binop (word_mode, ior_optab,
1775 carry_out, carry_tmp,
1776 carry_out, 0, next_methods);
1777 if (carry_out == 0)
1778 break;
1779 }
1780 emit_move_insn (target_piece, newx);
1781 }
1782 else
1783 {
1784 if (x != target_piece)
1785 emit_move_insn (target_piece, x);
1786 }
1787
1788 carry_in = carry_out;
1789 }
1790
1791 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1792 {
1793 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1794 || ! rtx_equal_p (target, xtarget))
1795 {
1796 rtx temp = emit_move_insn (target, xtarget);
1797
1798 set_unique_reg_note (temp,
1799 REG_EQUAL,
1800 gen_rtx_fmt_ee (binoptab->code, mode,
1801 copy_rtx (xop0),
1802 copy_rtx (xop1)));
1803 }
1804 else
1805 target = xtarget;
1806
1807 return target;
1808 }
1809
1810 else
1811 delete_insns_since (last);
1812 }
1813
1814 /* Attempt to synthesize double word multiplies using a sequence of word
1815 mode multiplications. We first attempt to generate a sequence using a
1816 more efficient unsigned widening multiply, and if that fails we then
1817 try using a signed widening multiply. */
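/* The underlying identity, sketched with word size W and writing
   OP0 = H0*2^W + L0 and OP1 = H1*2^W + L1:

   OP0 * OP1 mod 2^(2*W) = L0*L1 + ((H0*L1 + H1*L0) << W)

   where L0*L1 is the 2*W-bit widening word multiply and the H0*L1 and
   H1*L0 products only contribute to the high word; see
   expand_doubleword_mult for the actual insn sequence. */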
1818
1819 if (binoptab == smul_optab
1820 && class == MODE_INT
1821 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1822 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1823 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1824 {
1825 rtx product = NULL_RTX;
1826
1827 if (umul_widen_optab->handlers[(int) mode].insn_code
1828 != CODE_FOR_nothing)
1829 {
1830 product = expand_doubleword_mult (mode, op0, op1, target,
1831 true, methods);
1832 if (!product)
1833 delete_insns_since (last);
1834 }
1835
1836 if (product == NULL_RTX
1837 && smul_widen_optab->handlers[(int) mode].insn_code
1838 != CODE_FOR_nothing)
1839 {
1840 product = expand_doubleword_mult (mode, op0, op1, target,
1841 false, methods);
1842 if (!product)
1843 delete_insns_since (last);
1844 }
1845
1846 if (product != NULL_RTX)
1847 {
1848 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1849 {
1850 temp = emit_move_insn (target ? target : product, product);
1851 set_unique_reg_note (temp,
1852 REG_EQUAL,
1853 gen_rtx_fmt_ee (MULT, mode,
1854 copy_rtx (op0),
1855 copy_rtx (op1)));
1856 }
1857 return product;
1858 }
1859 }
1860
1861 /* It can't be open-coded in this mode.
1862 Use a library call if one is available and caller says that's ok. */
1863
1864 if (binoptab->handlers[(int) mode].libfunc
1865 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1866 {
1867 rtx insns;
1868 rtx op1x = op1;
1869 enum machine_mode op1_mode = mode;
1870 rtx value;
1871
1872 start_sequence ();
1873
1874 if (shift_op)
1875 {
1876 op1_mode = word_mode;
1877 /* Specify unsigned here,
1878 since negative shift counts are meaningless. */
1879 op1x = convert_to_mode (word_mode, op1, 1);
1880 }
1881
1882 if (GET_MODE (op0) != VOIDmode
1883 && GET_MODE (op0) != mode)
1884 op0 = convert_to_mode (mode, op0, unsignedp);
1885
1886 /* Pass 1 for NO_QUEUE so we don't lose any increments
1887 if the libcall is cse'd or moved. */
1888 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1889 NULL_RTX, LCT_CONST, mode, 2,
1890 op0, mode, op1x, op1_mode);
1891
1892 insns = get_insns ();
1893 end_sequence ();
1894
1895 target = gen_reg_rtx (mode);
1896 emit_libcall_block (insns, target, value,
1897 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1898
1899 return target;
1900 }
1901
1902 delete_insns_since (last);
1903
1904 /* It can't be done in this mode. Can we do it in a wider mode? */
1905
1906 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1907 || methods == OPTAB_MUST_WIDEN))
1908 {
1909 /* Caller says, don't even try. */
1910 delete_insns_since (entry_last);
1911 return 0;
1912 }
1913
1914 /* Compute the value of METHODS to pass to recursive calls.
1915 Don't allow widening to be tried recursively. */
1916
1917 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1918
1919 /* Look for a wider mode of the same class for which it appears we can do
1920 the operation. */
1921
1922 if (CLASS_HAS_WIDER_MODES_P (class))
1923 {
1924 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1925 wider_mode != VOIDmode;
1926 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1927 {
1928 if ((binoptab->handlers[(int) wider_mode].insn_code
1929 != CODE_FOR_nothing)
1930 || (methods == OPTAB_LIB
1931 && binoptab->handlers[(int) wider_mode].libfunc))
1932 {
1933 rtx xop0 = op0, xop1 = op1;
1934 int no_extend = 0;
1935
1936 /* For certain integer operations, we need not actually extend
1937 the narrow operands, as long as we will truncate
1938 the results to the same narrowness. */
1939
1940 if ((binoptab == ior_optab || binoptab == and_optab
1941 || binoptab == xor_optab
1942 || binoptab == add_optab || binoptab == sub_optab
1943 || binoptab == smul_optab || binoptab == ashl_optab)
1944 && class == MODE_INT)
1945 no_extend = 1;
1946
1947 xop0 = widen_operand (xop0, wider_mode, mode,
1948 unsignedp, no_extend);
1949
1950 /* The second operand of a shift must always be extended. */
1951 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1952 no_extend && binoptab != ashl_optab);
1953
1954 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1955 unsignedp, methods);
1956 if (temp)
1957 {
1958 if (class != MODE_INT
1959 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1960 GET_MODE_BITSIZE (wider_mode)))
1961 {
1962 if (target == 0)
1963 target = gen_reg_rtx (mode);
1964 convert_move (target, temp, 0);
1965 return target;
1966 }
1967 else
1968 return gen_lowpart (mode, temp);
1969 }
1970 else
1971 delete_insns_since (last);
1972 }
1973 }
1974 }
1975
1976 delete_insns_since (entry_last);
1977 return 0;
1978 }
1979 \f
1980 /* Expand a binary operator which has both signed and unsigned forms.
1981 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1982 signed operations.
1983
1984 If we widen unsigned operands, we may use a signed wider operation instead
1985 of an unsigned wider operation, since the result would be the same. */
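/* For example (illustrative; QImode and HImode stand in for any narrow and
   wide pair): an unsigned QImode addition widened to HImode can safely use
   the signed HImode add insn, because the low 8 bits of the sum do not
   depend on how the operands were extended. */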
1986
1987 rtx
1988 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1989 rtx op0, rtx op1, rtx target, int unsignedp,
1990 enum optab_methods methods)
1991 {
1992 rtx temp;
1993 optab direct_optab = unsignedp ? uoptab : soptab;
1994 struct optab wide_soptab;
1995
1996 /* Do it without widening, if possible. */
1997 temp = expand_binop (mode, direct_optab, op0, op1, target,
1998 unsignedp, OPTAB_DIRECT);
1999 if (temp || methods == OPTAB_DIRECT)
2000 return temp;
2001
2002 /* Try widening to a signed int. Make a fake signed optab that
2003 hides any signed insn for direct use. */
2004 wide_soptab = *soptab;
2005 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2006 wide_soptab.handlers[(int) mode].libfunc = 0;
2007
2008 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2009 unsignedp, OPTAB_WIDEN);
2010
2011 /* For unsigned operands, try widening to an unsigned int. */
2012 if (temp == 0 && unsignedp)
2013 temp = expand_binop (mode, uoptab, op0, op1, target,
2014 unsignedp, OPTAB_WIDEN);
2015 if (temp || methods == OPTAB_WIDEN)
2016 return temp;
2017
2018 /* Use the right width lib call if that exists. */
2019 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2020 if (temp || methods == OPTAB_LIB)
2021 return temp;
2022
2023 /* Must widen and use a lib call; use either signed or unsigned. */
2024 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2025 unsignedp, methods);
2026 if (temp != 0)
2027 return temp;
2028 if (unsignedp)
2029 return expand_binop (mode, uoptab, op0, op1, target,
2030 unsignedp, methods);
2031 return 0;
2032 }
2033 \f
2034 /* Generate code to perform an operation specified by UNOPPTAB
2035 on operand OP0, with two results to TARG0 and TARG1.
2036 We assume that the order of the operands for the instruction
2037 is TARG0, TARG1, OP0.
2038
2039 Either TARG0 or TARG1 may be zero, but what that means is that
2040 the result is not actually wanted. We will generate it into
2041 a dummy pseudo-reg and discard it. They may not both be zero.
2042
2043 Returns 1 if this operation can be performed; 0 if not. */
2044
2045 int
2046 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2047 int unsignedp)
2048 {
2049 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2050 enum mode_class class;
2051 enum machine_mode wider_mode;
2052 rtx entry_last = get_last_insn ();
2053 rtx last;
2054
2055 class = GET_MODE_CLASS (mode);
2056
2057 if (!targ0)
2058 targ0 = gen_reg_rtx (mode);
2059 if (!targ1)
2060 targ1 = gen_reg_rtx (mode);
2061
2062 /* Record where to go back to if we fail. */
2063 last = get_last_insn ();
2064
2065 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2066 {
2067 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2068 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2069 rtx pat;
2070 rtx xop0 = op0;
2071
2072 if (GET_MODE (xop0) != VOIDmode
2073 && GET_MODE (xop0) != mode0)
2074 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2075
2076 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2077 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2078 xop0 = copy_to_mode_reg (mode0, xop0);
2079
2080 /* We could handle this, but we should always be called with a pseudo
2081 for our targets and all insns should take them as outputs. */
2082 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2083 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2084
2085 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2086 if (pat)
2087 {
2088 emit_insn (pat);
2089 return 1;
2090 }
2091 else
2092 delete_insns_since (last);
2093 }
2094
2095 /* It can't be done in this mode. Can we do it in a wider mode? */
2096
2097 if (CLASS_HAS_WIDER_MODES_P (class))
2098 {
2099 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2100 wider_mode != VOIDmode;
2101 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2102 {
2103 if (unoptab->handlers[(int) wider_mode].insn_code
2104 != CODE_FOR_nothing)
2105 {
2106 rtx t0 = gen_reg_rtx (wider_mode);
2107 rtx t1 = gen_reg_rtx (wider_mode);
2108 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2109
2110 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2111 {
2112 convert_move (targ0, t0, unsignedp);
2113 convert_move (targ1, t1, unsignedp);
2114 return 1;
2115 }
2116 else
2117 delete_insns_since (last);
2118 }
2119 }
2120 }
2121
2122 delete_insns_since (entry_last);
2123 return 0;
2124 }
2125 \f
2126 /* Generate code to perform an operation specified by BINOPTAB
2127 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2128 We assume that the order of the operands for the instruction
2129 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2130 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2131
2132 Either TARG0 or TARG1 may be zero, but what that means is that
2133 the result is not actually wanted. We will generate it into
2134 a dummy pseudo-reg and discard it. They may not both be zero.
2135
2136 Returns 1 if this operation can be performed; 0 if not. */
2137
2138 int
2139 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2140 int unsignedp)
2141 {
2142 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2143 enum mode_class class;
2144 enum machine_mode wider_mode;
2145 rtx entry_last = get_last_insn ();
2146 rtx last;
2147
2148 class = GET_MODE_CLASS (mode);
2149
2150 /* If we are inside an appropriately-short loop and we are optimizing,
2151 force expensive constants into a register. */
2152 if (CONSTANT_P (op0) && optimize
2153 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2154 op0 = force_reg (mode, op0);
2155
2156 if (CONSTANT_P (op1) && optimize
2157 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2158 op1 = force_reg (mode, op1);
2159
2160 if (!targ0)
2161 targ0 = gen_reg_rtx (mode);
2162 if (!targ1)
2163 targ1 = gen_reg_rtx (mode);
2164
2165 /* Record where to go back to if we fail. */
2166 last = get_last_insn ();
2167
2168 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2169 {
2170 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2171 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2172 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2173 rtx pat;
2174 rtx xop0 = op0, xop1 = op1;
2175
2176 /* In case the insn wants input operands in modes different from
2177 those of the actual operands, convert the operands. It would
2178 seem that we don't need to convert CONST_INTs, but we do, so
2179 that they're properly zero-extended, sign-extended or truncated
2180 for their mode. */
2181
2182 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2183 xop0 = convert_modes (mode0,
2184 GET_MODE (op0) != VOIDmode
2185 ? GET_MODE (op0)
2186 : mode,
2187 xop0, unsignedp);
2188
2189 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2190 xop1 = convert_modes (mode1,
2191 GET_MODE (op1) != VOIDmode
2192 ? GET_MODE (op1)
2193 : mode,
2194 xop1, unsignedp);
2195
2196 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2197 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2198 xop0 = copy_to_mode_reg (mode0, xop0);
2199
2200 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2201 xop1 = copy_to_mode_reg (mode1, xop1);
2202
2203 /* We could handle this, but we should always be called with a pseudo
2204 for our targets and all insns should take them as outputs. */
2205 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2206 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2207
2208 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2209 if (pat)
2210 {
2211 emit_insn (pat);
2212 return 1;
2213 }
2214 else
2215 delete_insns_since (last);
2216 }
2217
2218 /* It can't be done in this mode. Can we do it in a wider mode? */
2219
2220 if (CLASS_HAS_WIDER_MODES_P (class))
2221 {
2222 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2223 wider_mode != VOIDmode;
2224 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2225 {
2226 if (binoptab->handlers[(int) wider_mode].insn_code
2227 != CODE_FOR_nothing)
2228 {
2229 rtx t0 = gen_reg_rtx (wider_mode);
2230 rtx t1 = gen_reg_rtx (wider_mode);
2231 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2232 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2233
2234 if (expand_twoval_binop (binoptab, cop0, cop1,
2235 t0, t1, unsignedp))
2236 {
2237 convert_move (targ0, t0, unsignedp);
2238 convert_move (targ1, t1, unsignedp);
2239 return 1;
2240 }
2241 else
2242 delete_insns_since (last);
2243 }
2244 }
2245 }
2246
2247 delete_insns_since (entry_last);
2248 return 0;
2249 }
2250
2251 /* Expand the two-valued library call indicated by BINOPTAB, but
2252 preserve only one of the values. If TARG0 is non-NULL, the first
2253 value is placed into TARG0; otherwise the second value is placed
2254 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2255 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2256 This routine assumes that the value returned by the library call is
2257 as if the return value were of an integral mode twice as wide as the
2258 mode of OP0. Returns 1 if the call was successful. */
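/* Layout sketch: the libcall result occupies an integer mode twice as wide
   as MODE; the piece for TARG0 is taken from subreg byte offset 0 and the
   piece for TARG1 from offset GET_MODE_SIZE (mode), as selected by the
   simplify_gen_subreg call below. */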
2259
2260 bool
2261 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2262 rtx targ0, rtx targ1, enum rtx_code code)
2263 {
2264 enum machine_mode mode;
2265 enum machine_mode libval_mode;
2266 rtx libval;
2267 rtx insns;
2268
2269 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2270 gcc_assert (!targ0 != !targ1);
2271
2272 mode = GET_MODE (op0);
2273 if (!binoptab->handlers[(int) mode].libfunc)
2274 return false;
2275
2276 /* The value returned by the library function will have twice as
2277 many bits as the nominal MODE. */
2278 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2279 MODE_INT);
2280 start_sequence ();
2281 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2282 NULL_RTX, LCT_CONST,
2283 libval_mode, 2,
2284 op0, mode,
2285 op1, mode);
2286 /* Get the part of VAL containing the value that we want. */
2287 libval = simplify_gen_subreg (mode, libval, libval_mode,
2288 targ0 ? 0 : GET_MODE_SIZE (mode));
2289 insns = get_insns ();
2290 end_sequence ();
2291 /* Move the result into the desired location. */
2292 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2293 gen_rtx_fmt_ee (code, mode, op0, op1));
2294
2295 return true;
2296 }
2297
2298 \f
2299 /* Wrapper around expand_unop which takes an rtx code to specify
2300 the operation to perform, not an optab pointer. All other
2301 arguments are the same. */
2302 rtx
2303 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2304 rtx target, int unsignedp)
2305 {
2306 optab unop = code_to_optab[(int) code];
2307 gcc_assert (unop);
2308
2309 return expand_unop (mode, unop, op0, target, unsignedp);
2310 }
2311
2312 /* Try calculating
2313 (clz:narrow x)
2314 as
2315 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
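/* For example (illustrative mode sizes): computing clz on a QImode value via
   an SImode clz pattern zero-extends the operand, counts 24 extra leading
   zero bits, and therefore subtracts (32 - 8) from the wide result. */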
2316 static rtx
2317 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2318 {
2319 enum mode_class class = GET_MODE_CLASS (mode);
2320 if (CLASS_HAS_WIDER_MODES_P (class))
2321 {
2322 enum machine_mode wider_mode;
2323 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2324 wider_mode != VOIDmode;
2325 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2326 {
2327 if (clz_optab->handlers[(int) wider_mode].insn_code
2328 != CODE_FOR_nothing)
2329 {
2330 rtx xop0, temp, last;
2331
2332 last = get_last_insn ();
2333
2334 if (target == 0)
2335 target = gen_reg_rtx (mode);
2336 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2337 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2338 if (temp != 0)
2339 temp = expand_binop (wider_mode, sub_optab, temp,
2340 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2341 - GET_MODE_BITSIZE (mode)),
2342 target, true, OPTAB_DIRECT);
2343 if (temp == 0)
2344 delete_insns_since (last);
2345
2346 return temp;
2347 }
2348 }
2349 }
2350 return 0;
2351 }
2352
2353 /* Try calculating (parity x) as (and (popcount x) 1), where
2354 popcount can also be done in a wider mode. */
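/* For example (illustrative): for x = 0b1011, popcount (x) is 3, and
   (and 3 1) yields 1, the parity of x. A wider popcount works equally
   well, since zero-extension adds no set bits. */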
2355 static rtx
2356 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2357 {
2358 enum mode_class class = GET_MODE_CLASS (mode);
2359 if (CLASS_HAS_WIDER_MODES_P (class))
2360 {
2361 enum machine_mode wider_mode;
2362 for (wider_mode = mode; wider_mode != VOIDmode;
2363 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2364 {
2365 if (popcount_optab->handlers[(int) wider_mode].insn_code
2366 != CODE_FOR_nothing)
2367 {
2368 rtx xop0, temp, last;
2369
2370 last = get_last_insn ();
2371
2372 if (target == 0)
2373 target = gen_reg_rtx (mode);
2374 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2375 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2376 true);
2377 if (temp != 0)
2378 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2379 target, true, OPTAB_DIRECT);
2380 if (temp == 0)
2381 delete_insns_since (last);
2382
2383 return temp;
2384 }
2385 }
2386 }
2387 return 0;
2388 }
2389
2390 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2391 conditions, VAL may already be a SUBREG against which we cannot generate
2392 a further SUBREG. In this case, we expect forcing the value into a
2393 register will work around the situation. */
2394
2395 static rtx
2396 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2397 enum machine_mode imode)
2398 {
2399 rtx ret;
2400 ret = lowpart_subreg (omode, val, imode);
2401 if (ret == NULL)
2402 {
2403 val = force_reg (imode, val);
2404 ret = lowpart_subreg (omode, val, imode);
2405 gcc_assert (ret != NULL);
2406 }
2407 return ret;
2408 }
2409
2410 /* Expand a floating point absolute value or negation operation via a
2411 logical operation on the sign bit. */
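/* Illustrative example for IEEE single precision (sign in bit 31): on the
   integer image of the value, ABS is computed as x & 0x7fffffff and NEG as
   x ^ 0x80000000; the code below builds the corresponding mask from
   fmt->signbit_rw for the actual mode. */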
2412
2413 static rtx
2414 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2415 rtx op0, rtx target)
2416 {
2417 const struct real_format *fmt;
2418 int bitpos, word, nwords, i;
2419 enum machine_mode imode;
2420 HOST_WIDE_INT hi, lo;
2421 rtx temp, insns;
2422
2423 /* The format has to have a simple sign bit. */
2424 fmt = REAL_MODE_FORMAT (mode);
2425 if (fmt == NULL)
2426 return NULL_RTX;
2427
2428 bitpos = fmt->signbit_rw;
2429 if (bitpos < 0)
2430 return NULL_RTX;
2431
2432 /* Don't create negative zeros if the format doesn't support them. */
2433 if (code == NEG && !fmt->has_signed_zero)
2434 return NULL_RTX;
2435
2436 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2437 {
2438 imode = int_mode_for_mode (mode);
2439 if (imode == BLKmode)
2440 return NULL_RTX;
2441 word = 0;
2442 nwords = 1;
2443 }
2444 else
2445 {
2446 imode = word_mode;
2447
2448 if (FLOAT_WORDS_BIG_ENDIAN)
2449 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2450 else
2451 word = bitpos / BITS_PER_WORD;
2452 bitpos = bitpos % BITS_PER_WORD;
2453 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2454 }
2455
2456 if (bitpos < HOST_BITS_PER_WIDE_INT)
2457 {
2458 hi = 0;
2459 lo = (HOST_WIDE_INT) 1 << bitpos;
2460 }
2461 else
2462 {
2463 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2464 lo = 0;
2465 }
2466 if (code == ABS)
2467 lo = ~lo, hi = ~hi;
2468
2469 if (target == 0 || target == op0)
2470 target = gen_reg_rtx (mode);
2471
2472 if (nwords > 1)
2473 {
2474 start_sequence ();
2475
2476 for (i = 0; i < nwords; ++i)
2477 {
2478 rtx targ_piece = operand_subword (target, i, 1, mode);
2479 rtx op0_piece = operand_subword_force (op0, i, mode);
2480
2481 if (i == word)
2482 {
2483 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2484 op0_piece,
2485 immed_double_const (lo, hi, imode),
2486 targ_piece, 1, OPTAB_LIB_WIDEN);
2487 if (temp != targ_piece)
2488 emit_move_insn (targ_piece, temp);
2489 }
2490 else
2491 emit_move_insn (targ_piece, op0_piece);
2492 }
2493
2494 insns = get_insns ();
2495 end_sequence ();
2496
2497 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2498 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2499 }
2500 else
2501 {
2502 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2503 gen_lowpart (imode, op0),
2504 immed_double_const (lo, hi, imode),
2505 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2506 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2507
2508 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2509 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2510 }
2511
2512 return target;
2513 }
2514
2515 /* Generate code to perform an operation specified by UNOPTAB
2516 on operand OP0, with result having machine-mode MODE.
2517
2518 UNSIGNEDP is for the case where we have to widen the operands
2519 to perform the operation. It says to use zero-extension.
2520
2521 If TARGET is nonzero, the value
2522 is generated there, if it is convenient to do so.
2523 In all cases an rtx is returned for the locus of the value;
2524 this may or may not be TARGET. */
2525
2526 rtx
2527 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2528 int unsignedp)
2529 {
2530 enum mode_class class;
2531 enum machine_mode wider_mode;
2532 rtx temp;
2533 rtx last = get_last_insn ();
2534 rtx pat;
2535
2536 class = GET_MODE_CLASS (mode);
2537
2538 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2539 {
2540 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2541 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2542 rtx xop0 = op0;
2543
2544 if (target)
2545 temp = target;
2546 else
2547 temp = gen_reg_rtx (mode);
2548
2549 if (GET_MODE (xop0) != VOIDmode
2550 && GET_MODE (xop0) != mode0)
2551 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2552
2553 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2554
2555 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2556 xop0 = copy_to_mode_reg (mode0, xop0);
2557
2558 if (!insn_data[icode].operand[0].predicate (temp, mode))
2559 temp = gen_reg_rtx (mode);
2560
2561 pat = GEN_FCN (icode) (temp, xop0);
2562 if (pat)
2563 {
2564 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2565 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2566 {
2567 delete_insns_since (last);
2568 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2569 }
2570
2571 emit_insn (pat);
2572
2573 return temp;
2574 }
2575 else
2576 delete_insns_since (last);
2577 }
2578
2579 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2580
2581 /* Widening clz needs special treatment. */
2582 if (unoptab == clz_optab)
2583 {
2584 temp = widen_clz (mode, op0, target);
2585 if (temp)
2586 return temp;
2587 else
2588 goto try_libcall;
2589 }
2590
2591 if (CLASS_HAS_WIDER_MODES_P (class))
2592 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2593 wider_mode != VOIDmode;
2594 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2595 {
2596 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2597 {
2598 rtx xop0 = op0;
2599
2600 /* For certain operations, we need not actually extend
2601 the narrow operand, as long as we will truncate the
2602 results to the same narrowness. */
2603
2604 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2605 (unoptab == neg_optab
2606 || unoptab == one_cmpl_optab)
2607 && class == MODE_INT);
2608
2609 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2610 unsignedp);
2611
2612 if (temp)
2613 {
2614 if (class != MODE_INT
2615 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2616 GET_MODE_BITSIZE (wider_mode)))
2617 {
2618 if (target == 0)
2619 target = gen_reg_rtx (mode);
2620 convert_move (target, temp, 0);
2621 return target;
2622 }
2623 else
2624 return gen_lowpart (mode, temp);
2625 }
2626 else
2627 delete_insns_since (last);
2628 }
2629 }
2630
2631 /* These can be done a word at a time. */
2632 if (unoptab == one_cmpl_optab
2633 && class == MODE_INT
2634 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2635 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2636 {
2637 int i;
2638 rtx insns;
2639
2640 if (target == 0 || target == op0)
2641 target = gen_reg_rtx (mode);
2642
2643 start_sequence ();
2644
2645 /* Do the actual arithmetic. */
2646 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2647 {
2648 rtx target_piece = operand_subword (target, i, 1, mode);
2649 rtx x = expand_unop (word_mode, unoptab,
2650 operand_subword_force (op0, i, mode),
2651 target_piece, unsignedp);
2652
2653 if (target_piece != x)
2654 emit_move_insn (target_piece, x);
2655 }
2656
2657 insns = get_insns ();
2658 end_sequence ();
2659
2660 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2661 gen_rtx_fmt_e (unoptab->code, mode,
2662 copy_rtx (op0)));
2663 return target;
2664 }
2665
2666 if (unoptab->code == NEG)
2667 {
2668 /* Try negating floating point values by flipping the sign bit. */
2669 if (SCALAR_FLOAT_MODE_P (mode))
2670 {
2671 temp = expand_absneg_bit (NEG, mode, op0, target);
2672 if (temp)
2673 return temp;
2674 }
2675
2676 /* If there is no negation pattern, and we have no negative zero,
2677 try subtracting from zero. */
2678 if (!HONOR_SIGNED_ZEROS (mode))
2679 {
2680 temp = expand_binop (mode, (unoptab == negv_optab
2681 ? subv_optab : sub_optab),
2682 CONST0_RTX (mode), op0, target,
2683 unsignedp, OPTAB_DIRECT);
2684 if (temp)
2685 return temp;
2686 }
2687 }
2688
2689 /* Try calculating parity (x) as popcount (x) % 2. */
2690 if (unoptab == parity_optab)
2691 {
2692 temp = expand_parity (mode, op0, target);
2693 if (temp)
2694 return temp;
2695 }
2696
2697 try_libcall:
2698 /* Now try a library call in this mode. */
2699 if (unoptab->handlers[(int) mode].libfunc)
2700 {
2701 rtx insns;
2702 rtx value;
2703 enum machine_mode outmode = mode;
2704
2705 /* All of these functions return small values. Thus we choose to
2706 have them return something that isn't a double-word. */
2707 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2708 || unoptab == popcount_optab || unoptab == parity_optab)
2709 outmode
2710 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2711
2712 start_sequence ();
2713
2714 /* Pass 1 for NO_QUEUE so we don't lose any increments
2715 if the libcall is cse'd or moved. */
2716 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2717 NULL_RTX, LCT_CONST, outmode,
2718 1, op0, mode);
2719 insns = get_insns ();
2720 end_sequence ();
2721
2722 target = gen_reg_rtx (outmode);
2723 emit_libcall_block (insns, target, value,
2724 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2725
2726 return target;
2727 }
2728
2729 /* It can't be done in this mode. Can we do it in a wider mode? */
2730
2731 if (CLASS_HAS_WIDER_MODES_P (class))
2732 {
2733 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2734 wider_mode != VOIDmode;
2735 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2736 {
2737 if ((unoptab->handlers[(int) wider_mode].insn_code
2738 != CODE_FOR_nothing)
2739 || unoptab->handlers[(int) wider_mode].libfunc)
2740 {
2741 rtx xop0 = op0;
2742
2743 /* For certain operations, we need not actually extend
2744 the narrow operand, as long as we will truncate the
2745 results to the same narrowness. */
2746
2747 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2748 (unoptab == neg_optab
2749 || unoptab == one_cmpl_optab)
2750 && class == MODE_INT);
2751
2752 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2753 unsignedp);
2754
2755 /* If we are generating clz using wider mode, adjust the
2756 result. */
2757 if (unoptab == clz_optab && temp != 0)
2758 temp = expand_binop (wider_mode, sub_optab, temp,
2759 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2760 - GET_MODE_BITSIZE (mode)),
2761 target, true, OPTAB_DIRECT);
2762
2763 if (temp)
2764 {
2765 if (class != MODE_INT)
2766 {
2767 if (target == 0)
2768 target = gen_reg_rtx (mode);
2769 convert_move (target, temp, 0);
2770 return target;
2771 }
2772 else
2773 return gen_lowpart (mode, temp);
2774 }
2775 else
2776 delete_insns_since (last);
2777 }
2778 }
2779 }
2780
2781 /* One final attempt at implementing negation via subtraction,
2782 this time allowing widening of the operand. */
2783 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2784 {
2785 rtx temp;
2786 temp = expand_binop (mode,
2787 unoptab == negv_optab ? subv_optab : sub_optab,
2788 CONST0_RTX (mode), op0,
2789 target, unsignedp, OPTAB_LIB_WIDEN);
2790 if (temp)
2791 return temp;
2792 }
2793
2794 return 0;
2795 }
2796 \f
2797 /* Emit code to compute the absolute value of OP0, with result to
2798 TARGET if convenient. (TARGET may be 0.) The return value says
2799 where the result actually is to be found.
2800
2801 MODE is the mode of the operand; the mode of the result is
2802 different but can be deduced from MODE.
2803
2804 */
2805
2806 rtx
2807 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2808 int result_unsignedp)
2809 {
2810 rtx temp;
2811
2812 if (! flag_trapv)
2813 result_unsignedp = 1;
2814
2815 /* First try to do it with a special abs instruction. */
2816 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2817 op0, target, 0);
2818 if (temp != 0)
2819 return temp;
2820
2821 /* For floating point modes, try clearing the sign bit. */
2822 if (SCALAR_FLOAT_MODE_P (mode))
2823 {
2824 temp = expand_absneg_bit (ABS, mode, op0, target);
2825 if (temp)
2826 return temp;
2827 }
2828
2829 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2830 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2831 && !HONOR_SIGNED_ZEROS (mode))
2832 {
2833 rtx last = get_last_insn ();
2834
2835 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2836 if (temp != 0)
2837 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2838 OPTAB_WIDEN);
2839
2840 if (temp != 0)
2841 return temp;
2842
2843 delete_insns_since (last);
2844 }
2845
2846 /* If this machine has expensive jumps, we can do integer absolute
2847 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2848 where W is the width of MODE. */
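/* Worked example (assuming a 32-bit MODE): for x = -5, the arithmetic shift
   gives -1, (-1 ^ -5) is 4, and 4 - (-1) is 5; for x >= 0 the shift gives 0
   and x passes through unchanged. */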
2849
2850 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2851 {
2852 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2853 size_int (GET_MODE_BITSIZE (mode) - 1),
2854 NULL_RTX, 0);
2855
2856 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2857 OPTAB_LIB_WIDEN);
2858 if (temp != 0)
2859 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2860 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2861
2862 if (temp != 0)
2863 return temp;
2864 }
2865
2866 return NULL_RTX;
2867 }
2868
2869 rtx
2870 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2871 int result_unsignedp, int safe)
2872 {
2873 rtx temp, op1;
2874
2875 if (! flag_trapv)
2876 result_unsignedp = 1;
2877
2878 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2879 if (temp != 0)
2880 return temp;
2881
2882 /* If that does not win, use conditional jump and negate. */
2883
2884 /* It is safe to use the target if it is the same as the source,
2885 provided it is also a pseudo register. */
2886 if (op0 == target && REG_P (op0)
2887 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2888 safe = 1;
2889
2890 op1 = gen_label_rtx ();
2891 if (target == 0 || ! safe
2892 || GET_MODE (target) != mode
2893 || (MEM_P (target) && MEM_VOLATILE_P (target))
2894 || (REG_P (target)
2895 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2896 target = gen_reg_rtx (mode);
2897
2898 emit_move_insn (target, op0);
2899 NO_DEFER_POP;
2900
2901 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2902 NULL_RTX, NULL_RTX, op1);
2903
2904 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2905 target, target, 0);
2906 if (op0 != target)
2907 emit_move_insn (target, op0);
2908 emit_label (op1);
2909 OK_DEFER_POP;
2910 return target;
2911 }
2912
2913 /* A subroutine of expand_copysign, perform the copysign operation using the
2914 abs and neg primitives advertised to exist on the target. The assumption
2915 is that we have a split register file, and leaving op0 in fp registers,
2916 and not playing with subregs so much, will help the register allocator. */
2917
2918 static rtx
2919 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2920 int bitpos, bool op0_is_abs)
2921 {
2922 enum machine_mode imode;
2923 HOST_WIDE_INT hi, lo;
2924 int word;
2925 rtx label;
2926
2927 if (target == op1)
2928 target = NULL_RTX;
2929
2930 if (!op0_is_abs)
2931 {
2932 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2933 if (op0 == NULL)
2934 return NULL_RTX;
2935 target = op0;
2936 }
2937 else
2938 {
2939 if (target == NULL_RTX)
2940 target = copy_to_reg (op0);
2941 else
2942 emit_move_insn (target, op0);
2943 }
2944
2945 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2946 {
2947 imode = int_mode_for_mode (mode);
2948 if (imode == BLKmode)
2949 return NULL_RTX;
2950 op1 = gen_lowpart (imode, op1);
2951 }
2952 else
2953 {
2954 imode = word_mode;
2955 if (FLOAT_WORDS_BIG_ENDIAN)
2956 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2957 else
2958 word = bitpos / BITS_PER_WORD;
2959 bitpos = bitpos % BITS_PER_WORD;
2960 op1 = operand_subword_force (op1, word, mode);
2961 }
2962
2963 if (bitpos < HOST_BITS_PER_WIDE_INT)
2964 {
2965 hi = 0;
2966 lo = (HOST_WIDE_INT) 1 << bitpos;
2967 }
2968 else
2969 {
2970 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2971 lo = 0;
2972 }
2973
2974 op1 = expand_binop (imode, and_optab, op1,
2975 immed_double_const (lo, hi, imode),
2976 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2977
2978 label = gen_label_rtx ();
2979 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2980
2981 if (GET_CODE (op0) == CONST_DOUBLE)
2982 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2983 else
2984 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2985 if (op0 != target)
2986 emit_move_insn (target, op0);
2987
2988 emit_label (label);
2989
2990 return target;
2991 }
2992
2993
2994 /* A subroutine of expand_copysign, perform the entire copysign operation
2995 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2996 is true if op0 is known to have its sign bit clear. */
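/* The bitwise identity used here (a sketch, with SIGNMASK the single sign
   bit of the mode):

   copysign (op0, op1) == (op0 & ~SIGNMASK) | (op1 & SIGNMASK)

   applied either to the whole integer image or word by word. */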
2997
2998 static rtx
2999 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3000 int bitpos, bool op0_is_abs)
3001 {
3002 enum machine_mode imode;
3003 HOST_WIDE_INT hi, lo;
3004 int word, nwords, i;
3005 rtx temp, insns;
3006
3007 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3008 {
3009 imode = int_mode_for_mode (mode);
3010 if (imode == BLKmode)
3011 return NULL_RTX;
3012 word = 0;
3013 nwords = 1;
3014 }
3015 else
3016 {
3017 imode = word_mode;
3018
3019 if (FLOAT_WORDS_BIG_ENDIAN)
3020 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3021 else
3022 word = bitpos / BITS_PER_WORD;
3023 bitpos = bitpos % BITS_PER_WORD;
3024 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3025 }
3026
3027 if (bitpos < HOST_BITS_PER_WIDE_INT)
3028 {
3029 hi = 0;
3030 lo = (HOST_WIDE_INT) 1 << bitpos;
3031 }
3032 else
3033 {
3034 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3035 lo = 0;
3036 }
3037
3038 if (target == 0 || target == op0 || target == op1)
3039 target = gen_reg_rtx (mode);
3040
3041 if (nwords > 1)
3042 {
3043 start_sequence ();
3044
3045 for (i = 0; i < nwords; ++i)
3046 {
3047 rtx targ_piece = operand_subword (target, i, 1, mode);
3048 rtx op0_piece = operand_subword_force (op0, i, mode);
3049
3050 if (i == word)
3051 {
3052 if (!op0_is_abs)
3053 op0_piece = expand_binop (imode, and_optab, op0_piece,
3054 immed_double_const (~lo, ~hi, imode),
3055 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3056
3057 op1 = expand_binop (imode, and_optab,
3058 operand_subword_force (op1, i, mode),
3059 immed_double_const (lo, hi, imode),
3060 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3061
3062 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3063 targ_piece, 1, OPTAB_LIB_WIDEN);
3064 if (temp != targ_piece)
3065 emit_move_insn (targ_piece, temp);
3066 }
3067 else
3068 emit_move_insn (targ_piece, op0_piece);
3069 }
3070
3071 insns = get_insns ();
3072 end_sequence ();
3073
3074 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3075 }
3076 else
3077 {
3078 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3079 immed_double_const (lo, hi, imode),
3080 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3081
3082 op0 = gen_lowpart (imode, op0);
3083 if (!op0_is_abs)
3084 op0 = expand_binop (imode, and_optab, op0,
3085 immed_double_const (~lo, ~hi, imode),
3086 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3087
3088 temp = expand_binop (imode, ior_optab, op0, op1,
3089 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3090 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3091 }
3092
3093 return target;
3094 }
3095
3096 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3097 scalar floating point mode. Return NULL if we do not know how to
3098 expand the operation inline. */
3099
3100 rtx
3101 expand_copysign (rtx op0, rtx op1, rtx target)
3102 {
3103 enum machine_mode mode = GET_MODE (op0);
3104 const struct real_format *fmt;
3105 bool op0_is_abs;
3106 rtx temp;
3107
3108 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3109 gcc_assert (GET_MODE (op1) == mode);
3110
3111 /* First try to do it with a special instruction. */
3112 temp = expand_binop (mode, copysign_optab, op0, op1,
3113 target, 0, OPTAB_DIRECT);
3114 if (temp)
3115 return temp;
3116
3117 fmt = REAL_MODE_FORMAT (mode);
3118 if (fmt == NULL || !fmt->has_signed_zero)
3119 return NULL_RTX;
3120
3121 op0_is_abs = false;
3122 if (GET_CODE (op0) == CONST_DOUBLE)
3123 {
3124 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3125 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3126 op0_is_abs = true;
3127 }
3128
3129 if (fmt->signbit_ro >= 0
3130 && (GET_CODE (op0) == CONST_DOUBLE
3131 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3132 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3133 {
3134 temp = expand_copysign_absneg (mode, op0, op1, target,
3135 fmt->signbit_ro, op0_is_abs);
3136 if (temp)
3137 return temp;
3138 }
3139
3140 if (fmt->signbit_rw < 0)
3141 return NULL_RTX;
3142 return expand_copysign_bit (mode, op0, op1, target,
3143 fmt->signbit_rw, op0_is_abs);
3144 }
3145 \f
3146 /* Generate an instruction whose insn-code is INSN_CODE,
3147 with two operands: an output TARGET and an input OP0.
3148 TARGET *must* be nonzero, and the output is always stored there.
3149 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3150 the value that is stored into TARGET. */
3151
3152 void
3153 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3154 {
3155 rtx temp;
3156 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3157 rtx pat;
3158
3159 temp = target;
3160
3161 /* Now, if insn does not accept our operands, put them into pseudos. */
3162
3163 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3164 op0 = copy_to_mode_reg (mode0, op0);
3165
3166 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3167 temp = gen_reg_rtx (GET_MODE (temp));
3168
3169 pat = GEN_FCN (icode) (temp, op0);
3170
3171 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3172 add_equal_note (pat, temp, code, op0, NULL_RTX);
3173
3174 emit_insn (pat);
3175
3176 if (temp != target)
3177 emit_move_insn (target, temp);
3178 }
3179 \f
3180 struct no_conflict_data
3181 {
3182 rtx target, first, insn;
3183 bool must_stay;
3184 };
3185
3186 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3187 Set P->must_stay if the currently examined clobber / store has to stay
3188 in the list of insns that constitute the actual no_conflict block /
3189 libcall block. */
3190 static void
3191 no_conflict_move_test (rtx dest, rtx set, void *p0)
3192 {
3193 struct no_conflict_data *p = p0;
3194
3195 /* If this insn directly contributes to setting the target, it must stay. */
3196 if (reg_overlap_mentioned_p (p->target, dest))
3197 p->must_stay = true;
3198 /* If we haven't committed to keeping any other insns in the list yet,
3199 there is nothing more to check. */
3200 else if (p->insn == p->first)
3201 return;
3202 /* If this insn sets / clobbers a register that feeds one of the insns
3203 already in the list, this insn has to stay too. */
3204 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3205 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3206 || reg_used_between_p (dest, p->first, p->insn)
3207 /* Likewise if this insn depends on a register set by a previous
3208 insn in the list, or if it sets a result (presumably a hard
3209 register) that is set or clobbered by a previous insn.
3210 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3211 SET_DEST perform the former check on the address, and the latter
3212 check on the MEM. */
3213 || (GET_CODE (set) == SET
3214 && (modified_in_p (SET_SRC (set), p->first)
3215 || modified_in_p (SET_DEST (set), p->first)
3216 || modified_between_p (SET_SRC (set), p->first, p->insn)
3217 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3218 p->must_stay = true;
3219 }
3220
3221 /* Emit code to perform a series of operations on a multi-word quantity, one
3222 word at a time.
3223
3224 Such a block is preceded by a CLOBBER of the output, consists of multiple
3225 insns, each setting one word of the output, and followed by a SET copying
3226 the output to itself.
3227
3228 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3229 note indicating that it doesn't conflict with the (also multi-word)
3230 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3231 notes.
3232
3233 INSNS is a block of code generated to perform the operation, not including
3234 the CLOBBER and final copy. All insns that compute intermediate values
3235 are first emitted, followed by the block as described above.
3236
3237 TARGET, OP0, and OP1 are the output and inputs of the operations,
3238 respectively. OP1 may be zero for a unary operation.
3239
3240 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3241 on the last insn.
3242
3243 If TARGET is not a register, INSNS is simply emitted with no special
3244 processing. Likewise if anything in INSNS is not an INSN or if
3245 there is a libcall block inside INSNS.
3246
3247 The final insn emitted is returned. */
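/* Shape of the emitted block, sketched for a two-word TARGET:

   (clobber TARGET)
   (set (subword 0 of TARGET) ...)   ;; REG_NO_CONFLICT notes for OP0/OP1
   (set (subword 1 of TARGET) ...)   ;; REG_NO_CONFLICT notes for OP0/OP1
   (set TARGET TARGET)               ;; REG_EQUAL note of EQUIV

   with REG_LIBCALL on the first insn and REG_RETVAL on the last. */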
3248
3249 rtx
3250 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3251 {
3252 rtx prev, next, first, last, insn;
3253
3254 if (!REG_P (target) || reload_in_progress)
3255 return emit_insn (insns);
3256 else
3257 for (insn = insns; insn; insn = NEXT_INSN (insn))
3258 if (!NONJUMP_INSN_P (insn)
3259 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3260 return emit_insn (insns);
3261
3262 /* First emit all insns that do not store into words of the output and remove
3263 these from the list. */
3264 for (insn = insns; insn; insn = next)
3265 {
3266 rtx note;
3267 struct no_conflict_data data;
3268
3269 next = NEXT_INSN (insn);
3270
3271 /* Some ports (cris) create libcall regions on their own. We must
3272 avoid any potential nesting of LIBCALLs. */
3273 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3274 remove_note (insn, note);
3275 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3276 remove_note (insn, note);
3277
3278 data.target = target;
3279 data.first = insns;
3280 data.insn = insn;
3281 data.must_stay = 0;
3282 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3283 if (! data.must_stay)
3284 {
3285 if (PREV_INSN (insn))
3286 NEXT_INSN (PREV_INSN (insn)) = next;
3287 else
3288 insns = next;
3289
3290 if (next)
3291 PREV_INSN (next) = PREV_INSN (insn);
3292
3293 add_insn (insn);
3294 }
3295 }
3296
3297 prev = get_last_insn ();
3298
3299 /* Now write the CLOBBER of the output, followed by the setting of each
3300 of the words, followed by the final copy. */
3301 if (target != op0 && target != op1)
3302 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3303
3304 for (insn = insns; insn; insn = next)
3305 {
3306 next = NEXT_INSN (insn);
3307 add_insn (insn);
3308
3309 if (op1 && REG_P (op1))
3310 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3311 REG_NOTES (insn));
3312
3313 if (op0 && REG_P (op0))
3314 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3315 REG_NOTES (insn));
3316 }
3317
3318 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3319 != CODE_FOR_nothing)
3320 {
3321 last = emit_move_insn (target, target);
3322 if (equiv)
3323 set_unique_reg_note (last, REG_EQUAL, equiv);
3324 }
3325 else
3326 {
3327 last = get_last_insn ();
3328
3329 /* Remove any existing REG_EQUAL note from "last", or else it will
3330 be mistaken for a note referring to the full contents of the
3331 alleged libcall value when found together with the REG_RETVAL
3332 note added below. An existing note can come from an insn
3333 expansion at "last". */
3334 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3335 }
3336
3337 if (prev == 0)
3338 first = get_insns ();
3339 else
3340 first = NEXT_INSN (prev);
3341
3342 /* Encapsulate the block so it gets manipulated as a unit. */
3343 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3344 REG_NOTES (first));
3345 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3346
3347 return last;
3348 }
3349 \f
3350 /* Emit code to make a call to a constant function or a library call.
3351
3352 INSNS is a list containing all insns emitted in the call.
3353 These insns leave the result in RESULT. Our job is to copy RESULT
3354 to TARGET, which is logically equivalent to EQUIV.
3355
3356 We first emit any insns that set a pseudo on the assumption that these are
3357 loading constants into registers; doing so allows them to be safely cse'ed
3358 between blocks. Then we emit all the other insns in the block, followed by
3359 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3360 note with an operand of EQUIV.
3361
3362 Moving assignments to pseudos outside of the block is done to improve
3363 the generated code, but is not required to generate correct code,
3364 hence being unable to move an assignment is not grounds for not making
3365 a libcall block. There are two reasons why it is safe to leave these
3366 insns inside the block: First, we know that these pseudos cannot be
3367 used in generated RTL outside the block since they are created for
3368 temporary purposes within the block. Second, CSE will not record the
3369 values of anything set inside a libcall block, so we know they must
3370 be dead at the end of the block.
3371
3372 Except for the first group of insns (the ones setting pseudos), the
3373 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3374
3375 void
3376 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3377 {
3378 rtx final_dest = target;
3379 rtx prev, next, first, last, insn;
3380
3381 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3382 into a MEM later. Protect the libcall block from this change. */
3383 if (! REG_P (target) || REG_USERVAR_P (target))
3384 target = gen_reg_rtx (GET_MODE (target));
3385
3386 /* If we're using non-call exceptions, a libcall corresponding to an
3387 operation that may trap may also trap. */
3388 if (flag_non_call_exceptions && may_trap_p (equiv))
3389 {
3390 for (insn = insns; insn; insn = NEXT_INSN (insn))
3391 if (CALL_P (insn))
3392 {
3393 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3394
3395 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3396 remove_note (insn, note);
3397 }
3398 }
3399 else
3400 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3401 reg note to indicate that this call cannot throw or execute a nonlocal
3402 goto (unless there is already a REG_EH_REGION note, in which case
3403 we update it). */
3404 for (insn = insns; insn; insn = NEXT_INSN (insn))
3405 if (CALL_P (insn))
3406 {
3407 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3408
3409 if (note != 0)
3410 XEXP (note, 0) = constm1_rtx;
3411 else
3412 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3413 REG_NOTES (insn));
3414 }
3415
3416 /* First emit all insns that set pseudos. Remove them from the list as
3417 we go. Avoid insns that set pseudos which were referenced in previous
3418 insns. These can be generated by move_by_pieces, for example,
3419 to update an address. Similarly, avoid insns that reference things
3420 set in previous insns. */
3421
3422 for (insn = insns; insn; insn = next)
3423 {
3424 rtx set = single_set (insn);
3425 rtx note;
3426
3427 /* Some ports (cris) create libcall regions on their own. We must
3428 avoid any potential nesting of LIBCALLs. */
3429 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3430 remove_note (insn, note);
3431 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3432 remove_note (insn, note);
3433
3434 next = NEXT_INSN (insn);
3435
3436 if (set != 0 && REG_P (SET_DEST (set))
3437 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3438 {
3439 struct no_conflict_data data;
3440
3441 data.target = const0_rtx;
3442 data.first = insns;
3443 data.insn = insn;
3444 data.must_stay = 0;
3445 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3446 if (! data.must_stay)
3447 {
3448 if (PREV_INSN (insn))
3449 NEXT_INSN (PREV_INSN (insn)) = next;
3450 else
3451 insns = next;
3452
3453 if (next)
3454 PREV_INSN (next) = PREV_INSN (insn);
3455
3456 add_insn (insn);
3457 }
3458 }
3459
3460 /* Some ports use a loop to copy large arguments onto the stack.
3461 Don't move anything outside such a loop. */
3462 if (LABEL_P (insn))
3463 break;
3464 }
3465
3466 prev = get_last_insn ();
3467
3468 /* Write the remaining insns followed by the final copy. */
3469
3470 for (insn = insns; insn; insn = next)
3471 {
3472 next = NEXT_INSN (insn);
3473
3474 add_insn (insn);
3475 }
3476
3477 last = emit_move_insn (target, result);
3478 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3479 != CODE_FOR_nothing)
3480 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3481 else
3482 {
3483 /* Remove any existing REG_EQUAL note from "last", or else it will
3484 be mistaken for a note referring to the full contents of the
3485 libcall value when found together with the REG_RETVAL note added
3486 below. An existing note can come from an insn expansion at
3487 "last". */
3488 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3489 }
3490
3491 if (final_dest != target)
3492 emit_move_insn (final_dest, target);
3493
3494 if (prev == 0)
3495 first = get_insns ();
3496 else
3497 first = NEXT_INSN (prev);
3498
3499 /* Encapsulate the block so it gets manipulated as a unit. */
3500 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3501 {
3502 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3503 when the encapsulated region would not be in one basic block,
3504 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3505 */
3506 bool attach_libcall_retval_notes = true;
3507 next = NEXT_INSN (last);
3508 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3509 if (control_flow_insn_p (insn))
3510 {
3511 attach_libcall_retval_notes = false;
3512 break;
3513 }
3514
3515 if (attach_libcall_retval_notes)
3516 {
3517 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3518 REG_NOTES (first));
3519 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3520 REG_NOTES (last));
3521 }
3522 }
3523 }
3524 \f
3525 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3526 PURPOSE describes how this comparison will be used. CODE is the rtx
3527 comparison code we will be using.
3528
3529 ??? Actually, CODE is slightly weaker than that. A target is still
3530 required to implement all of the normal bcc operations, but not
3531 required to implement all (or any) of the unordered bcc operations. */
3532
3533 int
3534 can_compare_p (enum rtx_code code, enum machine_mode mode,
3535 enum can_compare_purpose purpose)
3536 {
3537 do
3538 {
3539 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3540 {
3541 if (purpose == ccp_jump)
3542 return bcc_gen_fctn[(int) code] != NULL;
3543 else if (purpose == ccp_store_flag)
3544 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3545 else
3546 /* There's only one cmov entry point, and it's allowed to fail. */
3547 return 1;
3548 }
3549 if (purpose == ccp_jump
3550 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3551 return 1;
3552 if (purpose == ccp_cmov
3553 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3554 return 1;
3555 if (purpose == ccp_store_flag
3556 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3557 return 1;
3558 mode = GET_MODE_WIDER_MODE (mode);
3559 }
3560 while (mode != VOIDmode);
3561
3562 return 0;
3563 }
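
/* For illustration: a port that only provides a DImode compare pattern
   still makes can_compare_p (EQ, SImode, ccp_jump) return nonzero,
   because the loop above retries in successively wider modes; the
   operands are then widened later (see emit_cmp_and_jump_insn_1)
   before the compare is actually emitted.  */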
3564
3565 /* This function is called when we are going to emit a compare instruction that
3566 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3567
3568 *PMODE is the mode of the inputs (in case they are const_int).
3569 *PUNSIGNEDP nonzero says that the operands are unsigned;
3570 this matters if they need to be widened.
3571
3572 If they have mode BLKmode, then SIZE specifies the size of both operands.
3573
3574 This function performs all the setup necessary so that the caller only has
3575 to emit a single comparison insn. This setup can involve doing a BLKmode
3576 comparison or emitting a library call to perform the comparison if no insn
3577 is available to handle it.
3578 The values which are passed in through pointers can be modified; the caller
3579 should perform the comparison on the modified values. Constant
3580 comparisons must have already been folded. */
3581
3582 static void
3583 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3584 enum machine_mode *pmode, int *punsignedp,
3585 enum can_compare_purpose purpose)
3586 {
3587 enum machine_mode mode = *pmode;
3588 rtx x = *px, y = *py;
3589 int unsignedp = *punsignedp;
3590
3591 /* If we are inside an appropriately-short loop and we are optimizing,
3592 force expensive constants into a register. */
3593 if (CONSTANT_P (x) && optimize
3594 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3595 x = force_reg (mode, x);
3596
3597 if (CONSTANT_P (y) && optimize
3598 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3599 y = force_reg (mode, y);
3600
3601 #ifdef HAVE_cc0
3602 /* Make sure we have a canonical comparison. The RTL
3603 documentation states that canonical comparisons are required only
3604 for targets which have cc0. */
3605 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3606 #endif
3607
3608 /* Don't let both operands fail to indicate the mode. */
3609 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3610 x = force_reg (mode, x);
3611
3612 /* Handle all BLKmode compares. */
3613
3614 if (mode == BLKmode)
3615 {
3616 enum machine_mode cmp_mode, result_mode;
3617 enum insn_code cmp_code;
3618 tree length_type;
3619 rtx libfunc;
3620 rtx result;
3621 rtx opalign
3622 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3623
3624 gcc_assert (size);
3625
3626 /* Try to use a memory block compare insn - any of cmpmem,
3627 cmpstr or cmpstrn will do. */
3628 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3629 cmp_mode != VOIDmode;
3630 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3631 {
3632 cmp_code = cmpmem_optab[cmp_mode];
3633 if (cmp_code == CODE_FOR_nothing)
3634 cmp_code = cmpstr_optab[cmp_mode];
3635 if (cmp_code == CODE_FOR_nothing)
3636 cmp_code = cmpstrn_optab[cmp_mode];
3637 if (cmp_code == CODE_FOR_nothing)
3638 continue;
3639
3640 /* Must make sure the size fits the insn's mode. */
3641 if ((GET_CODE (size) == CONST_INT
3642 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3643 || (GET_MODE_BITSIZE (GET_MODE (size))
3644 > GET_MODE_BITSIZE (cmp_mode)))
3645 continue;
3646
3647 result_mode = insn_data[cmp_code].operand[0].mode;
3648 result = gen_reg_rtx (result_mode);
3649 size = convert_to_mode (cmp_mode, size, 1);
3650 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3651
3652 *px = result;
3653 *py = const0_rtx;
3654 *pmode = result_mode;
3655 return;
3656 }
3657
3658 /* Otherwise call a library function, memcmp. */
3659 libfunc = memcmp_libfunc;
3660 length_type = sizetype;
3661 result_mode = TYPE_MODE (integer_type_node);
3662 cmp_mode = TYPE_MODE (length_type);
3663 size = convert_to_mode (TYPE_MODE (length_type), size,
3664 TYPE_UNSIGNED (length_type));
3665
3666 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3667 result_mode, 3,
3668 XEXP (x, 0), Pmode,
3669 XEXP (y, 0), Pmode,
3670 size, cmp_mode);
3671 *px = result;
3672 *py = const0_rtx;
3673 *pmode = result_mode;
3674 return;
3675 }
3676
3677 /* Don't allow operands to the compare to trap, as that can put the
3678 compare and branch in different basic blocks. */
3679 if (flag_non_call_exceptions)
3680 {
3681 if (may_trap_p (x))
3682 x = force_reg (mode, x);
3683 if (may_trap_p (y))
3684 y = force_reg (mode, y);
3685 }
3686
3687 *px = x;
3688 *py = y;
3689 if (can_compare_p (*pcomparison, mode, purpose))
3690 return;
3691
3692 /* Handle a lib call just for the mode we are using. */
3693
3694 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3695 {
3696 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3697 rtx result;
3698
3699 /* If we want unsigned, and this mode has a distinct unsigned
3700 comparison routine, use that. */
3701 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3702 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3703
3704 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3705 word_mode, 2, x, mode, y, mode);
3706
3707 /* There are two kinds of comparison routines. Biased routines
3708 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3709 of gcc expect that the comparison operation is equivalent
3710 to the modified comparison. For signed comparisons compare the
3711 result against 1 in the biased case, and zero in the unbiased
3712 case. For unsigned comparisons always compare against 1 after
3713 biasing the unbiased result by adding 1. This gives us a way to
3714 represent LTU. */
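/* Concretely (values shown only for illustration): a biased routine
   such as libgcc's __cmpdi2 returns 0, 1 or 2 for "less", "equal" and
   "greater", so x < y becomes result < 1; an unbiased routine returns
   -1, 0 or 1, so the signed test becomes result < 0.  For LTU with an
   unbiased routine the result is first biased by adding 1, and the
   comparison against 1 is kept.  */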
3715 *px = result;
3716 *pmode = word_mode;
3717 *py = const1_rtx;
3718
3719 if (!TARGET_LIB_INT_CMP_BIASED)
3720 {
3721 if (*punsignedp)
3722 *px = plus_constant (result, 1);
3723 else
3724 *py = const0_rtx;
3725 }
3726 return;
3727 }
3728
3729 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3730 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3731 }
3732
3733 /* Before emitting an insn with code ICODE, make sure that X, which is going
3734 to be used for operand OPNUM of the insn, is converted from mode MODE to
3735 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3736 that it is accepted by the operand predicate. Return the new value. */
3737
3738 static rtx
3739 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3740 enum machine_mode wider_mode, int unsignedp)
3741 {
3742 if (mode != wider_mode)
3743 x = convert_modes (wider_mode, mode, x, unsignedp);
3744
3745 if (!insn_data[icode].operand[opnum].predicate
3746 (x, insn_data[icode].operand[opnum].mode))
3747 {
3748 if (no_new_pseudos)
3749 return NULL_RTX;
3750 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3751 }
3752
3753 return x;
3754 }
3755
3756 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3757 we can do the comparison.
3758 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3759 be NULL_RTX which indicates that only a comparison is to be generated. */
3760
3761 static void
3762 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3763 enum rtx_code comparison, int unsignedp, rtx label)
3764 {
3765 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3766 enum mode_class class = GET_MODE_CLASS (mode);
3767 enum machine_mode wider_mode = mode;
3768
3769 /* Try combined insns first. */
3770 do
3771 {
3772 enum insn_code icode;
3773 PUT_MODE (test, wider_mode);
3774
3775 if (label)
3776 {
3777 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3778
3779 if (icode != CODE_FOR_nothing
3780 && insn_data[icode].operand[0].predicate (test, wider_mode))
3781 {
3782 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3783 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3784 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3785 return;
3786 }
3787 }
3788
3789 /* Handle some compares against zero. */
3790 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3791 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3792 {
3793 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3794 emit_insn (GEN_FCN (icode) (x));
3795 if (label)
3796 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3797 return;
3798 }
3799
3800 /* Handle compares for which there is a directly suitable insn. */
3801
3802 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3803 if (icode != CODE_FOR_nothing)
3804 {
3805 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3806 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3807 emit_insn (GEN_FCN (icode) (x, y));
3808 if (label)
3809 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3810 return;
3811 }
3812
3813 if (!CLASS_HAS_WIDER_MODES_P (class))
3814 break;
3815
3816 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3817 }
3818 while (wider_mode != VOIDmode);
3819
3820 gcc_unreachable ();
3821 }
3822
3823 /* Generate code to compare X with Y so that the condition codes are
3824 set and to jump to LABEL if the condition is true. If X is a
3825 constant and Y is not a constant, then the comparison is swapped to
3826 ensure that the comparison RTL has the canonical form.
3827
3828 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3829 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3830 the proper branch condition code.
3831
3832 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3833
3834 MODE is the mode of the inputs (in case they are const_int).
3835
3836 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3837 be passed unchanged to emit_cmp_insn, then potentially converted into an
3838 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3839
3840 void
3841 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3842 enum machine_mode mode, int unsignedp, rtx label)
3843 {
3844 rtx op0 = x, op1 = y;
3845
3846 /* Swap operands and condition to ensure canonical RTL. */
3847 if (swap_commutative_operands_p (x, y))
3848 {
3849 /* If we're not emitting a branch, this means some caller
3850 is out of sync. */
3851 gcc_assert (label);
3852
3853 op0 = y, op1 = x;
3854 comparison = swap_condition (comparison);
3855 }
3856
3857 #ifdef HAVE_cc0
3858 /* If OP0 is still a constant, then both X and Y must be constants.
3859 Force X into a register to create canonical RTL. */
3860 if (CONSTANT_P (op0))
3861 op0 = force_reg (mode, op0);
3862 #endif
3863
3864 if (unsignedp)
3865 comparison = unsigned_condition (comparison);
3866
3867 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3868 ccp_jump);
3869 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3870 }
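
/* A typical use, taken from expand_float further down in this file,
   tests the sign bit of FROM and branches:

     emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                              0, neglabel);

   SIZE matters only for BLKmode operands and is normally NULL_RTX.  */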
3871
3872 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3873
3874 void
3875 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3876 enum machine_mode mode, int unsignedp)
3877 {
3878 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3879 }
3880 \f
3881 /* Emit a library call comparison between floating point X and Y.
3882 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3883
3884 static void
3885 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3886 enum machine_mode *pmode, int *punsignedp)
3887 {
3888 enum rtx_code comparison = *pcomparison;
3889 enum rtx_code swapped = swap_condition (comparison);
3890 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3891 rtx x = *px;
3892 rtx y = *py;
3893 enum machine_mode orig_mode = GET_MODE (x);
3894 enum machine_mode mode;
3895 rtx value, target, insns, equiv;
3896 rtx libfunc = 0;
3897 bool reversed_p = false;
3898
3899 for (mode = orig_mode;
3900 mode != VOIDmode;
3901 mode = GET_MODE_WIDER_MODE (mode))
3902 {
3903 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3904 break;
3905
3906 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3907 {
3908 rtx tmp;
3909 tmp = x; x = y; y = tmp;
3910 comparison = swapped;
3911 break;
3912 }
3913
3914 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3915 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3916 {
3917 comparison = reversed;
3918 reversed_p = true;
3919 break;
3920 }
3921 }
3922
3923 gcc_assert (mode != VOIDmode);
3924
3925 if (mode != orig_mode)
3926 {
3927 x = convert_to_mode (mode, x, 0);
3928 y = convert_to_mode (mode, y, 0);
3929 }
3930
3931 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3932 the RTL. This allows the RTL optimizers to delete the libcall if the
3933 condition can be determined at compile-time. */
3934 if (comparison == UNORDERED)
3935 {
3936 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3937 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3938 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3939 temp, const_true_rtx, equiv);
3940 }
3941 else
3942 {
3943 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3944 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3945 {
3946 rtx true_rtx, false_rtx;
3947
3948 switch (comparison)
3949 {
3950 case EQ:
3951 true_rtx = const0_rtx;
3952 false_rtx = const_true_rtx;
3953 break;
3954
3955 case NE:
3956 true_rtx = const_true_rtx;
3957 false_rtx = const0_rtx;
3958 break;
3959
3960 case GT:
3961 true_rtx = const1_rtx;
3962 false_rtx = const0_rtx;
3963 break;
3964
3965 case GE:
3966 true_rtx = const0_rtx;
3967 false_rtx = constm1_rtx;
3968 break;
3969
3970 case LT:
3971 true_rtx = constm1_rtx;
3972 false_rtx = const0_rtx;
3973 break;
3974
3975 case LE:
3976 true_rtx = const0_rtx;
3977 false_rtx = const1_rtx;
3978 break;
3979
3980 default:
3981 gcc_unreachable ();
3982 }
3983 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3984 equiv, true_rtx, false_rtx);
3985 }
3986 }
3987
3988 start_sequence ();
3989 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3990 word_mode, 2, x, mode, y, mode);
3991 insns = get_insns ();
3992 end_sequence ();
3993
3994 target = gen_reg_rtx (word_mode);
3995 emit_libcall_block (insns, target, value, equiv);
3996
3997 if (comparison == UNORDERED
3998 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3999 comparison = reversed_p ? EQ : NE;
4000
4001 *px = target;
4002 *py = const0_rtx;
4003 *pmode = word_mode;
4004 *pcomparison = comparison;
4005 *punsignedp = 0;
4006 }
4007 \f
4008 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4009
4010 void
4011 emit_indirect_jump (rtx loc)
4012 {
4013 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4014 (loc, Pmode))
4015 loc = copy_to_mode_reg (Pmode, loc);
4016
4017 emit_jump_insn (gen_indirect_jump (loc));
4018 emit_barrier ();
4019 }
4020 \f
4021 #ifdef HAVE_conditional_move
4022
4023 /* Emit a conditional move instruction if the machine supports one for that
4024 condition and machine mode.
4025
4026 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4027 the mode to use should they be constants. If it is VOIDmode, they cannot
4028 both be constants.
4029
4030 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4031 should be stored there. MODE is the mode to use should they be constants.
4032 If it is VOIDmode, they cannot both be constants.
4033
4034 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4035 is not supported. */
4036
4037 rtx
4038 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4039 enum machine_mode cmode, rtx op2, rtx op3,
4040 enum machine_mode mode, int unsignedp)
4041 {
4042 rtx tem, subtarget, comparison, insn;
4043 enum insn_code icode;
4044 enum rtx_code reversed;
4045
4046 /* If one operand is constant, make it the second one. Only do this
4047 if the other operand is not constant as well. */
4048
4049 if (swap_commutative_operands_p (op0, op1))
4050 {
4051 tem = op0;
4052 op0 = op1;
4053 op1 = tem;
4054 code = swap_condition (code);
4055 }
4056
4057 /* get_condition will prefer to generate LT and GT even if the old
4058 comparison was against zero, so undo that canonicalization here since
4059 comparisons against zero are cheaper. */
4060 if (code == LT && op1 == const1_rtx)
4061 code = LE, op1 = const0_rtx;
4062 else if (code == GT && op1 == constm1_rtx)
4063 code = GE, op1 = const0_rtx;
4064
4065 if (cmode == VOIDmode)
4066 cmode = GET_MODE (op0);
4067
4068 if (swap_commutative_operands_p (op2, op3)
4069 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4070 != UNKNOWN))
4071 {
4072 tem = op2;
4073 op2 = op3;
4074 op3 = tem;
4075 code = reversed;
4076 }
4077
4078 if (mode == VOIDmode)
4079 mode = GET_MODE (op2);
4080
4081 icode = movcc_gen_code[mode];
4082
4083 if (icode == CODE_FOR_nothing)
4084 return 0;
4085
4086 if (!target)
4087 target = gen_reg_rtx (mode);
4088
4089 subtarget = target;
4090
4091 /* If the insn doesn't accept these operands, put them in pseudos. */
4092
4093 if (!insn_data[icode].operand[0].predicate
4094 (subtarget, insn_data[icode].operand[0].mode))
4095 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4096
4097 if (!insn_data[icode].operand[2].predicate
4098 (op2, insn_data[icode].operand[2].mode))
4099 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4100
4101 if (!insn_data[icode].operand[3].predicate
4102 (op3, insn_data[icode].operand[3].mode))
4103 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4104
4105 /* Everything should now be in the suitable form, so emit the compare insn
4106 and then the conditional move. */
4107
4108 comparison
4109 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4110
4111 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4112 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4113 return NULL and let the caller figure out how best to deal with this
4114 situation. */
4115 if (GET_CODE (comparison) != code)
4116 return NULL_RTX;
4117
4118 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4119
4120 /* If that failed, then give up. */
4121 if (insn == 0)
4122 return 0;
4123
4124 emit_insn (insn);
4125
4126 if (subtarget != target)
4127 convert_move (target, subtarget, 0);
4128
4129 return target;
4130 }
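
/* A sketch of typical use (operand names here are placeholders): to
   compute MAX (a, b) with a conditional move, a caller can write

     if (!emit_conditional_move (target, GT, a, b, mode, a, b, mode, 0))
       ... fall back to a compare-and-branch sequence ...

   since OP2 (here A) is stored when the comparison holds and OP3
   (here B) otherwise.  */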
4131
4132 /* Return nonzero if a conditional move of mode MODE is supported.
4133
4134 This function is for combine so it can tell whether an insn that looks
4135 like a conditional move is actually supported by the hardware. If we
4136 guess wrong we lose a bit on optimization, but that's it. */
4137 /* ??? sparc64 supports conditionally moving integer values based on fp
4138 comparisons, and vice versa. How do we handle them? */
4139
4140 int
4141 can_conditionally_move_p (enum machine_mode mode)
4142 {
4143 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4144 return 1;
4145
4146 return 0;
4147 }
4148
4149 #endif /* HAVE_conditional_move */
4150
4151 /* Emit a conditional addition instruction if the machine supports one for that
4152 condition and machine mode.
4153
4154 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4155 the mode to use should they be constants. If it is VOIDmode, they cannot
4156 both be constants.
4157
4158 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4159 should be stored there. MODE is the mode to use should they be constants.
4160 If it is VOIDmode, they cannot both be constants.
4161
4162 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4163 is not supported. */
4164
4165 rtx
4166 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4167 enum machine_mode cmode, rtx op2, rtx op3,
4168 enum machine_mode mode, int unsignedp)
4169 {
4170 rtx tem, subtarget, comparison, insn;
4171 enum insn_code icode;
4172 enum rtx_code reversed;
4173
4174 /* If one operand is constant, make it the second one. Only do this
4175 if the other operand is not constant as well. */
4176
4177 if (swap_commutative_operands_p (op0, op1))
4178 {
4179 tem = op0;
4180 op0 = op1;
4181 op1 = tem;
4182 code = swap_condition (code);
4183 }
4184
4185 /* get_condition will prefer to generate LT and GT even if the old
4186 comparison was against zero, so undo that canonicalization here since
4187 comparisons against zero are cheaper. */
4188 if (code == LT && op1 == const1_rtx)
4189 code = LE, op1 = const0_rtx;
4190 else if (code == GT && op1 == constm1_rtx)
4191 code = GE, op1 = const0_rtx;
4192
4193 if (cmode == VOIDmode)
4194 cmode = GET_MODE (op0);
4195
4196 if (swap_commutative_operands_p (op2, op3)
4197 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4198 != UNKNOWN))
4199 {
4200 tem = op2;
4201 op2 = op3;
4202 op3 = tem;
4203 code = reversed;
4204 }
4205
4206 if (mode == VOIDmode)
4207 mode = GET_MODE (op2);
4208
4209 icode = addcc_optab->handlers[(int) mode].insn_code;
4210
4211 if (icode == CODE_FOR_nothing)
4212 return 0;
4213
4214 if (!target)
4215 target = gen_reg_rtx (mode);
4216
4217 /* If the insn doesn't accept these operands, put them in pseudos. */
4218
4219 if (!insn_data[icode].operand[0].predicate
4220 (target, insn_data[icode].operand[0].mode))
4221 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4222 else
4223 subtarget = target;
4224
4225 if (!insn_data[icode].operand[2].predicate
4226 (op2, insn_data[icode].operand[2].mode))
4227 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4228
4229 if (!insn_data[icode].operand[3].predicate
4230 (op3, insn_data[icode].operand[3].mode))
4231 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4232
4233 /* Everything should now be in the suitable form, so emit the compare insn
4234 and then the conditional move. */
4235
4236 comparison
4237 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4238
4239 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4240 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4241 return NULL and let the caller figure out how best to deal with this
4242 situation. */
4243 if (GET_CODE (comparison) != code)
4244 return NULL_RTX;
4245
4246 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4247
4248 /* If that failed, then give up. */
4249 if (insn == 0)
4250 return 0;
4251
4252 emit_insn (insn);
4253
4254 if (subtarget != target)
4255 convert_move (target, subtarget, 0);
4256
4257 return target;
4258 }
4259 \f
4260 /* These functions attempt to generate an insn body, rather than
4261 emitting the insn, but if the gen function already emits them, we
4262 make no attempt to turn them back into naked patterns. */
4263
4264 /* Generate and return an insn body to add Y to X. */
4265
4266 rtx
4267 gen_add2_insn (rtx x, rtx y)
4268 {
4269 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4270
4271 gcc_assert (insn_data[icode].operand[0].predicate
4272 (x, insn_data[icode].operand[0].mode));
4273 gcc_assert (insn_data[icode].operand[1].predicate
4274 (x, insn_data[icode].operand[1].mode));
4275 gcc_assert (insn_data[icode].operand[2].predicate
4276 (y, insn_data[icode].operand[2].mode));
4277
4278 return GEN_FCN (icode) (x, x, y);
4279 }
4280
4281 /* Generate and return an insn body to add r1 and c,
4282 storing the result in r0. */
4283 rtx
4284 gen_add3_insn (rtx r0, rtx r1, rtx c)
4285 {
4286 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4287
4288 if (icode == CODE_FOR_nothing
4289 || !(insn_data[icode].operand[0].predicate
4290 (r0, insn_data[icode].operand[0].mode))
4291 || !(insn_data[icode].operand[1].predicate
4292 (r1, insn_data[icode].operand[1].mode))
4293 || !(insn_data[icode].operand[2].predicate
4294 (c, insn_data[icode].operand[2].mode)))
4295 return NULL_RTX;
4296
4297 return GEN_FCN (icode) (r0, r1, c);
4298 }
4299
4300 int
4301 have_add2_insn (rtx x, rtx y)
4302 {
4303 int icode;
4304
4305 gcc_assert (GET_MODE (x) != VOIDmode);
4306
4307 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4308
4309 if (icode == CODE_FOR_nothing)
4310 return 0;
4311
4312 if (!(insn_data[icode].operand[0].predicate
4313 (x, insn_data[icode].operand[0].mode))
4314 || !(insn_data[icode].operand[1].predicate
4315 (x, insn_data[icode].operand[1].mode))
4316 || !(insn_data[icode].operand[2].predicate
4317 (y, insn_data[icode].operand[2].mode)))
4318 return 0;
4319
4320 return 1;
4321 }
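
/* A typical caller pairs the two, e.g. (REG and INC being whatever
   rtxes the caller already has, shown only as a sketch):

     if (have_add2_insn (reg, inc))
       emit_insn (gen_add2_insn (reg, inc));

   gen_add2_insn asserts that its operands satisfy the insn's
   predicates, so the have_add2_insn check must come first whenever
   that is not already known.  */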
4322
4323 /* Generate and return an insn body to subtract Y from X. */
4324
4325 rtx
4326 gen_sub2_insn (rtx x, rtx y)
4327 {
4328 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4329
4330 gcc_assert (insn_data[icode].operand[0].predicate
4331 (x, insn_data[icode].operand[0].mode));
4332 gcc_assert (insn_data[icode].operand[1].predicate
4333 (x, insn_data[icode].operand[1].mode));
4334 gcc_assert (insn_data[icode].operand[2].predicate
4335 (y, insn_data[icode].operand[2].mode));
4336
4337 return GEN_FCN (icode) (x, x, y);
4338 }
4339
4340 /* Generate and return an insn body to subtract c from r1,
4341 storing the result in r0. */
4342 rtx
4343 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4344 {
4345 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4346
4347 if (icode == CODE_FOR_nothing
4348 || !(insn_data[icode].operand[0].predicate
4349 (r0, insn_data[icode].operand[0].mode))
4350 || !(insn_data[icode].operand[1].predicate
4351 (r1, insn_data[icode].operand[1].mode))
4352 || !(insn_data[icode].operand[2].predicate
4353 (c, insn_data[icode].operand[2].mode)))
4354 return NULL_RTX;
4355
4356 return GEN_FCN (icode) (r0, r1, c);
4357 }
4358
4359 int
4360 have_sub2_insn (rtx x, rtx y)
4361 {
4362 int icode;
4363
4364 gcc_assert (GET_MODE (x) != VOIDmode);
4365
4366 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4367
4368 if (icode == CODE_FOR_nothing)
4369 return 0;
4370
4371 if (!(insn_data[icode].operand[0].predicate
4372 (x, insn_data[icode].operand[0].mode))
4373 || !(insn_data[icode].operand[1].predicate
4374 (x, insn_data[icode].operand[1].mode))
4375 || !(insn_data[icode].operand[2].predicate
4376 (y, insn_data[icode].operand[2].mode)))
4377 return 0;
4378
4379 return 1;
4380 }
4381
4382 /* Generate the body of an instruction to copy Y into X.
4383 It may be a list of insns, if one insn isn't enough. */
4384
4385 rtx
4386 gen_move_insn (rtx x, rtx y)
4387 {
4388 rtx seq;
4389
4390 start_sequence ();
4391 emit_move_insn_1 (x, y);
4392 seq = get_insns ();
4393 end_sequence ();
4394 return seq;
4395 }
4396 \f
4397 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4398 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4399 no such operation exists, CODE_FOR_nothing will be returned. */
4400
4401 enum insn_code
4402 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4403 int unsignedp)
4404 {
4405 convert_optab tab;
4406 #ifdef HAVE_ptr_extend
4407 if (unsignedp < 0)
4408 return CODE_FOR_ptr_extend;
4409 #endif
4410
4411 tab = unsignedp ? zext_optab : sext_optab;
4412 return tab->handlers[to_mode][from_mode].insn_code;
4413 }
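
/* E.g. can_extend_p (DImode, SImode, 1) looks in zext_optab and returns
   the code of the target's zero_extendsidi2 pattern, if it has one;
   passing UNSIGNEDP as -1 asks for the special ptr_extend pattern on
   the targets that define HAVE_ptr_extend.  */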
4414
4415 /* Generate the body of an insn to extend Y (with mode MFROM)
4416 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4417
4418 rtx
4419 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4420 enum machine_mode mfrom, int unsignedp)
4421 {
4422 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4423 return GEN_FCN (icode) (x, y);
4424 }
4425 \f
4426 /* can_fix_p and can_float_p say whether the target machine
4427 can directly convert a given fixed point type to
4428 a given floating point type, or vice versa.
4429 The returned value is the CODE_FOR_... value to use,
4430 or CODE_FOR_nothing if these modes cannot be directly converted.
4431
4432 *TRUNCP_PTR is set to 1 if it is necessary to output
4433 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4434
4435 static enum insn_code
4436 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4437 int unsignedp, int *truncp_ptr)
4438 {
4439 convert_optab tab;
4440 enum insn_code icode;
4441
4442 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4443 icode = tab->handlers[fixmode][fltmode].insn_code;
4444 if (icode != CODE_FOR_nothing)
4445 {
4446 *truncp_ptr = 0;
4447 return icode;
4448 }
4449
4450 /* FIXME: This requires the port to define both FIX and FTRUNC patterns
4451 for this to work. We need to rework the fix* and ftrunc* patterns
4452 and documentation. */
4453 tab = unsignedp ? ufix_optab : sfix_optab;
4454 icode = tab->handlers[fixmode][fltmode].insn_code;
4455 if (icode != CODE_FOR_nothing
4456 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4457 {
4458 *truncp_ptr = 1;
4459 return icode;
4460 }
4461
4462 *truncp_ptr = 0;
4463 return CODE_FOR_nothing;
4464 }
4465
4466 static enum insn_code
4467 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4468 int unsignedp)
4469 {
4470 convert_optab tab;
4471
4472 tab = unsignedp ? ufloat_optab : sfloat_optab;
4473 return tab->handlers[fltmode][fixmode].insn_code;
4474 }
4475 \f
4476 /* Generate code to convert FROM to floating point
4477 and store in TO. FROM must be fixed point and not VOIDmode.
4478 UNSIGNEDP nonzero means regard FROM as unsigned.
4479 Normally this is done by correcting the final value
4480 if it is negative. */
4481
4482 void
4483 expand_float (rtx to, rtx from, int unsignedp)
4484 {
4485 enum insn_code icode;
4486 rtx target = to;
4487 enum machine_mode fmode, imode;
4488 bool can_do_signed = false;
4489
4490 /* Crash now, because we won't be able to decide which mode to use. */
4491 gcc_assert (GET_MODE (from) != VOIDmode);
4492
4493 /* Look for an insn to do the conversion. Do it in the specified
4494 modes if possible; otherwise convert either input, output or both to
4495 wider mode. If the integer mode is wider than the mode of FROM,
4496 we can do the conversion signed even if the input is unsigned. */
4497
4498 for (fmode = GET_MODE (to); fmode != VOIDmode;
4499 fmode = GET_MODE_WIDER_MODE (fmode))
4500 for (imode = GET_MODE (from); imode != VOIDmode;
4501 imode = GET_MODE_WIDER_MODE (imode))
4502 {
4503 int doing_unsigned = unsignedp;
4504
4505 if (fmode != GET_MODE (to)
4506 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4507 continue;
4508
4509 icode = can_float_p (fmode, imode, unsignedp);
4510 if (icode == CODE_FOR_nothing && unsignedp)
4511 {
4512 enum insn_code scode = can_float_p (fmode, imode, 0);
4513 if (scode != CODE_FOR_nothing)
4514 can_do_signed = true;
4515 if (imode != GET_MODE (from))
4516 icode = scode, doing_unsigned = 0;
4517 }
4518
4519 if (icode != CODE_FOR_nothing)
4520 {
4521 if (imode != GET_MODE (from))
4522 from = convert_to_mode (imode, from, unsignedp);
4523
4524 if (fmode != GET_MODE (to))
4525 target = gen_reg_rtx (fmode);
4526
4527 emit_unop_insn (icode, target, from,
4528 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4529
4530 if (target != to)
4531 convert_move (to, target, 0);
4532 return;
4533 }
4534 }
4535
4536 /* Unsigned integer, and no way to convert directly. For binary
4537 floating point modes, convert as signed, then conditionally adjust
4538 the result. */
4539 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4540 {
4541 rtx label = gen_label_rtx ();
4542 rtx temp;
4543 REAL_VALUE_TYPE offset;
4544
4545 /* Look for a usable floating mode FMODE wider than the source and at
4546 least as wide as the target. Using FMODE will avoid rounding woes
4547 with unsigned values greater than the signed maximum value. */
4548
4549 for (fmode = GET_MODE (to); fmode != VOIDmode;
4550 fmode = GET_MODE_WIDER_MODE (fmode))
4551 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4552 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4553 break;
4554
4555 if (fmode == VOIDmode)
4556 {
4557 /* There is no such mode. Pretend the target is wide enough. */
4558 fmode = GET_MODE (to);
4559
4560 /* Avoid double-rounding when TO is narrower than FROM. */
4561 if ((significand_size (fmode) + 1)
4562 < GET_MODE_BITSIZE (GET_MODE (from)))
4563 {
4564 rtx temp1;
4565 rtx neglabel = gen_label_rtx ();
4566
4567 /* Don't use TARGET if it isn't a register, is a hard register,
4568 or is the wrong mode. */
4569 if (!REG_P (target)
4570 || REGNO (target) < FIRST_PSEUDO_REGISTER
4571 || GET_MODE (target) != fmode)
4572 target = gen_reg_rtx (fmode);
4573
4574 imode = GET_MODE (from);
4575 do_pending_stack_adjust ();
4576
4577 /* Test whether the sign bit is set. */
4578 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4579 0, neglabel);
4580
4581 /* The sign bit is not set. Convert as signed. */
4582 expand_float (target, from, 0);
4583 emit_jump_insn (gen_jump (label));
4584 emit_barrier ();
4585
4586 /* The sign bit is set.
4587 Convert to a usable (positive signed) value by shifting right
4588 one bit, while remembering if a nonzero bit was shifted
4589 out; i.e., compute (from & 1) | (from >> 1). */
4590
4591 emit_label (neglabel);
4592 temp = expand_binop (imode, and_optab, from, const1_rtx,
4593 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4594 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4595 NULL_RTX, 1);
4596 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4597 OPTAB_LIB_WIDEN);
4598 expand_float (target, temp, 0);
4599
4600 /* Multiply by 2 to undo the shift above. */
4601 temp = expand_binop (fmode, add_optab, target, target,
4602 target, 0, OPTAB_LIB_WIDEN);
4603 if (temp != target)
4604 emit_move_insn (target, temp);
4605
4606 do_pending_stack_adjust ();
4607 emit_label (label);
4608 goto done;
4609 }
4610 }
4611
4612 /* If we are about to do some arithmetic to correct for an
4613 unsigned operand, do it in a pseudo-register. */
4614
4615 if (GET_MODE (to) != fmode
4616 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4617 target = gen_reg_rtx (fmode);
4618
4619 /* Convert as signed integer to floating. */
4620 expand_float (target, from, 0);
4621
4622 /* If FROM is negative (and therefore TO is negative),
4623 correct its value by 2**bitwidth. */
4624
4625 do_pending_stack_adjust ();
4626 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4627 0, label);
4628
4629
4630 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4631 temp = expand_binop (fmode, add_optab, target,
4632 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4633 target, 0, OPTAB_LIB_WIDEN);
4634 if (temp != target)
4635 emit_move_insn (target, temp);
4636
4637 do_pending_stack_adjust ();
4638 emit_label (label);
4639 goto done;
4640 }
4641
4642 /* No hardware instruction available; call a library routine. */
4643 {
4644 rtx libfunc;
4645 rtx insns;
4646 rtx value;
4647 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4648
4649 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4650 from = convert_to_mode (SImode, from, unsignedp);
4651
4652 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4653 gcc_assert (libfunc);
4654
4655 start_sequence ();
4656
4657 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4658 GET_MODE (to), 1, from,
4659 GET_MODE (from));
4660 insns = get_insns ();
4661 end_sequence ();
4662
4663 emit_libcall_block (insns, target, value,
4664 gen_rtx_FLOAT (GET_MODE (to), from));
4665 }
4666
4667 done:
4668
4669 /* Copy result to requested destination
4670 if we have been computing in a temp location. */
4671
4672 if (target != to)
4673 {
4674 if (GET_MODE (target) == GET_MODE (to))
4675 emit_move_insn (to, target);
4676 else
4677 convert_move (to, target, 0);
4678 }
4679 }
4680 \f
4681 /* Generate code to convert FROM to fixed point and store in TO. FROM
4682 must be floating point. */
4683
4684 void
4685 expand_fix (rtx to, rtx from, int unsignedp)
4686 {
4687 enum insn_code icode;
4688 rtx target = to;
4689 enum machine_mode fmode, imode;
4690 int must_trunc = 0;
4691
4692 /* We first try to find a pair of modes, one real and one integer, at
4693 least as wide as FROM and TO, respectively, in which we can open-code
4694 this conversion. If the integer mode is wider than the mode of TO,
4695 we can do the conversion either signed or unsigned. */
4696
4697 for (fmode = GET_MODE (from); fmode != VOIDmode;
4698 fmode = GET_MODE_WIDER_MODE (fmode))
4699 for (imode = GET_MODE (to); imode != VOIDmode;
4700 imode = GET_MODE_WIDER_MODE (imode))
4701 {
4702 int doing_unsigned = unsignedp;
4703
4704 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4705 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4706 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4707
4708 if (icode != CODE_FOR_nothing)
4709 {
4710 if (fmode != GET_MODE (from))
4711 from = convert_to_mode (fmode, from, 0);
4712
4713 if (must_trunc)
4714 {
4715 rtx temp = gen_reg_rtx (GET_MODE (from));
4716 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4717 temp, 0);
4718 }
4719
4720 if (imode != GET_MODE (to))
4721 target = gen_reg_rtx (imode);
4722
4723 emit_unop_insn (icode, target, from,
4724 doing_unsigned ? UNSIGNED_FIX : FIX);
4725 if (target != to)
4726 convert_move (to, target, unsignedp);
4727 return;
4728 }
4729 }
4730
4731 /* For an unsigned conversion, there is one more way to do it.
4732 If a signed conversion is available, we generate code that compares
4733 the real value to the largest representable positive number. If it
4734 is smaller, the conversion is done normally. Otherwise, subtract
4735 one plus the highest signed number, convert, and add it back.
4736
4737 We only need to check all real modes, since we know we didn't find
4738 anything with a wider integer mode.
4739
4740 This code used to extend the FP value into a mode wider than the destination.
4741 This is not needed. Consider, for instance, conversion from SFmode
4742 into DImode.
4743
4744 The hot path through the code deals with inputs smaller than 2^63,
4745 doing just the conversion, so there are no bits to lose.
4746
4747 In the other path we know the value is positive and in the range 2^63..2^64-1
4748 inclusive (for any other input, overflow happens and the result is undefined).
4749 So the most significant bit set in the mantissa corresponds to
4750 2^63. The subtraction of 2^63 should not generate any rounding as it
4751 simply clears out that bit. The rest is trivial. */
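
/* As a worked example (illustrative only, assuming DFmode input,
   DImode output and a 64-bit HOST_WIDE_INT): for the value
   2^63 + 2048.0, which is exactly representable in DFmode, the code
   below subtracts 2^63 without any rounding, giving 2048.0; the
   signed fix yields 2048; XORing with (HOST_WIDE_INT) 1 << 63 then
   restores the high bit, producing 0x8000000000000800.  */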
4752
4753 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4754 for (fmode = GET_MODE (from); fmode != VOIDmode;
4755 fmode = GET_MODE_WIDER_MODE (fmode))
4756 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4757 &must_trunc))
4758 {
4759 int bitsize;
4760 REAL_VALUE_TYPE offset;
4761 rtx limit, lab1, lab2, insn;
4762
4763 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4764 real_2expN (&offset, bitsize - 1);
4765 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4766 lab1 = gen_label_rtx ();
4767 lab2 = gen_label_rtx ();
4768
4769 if (fmode != GET_MODE (from))
4770 from = convert_to_mode (fmode, from, 0);
4771
4772 /* See if we need to do the subtraction. */
4773 do_pending_stack_adjust ();
4774 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4775 0, lab1);
4776
4777 /* If not, do the signed "fix" and branch around fixup code. */
4778 expand_fix (to, from, 0);
4779 emit_jump_insn (gen_jump (lab2));
4780 emit_barrier ();
4781
4782 /* Otherwise, subtract 2**(N-1), convert to signed number,
4783 then add 2**(N-1). Do the addition using XOR since this
4784 will often generate better code. */
4785 emit_label (lab1);
4786 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4787 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4788 expand_fix (to, target, 0);
4789 target = expand_binop (GET_MODE (to), xor_optab, to,
4790 gen_int_mode
4791 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4792 GET_MODE (to)),
4793 to, 1, OPTAB_LIB_WIDEN);
4794
4795 if (target != to)
4796 emit_move_insn (to, target);
4797
4798 emit_label (lab2);
4799
4800 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4801 != CODE_FOR_nothing)
4802 {
4803 /* Make a place for a REG_NOTE and add it. */
4804 insn = emit_move_insn (to, to);
4805 set_unique_reg_note (insn,
4806 REG_EQUAL,
4807 gen_rtx_fmt_e (UNSIGNED_FIX,
4808 GET_MODE (to),
4809 copy_rtx (from)));
4810 }
4811
4812 return;
4813 }
4814
4815 /* We can't do it with an insn, so use a library call. But first ensure
4816 that the mode of TO is at least as wide as SImode, since those are the
4817 only library calls we know about. */
4818
4819 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4820 {
4821 target = gen_reg_rtx (SImode);
4822
4823 expand_fix (target, from, unsignedp);
4824 }
4825 else
4826 {
4827 rtx insns;
4828 rtx value;
4829 rtx libfunc;
4830
4831 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4832 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4833 gcc_assert (libfunc);
4834
4835 start_sequence ();
4836
4837 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4838 GET_MODE (to), 1, from,
4839 GET_MODE (from));
4840 insns = get_insns ();
4841 end_sequence ();
4842
4843 emit_libcall_block (insns, target, value,
4844 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4845 GET_MODE (to), from));
4846 }
4847
4848 if (target != to)
4849 {
4850 if (GET_MODE (to) == GET_MODE (target))
4851 emit_move_insn (to, target);
4852 else
4853 convert_move (to, target, 0);
4854 }
4855 }
4856 \f
4857 /* Report whether we have an instruction to perform the operation
4858 specified by CODE on operands of mode MODE. */
4859 int
4860 have_insn_for (enum rtx_code code, enum machine_mode mode)
4861 {
4862 return (code_to_optab[(int) code] != 0
4863 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4864 != CODE_FOR_nothing));
4865 }
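
/* For instance, have_insn_for (PLUS, SImode) is nonzero exactly when
   add_optab (set up from PLUS below) has a real insn for SImode; it is
   a cheap way for callers to ask whether the target can perform an
   operation directly before choosing an expansion strategy.  */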
4866
4867 /* Create a blank optab. */
4868 static optab
4869 new_optab (void)
4870 {
4871 int i;
4872 optab op = ggc_alloc (sizeof (struct optab));
4873 for (i = 0; i < NUM_MACHINE_MODES; i++)
4874 {
4875 op->handlers[i].insn_code = CODE_FOR_nothing;
4876 op->handlers[i].libfunc = 0;
4877 }
4878
4879 return op;
4880 }
4881
4882 static convert_optab
4883 new_convert_optab (void)
4884 {
4885 int i, j;
4886 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4887 for (i = 0; i < NUM_MACHINE_MODES; i++)
4888 for (j = 0; j < NUM_MACHINE_MODES; j++)
4889 {
4890 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4891 op->handlers[i][j].libfunc = 0;
4892 }
4893 return op;
4894 }
4895
4896 /* Same, but fill in its code as CODE, and write it into the
4897 code_to_optab table. */
4898 static inline optab
4899 init_optab (enum rtx_code code)
4900 {
4901 optab op = new_optab ();
4902 op->code = code;
4903 code_to_optab[(int) code] = op;
4904 return op;
4905 }
4906
4907 /* Same, but fill in its code as CODE, and do _not_ write it into
4908 the code_to_optab table. */
4909 static inline optab
4910 init_optabv (enum rtx_code code)
4911 {
4912 optab op = new_optab ();
4913 op->code = code;
4914 return op;
4915 }
4916
4917 /* Conversion optabs never go in the code_to_optab table. */
4918 static inline convert_optab
4919 init_convert_optab (enum rtx_code code)
4920 {
4921 convert_optab op = new_convert_optab ();
4922 op->code = code;
4923 return op;
4924 }
4925
4926 /* Initialize the libfunc fields of an entire group of entries in some
4927 optab. Each entry is set equal to a string consisting of a leading
4928 pair of underscores followed by a generic operation name followed by
4929 a mode name (downshifted to lowercase) followed by a single character
4930 representing the number of operands for the given operation (which is
4931 usually one of the characters '2', '3', or '4').
4932
4933 OPTABLE is the table in which libfunc fields are to be initialized.
4934 FIRST_MODE is the first machine mode index in the given optab to
4935 initialize.
4936 LAST_MODE is the last machine mode index in the given optab to
4937 initialize.
4938 OPNAME is the generic (string) name of the operation.
4939 SUFFIX is the character which specifies the number of operands for
4940 the given generic operation.
4941 */
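
/* For example (assuming the usual libgcc naming), initializing
   add_optab over the integer modes with OPNAME "add" and SUFFIX '3'
   produces entries such as "__addsi3" and "__adddi3", and sub_optab
   in DFmode with SUFFIX '3' produces "__subdf3".  */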
4942
4943 static void
4944 init_libfuncs (optab optable, int first_mode, int last_mode,
4945 const char *opname, int suffix)
4946 {
4947 int mode;
4948 unsigned opname_len = strlen (opname);
4949
4950 for (mode = first_mode; (int) mode <= (int) last_mode;
4951 mode = (enum machine_mode) ((int) mode + 1))
4952 {
4953 const char *mname = GET_MODE_NAME (mode);
4954 unsigned mname_len = strlen (mname);
4955 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4956 char *p;
4957 const char *q;
4958
4959 p = libfunc_name;
4960 *p++ = '_';
4961 *p++ = '_';
4962 for (q = opname; *q; )
4963 *p++ = *q++;
4964 for (q = mname; *q; q++)
4965 *p++ = TOLOWER (*q);
4966 *p++ = suffix;
4967 *p = '\0';
4968
4969 optable->handlers[(int) mode].libfunc
4970 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4971 }
4972 }
4973
4974 /* Initialize the libfunc fields of an entire group of entries in some
4975 optab which correspond to all integer mode operations. The parameters
4976 have the same meaning as similarly named ones for the `init_libfuncs'
4977 routine. (See above). */
4978
4979 static void
4980 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4981 {
4982 int maxsize = 2*BITS_PER_WORD;
4983 if (maxsize < LONG_LONG_TYPE_SIZE)
4984 maxsize = LONG_LONG_TYPE_SIZE;
4985 init_libfuncs (optable, word_mode,
4986 mode_for_size (maxsize, MODE_INT, 0),
4987 opname, suffix);
4988 }
4989
4990 /* Initialize the libfunc fields of an entire group of entries in some
4991 optab which correspond to all real mode operations. The parameters
4992 have the same meaning as similarly named ones for the `init_libfuncs'
4993 routine. (See above). */
4994
4995 static void
4996 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4997 {
4998 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4999 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5000 opname, suffix);
5001 }
5002
5003 /* Initialize the libfunc fields of an entire group of entries of an
5004 inter-mode-class conversion optab. The string formation rules are
5005 similar to the ones for init_libfuncs, above, but instead of having
5006 a mode name and an operand count these functions have two mode names
5007 and no operand count. */
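/* For example, a conversion optab initialized with OPNAME "float" from
   MODE_INT to MODE_FLOAT would get entries such as "__floatsidf"
   (SImode to DFmode) and "__floatdisf" (DImode to SFmode), matching
   the usual libgcc names.  */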
5008 static void
5009 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5010 enum mode_class from_class,
5011 enum mode_class to_class)
5012 {
5013 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5014 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5015 size_t opname_len = strlen (opname);
5016 size_t max_mname_len = 0;
5017
5018 enum machine_mode fmode, tmode;
5019 const char *fname, *tname;
5020 const char *q;
5021 char *libfunc_name, *suffix;
5022 char *p;
5023
5024 for (fmode = first_from_mode;
5025 fmode != VOIDmode;
5026 fmode = GET_MODE_WIDER_MODE (fmode))
5027 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5028
5029 for (tmode = first_to_mode;
5030 tmode != VOIDmode;
5031 tmode = GET_MODE_WIDER_MODE (tmode))
5032 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5033
5034 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5035 libfunc_name[0] = '_';
5036 libfunc_name[1] = '_';
5037 memcpy (&libfunc_name[2], opname, opname_len);
5038 suffix = libfunc_name + opname_len + 2;
5039
5040 for (fmode = first_from_mode; fmode != VOIDmode;
5041 fmode = GET_MODE_WIDER_MODE (fmode))
5042 for (tmode = first_to_mode; tmode != VOIDmode;
5043 tmode = GET_MODE_WIDER_MODE (tmode))
5044 {
5045 fname = GET_MODE_NAME (fmode);
5046 tname = GET_MODE_NAME (tmode);
5047
5048 p = suffix;
5049 for (q = fname; *q; p++, q++)
5050 *p = TOLOWER (*q);
5051 for (q = tname; *q; p++, q++)
5052 *p = TOLOWER (*q);
5053
5054 *p = '\0';
5055
5056 tab->handlers[tmode][fmode].libfunc
5057 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5058 p - libfunc_name));
5059 }
5060 }
5061
5062 /* Initialize the libfunc fields of an entire group of entries of an
5063 intra-mode-class conversion optab. The string formation rules are
5064 similar to the ones for init_libfuncs, above. WIDENING says whether
5065 the optab goes from narrow to wide modes or vice versa. These functions
5066 have two mode names _and_ an operand count. */
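/* For example, an optab initialized with OPNAME "extend" over
   MODE_FLOAT with WIDENING true gets entries such as "__extendsfdf2",
   while OPNAME "trunc" with WIDENING false gives "__truncdfsf2"; the
   narrow mode name comes first when widening and last when narrowing,
   and a trailing '2' is appended.  */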
5067 static void
5068 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5069 enum mode_class class, bool widening)
5070 {
5071 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5072 size_t opname_len = strlen (opname);
5073 size_t max_mname_len = 0;
5074
5075 enum machine_mode nmode, wmode;
5076 const char *nname, *wname;
5077 const char *q;
5078 char *libfunc_name, *suffix;
5079 char *p;
5080
5081 for (nmode = first_mode; nmode != VOIDmode;
5082 nmode = GET_MODE_WIDER_MODE (nmode))
5083 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5084
5085 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5086 libfunc_name[0] = '_';
5087 libfunc_name[1] = '_';
5088 memcpy (&libfunc_name[2], opname, opname_len);
5089 suffix = libfunc_name + opname_len + 2;
5090
5091 for (nmode = first_mode; nmode != VOIDmode;
5092 nmode = GET_MODE_WIDER_MODE (nmode))
5093 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5094 wmode = GET_MODE_WIDER_MODE (wmode))
5095 {
5096 nname = GET_MODE_NAME (nmode);
5097 wname = GET_MODE_NAME (wmode);
5098
5099 p = suffix;
5100 for (q = widening ? nname : wname; *q; p++, q++)
5101 *p = TOLOWER (*q);
5102 for (q = widening ? wname : nname; *q; p++, q++)
5103 *p = TOLOWER (*q);
5104
5105 *p++ = '2';
5106 *p = '\0';
5107
5108 tab->handlers[widening ? wmode : nmode]
5109 [widening ? nmode : wmode].libfunc
5110 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5111 p - libfunc_name));
5112 }
5113 }
5114
5115
5116 rtx
5117 init_one_libfunc (const char *name)
5118 {
5119 rtx symbol;
5120
5121 /* Create a FUNCTION_DECL that can be passed to
5122 targetm.encode_section_info. */
5123 /* ??? We don't have any type information except that this is
5124 a function. Pretend this is "int foo()". */
5125 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5126 build_function_type (integer_type_node, NULL_TREE));
5127 DECL_ARTIFICIAL (decl) = 1;
5128 DECL_EXTERNAL (decl) = 1;
5129 TREE_PUBLIC (decl) = 1;
5130
5131 symbol = XEXP (DECL_RTL (decl), 0);
5132
5133 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5134 are the flags assigned by targetm.encode_section_info. */
5135 SET_SYMBOL_REF_DECL (symbol, 0);
5136
5137 return symbol;
5138 }
5139
5140 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5141 MODE to NAME, which should be either 0 or a string constant. */
5142 void
5143 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5144 {
5145 if (name)
5146 optable->handlers[mode].libfunc = init_one_libfunc (name);
5147 else
5148 optable->handlers[mode].libfunc = 0;
5149 }
5150
5151 /* Call this to reset the function entry for one conversion optab
5152 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5153 either 0 or a string constant. */
5154 void
5155 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5156 enum machine_mode fmode, const char *name)
5157 {
5158 if (name)
5159 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5160 else
5161 optable->handlers[tmode][fmode].libfunc = 0;
5162 }
5163
5164 /* Call this once to initialize the contents of the optabs
5165 appropriately for the current target machine. */
5166
5167 void
5168 init_optabs (void)
5169 {
5170 unsigned int i;
5171
5172 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5173
5174 for (i = 0; i < NUM_RTX_CODE; i++)
5175 setcc_gen_code[i] = CODE_FOR_nothing;
5176
5177 #ifdef HAVE_conditional_move
5178 for (i = 0; i < NUM_MACHINE_MODES; i++)
5179 movcc_gen_code[i] = CODE_FOR_nothing;
5180 #endif
5181
5182 for (i = 0; i < NUM_MACHINE_MODES; i++)
5183 {
5184 vcond_gen_code[i] = CODE_FOR_nothing;
5185 vcondu_gen_code[i] = CODE_FOR_nothing;
5186 }
5187
5188 add_optab = init_optab (PLUS);
5189 addv_optab = init_optabv (PLUS);
5190 sub_optab = init_optab (MINUS);
5191 subv_optab = init_optabv (MINUS);
5192 smul_optab = init_optab (MULT);
5193 smulv_optab = init_optabv (MULT);
5194 smul_highpart_optab = init_optab (UNKNOWN);
5195 umul_highpart_optab = init_optab (UNKNOWN);
5196 smul_widen_optab = init_optab (UNKNOWN);
5197 umul_widen_optab = init_optab (UNKNOWN);
5198 usmul_widen_optab = init_optab (UNKNOWN);
5199 sdiv_optab = init_optab (DIV);
5200 sdivv_optab = init_optabv (DIV);
5201 sdivmod_optab = init_optab (UNKNOWN);
5202 udiv_optab = init_optab (UDIV);
5203 udivmod_optab = init_optab (UNKNOWN);
5204 smod_optab = init_optab (MOD);
5205 umod_optab = init_optab (UMOD);
5206 fmod_optab = init_optab (UNKNOWN);
5207 drem_optab = init_optab (UNKNOWN);
5208 ftrunc_optab = init_optab (UNKNOWN);
5209 and_optab = init_optab (AND);
5210 ior_optab = init_optab (IOR);
5211 xor_optab = init_optab (XOR);
5212 ashl_optab = init_optab (ASHIFT);
5213 ashr_optab = init_optab (ASHIFTRT);
5214 lshr_optab = init_optab (LSHIFTRT);
5215 rotl_optab = init_optab (ROTATE);
5216 rotr_optab = init_optab (ROTATERT);
5217 smin_optab = init_optab (SMIN);
5218 smax_optab = init_optab (SMAX);
5219 umin_optab = init_optab (UMIN);
5220 umax_optab = init_optab (UMAX);
5221 pow_optab = init_optab (UNKNOWN);
5222 atan2_optab = init_optab (UNKNOWN);
5223
5224 /* These three have codes assigned exclusively for the sake of
5225 have_insn_for. */
5226 mov_optab = init_optab (SET);
5227 movstrict_optab = init_optab (STRICT_LOW_PART);
5228 cmp_optab = init_optab (COMPARE);
5229
5230 ucmp_optab = init_optab (UNKNOWN);
5231 tst_optab = init_optab (UNKNOWN);
5232
5233 eq_optab = init_optab (EQ);
5234 ne_optab = init_optab (NE);
5235 gt_optab = init_optab (GT);
5236 ge_optab = init_optab (GE);
5237 lt_optab = init_optab (LT);
5238 le_optab = init_optab (LE);
5239 unord_optab = init_optab (UNORDERED);
5240
5241 neg_optab = init_optab (NEG);
5242 negv_optab = init_optabv (NEG);
5243 abs_optab = init_optab (ABS);
5244 absv_optab = init_optabv (ABS);
5245 addcc_optab = init_optab (UNKNOWN);
5246 one_cmpl_optab = init_optab (NOT);
5247 ffs_optab = init_optab (FFS);
5248 clz_optab = init_optab (CLZ);
5249 ctz_optab = init_optab (CTZ);
5250 popcount_optab = init_optab (POPCOUNT);
5251 parity_optab = init_optab (PARITY);
5252 sqrt_optab = init_optab (SQRT);
5253 floor_optab = init_optab (UNKNOWN);
5254 lfloor_optab = init_optab (UNKNOWN);
5255 ceil_optab = init_optab (UNKNOWN);
5256 lceil_optab = init_optab (UNKNOWN);
5257 round_optab = init_optab (UNKNOWN);
5258 btrunc_optab = init_optab (UNKNOWN);
5259 nearbyint_optab = init_optab (UNKNOWN);
5260 rint_optab = init_optab (UNKNOWN);
5261 lrint_optab = init_optab (UNKNOWN);
5262 sincos_optab = init_optab (UNKNOWN);
5263 sin_optab = init_optab (UNKNOWN);
5264 asin_optab = init_optab (UNKNOWN);
5265 cos_optab = init_optab (UNKNOWN);
5266 acos_optab = init_optab (UNKNOWN);
5267 exp_optab = init_optab (UNKNOWN);
5268 exp10_optab = init_optab (UNKNOWN);
5269 exp2_optab = init_optab (UNKNOWN);
5270 expm1_optab = init_optab (UNKNOWN);
5271 ldexp_optab = init_optab (UNKNOWN);
5272 logb_optab = init_optab (UNKNOWN);
5273 ilogb_optab = init_optab (UNKNOWN);
5274 log_optab = init_optab (UNKNOWN);
5275 log10_optab = init_optab (UNKNOWN);
5276 log2_optab = init_optab (UNKNOWN);
5277 log1p_optab = init_optab (UNKNOWN);
5278 tan_optab = init_optab (UNKNOWN);
5279 atan_optab = init_optab (UNKNOWN);
5280 copysign_optab = init_optab (UNKNOWN);
5281
5282 strlen_optab = init_optab (UNKNOWN);
5283 cbranch_optab = init_optab (UNKNOWN);
5284 cmov_optab = init_optab (UNKNOWN);
5285 cstore_optab = init_optab (UNKNOWN);
5286 push_optab = init_optab (UNKNOWN);
5287
5288 reduc_smax_optab = init_optab (UNKNOWN);
5289 reduc_umax_optab = init_optab (UNKNOWN);
5290 reduc_smin_optab = init_optab (UNKNOWN);
5291 reduc_umin_optab = init_optab (UNKNOWN);
5292 reduc_splus_optab = init_optab (UNKNOWN);
5293 reduc_uplus_optab = init_optab (UNKNOWN);
5294
5295 ssum_widen_optab = init_optab (UNKNOWN);
5296 usum_widen_optab = init_optab (UNKNOWN);
5297 sdot_prod_optab = init_optab (UNKNOWN);
5298 udot_prod_optab = init_optab (UNKNOWN);
5299
5300 vec_extract_optab = init_optab (UNKNOWN);
5301 vec_set_optab = init_optab (UNKNOWN);
5302 vec_init_optab = init_optab (UNKNOWN);
5303 vec_shl_optab = init_optab (UNKNOWN);
5304 vec_shr_optab = init_optab (UNKNOWN);
5305 vec_realign_load_optab = init_optab (UNKNOWN);
5306 movmisalign_optab = init_optab (UNKNOWN);
5307
5308 powi_optab = init_optab (UNKNOWN);
5309
5310 /* Conversions. */
5311 sext_optab = init_convert_optab (SIGN_EXTEND);
5312 zext_optab = init_convert_optab (ZERO_EXTEND);
5313 trunc_optab = init_convert_optab (TRUNCATE);
5314 sfix_optab = init_convert_optab (FIX);
5315 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5316 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5317 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5318 sfloat_optab = init_convert_optab (FLOAT);
5319 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5320
5321 for (i = 0; i < NUM_MACHINE_MODES; i++)
5322 {
5323 movmem_optab[i] = CODE_FOR_nothing;
5324 cmpstr_optab[i] = CODE_FOR_nothing;
5325 cmpstrn_optab[i] = CODE_FOR_nothing;
5326 cmpmem_optab[i] = CODE_FOR_nothing;
5327 setmem_optab[i] = CODE_FOR_nothing;
5328
5329 sync_add_optab[i] = CODE_FOR_nothing;
5330 sync_sub_optab[i] = CODE_FOR_nothing;
5331 sync_ior_optab[i] = CODE_FOR_nothing;
5332 sync_and_optab[i] = CODE_FOR_nothing;
5333 sync_xor_optab[i] = CODE_FOR_nothing;
5334 sync_nand_optab[i] = CODE_FOR_nothing;
5335 sync_old_add_optab[i] = CODE_FOR_nothing;
5336 sync_old_sub_optab[i] = CODE_FOR_nothing;
5337 sync_old_ior_optab[i] = CODE_FOR_nothing;
5338 sync_old_and_optab[i] = CODE_FOR_nothing;
5339 sync_old_xor_optab[i] = CODE_FOR_nothing;
5340 sync_old_nand_optab[i] = CODE_FOR_nothing;
5341 sync_new_add_optab[i] = CODE_FOR_nothing;
5342 sync_new_sub_optab[i] = CODE_FOR_nothing;
5343 sync_new_ior_optab[i] = CODE_FOR_nothing;
5344 sync_new_and_optab[i] = CODE_FOR_nothing;
5345 sync_new_xor_optab[i] = CODE_FOR_nothing;
5346 sync_new_nand_optab[i] = CODE_FOR_nothing;
5347 sync_compare_and_swap[i] = CODE_FOR_nothing;
5348 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5349 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5350 sync_lock_release[i] = CODE_FOR_nothing;
5351
5352 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5353 }
5354
5355 /* Fill in the optabs with the insns we support. */
5356 init_all_optabs ();
5357
5358 /* Initialize the optabs with the names of the library functions. */
5359 init_integral_libfuncs (add_optab, "add", '3');
5360 init_floating_libfuncs (add_optab, "add", '3');
5361 init_integral_libfuncs (addv_optab, "addv", '3');
5362 init_floating_libfuncs (addv_optab, "add", '3');
5363 init_integral_libfuncs (sub_optab, "sub", '3');
5364 init_floating_libfuncs (sub_optab, "sub", '3');
5365 init_integral_libfuncs (subv_optab, "subv", '3');
5366 init_floating_libfuncs (subv_optab, "sub", '3');
5367 init_integral_libfuncs (smul_optab, "mul", '3');
5368 init_floating_libfuncs (smul_optab, "mul", '3');
5369 init_integral_libfuncs (smulv_optab, "mulv", '3');
5370 init_floating_libfuncs (smulv_optab, "mul", '3');
5371 init_integral_libfuncs (sdiv_optab, "div", '3');
5372 init_floating_libfuncs (sdiv_optab, "div", '3');
5373 init_integral_libfuncs (sdivv_optab, "divv", '3');
5374 init_integral_libfuncs (udiv_optab, "udiv", '3');
5375 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5376 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5377 init_integral_libfuncs (smod_optab, "mod", '3');
5378 init_integral_libfuncs (umod_optab, "umod", '3');
5379 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5380 init_integral_libfuncs (and_optab, "and", '3');
5381 init_integral_libfuncs (ior_optab, "ior", '3');
5382 init_integral_libfuncs (xor_optab, "xor", '3');
5383 init_integral_libfuncs (ashl_optab, "ashl", '3');
5384 init_integral_libfuncs (ashr_optab, "ashr", '3');
5385 init_integral_libfuncs (lshr_optab, "lshr", '3');
5386 init_integral_libfuncs (smin_optab, "min", '3');
5387 init_floating_libfuncs (smin_optab, "min", '3');
5388 init_integral_libfuncs (smax_optab, "max", '3');
5389 init_floating_libfuncs (smax_optab, "max", '3');
5390 init_integral_libfuncs (umin_optab, "umin", '3');
5391 init_integral_libfuncs (umax_optab, "umax", '3');
5392 init_integral_libfuncs (neg_optab, "neg", '2');
5393 init_floating_libfuncs (neg_optab, "neg", '2');
5394 init_integral_libfuncs (negv_optab, "negv", '2');
5395 init_floating_libfuncs (negv_optab, "neg", '2');
5396 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5397 init_integral_libfuncs (ffs_optab, "ffs", '2');
5398 init_integral_libfuncs (clz_optab, "clz", '2');
5399 init_integral_libfuncs (ctz_optab, "ctz", '2');
5400 init_integral_libfuncs (popcount_optab, "popcount", '2');
5401 init_integral_libfuncs (parity_optab, "parity", '2');
5402
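 /* A non-authoritative note on the naming scheme used just above: the
 init_*_libfuncs helpers build the default libcall name from the base
 name, the mode suffix and the trailing operand count, so a call such as
 init_integral_libfuncs (add_optab, "add", '3') is expected to register
 names along the lines of "__addsi3" and "__adddi3", while a '2' suffix
 (e.g. "neg", '2') yields two-operand names such as "__negdi2".  */
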
5403 /* Comparison libcalls for integers MUST come in pairs,
5404 signed/unsigned. */
5405 init_integral_libfuncs (cmp_optab, "cmp", '2');
5406 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5407 init_floating_libfuncs (cmp_optab, "cmp", '2');
5408
5409 /* EQ etc are floating point only. */
5410 init_floating_libfuncs (eq_optab, "eq", '2');
5411 init_floating_libfuncs (ne_optab, "ne", '2');
5412 init_floating_libfuncs (gt_optab, "gt", '2');
5413 init_floating_libfuncs (ge_optab, "ge", '2');
5414 init_floating_libfuncs (lt_optab, "lt", '2');
5415 init_floating_libfuncs (le_optab, "le", '2');
5416 init_floating_libfuncs (unord_optab, "unord", '2');
5417
5418 init_floating_libfuncs (powi_optab, "powi", '2');
5419
5420 /* Conversions. */
5421 init_interclass_conv_libfuncs (sfloat_optab, "float",
5422 MODE_INT, MODE_FLOAT);
5423 init_interclass_conv_libfuncs (sfloat_optab, "float",
5424 MODE_INT, MODE_DECIMAL_FLOAT);
5425 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5426 MODE_INT, MODE_FLOAT);
5427 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5428 MODE_INT, MODE_DECIMAL_FLOAT);
5429 init_interclass_conv_libfuncs (sfix_optab, "fix",
5430 MODE_FLOAT, MODE_INT);
5431 init_interclass_conv_libfuncs (sfix_optab, "fix",
5432 MODE_DECIMAL_FLOAT, MODE_INT);
5433 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5434 MODE_FLOAT, MODE_INT);
5435 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5436 MODE_DECIMAL_FLOAT, MODE_INT);
5437 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5438 MODE_INT, MODE_DECIMAL_FLOAT);
5439
5440 /* sext_optab is also used for FLOAT_EXTEND. */
5441 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5442 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5443 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5444 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5445 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5446 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5447 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5448 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5449
5450 /* Use cabs for double complex abs, since systems generally have cabs.
5451 Don't define any libcall for float complex, so that cabs will be used. */
5452 if (complex_double_type_node)
5453 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5454 = init_one_libfunc ("cabs");
5455
5456 /* The ffs function operates on `int'. */
5457 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5458 = init_one_libfunc ("ffs");
5459
5460 abort_libfunc = init_one_libfunc ("abort");
5461 memcpy_libfunc = init_one_libfunc ("memcpy");
5462 memmove_libfunc = init_one_libfunc ("memmove");
5463 memcmp_libfunc = init_one_libfunc ("memcmp");
5464 memset_libfunc = init_one_libfunc ("memset");
5465 setbits_libfunc = init_one_libfunc ("__setbits");
5466
5467 #ifndef DONT_USE_BUILTIN_SETJMP
5468 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5469 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5470 #else
5471 setjmp_libfunc = init_one_libfunc ("setjmp");
5472 longjmp_libfunc = init_one_libfunc ("longjmp");
5473 #endif
5474 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5475 unwind_sjlj_unregister_libfunc
5476 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5477
5478 /* For function entry/exit instrumentation. */
5479 profile_function_entry_libfunc
5480 = init_one_libfunc ("__cyg_profile_func_enter");
5481 profile_function_exit_libfunc
5482 = init_one_libfunc ("__cyg_profile_func_exit");
5483
5484 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5485
5486 if (HAVE_conditional_trap)
5487 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5488
5489 /* Allow the target to add more libcalls or rename some, etc. */
5490 targetm.init_libfuncs ();
5491 }
5492
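/* An illustrative (hypothetical) fragment showing how the tables set up by
 init_optabs are typically consulted later; the expanders in this file
 follow the same shape:

 enum insn_code icode = add_optab->handlers[(int) SImode].insn_code;
 if (icode != CODE_FOR_nothing)
 emit_insn (GEN_FCN (icode) (dest, src0, src1));
 else if (add_optab->handlers[(int) SImode].libfunc != NULL_RTX)
 emit_library_call (...);

 Real callers such as expand_binop add widening and legitimization steps
 on top of this.  */
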
5493 #ifdef DEBUG
5494
5495 /* Print information about the current contents of the optabs on
5496 STDERR. */
5497
5498 static void
5499 debug_optab_libfuncs (void)
5500 {
5501 int i;
5502 int j;
5503 int k;
5504
5505 /* Dump the arithmetic optabs. */
5506 for (i = 0; i != (int) OTI_MAX; i++)
5507 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5508 {
5509 optab o;
5510 struct optab_handlers *h;
5511
5512 o = optab_table[i];
5513 h = &o->handlers[j];
5514 if (h->libfunc)
5515 {
5516 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5517 fprintf (stderr, "%s\t%s:\t%s\n",
5518 GET_RTX_NAME (o->code),
5519 GET_MODE_NAME (j),
5520 XSTR (h->libfunc, 0));
5521 }
5522 }
5523
5524 /* Dump the conversion optabs. */
5525 for (i = 0; i < (int) COI_MAX; ++i)
5526 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5527 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5528 {
5529 convert_optab o;
5530 struct optab_handlers *h;
5531
5532 o = convert_optab_table[i];
5533 h = &o->handlers[j][k];
5534 if (h->libfunc)
5535 {
5536 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5537 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5538 GET_RTX_NAME (o->code),
5539 GET_MODE_NAME (j),
5540 GET_MODE_NAME (k),
5541 XSTR (h->libfunc, 0));
5542 }
5543 }
5544 }
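
/* Hypothetical sample of the output produced above, assuming the default
 libfunc names registered by init_optabs:

 plus     SI:     __addsi3
 ffs      SI:     ffs

 one line per optab/mode pair (and per mode pair for the conversion
 optabs) that has a libfunc recorded.  */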
5545
5546 #endif /* DEBUG */
5547
5548 \f
5549 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5550 CODE. Return 0 on failure. */
5551
5552 rtx
5553 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5554 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5555 {
5556 enum machine_mode mode = GET_MODE (op1);
5557 enum insn_code icode;
5558 rtx insn;
5559
5560 if (!HAVE_conditional_trap)
5561 return 0;
5562
5563 if (mode == VOIDmode)
5564 return 0;
5565
5566 icode = cmp_optab->handlers[(int) mode].insn_code;
5567 if (icode == CODE_FOR_nothing)
5568 return 0;
5569
5570 start_sequence ();
5571 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5572 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5573 if (!op1 || !op2)
5574 {
5575 end_sequence ();
5576 return 0;
5577 }
5578 emit_insn (GEN_FCN (icode) (op1, op2));
5579
5580 PUT_CODE (trap_rtx, code);
5581 gcc_assert (HAVE_conditional_trap);
5582 insn = gen_conditional_trap (trap_rtx, tcode);
5583 if (insn)
5584 {
5585 emit_insn (insn);
5586 insn = get_insns ();
5587 }
5588 end_sequence ();
5589
5590 return insn;
5591 }
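
/* Illustrative use only (the call below is hypothetical, not taken from a
 real caller): to build "trap if X is zero" one would write something like

 rtx seq = gen_cond_trap (EQ, x, const0_rtx, const0_rtx);
 if (seq)
 emit_insn (seq);

 The returned value is the whole recorded sequence, so emitting it is the
 caller's responsibility.  */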
5592
5593 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5594 or unsigned operation code. */
5595
5596 static enum rtx_code
5597 get_rtx_code (enum tree_code tcode, bool unsignedp)
5598 {
5599 enum rtx_code code;
5600 switch (tcode)
5601 {
5602 case EQ_EXPR:
5603 code = EQ;
5604 break;
5605 case NE_EXPR:
5606 code = NE;
5607 break;
5608 case LT_EXPR:
5609 code = unsignedp ? LTU : LT;
5610 break;
5611 case LE_EXPR:
5612 code = unsignedp ? LEU : LE;
5613 break;
5614 case GT_EXPR:
5615 code = unsignedp ? GTU : GT;
5616 break;
5617 case GE_EXPR:
5618 code = unsignedp ? GEU : GE;
5619 break;
5620
5621 case UNORDERED_EXPR:
5622 code = UNORDERED;
5623 break;
5624 case ORDERED_EXPR:
5625 code = ORDERED;
5626 break;
5627 case UNLT_EXPR:
5628 code = UNLT;
5629 break;
5630 case UNLE_EXPR:
5631 code = UNLE;
5632 break;
5633 case UNGT_EXPR:
5634 code = UNGT;
5635 break;
5636 case UNGE_EXPR:
5637 code = UNGE;
5638 break;
5639 case UNEQ_EXPR:
5640 code = UNEQ;
5641 break;
5642 case LTGT_EXPR:
5643 code = LTGT;
5644 break;
5645
5646 default:
5647 gcc_unreachable ();
5648 }
5649 return code;
5650 }
5651
5652 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5653 unsigned operators. Do not generate compare instruction. */
5654
5655 static rtx
5656 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5657 {
5658 enum rtx_code rcode;
5659 tree t_op0, t_op1;
5660 rtx rtx_op0, rtx_op1;
5661
5662 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5663 ensures that the condition is a relational operation. */
5664 gcc_assert (COMPARISON_CLASS_P (cond));
5665
5666 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5667 t_op0 = TREE_OPERAND (cond, 0);
5668 t_op1 = TREE_OPERAND (cond, 1);
5669
5670 /* Expand operands. */
5671 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5672 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5673
5674 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5675 && GET_MODE (rtx_op0) != VOIDmode)
5676 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5677
5678 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5679 && GET_MODE (rtx_op1) != VOIDmode)
5680 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5681
5682 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5683 }
5684
5685 /* Return insn code for VEC_COND_EXPR EXPR. */
5686
5687 static inline enum insn_code
5688 get_vcond_icode (tree expr, enum machine_mode mode)
5689 {
5690 enum insn_code icode = CODE_FOR_nothing;
5691
5692 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5693 icode = vcondu_gen_code[mode];
5694 else
5695 icode = vcond_gen_code[mode];
5696 return icode;
5697 }
5698
5699 /* Return TRUE iff appropriate vector insns are available
5700 for the vector cond expr EXPR in VMODE mode. */
5701
5702 bool
5703 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5704 {
5705 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5706 return false;
5707 return true;
5708 }
5709
5710 /* Generate insns for VEC_COND_EXPR. */
5711
5712 rtx
5713 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5714 {
5715 enum insn_code icode;
5716 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5717 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5718 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5719
5720 icode = get_vcond_icode (vec_cond_expr, mode);
5721 if (icode == CODE_FOR_nothing)
5722 return 0;
5723
5724 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5725 target = gen_reg_rtx (mode);
5726
5727 /* Get comparison rtx. First expand both cond expr operands. */
5728 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5729 unsignedp, icode);
5730 cc_op0 = XEXP (comparison, 0);
5731 cc_op1 = XEXP (comparison, 1);
5732 /* Expand both operands and force them into registers, if required. */
5733 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5734 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5735 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5736 && mode != VOIDmode)
5737 rtx_op1 = force_reg (mode, rtx_op1);
5738
5739 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5740 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5741 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5742 && mode != VOIDmode)
5743 rtx_op2 = force_reg (mode, rtx_op2);
5744
5745 /* Emit instruction! */
5746 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5747 comparison, cc_op0, cc_op1));
5748
5749 return target;
5750 }
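
/* A sketch of the operand layout assumed above (matching the GEN_FCN call
 in expand_vec_cond_expr and the predicates used in vector_compare_rtx):

 operand 0  destination vector (TARGET)
 operand 1  value selected where the comparison holds
 operand 2  value selected where it does not
 operand 3  the comparison rtx itself
 operand 4  first comparison operand
 operand 5  second comparison operand

 A target's vcond/vcondu patterns are expected to accept operands in this
 order.  */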
5751
5752 \f
5753 /* This is an internal subroutine of the other compare_and_swap expanders.
5754 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5755 operation. TARGET is an optional place to store the value result of
5756 the operation. ICODE is the particular instruction to expand. Return
5757 the result of the operation. */
5758
5759 static rtx
5760 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5761 rtx target, enum insn_code icode)
5762 {
5763 enum machine_mode mode = GET_MODE (mem);
5764 rtx insn;
5765
5766 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5767 target = gen_reg_rtx (mode);
5768
5769 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5770 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5771 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5772 old_val = force_reg (mode, old_val);
5773
5774 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5775 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5776 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5777 new_val = force_reg (mode, new_val);
5778
5779 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5780 if (insn == NULL_RTX)
5781 return NULL_RTX;
5782 emit_insn (insn);
5783
5784 return target;
5785 }
5786
5787 /* Expand a compare-and-swap operation and return its value. */
5788
5789 rtx
5790 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5791 {
5792 enum machine_mode mode = GET_MODE (mem);
5793 enum insn_code icode = sync_compare_and_swap[mode];
5794
5795 if (icode == CODE_FOR_nothing)
5796 return NULL_RTX;
5797
5798 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5799 }
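
/* Illustrative mapping (an assumption about the callers, not something
 this file enforces): this is the expander behind source-level uses such as

 old = __sync_val_compare_and_swap (&x, expected, desired);

 where the builtin expander passes the memory, the expected value and the
 desired value here and uses the returned rtx as the result.  */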
5800
5801 /* Expand a compare-and-swap operation and store true into the result if
5802 the operation was successful and false otherwise. Return the result.
5803 Unlike other routines, TARGET is not optional. */
5804
5805 rtx
5806 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5807 {
5808 enum machine_mode mode = GET_MODE (mem);
5809 enum insn_code icode;
5810 rtx subtarget, label0, label1;
5811
5812 /* If the target supports a compare-and-swap pattern that simultaneously
5813 sets some flag for success, then use it. Otherwise use the regular
5814 compare-and-swap and follow that immediately with a compare insn. */
5815 icode = sync_compare_and_swap_cc[mode];
5816 switch (icode)
5817 {
5818 default:
5819 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5820 NULL_RTX, icode);
5821 if (subtarget != NULL_RTX)
5822 break;
5823
5824 /* FALLTHRU */
5825 case CODE_FOR_nothing:
5826 icode = sync_compare_and_swap[mode];
5827 if (icode == CODE_FOR_nothing)
5828 return NULL_RTX;
5829
5830 /* Ensure that if old_val == mem, we're not comparing
5831 against an old value. */
5832 if (MEM_P (old_val))
5833 old_val = force_reg (mode, old_val);
5834
5835 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5836 NULL_RTX, icode);
5837 if (subtarget == NULL_RTX)
5838 return NULL_RTX;
5839
5840 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5841 }
5842
5843 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5844 setcc instruction from the beginning. We don't work too hard here,
5845 but it's nice to not be stupid about initial code gen either. */
5846 if (STORE_FLAG_VALUE == 1)
5847 {
5848 icode = setcc_gen_code[EQ];
5849 if (icode != CODE_FOR_nothing)
5850 {
5851 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5852 rtx insn;
5853
5854 subtarget = target;
5855 if (!insn_data[icode].operand[0].predicate (target, cmode))
5856 subtarget = gen_reg_rtx (cmode);
5857
5858 insn = GEN_FCN (icode) (subtarget);
5859 if (insn)
5860 {
5861 emit_insn (insn);
5862 if (GET_MODE (target) != GET_MODE (subtarget))
5863 {
5864 convert_move (target, subtarget, 1);
5865 subtarget = target;
5866 }
5867 return subtarget;
5868 }
5869 }
5870 }
5871
5872 /* Without an appropriate setcc instruction, use a set of branches to
5873 get 1 and 0 stored into target. Presumably if the target has a
5874 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5875
5876 label0 = gen_label_rtx ();
5877 label1 = gen_label_rtx ();
5878
5879 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5880 emit_move_insn (target, const0_rtx);
5881 emit_jump_insn (gen_jump (label1));
5882 emit_barrier ();
5883 emit_label (label0);
5884 emit_move_insn (target, const1_rtx);
5885 emit_label (label1);
5886
5887 return target;
5888 }
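
/* For illustration only: when no usable setcc pattern exists, the tail of
 expand_bool_compare_and_swap materializes the 0/1 result roughly as

 if (<comparison was equal>) goto label0;
 target = 0;
 goto label1;
 label0:
 target = 1;
 label1:

 relying on ifcvt to tidy this up on targets whose STORE_FLAG_VALUE is
 not 1.  */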
5889
5890 /* This is a helper function for the other atomic operations. This function
5891 emits a loop containing SEQ that iterates until the compare-and-swap
5892 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5893 a set of instructions that takes a value from OLD_REG as an input and
5894 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5895 set to the current contents of MEM. After SEQ, a compare-and-swap will
5896 attempt to update MEM with NEW_REG. The function returns true when the
5897 loop was generated successfully. */
5898
5899 static bool
5900 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5901 {
5902 enum machine_mode mode = GET_MODE (mem);
5903 enum insn_code icode;
5904 rtx label, cmp_reg, subtarget;
5905
5906 /* The loop we want to generate looks like
5907
5908 cmp_reg = mem;
5909 label:
5910 old_reg = cmp_reg;
5911 seq;
5912 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5913 if (cmp_reg != old_reg)
5914 goto label;
5915
5916 Note that we only do the plain load from memory once. Subsequent
5917 iterations use the value loaded by the compare-and-swap pattern. */
5918
5919 label = gen_label_rtx ();
5920 cmp_reg = gen_reg_rtx (mode);
5921
5922 emit_move_insn (cmp_reg, mem);
5923 emit_label (label);
5924 emit_move_insn (old_reg, cmp_reg);
5925 if (seq)
5926 emit_insn (seq);
5927
5928 /* If the target supports a compare-and-swap pattern that simultaneously
5929 sets some flag for success, then use it. Otherwise use the regular
5930 compare-and-swap and follow that immediately with a compare insn. */
5931 icode = sync_compare_and_swap_cc[mode];
5932 switch (icode)
5933 {
5934 default:
5935 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5936 cmp_reg, icode);
5937 if (subtarget != NULL_RTX)
5938 {
5939 gcc_assert (subtarget == cmp_reg);
5940 break;
5941 }
5942
5943 /* FALLTHRU */
5944 case CODE_FOR_nothing:
5945 icode = sync_compare_and_swap[mode];
5946 if (icode == CODE_FOR_nothing)
5947 return false;
5948
5949 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5950 cmp_reg, icode);
5951 if (subtarget == NULL_RTX)
5952 return false;
5953 if (subtarget != cmp_reg)
5954 emit_move_insn (cmp_reg, subtarget);
5955
5956 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5957 }
5958
5959 /* ??? Mark this jump predicted not taken? */
5960 emit_jump_insn (bcc_gen_fctn[NE] (label));
5961
5962 return true;
5963 }
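
/* Illustrative use (hypothetical, but mirroring expand_sync_operation
 below): to get an atomic "*mem &= val" out of a bare compare-and-swap,
 a caller builds SEQ with the ordinary arithmetic expander and hands it
 to the loop generator:

 rtx old_reg = gen_reg_rtx (mode), new_reg;
 start_sequence ();
 new_reg = expand_simple_binop (mode, AND, old_reg, val, NULL_RTX,
 true, OPTAB_LIB_WIDEN);
 seq = get_insns ();
 end_sequence ();
 expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);  */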
5964
5965 /* This function generates the atomic operation MEM CODE= VAL. In this
5966 case, we do not care about any resulting value. Returns NULL if we
5967 cannot generate the operation. */
5968
5969 rtx
5970 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5971 {
5972 enum machine_mode mode = GET_MODE (mem);
5973 enum insn_code icode;
5974 rtx insn;
5975
5976 /* Look to see if the target supports the operation directly. */
5977 switch (code)
5978 {
5979 case PLUS:
5980 icode = sync_add_optab[mode];
5981 break;
5982 case IOR:
5983 icode = sync_ior_optab[mode];
5984 break;
5985 case XOR:
5986 icode = sync_xor_optab[mode];
5987 break;
5988 case AND:
5989 icode = sync_and_optab[mode];
5990 break;
5991 case NOT:
5992 icode = sync_nand_optab[mode];
5993 break;
5994
5995 case MINUS:
5996 icode = sync_sub_optab[mode];
5997 if (icode == CODE_FOR_nothing)
5998 {
5999 icode = sync_add_optab[mode];
6000 if (icode != CODE_FOR_nothing)
6001 {
6002 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6003 code = PLUS;
6004 }
6005 }
6006 break;
6007
6008 default:
6009 gcc_unreachable ();
6010 }
6011
6012 /* Generate the direct operation, if present. */
6013 if (icode != CODE_FOR_nothing)
6014 {
6015 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6016 val = convert_modes (mode, GET_MODE (val), val, 1);
6017 if (!insn_data[icode].operand[1].predicate (val, mode))
6018 val = force_reg (mode, val);
6019
6020 insn = GEN_FCN (icode) (mem, val);
6021 if (insn)
6022 {
6023 emit_insn (insn);
6024 return const0_rtx;
6025 }
6026 }
6027
6028 /* Failing that, generate a compare-and-swap loop in which we perform the
6029 operation with normal arithmetic instructions. */
6030 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6031 {
6032 rtx t0 = gen_reg_rtx (mode), t1;
6033
6034 start_sequence ();
6035
6036 t1 = t0;
6037 if (code == NOT)
6038 {
6039 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6040 code = AND;
6041 }
6042 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6043 true, OPTAB_LIB_WIDEN);
6044
6045 insn = get_insns ();
6046 end_sequence ();
6047
6048 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6049 return const0_rtx;
6050 }
6051
6052 return NULL_RTX;
6053 }
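
/* Illustrative mapping (an assumption about the callers): this entry point
 handles the "result ignored" forms of the __sync builtins, e.g.

 __sync_fetch_and_add (&counter, 1);    PLUS
 __sync_fetch_and_and (&flags, mask);   AND
 __sync_fetch_and_nand (&x, y);         NOT (i.e. the nand optab)

 when the value of the expression is not used by the source program.  */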
6054
6055 /* This function generates the atomic operation MEM CODE= VAL. In this
6056 case, we do care about the resulting value: if AFTER is true then
6057 return the value MEM holds after the operation, if AFTER is false
6058 then return the value MEM holds before the operation. TARGET is an
6059 optional place for the result value to be stored. */
6060
6061 rtx
6062 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6063 bool after, rtx target)
6064 {
6065 enum machine_mode mode = GET_MODE (mem);
6066 enum insn_code old_code, new_code, icode;
6067 bool compensate;
6068 rtx insn;
6069
6070 /* Look to see if the target supports the operation directly. */
6071 switch (code)
6072 {
6073 case PLUS:
6074 old_code = sync_old_add_optab[mode];
6075 new_code = sync_new_add_optab[mode];
6076 break;
6077 case IOR:
6078 old_code = sync_old_ior_optab[mode];
6079 new_code = sync_new_ior_optab[mode];
6080 break;
6081 case XOR:
6082 old_code = sync_old_xor_optab[mode];
6083 new_code = sync_new_xor_optab[mode];
6084 break;
6085 case AND:
6086 old_code = sync_old_and_optab[mode];
6087 new_code = sync_new_and_optab[mode];
6088 break;
6089 case NOT:
6090 old_code = sync_old_nand_optab[mode];
6091 new_code = sync_new_nand_optab[mode];
6092 break;
6093
6094 case MINUS:
6095 old_code = sync_old_sub_optab[mode];
6096 new_code = sync_new_sub_optab[mode];
6097 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6098 {
6099 old_code = sync_old_add_optab[mode];
6100 new_code = sync_new_add_optab[mode];
6101 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6102 {
6103 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6104 code = PLUS;
6105 }
6106 }
6107 break;
6108
6109 default:
6110 gcc_unreachable ();
6111 }
6112
6113 /* If the target supports the proper new/old operation, great. But if
6114 we only support the opposite old/new operation, check to see if we
6115 can compensate. In the case in which the old value is supported, we
6116 can always perform the operation again with normal arithmetic. In
6117 the case in which only the new value is supported, we can handle
6118 this only if the operation is reversible. */
6119 compensate = false;
6120 if (after)
6121 {
6122 icode = new_code;
6123 if (icode == CODE_FOR_nothing)
6124 {
6125 icode = old_code;
6126 if (icode != CODE_FOR_nothing)
6127 compensate = true;
6128 }
6129 }
6130 else
6131 {
6132 icode = old_code;
6133 if (icode == CODE_FOR_nothing
6134 && (code == PLUS || code == MINUS || code == XOR))
6135 {
6136 icode = new_code;
6137 if (icode != CODE_FOR_nothing)
6138 compensate = true;
6139 }
6140 }
6141
6142 /* If we found something supported, great. */
6143 if (icode != CODE_FOR_nothing)
6144 {
6145 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6146 target = gen_reg_rtx (mode);
6147
6148 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6149 val = convert_modes (mode, GET_MODE (val), val, 1);
6150 if (!insn_data[icode].operand[2].predicate (val, mode))
6151 val = force_reg (mode, val);
6152
6153 insn = GEN_FCN (icode) (target, mem, val);
6154 if (insn)
6155 {
6156 emit_insn (insn);
6157
6158 /* If we need to compensate for using an operation with the
6159 wrong return value, do so now. */
6160 if (compensate)
6161 {
6162 if (!after)
6163 {
6164 if (code == PLUS)
6165 code = MINUS;
6166 else if (code == MINUS)
6167 code = PLUS;
6168 }
6169
6170 if (code == NOT)
6171 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6172 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6173 true, OPTAB_LIB_WIDEN);
6174 }
6175
6176 return target;
6177 }
6178 }
6179
6180 /* Failing that, generate a compare-and-swap loop in which we perform the
6181 operation with normal arithmetic instructions. */
6182 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6183 {
6184 rtx t0 = gen_reg_rtx (mode), t1;
6185
6186 if (!target || !register_operand (target, mode))
6187 target = gen_reg_rtx (mode);
6188
6189 start_sequence ();
6190
6191 if (!after)
6192 emit_move_insn (target, t0);
6193 t1 = t0;
6194 if (code == NOT)
6195 {
6196 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6197 code = AND;
6198 }
6199 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6200 true, OPTAB_LIB_WIDEN);
6201 if (after)
6202 emit_move_insn (target, t1);
6203
6204 insn = get_insns ();
6205 end_sequence ();
6206
6207 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6208 return target;
6209 }
6210
6211 return NULL_RTX;
6212 }
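
/* A worked example of the compensation logic above (illustrative only):
 suppose the caller wants the value *after* an addition (AFTER true) but
 the target only provides the "old value" pattern.  The expander then
 emits the sync_old_add insn and recomputes the result:

 target = <old value of *mem from the insn>;
 target = target + val;

 Conversely, with only a "new value" pattern and AFTER false, the inverse
 operation (MINUS for PLUS and vice versa) recovers the old value, which
 is why only the reversible codes PLUS, MINUS and XOR take that path.  */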
6213
6214 /* This function expands a test-and-set operation. Ideally we atomically
6215 store VAL in MEM and return the previous value in MEM. Some targets
6216 may not support this operation and only support VAL with the constant 1;
6217 in that case the return value will still be 0/1, but the exact value
6218 stored in MEM is target defined. TARGET is an optional place to stick
6219 the return value. */
6220
6221 rtx
6222 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6223 {
6224 enum machine_mode mode = GET_MODE (mem);
6225 enum insn_code icode;
6226 rtx insn;
6227
6228 /* If the target supports the test-and-set directly, great. */
6229 icode = sync_lock_test_and_set[mode];
6230 if (icode != CODE_FOR_nothing)
6231 {
6232 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6233 target = gen_reg_rtx (mode);
6234
6235 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6236 val = convert_modes (mode, GET_MODE (val), val, 1);
6237 if (!insn_data[icode].operand[2].predicate (val, mode))
6238 val = force_reg (mode, val);
6239
6240 insn = GEN_FCN (icode) (target, mem, val);
6241 if (insn)
6242 {
6243 emit_insn (insn);
6244 return target;
6245 }
6246 }
6247
6248 /* Otherwise, use a compare-and-swap loop for the exchange. */
6249 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6250 {
6251 if (!target || !register_operand (target, mode))
6252 target = gen_reg_rtx (mode);
6253 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6254 val = convert_modes (mode, GET_MODE (val), val, 1);
6255 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6256 return target;
6257 }
6258
6259 return NULL_RTX;
6260 }
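
/* Illustrative mapping (an assumption about the callers): this expander
 backs the __sync_lock_test_and_set builtin, e.g. a minimal spin lock:

 while (__sync_lock_test_and_set (&lock, 1))
 ;
 ... critical section ...
 __sync_lock_release (&lock);

 On targets without the native pattern, the compare-and-swap loop used
 above provides a full exchange rather than the restricted 0/1 form.  */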
6261
6262 #include "gt-optabs.h"