1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
33 #include "rtl.h"
34 #include "tree.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "except.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "libfuncs.h"
42 #include "recog.h"
43 #include "reload.h"
44 #include "ggc.h"
45 #include "real.h"
46 #include "basic-block.h"
47 #include "target.h"
48
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
52
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
55
56 See expr.h for documentation of these optabs. */
57
58 optab optab_table[OTI_MAX];
59
60 rtx libfunc_table[LTI_MAX];
61
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
64
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
67
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
70
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
76
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
84
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
86 #endif
87
88 /* Indexed by the machine mode, gives the insn code for vector conditional
89 operation. */
90
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
98
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 int);
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 int *);
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128
129 #ifndef HAVE_conditional_trap
130 #define HAVE_conditional_trap 0
131 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
132 #endif
133 \f
134 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
135 the result of operation CODE applied to OP0 (and OP1 if it is a binary
136 operation).
137
138 If the last insn does not set TARGET, don't do anything, but return 1.
139
140 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
141 don't add the REG_EQUAL note but return 0. Our caller can then try
142 again, ensuring that TARGET is not one of the operands. */
143
144 static int
145 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 {
147 rtx last_insn, insn, set;
148 rtx note;
149
150 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151
152 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
153 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
154 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
155 && GET_RTX_CLASS (code) != RTX_COMPARE
156 && GET_RTX_CLASS (code) != RTX_UNARY)
157 return 1;
158
159 if (GET_CODE (target) == ZERO_EXTRACT)
160 return 1;
161
162 for (last_insn = insns;
163 NEXT_INSN (last_insn) != NULL_RTX;
164 last_insn = NEXT_INSN (last_insn))
165 ;
166
167 set = single_set (last_insn);
168 if (set == NULL_RTX)
169 return 1;
170
171 if (! rtx_equal_p (SET_DEST (set), target)
172 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
173 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
174 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
175 return 1;
176
177 /* If TARGET is in OP0 or OP1, check if anything in INSNS sets TARGET
178 besides the last insn. */
179 if (reg_overlap_mentioned_p (target, op0)
180 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 {
182 insn = PREV_INSN (last_insn);
183 while (insn != NULL_RTX)
184 {
185 if (reg_set_p (target, insn))
186 return 0;
187
188 insn = PREV_INSN (insn);
189 }
190 }
191
192 if (GET_RTX_CLASS (code) == RTX_UNARY)
193 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 else
195 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196
197 set_unique_reg_note (last_insn, REG_EQUAL, note);
198
199 return 1;
200 }
201 \f
202 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
203 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
204 not actually do a sign-extend or zero-extend, but can leave the
205 higher-order bits of the result rtx undefined, for example, in the case
206 of logical operations, but not right shifts. */
207
208 static rtx
209 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
210 int unsignedp, int no_extend)
211 {
212 rtx result;
213
214 /* If we don't have to extend and this is a constant, return it. */
215 if (no_extend && GET_MODE (op) == VOIDmode)
216 return op;
217
218 /* If we must extend, do so. If OP is a SUBREG for a promoted object, also
219 extend since it will be more efficient to do so unless the signedness of
220 a promoted object differs from our extension. */
221 if (! no_extend
222 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
223 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
224 return convert_modes (mode, oldmode, op, unsignedp);
225
226 /* If MODE is no wider than a single word, we return a paradoxical
227 SUBREG. */
228 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
229 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230
231 /* Otherwise, get an object of MODE, clobber it, and set the low-order
232 part to OP. */
233
234 result = gen_reg_rtx (mode);
235 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
236 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
237 return result;
238 }
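/* Illustrative aside, not part of GCC and guarded out of the build: the
   NO_EXTEND case above relies on the fact that for bitwise operations,
   addition, subtraction, multiplication and left shifts, the low-order
   bits of the result depend only on the low-order bits of the inputs, so
   garbage in the widened high-order bits is harmless once the result is
   truncated back.  Right shifts do not have this property.  A minimal C
   sketch of that fact, using fixed-width integer types instead of RTL;
   the helper name and the junk constants are made up for illustration.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
no_extend_is_safe_for_logical_ops (uint16_t a, uint16_t b)
{
  /* Widen with arbitrary junk in the high-order half, much as a
     paradoxical SUBREG leaves those bits undefined.  */
  uint32_t wa = 0xdead0000u | a;
  uint32_t wb = 0xbeef0000u | b;

  /* Truncating the wide AND recovers the narrow AND...  */
  assert ((uint16_t) (wa & wb) == (uint16_t) (a & b));

  /* ...but truncating a wide right shift does not in general recover the
     narrow right shift, because the high-order junk is shifted down:
     (uint16_t) (wa >> 3) != (uint16_t) (a >> 3) for these junk bits.  */
}
#endif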
239 \f
240 /* Return the optab used for computing the operation given by
241 the tree code, CODE. This function is not always usable (for
242 example, it cannot give complete results for multiplication
243 or division) but probably ought to be relied on more widely
244 throughout the expander. */
245 optab
246 optab_for_tree_code (enum tree_code code, tree type)
247 {
248 bool trapv;
249 switch (code)
250 {
251 case BIT_AND_EXPR:
252 return and_optab;
253
254 case BIT_IOR_EXPR:
255 return ior_optab;
256
257 case BIT_NOT_EXPR:
258 return one_cmpl_optab;
259
260 case BIT_XOR_EXPR:
261 return xor_optab;
262
263 case TRUNC_MOD_EXPR:
264 case CEIL_MOD_EXPR:
265 case FLOOR_MOD_EXPR:
266 case ROUND_MOD_EXPR:
267 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268
269 case RDIV_EXPR:
270 case TRUNC_DIV_EXPR:
271 case CEIL_DIV_EXPR:
272 case FLOOR_DIV_EXPR:
273 case ROUND_DIV_EXPR:
274 case EXACT_DIV_EXPR:
275 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276
277 case LSHIFT_EXPR:
278 return ashl_optab;
279
280 case RSHIFT_EXPR:
281 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282
283 case LROTATE_EXPR:
284 return rotl_optab;
285
286 case RROTATE_EXPR:
287 return rotr_optab;
288
289 case MAX_EXPR:
290 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291
292 case MIN_EXPR:
293 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294
295 case REALIGN_LOAD_EXPR:
296 return vec_realign_load_optab;
297
298 case WIDEN_SUM_EXPR:
299 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
300
301 case DOT_PROD_EXPR:
302 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
303
304 case REDUC_MAX_EXPR:
305 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
306
307 case REDUC_MIN_EXPR:
308 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309
310 case REDUC_PLUS_EXPR:
311 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312
313 case VEC_LSHIFT_EXPR:
314 return vec_shl_optab;
315
316 case VEC_RSHIFT_EXPR:
317 return vec_shr_optab;
318
319 case VEC_WIDEN_MULT_HI_EXPR:
320 return TYPE_UNSIGNED (type) ?
321 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
322
323 case VEC_WIDEN_MULT_LO_EXPR:
324 return TYPE_UNSIGNED (type) ?
325 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
326
327 case VEC_UNPACK_HI_EXPR:
328 return TYPE_UNSIGNED (type) ?
329 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
330
331 case VEC_UNPACK_LO_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
334
335 case VEC_PACK_MOD_EXPR:
336 return vec_pack_mod_optab;
337
338 case VEC_PACK_SAT_EXPR:
339 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
340
341 default:
342 break;
343 }
344
345 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
346 switch (code)
347 {
348 case PLUS_EXPR:
349 return trapv ? addv_optab : add_optab;
350
351 case MINUS_EXPR:
352 return trapv ? subv_optab : sub_optab;
353
354 case MULT_EXPR:
355 return trapv ? smulv_optab : smul_optab;
356
357 case NEGATE_EXPR:
358 return trapv ? negv_optab : neg_optab;
359
360 case ABS_EXPR:
361 return trapv ? absv_optab : abs_optab;
362
363 case VEC_EXTRACT_EVEN_EXPR:
364 return vec_extract_even_optab;
365
366 case VEC_EXTRACT_ODD_EXPR:
367 return vec_extract_odd_optab;
368
369 case VEC_INTERLEAVE_HIGH_EXPR:
370 return vec_interleave_high_optab;
371
372 case VEC_INTERLEAVE_LOW_EXPR:
373 return vec_interleave_low_optab;
374
375 default:
376 return NULL;
377 }
378 }
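/* Illustrative aside, not part of GCC and guarded out of the build: the
   usual way the result of optab_for_tree_code is consumed, mirroring what
   expand_widen_pattern_expr and expand_vec_shift_expr do later in this
   file.  The helper name and the tree EXP are hypothetical.  */
#if 0
static bool
tree_code_open_codable_p (tree exp)
{
  tree type = TREE_TYPE (exp);
  enum machine_mode mode = TYPE_MODE (type);
  optab op = optab_for_tree_code (TREE_CODE (exp), type);

  /* A null optab means the tree code has no optab at all; a handler of
     CODE_FOR_nothing means this target has no insn for MODE.  */
  return op && op->handlers[(int) mode].insn_code != CODE_FOR_nothing;
}
#endif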
379 \f
380
381 /* Expand vector widening operations.
382
383 There are two different classes of operations handled here:
384 1) Operations whose result is wider than all the arguments to the operation.
385 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
386 In this case OP0 and optionally OP1 would be initialized,
387 but WIDE_OP wouldn't (not relevant for this case).
388 2) Operations whose result is of the same size as the last argument to the
389 operation, but wider than all the other arguments to the operation.
390 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
391 In this case WIDE_OP and OP0, and optionally OP1, would be initialized.
392
393 E.g., when called to expand the following operations, this is how
394 the arguments will be initialized:
395                                  nops    OP0      OP1      WIDE_OP
396    widening-sum                  2       oprnd0   -        oprnd1
397    widening-dot-product          3       oprnd0   oprnd1   oprnd2
398    widening-mult                 2       oprnd0   oprnd1   -
399    type-promotion (vec-unpack)   1       oprnd0   -        -        */
400
401 rtx
402 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
403 int unsignedp)
404 {
405 tree oprnd0, oprnd1, oprnd2;
406 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
407 optab widen_pattern_optab;
408 int icode;
409 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
410 rtx temp;
411 rtx pat;
412 rtx xop0, xop1, wxop;
413 int nops = TREE_OPERAND_LENGTH (exp);
414
415 oprnd0 = TREE_OPERAND (exp, 0);
416 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
417 widen_pattern_optab =
418 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
419 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
420 gcc_assert (icode != CODE_FOR_nothing);
421 xmode0 = insn_data[icode].operand[1].mode;
422
423 if (nops >= 2)
424 {
425 oprnd1 = TREE_OPERAND (exp, 1);
426 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
427 xmode1 = insn_data[icode].operand[2].mode;
428 }
429
430 /* The last operand is of a wider mode than the rest of the operands. */
431 if (nops == 2)
432 {
433 wmode = tmode1;
434 wxmode = xmode1;
435 }
436 else if (nops == 3)
437 {
438 gcc_assert (tmode1 == tmode0);
439 gcc_assert (op1);
440 oprnd2 = TREE_OPERAND (exp, 2);
441 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
442 wxmode = insn_data[icode].operand[3].mode;
443 }
444
445 if (!wide_op)
446 wmode = wxmode = insn_data[icode].operand[0].mode;
447
448 if (!target
449 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
450 temp = gen_reg_rtx (wmode);
451 else
452 temp = target;
453
454 xop0 = op0;
455 xop1 = op1;
456 wxop = wide_op;
457
458 /* In case the insn wants input operands in modes different from
459 those of the actual operands, convert the operands. It would
460 seem that we don't need to convert CONST_INTs, but we do, so
461 that they're properly zero-extended, sign-extended or truncated
462 for their mode. */
463
464 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
465 xop0 = convert_modes (xmode0,
466 GET_MODE (op0) != VOIDmode
467 ? GET_MODE (op0)
468 : tmode0,
469 xop0, unsignedp);
470
471 if (op1)
472 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
473 xop1 = convert_modes (xmode1,
474 GET_MODE (op1) != VOIDmode
475 ? GET_MODE (op1)
476 : tmode1,
477 xop1, unsignedp);
478
479 if (wide_op)
480 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
481 wxop = convert_modes (wxmode,
482 GET_MODE (wide_op) != VOIDmode
483 ? GET_MODE (wide_op)
484 : wmode,
485 wxop, unsignedp);
486
487 /* Now, if insn's predicates don't allow our operands, put them into
488 pseudo regs. */
489
490 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
491 && xmode0 != VOIDmode)
492 xop0 = copy_to_mode_reg (xmode0, xop0);
493
494 if (op1)
495 {
496 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
497 && xmode1 != VOIDmode)
498 xop1 = copy_to_mode_reg (xmode1, xop1);
499
500 if (wide_op)
501 {
502 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
503 && wxmode != VOIDmode)
504 wxop = copy_to_mode_reg (wxmode, wxop);
505
506 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
507 }
508 else
509 pat = GEN_FCN (icode) (temp, xop0, xop1);
510 }
511 else
512 {
513 if (wide_op)
514 {
515 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
516 && wxmode != VOIDmode)
517 wxop = copy_to_mode_reg (wxmode, wxop);
518
519 pat = GEN_FCN (icode) (temp, xop0, wxop);
520 }
521 else
522 pat = GEN_FCN (icode) (temp, xop0);
523 }
524
525 emit_insn (pat);
526 return temp;
527 }
528
529 /* Generate code to perform an operation specified by TERNARY_OPTAB
530 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
531
532 UNSIGNEDP is for the case where we have to widen the operands
533 to perform the operation. It says to use zero-extension.
534
535 If TARGET is nonzero, the value
536 is generated there, if it is convenient to do so.
537 In all cases an rtx is returned for the locus of the value;
538 this may or may not be TARGET. */
539
540 rtx
541 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
542 rtx op1, rtx op2, rtx target, int unsignedp)
543 {
544 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
545 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
546 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
547 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
548 rtx temp;
549 rtx pat;
550 rtx xop0 = op0, xop1 = op1, xop2 = op2;
551
552 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
553 != CODE_FOR_nothing);
554
555 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
556 temp = gen_reg_rtx (mode);
557 else
558 temp = target;
559
560 /* In case the insn wants input operands in modes different from
561 those of the actual operands, convert the operands. It would
562 seem that we don't need to convert CONST_INTs, but we do, so
563 that they're properly zero-extended, sign-extended or truncated
564 for their mode. */
565
566 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
567 xop0 = convert_modes (mode0,
568 GET_MODE (op0) != VOIDmode
569 ? GET_MODE (op0)
570 : mode,
571 xop0, unsignedp);
572
573 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
574 xop1 = convert_modes (mode1,
575 GET_MODE (op1) != VOIDmode
576 ? GET_MODE (op1)
577 : mode,
578 xop1, unsignedp);
579
580 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
581 xop2 = convert_modes (mode2,
582 GET_MODE (op2) != VOIDmode
583 ? GET_MODE (op2)
584 : mode,
585 xop2, unsignedp);
586
587 /* Now, if insn's predicates don't allow our operands, put them into
588 pseudo regs. */
589
590 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
591 && mode0 != VOIDmode)
592 xop0 = copy_to_mode_reg (mode0, xop0);
593
594 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
595 && mode1 != VOIDmode)
596 xop1 = copy_to_mode_reg (mode1, xop1);
597
598 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
599 && mode2 != VOIDmode)
600 xop2 = copy_to_mode_reg (mode2, xop2);
601
602 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
603
604 emit_insn (pat);
605 return temp;
606 }
607
608
609 /* Like expand_binop, but return a constant rtx if the result can be
610 calculated at compile time. The arguments and return value are
611 otherwise the same as for expand_binop. */
612
613 static rtx
614 simplify_expand_binop (enum machine_mode mode, optab binoptab,
615 rtx op0, rtx op1, rtx target, int unsignedp,
616 enum optab_methods methods)
617 {
618 if (CONSTANT_P (op0) && CONSTANT_P (op1))
619 {
620 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
621
622 if (x)
623 return x;
624 }
625
626 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
627 }
628
629 /* Like simplify_expand_binop, but always put the result in TARGET.
630 Return true if the expansion succeeded. */
631
632 bool
633 force_expand_binop (enum machine_mode mode, optab binoptab,
634 rtx op0, rtx op1, rtx target, int unsignedp,
635 enum optab_methods methods)
636 {
637 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
638 target, unsignedp, methods);
639 if (x == 0)
640 return false;
641 if (x != target)
642 emit_move_insn (target, x);
643 return true;
644 }
645
646 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
647
648 rtx
649 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
650 {
651 enum insn_code icode;
652 rtx rtx_op1, rtx_op2;
653 enum machine_mode mode1;
654 enum machine_mode mode2;
655 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
656 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
657 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
658 optab shift_optab;
659 rtx pat;
660
661 switch (TREE_CODE (vec_shift_expr))
662 {
663 case VEC_RSHIFT_EXPR:
664 shift_optab = vec_shr_optab;
665 break;
666 case VEC_LSHIFT_EXPR:
667 shift_optab = vec_shl_optab;
668 break;
669 default:
670 gcc_unreachable ();
671 }
672
673 icode = (int) shift_optab->handlers[(int) mode].insn_code;
674 gcc_assert (icode != CODE_FOR_nothing);
675
676 mode1 = insn_data[icode].operand[1].mode;
677 mode2 = insn_data[icode].operand[2].mode;
678
679 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
680 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
681 && mode1 != VOIDmode)
682 rtx_op1 = force_reg (mode1, rtx_op1);
683
684 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
685 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
686 && mode2 != VOIDmode)
687 rtx_op2 = force_reg (mode2, rtx_op2);
688
689 if (!target
690 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
691 target = gen_reg_rtx (mode);
692
693 /* Emit instruction */
694 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
695 gcc_assert (pat);
696 emit_insn (pat);
697
698 return target;
699 }
700
701 /* This subroutine of expand_doubleword_shift handles the cases in which
702 the effective shift value is >= BITS_PER_WORD. The arguments and return
703 value are the same as for the parent routine, except that SUPERWORD_OP1
704 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
705 INTO_TARGET may be null if the caller has decided to calculate it. */
706
707 static bool
708 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
709 rtx outof_target, rtx into_target,
710 int unsignedp, enum optab_methods methods)
711 {
712 if (into_target != 0)
713 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
714 into_target, unsignedp, methods))
715 return false;
716
717 if (outof_target != 0)
718 {
719 /* For a signed right shift, we must fill OUTOF_TARGET with copies
720 of the sign bit, otherwise we must fill it with zeros. */
721 if (binoptab != ashr_optab)
722 emit_move_insn (outof_target, CONST0_RTX (word_mode));
723 else
724 if (!force_expand_binop (word_mode, binoptab,
725 outof_input, GEN_INT (BITS_PER_WORD - 1),
726 outof_target, unsignedp, methods))
727 return false;
728 }
729 return true;
730 }
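/* Illustrative aside, not part of GCC and guarded out of the build: the
   superword case above on a hypothetical target with 32-bit words, written
   with plain C integers.  For an effective count N in [32, 64), every bit
   of the result comes from OUTOF_INPUT: the "into" half of a logical right
   shift is OUTOF_INPUT >> (N - 32) (SUPERWORD_OP1 is that N - 32), and the
   "outof" half is all zeros; an arithmetic right shift would instead fill
   it with copies of the sign bit, (int32_t) outof_input >> 31.  The struct
   and helper names are made up for illustration.  */
#if 0
#include <stdint.h>

struct dword { uint32_t outof, into; };   /* the two halves of the result */

static struct dword
superword_lshiftrt (uint32_t outof_input, unsigned int n)
{
  struct dword r;
  r.into = outof_input >> (n - 32);   /* n is known to be in [32, 64)  */
  r.outof = 0;                        /* zero-fill for a logical shift  */
  return r;
}
#endif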
731
732 /* This subroutine of expand_doubleword_shift handles the cases in which
733 the effective shift value is < BITS_PER_WORD. The arguments and return
734 value are the same as for the parent routine. */
735
736 static bool
737 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
738 rtx outof_input, rtx into_input, rtx op1,
739 rtx outof_target, rtx into_target,
740 int unsignedp, enum optab_methods methods,
741 unsigned HOST_WIDE_INT shift_mask)
742 {
743 optab reverse_unsigned_shift, unsigned_shift;
744 rtx tmp, carries;
745
746 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
747 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
748
749 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
750 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
751 the opposite direction to BINOPTAB. */
752 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
753 {
754 carries = outof_input;
755 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
756 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
757 0, true, methods);
758 }
759 else
760 {
761 /* We must avoid shifting by BITS_PER_WORD bits since that is either
762 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
763 has unknown behavior. Do a single shift first, then shift by the
764 remainder. It's OK to use ~OP1 as the remainder if shift counts
765 are truncated to the mode size. */
766 carries = expand_binop (word_mode, reverse_unsigned_shift,
767 outof_input, const1_rtx, 0, unsignedp, methods);
768 if (shift_mask == BITS_PER_WORD - 1)
769 {
770 tmp = immed_double_const (-1, -1, op1_mode);
771 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
772 0, true, methods);
773 }
774 else
775 {
776 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
777 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
778 0, true, methods);
779 }
780 }
781 if (tmp == 0 || carries == 0)
782 return false;
783 carries = expand_binop (word_mode, reverse_unsigned_shift,
784 carries, tmp, 0, unsignedp, methods);
785 if (carries == 0)
786 return false;
787
788 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
789 so the result can go directly into INTO_TARGET if convenient. */
790 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
791 into_target, unsignedp, methods);
792 if (tmp == 0)
793 return false;
794
795 /* Now OR in the bits carried over from OUTOF_INPUT. */
796 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
797 into_target, unsignedp, methods))
798 return false;
799
800 /* Use a standard word_mode shift for the out-of half. */
801 if (outof_target != 0)
802 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
803 outof_target, unsignedp, methods))
804 return false;
805
806 return true;
807 }
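/* Illustrative aside, not part of GCC and guarded out of the build: the
   subword case above for a 64-bit logical right shift on a hypothetical
   target with 32-bit words.  The bits that cross the word boundary (the
   "carries") are conceptually OUTOF_INPUT << (32 - N), but that expression
   shifts by 32 when N is 0; splitting it into a shift by 1 followed by a
   shift by 31 - N keeps every word-mode shift count inside [0, 32), which
   is the same trick the routine above uses.  The helper name is made up
   for illustration.  */
#if 0
#include <stdint.h>

static uint64_t
subword_lshiftrt (uint32_t outof_input, uint32_t into_input, unsigned int n)
{
  /* N is known to be in [0, 32).  For 1 <= N <= 31 the expression below
     equals outof_input << (32 - N); for N == 0 it correctly yields 0.  */
  uint32_t carries = (outof_input << 1) << (31 - n);

  uint32_t into = (into_input >> n) | carries;
  uint32_t outof = outof_input >> n;   /* ordinary word-mode shift  */

  return ((uint64_t) outof << 32) | into;
}
#endif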
808
809
810 #ifdef HAVE_conditional_move
811 /* Try implementing expand_doubleword_shift using conditional moves.
812 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
813 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
814 are the shift counts to use in the former and latter case. All other
815 arguments are the same as the parent routine. */
816
817 static bool
818 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
819 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
820 rtx outof_input, rtx into_input,
821 rtx subword_op1, rtx superword_op1,
822 rtx outof_target, rtx into_target,
823 int unsignedp, enum optab_methods methods,
824 unsigned HOST_WIDE_INT shift_mask)
825 {
826 rtx outof_superword, into_superword;
827
828 /* Put the superword version of the output into OUTOF_SUPERWORD and
829 INTO_SUPERWORD. */
830 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
831 if (outof_target != 0 && subword_op1 == superword_op1)
832 {
833 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
834 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
835 into_superword = outof_target;
836 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
837 outof_superword, 0, unsignedp, methods))
838 return false;
839 }
840 else
841 {
842 into_superword = gen_reg_rtx (word_mode);
843 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
844 outof_superword, into_superword,
845 unsignedp, methods))
846 return false;
847 }
848
849 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
850 if (!expand_subword_shift (op1_mode, binoptab,
851 outof_input, into_input, subword_op1,
852 outof_target, into_target,
853 unsignedp, methods, shift_mask))
854 return false;
855
856 /* Select between them. Do the INTO half first because INTO_SUPERWORD
857 might be the current value of OUTOF_TARGET. */
858 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
859 into_target, into_superword, word_mode, false))
860 return false;
861
862 if (outof_target != 0)
863 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
864 outof_target, outof_superword,
865 word_mode, false))
866 return false;
867
868 return true;
869 }
870 #endif
871
872 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
873 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
874 input operand; the shift moves bits in the direction OUTOF_INPUT->
875 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
876 of the target. OP1 is the shift count and OP1_MODE is its mode.
877 If OP1 is constant, it will have been truncated as appropriate
878 and is known to be nonzero.
879
880 If SHIFT_MASK is zero, the result of word shifts is undefined when the
881 shift count is outside the range [0, BITS_PER_WORD). This routine must
882 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
883
884 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
885 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
886 fill with zeros or sign bits as appropriate.
887
888 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
889 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
890 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
891 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
892 are undefined.
893
894 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
895 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
896 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
897 function wants to calculate it itself.
898
899 Return true if the shift could be successfully synthesized. */
900
901 static bool
902 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
903 rtx outof_input, rtx into_input, rtx op1,
904 rtx outof_target, rtx into_target,
905 int unsignedp, enum optab_methods methods,
906 unsigned HOST_WIDE_INT shift_mask)
907 {
908 rtx superword_op1, tmp, cmp1, cmp2;
909 rtx subword_label, done_label;
910 enum rtx_code cmp_code;
911
912 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
913 fill the result with sign or zero bits as appropriate. If so, the value
914 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
915 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
916 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
917
918 This isn't worthwhile for constant shifts since the optimizers will
919 cope better with in-range shift counts. */
920 if (shift_mask >= BITS_PER_WORD
921 && outof_target != 0
922 && !CONSTANT_P (op1))
923 {
924 if (!expand_doubleword_shift (op1_mode, binoptab,
925 outof_input, into_input, op1,
926 0, into_target,
927 unsignedp, methods, shift_mask))
928 return false;
929 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
930 outof_target, unsignedp, methods))
931 return false;
932 return true;
933 }
934
935 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
936 is true when the effective shift value is less than BITS_PER_WORD.
937 Set SUPERWORD_OP1 to the shift count that should be used to shift
938 OUTOF_INPUT into INTO_TARGET when the condition is false. */
939 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
940 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
941 {
942 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
943 is a subword shift count. */
944 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
945 0, true, methods);
946 cmp2 = CONST0_RTX (op1_mode);
947 cmp_code = EQ;
948 superword_op1 = op1;
949 }
950 else
951 {
952 /* Set CMP1 to OP1 - BITS_PER_WORD. */
953 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
954 0, true, methods);
955 cmp2 = CONST0_RTX (op1_mode);
956 cmp_code = LT;
957 superword_op1 = cmp1;
958 }
959 if (cmp1 == 0)
960 return false;
961
962 /* If we can compute the condition at compile time, pick the
963 appropriate subroutine. */
964 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
965 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
966 {
967 if (tmp == const0_rtx)
968 return expand_superword_shift (binoptab, outof_input, superword_op1,
969 outof_target, into_target,
970 unsignedp, methods);
971 else
972 return expand_subword_shift (op1_mode, binoptab,
973 outof_input, into_input, op1,
974 outof_target, into_target,
975 unsignedp, methods, shift_mask);
976 }
977
978 #ifdef HAVE_conditional_move
979 /* Try using conditional moves to generate straight-line code. */
980 {
981 rtx start = get_last_insn ();
982 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
983 cmp_code, cmp1, cmp2,
984 outof_input, into_input,
985 op1, superword_op1,
986 outof_target, into_target,
987 unsignedp, methods, shift_mask))
988 return true;
989 delete_insns_since (start);
990 }
991 #endif
992
993 /* As a last resort, use branches to select the correct alternative. */
994 subword_label = gen_label_rtx ();
995 done_label = gen_label_rtx ();
996
997 NO_DEFER_POP;
998 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
999 0, 0, subword_label);
1000 OK_DEFER_POP;
1001
1002 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1003 outof_target, into_target,
1004 unsignedp, methods))
1005 return false;
1006
1007 emit_jump_insn (gen_jump (done_label));
1008 emit_barrier ();
1009 emit_label (subword_label);
1010
1011 if (!expand_subword_shift (op1_mode, binoptab,
1012 outof_input, into_input, op1,
1013 outof_target, into_target,
1014 unsignedp, methods, shift_mask))
1015 return false;
1016
1017 emit_label (done_label);
1018 return true;
1019 }
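/* Illustrative aside, not part of GCC and guarded out of the build: why the
   CMP1 = OP1 & BITS_PER_WORD test above suffices when word-mode shift
   counts are truncated (SHIFT_MASK == BITS_PER_WORD - 1).  With 32-bit
   words, a doubleword count N in [0, 64) selects the subword case exactly
   when (N & 32) == 0, and in the superword case the untruncated N can be
   handed straight to the word-mode shifter, because truncation reduces it
   to N - 32.  A check of that arithmetic in plain C: */
#if 0
#include <assert.h>

static void
check_subword_test (void)
{
  unsigned int n;
  for (n = 0; n < 64; n++)
    {
      if ((n & 32) == 0)
        assert (n < 32);                /* subword shift, N used as-is  */
      else
        assert ((n & 31) == n - 32);    /* superword, truncated N is N - 32  */
    }
}
#endif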
1020 \f
1021 /* Subroutine of expand_binop. Perform a double word multiplication of
1022 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1023 as the target's word_mode. This function returns NULL_RTX if anything
1024 goes wrong, in which case it may have already emitted instructions
1025 which need to be deleted.
1026
1027 If we want to multiply two two-word values and have normal and widening
1028 multiplies of single-word values, we can do this with three smaller
1029 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1030 because we are not operating on one word at a time.
1031
1032 The multiplication proceeds as follows:
1033 _______________________
1034 [__op0_high_|__op0_low__]
1035 _______________________
1036 * [__op1_high_|__op1_low__]
1037 _______________________________________________
1038 _______________________
1039 (1) [__op0_low__*__op1_low__]
1040 _______________________
1041 (2a) [__op0_low__*__op1_high_]
1042 _______________________
1043 (2b) [__op0_high_*__op1_low__]
1044 _______________________
1045 (3) [__op0_high_*__op1_high_]
1046
1047
1048 This gives a 4-word result. Since we are only interested in the
1049 lower 2 words, partial result (3) and the upper words of (2a) and
1050 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1051 calculated using non-widening multiplication.
1052
1053 (1), however, needs to be calculated with an unsigned widening
1054 multiplication. If this operation is not directly supported we
1055 try using a signed widening multiplication and adjust the result.
1056 This adjustment works as follows:
1057
1058 If both operands are positive then no adjustment is needed.
1059
1060 If the operands have different signs, for example op0_low < 0 and
1061 op1_low >= 0, the instruction treats the most significant bit of
1062 op0_low as a sign bit instead of a bit with significance
1063 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1064 with 2**BITS_PER_WORD - op0_low, and two's complements the
1065 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1066 the result.
1067
1068 Similarly, if both operands are negative, we need to add
1069 (op0_low + op1_low) * 2**BITS_PER_WORD.
1070
1071 We use a trick to adjust quickly. We logically shift op0_low right
1072 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1073 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1074 logical shift exists, we do an arithmetic right shift and subtract
1075 the 0 or -1. */
1076
1077 static rtx
1078 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1079 bool umulp, enum optab_methods methods)
1080 {
1081 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1082 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1083 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1084 rtx product, adjust, product_high, temp;
1085
1086 rtx op0_high = operand_subword_force (op0, high, mode);
1087 rtx op0_low = operand_subword_force (op0, low, mode);
1088 rtx op1_high = operand_subword_force (op1, high, mode);
1089 rtx op1_low = operand_subword_force (op1, low, mode);
1090
1091 /* If we're using an unsigned multiply to directly compute the product
1092 of the low-order words of the operands and perform any required
1093 adjustments of the operands, we begin by trying two more multiplications
1094 and then computing the appropriate sum.
1095
1096 We have checked above that the required addition is provided.
1097 Full-word addition will normally always succeed, especially if
1098 it is provided at all, so we don't worry about its failure. The
1099 multiplication may well fail, however, so we do handle that. */
1100
1101 if (!umulp)
1102 {
1103 /* ??? This could be done with emit_store_flag where available. */
1104 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1105 NULL_RTX, 1, methods);
1106 if (temp)
1107 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1108 NULL_RTX, 0, OPTAB_DIRECT);
1109 else
1110 {
1111 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1112 NULL_RTX, 0, methods);
1113 if (!temp)
1114 return NULL_RTX;
1115 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1116 NULL_RTX, 0, OPTAB_DIRECT);
1117 }
1118
1119 if (!op0_high)
1120 return NULL_RTX;
1121 }
1122
1123 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1124 NULL_RTX, 0, OPTAB_DIRECT);
1125 if (!adjust)
1126 return NULL_RTX;
1127
1128 /* OP0_HIGH should now be dead. */
1129
1130 if (!umulp)
1131 {
1132 /* ??? This could be done with emit_store_flag where available. */
1133 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1134 NULL_RTX, 1, methods);
1135 if (temp)
1136 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1137 NULL_RTX, 0, OPTAB_DIRECT);
1138 else
1139 {
1140 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1141 NULL_RTX, 0, methods);
1142 if (!temp)
1143 return NULL_RTX;
1144 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1145 NULL_RTX, 0, OPTAB_DIRECT);
1146 }
1147
1148 if (!op1_high)
1149 return NULL_RTX;
1150 }
1151
1152 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1153 NULL_RTX, 0, OPTAB_DIRECT);
1154 if (!temp)
1155 return NULL_RTX;
1156
1157 /* OP1_HIGH should now be dead. */
1158
1159 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1160 adjust, 0, OPTAB_DIRECT);
1161
1162 if (target && !REG_P (target))
1163 target = NULL_RTX;
1164
1165 if (umulp)
1166 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1167 target, 1, OPTAB_DIRECT);
1168 else
1169 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1170 target, 1, OPTAB_DIRECT);
1171
1172 if (!product)
1173 return NULL_RTX;
1174
1175 product_high = operand_subword (product, high, 1, mode);
1176 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1177 REG_P (product_high) ? product_high : adjust,
1178 0, OPTAB_DIRECT);
1179 emit_move_insn (product_high, adjust);
1180 return product;
1181 }
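/* Illustrative aside, not part of GCC and guarded out of the build: the
   three-multiplication scheme above written in plain C for a hypothetical
   target with 32-bit words.  Only the low two words of the 4-word product
   are kept, so the two cross products (2a) and (2b) can use ordinary
   non-widening 32x32->32 multiplies; only (1), LOW x LOW, needs a widening
   multiply.  This corresponds to the UMULP path; the !UMULP path also
   adjusts the high-order operands as described in the comment before the
   function.  The helper name is made up for illustration.  */
#if 0
#include <stdint.h>

static uint64_t
doubleword_mult_low (uint64_t op0, uint64_t op1)
{
  uint32_t op0_low = (uint32_t) op0, op0_high = (uint32_t) (op0 >> 32);
  uint32_t op1_low = (uint32_t) op1, op1_high = (uint32_t) (op1 >> 32);

  /* (2a) + (2b): the non-widening cross products.  Their high halves would
     only feed words 2 and 3 of the full product, which we discard.  */
  uint32_t adjust = op0_high * op1_low + op1_high * op0_low;

  /* (1): unsigned widening multiply of the low-order words.  */
  uint64_t product = (uint64_t) op0_low * op1_low;

  /* Fold the cross products into the high word of the result.  This equals
     op0 * op1 truncated to 64 bits, for all op0 and op1.  */
  return product + ((uint64_t) adjust << 32);
}
#endif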
1182 \f
1183 /* Wrapper around expand_binop which takes an rtx code to specify
1184 the operation to perform, not an optab pointer. All other
1185 arguments are the same. */
1186 rtx
1187 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1188 rtx op1, rtx target, int unsignedp,
1189 enum optab_methods methods)
1190 {
1191 optab binop = code_to_optab[(int) code];
1192 gcc_assert (binop);
1193
1194 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1195 }
1196
1197 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1198 binop. Order them according to commutative_operand_precedence and, if
1199 possible, try to put TARGET or a pseudo first. */
1200 static bool
1201 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1202 {
1203 int op0_prec = commutative_operand_precedence (op0);
1204 int op1_prec = commutative_operand_precedence (op1);
1205
1206 if (op0_prec < op1_prec)
1207 return true;
1208
1209 if (op0_prec > op1_prec)
1210 return false;
1211
1212 /* With equal precedence, both orders are ok, but it is better if the
1213 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1214 if (target == 0 || REG_P (target))
1215 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1216 else
1217 return rtx_equal_p (op1, target);
1218 }
1219
1220
1221 /* Generate code to perform an operation specified by BINOPTAB
1222 on operands OP0 and OP1, with result having machine-mode MODE.
1223
1224 UNSIGNEDP is for the case where we have to widen the operands
1225 to perform the operation. It says to use zero-extension.
1226
1227 If TARGET is nonzero, the value
1228 is generated there, if it is convenient to do so.
1229 In all cases an rtx is returned for the locus of the value;
1230 this may or may not be TARGET. */
1231
1232 rtx
1233 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1234 rtx target, int unsignedp, enum optab_methods methods)
1235 {
1236 enum optab_methods next_methods
1237 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1238 ? OPTAB_WIDEN : methods);
1239 enum mode_class class;
1240 enum machine_mode wider_mode;
1241 rtx temp;
1242 int commutative_op = 0;
1243 int shift_op = (binoptab->code == ASHIFT
1244 || binoptab->code == ASHIFTRT
1245 || binoptab->code == LSHIFTRT
1246 || binoptab->code == ROTATE
1247 || binoptab->code == ROTATERT);
1248 rtx entry_last = get_last_insn ();
1249 rtx last;
1250 bool first_pass_p = true;
1251
1252 class = GET_MODE_CLASS (mode);
1253
1254 /* If subtracting an integer constant, convert this into an addition of
1255 the negated constant. */
1256
1257 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1258 {
1259 op1 = negate_rtx (mode, op1);
1260 binoptab = add_optab;
1261 }
1262
1263 /* If we are inside an appropriately-short loop and we are optimizing,
1264 force expensive constants into a register. */
1265 if (CONSTANT_P (op0) && optimize
1266 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1267 {
1268 if (GET_MODE (op0) != VOIDmode)
1269 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1270 op0 = force_reg (mode, op0);
1271 }
1272
1273 if (CONSTANT_P (op1) && optimize
1274 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1275 {
1276 if (GET_MODE (op1) != VOIDmode)
1277 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1278 op1 = force_reg (mode, op1);
1279 }
1280
1281 /* Record where to delete back to if we backtrack. */
1282 last = get_last_insn ();
1283
1284 /* If operation is commutative,
1285 try to make the first operand a register.
1286 Even better, try to make it the same as the target.
1287 Also try to make the last operand a constant. */
1288 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1289 || binoptab == smul_widen_optab
1290 || binoptab == umul_widen_optab
1291 || binoptab == smul_highpart_optab
1292 || binoptab == umul_highpart_optab)
1293 {
1294 commutative_op = 1;
1295
1296 if (swap_commutative_operands_with_target (target, op0, op1))
1297 {
1298 temp = op1;
1299 op1 = op0;
1300 op0 = temp;
1301 }
1302 }
1303
1304 retry:
1305
1306 /* If we can do it with a three-operand insn, do so. */
1307
1308 if (methods != OPTAB_MUST_WIDEN
1309 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1310 {
1311 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1312 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1313 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1314 enum machine_mode tmp_mode;
1315 rtx pat;
1316 rtx xop0 = op0, xop1 = op1;
1317
1318 if (target)
1319 temp = target;
1320 else
1321 temp = gen_reg_rtx (mode);
1322
1323 /* If it is a commutative operator and the modes would match
1324 if we would swap the operands, we can save the conversions. */
1325 if (commutative_op)
1326 {
1327 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1328 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1329 {
1330 rtx tmp;
1331
1332 tmp = op0; op0 = op1; op1 = tmp;
1333 tmp = xop0; xop0 = xop1; xop1 = tmp;
1334 }
1335 }
1336
1337 /* In case the insn wants input operands in modes different from
1338 those of the actual operands, convert the operands. It would
1339 seem that we don't need to convert CONST_INTs, but we do, so
1340 that they're properly zero-extended, sign-extended or truncated
1341 for their mode. */
1342
1343 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1344 xop0 = convert_modes (mode0,
1345 GET_MODE (op0) != VOIDmode
1346 ? GET_MODE (op0)
1347 : mode,
1348 xop0, unsignedp);
1349
1350 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1351 xop1 = convert_modes (mode1,
1352 GET_MODE (op1) != VOIDmode
1353 ? GET_MODE (op1)
1354 : mode,
1355 xop1, unsignedp);
1356
1357 /* Now, if insn's predicates don't allow our operands, put them into
1358 pseudo regs. */
1359
1360 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1361 && mode0 != VOIDmode)
1362 xop0 = copy_to_mode_reg (mode0, xop0);
1363
1364 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1365 && mode1 != VOIDmode)
1366 xop1 = copy_to_mode_reg (mode1, xop1);
1367
1368 if (binoptab == vec_pack_mod_optab
1369 || binoptab == vec_pack_usat_optab
1370 || binoptab == vec_pack_ssat_optab)
1371 {
1372 /* The mode of the result is different from the mode of the
1373 arguments. */
1374 tmp_mode = insn_data[icode].operand[0].mode;
1375 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1376 return 0;
1377 }
1378 else
1379 tmp_mode = mode;
1380
1381 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1382 temp = gen_reg_rtx (tmp_mode);
1383
1384 pat = GEN_FCN (icode) (temp, xop0, xop1);
1385 if (pat)
1386 {
1387 /* If PAT is composed of more than one insn, try to add an appropriate
1388 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1389 operand, call ourselves again, this time without a target. */
1390 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1391 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1392 {
1393 delete_insns_since (last);
1394 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1395 unsignedp, methods);
1396 }
1397
1398 emit_insn (pat);
1399 return temp;
1400 }
1401 else
1402 delete_insns_since (last);
1403 }
1404
1405 /* If we were trying to rotate by a constant value, and that didn't
1406 work, try rotating the other direction before falling back to
1407 shifts and bitwise-or. */
1408 if (first_pass_p
1409 && (binoptab == rotl_optab || binoptab == rotr_optab)
1410 && class == MODE_INT
1411 && GET_CODE (op1) == CONST_INT
1412 && INTVAL (op1) > 0
1413 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1414 {
1415 first_pass_p = false;
1416 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1417 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1418 goto retry;
1419 }
1420
1421 /* If this is a multiply, see if we can do a widening operation that
1422 takes operands of this mode and makes a wider mode. */
1423
1424 if (binoptab == smul_optab
1425 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1426 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1427 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1428 != CODE_FOR_nothing))
1429 {
1430 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1431 unsignedp ? umul_widen_optab : smul_widen_optab,
1432 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1433
1434 if (temp != 0)
1435 {
1436 if (GET_MODE_CLASS (mode) == MODE_INT
1437 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1438 GET_MODE_BITSIZE (GET_MODE (temp))))
1439 return gen_lowpart (mode, temp);
1440 else
1441 return convert_to_mode (mode, temp, unsignedp);
1442 }
1443 }
1444
1445 /* Look for a wider mode of the same class for which we think we
1446 can open-code the operation. Check for a widening multiply at the
1447 wider mode as well. */
1448
1449 if (CLASS_HAS_WIDER_MODES_P (class)
1450 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1451 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1452 wider_mode != VOIDmode;
1453 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1454 {
1455 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1456 || (binoptab == smul_optab
1457 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1458 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1459 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1460 != CODE_FOR_nothing)))
1461 {
1462 rtx xop0 = op0, xop1 = op1;
1463 int no_extend = 0;
1464
1465 /* For certain integer operations, we need not actually extend
1466 the narrow operands, as long as we will truncate
1467 the results to the same narrowness. */
1468
1469 if ((binoptab == ior_optab || binoptab == and_optab
1470 || binoptab == xor_optab
1471 || binoptab == add_optab || binoptab == sub_optab
1472 || binoptab == smul_optab || binoptab == ashl_optab)
1473 && class == MODE_INT)
1474 no_extend = 1;
1475
1476 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1477
1478 /* The second operand of a shift must always be extended. */
1479 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1480 no_extend && binoptab != ashl_optab);
1481
1482 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1483 unsignedp, OPTAB_DIRECT);
1484 if (temp)
1485 {
1486 if (class != MODE_INT
1487 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1488 GET_MODE_BITSIZE (wider_mode)))
1489 {
1490 if (target == 0)
1491 target = gen_reg_rtx (mode);
1492 convert_move (target, temp, 0);
1493 return target;
1494 }
1495 else
1496 return gen_lowpart (mode, temp);
1497 }
1498 else
1499 delete_insns_since (last);
1500 }
1501 }
1502
1503 /* These can be done a word at a time. */
1504 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1505 && class == MODE_INT
1506 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1507 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1508 {
1509 int i;
1510 rtx insns;
1511 rtx equiv_value;
1512
1513 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1514 won't be accurate, so use a new target. */
1515 if (target == 0 || target == op0 || target == op1)
1516 target = gen_reg_rtx (mode);
1517
1518 start_sequence ();
1519
1520 /* Do the actual arithmetic. */
1521 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1522 {
1523 rtx target_piece = operand_subword (target, i, 1, mode);
1524 rtx x = expand_binop (word_mode, binoptab,
1525 operand_subword_force (op0, i, mode),
1526 operand_subword_force (op1, i, mode),
1527 target_piece, unsignedp, next_methods);
1528
1529 if (x == 0)
1530 break;
1531
1532 if (target_piece != x)
1533 emit_move_insn (target_piece, x);
1534 }
1535
1536 insns = get_insns ();
1537 end_sequence ();
1538
1539 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1540 {
1541 if (binoptab->code != UNKNOWN)
1542 equiv_value
1543 = gen_rtx_fmt_ee (binoptab->code, mode,
1544 copy_rtx (op0), copy_rtx (op1));
1545 else
1546 equiv_value = 0;
1547
1548 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1549 return target;
1550 }
1551 }
1552
1553 /* Synthesize double word shifts from single word shifts. */
1554 if ((binoptab == lshr_optab || binoptab == ashl_optab
1555 || binoptab == ashr_optab)
1556 && class == MODE_INT
1557 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1558 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1559 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1560 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1561 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1562 {
1563 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1564 enum machine_mode op1_mode;
1565
1566 double_shift_mask = targetm.shift_truncation_mask (mode);
1567 shift_mask = targetm.shift_truncation_mask (word_mode);
1568 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1569
1570 /* Apply the truncation to constant shifts. */
1571 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1572 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1573
1574 if (op1 == CONST0_RTX (op1_mode))
1575 return op0;
1576
1577 /* Make sure that this is a combination that expand_doubleword_shift
1578 can handle. See the comments there for details. */
1579 if (double_shift_mask == 0
1580 || (shift_mask == BITS_PER_WORD - 1
1581 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1582 {
1583 rtx insns, equiv_value;
1584 rtx into_target, outof_target;
1585 rtx into_input, outof_input;
1586 int left_shift, outof_word;
1587
1588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1589 won't be accurate, so use a new target. */
1590 if (target == 0 || target == op0 || target == op1)
1591 target = gen_reg_rtx (mode);
1592
1593 start_sequence ();
1594
1595 /* OUTOF_* is the word we are shifting bits away from, and
1596 INTO_* is the word that we are shifting bits towards, thus
1597 they differ depending on the direction of the shift and
1598 WORDS_BIG_ENDIAN. */
1599
1600 left_shift = binoptab == ashl_optab;
1601 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1602
1603 outof_target = operand_subword (target, outof_word, 1, mode);
1604 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1605
1606 outof_input = operand_subword_force (op0, outof_word, mode);
1607 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1608
1609 if (expand_doubleword_shift (op1_mode, binoptab,
1610 outof_input, into_input, op1,
1611 outof_target, into_target,
1612 unsignedp, next_methods, shift_mask))
1613 {
1614 insns = get_insns ();
1615 end_sequence ();
1616
1617 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1618 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1619 return target;
1620 }
1621 end_sequence ();
1622 }
1623 }
1624
1625 /* Synthesize double word rotates from single word shifts. */
1626 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1627 && class == MODE_INT
1628 && GET_CODE (op1) == CONST_INT
1629 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1630 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1631 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1632 {
1633 rtx insns;
1634 rtx into_target, outof_target;
1635 rtx into_input, outof_input;
1636 rtx inter;
1637 int shift_count, left_shift, outof_word;
1638
1639 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1640 won't be accurate, so use a new target. Do this also if target is not
1641 a REG, first because having a register instead may open optimization
1642 opportunities, and second because if target and op0 happen to be MEMs
1643 designating the same location, we would risk clobbering it too early
1644 in the code sequence we generate below. */
1645 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1646 target = gen_reg_rtx (mode);
1647
1648 start_sequence ();
1649
1650 shift_count = INTVAL (op1);
1651
1652 /* OUTOF_* is the word we are shifting bits away from, and
1653 INTO_* is the word that we are shifting bits towards, thus
1654 they differ depending on the direction of the shift and
1655 WORDS_BIG_ENDIAN. */
1656
1657 left_shift = (binoptab == rotl_optab);
1658 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1659
1660 outof_target = operand_subword (target, outof_word, 1, mode);
1661 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1662
1663 outof_input = operand_subword_force (op0, outof_word, mode);
1664 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1665
1666 if (shift_count == BITS_PER_WORD)
1667 {
1668 /* This is just a word swap. */
1669 emit_move_insn (outof_target, into_input);
1670 emit_move_insn (into_target, outof_input);
1671 inter = const0_rtx;
1672 }
1673 else
1674 {
1675 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1676 rtx first_shift_count, second_shift_count;
1677 optab reverse_unsigned_shift, unsigned_shift;
1678
1679 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1680 ? lshr_optab : ashl_optab);
1681
1682 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1683 ? ashl_optab : lshr_optab);
1684
1685 if (shift_count > BITS_PER_WORD)
1686 {
1687 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1688 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1689 }
1690 else
1691 {
1692 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1693 second_shift_count = GEN_INT (shift_count);
1694 }
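	      /* As an illustration, assuming BITS_PER_WORD == 32 and a left
		 rotate by C with 0 < C < 32, the result words are

		    into  (high word) = (high << C) | (low  >> (32 - C))
		    outof (low word)  = (low  << C) | (high >> (32 - C))

		 which is what the shift/IOR pairs below compute.  A count of
		 exactly 32 is the pure word swap handled above, and counts
		 above 32 are handled symmetrically with the shift directions
		 reversed.  */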
1695
1696 into_temp1 = expand_binop (word_mode, unsigned_shift,
1697 outof_input, first_shift_count,
1698 NULL_RTX, unsignedp, next_methods);
1699 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1700 into_input, second_shift_count,
1701 NULL_RTX, unsignedp, next_methods);
1702
1703 if (into_temp1 != 0 && into_temp2 != 0)
1704 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1705 into_target, unsignedp, next_methods);
1706 else
1707 inter = 0;
1708
1709 if (inter != 0 && inter != into_target)
1710 emit_move_insn (into_target, inter);
1711
1712 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1713 into_input, first_shift_count,
1714 NULL_RTX, unsignedp, next_methods);
1715 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1716 outof_input, second_shift_count,
1717 NULL_RTX, unsignedp, next_methods);
1718
1719 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1720 inter = expand_binop (word_mode, ior_optab,
1721 outof_temp1, outof_temp2,
1722 outof_target, unsignedp, next_methods);
1723
1724 if (inter != 0 && inter != outof_target)
1725 emit_move_insn (outof_target, inter);
1726 }
1727
1728 insns = get_insns ();
1729 end_sequence ();
1730
1731 if (inter != 0)
1732 {
1733 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1734 block to help the register allocator a bit. But a multi-word
1735 rotate will need all the input bits when setting the output
1736 bits, so there clearly is a conflict between the input and
1737 output registers. So we can't use a no-conflict block here. */
1738 emit_insn (insns);
1739 return target;
1740 }
1741 }
1742
1743 /* These can be done a word at a time by propagating carries. */
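  /* A sketch of the idea, assuming two 32-bit words: add the low words
     first, recover the carry from the unsigned comparison
     (low_sum < low_op0), then add that carry into the sum of the high
     words.  Subtraction is analogous, with the borrow detected by
     (low_diff > low_op0).  The loop below does this for any number of
     words.  */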
1744 if ((binoptab == add_optab || binoptab == sub_optab)
1745 && class == MODE_INT
1746 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1747 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1748 {
1749 unsigned int i;
1750 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1751 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1752 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1753 rtx xop0, xop1, xtarget;
1754
1755 	  /* We can handle either a 1 or -1 value for the carry.  If
1756 	     STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
1757 	     it is the one easiest to get.  */
1758 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1759 int normalizep = STORE_FLAG_VALUE;
1760 #else
1761 int normalizep = 1;
1762 #endif
1763
1764 /* Prepare the operands. */
1765 xop0 = force_reg (mode, op0);
1766 xop1 = force_reg (mode, op1);
1767
1768 xtarget = gen_reg_rtx (mode);
1769
1770 if (target == 0 || !REG_P (target))
1771 target = xtarget;
1772
1773 /* Indicate for flow that the entire target reg is being set. */
1774 if (REG_P (target))
1775 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1776
1777 /* Do the actual arithmetic. */
1778 for (i = 0; i < nwords; i++)
1779 {
1780 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1781 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1782 rtx op0_piece = operand_subword_force (xop0, index, mode);
1783 rtx op1_piece = operand_subword_force (xop1, index, mode);
1784 rtx x;
1785
1786 /* Main add/subtract of the input operands. */
1787 x = expand_binop (word_mode, binoptab,
1788 op0_piece, op1_piece,
1789 target_piece, unsignedp, next_methods);
1790 if (x == 0)
1791 break;
1792
1793 if (i + 1 < nwords)
1794 {
1795 /* Store carry from main add/subtract. */
1796 carry_out = gen_reg_rtx (word_mode);
1797 carry_out = emit_store_flag_force (carry_out,
1798 (binoptab == add_optab
1799 ? LT : GT),
1800 x, op0_piece,
1801 word_mode, 1, normalizep);
1802 }
1803
1804 if (i > 0)
1805 {
1806 rtx newx;
1807
1808 /* Add/subtract previous carry to main result. */
1809 newx = expand_binop (word_mode,
1810 normalizep == 1 ? binoptab : otheroptab,
1811 x, carry_in,
1812 NULL_RTX, 1, next_methods);
1813
1814 if (i + 1 < nwords)
1815 {
1816 /* Get out carry from adding/subtracting carry in. */
1817 rtx carry_tmp = gen_reg_rtx (word_mode);
1818 carry_tmp = emit_store_flag_force (carry_tmp,
1819 (binoptab == add_optab
1820 ? LT : GT),
1821 newx, x,
1822 word_mode, 1, normalizep);
1823
1824 		  /* Logical-ior the two possible carries together.  */
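		  /* Note that the two carries can never both be set: the
		     carry from adding CARRY_IN arises only when X is all
		     ones, and an all-ones word cannot be the result of an
		     addition that itself carried (similarly for borrows when
		     subtracting).  An inclusive OR is therefore enough to
		     combine them.  */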
1825 carry_out = expand_binop (word_mode, ior_optab,
1826 carry_out, carry_tmp,
1827 carry_out, 0, next_methods);
1828 if (carry_out == 0)
1829 break;
1830 }
1831 emit_move_insn (target_piece, newx);
1832 }
1833 else
1834 {
1835 if (x != target_piece)
1836 emit_move_insn (target_piece, x);
1837 }
1838
1839 carry_in = carry_out;
1840 }
1841
1842 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1843 {
1844 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1845 || ! rtx_equal_p (target, xtarget))
1846 {
1847 rtx temp = emit_move_insn (target, xtarget);
1848
1849 set_unique_reg_note (temp,
1850 REG_EQUAL,
1851 gen_rtx_fmt_ee (binoptab->code, mode,
1852 copy_rtx (xop0),
1853 copy_rtx (xop1)));
1854 }
1855 else
1856 target = xtarget;
1857
1858 return target;
1859 }
1860
1861 else
1862 delete_insns_since (last);
1863 }
1864
1865 /* Attempt to synthesize double word multiplies using a sequence of word
1866 mode multiplications. We first attempt to generate a sequence using a
1867 more efficient unsigned widening multiply, and if that fails we then
1868 try using a signed widening multiply. */
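  /* A sketch of the decomposition, writing each double-word operand as
     hi * 2^W + lo with W == BITS_PER_WORD:

	(hi0 * 2^W + lo0) * (hi1 * 2^W + lo1)
	  == widen (lo0 * lo1) + ((hi0 * lo1 + lo0 * hi1) << W)  (mod 2^(2*W))

     so one widening multiply, two ordinary word multiplies and an
     addition suffice; expand_doubleword_mult emits that sequence.  */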
1869
1870 if (binoptab == smul_optab
1871 && class == MODE_INT
1872 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1873 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1874 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1875 {
1876 rtx product = NULL_RTX;
1877
1878 if (umul_widen_optab->handlers[(int) mode].insn_code
1879 != CODE_FOR_nothing)
1880 {
1881 product = expand_doubleword_mult (mode, op0, op1, target,
1882 true, methods);
1883 if (!product)
1884 delete_insns_since (last);
1885 }
1886
1887 if (product == NULL_RTX
1888 && smul_widen_optab->handlers[(int) mode].insn_code
1889 != CODE_FOR_nothing)
1890 {
1891 product = expand_doubleword_mult (mode, op0, op1, target,
1892 false, methods);
1893 if (!product)
1894 delete_insns_since (last);
1895 }
1896
1897 if (product != NULL_RTX)
1898 {
1899 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1900 {
1901 temp = emit_move_insn (target ? target : product, product);
1902 set_unique_reg_note (temp,
1903 REG_EQUAL,
1904 gen_rtx_fmt_ee (MULT, mode,
1905 copy_rtx (op0),
1906 copy_rtx (op1)));
1907 }
1908 return product;
1909 }
1910 }
1911
1912 /* It can't be open-coded in this mode.
1913 Use a library call if one is available and caller says that's ok. */
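  /* For example, a doubleword multiply that could not be open-coded above
     may end up here as a call to a libgcc routine such as __muldi3, and
     doubleword shifts as calls to the __ashldi3 family (routine names are
     illustrative; the libfunc actually used is whatever is recorded in
     binoptab->handlers[mode].libfunc).  Note that shift counts are passed
     in word_mode, matching the count argument of those helpers.  */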
1914
1915 if (binoptab->handlers[(int) mode].libfunc
1916 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1917 {
1918 rtx insns;
1919 rtx op1x = op1;
1920 enum machine_mode op1_mode = mode;
1921 rtx value;
1922
1923 start_sequence ();
1924
1925 if (shift_op)
1926 {
1927 op1_mode = word_mode;
1928 /* Specify unsigned here,
1929 since negative shift counts are meaningless. */
1930 op1x = convert_to_mode (word_mode, op1, 1);
1931 }
1932
1933 if (GET_MODE (op0) != VOIDmode
1934 && GET_MODE (op0) != mode)
1935 op0 = convert_to_mode (mode, op0, unsignedp);
1936
1937 /* Pass 1 for NO_QUEUE so we don't lose any increments
1938 if the libcall is cse'd or moved. */
1939 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1940 NULL_RTX, LCT_CONST, mode, 2,
1941 op0, mode, op1x, op1_mode);
1942
1943 insns = get_insns ();
1944 end_sequence ();
1945
1946 target = gen_reg_rtx (mode);
1947 emit_libcall_block (insns, target, value,
1948 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1949
1950 return target;
1951 }
1952
1953 delete_insns_since (last);
1954
1955 /* It can't be done in this mode. Can we do it in a wider mode? */
1956
1957 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1958 || methods == OPTAB_MUST_WIDEN))
1959 {
1960       /* Caller says don't even try.  */
1961 delete_insns_since (entry_last);
1962 return 0;
1963 }
1964
1965 /* Compute the value of METHODS to pass to recursive calls.
1966 Don't allow widening to be tried recursively. */
1967
1968 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1969
1970 /* Look for a wider mode of the same class for which it appears we can do
1971 the operation. */
1972
1973 if (CLASS_HAS_WIDER_MODES_P (class))
1974 {
1975 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1976 wider_mode != VOIDmode;
1977 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1978 {
1979 if ((binoptab->handlers[(int) wider_mode].insn_code
1980 != CODE_FOR_nothing)
1981 || (methods == OPTAB_LIB
1982 && binoptab->handlers[(int) wider_mode].libfunc))
1983 {
1984 rtx xop0 = op0, xop1 = op1;
1985 int no_extend = 0;
1986
1987 /* For certain integer operations, we need not actually extend
1988 the narrow operands, as long as we will truncate
1989 the results to the same narrowness. */
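	      /* (For the operations listed below, each low-order bit of the
		 result depends only on the low-order bits of the inputs, so
		 any garbage in the high part of the widened registers is
		 harmless once the lowpart of the result is taken.  A shift
		 count, by contrast, must be correct in full, which is why the
		 second operand of a shift is always extended.)  */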
1990
1991 if ((binoptab == ior_optab || binoptab == and_optab
1992 || binoptab == xor_optab
1993 || binoptab == add_optab || binoptab == sub_optab
1994 || binoptab == smul_optab || binoptab == ashl_optab)
1995 && class == MODE_INT)
1996 no_extend = 1;
1997
1998 xop0 = widen_operand (xop0, wider_mode, mode,
1999 unsignedp, no_extend);
2000
2001 /* The second operand of a shift must always be extended. */
2002 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2003 no_extend && binoptab != ashl_optab);
2004
2005 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2006 unsignedp, methods);
2007 if (temp)
2008 {
2009 if (class != MODE_INT
2010 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2011 GET_MODE_BITSIZE (wider_mode)))
2012 {
2013 if (target == 0)
2014 target = gen_reg_rtx (mode);
2015 convert_move (target, temp, 0);
2016 return target;
2017 }
2018 else
2019 return gen_lowpart (mode, temp);
2020 }
2021 else
2022 delete_insns_since (last);
2023 }
2024 }
2025 }
2026
2027 delete_insns_since (entry_last);
2028 return 0;
2029 }
2030 \f
2031 /* Expand a binary operator which has both signed and unsigned forms.
2032 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2033 signed operations.
2034
2035 If we widen unsigned operands, we may use a signed wider operation instead
2036 of an unsigned wider operation, since the result would be the same. */
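/* For instance, an unsigned SImode division may be widened into a signed
   DImode division: the zero-extended operands are nonnegative, so the
   signed and unsigned operations agree and the low word of the result is
   the desired quotient.  (Illustrative only; the widening itself is done
   by expand_binop via OPTAB_WIDEN below.)  */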
2037
2038 rtx
2039 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2040 rtx op0, rtx op1, rtx target, int unsignedp,
2041 enum optab_methods methods)
2042 {
2043 rtx temp;
2044 optab direct_optab = unsignedp ? uoptab : soptab;
2045 struct optab wide_soptab;
2046
2047 /* Do it without widening, if possible. */
2048 temp = expand_binop (mode, direct_optab, op0, op1, target,
2049 unsignedp, OPTAB_DIRECT);
2050 if (temp || methods == OPTAB_DIRECT)
2051 return temp;
2052
2053 /* Try widening to a signed int. Make a fake signed optab that
2054 hides any signed insn for direct use. */
2055 wide_soptab = *soptab;
2056 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2057 wide_soptab.handlers[(int) mode].libfunc = 0;
2058
2059 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2060 unsignedp, OPTAB_WIDEN);
2061
2062 /* For unsigned operands, try widening to an unsigned int. */
2063 if (temp == 0 && unsignedp)
2064 temp = expand_binop (mode, uoptab, op0, op1, target,
2065 unsignedp, OPTAB_WIDEN);
2066 if (temp || methods == OPTAB_WIDEN)
2067 return temp;
2068
2069   /* Use a library call of the right width if one exists.  */
2070 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2071 if (temp || methods == OPTAB_LIB)
2072 return temp;
2073
2074   /* Must widen and use a library call, either signed or unsigned.  */
2075 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2076 unsignedp, methods);
2077 if (temp != 0)
2078 return temp;
2079 if (unsignedp)
2080 return expand_binop (mode, uoptab, op0, op1, target,
2081 unsignedp, methods);
2082 return 0;
2083 }
2084 \f
2085 /* Generate code to perform an operation specified by UNOPPTAB
2086 on operand OP0, with two results to TARG0 and TARG1.
2087 We assume that the order of the operands for the instruction
2088 is TARG0, TARG1, OP0.
2089
2090 Either TARG0 or TARG1 may be zero, but what that means is that
2091 the result is not actually wanted. We will generate it into
2092 a dummy pseudo-reg and discard it. They may not both be zero.
2093
2094 Returns 1 if this operation can be performed; 0 if not. */
2095
2096 int
2097 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2098 int unsignedp)
2099 {
2100 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2101 enum mode_class class;
2102 enum machine_mode wider_mode;
2103 rtx entry_last = get_last_insn ();
2104 rtx last;
2105
2106 class = GET_MODE_CLASS (mode);
2107
2108 if (!targ0)
2109 targ0 = gen_reg_rtx (mode);
2110 if (!targ1)
2111 targ1 = gen_reg_rtx (mode);
2112
2113 /* Record where to go back to if we fail. */
2114 last = get_last_insn ();
2115
2116 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2117 {
2118 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2119 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2120 rtx pat;
2121 rtx xop0 = op0;
2122
2123 if (GET_MODE (xop0) != VOIDmode
2124 && GET_MODE (xop0) != mode0)
2125 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2126
2127 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2128 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2129 xop0 = copy_to_mode_reg (mode0, xop0);
2130
2131 /* We could handle this, but we should always be called with a pseudo
2132 for our targets and all insns should take them as outputs. */
2133 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2134 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2135
2136 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2137 if (pat)
2138 {
2139 emit_insn (pat);
2140 return 1;
2141 }
2142 else
2143 delete_insns_since (last);
2144 }
2145
2146 /* It can't be done in this mode. Can we do it in a wider mode? */
2147
2148 if (CLASS_HAS_WIDER_MODES_P (class))
2149 {
2150 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2151 wider_mode != VOIDmode;
2152 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2153 {
2154 if (unoptab->handlers[(int) wider_mode].insn_code
2155 != CODE_FOR_nothing)
2156 {
2157 rtx t0 = gen_reg_rtx (wider_mode);
2158 rtx t1 = gen_reg_rtx (wider_mode);
2159 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2160
2161 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2162 {
2163 convert_move (targ0, t0, unsignedp);
2164 convert_move (targ1, t1, unsignedp);
2165 return 1;
2166 }
2167 else
2168 delete_insns_since (last);
2169 }
2170 }
2171 }
2172
2173 delete_insns_since (entry_last);
2174 return 0;
2175 }
2176 \f
2177 /* Generate code to perform an operation specified by BINOPTAB
2178    on operands OP0 and OP1, with two results to TARG0 and TARG1.
2179 We assume that the order of the operands for the instruction
2180 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2181 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2182
2183 Either TARG0 or TARG1 may be zero, but what that means is that
2184 the result is not actually wanted. We will generate it into
2185 a dummy pseudo-reg and discard it. They may not both be zero.
2186
2187 Returns 1 if this operation can be performed; 0 if not. */
2188
2189 int
2190 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2191 int unsignedp)
2192 {
2193 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2194 enum mode_class class;
2195 enum machine_mode wider_mode;
2196 rtx entry_last = get_last_insn ();
2197 rtx last;
2198
2199 class = GET_MODE_CLASS (mode);
2200
2201 /* If we are inside an appropriately-short loop and we are optimizing,
2202 force expensive constants into a register. */
2203 if (CONSTANT_P (op0) && optimize
2204 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2205 op0 = force_reg (mode, op0);
2206
2207 if (CONSTANT_P (op1) && optimize
2208 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2209 op1 = force_reg (mode, op1);
2210
2211 if (!targ0)
2212 targ0 = gen_reg_rtx (mode);
2213 if (!targ1)
2214 targ1 = gen_reg_rtx (mode);
2215
2216 /* Record where to go back to if we fail. */
2217 last = get_last_insn ();
2218
2219 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2220 {
2221 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2222 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2223 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2224 rtx pat;
2225 rtx xop0 = op0, xop1 = op1;
2226
2227 /* In case the insn wants input operands in modes different from
2228 those of the actual operands, convert the operands. It would
2229 seem that we don't need to convert CONST_INTs, but we do, so
2230 that they're properly zero-extended, sign-extended or truncated
2231 for their mode. */
2232
2233 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2234 xop0 = convert_modes (mode0,
2235 GET_MODE (op0) != VOIDmode
2236 ? GET_MODE (op0)
2237 : mode,
2238 xop0, unsignedp);
2239
2240 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2241 xop1 = convert_modes (mode1,
2242 GET_MODE (op1) != VOIDmode
2243 ? GET_MODE (op1)
2244 : mode,
2245 xop1, unsignedp);
2246
2247 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2248 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2249 xop0 = copy_to_mode_reg (mode0, xop0);
2250
2251 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2252 xop1 = copy_to_mode_reg (mode1, xop1);
2253
2254 /* We could handle this, but we should always be called with a pseudo
2255 for our targets and all insns should take them as outputs. */
2256 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2257 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2258
2259 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2260 if (pat)
2261 {
2262 emit_insn (pat);
2263 return 1;
2264 }
2265 else
2266 delete_insns_since (last);
2267 }
2268
2269 /* It can't be done in this mode. Can we do it in a wider mode? */
2270
2271 if (CLASS_HAS_WIDER_MODES_P (class))
2272 {
2273 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2274 wider_mode != VOIDmode;
2275 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2276 {
2277 if (binoptab->handlers[(int) wider_mode].insn_code
2278 != CODE_FOR_nothing)
2279 {
2280 rtx t0 = gen_reg_rtx (wider_mode);
2281 rtx t1 = gen_reg_rtx (wider_mode);
2282 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2283 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2284
2285 if (expand_twoval_binop (binoptab, cop0, cop1,
2286 t0, t1, unsignedp))
2287 {
2288 convert_move (targ0, t0, unsignedp);
2289 convert_move (targ1, t1, unsignedp);
2290 return 1;
2291 }
2292 else
2293 delete_insns_since (last);
2294 }
2295 }
2296 }
2297
2298 delete_insns_since (entry_last);
2299 return 0;
2300 }
2301
2302 /* Expand the two-valued library call indicated by BINOPTAB, but
2303 preserve only one of the values. If TARG0 is non-NULL, the first
2304 value is placed into TARG0; otherwise the second value is placed
2305 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2306 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2307 This routine assumes that the value returned by the library call is
2308 as if the return value was of an integral mode twice as wide as the
2309 mode of OP0. Returns 1 if the call was successful. */
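/* (For example, with SImode operands the libcall value is treated as
   DImode; the SImode piece for TARG0 is then extracted with
   simplify_gen_subreg at byte offset 0, and the piece for TARG1 at byte
   offset GET_MODE_SIZE (SImode).)  */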
2310
2311 bool
2312 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2313 rtx targ0, rtx targ1, enum rtx_code code)
2314 {
2315 enum machine_mode mode;
2316 enum machine_mode libval_mode;
2317 rtx libval;
2318 rtx insns;
2319
2320 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2321 gcc_assert (!targ0 != !targ1);
2322
2323 mode = GET_MODE (op0);
2324 if (!binoptab->handlers[(int) mode].libfunc)
2325 return false;
2326
2327 /* The value returned by the library function will have twice as
2328 many bits as the nominal MODE. */
2329 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2330 MODE_INT);
2331 start_sequence ();
2332 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2333 NULL_RTX, LCT_CONST,
2334 libval_mode, 2,
2335 op0, mode,
2336 op1, mode);
2337 /* Get the part of VAL containing the value that we want. */
2338 libval = simplify_gen_subreg (mode, libval, libval_mode,
2339 targ0 ? 0 : GET_MODE_SIZE (mode));
2340 insns = get_insns ();
2341 end_sequence ();
2342   /* Move the result into the desired location.  */
2343 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2344 gen_rtx_fmt_ee (code, mode, op0, op1));
2345
2346 return true;
2347 }
2348
2349 \f
2350 /* Wrapper around expand_unop which takes an rtx code to specify
2351 the operation to perform, not an optab pointer. All other
2352 arguments are the same. */
2353 rtx
2354 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2355 rtx target, int unsignedp)
2356 {
2357 optab unop = code_to_optab[(int) code];
2358 gcc_assert (unop);
2359
2360 return expand_unop (mode, unop, op0, target, unsignedp);
2361 }
2362
2363 /* Try calculating
2364 (clz:narrow x)
2365 as
2366 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
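/* E.g. for a QImode operand computed via SImode: clz32 (zext (0x10)) is
   27, and 27 - (32 - 8) == 3, the QImode clz of 0x10.  */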
2367 static rtx
2368 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2369 {
2370 enum mode_class class = GET_MODE_CLASS (mode);
2371 if (CLASS_HAS_WIDER_MODES_P (class))
2372 {
2373 enum machine_mode wider_mode;
2374 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2375 wider_mode != VOIDmode;
2376 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2377 {
2378 if (clz_optab->handlers[(int) wider_mode].insn_code
2379 != CODE_FOR_nothing)
2380 {
2381 rtx xop0, temp, last;
2382
2383 last = get_last_insn ();
2384
2385 if (target == 0)
2386 target = gen_reg_rtx (mode);
2387 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2388 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2389 if (temp != 0)
2390 temp = expand_binop (wider_mode, sub_optab, temp,
2391 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2392 - GET_MODE_BITSIZE (mode)),
2393 target, true, OPTAB_DIRECT);
2394 if (temp == 0)
2395 delete_insns_since (last);
2396
2397 return temp;
2398 }
2399 }
2400 }
2401 return 0;
2402 }
2403
2404 /* Try calculating
2405 (bswap:narrow x)
2406 as
2407 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
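/* E.g. for an HImode operand computed via SImode: bswap32 (0x....AABB)
   puts 0xBBAA in the upper half and whatever garbage occupied the upper
   half of the widened input in the lower half; shifting right by
   32 - 16 discards that garbage and leaves 0xBBAA, the HImode bswap of
   0xAABB.  */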
2408 static rtx
2409 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2410 {
2411 enum mode_class class = GET_MODE_CLASS (mode);
2412 enum machine_mode wider_mode;
2413 rtx x, last;
2414
2415 if (!CLASS_HAS_WIDER_MODES_P (class))
2416 return NULL_RTX;
2417
2418 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2419 wider_mode != VOIDmode;
2420 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2421 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2422 goto found;
2423 return NULL_RTX;
2424
2425 found:
2426 last = get_last_insn ();
2427
2428 x = widen_operand (op0, wider_mode, mode, true, true);
2429 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2430
2431 if (x != 0)
2432 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2433 size_int (GET_MODE_BITSIZE (wider_mode)
2434 - GET_MODE_BITSIZE (mode)),
2435 NULL_RTX, true);
2436
2437 if (x != 0)
2438 {
2439 if (target == 0)
2440 target = gen_reg_rtx (mode);
2441 emit_move_insn (target, gen_lowpart (mode, x));
2442 }
2443 else
2444 delete_insns_since (last);
2445
2446 return target;
2447 }
2448
2449 /* Try calculating bswap as two bswaps of two word-sized operands. */
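/* E.g. a DImode bswap on a 32-bit target: byte-swap each 32-bit half and
   exchange the halves: subword 0 of the result receives the bswap of
   subword 1 of the input, and vice versa, as the two moves below do.  */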
2450
2451 static rtx
2452 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2453 {
2454 rtx t0, t1;
2455
2456 t1 = expand_unop (word_mode, bswap_optab,
2457 operand_subword_force (op, 0, mode), NULL_RTX, true);
2458 t0 = expand_unop (word_mode, bswap_optab,
2459 operand_subword_force (op, 1, mode), NULL_RTX, true);
2460
2461 if (target == 0)
2462 target = gen_reg_rtx (mode);
2463 if (REG_P (target))
2464 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2465 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2466 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2467
2468 return target;
2469 }
2470
2471 /* Try calculating (parity x) as (and (popcount x) 1), where
2472 popcount can also be done in a wider mode. */
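/* E.g. popcount (0b1011) == 3 and 3 & 1 == 1, the parity of 0b1011.  */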
2473 static rtx
2474 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2475 {
2476 enum mode_class class = GET_MODE_CLASS (mode);
2477 if (CLASS_HAS_WIDER_MODES_P (class))
2478 {
2479 enum machine_mode wider_mode;
2480 for (wider_mode = mode; wider_mode != VOIDmode;
2481 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2482 {
2483 if (popcount_optab->handlers[(int) wider_mode].insn_code
2484 != CODE_FOR_nothing)
2485 {
2486 rtx xop0, temp, last;
2487
2488 last = get_last_insn ();
2489
2490 if (target == 0)
2491 target = gen_reg_rtx (mode);
2492 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2493 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2494 true);
2495 if (temp != 0)
2496 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2497 target, true, OPTAB_DIRECT);
2498 if (temp == 0)
2499 delete_insns_since (last);
2500
2501 return temp;
2502 }
2503 }
2504 }
2505 return 0;
2506 }
2507
2508 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2509 conditions, VAL may already be a SUBREG against which we cannot generate
2510 a further SUBREG. In this case, we expect forcing the value into a
2511 register will work around the situation. */
2512
2513 static rtx
2514 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2515 enum machine_mode imode)
2516 {
2517 rtx ret;
2518 ret = lowpart_subreg (omode, val, imode);
2519 if (ret == NULL)
2520 {
2521 val = force_reg (imode, val);
2522 ret = lowpart_subreg (omode, val, imode);
2523 gcc_assert (ret != NULL);
2524 }
2525 return ret;
2526 }
2527
2528 /* Expand a floating point absolute value or negation operation via a
2529 logical operation on the sign bit. */
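/* E.g. for the IEEE binary32 format, whose sign is bit 31, abs becomes
   x & 0x7fffffff and negation becomes x ^ 0x80000000; the code below
   builds the corresponding mask from fmt->signbit_rw for whatever
   format MODE uses.  */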
2530
2531 static rtx
2532 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2533 rtx op0, rtx target)
2534 {
2535 const struct real_format *fmt;
2536 int bitpos, word, nwords, i;
2537 enum machine_mode imode;
2538 HOST_WIDE_INT hi, lo;
2539 rtx temp, insns;
2540
2541 /* The format has to have a simple sign bit. */
2542 fmt = REAL_MODE_FORMAT (mode);
2543 if (fmt == NULL)
2544 return NULL_RTX;
2545
2546 bitpos = fmt->signbit_rw;
2547 if (bitpos < 0)
2548 return NULL_RTX;
2549
2550 /* Don't create negative zeros if the format doesn't support them. */
2551 if (code == NEG && !fmt->has_signed_zero)
2552 return NULL_RTX;
2553
2554 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2555 {
2556 imode = int_mode_for_mode (mode);
2557 if (imode == BLKmode)
2558 return NULL_RTX;
2559 word = 0;
2560 nwords = 1;
2561 }
2562 else
2563 {
2564 imode = word_mode;
2565
2566 if (FLOAT_WORDS_BIG_ENDIAN)
2567 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2568 else
2569 word = bitpos / BITS_PER_WORD;
2570 bitpos = bitpos % BITS_PER_WORD;
2571 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2572 }
2573
2574 if (bitpos < HOST_BITS_PER_WIDE_INT)
2575 {
2576 hi = 0;
2577 lo = (HOST_WIDE_INT) 1 << bitpos;
2578 }
2579 else
2580 {
2581 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2582 lo = 0;
2583 }
2584 if (code == ABS)
2585 lo = ~lo, hi = ~hi;
2586
2587 if (target == 0 || target == op0)
2588 target = gen_reg_rtx (mode);
2589
2590 if (nwords > 1)
2591 {
2592 start_sequence ();
2593
2594 for (i = 0; i < nwords; ++i)
2595 {
2596 rtx targ_piece = operand_subword (target, i, 1, mode);
2597 rtx op0_piece = operand_subword_force (op0, i, mode);
2598
2599 if (i == word)
2600 {
2601 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2602 op0_piece,
2603 immed_double_const (lo, hi, imode),
2604 targ_piece, 1, OPTAB_LIB_WIDEN);
2605 if (temp != targ_piece)
2606 emit_move_insn (targ_piece, temp);
2607 }
2608 else
2609 emit_move_insn (targ_piece, op0_piece);
2610 }
2611
2612 insns = get_insns ();
2613 end_sequence ();
2614
2615 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2616 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2617 }
2618 else
2619 {
2620 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2621 gen_lowpart (imode, op0),
2622 immed_double_const (lo, hi, imode),
2623 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2624 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2625
2626 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2627 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2628 }
2629
2630 return target;
2631 }
2632
2633 /* Generate code to perform an operation specified by UNOPTAB
2634 on operand OP0, with result having machine-mode MODE.
2635
2636 UNSIGNEDP is for the case where we have to widen the operands
2637 to perform the operation. It says to use zero-extension.
2638
2639 If TARGET is nonzero, the value
2640 is generated there, if it is convenient to do so.
2641 In all cases an rtx is returned for the locus of the value;
2642 this may or may not be TARGET. */
2643
2644 rtx
2645 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2646 int unsignedp)
2647 {
2648 enum mode_class class;
2649 enum machine_mode wider_mode;
2650 rtx temp;
2651 rtx last = get_last_insn ();
2652 rtx pat;
2653
2654 class = GET_MODE_CLASS (mode);
2655
2656 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2657 {
2658 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2659 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2660 rtx xop0 = op0;
2661
2662 if (target)
2663 temp = target;
2664 else
2665 temp = gen_reg_rtx (mode);
2666
2667 if (GET_MODE (xop0) != VOIDmode
2668 && GET_MODE (xop0) != mode0)
2669 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2670
2671 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2672
2673 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2674 xop0 = copy_to_mode_reg (mode0, xop0);
2675
2676 if (!insn_data[icode].operand[0].predicate (temp, mode))
2677 temp = gen_reg_rtx (mode);
2678
2679 pat = GEN_FCN (icode) (temp, xop0);
2680 if (pat)
2681 {
2682 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2683 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2684 {
2685 delete_insns_since (last);
2686 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2687 }
2688
2689 emit_insn (pat);
2690
2691 return temp;
2692 }
2693 else
2694 delete_insns_since (last);
2695 }
2696
2697 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2698
2699 /* Widening clz needs special treatment. */
2700 if (unoptab == clz_optab)
2701 {
2702 temp = widen_clz (mode, op0, target);
2703 if (temp)
2704 return temp;
2705 else
2706 goto try_libcall;
2707 }
2708
2709 /* Widening (or narrowing) bswap needs special treatment. */
2710 if (unoptab == bswap_optab)
2711 {
2712 temp = widen_bswap (mode, op0, target);
2713 if (temp)
2714 return temp;
2715
2716 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2717 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2718 {
2719 temp = expand_doubleword_bswap (mode, op0, target);
2720 if (temp)
2721 return temp;
2722 }
2723
2724 goto try_libcall;
2725 }
2726
2727 if (CLASS_HAS_WIDER_MODES_P (class))
2728 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2729 wider_mode != VOIDmode;
2730 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2731 {
2732 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2733 {
2734 rtx xop0 = op0;
2735
2736 /* For certain operations, we need not actually extend
2737 the narrow operand, as long as we will truncate the
2738 results to the same narrowness. */
2739
2740 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2741 (unoptab == neg_optab
2742 || unoptab == one_cmpl_optab)
2743 && class == MODE_INT);
2744
2745 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2746 unsignedp);
2747
2748 if (temp)
2749 {
2750 if (class != MODE_INT
2751 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2752 GET_MODE_BITSIZE (wider_mode)))
2753 {
2754 if (target == 0)
2755 target = gen_reg_rtx (mode);
2756 convert_move (target, temp, 0);
2757 return target;
2758 }
2759 else
2760 return gen_lowpart (mode, temp);
2761 }
2762 else
2763 delete_insns_since (last);
2764 }
2765 }
2766
2767 /* These can be done a word at a time. */
2768 if (unoptab == one_cmpl_optab
2769 && class == MODE_INT
2770 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2771 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2772 {
2773 int i;
2774 rtx insns;
2775
2776 if (target == 0 || target == op0)
2777 target = gen_reg_rtx (mode);
2778
2779 start_sequence ();
2780
2781 /* Do the actual arithmetic. */
2782 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2783 {
2784 rtx target_piece = operand_subword (target, i, 1, mode);
2785 rtx x = expand_unop (word_mode, unoptab,
2786 operand_subword_force (op0, i, mode),
2787 target_piece, unsignedp);
2788
2789 if (target_piece != x)
2790 emit_move_insn (target_piece, x);
2791 }
2792
2793 insns = get_insns ();
2794 end_sequence ();
2795
2796 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2797 gen_rtx_fmt_e (unoptab->code, mode,
2798 copy_rtx (op0)));
2799 return target;
2800 }
2801
2802 if (unoptab->code == NEG)
2803 {
2804 /* Try negating floating point values by flipping the sign bit. */
2805 if (SCALAR_FLOAT_MODE_P (mode))
2806 {
2807 temp = expand_absneg_bit (NEG, mode, op0, target);
2808 if (temp)
2809 return temp;
2810 }
2811
2812 /* If there is no negation pattern, and we have no negative zero,
2813 try subtracting from zero. */
2814 if (!HONOR_SIGNED_ZEROS (mode))
2815 {
2816 temp = expand_binop (mode, (unoptab == negv_optab
2817 ? subv_optab : sub_optab),
2818 CONST0_RTX (mode), op0, target,
2819 unsignedp, OPTAB_DIRECT);
2820 if (temp)
2821 return temp;
2822 }
2823 }
2824
2825 /* Try calculating parity (x) as popcount (x) % 2. */
2826 if (unoptab == parity_optab)
2827 {
2828 temp = expand_parity (mode, op0, target);
2829 if (temp)
2830 return temp;
2831 }
2832
2833 try_libcall:
2834 /* Now try a library call in this mode. */
2835 if (unoptab->handlers[(int) mode].libfunc)
2836 {
2837 rtx insns;
2838 rtx value;
2839 enum machine_mode outmode = mode;
2840
2841 /* All of these functions return small values. Thus we choose to
2842 have them return something that isn't a double-word. */
2843 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2844 || unoptab == popcount_optab || unoptab == parity_optab)
2845 outmode
2846 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2847
2848 start_sequence ();
2849
2850 /* Pass 1 for NO_QUEUE so we don't lose any increments
2851 if the libcall is cse'd or moved. */
2852 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2853 NULL_RTX, LCT_CONST, outmode,
2854 1, op0, mode);
2855 insns = get_insns ();
2856 end_sequence ();
2857
2858 target = gen_reg_rtx (outmode);
2859 emit_libcall_block (insns, target, value,
2860 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2861
2862 return target;
2863 }
2864
2865 /* It can't be done in this mode. Can we do it in a wider mode? */
2866
2867 if (CLASS_HAS_WIDER_MODES_P (class))
2868 {
2869 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2870 wider_mode != VOIDmode;
2871 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2872 {
2873 if ((unoptab->handlers[(int) wider_mode].insn_code
2874 != CODE_FOR_nothing)
2875 || unoptab->handlers[(int) wider_mode].libfunc)
2876 {
2877 rtx xop0 = op0;
2878
2879 /* For certain operations, we need not actually extend
2880 the narrow operand, as long as we will truncate the
2881 results to the same narrowness. */
2882
2883 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2884 (unoptab == neg_optab
2885 || unoptab == one_cmpl_optab)
2886 && class == MODE_INT);
2887
2888 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2889 unsignedp);
2890
2891 /* If we are generating clz using wider mode, adjust the
2892 result. */
2893 if (unoptab == clz_optab && temp != 0)
2894 temp = expand_binop (wider_mode, sub_optab, temp,
2895 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2896 - GET_MODE_BITSIZE (mode)),
2897 target, true, OPTAB_DIRECT);
2898
2899 if (temp)
2900 {
2901 if (class != MODE_INT)
2902 {
2903 if (target == 0)
2904 target = gen_reg_rtx (mode);
2905 convert_move (target, temp, 0);
2906 return target;
2907 }
2908 else
2909 return gen_lowpart (mode, temp);
2910 }
2911 else
2912 delete_insns_since (last);
2913 }
2914 }
2915 }
2916
2917 /* One final attempt at implementing negation via subtraction,
2918 this time allowing widening of the operand. */
2919 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2920 {
2921 rtx temp;
2922 temp = expand_binop (mode,
2923 unoptab == negv_optab ? subv_optab : sub_optab,
2924 CONST0_RTX (mode), op0,
2925 target, unsignedp, OPTAB_LIB_WIDEN);
2926 if (temp)
2927 return temp;
2928 }
2929
2930 return 0;
2931 }
2932 \f
2933 /* Emit code to compute the absolute value of OP0, with result to
2934 TARGET if convenient. (TARGET may be 0.) The return value says
2935 where the result actually is to be found.
2936
2937 MODE is the mode of the operand; the mode of the result is
2938    different but can be deduced from MODE.  */
2941
2942 rtx
2943 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2944 int result_unsignedp)
2945 {
2946 rtx temp;
2947
2948 if (! flag_trapv)
2949 result_unsignedp = 1;
2950
2951 /* First try to do it with a special abs instruction. */
2952 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2953 op0, target, 0);
2954 if (temp != 0)
2955 return temp;
2956
2957 /* For floating point modes, try clearing the sign bit. */
2958 if (SCALAR_FLOAT_MODE_P (mode))
2959 {
2960 temp = expand_absneg_bit (ABS, mode, op0, target);
2961 if (temp)
2962 return temp;
2963 }
2964
2965 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2966 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2967 && !HONOR_SIGNED_ZEROS (mode))
2968 {
2969 rtx last = get_last_insn ();
2970
2971 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2972 if (temp != 0)
2973 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2974 OPTAB_WIDEN);
2975
2976 if (temp != 0)
2977 return temp;
2978
2979 delete_insns_since (last);
2980 }
2981
2982 /* If this machine has expensive jumps, we can do integer absolute
2983 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2984 where W is the width of MODE. */
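      /* E.g. with W == 32 and x == -5: x >> 31 is -1, -1 ^ -5 is 4, and
	 4 - (-1) is 5.  For nonnegative x the shift yields 0 and the
	 expression reduces to x.  */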
2985
2986 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2987 {
2988 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2989 size_int (GET_MODE_BITSIZE (mode) - 1),
2990 NULL_RTX, 0);
2991
2992 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2993 OPTAB_LIB_WIDEN);
2994 if (temp != 0)
2995 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2996 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2997
2998 if (temp != 0)
2999 return temp;
3000 }
3001
3002 return NULL_RTX;
3003 }
3004
3005 rtx
3006 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3007 int result_unsignedp, int safe)
3008 {
3009 rtx temp, op1;
3010
3011 if (! flag_trapv)
3012 result_unsignedp = 1;
3013
3014 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3015 if (temp != 0)
3016 return temp;
3017
3018 /* If that does not win, use conditional jump and negate. */
3019
3020   /* It is safe to use the target if it is the same as the source,
3021      provided it is also a pseudo register.  */
3022 if (op0 == target && REG_P (op0)
3023 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3024 safe = 1;
3025
3026 op1 = gen_label_rtx ();
3027 if (target == 0 || ! safe
3028 || GET_MODE (target) != mode
3029 || (MEM_P (target) && MEM_VOLATILE_P (target))
3030 || (REG_P (target)
3031 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3032 target = gen_reg_rtx (mode);
3033
3034 emit_move_insn (target, op0);
3035 NO_DEFER_POP;
3036
3037 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3038 NULL_RTX, NULL_RTX, op1);
3039
3040 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3041 target, target, 0);
3042 if (op0 != target)
3043 emit_move_insn (target, op0);
3044 emit_label (op1);
3045 OK_DEFER_POP;
3046 return target;
3047 }
3048
3049 /* A subroutine of expand_copysign, perform the copysign operation using the
3050 abs and neg primitives advertised to exist on the target. The assumption
3051 is that we have a split register file, and leaving op0 in fp registers,
3052 and not playing with subregs so much, will help the register allocator. */
3053
3054 static rtx
3055 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3056 int bitpos, bool op0_is_abs)
3057 {
3058 enum machine_mode imode;
3059 HOST_WIDE_INT hi, lo;
3060 int word;
3061 rtx label;
3062
3063 if (target == op1)
3064 target = NULL_RTX;
3065
3066 if (!op0_is_abs)
3067 {
3068 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3069 if (op0 == NULL)
3070 return NULL_RTX;
3071 target = op0;
3072 }
3073 else
3074 {
3075 if (target == NULL_RTX)
3076 target = copy_to_reg (op0);
3077 else
3078 emit_move_insn (target, op0);
3079 }
3080
3081 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3082 {
3083 imode = int_mode_for_mode (mode);
3084 if (imode == BLKmode)
3085 return NULL_RTX;
3086 op1 = gen_lowpart (imode, op1);
3087 }
3088 else
3089 {
3090 imode = word_mode;
3091 if (FLOAT_WORDS_BIG_ENDIAN)
3092 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3093 else
3094 word = bitpos / BITS_PER_WORD;
3095 bitpos = bitpos % BITS_PER_WORD;
3096 op1 = operand_subword_force (op1, word, mode);
3097 }
3098
3099 if (bitpos < HOST_BITS_PER_WIDE_INT)
3100 {
3101 hi = 0;
3102 lo = (HOST_WIDE_INT) 1 << bitpos;
3103 }
3104 else
3105 {
3106 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3107 lo = 0;
3108 }
3109
3110 op1 = expand_binop (imode, and_optab, op1,
3111 immed_double_const (lo, hi, imode),
3112 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3113
3114 label = gen_label_rtx ();
3115 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3116
3117 if (GET_CODE (op0) == CONST_DOUBLE)
3118 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3119 else
3120 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3121 if (op0 != target)
3122 emit_move_insn (target, op0);
3123
3124 emit_label (label);
3125
3126 return target;
3127 }
3128
3129
3130 /* A subroutine of expand_copysign, perform the entire copysign operation
3131 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3132 is true if op0 is known to have its sign bit clear. */
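/* E.g. for the IEEE binary32 format this amounts to
   (op0 & 0x7fffffff) | (op1 & 0x80000000); the code below builds the
   equivalent masks from BITPOS and, for multiword modes, applies them
   one word at a time.  */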
3133
3134 static rtx
3135 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3136 int bitpos, bool op0_is_abs)
3137 {
3138 enum machine_mode imode;
3139 HOST_WIDE_INT hi, lo;
3140 int word, nwords, i;
3141 rtx temp, insns;
3142
3143 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3144 {
3145 imode = int_mode_for_mode (mode);
3146 if (imode == BLKmode)
3147 return NULL_RTX;
3148 word = 0;
3149 nwords = 1;
3150 }
3151 else
3152 {
3153 imode = word_mode;
3154
3155 if (FLOAT_WORDS_BIG_ENDIAN)
3156 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3157 else
3158 word = bitpos / BITS_PER_WORD;
3159 bitpos = bitpos % BITS_PER_WORD;
3160 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3161 }
3162
3163 if (bitpos < HOST_BITS_PER_WIDE_INT)
3164 {
3165 hi = 0;
3166 lo = (HOST_WIDE_INT) 1 << bitpos;
3167 }
3168 else
3169 {
3170 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3171 lo = 0;
3172 }
3173
3174 if (target == 0 || target == op0 || target == op1)
3175 target = gen_reg_rtx (mode);
3176
3177 if (nwords > 1)
3178 {
3179 start_sequence ();
3180
3181 for (i = 0; i < nwords; ++i)
3182 {
3183 rtx targ_piece = operand_subword (target, i, 1, mode);
3184 rtx op0_piece = operand_subword_force (op0, i, mode);
3185
3186 if (i == word)
3187 {
3188 if (!op0_is_abs)
3189 op0_piece = expand_binop (imode, and_optab, op0_piece,
3190 immed_double_const (~lo, ~hi, imode),
3191 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3192
3193 op1 = expand_binop (imode, and_optab,
3194 operand_subword_force (op1, i, mode),
3195 immed_double_const (lo, hi, imode),
3196 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3197
3198 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3199 targ_piece, 1, OPTAB_LIB_WIDEN);
3200 if (temp != targ_piece)
3201 emit_move_insn (targ_piece, temp);
3202 }
3203 else
3204 emit_move_insn (targ_piece, op0_piece);
3205 }
3206
3207 insns = get_insns ();
3208 end_sequence ();
3209
3210 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3211 }
3212 else
3213 {
3214 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3215 immed_double_const (lo, hi, imode),
3216 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3217
3218 op0 = gen_lowpart (imode, op0);
3219 if (!op0_is_abs)
3220 op0 = expand_binop (imode, and_optab, op0,
3221 immed_double_const (~lo, ~hi, imode),
3222 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3223
3224 temp = expand_binop (imode, ior_optab, op0, op1,
3225 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3226 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3227 }
3228
3229 return target;
3230 }
3231
3232 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3233 scalar floating point mode. Return NULL if we do not know how to
3234 expand the operation inline. */
3235
3236 rtx
3237 expand_copysign (rtx op0, rtx op1, rtx target)
3238 {
3239 enum machine_mode mode = GET_MODE (op0);
3240 const struct real_format *fmt;
3241 bool op0_is_abs;
3242 rtx temp;
3243
3244 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3245 gcc_assert (GET_MODE (op1) == mode);
3246
3247 /* First try to do it with a special instruction. */
3248 temp = expand_binop (mode, copysign_optab, op0, op1,
3249 target, 0, OPTAB_DIRECT);
3250 if (temp)
3251 return temp;
3252
3253 fmt = REAL_MODE_FORMAT (mode);
3254 if (fmt == NULL || !fmt->has_signed_zero)
3255 return NULL_RTX;
3256
3257 op0_is_abs = false;
3258 if (GET_CODE (op0) == CONST_DOUBLE)
3259 {
3260 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3261 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3262 op0_is_abs = true;
3263 }
3264
3265 if (fmt->signbit_ro >= 0
3266 && (GET_CODE (op0) == CONST_DOUBLE
3267 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3268 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3269 {
3270 temp = expand_copysign_absneg (mode, op0, op1, target,
3271 fmt->signbit_ro, op0_is_abs);
3272 if (temp)
3273 return temp;
3274 }
3275
3276 if (fmt->signbit_rw < 0)
3277 return NULL_RTX;
3278 return expand_copysign_bit (mode, op0, op1, target,
3279 fmt->signbit_rw, op0_is_abs);
3280 }
3281 \f
3282 /* Generate an instruction whose insn-code is INSN_CODE,
3283 with two operands: an output TARGET and an input OP0.
3284 TARGET *must* be nonzero, and the output is always stored there.
3285 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3286 the value that is stored into TARGET. */
3287
3288 void
3289 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3290 {
3291 rtx temp;
3292 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3293 rtx pat;
3294
3295 temp = target;
3296
3297 /* Now, if insn does not accept our operands, put them into pseudos. */
3298
3299 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3300 op0 = copy_to_mode_reg (mode0, op0);
3301
3302 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3303 temp = gen_reg_rtx (GET_MODE (temp));
3304
3305 pat = GEN_FCN (icode) (temp, op0);
3306
3307 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3308 add_equal_note (pat, temp, code, op0, NULL_RTX);
3309
3310 emit_insn (pat);
3311
3312 if (temp != target)
3313 emit_move_insn (target, temp);
3314 }
3315 \f
3316 struct no_conflict_data
3317 {
3318 rtx target, first, insn;
3319 bool must_stay;
3320 };
3321
3322 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3323 Set P->must_stay if the currently examined clobber / store has to stay
3324 in the list of insns that constitute the actual no_conflict block /
3325 libcall block. */
3326 static void
3327 no_conflict_move_test (rtx dest, rtx set, void *p0)
3328 {
3329   struct no_conflict_data *p = p0;
3330
3331   /* If this insn directly contributes to setting the target, it must stay.  */
3332 if (reg_overlap_mentioned_p (p->target, dest))
3333 p->must_stay = true;
3334 /* If we haven't committed to keeping any other insns in the list yet,
3335 there is nothing more to check. */
3336 else if (p->insn == p->first)
3337 return;
3338 /* If this insn sets / clobbers a register that feeds one of the insns
3339 already in the list, this insn has to stay too. */
3340 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3341 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3342 || reg_used_between_p (dest, p->first, p->insn)
3343 /* Likewise if this insn depends on a register set by a previous
3344 insn in the list, or if it sets a result (presumably a hard
3345 register) that is set or clobbered by a previous insn.
3346 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3347 SET_DEST perform the former check on the address, and the latter
3348 check on the MEM. */
3349 || (GET_CODE (set) == SET
3350 && (modified_in_p (SET_SRC (set), p->first)
3351 || modified_in_p (SET_DEST (set), p->first)
3352 || modified_between_p (SET_SRC (set), p->first, p->insn)
3353 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3354 p->must_stay = true;
3355 }
3356
3357 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3358 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3359 is possible to do so. */
3360
3361 static void
3362 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3363 {
3364 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3365 {
3366 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3367 encapsulated region would not be in one basic block, i.e. when
3368 there is a control_flow_insn_p insn between FIRST and LAST. */
3369 bool attach_libcall_retval_notes = true;
3370 rtx insn, next = NEXT_INSN (last);
3371
3372 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3373 if (control_flow_insn_p (insn))
3374 {
3375 attach_libcall_retval_notes = false;
3376 break;
3377 }
3378
3379 if (attach_libcall_retval_notes)
3380 {
3381 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3382 REG_NOTES (first));
3383 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3384 REG_NOTES (last));
3385 }
3386 }
3387 }
3388
3389 /* Emit code to perform a series of operations on a multi-word quantity, one
3390 word at a time.
3391
3392 Such a block is preceded by a CLOBBER of the output, consists of multiple
3393 insns, each setting one word of the output, and followed by a SET copying
3394 the output to itself.
3395
3396 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3397 note indicating that it doesn't conflict with the (also multi-word)
3398 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3399 notes.
3400
3401 INSNS is a block of code generated to perform the operation, not including
3402 the CLOBBER and final copy. All insns that compute intermediate values
3403 are first emitted, followed by the block as described above.
3404
3405 TARGET, OP0, and OP1 are the output and inputs of the operations,
3406 respectively. OP1 may be zero for a unary operation.
3407
3408 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3409 on the last insn.
3410
3411 If TARGET is not a register, INSNS is simply emitted with no special
3412 processing. Likewise if anything in INSNS is not an INSN or if
3413 there is a libcall block inside INSNS.
3414
3415 The final insn emitted is returned. */
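/* Roughly, for a DImode operation on a 32-bit target the encapsulated
   block has the shape

	(clobber (reg:DI T))
	(set (subreg:SI (reg:DI T) 0) ...)   with a REG_NO_CONFLICT note
	(set (subreg:SI (reg:DI T) 4) ...)   with a REG_NO_CONFLICT note
	(set (reg:DI T) (reg:DI T))          with the REG_EQUAL note

   and REG_LIBCALL / REG_RETVAL on the first and last insns (see
   maybe_encapsulate_block).  This is only a sketch; the exact subwords
   and notes depend on the target and on the conditions checked below.  */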
3416
3417 rtx
3418 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3419 {
3420 rtx prev, next, first, last, insn;
3421
3422 if (!REG_P (target) || reload_in_progress)
3423 return emit_insn (insns);
3424 else
3425 for (insn = insns; insn; insn = NEXT_INSN (insn))
3426 if (!NONJUMP_INSN_P (insn)
3427 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3428 return emit_insn (insns);
3429
3430 /* First emit all insns that do not store into words of the output and remove
3431 these from the list. */
3432 for (insn = insns; insn; insn = next)
3433 {
3434 rtx note;
3435 struct no_conflict_data data;
3436
3437 next = NEXT_INSN (insn);
3438
3439       /* Some ports (cris) create libcall regions of their own.  We must
3440 	 avoid any potential nesting of LIBCALLs.  */
3441 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3442 remove_note (insn, note);
3443 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3444 remove_note (insn, note);
3445
3446 data.target = target;
3447 data.first = insns;
3448 data.insn = insn;
3449 data.must_stay = 0;
3450 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3451 if (! data.must_stay)
3452 {
3453 if (PREV_INSN (insn))
3454 NEXT_INSN (PREV_INSN (insn)) = next;
3455 else
3456 insns = next;
3457
3458 if (next)
3459 PREV_INSN (next) = PREV_INSN (insn);
3460
3461 add_insn (insn);
3462 }
3463 }
3464
3465 prev = get_last_insn ();
3466
3467 /* Now write the CLOBBER of the output, followed by the setting of each
3468 of the words, followed by the final copy. */
3469 if (target != op0 && target != op1)
3470 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3471
3472 for (insn = insns; insn; insn = next)
3473 {
3474 next = NEXT_INSN (insn);
3475 add_insn (insn);
3476
3477 if (op1 && REG_P (op1))
3478 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3479 REG_NOTES (insn));
3480
3481 if (op0 && REG_P (op0))
3482 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3483 REG_NOTES (insn));
3484 }
3485
3486 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3487 != CODE_FOR_nothing)
3488 {
3489 last = emit_move_insn (target, target);
3490 if (equiv)
3491 set_unique_reg_note (last, REG_EQUAL, equiv);
3492 }
3493 else
3494 {
3495 last = get_last_insn ();
3496
3497 /* Remove any existing REG_EQUAL note from "last", or else it will
3498 be mistaken for a note referring to the full contents of the
3499 alleged libcall value when found together with the REG_RETVAL
3500 note added below. An existing note can come from an insn
3501 expansion at "last". */
3502 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3503 }
3504
3505 if (prev == 0)
3506 first = get_insns ();
3507 else
3508 first = NEXT_INSN (prev);
3509
3510 maybe_encapsulate_block (first, last, equiv);
3511
3512 return last;
3513 }
3514 \f
3515 /* Emit code to make a call to a constant function or a library call.
3516
3517 INSNS is a list containing all insns emitted in the call.
3518    These insns leave the result in RESULT.  Our job is to copy RESULT
3519 to TARGET, which is logically equivalent to EQUIV.
3520
3521 We first emit any insns that set a pseudo on the assumption that these are
3522 loading constants into registers; doing so allows them to be safely cse'ed
3523 between blocks. Then we emit all the other insns in the block, followed by
3524    an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
3525 note with an operand of EQUIV.
3526
3527 Moving assignments to pseudos outside of the block is done to improve
3528 the generated code, but is not required to generate correct code,
3529 hence being unable to move an assignment is not grounds for not making
3530 a libcall block. There are two reasons why it is safe to leave these
3531 insns inside the block: First, we know that these pseudos cannot be
3532 used in generated RTL outside the block since they are created for
3533 temporary purposes within the block. Second, CSE will not record the
3534 values of anything set inside a libcall block, so we know they must
3535 be dead at the end of the block.
3536
3537 Except for the first group of insns (the ones setting pseudos), the
3538 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
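/* A typical use, as in expand_float below, wraps a single library call:

     start_sequence ();
     value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                      GET_MODE (to), 1, from, GET_MODE (from));
     insns = get_insns ();
     end_sequence ();
     emit_libcall_block (insns, target, value,
                         gen_rtx_FLOAT (GET_MODE (to), from));  */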
3539
3540 void
3541 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3542 {
3543 rtx final_dest = target;
3544 rtx prev, next, first, last, insn;
3545
3546 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3547 into a MEM later. Protect the libcall block from this change. */
3548 if (! REG_P (target) || REG_USERVAR_P (target))
3549 target = gen_reg_rtx (GET_MODE (target));
3550
3551 /* If we're using non-call exceptions, a libcall corresponding to an
3552 operation that may trap may also trap. */
3553 if (flag_non_call_exceptions && may_trap_p (equiv))
3554 {
3555 for (insn = insns; insn; insn = NEXT_INSN (insn))
3556 if (CALL_P (insn))
3557 {
3558 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3559
3560 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3561 remove_note (insn, note);
3562 }
3563 }
3564 else
3565 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3566 reg note to indicate that this call cannot throw or execute a nonlocal
3567 goto (unless there is already a REG_EH_REGION note, in which case
3568 we update it). */
3569 for (insn = insns; insn; insn = NEXT_INSN (insn))
3570 if (CALL_P (insn))
3571 {
3572 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3573
3574 if (note != 0)
3575 XEXP (note, 0) = constm1_rtx;
3576 else
3577 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3578 REG_NOTES (insn));
3579 }
3580
3581 /* First emit all insns that set pseudos. Remove them from the list as
3582 we go. Avoid insns that set pseudos which were referenced in previous
3583 insns. These can be generated by move_by_pieces, for example,
3584 to update an address. Similarly, avoid insns that reference things
3585 set in previous insns. */
3586
3587 for (insn = insns; insn; insn = next)
3588 {
3589 rtx set = single_set (insn);
3590 rtx note;
3591
3592 /* Some ports (cris) create libcall regions of their own. We must
3593 avoid any potential nesting of LIBCALLs. */
3594 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3595 remove_note (insn, note);
3596 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3597 remove_note (insn, note);
3598
3599 next = NEXT_INSN (insn);
3600
3601 if (set != 0 && REG_P (SET_DEST (set))
3602 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3603 {
3604 struct no_conflict_data data;
3605
3606 data.target = const0_rtx;
3607 data.first = insns;
3608 data.insn = insn;
3609 data.must_stay = 0;
3610 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3611 if (! data.must_stay)
3612 {
3613 if (PREV_INSN (insn))
3614 NEXT_INSN (PREV_INSN (insn)) = next;
3615 else
3616 insns = next;
3617
3618 if (next)
3619 PREV_INSN (next) = PREV_INSN (insn);
3620
3621 add_insn (insn);
3622 }
3623 }
3624
3625 /* Some ports use a loop to copy large arguments onto the stack.
3626 Don't move anything outside such a loop. */
3627 if (LABEL_P (insn))
3628 break;
3629 }
3630
3631 prev = get_last_insn ();
3632
3633 /* Write the remaining insns followed by the final copy. */
3634
3635 for (insn = insns; insn; insn = next)
3636 {
3637 next = NEXT_INSN (insn);
3638
3639 add_insn (insn);
3640 }
3641
3642 last = emit_move_insn (target, result);
3643 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3644 != CODE_FOR_nothing)
3645 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3646 else
3647 {
3648 /* Remove any existing REG_EQUAL note from "last", or else it will
3649 be mistaken for a note referring to the full contents of the
3650 libcall value when found together with the REG_RETVAL note added
3651 below. An existing note can come from an insn expansion at
3652 "last". */
3653 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3654 }
3655
3656 if (final_dest != target)
3657 emit_move_insn (final_dest, target);
3658
3659 if (prev == 0)
3660 first = get_insns ();
3661 else
3662 first = NEXT_INSN (prev);
3663
3664 maybe_encapsulate_block (first, last, equiv);
3665 }
3666 \f
3667 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3668 PURPOSE describes how this comparison will be used. CODE is the rtx
3669 comparison code we will be using.
3670
3671 ??? Actually, CODE is slightly weaker than that. A target is still
3672 required to implement all of the normal bcc operations, but not
3673 required to implement all (or any) of the unordered bcc operations. */
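/* For example, can_compare_p (EQ, SImode, ccp_jump) is nonzero when the
   target can compare two SImode values and branch on equality, whether
   through a cmp pattern followed by a bcc branch or through a combined
   cbranch pattern, in SImode or some wider integer mode.  */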
3674
3675 int
3676 can_compare_p (enum rtx_code code, enum machine_mode mode,
3677 enum can_compare_purpose purpose)
3678 {
3679 do
3680 {
3681 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3682 {
3683 if (purpose == ccp_jump)
3684 return bcc_gen_fctn[(int) code] != NULL;
3685 else if (purpose == ccp_store_flag)
3686 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3687 else
3688 /* There's only one cmov entry point, and it's allowed to fail. */
3689 return 1;
3690 }
3691 if (purpose == ccp_jump
3692 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3693 return 1;
3694 if (purpose == ccp_cmov
3695 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3696 return 1;
3697 if (purpose == ccp_store_flag
3698 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3699 return 1;
3700 mode = GET_MODE_WIDER_MODE (mode);
3701 }
3702 while (mode != VOIDmode);
3703
3704 return 0;
3705 }
3706
3707 /* This function is called when we are going to emit a compare instruction that
3708 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3709
3710 *PMODE is the mode of the inputs (in case they are const_int).
3711 *PUNSIGNEDP nonzero says that the operands are unsigned;
3712 this matters if they need to be widened.
3713
3714 If they have mode BLKmode, then SIZE specifies the size of both operands.
3715
3716 This function performs all the setup necessary so that the caller only has
3717 to emit a single comparison insn. This setup can involve doing a BLKmode
3718 comparison or emitting a library call to perform the comparison if no insn
3719 is available to handle it.
3720 The values which are passed in through pointers can be modified; the caller
3721 should perform the comparison on the modified values. Constant
3722 comparisons must have already been folded. */
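/* For example, a BLKmode comparison that cannot use a cmpmem/cmpstr/cmpstrn
   pattern is rewritten below as a call to memcmp: *PX becomes the call's
   result, *PY becomes const0_rtx and *PMODE the mode of integer_type_node,
   so the caller simply compares the memcmp result against zero.  */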
3723
3724 static void
3725 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3726 enum machine_mode *pmode, int *punsignedp,
3727 enum can_compare_purpose purpose)
3728 {
3729 enum machine_mode mode = *pmode;
3730 rtx x = *px, y = *py;
3731 int unsignedp = *punsignedp;
3732
3733 /* If we are optimizing, force expensive constants into a
3734 register. */
3735 if (CONSTANT_P (x) && optimize
3736 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3737 x = force_reg (mode, x);
3738
3739 if (CONSTANT_P (y) && optimize
3740 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3741 y = force_reg (mode, y);
3742
3743 #ifdef HAVE_cc0
3744 /* Make sure we have a canonical comparison. The RTL
3745 documentation states that canonical comparisons are required only
3746 for targets which have cc0. */
3747 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3748 #endif
3749
3750 /* Don't let both operands fail to indicate the mode. */
3751 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3752 x = force_reg (mode, x);
3753
3754 /* Handle all BLKmode compares. */
3755
3756 if (mode == BLKmode)
3757 {
3758 enum machine_mode cmp_mode, result_mode;
3759 enum insn_code cmp_code;
3760 tree length_type;
3761 rtx libfunc;
3762 rtx result;
3763 rtx opalign
3764 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3765
3766 gcc_assert (size);
3767
3768 /* Try to use a memory block compare insn - either cmpstr
3769 or cmpmem will do. */
3770 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3771 cmp_mode != VOIDmode;
3772 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3773 {
3774 cmp_code = cmpmem_optab[cmp_mode];
3775 if (cmp_code == CODE_FOR_nothing)
3776 cmp_code = cmpstr_optab[cmp_mode];
3777 if (cmp_code == CODE_FOR_nothing)
3778 cmp_code = cmpstrn_optab[cmp_mode];
3779 if (cmp_code == CODE_FOR_nothing)
3780 continue;
3781
3782 /* Must make sure the size fits the insn's mode. */
3783 if ((GET_CODE (size) == CONST_INT
3784 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3785 || (GET_MODE_BITSIZE (GET_MODE (size))
3786 > GET_MODE_BITSIZE (cmp_mode)))
3787 continue;
3788
3789 result_mode = insn_data[cmp_code].operand[0].mode;
3790 result = gen_reg_rtx (result_mode);
3791 size = convert_to_mode (cmp_mode, size, 1);
3792 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3793
3794 *px = result;
3795 *py = const0_rtx;
3796 *pmode = result_mode;
3797 return;
3798 }
3799
3800 /* Otherwise call a library function, memcmp. */
3801 libfunc = memcmp_libfunc;
3802 length_type = sizetype;
3803 result_mode = TYPE_MODE (integer_type_node);
3804 cmp_mode = TYPE_MODE (length_type);
3805 size = convert_to_mode (TYPE_MODE (length_type), size,
3806 TYPE_UNSIGNED (length_type));
3807
3808 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3809 result_mode, 3,
3810 XEXP (x, 0), Pmode,
3811 XEXP (y, 0), Pmode,
3812 size, cmp_mode);
3813 *px = result;
3814 *py = const0_rtx;
3815 *pmode = result_mode;
3816 return;
3817 }
3818
3819 /* Don't allow operands to the compare to trap, as that can put the
3820 compare and branch in different basic blocks. */
3821 if (flag_non_call_exceptions)
3822 {
3823 if (may_trap_p (x))
3824 x = force_reg (mode, x);
3825 if (may_trap_p (y))
3826 y = force_reg (mode, y);
3827 }
3828
3829 *px = x;
3830 *py = y;
3831 if (can_compare_p (*pcomparison, mode, purpose))
3832 return;
3833
3834 /* Handle a lib call just for the mode we are using. */
3835
3836 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3837 {
3838 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3839 rtx result;
3840
3841 /* If we want unsigned, and this mode has a distinct unsigned
3842 comparison routine, use that. */
3843 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3844 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3845
3846 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3847 word_mode, 2, x, mode, y, mode);
3848
3849 /* There are two kinds of comparison routines. Biased routines
3850 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3851 of gcc expect that the comparison operation is equivalent
3852 to the modified comparison. For signed comparisons compare the
3853 result against 1 in the biased case, and zero in the unbiased
3854 case. For unsigned comparisons always compare against 1 after
3855 biasing the unbiased result by adding 1. This gives us a way to
3856 represent LTU. */
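/* Concretely, libgcc's __cmpdi2 is biased (it returns 0, 1 or 2 for less,
   equal, greater), so "x < y" becomes "__cmpdi2 (x, y) < 1".  An unbiased
   routine returns -1, 0 or 1; a signed test then compares against zero,
   while an unsigned test adds 1 to the result and compares against 1,
   which keeps LTU representable.  */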
3857 *px = result;
3858 *pmode = word_mode;
3859 *py = const1_rtx;
3860
3861 if (!TARGET_LIB_INT_CMP_BIASED)
3862 {
3863 if (*punsignedp)
3864 *px = plus_constant (result, 1);
3865 else
3866 *py = const0_rtx;
3867 }
3868 return;
3869 }
3870
3871 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3872 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3873 }
3874
3875 /* Before emitting an insn with code ICODE, make sure that X, which is going
3876 to be used for operand OPNUM of the insn, is converted from mode MODE to
3877 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3878 that it is accepted by the operand predicate. Return the new value. */
3879
3880 static rtx
3881 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3882 enum machine_mode wider_mode, int unsignedp)
3883 {
3884 if (mode != wider_mode)
3885 x = convert_modes (wider_mode, mode, x, unsignedp);
3886
3887 if (!insn_data[icode].operand[opnum].predicate
3888 (x, insn_data[icode].operand[opnum].mode))
3889 {
3890 if (no_new_pseudos)
3891 return NULL_RTX;
3892 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3893 }
3894
3895 return x;
3896 }
3897
3898 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3899 we can do the comparison.
3900 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3901 be NULL_RTX which indicates that only a comparison is to be generated. */
3902
3903 static void
3904 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3905 enum rtx_code comparison, int unsignedp, rtx label)
3906 {
3907 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3908 enum mode_class class = GET_MODE_CLASS (mode);
3909 enum machine_mode wider_mode = mode;
3910
3911 /* Try combined insns first. */
3912 do
3913 {
3914 enum insn_code icode;
3915 PUT_MODE (test, wider_mode);
3916
3917 if (label)
3918 {
3919 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3920
3921 if (icode != CODE_FOR_nothing
3922 && insn_data[icode].operand[0].predicate (test, wider_mode))
3923 {
3924 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3925 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3926 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3927 return;
3928 }
3929 }
3930
3931 /* Handle some compares against zero. */
3932 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3933 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3934 {
3935 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3936 emit_insn (GEN_FCN (icode) (x));
3937 if (label)
3938 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3939 return;
3940 }
3941
3942 /* Handle compares for which there is a directly suitable insn. */
3943
3944 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3945 if (icode != CODE_FOR_nothing)
3946 {
3947 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3948 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3949 emit_insn (GEN_FCN (icode) (x, y));
3950 if (label)
3951 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3952 return;
3953 }
3954
3955 if (!CLASS_HAS_WIDER_MODES_P (class))
3956 break;
3957
3958 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3959 }
3960 while (wider_mode != VOIDmode);
3961
3962 gcc_unreachable ();
3963 }
3964
3965 /* Generate code to compare X with Y so that the condition codes are
3966 set and to jump to LABEL if the condition is true. If X is a
3967 constant and Y is not a constant, then the comparison is swapped to
3968 ensure that the comparison RTL has the canonical form.
3969
3970 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3971 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3972 the proper branch condition code.
3973
3974 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3975
3976 MODE is the mode of the inputs (in case they are const_int).
3977
3978 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3979 be passed unchanged to emit_cmp_insn, then potentially converted into an
3980 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
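/* For example, to branch to LABEL when the signed SImode value X is less
   than Y, a caller can write:

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 0, label);  */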
3981
3982 void
3983 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3984 enum machine_mode mode, int unsignedp, rtx label)
3985 {
3986 rtx op0 = x, op1 = y;
3987
3988 /* Swap operands and condition to ensure canonical RTL. */
3989 if (swap_commutative_operands_p (x, y))
3990 {
3991 /* If we're not emitting a branch, this means some caller
3992 is out of sync. */
3993 gcc_assert (label);
3994
3995 op0 = y, op1 = x;
3996 comparison = swap_condition (comparison);
3997 }
3998
3999 #ifdef HAVE_cc0
4000 /* If OP0 is still a constant, then both X and Y must be constants.
4001 Force X into a register to create canonical RTL. */
4002 if (CONSTANT_P (op0))
4003 op0 = force_reg (mode, op0);
4004 #endif
4005
4006 if (unsignedp)
4007 comparison = unsigned_condition (comparison);
4008
4009 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4010 ccp_jump);
4011 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4012 }
4013
4014 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4015
4016 void
4017 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4018 enum machine_mode mode, int unsignedp)
4019 {
4020 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4021 }
4022 \f
4023 /* Emit a library call comparison between floating point X and Y.
4024 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4025
4026 static void
4027 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4028 enum machine_mode *pmode, int *punsignedp)
4029 {
4030 enum rtx_code comparison = *pcomparison;
4031 enum rtx_code swapped = swap_condition (comparison);
4032 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4033 rtx x = *px;
4034 rtx y = *py;
4035 enum machine_mode orig_mode = GET_MODE (x);
4036 enum machine_mode mode;
4037 rtx value, target, insns, equiv;
4038 rtx libfunc = 0;
4039 bool reversed_p = false;
4040
4041 for (mode = orig_mode;
4042 mode != VOIDmode;
4043 mode = GET_MODE_WIDER_MODE (mode))
4044 {
4045 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4046 break;
4047
4048 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4049 {
4050 rtx tmp;
4051 tmp = x; x = y; y = tmp;
4052 comparison = swapped;
4053 break;
4054 }
4055
4056 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4057 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4058 {
4059 comparison = reversed;
4060 reversed_p = true;
4061 break;
4062 }
4063 }
4064
4065 gcc_assert (mode != VOIDmode);
4066
4067 if (mode != orig_mode)
4068 {
4069 x = convert_to_mode (mode, x, 0);
4070 y = convert_to_mode (mode, y, 0);
4071 }
4072
4073 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4074 the RTL. This allows the RTL optimizers to delete the libcall if the
4075 condition can be determined at compile-time. */
4076 if (comparison == UNORDERED)
4077 {
4078 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4079 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4080 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4081 temp, const_true_rtx, equiv);
4082 }
4083 else
4084 {
4085 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4086 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4087 {
4088 rtx true_rtx, false_rtx;
4089
4090 switch (comparison)
4091 {
4092 case EQ:
4093 true_rtx = const0_rtx;
4094 false_rtx = const_true_rtx;
4095 break;
4096
4097 case NE:
4098 true_rtx = const_true_rtx;
4099 false_rtx = const0_rtx;
4100 break;
4101
4102 case GT:
4103 true_rtx = const1_rtx;
4104 false_rtx = const0_rtx;
4105 break;
4106
4107 case GE:
4108 true_rtx = const0_rtx;
4109 false_rtx = constm1_rtx;
4110 break;
4111
4112 case LT:
4113 true_rtx = constm1_rtx;
4114 false_rtx = const0_rtx;
4115 break;
4116
4117 case LE:
4118 true_rtx = const0_rtx;
4119 false_rtx = const1_rtx;
4120 break;
4121
4122 default:
4123 gcc_unreachable ();
4124 }
4125 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4126 equiv, true_rtx, false_rtx);
4127 }
4128 }
4129
4130 start_sequence ();
4131 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4132 word_mode, 2, x, mode, y, mode);
4133 insns = get_insns ();
4134 end_sequence ();
4135
4136 target = gen_reg_rtx (word_mode);
4137 emit_libcall_block (insns, target, value, equiv);
4138
4139 if (comparison == UNORDERED
4140 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4141 comparison = reversed_p ? EQ : NE;
4142
4143 *px = target;
4144 *py = const0_rtx;
4145 *pmode = word_mode;
4146 *pcomparison = comparison;
4147 *punsignedp = 0;
4148 }
4149 \f
4150 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4151
4152 void
4153 emit_indirect_jump (rtx loc)
4154 {
4155 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4156 (loc, Pmode))
4157 loc = copy_to_mode_reg (Pmode, loc);
4158
4159 emit_jump_insn (gen_indirect_jump (loc));
4160 emit_barrier ();
4161 }
4162 \f
4163 #ifdef HAVE_conditional_move
4164
4165 /* Emit a conditional move instruction if the machine supports one for that
4166 condition and machine mode.
4167
4168 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4169 the mode to use should they be constants. If it is VOIDmode, they cannot
4170 both be constants.
4171
4172 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4173 should be stored there. MODE is the mode to use should they be constants.
4174 If it is VOIDmode, they cannot both be constants.
4175
4176 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4177 is not supported. */
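/* For example (a sketch, assuming SImode operands A, B, C and D),
   "target = (a < b) ? c : d" can be attempted with:

     rtx res = emit_conditional_move (target, LT, a, b, SImode,
                                      c, d, SImode, 0);

   and the caller must fall back to a branch sequence if RES is NULL_RTX.  */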
4178
4179 rtx
4180 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4181 enum machine_mode cmode, rtx op2, rtx op3,
4182 enum machine_mode mode, int unsignedp)
4183 {
4184 rtx tem, subtarget, comparison, insn;
4185 enum insn_code icode;
4186 enum rtx_code reversed;
4187
4188 /* If one operand is constant, make it the second one. Only do this
4189 if the other operand is not constant as well. */
4190
4191 if (swap_commutative_operands_p (op0, op1))
4192 {
4193 tem = op0;
4194 op0 = op1;
4195 op1 = tem;
4196 code = swap_condition (code);
4197 }
4198
4199 /* get_condition will prefer to generate LT and GT even if the old
4200 comparison was against zero, so undo that canonicalization here since
4201 comparisons against zero are cheaper. */
4202 if (code == LT && op1 == const1_rtx)
4203 code = LE, op1 = const0_rtx;
4204 else if (code == GT && op1 == constm1_rtx)
4205 code = GE, op1 = const0_rtx;
4206
4207 if (cmode == VOIDmode)
4208 cmode = GET_MODE (op0);
4209
4210 if (swap_commutative_operands_p (op2, op3)
4211 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4212 != UNKNOWN))
4213 {
4214 tem = op2;
4215 op2 = op3;
4216 op3 = tem;
4217 code = reversed;
4218 }
4219
4220 if (mode == VOIDmode)
4221 mode = GET_MODE (op2);
4222
4223 icode = movcc_gen_code[mode];
4224
4225 if (icode == CODE_FOR_nothing)
4226 return 0;
4227
4228 if (!target)
4229 target = gen_reg_rtx (mode);
4230
4231 subtarget = target;
4232
4233 /* If the insn doesn't accept these operands, put them in pseudos. */
4234
4235 if (!insn_data[icode].operand[0].predicate
4236 (subtarget, insn_data[icode].operand[0].mode))
4237 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4238
4239 if (!insn_data[icode].operand[2].predicate
4240 (op2, insn_data[icode].operand[2].mode))
4241 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4242
4243 if (!insn_data[icode].operand[3].predicate
4244 (op3, insn_data[icode].operand[3].mode))
4245 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4246
4247 /* Everything should now be in the suitable form, so emit the compare insn
4248 and then the conditional move. */
4249
4250 comparison
4251 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4252
4253 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4254 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4255 return NULL and let the caller figure out how best to deal with this
4256 situation. */
4257 if (GET_CODE (comparison) != code)
4258 return NULL_RTX;
4259
4260 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4261
4262 /* If that failed, then give up. */
4263 if (insn == 0)
4264 return 0;
4265
4266 emit_insn (insn);
4267
4268 if (subtarget != target)
4269 convert_move (target, subtarget, 0);
4270
4271 return target;
4272 }
4273
4274 /* Return nonzero if a conditional move of mode MODE is supported.
4275
4276 This function is for combine so it can tell whether an insn that looks
4277 like a conditional move is actually supported by the hardware. If we
4278 guess wrong we lose a bit on optimization, but that's it. */
4279 /* ??? sparc64 supports conditionally moving integer values based on fp
4280 comparisons, and vice versa. How do we handle them? */
4281
4282 int
4283 can_conditionally_move_p (enum machine_mode mode)
4284 {
4285 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4286 return 1;
4287
4288 return 0;
4289 }
4290
4291 #endif /* HAVE_conditional_move */
4292
4293 /* Emit a conditional addition instruction if the machine supports one for that
4294 condition and machine mode.
4295
4296 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4297 the mode to use should they be constants. If it is VOIDmode, they cannot
4298 both be constants.
4299
4300 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4301 should be stored there. MODE is the mode to use should they be constants.
4302 If it is VOIDmode, they cannot both be constants.
4303
4304 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4305 is not supported. */
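/* For example (a sketch, assuming SImode operands A, B, C and D),
   "target = (a < b) ? c : c + d" can be attempted with:

     rtx res = emit_conditional_add (target, LT, a, b, SImode,
                                     c, d, SImode, 0);

   again checking for a NULL_RTX return.  */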
4306
4307 rtx
4308 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4309 enum machine_mode cmode, rtx op2, rtx op3,
4310 enum machine_mode mode, int unsignedp)
4311 {
4312 rtx tem, subtarget, comparison, insn;
4313 enum insn_code icode;
4314 enum rtx_code reversed;
4315
4316 /* If one operand is constant, make it the second one. Only do this
4317 if the other operand is not constant as well. */
4318
4319 if (swap_commutative_operands_p (op0, op1))
4320 {
4321 tem = op0;
4322 op0 = op1;
4323 op1 = tem;
4324 code = swap_condition (code);
4325 }
4326
4327 /* get_condition will prefer to generate LT and GT even if the old
4328 comparison was against zero, so undo that canonicalization here since
4329 comparisons against zero are cheaper. */
4330 if (code == LT && op1 == const1_rtx)
4331 code = LE, op1 = const0_rtx;
4332 else if (code == GT && op1 == constm1_rtx)
4333 code = GE, op1 = const0_rtx;
4334
4335 if (cmode == VOIDmode)
4336 cmode = GET_MODE (op0);
4337
4338 if (swap_commutative_operands_p (op2, op3)
4339 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4340 != UNKNOWN))
4341 {
4342 tem = op2;
4343 op2 = op3;
4344 op3 = tem;
4345 code = reversed;
4346 }
4347
4348 if (mode == VOIDmode)
4349 mode = GET_MODE (op2);
4350
4351 icode = addcc_optab->handlers[(int) mode].insn_code;
4352
4353 if (icode == CODE_FOR_nothing)
4354 return 0;
4355
4356 if (!target)
4357 target = gen_reg_rtx (mode);
4358
4359 /* If the insn doesn't accept these operands, put them in pseudos. */
4360
4361 if (!insn_data[icode].operand[0].predicate
4362 (target, insn_data[icode].operand[0].mode))
4363 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4364 else
4365 subtarget = target;
4366
4367 if (!insn_data[icode].operand[2].predicate
4368 (op2, insn_data[icode].operand[2].mode))
4369 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4370
4371 if (!insn_data[icode].operand[3].predicate
4372 (op3, insn_data[icode].operand[3].mode))
4373 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4374
4375 /* Everything should now be in the suitable form, so emit the compare insn
4376 and then the conditional move. */
4377
4378 comparison
4379 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4380
4381 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4382 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4383 return NULL and let the caller figure out how best to deal with this
4384 situation. */
4385 if (GET_CODE (comparison) != code)
4386 return NULL_RTX;
4387
4388 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4389
4390 /* If that failed, then give up. */
4391 if (insn == 0)
4392 return 0;
4393
4394 emit_insn (insn);
4395
4396 if (subtarget != target)
4397 convert_move (target, subtarget, 0);
4398
4399 return target;
4400 }
4401 \f
4402 /* These functions attempt to generate an insn body, rather than
4403 emitting the insn, but if the gen function already emits them, we
4404 make no attempt to turn them back into naked patterns. */
4405
4406 /* Generate and return an insn body to add Y to X. */
4407
4408 rtx
4409 gen_add2_insn (rtx x, rtx y)
4410 {
4411 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4412
4413 gcc_assert (insn_data[icode].operand[0].predicate
4414 (x, insn_data[icode].operand[0].mode));
4415 gcc_assert (insn_data[icode].operand[1].predicate
4416 (x, insn_data[icode].operand[1].mode));
4417 gcc_assert (insn_data[icode].operand[2].predicate
4418 (y, insn_data[icode].operand[2].mode));
4419
4420 return GEN_FCN (icode) (x, x, y);
4421 }
4422
4423 /* Generate and return an insn body to add r1 and c,
4424 storing the result in r0. */
4425 rtx
4426 gen_add3_insn (rtx r0, rtx r1, rtx c)
4427 {
4428 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4429
4430 if (icode == CODE_FOR_nothing
4431 || !(insn_data[icode].operand[0].predicate
4432 (r0, insn_data[icode].operand[0].mode))
4433 || !(insn_data[icode].operand[1].predicate
4434 (r1, insn_data[icode].operand[1].mode))
4435 || !(insn_data[icode].operand[2].predicate
4436 (c, insn_data[icode].operand[2].mode)))
4437 return NULL_RTX;
4438
4439 return GEN_FCN (icode) (r0, r1, c);
4440 }
4441
4442 int
4443 have_add2_insn (rtx x, rtx y)
4444 {
4445 int icode;
4446
4447 gcc_assert (GET_MODE (x) != VOIDmode);
4448
4449 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4450
4451 if (icode == CODE_FOR_nothing)
4452 return 0;
4453
4454 if (!(insn_data[icode].operand[0].predicate
4455 (x, insn_data[icode].operand[0].mode))
4456 || !(insn_data[icode].operand[1].predicate
4457 (x, insn_data[icode].operand[1].mode))
4458 || !(insn_data[icode].operand[2].predicate
4459 (y, insn_data[icode].operand[2].mode)))
4460 return 0;
4461
4462 return 1;
4463 }
4464
4465 /* Generate and return an insn body to subtract Y from X. */
4466
4467 rtx
4468 gen_sub2_insn (rtx x, rtx y)
4469 {
4470 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4471
4472 gcc_assert (insn_data[icode].operand[0].predicate
4473 (x, insn_data[icode].operand[0].mode));
4474 gcc_assert (insn_data[icode].operand[1].predicate
4475 (x, insn_data[icode].operand[1].mode));
4476 gcc_assert (insn_data[icode].operand[2].predicate
4477 (y, insn_data[icode].operand[2].mode));
4478
4479 return GEN_FCN (icode) (x, x, y);
4480 }
4481
4482 /* Generate and return an insn body to subtract c from r1,
4483 storing the result in r0. */
4484 rtx
4485 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4486 {
4487 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4488
4489 if (icode == CODE_FOR_nothing
4490 || !(insn_data[icode].operand[0].predicate
4491 (r0, insn_data[icode].operand[0].mode))
4492 || !(insn_data[icode].operand[1].predicate
4493 (r1, insn_data[icode].operand[1].mode))
4494 || !(insn_data[icode].operand[2].predicate
4495 (c, insn_data[icode].operand[2].mode)))
4496 return NULL_RTX;
4497
4498 return GEN_FCN (icode) (r0, r1, c);
4499 }
4500
4501 int
4502 have_sub2_insn (rtx x, rtx y)
4503 {
4504 int icode;
4505
4506 gcc_assert (GET_MODE (x) != VOIDmode);
4507
4508 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4509
4510 if (icode == CODE_FOR_nothing)
4511 return 0;
4512
4513 if (!(insn_data[icode].operand[0].predicate
4514 (x, insn_data[icode].operand[0].mode))
4515 || !(insn_data[icode].operand[1].predicate
4516 (x, insn_data[icode].operand[1].mode))
4517 || !(insn_data[icode].operand[2].predicate
4518 (y, insn_data[icode].operand[2].mode)))
4519 return 0;
4520
4521 return 1;
4522 }
4523
4524 /* Generate the body of an instruction to copy Y into X.
4525 It may be a list of insns, if one insn isn't enough. */
4526
4527 rtx
4528 gen_move_insn (rtx x, rtx y)
4529 {
4530 rtx seq;
4531
4532 start_sequence ();
4533 emit_move_insn_1 (x, y);
4534 seq = get_insns ();
4535 end_sequence ();
4536 return seq;
4537 }
4538 \f
4539 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4540 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4541 no such operation exists, CODE_FOR_nothing will be returned. */
4542
4543 enum insn_code
4544 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4545 int unsignedp)
4546 {
4547 convert_optab tab;
4548 #ifdef HAVE_ptr_extend
4549 if (unsignedp < 0)
4550 return CODE_FOR_ptr_extend;
4551 #endif
4552
4553 tab = unsignedp ? zext_optab : sext_optab;
4554 return tab->handlers[to_mode][from_mode].insn_code;
4555 }
4556
4557 /* Generate the body of an insn to extend Y (with mode MFROM)
4558 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4559
4560 rtx
4561 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4562 enum machine_mode mfrom, int unsignedp)
4563 {
4564 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4565 return GEN_FCN (icode) (x, y);
4566 }
4567 \f
4568 /* can_fix_p and can_float_p say whether the target machine
4569 can directly convert a given fixed point type to
4570 a given floating point type, or vice versa.
4571 The returned value is the CODE_FOR_... value to use,
4572 or CODE_FOR_nothing if these modes cannot be directly converted.
4573
4574 *TRUNCP_PTR is set to 1 if it is necessary to output
4575 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4576
4577 static enum insn_code
4578 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4579 int unsignedp, int *truncp_ptr)
4580 {
4581 convert_optab tab;
4582 enum insn_code icode;
4583
4584 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4585 icode = tab->handlers[fixmode][fltmode].insn_code;
4586 if (icode != CODE_FOR_nothing)
4587 {
4588 *truncp_ptr = 0;
4589 return icode;
4590 }
4591
4592 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4593 for this to work. We need to rework the fix* and ftrunc* patterns
4594 and documentation. */
4595 tab = unsignedp ? ufix_optab : sfix_optab;
4596 icode = tab->handlers[fixmode][fltmode].insn_code;
4597 if (icode != CODE_FOR_nothing
4598 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4599 {
4600 *truncp_ptr = 1;
4601 return icode;
4602 }
4603
4604 *truncp_ptr = 0;
4605 return CODE_FOR_nothing;
4606 }
4607
4608 static enum insn_code
4609 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4610 int unsignedp)
4611 {
4612 convert_optab tab;
4613
4614 tab = unsignedp ? ufloat_optab : sfloat_optab;
4615 return tab->handlers[fltmode][fixmode].insn_code;
4616 }
4617 \f
4618 /* Generate code to convert FROM to floating point
4619 and store in TO. FROM must be fixed point and not VOIDmode.
4620 UNSIGNEDP nonzero means regard FROM as unsigned.
4621 Normally this is done by correcting the final value
4622 if it is negative. */
4623
4624 void
4625 expand_float (rtx to, rtx from, int unsignedp)
4626 {
4627 enum insn_code icode;
4628 rtx target = to;
4629 enum machine_mode fmode, imode;
4630 bool can_do_signed = false;
4631
4632 /* Crash now, because we won't be able to decide which mode to use. */
4633 gcc_assert (GET_MODE (from) != VOIDmode);
4634
4635 /* Look for an insn to do the conversion. Do it in the specified
4636 modes if possible; otherwise convert either input, output or both to
4637 wider mode. If the integer mode is wider than the mode of FROM,
4638 we can do the conversion signed even if the input is unsigned. */
4639
4640 for (fmode = GET_MODE (to); fmode != VOIDmode;
4641 fmode = GET_MODE_WIDER_MODE (fmode))
4642 for (imode = GET_MODE (from); imode != VOIDmode;
4643 imode = GET_MODE_WIDER_MODE (imode))
4644 {
4645 int doing_unsigned = unsignedp;
4646
4647 if (fmode != GET_MODE (to)
4648 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4649 continue;
4650
4651 icode = can_float_p (fmode, imode, unsignedp);
4652 if (icode == CODE_FOR_nothing && unsignedp)
4653 {
4654 enum insn_code scode = can_float_p (fmode, imode, 0);
4655 if (scode != CODE_FOR_nothing)
4656 can_do_signed = true;
4657 if (imode != GET_MODE (from))
4658 icode = scode, doing_unsigned = 0;
4659 }
4660
4661 if (icode != CODE_FOR_nothing)
4662 {
4663 if (imode != GET_MODE (from))
4664 from = convert_to_mode (imode, from, unsignedp);
4665
4666 if (fmode != GET_MODE (to))
4667 target = gen_reg_rtx (fmode);
4668
4669 emit_unop_insn (icode, target, from,
4670 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4671
4672 if (target != to)
4673 convert_move (to, target, 0);
4674 return;
4675 }
4676 }
4677
4678 /* Unsigned integer, and no way to convert directly. For binary
4679 floating point modes, convert as signed, then conditionally adjust
4680 the result. */
4681 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4682 {
4683 rtx label = gen_label_rtx ();
4684 rtx temp;
4685 REAL_VALUE_TYPE offset;
4686
4687 /* Look for a usable floating mode FMODE wider than the source and at
4688 least as wide as the target. Using FMODE will avoid rounding woes
4689 with unsigned values greater than the signed maximum value. */
4690
4691 for (fmode = GET_MODE (to); fmode != VOIDmode;
4692 fmode = GET_MODE_WIDER_MODE (fmode))
4693 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4694 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4695 break;
4696
4697 if (fmode == VOIDmode)
4698 {
4699 /* There is no such mode. Pretend the target is wide enough. */
4700 fmode = GET_MODE (to);
4701
4702 /* Avoid double-rounding when TO is narrower than FROM. */
4703 if ((significand_size (fmode) + 1)
4704 < GET_MODE_BITSIZE (GET_MODE (from)))
4705 {
4706 rtx temp1;
4707 rtx neglabel = gen_label_rtx ();
4708
4709 /* Don't use TARGET if it isn't a register, is a hard register,
4710 or is the wrong mode. */
4711 if (!REG_P (target)
4712 || REGNO (target) < FIRST_PSEUDO_REGISTER
4713 || GET_MODE (target) != fmode)
4714 target = gen_reg_rtx (fmode);
4715
4716 imode = GET_MODE (from);
4717 do_pending_stack_adjust ();
4718
4719 /* Test whether the sign bit is set. */
4720 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4721 0, neglabel);
4722
4723 /* The sign bit is not set. Convert as signed. */
4724 expand_float (target, from, 0);
4725 emit_jump_insn (gen_jump (label));
4726 emit_barrier ();
4727
4728 /* The sign bit is set.
4729 Convert to a usable (positive signed) value by shifting right
4730 one bit, while remembering if a nonzero bit was shifted
4731 out; i.e., compute (from & 1) | (from >> 1). */
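/* The OR of the shifted-out bit acts as a sticky bit: at this point the
   value has more bits than the significand can hold, so that bit can only
   influence rounding, and doubling the result afterwards is exact.  */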
4732
4733 emit_label (neglabel);
4734 temp = expand_binop (imode, and_optab, from, const1_rtx,
4735 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4736 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4737 NULL_RTX, 1);
4738 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4739 OPTAB_LIB_WIDEN);
4740 expand_float (target, temp, 0);
4741
4742 /* Multiply by 2 to undo the shift above. */
4743 temp = expand_binop (fmode, add_optab, target, target,
4744 target, 0, OPTAB_LIB_WIDEN);
4745 if (temp != target)
4746 emit_move_insn (target, temp);
4747
4748 do_pending_stack_adjust ();
4749 emit_label (label);
4750 goto done;
4751 }
4752 }
4753
4754 /* If we are about to do some arithmetic to correct for an
4755 unsigned operand, do it in a pseudo-register. */
4756
4757 if (GET_MODE (to) != fmode
4758 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4759 target = gen_reg_rtx (fmode);
4760
4761 /* Convert as signed integer to floating. */
4762 expand_float (target, from, 0);
4763
4764 /* If FROM is negative (and therefore TO is negative),
4765 correct its value by 2**bitwidth. */
4766
4767 do_pending_stack_adjust ();
4768 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4769 0, label);
4770
4771
4772 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4773 temp = expand_binop (fmode, add_optab, target,
4774 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4775 target, 0, OPTAB_LIB_WIDEN);
4776 if (temp != target)
4777 emit_move_insn (target, temp);
4778
4779 do_pending_stack_adjust ();
4780 emit_label (label);
4781 goto done;
4782 }
4783
4784 /* No hardware instruction available; call a library routine. */
4785 {
4786 rtx libfunc;
4787 rtx insns;
4788 rtx value;
4789 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4790
4791 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4792 from = convert_to_mode (SImode, from, unsignedp);
4793
4794 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4795 gcc_assert (libfunc);
4796
4797 start_sequence ();
4798
4799 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4800 GET_MODE (to), 1, from,
4801 GET_MODE (from));
4802 insns = get_insns ();
4803 end_sequence ();
4804
4805 emit_libcall_block (insns, target, value,
4806 gen_rtx_FLOAT (GET_MODE (to), from));
4807 }
4808
4809 done:
4810
4811 /* Copy result to requested destination
4812 if we have been computing in a temp location. */
4813
4814 if (target != to)
4815 {
4816 if (GET_MODE (target) == GET_MODE (to))
4817 emit_move_insn (to, target);
4818 else
4819 convert_move (to, target, 0);
4820 }
4821 }
4822 \f
4823 /* Generate code to convert FROM to fixed point and store in TO. FROM
4824 must be floating point. */
4825
4826 void
4827 expand_fix (rtx to, rtx from, int unsignedp)
4828 {
4829 enum insn_code icode;
4830 rtx target = to;
4831 enum machine_mode fmode, imode;
4832 int must_trunc = 0;
4833
4834 /* We first try to find a pair of modes, one real and one integer, at
4835 least as wide as FROM and TO, respectively, in which we can open-code
4836 this conversion. If the integer mode is wider than the mode of TO,
4837 we can do the conversion either signed or unsigned. */
4838
4839 for (fmode = GET_MODE (from); fmode != VOIDmode;
4840 fmode = GET_MODE_WIDER_MODE (fmode))
4841 for (imode = GET_MODE (to); imode != VOIDmode;
4842 imode = GET_MODE_WIDER_MODE (imode))
4843 {
4844 int doing_unsigned = unsignedp;
4845
4846 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4847 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4848 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4849
4850 if (icode != CODE_FOR_nothing)
4851 {
4852 if (fmode != GET_MODE (from))
4853 from = convert_to_mode (fmode, from, 0);
4854
4855 if (must_trunc)
4856 {
4857 rtx temp = gen_reg_rtx (GET_MODE (from));
4858 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4859 temp, 0);
4860 }
4861
4862 if (imode != GET_MODE (to))
4863 target = gen_reg_rtx (imode);
4864
4865 emit_unop_insn (icode, target, from,
4866 doing_unsigned ? UNSIGNED_FIX : FIX);
4867 if (target != to)
4868 convert_move (to, target, unsignedp);
4869 return;
4870 }
4871 }
4872
4873 /* For an unsigned conversion, there is one more way to do it.
4874 If we have a signed conversion, we generate code that compares
4875 the real value to the largest representable positive number. If it
4876 is smaller, the conversion is done normally. Otherwise, subtract
4877 one plus the highest signed number, convert, and add it back.
4878
4879 We only need to check all real modes, since we know we didn't find
4880 anything with a wider integer mode.
4881
4882 This code used to extend the FP value into a mode wider than the
4883 destination. This is not needed. Consider, for instance, conversion
4884 from SFmode into DImode.
4885 
4886 The hot path through the code deals with inputs smaller than 2^63
4887 and does just the conversion, so there are no bits to lose.
4888 
4889 On the other path we know the value is positive and in the range
4890 2^63..2^64-1 inclusive (for any other input, overflow happens and the
4891 result is undefined). So we know that the most significant bit set in
4892 the mantissa corresponds to 2^63. The subtraction of 2^63 should not
4893 generate any rounding, as it simply clears out that bit. The rest is trivial. */
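/* For example, with a 64-bit TO (DImode, on hosts where it fits in a
   HOST_WIDE_INT) LIMIT below is 2^63: inputs below it take the plain
   signed fix, while inputs in 2^63..2^64-1 are reduced by 2^63, fixed,
   and then have bit 63 put back with the XOR, which is the same as
   adding 2^63 to the nonnegative intermediate result.  */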
4894
4895 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4896 for (fmode = GET_MODE (from); fmode != VOIDmode;
4897 fmode = GET_MODE_WIDER_MODE (fmode))
4898 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4899 &must_trunc))
4900 {
4901 int bitsize;
4902 REAL_VALUE_TYPE offset;
4903 rtx limit, lab1, lab2, insn;
4904
4905 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4906 real_2expN (&offset, bitsize - 1);
4907 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4908 lab1 = gen_label_rtx ();
4909 lab2 = gen_label_rtx ();
4910
4911 if (fmode != GET_MODE (from))
4912 from = convert_to_mode (fmode, from, 0);
4913
4914 /* See if we need to do the subtraction. */
4915 do_pending_stack_adjust ();
4916 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4917 0, lab1);
4918
4919 /* If not, do the signed "fix" and branch around fixup code. */
4920 expand_fix (to, from, 0);
4921 emit_jump_insn (gen_jump (lab2));
4922 emit_barrier ();
4923
4924 /* Otherwise, subtract 2**(N-1), convert to signed number,
4925 then add 2**(N-1). Do the addition using XOR since this
4926 will often generate better code. */
4927 emit_label (lab1);
4928 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4929 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4930 expand_fix (to, target, 0);
4931 target = expand_binop (GET_MODE (to), xor_optab, to,
4932 gen_int_mode
4933 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4934 GET_MODE (to)),
4935 to, 1, OPTAB_LIB_WIDEN);
4936
4937 if (target != to)
4938 emit_move_insn (to, target);
4939
4940 emit_label (lab2);
4941
4942 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4943 != CODE_FOR_nothing)
4944 {
4945 /* Make a place for a REG_NOTE and add it. */
4946 insn = emit_move_insn (to, to);
4947 set_unique_reg_note (insn,
4948 REG_EQUAL,
4949 gen_rtx_fmt_e (UNSIGNED_FIX,
4950 GET_MODE (to),
4951 copy_rtx (from)));
4952 }
4953
4954 return;
4955 }
4956
4957 /* We can't do it with an insn, so use a library call. But first ensure
4958 that the mode of TO is at least as wide as SImode, since those are the
4959 only library calls we know about. */
4960
4961 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4962 {
4963 target = gen_reg_rtx (SImode);
4964
4965 expand_fix (target, from, unsignedp);
4966 }
4967 else
4968 {
4969 rtx insns;
4970 rtx value;
4971 rtx libfunc;
4972
4973 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4974 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4975 gcc_assert (libfunc);
4976
4977 start_sequence ();
4978
4979 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4980 GET_MODE (to), 1, from,
4981 GET_MODE (from));
4982 insns = get_insns ();
4983 end_sequence ();
4984
4985 emit_libcall_block (insns, target, value,
4986 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4987 GET_MODE (to), from));
4988 }
4989
4990 if (target != to)
4991 {
4992 if (GET_MODE (to) == GET_MODE (target))
4993 emit_move_insn (to, target);
4994 else
4995 convert_move (to, target, 0);
4996 }
4997 }
4998
4999 /* Generate code to convert FROM to fixed point and store in TO. FROM
5000 must be floating point, TO must be signed. Use the conversion optab
5001 TAB to do the conversion. */
5002
5003 bool
5004 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5005 {
5006 enum insn_code icode;
5007 rtx target = to;
5008 enum machine_mode fmode, imode;
5009
5010 /* We first try to find a pair of modes, one real and one integer, at
5011 least as wide as FROM and TO, respectively, in which we can open-code
5012 this conversion. If the integer mode is wider than the mode of TO,
5013 we can do the conversion either signed or unsigned. */
5014
5015 for (fmode = GET_MODE (from); fmode != VOIDmode;
5016 fmode = GET_MODE_WIDER_MODE (fmode))
5017 for (imode = GET_MODE (to); imode != VOIDmode;
5018 imode = GET_MODE_WIDER_MODE (imode))
5019 {
5020 icode = tab->handlers[imode][fmode].insn_code;
5021 if (icode != CODE_FOR_nothing)
5022 {
5023 if (fmode != GET_MODE (from))
5024 from = convert_to_mode (fmode, from, 0);
5025
5026 if (imode != GET_MODE (to))
5027 target = gen_reg_rtx (imode);
5028
5029 emit_unop_insn (icode, target, from, UNKNOWN);
5030 if (target != to)
5031 convert_move (to, target, 0);
5032 return true;
5033 }
5034 }
5035
5036 return false;
5037 }
5038 \f
5039 /* Report whether we have an instruction to perform the operation
5040 specified by CODE on operands of mode MODE. */
5041 int
5042 have_insn_for (enum rtx_code code, enum machine_mode mode)
5043 {
5044 return (code_to_optab[(int) code] != 0
5045 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5046 != CODE_FOR_nothing));
5047 }
5048
5049 /* Create a blank optab. */
5050 static optab
5051 new_optab (void)
5052 {
5053 int i;
5054 optab op = ggc_alloc (sizeof (struct optab));
5055 for (i = 0; i < NUM_MACHINE_MODES; i++)
5056 {
5057 op->handlers[i].insn_code = CODE_FOR_nothing;
5058 op->handlers[i].libfunc = 0;
5059 }
5060
5061 return op;
5062 }
5063
5064 static convert_optab
5065 new_convert_optab (void)
5066 {
5067 int i, j;
5068 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5069 for (i = 0; i < NUM_MACHINE_MODES; i++)
5070 for (j = 0; j < NUM_MACHINE_MODES; j++)
5071 {
5072 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5073 op->handlers[i][j].libfunc = 0;
5074 }
5075 return op;
5076 }
5077
5078 /* Same, but fill in its code as CODE, and write it into the
5079 code_to_optab table. */
5080 static inline optab
5081 init_optab (enum rtx_code code)
5082 {
5083 optab op = new_optab ();
5084 op->code = code;
5085 code_to_optab[(int) code] = op;
5086 return op;
5087 }
5088
5089 /* Same, but fill in its code as CODE, and do _not_ write it into
5090 the code_to_optab table. */
5091 static inline optab
5092 init_optabv (enum rtx_code code)
5093 {
5094 optab op = new_optab ();
5095 op->code = code;
5096 return op;
5097 }
5098
5099 /* Conversion optabs never go in the code_to_optab table. */
5100 static inline convert_optab
5101 init_convert_optab (enum rtx_code code)
5102 {
5103 convert_optab op = new_convert_optab ();
5104 op->code = code;
5105 return op;
5106 }
5107
5108 /* Initialize the libfunc fields of an entire group of entries in some
5109 optab. Each entry is set equal to a string consisting of a leading
5110 pair of underscores followed by a generic operation name followed by
5111 a mode name (downshifted to lowercase) followed by a single character
5112 representing the number of operands for the given operation (which is
5113 usually one of the characters '2', '3', or '4').
5114
5115 OPTABLE is the table in which libfunc fields are to be initialized.
5116 FIRST_MODE is the first machine mode index in the given optab to
5117 initialize.
5118 LAST_MODE is the last machine mode index in the given optab to
5119 initialize.
5120 OPNAME is the generic (string) name of the operation.
5121 SUFFIX is the character which specifies the number of operands for
5122 the given generic operation.
5123 */
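/* For example, OPNAME "add" with SUFFIX '3' produces "__addsi3" for SImode
   and "__adddi3" for DImode.  */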
5124
5125 static void
5126 init_libfuncs (optab optable, int first_mode, int last_mode,
5127 const char *opname, int suffix)
5128 {
5129 int mode;
5130 unsigned opname_len = strlen (opname);
5131
5132 for (mode = first_mode; (int) mode <= (int) last_mode;
5133 mode = (enum machine_mode) ((int) mode + 1))
5134 {
5135 const char *mname = GET_MODE_NAME (mode);
5136 unsigned mname_len = strlen (mname);
5137 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5138 char *p;
5139 const char *q;
5140
5141 p = libfunc_name;
5142 *p++ = '_';
5143 *p++ = '_';
5144 for (q = opname; *q; )
5145 *p++ = *q++;
5146 for (q = mname; *q; q++)
5147 *p++ = TOLOWER (*q);
5148 *p++ = suffix;
5149 *p = '\0';
5150
5151 optable->handlers[(int) mode].libfunc
5152 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5153 }
5154 }
5155
5156 /* Initialize the libfunc fields of an entire group of entries in some
5157 optab which correspond to all integer mode operations. The parameters
5158 have the same meaning as similarly named ones for the `init_libfuncs'
5159 routine. (See above). */
5160
5161 static void
5162 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5163 {
5164 int maxsize = 2*BITS_PER_WORD;
5165 if (maxsize < LONG_LONG_TYPE_SIZE)
5166 maxsize = LONG_LONG_TYPE_SIZE;
5167 init_libfuncs (optable, word_mode,
5168 mode_for_size (maxsize, MODE_INT, 0),
5169 opname, suffix);
5170 }
5171
5172 /* Initialize the libfunc fields of an entire group of entries in some
5173 optab which correspond to all real mode operations. The parameters
5174 have the same meaning as similarly named ones for the `init_libfuncs'
5175 routine. (See above). */
5176
5177 static void
5178 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5179 {
5180 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5181 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5182 opname, suffix);
5183 }
5184
5185 /* Initialize the libfunc fields of an entire group of entries of an
5186 inter-mode-class conversion optab. The string formation rules are
5187 similar to the ones for init_libfuncs, above, but instead of having
5188 a mode name and an operand count these functions have two mode names
5189 and no operand count. */
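/* For example, OPNAME "float" from SImode to SFmode produces
   "__floatsisf".  */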
5190 static void
5191 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5192 enum mode_class from_class,
5193 enum mode_class to_class)
5194 {
5195 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5196 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5197 size_t opname_len = strlen (opname);
5198 size_t max_mname_len = 0;
5199
5200 enum machine_mode fmode, tmode;
5201 const char *fname, *tname;
5202 const char *q;
5203 char *libfunc_name, *suffix;
5204 char *p;
5205
5206 for (fmode = first_from_mode;
5207 fmode != VOIDmode;
5208 fmode = GET_MODE_WIDER_MODE (fmode))
5209 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5210
5211 for (tmode = first_to_mode;
5212 tmode != VOIDmode;
5213 tmode = GET_MODE_WIDER_MODE (tmode))
5214 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5215
5216 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5217 libfunc_name[0] = '_';
5218 libfunc_name[1] = '_';
5219 memcpy (&libfunc_name[2], opname, opname_len);
5220 suffix = libfunc_name + opname_len + 2;
5221
5222 for (fmode = first_from_mode; fmode != VOIDmode;
5223 fmode = GET_MODE_WIDER_MODE (fmode))
5224 for (tmode = first_to_mode; tmode != VOIDmode;
5225 tmode = GET_MODE_WIDER_MODE (tmode))
5226 {
5227 fname = GET_MODE_NAME (fmode);
5228 tname = GET_MODE_NAME (tmode);
5229
5230 p = suffix;
5231 for (q = fname; *q; p++, q++)
5232 *p = TOLOWER (*q);
5233 for (q = tname; *q; p++, q++)
5234 *p = TOLOWER (*q);
5235
5236 *p = '\0';
5237
5238 tab->handlers[tmode][fmode].libfunc
5239 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5240 p - libfunc_name));
5241 }
5242 }
5243
5244 /* Initialize the libfunc fields of an entire group of entries of an
5245 intra-mode-class conversion optab. The string formation rules are
5246 similar to the ones for init_libfuncs, above. WIDENING says whether
5247 the optab goes from narrow to wide modes or vice versa. These functions
5248 have two mode names _and_ an operand count. */
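/* For example, OPNAME "extend" widening from SFmode to DFmode produces
   "__extendsfdf2", while OPNAME "trunc" in the narrowing direction
   produces "__truncdfsf2".  */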
5249 static void
5250 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5251 enum mode_class class, bool widening)
5252 {
5253 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5254 size_t opname_len = strlen (opname);
5255 size_t max_mname_len = 0;
5256
5257 enum machine_mode nmode, wmode;
5258 const char *nname, *wname;
5259 const char *q;
5260 char *libfunc_name, *suffix;
5261 char *p;
5262
5263 for (nmode = first_mode; nmode != VOIDmode;
5264 nmode = GET_MODE_WIDER_MODE (nmode))
5265 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5266
5267 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5268 libfunc_name[0] = '_';
5269 libfunc_name[1] = '_';
5270 memcpy (&libfunc_name[2], opname, opname_len);
5271 suffix = libfunc_name + opname_len + 2;
5272
5273 for (nmode = first_mode; nmode != VOIDmode;
5274 nmode = GET_MODE_WIDER_MODE (nmode))
5275 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5276 wmode = GET_MODE_WIDER_MODE (wmode))
5277 {
5278 nname = GET_MODE_NAME (nmode);
5279 wname = GET_MODE_NAME (wmode);
5280
5281 p = suffix;
5282 for (q = widening ? nname : wname; *q; p++, q++)
5283 *p = TOLOWER (*q);
5284 for (q = widening ? wname : nname; *q; p++, q++)
5285 *p = TOLOWER (*q);
5286
5287 *p++ = '2';
5288 *p = '\0';
5289
5290 tab->handlers[widening ? wmode : nmode]
5291 [widening ? nmode : wmode].libfunc
5292 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5293 p - libfunc_name));
5294 }
5295 }
5296
5297
5298 rtx
5299 init_one_libfunc (const char *name)
5300 {
5301 rtx symbol;
5302
5303 /* Create a FUNCTION_DECL that can be passed to
5304 targetm.encode_section_info. */
5305 /* ??? We don't have any type information except that this is
5306 a function. Pretend this is "int foo()". */
5307 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5308 build_function_type (integer_type_node, NULL_TREE));
5309 DECL_ARTIFICIAL (decl) = 1;
5310 DECL_EXTERNAL (decl) = 1;
5311 TREE_PUBLIC (decl) = 1;
5312
5313 symbol = XEXP (DECL_RTL (decl), 0);
5314
5315 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5316 are the flags assigned by targetm.encode_section_info. */
5317 SET_SYMBOL_REF_DECL (symbol, 0);
5318
5319 return symbol;
5320 }
5321
5322 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5323 MODE to NAME, which should be either 0 or a string constant. */
5324 void
5325 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5326 {
5327 if (name)
5328 optable->handlers[mode].libfunc = init_one_libfunc (name);
5329 else
5330 optable->handlers[mode].libfunc = 0;
5331 }
5332
5333 /* Call this to reset the function entry for one conversion optab
5334 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5335 either 0 or a string constant. */
5336 void
5337 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5338 enum machine_mode fmode, const char *name)
5339 {
5340 if (name)
5341 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5342 else
5343 optable->handlers[tmode][fmode].libfunc = 0;
5344 }
5345
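/* A minimal sketch of how a backend's TARGET_INIT_LIBFUNCS hook (invoked
   via targetm.init_libfuncs at the end of init_optabs below) might use the
   two setters above.  The hook and libcall names here are hypothetical and
   only illustrate the interface.  */
#if 0
static void
example_init_libfuncs (void)
{
  /* Point signed SImode division at a target-specific routine.  */
  set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");

  /* Remove the default DFmode -> SFmode truncation libcall entirely.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, NULL);
}
#endif
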
5346 /* Call this once to initialize the contents of the optabs
5347 appropriately for the current target machine. */
5348
5349 void
5350 init_optabs (void)
5351 {
5352 unsigned int i;
5353
5354 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5355
5356 for (i = 0; i < NUM_RTX_CODE; i++)
5357 setcc_gen_code[i] = CODE_FOR_nothing;
5358
5359 #ifdef HAVE_conditional_move
5360 for (i = 0; i < NUM_MACHINE_MODES; i++)
5361 movcc_gen_code[i] = CODE_FOR_nothing;
5362 #endif
5363
5364 for (i = 0; i < NUM_MACHINE_MODES; i++)
5365 {
5366 vcond_gen_code[i] = CODE_FOR_nothing;
5367 vcondu_gen_code[i] = CODE_FOR_nothing;
5368 }
5369
5370 add_optab = init_optab (PLUS);
5371 addv_optab = init_optabv (PLUS);
5372 sub_optab = init_optab (MINUS);
5373 subv_optab = init_optabv (MINUS);
5374 smul_optab = init_optab (MULT);
5375 smulv_optab = init_optabv (MULT);
5376 smul_highpart_optab = init_optab (UNKNOWN);
5377 umul_highpart_optab = init_optab (UNKNOWN);
5378 smul_widen_optab = init_optab (UNKNOWN);
5379 umul_widen_optab = init_optab (UNKNOWN);
5380 usmul_widen_optab = init_optab (UNKNOWN);
5381 sdiv_optab = init_optab (DIV);
5382 sdivv_optab = init_optabv (DIV);
5383 sdivmod_optab = init_optab (UNKNOWN);
5384 udiv_optab = init_optab (UDIV);
5385 udivmod_optab = init_optab (UNKNOWN);
5386 smod_optab = init_optab (MOD);
5387 umod_optab = init_optab (UMOD);
5388 fmod_optab = init_optab (UNKNOWN);
5389 remainder_optab = init_optab (UNKNOWN);
5390 ftrunc_optab = init_optab (UNKNOWN);
5391 and_optab = init_optab (AND);
5392 ior_optab = init_optab (IOR);
5393 xor_optab = init_optab (XOR);
5394 ashl_optab = init_optab (ASHIFT);
5395 ashr_optab = init_optab (ASHIFTRT);
5396 lshr_optab = init_optab (LSHIFTRT);
5397 rotl_optab = init_optab (ROTATE);
5398 rotr_optab = init_optab (ROTATERT);
5399 smin_optab = init_optab (SMIN);
5400 smax_optab = init_optab (SMAX);
5401 umin_optab = init_optab (UMIN);
5402 umax_optab = init_optab (UMAX);
5403 pow_optab = init_optab (UNKNOWN);
5404 atan2_optab = init_optab (UNKNOWN);
5405
5406 /* These three have codes assigned exclusively for the sake of
5407 have_insn_for. */
5408 mov_optab = init_optab (SET);
5409 movstrict_optab = init_optab (STRICT_LOW_PART);
5410 cmp_optab = init_optab (COMPARE);
5411
5412 ucmp_optab = init_optab (UNKNOWN);
5413 tst_optab = init_optab (UNKNOWN);
5414
5415 eq_optab = init_optab (EQ);
5416 ne_optab = init_optab (NE);
5417 gt_optab = init_optab (GT);
5418 ge_optab = init_optab (GE);
5419 lt_optab = init_optab (LT);
5420 le_optab = init_optab (LE);
5421 unord_optab = init_optab (UNORDERED);
5422
5423 neg_optab = init_optab (NEG);
5424 negv_optab = init_optabv (NEG);
5425 abs_optab = init_optab (ABS);
5426 absv_optab = init_optabv (ABS);
5427 addcc_optab = init_optab (UNKNOWN);
5428 one_cmpl_optab = init_optab (NOT);
5429 bswap_optab = init_optab (BSWAP);
5430 ffs_optab = init_optab (FFS);
5431 clz_optab = init_optab (CLZ);
5432 ctz_optab = init_optab (CTZ);
5433 popcount_optab = init_optab (POPCOUNT);
5434 parity_optab = init_optab (PARITY);
5435 sqrt_optab = init_optab (SQRT);
5436 floor_optab = init_optab (UNKNOWN);
5437 ceil_optab = init_optab (UNKNOWN);
5438 round_optab = init_optab (UNKNOWN);
5439 btrunc_optab = init_optab (UNKNOWN);
5440 nearbyint_optab = init_optab (UNKNOWN);
5441 rint_optab = init_optab (UNKNOWN);
5442 sincos_optab = init_optab (UNKNOWN);
5443 sin_optab = init_optab (UNKNOWN);
5444 asin_optab = init_optab (UNKNOWN);
5445 cos_optab = init_optab (UNKNOWN);
5446 acos_optab = init_optab (UNKNOWN);
5447 exp_optab = init_optab (UNKNOWN);
5448 exp10_optab = init_optab (UNKNOWN);
5449 exp2_optab = init_optab (UNKNOWN);
5450 expm1_optab = init_optab (UNKNOWN);
5451 ldexp_optab = init_optab (UNKNOWN);
5452 scalb_optab = init_optab (UNKNOWN);
5453 logb_optab = init_optab (UNKNOWN);
5454 ilogb_optab = init_optab (UNKNOWN);
5455 log_optab = init_optab (UNKNOWN);
5456 log10_optab = init_optab (UNKNOWN);
5457 log2_optab = init_optab (UNKNOWN);
5458 log1p_optab = init_optab (UNKNOWN);
5459 tan_optab = init_optab (UNKNOWN);
5460 atan_optab = init_optab (UNKNOWN);
5461 copysign_optab = init_optab (UNKNOWN);
5462
5463 isinf_optab = init_optab (UNKNOWN);
5464
5465 strlen_optab = init_optab (UNKNOWN);
5466 cbranch_optab = init_optab (UNKNOWN);
5467 cmov_optab = init_optab (UNKNOWN);
5468 cstore_optab = init_optab (UNKNOWN);
5469 push_optab = init_optab (UNKNOWN);
5470
5471 reduc_smax_optab = init_optab (UNKNOWN);
5472 reduc_umax_optab = init_optab (UNKNOWN);
5473 reduc_smin_optab = init_optab (UNKNOWN);
5474 reduc_umin_optab = init_optab (UNKNOWN);
5475 reduc_splus_optab = init_optab (UNKNOWN);
5476 reduc_uplus_optab = init_optab (UNKNOWN);
5477
5478 ssum_widen_optab = init_optab (UNKNOWN);
5479 usum_widen_optab = init_optab (UNKNOWN);
5480 sdot_prod_optab = init_optab (UNKNOWN);
5481 udot_prod_optab = init_optab (UNKNOWN);
5482
5483 vec_extract_optab = init_optab (UNKNOWN);
5484 vec_extract_even_optab = init_optab (UNKNOWN);
5485 vec_extract_odd_optab = init_optab (UNKNOWN);
5486 vec_interleave_high_optab = init_optab (UNKNOWN);
5487 vec_interleave_low_optab = init_optab (UNKNOWN);
5488 vec_set_optab = init_optab (UNKNOWN);
5489 vec_init_optab = init_optab (UNKNOWN);
5490 vec_shl_optab = init_optab (UNKNOWN);
5491 vec_shr_optab = init_optab (UNKNOWN);
5492 vec_realign_load_optab = init_optab (UNKNOWN);
5493 movmisalign_optab = init_optab (UNKNOWN);
5494 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5495 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5496 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5497 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5498 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5499 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5500 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5501 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5502 vec_pack_mod_optab = init_optab (UNKNOWN);
5503 vec_pack_usat_optab = init_optab (UNKNOWN);
5504 vec_pack_ssat_optab = init_optab (UNKNOWN);
5505
5506 powi_optab = init_optab (UNKNOWN);
5507
5508 /* Conversions. */
5509 sext_optab = init_convert_optab (SIGN_EXTEND);
5510 zext_optab = init_convert_optab (ZERO_EXTEND);
5511 trunc_optab = init_convert_optab (TRUNCATE);
5512 sfix_optab = init_convert_optab (FIX);
5513 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5514 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5515 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5516 sfloat_optab = init_convert_optab (FLOAT);
5517 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5518 lrint_optab = init_convert_optab (UNKNOWN);
5519 lround_optab = init_convert_optab (UNKNOWN);
5520 lfloor_optab = init_convert_optab (UNKNOWN);
5521 lceil_optab = init_convert_optab (UNKNOWN);
5522
5523 for (i = 0; i < NUM_MACHINE_MODES; i++)
5524 {
5525 movmem_optab[i] = CODE_FOR_nothing;
5526 cmpstr_optab[i] = CODE_FOR_nothing;
5527 cmpstrn_optab[i] = CODE_FOR_nothing;
5528 cmpmem_optab[i] = CODE_FOR_nothing;
5529 setmem_optab[i] = CODE_FOR_nothing;
5530
5531 sync_add_optab[i] = CODE_FOR_nothing;
5532 sync_sub_optab[i] = CODE_FOR_nothing;
5533 sync_ior_optab[i] = CODE_FOR_nothing;
5534 sync_and_optab[i] = CODE_FOR_nothing;
5535 sync_xor_optab[i] = CODE_FOR_nothing;
5536 sync_nand_optab[i] = CODE_FOR_nothing;
5537 sync_old_add_optab[i] = CODE_FOR_nothing;
5538 sync_old_sub_optab[i] = CODE_FOR_nothing;
5539 sync_old_ior_optab[i] = CODE_FOR_nothing;
5540 sync_old_and_optab[i] = CODE_FOR_nothing;
5541 sync_old_xor_optab[i] = CODE_FOR_nothing;
5542 sync_old_nand_optab[i] = CODE_FOR_nothing;
5543 sync_new_add_optab[i] = CODE_FOR_nothing;
5544 sync_new_sub_optab[i] = CODE_FOR_nothing;
5545 sync_new_ior_optab[i] = CODE_FOR_nothing;
5546 sync_new_and_optab[i] = CODE_FOR_nothing;
5547 sync_new_xor_optab[i] = CODE_FOR_nothing;
5548 sync_new_nand_optab[i] = CODE_FOR_nothing;
5549 sync_compare_and_swap[i] = CODE_FOR_nothing;
5550 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5551 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5552 sync_lock_release[i] = CODE_FOR_nothing;
5553
5554 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5555 }
5556
5557 /* Fill in the optabs with the insns we support. */
5558 init_all_optabs ();
5559
5560 /* Initialize the optabs with the names of the library functions. */
5561 init_integral_libfuncs (add_optab, "add", '3');
5562 init_floating_libfuncs (add_optab, "add", '3');
5563 init_integral_libfuncs (addv_optab, "addv", '3');
5564 init_floating_libfuncs (addv_optab, "add", '3');
5565 init_integral_libfuncs (sub_optab, "sub", '3');
5566 init_floating_libfuncs (sub_optab, "sub", '3');
5567 init_integral_libfuncs (subv_optab, "subv", '3');
5568 init_floating_libfuncs (subv_optab, "sub", '3');
5569 init_integral_libfuncs (smul_optab, "mul", '3');
5570 init_floating_libfuncs (smul_optab, "mul", '3');
5571 init_integral_libfuncs (smulv_optab, "mulv", '3');
5572 init_floating_libfuncs (smulv_optab, "mul", '3');
5573 init_integral_libfuncs (sdiv_optab, "div", '3');
5574 init_floating_libfuncs (sdiv_optab, "div", '3');
5575 init_integral_libfuncs (sdivv_optab, "divv", '3');
5576 init_integral_libfuncs (udiv_optab, "udiv", '3');
5577 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5578 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5579 init_integral_libfuncs (smod_optab, "mod", '3');
5580 init_integral_libfuncs (umod_optab, "umod", '3');
5581 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5582 init_integral_libfuncs (and_optab, "and", '3');
5583 init_integral_libfuncs (ior_optab, "ior", '3');
5584 init_integral_libfuncs (xor_optab, "xor", '3');
5585 init_integral_libfuncs (ashl_optab, "ashl", '3');
5586 init_integral_libfuncs (ashr_optab, "ashr", '3');
5587 init_integral_libfuncs (lshr_optab, "lshr", '3');
5588 init_integral_libfuncs (smin_optab, "min", '3');
5589 init_floating_libfuncs (smin_optab, "min", '3');
5590 init_integral_libfuncs (smax_optab, "max", '3');
5591 init_floating_libfuncs (smax_optab, "max", '3');
5592 init_integral_libfuncs (umin_optab, "umin", '3');
5593 init_integral_libfuncs (umax_optab, "umax", '3');
5594 init_integral_libfuncs (neg_optab, "neg", '2');
5595 init_floating_libfuncs (neg_optab, "neg", '2');
5596 init_integral_libfuncs (negv_optab, "negv", '2');
5597 init_floating_libfuncs (negv_optab, "neg", '2');
5598 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5599 init_integral_libfuncs (ffs_optab, "ffs", '2');
5600 init_integral_libfuncs (clz_optab, "clz", '2');
5601 init_integral_libfuncs (ctz_optab, "ctz", '2');
5602 init_integral_libfuncs (popcount_optab, "popcount", '2');
5603 init_integral_libfuncs (parity_optab, "parity", '2');
5604
5605 /* Comparison libcalls for integers MUST come in pairs,
5606 signed/unsigned. */
5607 init_integral_libfuncs (cmp_optab, "cmp", '2');
5608 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5609 init_floating_libfuncs (cmp_optab, "cmp", '2');
5610
5611 /* EQ etc are floating point only. */
5612 init_floating_libfuncs (eq_optab, "eq", '2');
5613 init_floating_libfuncs (ne_optab, "ne", '2');
5614 init_floating_libfuncs (gt_optab, "gt", '2');
5615 init_floating_libfuncs (ge_optab, "ge", '2');
5616 init_floating_libfuncs (lt_optab, "lt", '2');
5617 init_floating_libfuncs (le_optab, "le", '2');
5618 init_floating_libfuncs (unord_optab, "unord", '2');
5619
5620 init_floating_libfuncs (powi_optab, "powi", '2');
5621
5622 /* Conversions. */
5623 init_interclass_conv_libfuncs (sfloat_optab, "float",
5624 MODE_INT, MODE_FLOAT);
5625 init_interclass_conv_libfuncs (sfloat_optab, "float",
5626 MODE_INT, MODE_DECIMAL_FLOAT);
5627 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5628 MODE_INT, MODE_FLOAT);
5629 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5630 MODE_INT, MODE_DECIMAL_FLOAT);
5631 init_interclass_conv_libfuncs (sfix_optab, "fix",
5632 MODE_FLOAT, MODE_INT);
5633 init_interclass_conv_libfuncs (sfix_optab, "fix",
5634 MODE_DECIMAL_FLOAT, MODE_INT);
5635 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5636 MODE_FLOAT, MODE_INT);
5637 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5638 MODE_DECIMAL_FLOAT, MODE_INT);
5639 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5640 MODE_INT, MODE_DECIMAL_FLOAT);
5641 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5642 MODE_INT, MODE_FLOAT);
5643 init_interclass_conv_libfuncs (lround_optab, "lround",
5644 MODE_INT, MODE_FLOAT);
5645 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5646 MODE_INT, MODE_FLOAT);
5647 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5648 MODE_INT, MODE_FLOAT);
5649
5650 /* sext_optab is also used for FLOAT_EXTEND. */
5651 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5652 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5653 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5654 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5655 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5656 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5657 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5658 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5659
5660 /* Explicitly initialize the bswap libfuncs since we need them to be
5661 valid for things other than word_mode. */
5662 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5663 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5664
5665 /* Use cabs for double complex abs, since systems generally have cabs.
5666 Don't define any libcall for float complex, so that cabs will be used. */
5667 if (complex_double_type_node)
5668 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5669 = init_one_libfunc ("cabs");
5670
5671 /* The ffs function operates on `int'. */
5672 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5673 = init_one_libfunc ("ffs");
5674
5675 abort_libfunc = init_one_libfunc ("abort");
5676 memcpy_libfunc = init_one_libfunc ("memcpy");
5677 memmove_libfunc = init_one_libfunc ("memmove");
5678 memcmp_libfunc = init_one_libfunc ("memcmp");
5679 memset_libfunc = init_one_libfunc ("memset");
5680 setbits_libfunc = init_one_libfunc ("__setbits");
5681
5682 #ifndef DONT_USE_BUILTIN_SETJMP
5683 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5684 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5685 #else
5686 setjmp_libfunc = init_one_libfunc ("setjmp");
5687 longjmp_libfunc = init_one_libfunc ("longjmp");
5688 #endif
5689 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5690 unwind_sjlj_unregister_libfunc
5691 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5692
5693 /* For function entry/exit instrumentation. */
5694 profile_function_entry_libfunc
5695 = init_one_libfunc ("__cyg_profile_func_enter");
5696 profile_function_exit_libfunc
5697 = init_one_libfunc ("__cyg_profile_func_exit");
5698
5699 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5700
5701 if (HAVE_conditional_trap)
5702 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5703
5704 /* Allow the target to add more libcalls or rename some, etc. */
5705 targetm.init_libfuncs ();
5706 }
5707
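/* To make the default naming scheme concrete: after the calls above,
   add_optab ends up with libfuncs such as "__addsi3" and "__adddi3" for
   the integer modes and "__addsf3" / "__adddf3" for the floating modes,
   i.e. "__" + opname + lower-case mode name + operand-count suffix,
   unless the target's init_libfuncs hook renames or removes them.  */
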
5708 #ifdef DEBUG
5709
5710 /* Print information about the current contents of the optabs on
5711 STDERR. */
5712
5713 static void
5714 debug_optab_libfuncs (void)
5715 {
5716 int i;
5717 int j;
5718 int k;
5719
5720 /* Dump the arithmetic optabs. */
5721 for (i = 0; i != (int) OTI_MAX; i++)
5722 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5723 {
5724 optab o;
5725 struct optab_handlers *h;
5726
5727 o = optab_table[i];
5728 h = &o->handlers[j];
5729 if (h->libfunc)
5730 {
5731 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5732 fprintf (stderr, "%s\t%s:\t%s\n",
5733 GET_RTX_NAME (o->code),
5734 GET_MODE_NAME (j),
5735 XSTR (h->libfunc, 0));
5736 }
5737 }
5738
5739 /* Dump the conversion optabs. */
5740 for (i = 0; i < (int) COI_MAX; ++i)
5741 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5742 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5743 {
5744 convert_optab o;
5745 struct optab_handlers *h;
5746
5747 o = convert_optab_table[i];
5748 h = &o->handlers[j][k];
5749 if (h->libfunc)
5750 {
5751 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5752 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5753 GET_RTX_NAME (o->code),
5754 GET_MODE_NAME (j),
5755 GET_MODE_NAME (k),
5756 XSTR (h->libfunc, 0));
5757 }
5758 }
5759 }
5760
5761 #endif /* DEBUG */
5762
5763 \f
5764 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5765 CODE. Return 0 on failure. */
5766
5767 rtx
5768 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5769 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5770 {
5771 enum machine_mode mode = GET_MODE (op1);
5772 enum insn_code icode;
5773 rtx insn;
5774
5775 if (!HAVE_conditional_trap)
5776 return 0;
5777
5778 if (mode == VOIDmode)
5779 return 0;
5780
5781 icode = cmp_optab->handlers[(int) mode].insn_code;
5782 if (icode == CODE_FOR_nothing)
5783 return 0;
5784
5785 start_sequence ();
5786 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5787 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5788 if (!op1 || !op2)
5789 {
5790 end_sequence ();
5791 return 0;
5792 }
5793 emit_insn (GEN_FCN (icode) (op1, op2));
5794
5795 PUT_CODE (trap_rtx, code);
5796 gcc_assert (HAVE_conditional_trap);
5797 insn = gen_conditional_trap (trap_rtx, tcode);
5798 if (insn)
5799 {
5800 emit_insn (insn);
5801 insn = get_insns ();
5802 }
5803 end_sequence ();
5804
5805 return insn;
5806 }
5807
5808 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5809 or unsigned operation code. */
5810
5811 static enum rtx_code
5812 get_rtx_code (enum tree_code tcode, bool unsignedp)
5813 {
5814 enum rtx_code code;
5815 switch (tcode)
5816 {
5817 case EQ_EXPR:
5818 code = EQ;
5819 break;
5820 case NE_EXPR:
5821 code = NE;
5822 break;
5823 case LT_EXPR:
5824 code = unsignedp ? LTU : LT;
5825 break;
5826 case LE_EXPR:
5827 code = unsignedp ? LEU : LE;
5828 break;
5829 case GT_EXPR:
5830 code = unsignedp ? GTU : GT;
5831 break;
5832 case GE_EXPR:
5833 code = unsignedp ? GEU : GE;
5834 break;
5835
5836 case UNORDERED_EXPR:
5837 code = UNORDERED;
5838 break;
5839 case ORDERED_EXPR:
5840 code = ORDERED;
5841 break;
5842 case UNLT_EXPR:
5843 code = UNLT;
5844 break;
5845 case UNLE_EXPR:
5846 code = UNLE;
5847 break;
5848 case UNGT_EXPR:
5849 code = UNGT;
5850 break;
5851 case UNGE_EXPR:
5852 code = UNGE;
5853 break;
5854 case UNEQ_EXPR:
5855 code = UNEQ;
5856 break;
5857 case LTGT_EXPR:
5858 code = LTGT;
5859 break;
5860
5861 default:
5862 gcc_unreachable ();
5863 }
5864 return code;
5865 }
5866
5867 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5868 unsigned operators. Do not generate compare instruction. */
5869
5870 static rtx
5871 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5872 {
5873 enum rtx_code rcode;
5874 tree t_op0, t_op1;
5875 rtx rtx_op0, rtx_op1;
5876
5877 /* This is unlikely to trigger: while generating a VEC_COND_EXPR, the
5878 auto vectorizer ensures that the condition is a relational operation. */
5879 gcc_assert (COMPARISON_CLASS_P (cond));
5880
5881 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5882 t_op0 = TREE_OPERAND (cond, 0);
5883 t_op1 = TREE_OPERAND (cond, 1);
5884
5885 /* Expand operands. */
5886 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5887 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5888
5889 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5890 && GET_MODE (rtx_op0) != VOIDmode)
5891 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5892
5893 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5894 && GET_MODE (rtx_op1) != VOIDmode)
5895 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5896
5897 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5898 }
5899
5900 /* Return insn code for VEC_COND_EXPR EXPR. */
5901
5902 static inline enum insn_code
5903 get_vcond_icode (tree expr, enum machine_mode mode)
5904 {
5905 enum insn_code icode = CODE_FOR_nothing;
5906
5907 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5908 icode = vcondu_gen_code[mode];
5909 else
5910 icode = vcond_gen_code[mode];
5911 return icode;
5912 }
5913
5914 /* Return TRUE iff appropriate vector insns are available
5915 for the vector cond expr EXPR in mode VMODE. */
5916
5917 bool
5918 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5919 {
5920 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5921 return false;
5922 return true;
5923 }
5924
5925 /* Generate insns for VEC_COND_EXPR. */
5926
5927 rtx
5928 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5929 {
5930 enum insn_code icode;
5931 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5932 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5933 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5934
5935 icode = get_vcond_icode (vec_cond_expr, mode);
5936 if (icode == CODE_FOR_nothing)
5937 return 0;
5938
5939 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5940 target = gen_reg_rtx (mode);
5941
5942 /* Get comparison rtx. First expand both cond expr operands. */
5943 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5944 unsignedp, icode);
5945 cc_op0 = XEXP (comparison, 0);
5946 cc_op1 = XEXP (comparison, 1);
5947 /* Expand both operands and force them into registers, if required. */
5948 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5949 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5950 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5951 && mode != VOIDmode)
5952 rtx_op1 = force_reg (mode, rtx_op1);
5953
5954 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5955 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5956 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5957 && mode != VOIDmode)
5958 rtx_op2 = force_reg (mode, rtx_op2);
5959
5960 /* Emit instruction! */
5961 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5962 comparison, cc_op0, cc_op1));
5963
5964 return target;
5965 }
5966
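/* Taken together with vector_compare_rtx above, the vcond/vcondu pattern
   is therefore handed its operands as (destination, value-if-true,
   value-if-false, comparison, comparison operand 0, comparison operand 1),
   which is why predicates 1 and 2 are checked here and predicates 4 and 5
   are checked when the comparison is built.  */
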
5967 \f
5968 /* This is an internal subroutine of the other compare_and_swap expanders.
5969 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5970 operation. TARGET is an optional place to store the value result of
5971 the operation. ICODE is the particular instruction to expand. Return
5972 the result of the operation. */
5973
5974 static rtx
5975 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5976 rtx target, enum insn_code icode)
5977 {
5978 enum machine_mode mode = GET_MODE (mem);
5979 rtx insn;
5980
5981 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5982 target = gen_reg_rtx (mode);
5983
5984 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5985 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5986 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5987 old_val = force_reg (mode, old_val);
5988
5989 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5990 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5991 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5992 new_val = force_reg (mode, new_val);
5993
5994 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5995 if (insn == NULL_RTX)
5996 return NULL_RTX;
5997 emit_insn (insn);
5998
5999 return target;
6000 }
6001
6002 /* Expand a compare-and-swap operation and return its value. */
6003
6004 rtx
6005 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6006 {
6007 enum machine_mode mode = GET_MODE (mem);
6008 enum insn_code icode = sync_compare_and_swap[mode];
6009
6010 if (icode == CODE_FOR_nothing)
6011 return NULL_RTX;
6012
6013 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6014 }
6015
6016 /* Expand a compare-and-swap operation and store true into the result if
6017 the operation was successful and false otherwise. Return the result.
6018 Unlike other routines, TARGET is not optional. */
6019
6020 rtx
6021 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6022 {
6023 enum machine_mode mode = GET_MODE (mem);
6024 enum insn_code icode;
6025 rtx subtarget, label0, label1;
6026
6027 /* If the target supports a compare-and-swap pattern that simultaneously
6028 sets some flag for success, then use it. Otherwise use the regular
6029 compare-and-swap and follow that immediately with a compare insn. */
6030 icode = sync_compare_and_swap_cc[mode];
6031 switch (icode)
6032 {
6033 default:
6034 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6035 NULL_RTX, icode);
6036 if (subtarget != NULL_RTX)
6037 break;
6038
6039 /* FALLTHRU */
6040 case CODE_FOR_nothing:
6041 icode = sync_compare_and_swap[mode];
6042 if (icode == CODE_FOR_nothing)
6043 return NULL_RTX;
6044
6045 /* Ensure that if old_val == mem, we're not comparing
6046 against an old value. */
6047 if (MEM_P (old_val))
6048 old_val = force_reg (mode, old_val);
6049
6050 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6051 NULL_RTX, icode);
6052 if (subtarget == NULL_RTX)
6053 return NULL_RTX;
6054
6055 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6056 }
6057
6058 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6059 setcc instruction from the beginning. We don't work too hard here,
6060 but it's nice to not be stupid about initial code gen either. */
6061 if (STORE_FLAG_VALUE == 1)
6062 {
6063 icode = setcc_gen_code[EQ];
6064 if (icode != CODE_FOR_nothing)
6065 {
6066 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6067 rtx insn;
6068
6069 subtarget = target;
6070 if (!insn_data[icode].operand[0].predicate (target, cmode))
6071 subtarget = gen_reg_rtx (cmode);
6072
6073 insn = GEN_FCN (icode) (subtarget);
6074 if (insn)
6075 {
6076 emit_insn (insn);
6077 if (GET_MODE (target) != GET_MODE (subtarget))
6078 {
6079 convert_move (target, subtarget, 1);
6080 subtarget = target;
6081 }
6082 return subtarget;
6083 }
6084 }
6085 }
6086
6087 /* Without an appropriate setcc instruction, use a set of branches to
6088 get 1 and 0 stored into target. Presumably if the target has a
6089 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6090
6091 label0 = gen_label_rtx ();
6092 label1 = gen_label_rtx ();
6093
6094 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6095 emit_move_insn (target, const0_rtx);
6096 emit_jump_insn (gen_jump (label1));
6097 emit_barrier ();
6098 emit_label (label0);
6099 emit_move_insn (target, const1_rtx);
6100 emit_label (label1);
6101
6102 return target;
6103 }
6104
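/* The branch-based fallback above expands to the equivalent of

	if (subtarget == old_val) goto label0;
	target = 0;
	goto label1;
     label0:
	target = 1;
     label1:

   which, as noted, ifcvt can normally clean up when the target prefers a
   different STORE_FLAG_VALUE.  */
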
6105 /* This is a helper function for the other atomic operations. This function
6106 emits a loop that contains SEQ that iterates until a compare-and-swap
6107 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6108 a set of instructions that takes a value from OLD_REG as an input and
6109 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6110 set to the current contents of MEM. After SEQ, a compare-and-swap will
6111 attempt to update MEM with NEW_REG. The function returns true when the
6112 loop was generated successfully. */
6113
6114 static bool
6115 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6116 {
6117 enum machine_mode mode = GET_MODE (mem);
6118 enum insn_code icode;
6119 rtx label, cmp_reg, subtarget;
6120
6121 /* The loop we want to generate looks like
6122
6123 cmp_reg = mem;
6124 label:
6125 old_reg = cmp_reg;
6126 seq;
6127 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6128 if (cmp_reg != old_reg)
6129 goto label;
6130
6131 Note that we only do the plain load from memory once. Subsequent
6132 iterations use the value loaded by the compare-and-swap pattern. */
6133
6134 label = gen_label_rtx ();
6135 cmp_reg = gen_reg_rtx (mode);
6136
6137 emit_move_insn (cmp_reg, mem);
6138 emit_label (label);
6139 emit_move_insn (old_reg, cmp_reg);
6140 if (seq)
6141 emit_insn (seq);
6142
6143 /* If the target supports a compare-and-swap pattern that simultaneously
6144 sets some flag for success, then use it. Otherwise use the regular
6145 compare-and-swap and follow that immediately with a compare insn. */
6146 icode = sync_compare_and_swap_cc[mode];
6147 switch (icode)
6148 {
6149 default:
6150 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6151 cmp_reg, icode);
6152 if (subtarget != NULL_RTX)
6153 {
6154 gcc_assert (subtarget == cmp_reg);
6155 break;
6156 }
6157
6158 /* FALLTHRU */
6159 case CODE_FOR_nothing:
6160 icode = sync_compare_and_swap[mode];
6161 if (icode == CODE_FOR_nothing)
6162 return false;
6163
6164 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6165 cmp_reg, icode);
6166 if (subtarget == NULL_RTX)
6167 return false;
6168 if (subtarget != cmp_reg)
6169 emit_move_insn (cmp_reg, subtarget);
6170
6171 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6172 }
6173
6174 /* ??? Mark this jump predicted not taken? */
6175 emit_jump_insn (bcc_gen_fctn[NE] (label));
6176
6177 return true;
6178 }
6179
6180 /* This function generates the atomic operation MEM CODE= VAL. In this
6181 case, we do not care about any resulting value. Returns NULL if we
6182 cannot generate the operation. */
6183
6184 rtx
6185 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6186 {
6187 enum machine_mode mode = GET_MODE (mem);
6188 enum insn_code icode;
6189 rtx insn;
6190
6191 /* Look to see if the target supports the operation directly. */
6192 switch (code)
6193 {
6194 case PLUS:
6195 icode = sync_add_optab[mode];
6196 break;
6197 case IOR:
6198 icode = sync_ior_optab[mode];
6199 break;
6200 case XOR:
6201 icode = sync_xor_optab[mode];
6202 break;
6203 case AND:
6204 icode = sync_and_optab[mode];
6205 break;
6206 case NOT:
6207 icode = sync_nand_optab[mode];
6208 break;
6209
6210 case MINUS:
6211 icode = sync_sub_optab[mode];
6212 if (icode == CODE_FOR_nothing)
6213 {
6214 icode = sync_add_optab[mode];
6215 if (icode != CODE_FOR_nothing)
6216 {
6217 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6218 code = PLUS;
6219 }
6220 }
6221 break;
6222
6223 default:
6224 gcc_unreachable ();
6225 }
6226
6227 /* Generate the direct operation, if present. */
6228 if (icode != CODE_FOR_nothing)
6229 {
6230 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6231 val = convert_modes (mode, GET_MODE (val), val, 1);
6232 if (!insn_data[icode].operand[1].predicate (val, mode))
6233 val = force_reg (mode, val);
6234
6235 insn = GEN_FCN (icode) (mem, val);
6236 if (insn)
6237 {
6238 emit_insn (insn);
6239 return const0_rtx;
6240 }
6241 }
6242
6243 /* Failing that, generate a compare-and-swap loop in which we perform the
6244 operation with normal arithmetic instructions. */
6245 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6246 {
6247 rtx t0 = gen_reg_rtx (mode), t1;
6248
6249 start_sequence ();
6250
6251 t1 = t0;
6252 if (code == NOT)
6253 {
6254 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6255 code = AND;
6256 }
6257 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6258 true, OPTAB_LIB_WIDEN);
6259
6260 insn = get_insns ();
6261 end_sequence ();
6262
6263 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6264 return const0_rtx;
6265 }
6266
6267 return NULL_RTX;
6268 }
6269
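/* Note that in the compare-and-swap fallback above a NOT (nand) request is
   rewritten as t1 = (~t0) & val before being handed to
   expand_compare_and_swap_loop; the loop itself only ever retries the plain
   compare-and-swap, as sketched in that function's comment.  */
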
6270 /* This function generates the atomic operation MEM CODE= VAL. In this
6271 case, we do care about the resulting value: if AFTER is true then
6272 return the value MEM holds after the operation, if AFTER is false
6273 then return the value MEM holds before the operation. TARGET is an
6274 optional place for the result value to be stored. */
6275
6276 rtx
6277 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6278 bool after, rtx target)
6279 {
6280 enum machine_mode mode = GET_MODE (mem);
6281 enum insn_code old_code, new_code, icode;
6282 bool compensate;
6283 rtx insn;
6284
6285 /* Look to see if the target supports the operation directly. */
6286 switch (code)
6287 {
6288 case PLUS:
6289 old_code = sync_old_add_optab[mode];
6290 new_code = sync_new_add_optab[mode];
6291 break;
6292 case IOR:
6293 old_code = sync_old_ior_optab[mode];
6294 new_code = sync_new_ior_optab[mode];
6295 break;
6296 case XOR:
6297 old_code = sync_old_xor_optab[mode];
6298 new_code = sync_new_xor_optab[mode];
6299 break;
6300 case AND:
6301 old_code = sync_old_and_optab[mode];
6302 new_code = sync_new_and_optab[mode];
6303 break;
6304 case NOT:
6305 old_code = sync_old_nand_optab[mode];
6306 new_code = sync_new_nand_optab[mode];
6307 break;
6308
6309 case MINUS:
6310 old_code = sync_old_sub_optab[mode];
6311 new_code = sync_new_sub_optab[mode];
6312 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6313 {
6314 old_code = sync_old_add_optab[mode];
6315 new_code = sync_new_add_optab[mode];
6316 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6317 {
6318 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6319 code = PLUS;
6320 }
6321 }
6322 break;
6323
6324 default:
6325 gcc_unreachable ();
6326 }
6327
6328 /* If the target does support the proper new/old operation, great. But
6329 if we only support the opposite old/new operation, check to see if we
6330 can compensate. In the case in which the old value is supported, we
6331 can always perform the operation again with normal arithmetic. In
6332 the case in which the new value is supported, we can handle this
6333 only if the operation is reversible. */
6334 compensate = false;
6335 if (after)
6336 {
6337 icode = new_code;
6338 if (icode == CODE_FOR_nothing)
6339 {
6340 icode = old_code;
6341 if (icode != CODE_FOR_nothing)
6342 compensate = true;
6343 }
6344 }
6345 else
6346 {
6347 icode = old_code;
6348 if (icode == CODE_FOR_nothing
6349 && (code == PLUS || code == MINUS || code == XOR))
6350 {
6351 icode = new_code;
6352 if (icode != CODE_FOR_nothing)
6353 compensate = true;
6354 }
6355 }
6356
6357 /* If we found something supported, great. */
6358 if (icode != CODE_FOR_nothing)
6359 {
6360 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6361 target = gen_reg_rtx (mode);
6362
6363 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6364 val = convert_modes (mode, GET_MODE (val), val, 1);
6365 if (!insn_data[icode].operand[2].predicate (val, mode))
6366 val = force_reg (mode, val);
6367
6368 insn = GEN_FCN (icode) (target, mem, val);
6369 if (insn)
6370 {
6371 emit_insn (insn);
6372
6373 /* If we need to compensate for using an operation with the
6374 wrong return value, do so now. */
6375 if (compensate)
6376 {
6377 if (!after)
6378 {
6379 if (code == PLUS)
6380 code = MINUS;
6381 else if (code == MINUS)
6382 code = PLUS;
6383 }
6384
6385 if (code == NOT)
{
6386 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
code = AND;
}
6387 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6388 true, OPTAB_LIB_WIDEN);
6389 }
6390
6391 return target;
6392 }
6393 }
6394
6395 /* Failing that, generate a compare-and-swap loop in which we perform the
6396 operation with normal arithmetic instructions. */
6397 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6398 {
6399 rtx t0 = gen_reg_rtx (mode), t1;
6400
6401 if (!target || !register_operand (target, mode))
6402 target = gen_reg_rtx (mode);
6403
6404 start_sequence ();
6405
6406 if (!after)
6407 emit_move_insn (target, t0);
6408 t1 = t0;
6409 if (code == NOT)
6410 {
6411 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6412 code = AND;
6413 }
6414 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6415 true, OPTAB_LIB_WIDEN);
6416 if (after)
6417 emit_move_insn (target, t1);
6418
6419 insn = get_insns ();
6420 end_sequence ();
6421
6422 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6423 return target;
6424 }
6425
6426 return NULL_RTX;
6427 }
6428
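/* For instance (a sketch of the compensation path above): if a target only
   provides the old-value form of an atomic add but the caller asked for the
   value AFTER the operation, the old value is fetched with that pattern and
   VAL is then added to it with ordinary arithmetic; conversely, if only the
   new-value form exists and the value BEFORE the operation is wanted, VAL
   is subtracted back out, which is why only reversible codes (PLUS, MINUS,
   XOR) take that path.  */
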
6429 /* This function expands a test-and-set operation. Ideally we atomically
6430 store VAL in MEM and return the previous value in MEM. Some targets
6431 may not support this operation and only support VAL with the constant 1;
6432 in this case the return value will be 0/1, but the exact value
6433 stored in MEM is target defined. TARGET is an optional place to stick
6434 the return value. */
6435
6436 rtx
6437 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6438 {
6439 enum machine_mode mode = GET_MODE (mem);
6440 enum insn_code icode;
6441 rtx insn;
6442
6443 /* If the target supports the test-and-set directly, great. */
6444 icode = sync_lock_test_and_set[mode];
6445 if (icode != CODE_FOR_nothing)
6446 {
6447 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6448 target = gen_reg_rtx (mode);
6449
6450 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6451 val = convert_modes (mode, GET_MODE (val), val, 1);
6452 if (!insn_data[icode].operand[2].predicate (val, mode))
6453 val = force_reg (mode, val);
6454
6455 insn = GEN_FCN (icode) (target, mem, val);
6456 if (insn)
6457 {
6458 emit_insn (insn);
6459 return target;
6460 }
6461 }
6462
6463 /* Otherwise, use a compare-and-swap loop for the exchange. */
6464 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6465 {
6466 if (!target || !register_operand (target, mode))
6467 target = gen_reg_rtx (mode);
6468 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6469 val = convert_modes (mode, GET_MODE (val), val, 1);
6470 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6471 return target;
6472 }
6473
6474 return NULL_RTX;
6475 }
6476
6477 #include "gt-optabs.h"