gcc/optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "target.h"
46
47 /* Each optab contains info on how this target machine
48 can perform a particular operation
49 for all sizes and kinds of operands.
50
51 The operation to be performed is often specified
52 by passing one of these optabs as an argument.
53
54 See expr.h for documentation of these optabs. */
55
56 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
57 __extension__ struct optab_d optab_table[OTI_MAX]
58 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
59 = CODE_FOR_nothing };
60 #else
61 /* init_insn_codes will do runtime initialization otherwise. */
62 struct optab_d optab_table[OTI_MAX];
63 #endif
64
65 rtx libfunc_table[LTI_MAX];
66
67 /* Tables of patterns for converting one mode to another. */
68 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
69 __extension__ struct convert_optab_d convert_optab_table[COI_MAX]
70 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
71 [0 ... NUM_MACHINE_MODES - 1].insn_code
72 = CODE_FOR_nothing };
73 #else
74 /* init_convert_optab will do runtime initialization otherwise. */
75 struct convert_optab_d convert_optab_table[COI_MAX];
76 #endif
77
78 /* Contains the optab used for each rtx code. */
79 optab code_to_optab[NUM_RTX_CODE + 1];
80
81 #ifdef HAVE_conditional_move
82 /* Indexed by the machine mode, gives the insn code to make a conditional
83 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
84 setcc_gen_code to cut down on the number of named patterns. Consider a day
85    when a lot more rtx codes are conditional (e.g. for the ARM).  */
86
87 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
88 #endif
89
90 /* Indexed by the machine mode, gives the insn code for vector conditional
91 operation. */
92
93 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
94 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
95
96 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
97 enum machine_mode *);
98 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
99
100 /* Debug facility for use in GDB. */
101 void debug_optab_libfuncs (void);
102
103 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
104 #if ENABLE_DECIMAL_BID_FORMAT
105 #define DECIMAL_PREFIX "bid_"
106 #else
107 #define DECIMAL_PREFIX "dpd_"
108 #endif
109 \f
110
111 /* Info about a libfunc.  We use the same hashtable for normal optabs and
112    conversion optabs.  In the first case mode2 is unused.  */
113 struct GTY(()) libfunc_entry {
114 size_t optab;
115 enum machine_mode mode1, mode2;
116 rtx libfunc;
117 };
118
119 /* Hash table mapping (optab, mode1, mode2) keys to libfunc rtxes.  */
120 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
121
122 /* Hash function for libfunc_hash.  */
123
124 static hashval_t
125 hash_libfunc (const void *p)
126 {
127 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
128
129 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
130 ^ e->optab);
131 }
132
133 /* Equality function for libfunc_hash.  */
134
135 static int
136 eq_libfunc (const void *p, const void *q)
137 {
138 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
139 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
140
141 return (e1->optab == e2->optab
142 && e1->mode1 == e2->mode1
143 && e1->mode2 == e2->mode2);
144 }
145
146 /* Return the libfunc corresponding to the operation defined by OPTAB,
147    converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
148    return NULL if no libfunc is available.  */
149 rtx
150 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
151 enum machine_mode mode2)
152 {
153 struct libfunc_entry e;
154 struct libfunc_entry **slot;
155
156 e.optab = (size_t) (optab - &convert_optab_table[0]);
157 e.mode1 = mode1;
158 e.mode2 = mode2;
159 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
160 if (!slot)
161 {
162 if (optab->libcall_gen)
163 {
164 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
165 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
166 if (slot)
167 return (*slot)->libfunc;
168 else
169 return NULL;
170 }
171 return NULL;
172 }
173 return (*slot)->libfunc;
174 }
175
176 /* Return the libfunc corresponding to the operation defined by OPTAB
177    in MODE.  Trigger lazy initialization if needed; return NULL if no
178    libfunc is available.  */
179 rtx
180 optab_libfunc (optab optab, enum machine_mode mode)
181 {
182 struct libfunc_entry e;
183 struct libfunc_entry **slot;
184
185 e.optab = (size_t) (optab - &optab_table[0]);
186 e.mode1 = mode;
187 e.mode2 = VOIDmode;
188 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
189 if (!slot)
190 {
191 if (optab->libcall_gen)
192 {
193 optab->libcall_gen (optab, optab->libcall_basename,
194 optab->libcall_suffix, mode);
195 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
196 &e, NO_INSERT);
197 if (slot)
198 return (*slot)->libfunc;
199 else
200 return NULL;
201 }
202 return NULL;
203 }
204 return (*slot)->libfunc;
205 }
206
207 \f
208 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
209 the result of operation CODE applied to OP0 (and OP1 if it is a binary
210 operation).
211
212 If the last insn does not set TARGET, don't do anything, but return 1.
213
214 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
215 don't add the REG_EQUAL note but return 0. Our caller can then try
216 again, ensuring that TARGET is not one of the operands. */
217
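/* For illustration (register numbers are hypothetical): if INSNS ends with
   (set (reg:SI 100) ...) and CODE is PLUS with OP0 = (reg:SI 101) and
   OP1 = (reg:SI 102), the note attached to that last insn is
   REG_EQUAL (plus:SI (reg:SI 101) (reg:SI 102)), letting later passes
   treat reg 100 as equivalent to that expression.  */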
218 static int
219 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
220 {
221 rtx last_insn, insn, set;
222 rtx note;
223
224 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
225
226 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
227 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
228 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
229 && GET_RTX_CLASS (code) != RTX_COMPARE
230 && GET_RTX_CLASS (code) != RTX_UNARY)
231 return 1;
232
233 if (GET_CODE (target) == ZERO_EXTRACT)
234 return 1;
235
236 for (last_insn = insns;
237 NEXT_INSN (last_insn) != NULL_RTX;
238 last_insn = NEXT_INSN (last_insn))
239 ;
240
241 set = single_set (last_insn);
242 if (set == NULL_RTX)
243 return 1;
244
245 if (! rtx_equal_p (SET_DEST (set), target)
246 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
247 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
248 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
249 return 1;
250
251 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
252 besides the last insn. */
253 if (reg_overlap_mentioned_p (target, op0)
254 || (op1 && reg_overlap_mentioned_p (target, op1)))
255 {
256 insn = PREV_INSN (last_insn);
257 while (insn != NULL_RTX)
258 {
259 if (reg_set_p (target, insn))
260 return 0;
261
262 insn = PREV_INSN (insn);
263 }
264 }
265
266 if (GET_RTX_CLASS (code) == RTX_UNARY)
267 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
268 else
269 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
270
271 set_unique_reg_note (last_insn, REG_EQUAL, note);
272
273 return 1;
274 }
275 \f
276 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
277 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
278 not actually do a sign-extend or zero-extend, but can leave the
279 higher-order bits of the result rtx undefined, for example, in the case
280 of logical operations, but not right shifts. */
281
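/* For illustration (mode and register number are examples): widening a
   QImode register to SImode with NO_EXTEND nonzero, say for an AND whose
   result will be truncated back to QImode, can simply return the
   paradoxical (subreg:SI (reg:QI 100) 0); the high-order bits of that
   SUBREG are undefined, which is acceptable for such operations.  */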
282 static rtx
283 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
284 int unsignedp, int no_extend)
285 {
286 rtx result;
287
288 /* If we don't have to extend and this is a constant, return it. */
289 if (no_extend && GET_MODE (op) == VOIDmode)
290 return op;
291
292 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
293 extend since it will be more efficient to do so unless the signedness of
294 a promoted object differs from our extension. */
295 if (! no_extend
296 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
297 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
298 return convert_modes (mode, oldmode, op, unsignedp);
299
300 /* If MODE is no wider than a single word, we return a paradoxical
301 SUBREG. */
302 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
303 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
304
305 /* Otherwise, get an object of MODE, clobber it, and set the low-order
306 part to OP. */
307
308 result = gen_reg_rtx (mode);
309 emit_clobber (result);
310 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
311 return result;
312 }
313 \f
314 /* Return the optab used for computing the operation given by the tree code
315    CODE and the tree type TYPE.  This function is not always usable (for
316    example, it cannot give complete results for multiplication or division)
317    but probably ought to be relied on more widely throughout the expander.  */
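/* For illustration: a signed integral TYPE maps PLUS_EXPR to add_optab,
   or to addv_optab when TYPE_OVERFLOW_TRAPS (e.g. with -ftrapv); a
   saturating TYPE maps it to ssadd_optab or usadd_optab depending on
   signedness; and RSHIFT_EXPR on a vector type with SUBTYPE optab_vector
   maps to vlshr_optab or vashr_optab.  */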
318 optab
319 optab_for_tree_code (enum tree_code code, const_tree type,
320 enum optab_subtype subtype)
321 {
322 bool trapv;
323 switch (code)
324 {
325 case BIT_AND_EXPR:
326 return and_optab;
327
328 case BIT_IOR_EXPR:
329 return ior_optab;
330
331 case BIT_NOT_EXPR:
332 return one_cmpl_optab;
333
334 case BIT_XOR_EXPR:
335 return xor_optab;
336
337 case TRUNC_MOD_EXPR:
338 case CEIL_MOD_EXPR:
339 case FLOOR_MOD_EXPR:
340 case ROUND_MOD_EXPR:
341 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
342
343 case RDIV_EXPR:
344 case TRUNC_DIV_EXPR:
345 case CEIL_DIV_EXPR:
346 case FLOOR_DIV_EXPR:
347 case ROUND_DIV_EXPR:
348 case EXACT_DIV_EXPR:
349 if (TYPE_SATURATING(type))
350 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
351 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
352
353 case LSHIFT_EXPR:
354 if (VECTOR_MODE_P (TYPE_MODE (type)))
355 {
356 if (subtype == optab_vector)
357 return TYPE_SATURATING (type) ? NULL : vashl_optab;
358
359 gcc_assert (subtype == optab_scalar);
360 }
361 if (TYPE_SATURATING(type))
362 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
363 return ashl_optab;
364
365 case RSHIFT_EXPR:
366 if (VECTOR_MODE_P (TYPE_MODE (type)))
367 {
368 if (subtype == optab_vector)
369 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
370
371 gcc_assert (subtype == optab_scalar);
372 }
373 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
374
375 case LROTATE_EXPR:
376 if (VECTOR_MODE_P (TYPE_MODE (type)))
377 {
378 if (subtype == optab_vector)
379 return vrotl_optab;
380
381 gcc_assert (subtype == optab_scalar);
382 }
383 return rotl_optab;
384
385 case RROTATE_EXPR:
386 if (VECTOR_MODE_P (TYPE_MODE (type)))
387 {
388 if (subtype == optab_vector)
389 return vrotr_optab;
390
391 gcc_assert (subtype == optab_scalar);
392 }
393 return rotr_optab;
394
395 case MAX_EXPR:
396 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
397
398 case MIN_EXPR:
399 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
400
401 case REALIGN_LOAD_EXPR:
402 return vec_realign_load_optab;
403
404 case WIDEN_SUM_EXPR:
405 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
406
407 case DOT_PROD_EXPR:
408 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
409
410 case WIDEN_MULT_PLUS_EXPR:
411 return (TYPE_UNSIGNED (type)
412 ? (TYPE_SATURATING (type)
413 ? usmadd_widen_optab : umadd_widen_optab)
414 : (TYPE_SATURATING (type)
415 ? ssmadd_widen_optab : smadd_widen_optab));
416
417 case WIDEN_MULT_MINUS_EXPR:
418 return (TYPE_UNSIGNED (type)
419 ? (TYPE_SATURATING (type)
420 ? usmsub_widen_optab : umsub_widen_optab)
421 : (TYPE_SATURATING (type)
422 ? ssmsub_widen_optab : smsub_widen_optab));
423
424 case REDUC_MAX_EXPR:
425 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
426
427 case REDUC_MIN_EXPR:
428 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
429
430 case REDUC_PLUS_EXPR:
431 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
432
433 case VEC_LSHIFT_EXPR:
434 return vec_shl_optab;
435
436 case VEC_RSHIFT_EXPR:
437 return vec_shr_optab;
438
439 case VEC_WIDEN_MULT_HI_EXPR:
440 return TYPE_UNSIGNED (type) ?
441 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
442
443 case VEC_WIDEN_MULT_LO_EXPR:
444 return TYPE_UNSIGNED (type) ?
445 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
446
447 case VEC_UNPACK_HI_EXPR:
448 return TYPE_UNSIGNED (type) ?
449 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
450
451 case VEC_UNPACK_LO_EXPR:
452 return TYPE_UNSIGNED (type) ?
453 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
454
455 case VEC_UNPACK_FLOAT_HI_EXPR:
456 /* The signedness is determined from input operand. */
457 return TYPE_UNSIGNED (type) ?
458 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
459
460 case VEC_UNPACK_FLOAT_LO_EXPR:
461 /* The signedness is determined from input operand. */
462 return TYPE_UNSIGNED (type) ?
463 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
464
465 case VEC_PACK_TRUNC_EXPR:
466 return vec_pack_trunc_optab;
467
468 case VEC_PACK_SAT_EXPR:
469 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
470
471 case VEC_PACK_FIX_TRUNC_EXPR:
472 /* The signedness is determined from output operand. */
473 return TYPE_UNSIGNED (type) ?
474 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
475
476 default:
477 break;
478 }
479
480 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
481 switch (code)
482 {
483 case POINTER_PLUS_EXPR:
484 case PLUS_EXPR:
485 if (TYPE_SATURATING(type))
486 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
487 return trapv ? addv_optab : add_optab;
488
489 case MINUS_EXPR:
490 if (TYPE_SATURATING(type))
491 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
492 return trapv ? subv_optab : sub_optab;
493
494 case MULT_EXPR:
495 if (TYPE_SATURATING(type))
496 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
497 return trapv ? smulv_optab : smul_optab;
498
499 case NEGATE_EXPR:
500 if (TYPE_SATURATING(type))
501 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
502 return trapv ? negv_optab : neg_optab;
503
504 case ABS_EXPR:
505 return trapv ? absv_optab : abs_optab;
506
507 case VEC_EXTRACT_EVEN_EXPR:
508 return vec_extract_even_optab;
509
510 case VEC_EXTRACT_ODD_EXPR:
511 return vec_extract_odd_optab;
512
513 case VEC_INTERLEAVE_HIGH_EXPR:
514 return vec_interleave_high_optab;
515
516 case VEC_INTERLEAVE_LOW_EXPR:
517 return vec_interleave_low_optab;
518
519 default:
520 return NULL;
521 }
522 }
523 \f
524
525 /* Expand vector widening operations.
526
527 There are two different classes of operations handled here:
528 1) Operations whose result is wider than all the arguments to the operation.
529 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
530 In this case OP0 and optionally OP1 would be initialized,
531 but WIDE_OP wouldn't (not relevant for this case).
532 2) Operations whose result is of the same size as the last argument to the
533 operation, but wider than all the other arguments to the operation.
534 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
535    In this case WIDE_OP, OP0, and optionally OP1 would be initialized.
536
537    E.g., when called to expand the following operations, this is how
538 the arguments will be initialized:
539 nops OP0 OP1 WIDE_OP
540 widening-sum 2 oprnd0 - oprnd1
541 widening-dot-product 3 oprnd0 oprnd1 oprnd2
542 widening-mult 2 oprnd0 oprnd1 -
543 type-promotion (vec-unpack) 1 oprnd0 - - */
544
545 rtx
546 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
547 rtx target, int unsignedp)
548 {
549 tree oprnd0, oprnd1, oprnd2;
550 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
551 optab widen_pattern_optab;
552 int icode;
553 enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
554 rtx temp;
555 rtx pat;
556 rtx xop0, xop1, wxop;
557 int nops = TREE_CODE_LENGTH (ops->code);
558
559 oprnd0 = ops->op0;
560 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
561 widen_pattern_optab =
562 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
563 if (ops->code == WIDEN_MULT_PLUS_EXPR
564 || ops->code == WIDEN_MULT_MINUS_EXPR)
565 icode = (int) optab_handler (widen_pattern_optab,
566 TYPE_MODE (TREE_TYPE (ops->op2)))->insn_code;
567 else
568 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
569 gcc_assert (icode != CODE_FOR_nothing);
570 xmode0 = insn_data[icode].operand[1].mode;
571
572 if (nops >= 2)
573 {
574 oprnd1 = ops->op1;
575 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
576 xmode1 = insn_data[icode].operand[2].mode;
577 }
578
579 /* The last operand is of a wider mode than the rest of the operands. */
580 if (nops == 2)
581 {
582 wmode = tmode1;
583 wxmode = xmode1;
584 }
585 else if (nops == 3)
586 {
587 gcc_assert (tmode1 == tmode0);
588 gcc_assert (op1);
589 oprnd2 = ops->op2;
590 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
591 wxmode = insn_data[icode].operand[3].mode;
592 }
593
594 if (!wide_op)
595 wmode = wxmode = insn_data[icode].operand[0].mode;
596
597 if (!target
598 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
599 temp = gen_reg_rtx (wmode);
600 else
601 temp = target;
602
603 xop0 = op0;
604 xop1 = op1;
605 wxop = wide_op;
606
607 /* In case the insn wants input operands in modes different from
608 those of the actual operands, convert the operands. It would
609 seem that we don't need to convert CONST_INTs, but we do, so
610 that they're properly zero-extended, sign-extended or truncated
611 for their mode. */
612
613 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
614 xop0 = convert_modes (xmode0,
615 GET_MODE (op0) != VOIDmode
616 ? GET_MODE (op0)
617 : tmode0,
618 xop0, unsignedp);
619
620 if (op1)
621 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
622 xop1 = convert_modes (xmode1,
623 GET_MODE (op1) != VOIDmode
624 ? GET_MODE (op1)
625 : tmode1,
626 xop1, unsignedp);
627
628 if (wide_op)
629 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
630 wxop = convert_modes (wxmode,
631 GET_MODE (wide_op) != VOIDmode
632 ? GET_MODE (wide_op)
633 : wmode,
634 wxop, unsignedp);
635
636 /* Now, if insn's predicates don't allow our operands, put them into
637 pseudo regs. */
638
639 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
640 && xmode0 != VOIDmode)
641 xop0 = copy_to_mode_reg (xmode0, xop0);
642
643 if (op1)
644 {
645 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
646 && xmode1 != VOIDmode)
647 xop1 = copy_to_mode_reg (xmode1, xop1);
648
649 if (wide_op)
650 {
651 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
652 && wxmode != VOIDmode)
653 wxop = copy_to_mode_reg (wxmode, wxop);
654
655 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
656 }
657 else
658 pat = GEN_FCN (icode) (temp, xop0, xop1);
659 }
660 else
661 {
662 if (wide_op)
663 {
664 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
665 && wxmode != VOIDmode)
666 wxop = copy_to_mode_reg (wxmode, wxop);
667
668 pat = GEN_FCN (icode) (temp, xop0, wxop);
669 }
670 else
671 pat = GEN_FCN (icode) (temp, xop0);
672 }
673
674 emit_insn (pat);
675 return temp;
676 }
677
678 /* Generate code to perform an operation specified by TERNARY_OPTAB
679 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
680
681 UNSIGNEDP is for the case where we have to widen the operands
682 to perform the operation. It says to use zero-extension.
683
684 If TARGET is nonzero, the value
685 is generated there, if it is convenient to do so.
686 In all cases an rtx is returned for the locus of the value;
687 this may or may not be TARGET. */
688
689 rtx
690 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
691 rtx op1, rtx op2, rtx target, int unsignedp)
692 {
693 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
694 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
695 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
696 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
697 rtx temp;
698 rtx pat;
699 rtx xop0 = op0, xop1 = op1, xop2 = op2;
700
701 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
702 != CODE_FOR_nothing);
703
704 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
705 temp = gen_reg_rtx (mode);
706 else
707 temp = target;
708
709 /* In case the insn wants input operands in modes different from
710 those of the actual operands, convert the operands. It would
711 seem that we don't need to convert CONST_INTs, but we do, so
712 that they're properly zero-extended, sign-extended or truncated
713 for their mode. */
714
715 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
716 xop0 = convert_modes (mode0,
717 GET_MODE (op0) != VOIDmode
718 ? GET_MODE (op0)
719 : mode,
720 xop0, unsignedp);
721
722 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
723 xop1 = convert_modes (mode1,
724 GET_MODE (op1) != VOIDmode
725 ? GET_MODE (op1)
726 : mode,
727 xop1, unsignedp);
728
729 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
730 xop2 = convert_modes (mode2,
731 GET_MODE (op2) != VOIDmode
732 ? GET_MODE (op2)
733 : mode,
734 xop2, unsignedp);
735
736 /* Now, if insn's predicates don't allow our operands, put them into
737 pseudo regs. */
738
739 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
740 && mode0 != VOIDmode)
741 xop0 = copy_to_mode_reg (mode0, xop0);
742
743 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
744 && mode1 != VOIDmode)
745 xop1 = copy_to_mode_reg (mode1, xop1);
746
747 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
748 && mode2 != VOIDmode)
749 xop2 = copy_to_mode_reg (mode2, xop2);
750
751 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
752
753 emit_insn (pat);
754 return temp;
755 }
756
757
758 /* Like expand_binop, but return a constant rtx if the result can be
759 calculated at compile time. The arguments and return value are
760 otherwise the same as for expand_binop. */
761
762 static rtx
763 simplify_expand_binop (enum machine_mode mode, optab binoptab,
764 rtx op0, rtx op1, rtx target, int unsignedp,
765 enum optab_methods methods)
766 {
767 if (CONSTANT_P (op0) && CONSTANT_P (op1))
768 {
769 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
770
771 if (x)
772 return x;
773 }
774
775 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
776 }
777
778 /* Like simplify_expand_binop, but always put the result in TARGET.
779 Return true if the expansion succeeded. */
780
781 bool
782 force_expand_binop (enum machine_mode mode, optab binoptab,
783 rtx op0, rtx op1, rtx target, int unsignedp,
784 enum optab_methods methods)
785 {
786 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
787 target, unsignedp, methods);
788 if (x == 0)
789 return false;
790 if (x != target)
791 emit_move_insn (target, x);
792 return true;
793 }
794
795 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
796
797 rtx
798 expand_vec_shift_expr (sepops ops, rtx target)
799 {
800 enum insn_code icode;
801 rtx rtx_op1, rtx_op2;
802 enum machine_mode mode1;
803 enum machine_mode mode2;
804 enum machine_mode mode = TYPE_MODE (ops->type);
805 tree vec_oprnd = ops->op0;
806 tree shift_oprnd = ops->op1;
807 optab shift_optab;
808 rtx pat;
809
810 switch (ops->code)
811 {
812 case VEC_RSHIFT_EXPR:
813 shift_optab = vec_shr_optab;
814 break;
815 case VEC_LSHIFT_EXPR:
816 shift_optab = vec_shl_optab;
817 break;
818 default:
819 gcc_unreachable ();
820 }
821
822 icode = optab_handler (shift_optab, mode)->insn_code;
823 gcc_assert (icode != CODE_FOR_nothing);
824
825 mode1 = insn_data[icode].operand[1].mode;
826 mode2 = insn_data[icode].operand[2].mode;
827
828 rtx_op1 = expand_normal (vec_oprnd);
829 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
830 && mode1 != VOIDmode)
831 rtx_op1 = force_reg (mode1, rtx_op1);
832
833 rtx_op2 = expand_normal (shift_oprnd);
834 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
835 && mode2 != VOIDmode)
836 rtx_op2 = force_reg (mode2, rtx_op2);
837
838 if (!target
839 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
840 target = gen_reg_rtx (mode);
841
842 /* Emit instruction */
843 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
844 gcc_assert (pat);
845 emit_insn (pat);
846
847 return target;
848 }
849
850 /* This subroutine of expand_doubleword_shift handles the cases in which
851 the effective shift value is >= BITS_PER_WORD. The arguments and return
852 value are the same as for the parent routine, except that SUPERWORD_OP1
853 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
854 INTO_TARGET may be null if the caller has decided to calculate it. */
855
856 static bool
857 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
858 rtx outof_target, rtx into_target,
859 int unsignedp, enum optab_methods methods)
860 {
861 if (into_target != 0)
862 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
863 into_target, unsignedp, methods))
864 return false;
865
866 if (outof_target != 0)
867 {
868 /* For a signed right shift, we must fill OUTOF_TARGET with copies
869 of the sign bit, otherwise we must fill it with zeros. */
870 if (binoptab != ashr_optab)
871 emit_move_insn (outof_target, CONST0_RTX (word_mode));
872 else
873 if (!force_expand_binop (word_mode, binoptab,
874 outof_input, GEN_INT (BITS_PER_WORD - 1),
875 outof_target, unsignedp, methods))
876 return false;
877 }
878 return true;
879 }
880
881 /* This subroutine of expand_doubleword_shift handles the cases in which
882 the effective shift value is < BITS_PER_WORD. The arguments and return
883 value are the same as for the parent routine. */
884
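/* Worked example with illustrative numbers: shifting a 64-bit value made
   of two 32-bit words left by OP1 = 10, the "into" result word is
   (INTO_INPUT << 10) | (OUTOF_INPUT >> 22) and the "outof" result word is
   OUTOF_INPUT << 10.  The code below builds exactly that OR of two
   single-word shifts while never shifting by BITS_PER_WORD itself.  */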
885 static bool
886 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
887 rtx outof_input, rtx into_input, rtx op1,
888 rtx outof_target, rtx into_target,
889 int unsignedp, enum optab_methods methods,
890 unsigned HOST_WIDE_INT shift_mask)
891 {
892 optab reverse_unsigned_shift, unsigned_shift;
893 rtx tmp, carries;
894
895 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
896 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
897
898 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
899 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
900 the opposite direction to BINOPTAB. */
901 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
902 {
903 carries = outof_input;
904 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
905 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
906 0, true, methods);
907 }
908 else
909 {
910 /* We must avoid shifting by BITS_PER_WORD bits since that is either
911 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
912 has unknown behavior. Do a single shift first, then shift by the
913 remainder. It's OK to use ~OP1 as the remainder if shift counts
914 are truncated to the mode size. */
915 carries = expand_binop (word_mode, reverse_unsigned_shift,
916 outof_input, const1_rtx, 0, unsignedp, methods);
917 if (shift_mask == BITS_PER_WORD - 1)
918 {
919 tmp = immed_double_const (-1, -1, op1_mode);
920 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
921 0, true, methods);
922 }
923 else
924 {
925 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
926 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
927 0, true, methods);
928 }
929 }
930 if (tmp == 0 || carries == 0)
931 return false;
932 carries = expand_binop (word_mode, reverse_unsigned_shift,
933 carries, tmp, 0, unsignedp, methods);
934 if (carries == 0)
935 return false;
936
937 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
938 so the result can go directly into INTO_TARGET if convenient. */
939 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
940 into_target, unsignedp, methods);
941 if (tmp == 0)
942 return false;
943
944 /* Now OR in the bits carried over from OUTOF_INPUT. */
945 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
946 into_target, unsignedp, methods))
947 return false;
948
949 /* Use a standard word_mode shift for the out-of half. */
950 if (outof_target != 0)
951 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
952 outof_target, unsignedp, methods))
953 return false;
954
955 return true;
956 }
957
958
959 #ifdef HAVE_conditional_move
960 /* Try implementing expand_doubleword_shift using conditional moves.
961 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
962 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
963 are the shift counts to use in the former and latter case. All other
964 arguments are the same as the parent routine. */
965
966 static bool
967 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
968 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
969 rtx outof_input, rtx into_input,
970 rtx subword_op1, rtx superword_op1,
971 rtx outof_target, rtx into_target,
972 int unsignedp, enum optab_methods methods,
973 unsigned HOST_WIDE_INT shift_mask)
974 {
975 rtx outof_superword, into_superword;
976
977 /* Put the superword version of the output into OUTOF_SUPERWORD and
978 INTO_SUPERWORD. */
979 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
980 if (outof_target != 0 && subword_op1 == superword_op1)
981 {
982 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
983 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
984 into_superword = outof_target;
985 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
986 outof_superword, 0, unsignedp, methods))
987 return false;
988 }
989 else
990 {
991 into_superword = gen_reg_rtx (word_mode);
992 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
993 outof_superword, into_superword,
994 unsignedp, methods))
995 return false;
996 }
997
998 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
999 if (!expand_subword_shift (op1_mode, binoptab,
1000 outof_input, into_input, subword_op1,
1001 outof_target, into_target,
1002 unsignedp, methods, shift_mask))
1003 return false;
1004
1005 /* Select between them. Do the INTO half first because INTO_SUPERWORD
1006 might be the current value of OUTOF_TARGET. */
1007 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
1008 into_target, into_superword, word_mode, false))
1009 return false;
1010
1011 if (outof_target != 0)
1012 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
1013 outof_target, outof_superword,
1014 word_mode, false))
1015 return false;
1016
1017 return true;
1018 }
1019 #endif
1020
1021 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1022 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1023 input operand; the shift moves bits in the direction OUTOF_INPUT->
1024 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1025 of the target. OP1 is the shift count and OP1_MODE is its mode.
1026 If OP1 is constant, it will have been truncated as appropriate
1027 and is known to be nonzero.
1028
1029 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1030 shift count is outside the range [0, BITS_PER_WORD). This routine must
1031 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1032
1033 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1034 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1035 fill with zeros or sign bits as appropriate.
1036
1037 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1038 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1039 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1040    In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
1041    are undefined.
1042
1043 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1044 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1045 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1046 function wants to calculate it itself.
1047
1048 Return true if the shift could be successfully synthesized. */
1049
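/* For illustration (a hypothetical target): with 32-bit words and shift
   patterns that truncate their count to 5 bits, SHIFT_MASK is 31 and this
   routine synthesizes a doubleword shift that behaves as if the count were
   masked with 63, as SHIFT_COUNT_TRUNCATED requires.  With SHIFT_MASK zero
   it must never emit a word shift whose count might fall outside
   [0, BITS_PER_WORD).  */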
1050 static bool
1051 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1052 rtx outof_input, rtx into_input, rtx op1,
1053 rtx outof_target, rtx into_target,
1054 int unsignedp, enum optab_methods methods,
1055 unsigned HOST_WIDE_INT shift_mask)
1056 {
1057 rtx superword_op1, tmp, cmp1, cmp2;
1058 rtx subword_label, done_label;
1059 enum rtx_code cmp_code;
1060
1061 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1062 fill the result with sign or zero bits as appropriate. If so, the value
1063 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1064 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1065 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1066
1067 This isn't worthwhile for constant shifts since the optimizers will
1068 cope better with in-range shift counts. */
1069 if (shift_mask >= BITS_PER_WORD
1070 && outof_target != 0
1071 && !CONSTANT_P (op1))
1072 {
1073 if (!expand_doubleword_shift (op1_mode, binoptab,
1074 outof_input, into_input, op1,
1075 0, into_target,
1076 unsignedp, methods, shift_mask))
1077 return false;
1078 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1079 outof_target, unsignedp, methods))
1080 return false;
1081 return true;
1082 }
1083
1084 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1085 is true when the effective shift value is less than BITS_PER_WORD.
1086 Set SUPERWORD_OP1 to the shift count that should be used to shift
1087 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1088 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1089 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1090 {
1091 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1092 is a subword shift count. */
1093 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1094 0, true, methods);
1095 cmp2 = CONST0_RTX (op1_mode);
1096 cmp_code = EQ;
1097 superword_op1 = op1;
1098 }
1099 else
1100 {
1101 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1102 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1103 0, true, methods);
1104 cmp2 = CONST0_RTX (op1_mode);
1105 cmp_code = LT;
1106 superword_op1 = cmp1;
1107 }
1108 if (cmp1 == 0)
1109 return false;
1110
1111 /* If we can compute the condition at compile time, pick the
1112 appropriate subroutine. */
1113 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1114 if (tmp != 0 && CONST_INT_P (tmp))
1115 {
1116 if (tmp == const0_rtx)
1117 return expand_superword_shift (binoptab, outof_input, superword_op1,
1118 outof_target, into_target,
1119 unsignedp, methods);
1120 else
1121 return expand_subword_shift (op1_mode, binoptab,
1122 outof_input, into_input, op1,
1123 outof_target, into_target,
1124 unsignedp, methods, shift_mask);
1125 }
1126
1127 #ifdef HAVE_conditional_move
1128 /* Try using conditional moves to generate straight-line code. */
1129 {
1130 rtx start = get_last_insn ();
1131 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1132 cmp_code, cmp1, cmp2,
1133 outof_input, into_input,
1134 op1, superword_op1,
1135 outof_target, into_target,
1136 unsignedp, methods, shift_mask))
1137 return true;
1138 delete_insns_since (start);
1139 }
1140 #endif
1141
1142 /* As a last resort, use branches to select the correct alternative. */
1143 subword_label = gen_label_rtx ();
1144 done_label = gen_label_rtx ();
1145
1146 NO_DEFER_POP;
1147 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1148 0, 0, subword_label, -1);
1149 OK_DEFER_POP;
1150
1151 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1152 outof_target, into_target,
1153 unsignedp, methods))
1154 return false;
1155
1156 emit_jump_insn (gen_jump (done_label));
1157 emit_barrier ();
1158 emit_label (subword_label);
1159
1160 if (!expand_subword_shift (op1_mode, binoptab,
1161 outof_input, into_input, op1,
1162 outof_target, into_target,
1163 unsignedp, methods, shift_mask))
1164 return false;
1165
1166 emit_label (done_label);
1167 return true;
1168 }
1169 \f
1170 /* Subroutine of expand_binop. Perform a double word multiplication of
1171 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1172    as the target's word_mode.  This function returns NULL_RTX if anything
1173 goes wrong, in which case it may have already emitted instructions
1174 which need to be deleted.
1175
1176 If we want to multiply two two-word values and have normal and widening
1177 multiplies of single-word values, we can do this with three smaller
1178 multiplications.
1179
1180 The multiplication proceeds as follows:
1181 _______________________
1182 [__op0_high_|__op0_low__]
1183 _______________________
1184 * [__op1_high_|__op1_low__]
1185 _______________________________________________
1186 _______________________
1187 (1) [__op0_low__*__op1_low__]
1188 _______________________
1189 (2a) [__op0_low__*__op1_high_]
1190 _______________________
1191 (2b) [__op0_high_*__op1_low__]
1192 _______________________
1193 (3) [__op0_high_*__op1_high_]
1194
1195
1196 This gives a 4-word result. Since we are only interested in the
1197 lower 2 words, partial result (3) and the upper words of (2a) and
1198 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1199 calculated using non-widening multiplication.
1200
1201 (1), however, needs to be calculated with an unsigned widening
1202 multiplication. If this operation is not directly supported we
1203 try using a signed widening multiplication and adjust the result.
1204 This adjustment works as follows:
1205
1206 If both operands are positive then no adjustment is needed.
1207
1208 If the operands have different signs, for example op0_low < 0 and
1209 op1_low >= 0, the instruction treats the most significant bit of
1210 op0_low as a sign bit instead of a bit with significance
1211 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1212 with 2**BITS_PER_WORD - op0_low, and two's complements the
1213 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1214 the result.
1215
1216 Similarly, if both operands are negative, we need to add
1217 (op0_low + op1_low) * 2**BITS_PER_WORD.
1218
1219 We use a trick to adjust quickly. We logically shift op0_low right
1220 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1221 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1222 logical shift exists, we do an arithmetic right shift and subtract
1223 the 0 or -1. */
1224
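/* In short, writing B = 2**BITS_PER_WORD, the low two words of the product
   are (op0_low * op1_low + (op0_high * op1_low + op0_low * op1_high) * B)
   computed modulo B*B, so partial product (3) and the high halves of (2a)
   and (2b) are never needed; the adjustment described above compensates for
   computing (1) with a signed rather than unsigned widening multiply.  */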
1225 static rtx
1226 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1227 bool umulp, enum optab_methods methods)
1228 {
1229 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1230 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1231 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1232 rtx product, adjust, product_high, temp;
1233
1234 rtx op0_high = operand_subword_force (op0, high, mode);
1235 rtx op0_low = operand_subword_force (op0, low, mode);
1236 rtx op1_high = operand_subword_force (op1, high, mode);
1237 rtx op1_low = operand_subword_force (op1, low, mode);
1238
1239 /* If we're using an unsigned multiply to directly compute the product
1240 of the low-order words of the operands and perform any required
1241 adjustments of the operands, we begin by trying two more multiplications
1242 and then computing the appropriate sum.
1243
1244 We have checked above that the required addition is provided.
1245 Full-word addition will normally always succeed, especially if
1246 it is provided at all, so we don't worry about its failure. The
1247 multiplication may well fail, however, so we do handle that. */
1248
1249 if (!umulp)
1250 {
1251 /* ??? This could be done with emit_store_flag where available. */
1252 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1253 NULL_RTX, 1, methods);
1254 if (temp)
1255 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1256 NULL_RTX, 0, OPTAB_DIRECT);
1257 else
1258 {
1259 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1260 NULL_RTX, 0, methods);
1261 if (!temp)
1262 return NULL_RTX;
1263 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1264 NULL_RTX, 0, OPTAB_DIRECT);
1265 }
1266
1267 if (!op0_high)
1268 return NULL_RTX;
1269 }
1270
1271 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1272 NULL_RTX, 0, OPTAB_DIRECT);
1273 if (!adjust)
1274 return NULL_RTX;
1275
1276 /* OP0_HIGH should now be dead. */
1277
1278 if (!umulp)
1279 {
1280 /* ??? This could be done with emit_store_flag where available. */
1281 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1282 NULL_RTX, 1, methods);
1283 if (temp)
1284 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1285 NULL_RTX, 0, OPTAB_DIRECT);
1286 else
1287 {
1288 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1289 NULL_RTX, 0, methods);
1290 if (!temp)
1291 return NULL_RTX;
1292 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1293 NULL_RTX, 0, OPTAB_DIRECT);
1294 }
1295
1296 if (!op1_high)
1297 return NULL_RTX;
1298 }
1299
1300 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1301 NULL_RTX, 0, OPTAB_DIRECT);
1302 if (!temp)
1303 return NULL_RTX;
1304
1305 /* OP1_HIGH should now be dead. */
1306
1307 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1308 adjust, 0, OPTAB_DIRECT);
1309
1310 if (target && !REG_P (target))
1311 target = NULL_RTX;
1312
1313 if (umulp)
1314 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1315 target, 1, OPTAB_DIRECT);
1316 else
1317 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1318 target, 1, OPTAB_DIRECT);
1319
1320 if (!product)
1321 return NULL_RTX;
1322
1323 product_high = operand_subword (product, high, 1, mode);
1324 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1325 REG_P (product_high) ? product_high : adjust,
1326 0, OPTAB_DIRECT);
1327 emit_move_insn (product_high, adjust);
1328 return product;
1329 }
1330 \f
1331 /* Wrapper around expand_binop which takes an rtx code to specify
1332 the operation to perform, not an optab pointer. All other
1333 arguments are the same. */
1334 rtx
1335 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1336 rtx op1, rtx target, int unsignedp,
1337 enum optab_methods methods)
1338 {
1339 optab binop = code_to_optab[(int) code];
1340 gcc_assert (binop);
1341
1342 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1343 }
1344
1345 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1346 binop. Order them according to commutative_operand_precedence and, if
1347 possible, try to put TARGET or a pseudo first. */
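/* For illustration: when OP0 and OP1 have equal precedence and OP1 is the
   same rtx as TARGET, this returns true so that TARGET ends up as the first
   operand of the commutative operation.  */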
1348 static bool
1349 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1350 {
1351 int op0_prec = commutative_operand_precedence (op0);
1352 int op1_prec = commutative_operand_precedence (op1);
1353
1354 if (op0_prec < op1_prec)
1355 return true;
1356
1357 if (op0_prec > op1_prec)
1358 return false;
1359
1360 /* With equal precedence, both orders are ok, but it is better if the
1361 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1362 if (target == 0 || REG_P (target))
1363 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1364 else
1365 return rtx_equal_p (op1, target);
1366 }
1367
1368 /* Return true if BINOPTAB implements a shift operation. */
1369
1370 static bool
1371 shift_optab_p (optab binoptab)
1372 {
1373 switch (binoptab->code)
1374 {
1375 case ASHIFT:
1376 case SS_ASHIFT:
1377 case US_ASHIFT:
1378 case ASHIFTRT:
1379 case LSHIFTRT:
1380 case ROTATE:
1381 case ROTATERT:
1382 return true;
1383
1384 default:
1385 return false;
1386 }
1387 }
1388
1389 /* Return true if BINOPTAB implements a commutative binary operation. */
1390
1391 static bool
1392 commutative_optab_p (optab binoptab)
1393 {
1394 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1395 || binoptab == smul_widen_optab
1396 || binoptab == umul_widen_optab
1397 || binoptab == smul_highpart_optab
1398 || binoptab == umul_highpart_optab);
1399 }
1400
1401 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1402 optimizing, and if the operand is a constant that costs more than
1403 1 instruction, force the constant into a register and return that
1404 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1405
1406 static rtx
1407 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1408 rtx x, bool unsignedp)
1409 {
1410 bool speed = optimize_insn_for_speed_p ();
1411
1412 if (mode != VOIDmode
1413 && optimize
1414 && CONSTANT_P (x)
1415 && rtx_cost (x, binoptab->code, speed) > rtx_cost (x, SET, speed))
1416 {
1417 if (CONST_INT_P (x))
1418 {
1419 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1420 if (intval != INTVAL (x))
1421 x = GEN_INT (intval);
1422 }
1423 else
1424 x = convert_modes (mode, VOIDmode, x, unsignedp);
1425 x = force_reg (mode, x);
1426 }
1427 return x;
1428 }
1429
1430 /* Helper function for expand_binop: handle the case where there
1431 is an insn that directly implements the indicated operation.
1432 Returns null if this is not possible. */
1433 static rtx
1434 expand_binop_directly (enum machine_mode mode, optab binoptab,
1435 rtx op0, rtx op1,
1436 rtx target, int unsignedp, enum optab_methods methods,
1437 rtx last)
1438 {
1439 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1440 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1441 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1442 enum machine_mode tmp_mode;
1443 bool commutative_p;
1444 rtx pat;
1445 rtx xop0 = op0, xop1 = op1;
1446 rtx temp;
1447 rtx swap;
1448
1449 if (target)
1450 temp = target;
1451 else
1452 temp = gen_reg_rtx (mode);
1453
1454 /* If it is a commutative operator and the modes would match
1455 if we would swap the operands, we can save the conversions. */
1456 commutative_p = commutative_optab_p (binoptab);
1457 if (commutative_p
1458 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1459       && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode0)
1460 {
1461 swap = xop0;
1462 xop0 = xop1;
1463 xop1 = swap;
1464 }
1465
1466 /* If we are optimizing, force expensive constants into a register. */
1467 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1468 if (!shift_optab_p (binoptab))
1469 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1470
1471 /* In case the insn wants input operands in modes different from
1472 those of the actual operands, convert the operands. It would
1473 seem that we don't need to convert CONST_INTs, but we do, so
1474 that they're properly zero-extended, sign-extended or truncated
1475 for their mode. */
1476
1477 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1478 xop0 = convert_modes (mode0,
1479 GET_MODE (xop0) != VOIDmode
1480 ? GET_MODE (xop0)
1481 : mode,
1482 xop0, unsignedp);
1483
1484 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1485 xop1 = convert_modes (mode1,
1486 GET_MODE (xop1) != VOIDmode
1487 ? GET_MODE (xop1)
1488 : mode,
1489 xop1, unsignedp);
1490
1491 /* If operation is commutative,
1492 try to make the first operand a register.
1493 Even better, try to make it the same as the target.
1494 Also try to make the last operand a constant. */
1495 if (commutative_p
1496 && swap_commutative_operands_with_target (target, xop0, xop1))
1497 {
1498 swap = xop1;
1499 xop1 = xop0;
1500 xop0 = swap;
1501 }
1502
1503 /* Now, if insn's predicates don't allow our operands, put them into
1504 pseudo regs. */
1505
1506 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1507 && mode0 != VOIDmode)
1508 xop0 = copy_to_mode_reg (mode0, xop0);
1509
1510 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1511 && mode1 != VOIDmode)
1512 xop1 = copy_to_mode_reg (mode1, xop1);
1513
1514 if (binoptab == vec_pack_trunc_optab
1515 || binoptab == vec_pack_usat_optab
1516 || binoptab == vec_pack_ssat_optab
1517 || binoptab == vec_pack_ufix_trunc_optab
1518 || binoptab == vec_pack_sfix_trunc_optab)
1519 {
1520       /* The mode of the result is different from the mode of the
1521 arguments. */
1522 tmp_mode = insn_data[icode].operand[0].mode;
1523 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1524 return 0;
1525 }
1526 else
1527 tmp_mode = mode;
1528
1529 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1530 temp = gen_reg_rtx (tmp_mode);
1531
1532 pat = GEN_FCN (icode) (temp, xop0, xop1);
1533 if (pat)
1534 {
1535 /* If PAT is composed of more than one insn, try to add an appropriate
1536 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1537 operand, call expand_binop again, this time without a target. */
1538 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1539 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1540 {
1541 delete_insns_since (last);
1542 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1543 unsignedp, methods);
1544 }
1545
1546 emit_insn (pat);
1547 return temp;
1548 }
1549
1550 delete_insns_since (last);
1551 return NULL_RTX;
1552 }
1553
1554 /* Generate code to perform an operation specified by BINOPTAB
1555 on operands OP0 and OP1, with result having machine-mode MODE.
1556
1557 UNSIGNEDP is for the case where we have to widen the operands
1558 to perform the operation. It says to use zero-extension.
1559
1560 If TARGET is nonzero, the value
1561 is generated there, if it is convenient to do so.
1562 In all cases an rtx is returned for the locus of the value;
1563 this may or may not be TARGET. */
1564
1565 rtx
1566 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1567 rtx target, int unsignedp, enum optab_methods methods)
1568 {
1569 enum optab_methods next_methods
1570 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1571 ? OPTAB_WIDEN : methods);
1572 enum mode_class mclass;
1573 enum machine_mode wider_mode;
1574 rtx libfunc;
1575 rtx temp;
1576 rtx entry_last = get_last_insn ();
1577 rtx last;
1578
1579 mclass = GET_MODE_CLASS (mode);
1580
1581 /* If subtracting an integer constant, convert this into an addition of
1582 the negated constant. */
1583
1584 if (binoptab == sub_optab && CONST_INT_P (op1))
1585 {
1586 op1 = negate_rtx (mode, op1);
1587 binoptab = add_optab;
1588 }
1589
1590 /* Record where to delete back to if we backtrack. */
1591 last = get_last_insn ();
1592
1593 /* If we can do it with a three-operand insn, do so. */
1594
1595 if (methods != OPTAB_MUST_WIDEN
1596 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1597 {
1598 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1599 unsignedp, methods, last);
1600 if (temp)
1601 return temp;
1602 }
1603
1604 /* If we were trying to rotate, and that didn't work, try rotating
1605 the other direction before falling back to shifts and bitwise-or. */
1606 if (((binoptab == rotl_optab
1607 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1608 || (binoptab == rotr_optab
1609 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1610 && mclass == MODE_INT)
1611 {
1612 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1613 rtx newop1;
1614 unsigned int bits = GET_MODE_BITSIZE (mode);
1615
1616 if (CONST_INT_P (op1))
1617 newop1 = GEN_INT (bits - INTVAL (op1));
1618 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1619 newop1 = negate_rtx (GET_MODE (op1), op1);
1620 else
1621 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1622 GEN_INT (bits), op1,
1623 NULL_RTX, unsignedp, OPTAB_DIRECT);
1624
1625 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1626 target, unsignedp, methods, last);
1627 if (temp)
1628 return temp;
1629 }
1630
1631 /* If this is a multiply, see if we can do a widening operation that
1632 takes operands of this mode and makes a wider mode. */
1633
1634 if (binoptab == smul_optab
1635 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1636 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1637 GET_MODE_WIDER_MODE (mode))->insn_code)
1638 != CODE_FOR_nothing))
1639 {
1640 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1641 unsignedp ? umul_widen_optab : smul_widen_optab,
1642 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1643
1644 if (temp != 0)
1645 {
1646 if (GET_MODE_CLASS (mode) == MODE_INT
1647 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1648 GET_MODE_BITSIZE (GET_MODE (temp))))
1649 return gen_lowpart (mode, temp);
1650 else
1651 return convert_to_mode (mode, temp, unsignedp);
1652 }
1653 }
1654
1655 /* Look for a wider mode of the same class for which we think we
1656 can open-code the operation. Check for a widening multiply at the
1657 wider mode as well. */
1658
1659 if (CLASS_HAS_WIDER_MODES_P (mclass)
1660 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1661 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1662 wider_mode != VOIDmode;
1663 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1664 {
1665 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1666 || (binoptab == smul_optab
1667 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1668 && ((optab_handler ((unsignedp ? umul_widen_optab
1669 : smul_widen_optab),
1670 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1671 != CODE_FOR_nothing)))
1672 {
1673 rtx xop0 = op0, xop1 = op1;
1674 int no_extend = 0;
1675
1676 /* For certain integer operations, we need not actually extend
1677 the narrow operands, as long as we will truncate
1678 the results to the same narrowness. */
1679
1680 if ((binoptab == ior_optab || binoptab == and_optab
1681 || binoptab == xor_optab
1682 || binoptab == add_optab || binoptab == sub_optab
1683 || binoptab == smul_optab || binoptab == ashl_optab)
1684 && mclass == MODE_INT)
1685 {
1686 no_extend = 1;
1687 xop0 = avoid_expensive_constant (mode, binoptab,
1688 xop0, unsignedp);
1689 if (binoptab != ashl_optab)
1690 xop1 = avoid_expensive_constant (mode, binoptab,
1691 xop1, unsignedp);
1692 }
1693
1694 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1695
1696 /* The second operand of a shift must always be extended. */
1697 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1698 no_extend && binoptab != ashl_optab);
1699
1700 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1701 unsignedp, OPTAB_DIRECT);
1702 if (temp)
1703 {
1704 if (mclass != MODE_INT
1705 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1706 GET_MODE_BITSIZE (wider_mode)))
1707 {
1708 if (target == 0)
1709 target = gen_reg_rtx (mode);
1710 convert_move (target, temp, 0);
1711 return target;
1712 }
1713 else
1714 return gen_lowpart (mode, temp);
1715 }
1716 else
1717 delete_insns_since (last);
1718 }
1719 }
1720
1721 /* If operation is commutative,
1722 try to make the first operand a register.
1723 Even better, try to make it the same as the target.
1724 Also try to make the last operand a constant. */
1725 if (commutative_optab_p (binoptab)
1726 && swap_commutative_operands_with_target (target, op0, op1))
1727 {
1728 temp = op1;
1729 op1 = op0;
1730 op0 = temp;
1731 }
1732
1733 /* These can be done a word at a time. */
1734 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1735 && mclass == MODE_INT
1736 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1737 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1738 {
1739 int i;
1740 rtx insns;
1741
1742 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1743 won't be accurate, so use a new target. */
1744 if (target == 0 || target == op0 || target == op1)
1745 target = gen_reg_rtx (mode);
1746
1747 start_sequence ();
1748
1749 /* Do the actual arithmetic. */
1750 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1751 {
1752 rtx target_piece = operand_subword (target, i, 1, mode);
1753 rtx x = expand_binop (word_mode, binoptab,
1754 operand_subword_force (op0, i, mode),
1755 operand_subword_force (op1, i, mode),
1756 target_piece, unsignedp, next_methods);
1757
1758 if (x == 0)
1759 break;
1760
1761 if (target_piece != x)
1762 emit_move_insn (target_piece, x);
1763 }
1764
1765 insns = get_insns ();
1766 end_sequence ();
1767
1768 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1769 {
1770 emit_insn (insns);
1771 return target;
1772 }
1773 }
1774
1775 /* Synthesize double word shifts from single word shifts. */
1776 if ((binoptab == lshr_optab || binoptab == ashl_optab
1777 || binoptab == ashr_optab)
1778 && mclass == MODE_INT
1779 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1780 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1781 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1782 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1783 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1784 {
1785 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1786 enum machine_mode op1_mode;
1787
1788 double_shift_mask = targetm.shift_truncation_mask (mode);
1789 shift_mask = targetm.shift_truncation_mask (word_mode);
1790 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1791
1792 /* Apply the truncation to constant shifts. */
1793 if (double_shift_mask > 0 && CONST_INT_P (op1))
1794 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1795
1796 if (op1 == CONST0_RTX (op1_mode))
1797 return op0;
1798
1799 /* Make sure that this is a combination that expand_doubleword_shift
1800 can handle. See the comments there for details. */
1801 if (double_shift_mask == 0
1802 || (shift_mask == BITS_PER_WORD - 1
1803 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1804 {
1805 rtx insns;
1806 rtx into_target, outof_target;
1807 rtx into_input, outof_input;
1808 int left_shift, outof_word;
1809
1810 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1811 won't be accurate, so use a new target. */
1812 if (target == 0 || target == op0 || target == op1)
1813 target = gen_reg_rtx (mode);
1814
1815 start_sequence ();
1816
1817 /* OUTOF_* is the word we are shifting bits away from, and
1818 INTO_* is the word that we are shifting bits towards, thus
1819 they differ depending on the direction of the shift and
1820 WORDS_BIG_ENDIAN. */
1821
1822 left_shift = binoptab == ashl_optab;
1823 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1824
1825 outof_target = operand_subword (target, outof_word, 1, mode);
1826 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1827
1828 outof_input = operand_subword_force (op0, outof_word, mode);
1829 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1830
1831 if (expand_doubleword_shift (op1_mode, binoptab,
1832 outof_input, into_input, op1,
1833 outof_target, into_target,
1834 unsignedp, next_methods, shift_mask))
1835 {
1836 insns = get_insns ();
1837 end_sequence ();
1838
1839 emit_insn (insns);
1840 return target;
1841 }
1842 end_sequence ();
1843 }
1844 }
1845
1846 /* Synthesize double word rotates from single word shifts. */
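/* Added illustration, not part of the original source: for a left rotate
   by N bits of a double word with high word HI and low word LO, word size
   W and 0 < N < W, the synthesis below computes, on plain word values,

     new_hi = (HI << N) | (LO >> (W - N));
     new_lo = (LO << N) | (HI >> (W - N));

   For W < N < 2*W the roles of HI and LO swap and the counts become N - W
   and 2*W - N; N == W degenerates to the plain word swap handled first.  */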
1847 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1848 && mclass == MODE_INT
1849 && CONST_INT_P (op1)
1850 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1851 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1852 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1853 {
1854 rtx insns;
1855 rtx into_target, outof_target;
1856 rtx into_input, outof_input;
1857 rtx inter;
1858 int shift_count, left_shift, outof_word;
1859
1860 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1861 won't be accurate, so use a new target. Do this also if target is not
1862 a REG, first because having a register instead may open optimization
1863 opportunities, and second because if target and op0 happen to be MEMs
1864 designating the same location, we would risk clobbering it too early
1865 in the code sequence we generate below. */
1866 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1867 target = gen_reg_rtx (mode);
1868
1869 start_sequence ();
1870
1871 shift_count = INTVAL (op1);
1872
1873 /* OUTOF_* is the word we are shifting bits away from, and
1874 INTO_* is the word that we are shifting bits towards, thus
1875 they differ depending on the direction of the shift and
1876 WORDS_BIG_ENDIAN. */
1877
1878 left_shift = (binoptab == rotl_optab);
1879 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1880
1881 outof_target = operand_subword (target, outof_word, 1, mode);
1882 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1883
1884 outof_input = operand_subword_force (op0, outof_word, mode);
1885 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1886
1887 if (shift_count == BITS_PER_WORD)
1888 {
1889 /* This is just a word swap. */
1890 emit_move_insn (outof_target, into_input);
1891 emit_move_insn (into_target, outof_input);
1892 inter = const0_rtx;
1893 }
1894 else
1895 {
1896 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1897 rtx first_shift_count, second_shift_count;
1898 optab reverse_unsigned_shift, unsigned_shift;
1899
1900 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1901 ? lshr_optab : ashl_optab);
1902
1903 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1904 ? ashl_optab : lshr_optab);
1905
1906 if (shift_count > BITS_PER_WORD)
1907 {
1908 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1909 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1910 }
1911 else
1912 {
1913 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1914 second_shift_count = GEN_INT (shift_count);
1915 }
1916
1917 into_temp1 = expand_binop (word_mode, unsigned_shift,
1918 outof_input, first_shift_count,
1919 NULL_RTX, unsignedp, next_methods);
1920 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1921 into_input, second_shift_count,
1922 NULL_RTX, unsignedp, next_methods);
1923
1924 if (into_temp1 != 0 && into_temp2 != 0)
1925 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1926 into_target, unsignedp, next_methods);
1927 else
1928 inter = 0;
1929
1930 if (inter != 0 && inter != into_target)
1931 emit_move_insn (into_target, inter);
1932
1933 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1934 into_input, first_shift_count,
1935 NULL_RTX, unsignedp, next_methods);
1936 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1937 outof_input, second_shift_count,
1938 NULL_RTX, unsignedp, next_methods);
1939
1940 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1941 inter = expand_binop (word_mode, ior_optab,
1942 outof_temp1, outof_temp2,
1943 outof_target, unsignedp, next_methods);
1944
1945 if (inter != 0 && inter != outof_target)
1946 emit_move_insn (outof_target, inter);
1947 }
1948
1949 insns = get_insns ();
1950 end_sequence ();
1951
1952 if (inter != 0)
1953 {
1954 emit_insn (insns);
1955 return target;
1956 }
1957 }
1958
1959 /* These can be done a word at a time by propagating carries. */
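/* Added illustration, not part of the original source: a double-word add
   on 32-bit host words, computing the carry exactly as the loop below
   does, by an unsigned compare of the partial sum against one operand:

     lo = lo0 + lo1;
     c  = lo < lo0;            (wrap-around implies a carry out)
     hi = hi0 + hi1 + c;

   Subtraction is analogous, with the borrow detected by (lo0 - lo1) > lo0.  */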
1960 if ((binoptab == add_optab || binoptab == sub_optab)
1961 && mclass == MODE_INT
1962 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1963 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1964 {
1965 unsigned int i;
1966 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1967 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1968 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1969 rtx xop0, xop1, xtarget;
1970
1971 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1972 is one of those, use it. Otherwise, use 1 since it is the
1973 one easiest to get. */
1974 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1975 int normalizep = STORE_FLAG_VALUE;
1976 #else
1977 int normalizep = 1;
1978 #endif
1979
1980 /* Prepare the operands. */
1981 xop0 = force_reg (mode, op0);
1982 xop1 = force_reg (mode, op1);
1983
1984 xtarget = gen_reg_rtx (mode);
1985
1986 if (target == 0 || !REG_P (target))
1987 target = xtarget;
1988
1989 /* Indicate for flow that the entire target reg is being set. */
1990 if (REG_P (target))
1991 emit_clobber (xtarget);
1992
1993 /* Do the actual arithmetic. */
1994 for (i = 0; i < nwords; i++)
1995 {
1996 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1997 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1998 rtx op0_piece = operand_subword_force (xop0, index, mode);
1999 rtx op1_piece = operand_subword_force (xop1, index, mode);
2000 rtx x;
2001
2002 /* Main add/subtract of the input operands. */
2003 x = expand_binop (word_mode, binoptab,
2004 op0_piece, op1_piece,
2005 target_piece, unsignedp, next_methods);
2006 if (x == 0)
2007 break;
2008
2009 if (i + 1 < nwords)
2010 {
2011 /* Store carry from main add/subtract. */
2012 carry_out = gen_reg_rtx (word_mode);
2013 carry_out = emit_store_flag_force (carry_out,
2014 (binoptab == add_optab
2015 ? LT : GT),
2016 x, op0_piece,
2017 word_mode, 1, normalizep);
2018 }
2019
2020 if (i > 0)
2021 {
2022 rtx newx;
2023
2024 /* Add/subtract previous carry to main result. */
2025 newx = expand_binop (word_mode,
2026 normalizep == 1 ? binoptab : otheroptab,
2027 x, carry_in,
2028 NULL_RTX, 1, next_methods);
2029
2030 if (i + 1 < nwords)
2031 {
2032 /* Get out carry from adding/subtracting carry in. */
2033 rtx carry_tmp = gen_reg_rtx (word_mode);
2034 carry_tmp = emit_store_flag_force (carry_tmp,
2035 (binoptab == add_optab
2036 ? LT : GT),
2037 newx, x,
2038 word_mode, 1, normalizep);
2039
2040 /* Logical-ior the two possible carries together. */
2041 carry_out = expand_binop (word_mode, ior_optab,
2042 carry_out, carry_tmp,
2043 carry_out, 0, next_methods);
2044 if (carry_out == 0)
2045 break;
2046 }
2047 emit_move_insn (target_piece, newx);
2048 }
2049 else
2050 {
2051 if (x != target_piece)
2052 emit_move_insn (target_piece, x);
2053 }
2054
2055 carry_in = carry_out;
2056 }
2057
2058 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2059 {
2060 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2061 || ! rtx_equal_p (target, xtarget))
2062 {
2063 rtx temp = emit_move_insn (target, xtarget);
2064
2065 set_unique_reg_note (temp,
2066 REG_EQUAL,
2067 gen_rtx_fmt_ee (binoptab->code, mode,
2068 copy_rtx (xop0),
2069 copy_rtx (xop1)));
2070 }
2071 else
2072 target = xtarget;
2073
2074 return target;
2075 }
2076
2077 else
2078 delete_insns_since (last);
2079 }
2080
2081 /* Attempt to synthesize double word multiplies using a sequence of word
2082 mode multiplications. We first attempt to generate a sequence using a
2083 more efficient unsigned widening multiply, and if that fails we then
2084 try using a signed widening multiply. */
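/* Added illustration, not part of the original source: with word size W,
   writing OP0 = h0*2^W + l0 and OP1 = h1*2^W + l1, the product modulo
   2^(2*W) is

     l0*l1 + ((h0*l1 + h1*l0) << W)

   so one widening W x W -> 2*W multiply plus two truncating word-mode
   multiplies and two word-mode adds are enough, which is the shape
   expand_doubleword_mult builds.  */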
2085
2086 if (binoptab == smul_optab
2087 && mclass == MODE_INT
2088 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2089 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2090 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2091 {
2092 rtx product = NULL_RTX;
2093
2094 if (optab_handler (umul_widen_optab, mode)->insn_code
2095 != CODE_FOR_nothing)
2096 {
2097 product = expand_doubleword_mult (mode, op0, op1, target,
2098 true, methods);
2099 if (!product)
2100 delete_insns_since (last);
2101 }
2102
2103 if (product == NULL_RTX
2104 && optab_handler (smul_widen_optab, mode)->insn_code
2105 != CODE_FOR_nothing)
2106 {
2107 product = expand_doubleword_mult (mode, op0, op1, target,
2108 false, methods);
2109 if (!product)
2110 delete_insns_since (last);
2111 }
2112
2113 if (product != NULL_RTX)
2114 {
2115 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2116 {
2117 temp = emit_move_insn (target ? target : product, product);
2118 set_unique_reg_note (temp,
2119 REG_EQUAL,
2120 gen_rtx_fmt_ee (MULT, mode,
2121 copy_rtx (op0),
2122 copy_rtx (op1)));
2123 }
2124 return product;
2125 }
2126 }
2127
2128 /* It can't be open-coded in this mode.
2129 Use a library call if one is available and caller says that's ok. */
2130
2131 libfunc = optab_libfunc (binoptab, mode);
2132 if (libfunc
2133 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2134 {
2135 rtx insns;
2136 rtx op1x = op1;
2137 enum machine_mode op1_mode = mode;
2138 rtx value;
2139
2140 start_sequence ();
2141
2142 if (shift_optab_p (binoptab))
2143 {
2144 op1_mode = targetm.libgcc_shift_count_mode ();
2145 /* Specify unsigned here,
2146 since negative shift counts are meaningless. */
2147 op1x = convert_to_mode (op1_mode, op1, 1);
2148 }
2149
2150 if (GET_MODE (op0) != VOIDmode
2151 && GET_MODE (op0) != mode)
2152 op0 = convert_to_mode (mode, op0, unsignedp);
2153
2154 /* Pass 1 for NO_QUEUE so we don't lose any increments
2155 if the libcall is cse'd or moved. */
2156 value = emit_library_call_value (libfunc,
2157 NULL_RTX, LCT_CONST, mode, 2,
2158 op0, mode, op1x, op1_mode);
2159
2160 insns = get_insns ();
2161 end_sequence ();
2162
2163 target = gen_reg_rtx (mode);
2164 emit_libcall_block (insns, target, value,
2165 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2166
2167 return target;
2168 }
2169
2170 delete_insns_since (last);
2171
2172 /* It can't be done in this mode. Can we do it in a wider mode? */
2173
2174 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2175 || methods == OPTAB_MUST_WIDEN))
2176 {
2177 /* Caller says, don't even try. */
2178 delete_insns_since (entry_last);
2179 return 0;
2180 }
2181
2182 /* Compute the value of METHODS to pass to recursive calls.
2183 Don't allow widening to be tried recursively. */
2184
2185 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2186
2187 /* Look for a wider mode of the same class for which it appears we can do
2188 the operation. */
2189
2190 if (CLASS_HAS_WIDER_MODES_P (mclass))
2191 {
2192 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2193 wider_mode != VOIDmode;
2194 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2195 {
2196 if ((optab_handler (binoptab, wider_mode)->insn_code
2197 != CODE_FOR_nothing)
2198 || (methods == OPTAB_LIB
2199 && optab_libfunc (binoptab, wider_mode)))
2200 {
2201 rtx xop0 = op0, xop1 = op1;
2202 int no_extend = 0;
2203
2204 /* For certain integer operations, we need not actually extend
2205 the narrow operands, as long as we will truncate
2206 the results to the same narrowness. */
2207
2208 if ((binoptab == ior_optab || binoptab == and_optab
2209 || binoptab == xor_optab
2210 || binoptab == add_optab || binoptab == sub_optab
2211 || binoptab == smul_optab || binoptab == ashl_optab)
2212 && mclass == MODE_INT)
2213 no_extend = 1;
2214
2215 xop0 = widen_operand (xop0, wider_mode, mode,
2216 unsignedp, no_extend);
2217
2218 /* The second operand of a shift must always be extended. */
2219 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2220 no_extend && binoptab != ashl_optab);
2221
2222 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2223 unsignedp, methods);
2224 if (temp)
2225 {
2226 if (mclass != MODE_INT
2227 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2228 GET_MODE_BITSIZE (wider_mode)))
2229 {
2230 if (target == 0)
2231 target = gen_reg_rtx (mode);
2232 convert_move (target, temp, 0);
2233 return target;
2234 }
2235 else
2236 return gen_lowpart (mode, temp);
2237 }
2238 else
2239 delete_insns_since (last);
2240 }
2241 }
2242 }
2243
2244 delete_insns_since (entry_last);
2245 return 0;
2246 }
2247 \f
2248 /* Expand a binary operator which has both signed and unsigned forms.
2249 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2250 signed operations.
2251
2252 If we widen unsigned operands, we may use a signed wider operation instead
2253 of an unsigned wider operation, since the result would be the same. */
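/* Added illustration, not part of the original source: the claim above
   holds because the discarded high bits of the wide result cannot affect
   the retained low bits.  For example, with 8-bit operands widened to
   32 bits,

     (unsigned char) ((int) a + (int) b) == (unsigned char) (a + b)

   for all unsigned char A and B, whether the 32-bit add is treated as
   signed or unsigned.  */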
2254
2255 rtx
2256 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2257 rtx op0, rtx op1, rtx target, int unsignedp,
2258 enum optab_methods methods)
2259 {
2260 rtx temp;
2261 optab direct_optab = unsignedp ? uoptab : soptab;
2262 struct optab_d wide_soptab;
2263
2264 /* Do it without widening, if possible. */
2265 temp = expand_binop (mode, direct_optab, op0, op1, target,
2266 unsignedp, OPTAB_DIRECT);
2267 if (temp || methods == OPTAB_DIRECT)
2268 return temp;
2269
2270 /* Try widening to a signed int. Make a fake signed optab that
2271 hides any signed insn for direct use. */
2272 wide_soptab = *soptab;
2273 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2274 /* We don't want to generate new hash table entries from this fake
2275 optab. */
2276 wide_soptab.libcall_gen = NULL;
2277
2278 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2279 unsignedp, OPTAB_WIDEN);
2280
2281 /* For unsigned operands, try widening to an unsigned int. */
2282 if (temp == 0 && unsignedp)
2283 temp = expand_binop (mode, uoptab, op0, op1, target,
2284 unsignedp, OPTAB_WIDEN);
2285 if (temp || methods == OPTAB_WIDEN)
2286 return temp;
2287
2288 /* Use the right width libcall if that exists. */
2289 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2290 if (temp || methods == OPTAB_LIB)
2291 return temp;
2292
2293 /* Must widen and use a libcall, use either signed or unsigned. */
2294 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2295 unsignedp, methods);
2296 if (temp != 0)
2297 return temp;
2298 if (unsignedp)
2299 return expand_binop (mode, uoptab, op0, op1, target,
2300 unsignedp, methods);
2301 return 0;
2302 }
2303 \f
2304 /* Generate code to perform an operation specified by UNOPTAB
2305 on operand OP0, with two results to TARG0 and TARG1.
2306 We assume that the order of the operands for the instruction
2307 is TARG0, TARG1, OP0.
2308
2309 Either TARG0 or TARG1 may be zero, but what that means is that
2310 the result is not actually wanted. We will generate it into
2311 a dummy pseudo-reg and discard it. They may not both be zero.
2312
2313 Returns 1 if this operation can be performed; 0 if not. */
2314
2315 int
2316 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2317 int unsignedp)
2318 {
2319 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2320 enum mode_class mclass;
2321 enum machine_mode wider_mode;
2322 rtx entry_last = get_last_insn ();
2323 rtx last;
2324
2325 mclass = GET_MODE_CLASS (mode);
2326
2327 if (!targ0)
2328 targ0 = gen_reg_rtx (mode);
2329 if (!targ1)
2330 targ1 = gen_reg_rtx (mode);
2331
2332 /* Record where to go back to if we fail. */
2333 last = get_last_insn ();
2334
2335 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2336 {
2337 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2338 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2339 rtx pat;
2340 rtx xop0 = op0;
2341
2342 if (GET_MODE (xop0) != VOIDmode
2343 && GET_MODE (xop0) != mode0)
2344 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2345
2346 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2347 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2348 xop0 = copy_to_mode_reg (mode0, xop0);
2349
2350 /* We could handle this, but we should always be called with a pseudo
2351 for our targets and all insns should take them as outputs. */
2352 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2353 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2354
2355 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2356 if (pat)
2357 {
2358 emit_insn (pat);
2359 return 1;
2360 }
2361 else
2362 delete_insns_since (last);
2363 }
2364
2365 /* It can't be done in this mode. Can we do it in a wider mode? */
2366
2367 if (CLASS_HAS_WIDER_MODES_P (mclass))
2368 {
2369 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2370 wider_mode != VOIDmode;
2371 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2372 {
2373 if (optab_handler (unoptab, wider_mode)->insn_code
2374 != CODE_FOR_nothing)
2375 {
2376 rtx t0 = gen_reg_rtx (wider_mode);
2377 rtx t1 = gen_reg_rtx (wider_mode);
2378 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2379
2380 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2381 {
2382 convert_move (targ0, t0, unsignedp);
2383 convert_move (targ1, t1, unsignedp);
2384 return 1;
2385 }
2386 else
2387 delete_insns_since (last);
2388 }
2389 }
2390 }
2391
2392 delete_insns_since (entry_last);
2393 return 0;
2394 }
2395 \f
2396 /* Generate code to perform an operation specified by BINOPTAB
2397 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2398 We assume that the order of the operands for the instruction
2399 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2400 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2401
2402 Either TARG0 or TARG1 may be zero, but what that means is that
2403 the result is not actually wanted. We will generate it into
2404 a dummy pseudo-reg and discard it. They may not both be zero.
2405
2406 Returns 1 if this operation can be performed; 0 if not. */
2407
2408 int
2409 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2410 int unsignedp)
2411 {
2412 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2413 enum mode_class mclass;
2414 enum machine_mode wider_mode;
2415 rtx entry_last = get_last_insn ();
2416 rtx last;
2417
2418 mclass = GET_MODE_CLASS (mode);
2419
2420 if (!targ0)
2421 targ0 = gen_reg_rtx (mode);
2422 if (!targ1)
2423 targ1 = gen_reg_rtx (mode);
2424
2425 /* Record where to go back to if we fail. */
2426 last = get_last_insn ();
2427
2428 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2429 {
2430 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2431 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2432 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2433 rtx pat;
2434 rtx xop0 = op0, xop1 = op1;
2435
2436 /* If we are optimizing, force expensive constants into a register. */
2437 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2438 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2439
2440 /* In case the insn wants input operands in modes different from
2441 those of the actual operands, convert the operands. It would
2442 seem that we don't need to convert CONST_INTs, but we do, so
2443 that they're properly zero-extended, sign-extended or truncated
2444 for their mode. */
2445
2446 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2447 xop0 = convert_modes (mode0,
2448 GET_MODE (op0) != VOIDmode
2449 ? GET_MODE (op0)
2450 : mode,
2451 xop0, unsignedp);
2452
2453 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2454 xop1 = convert_modes (mode1,
2455 GET_MODE (op1) != VOIDmode
2456 ? GET_MODE (op1)
2457 : mode,
2458 xop1, unsignedp);
2459
2460 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2461 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2462 xop0 = copy_to_mode_reg (mode0, xop0);
2463
2464 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2465 xop1 = copy_to_mode_reg (mode1, xop1);
2466
2467 /* We could handle this, but we should always be called with a pseudo
2468 for our targets and all insns should take them as outputs. */
2469 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2470 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2471
2472 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2473 if (pat)
2474 {
2475 emit_insn (pat);
2476 return 1;
2477 }
2478 else
2479 delete_insns_since (last);
2480 }
2481
2482 /* It can't be done in this mode. Can we do it in a wider mode? */
2483
2484 if (CLASS_HAS_WIDER_MODES_P (mclass))
2485 {
2486 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2487 wider_mode != VOIDmode;
2488 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2489 {
2490 if (optab_handler (binoptab, wider_mode)->insn_code
2491 != CODE_FOR_nothing)
2492 {
2493 rtx t0 = gen_reg_rtx (wider_mode);
2494 rtx t1 = gen_reg_rtx (wider_mode);
2495 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2496 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2497
2498 if (expand_twoval_binop (binoptab, cop0, cop1,
2499 t0, t1, unsignedp))
2500 {
2501 convert_move (targ0, t0, unsignedp);
2502 convert_move (targ1, t1, unsignedp);
2503 return 1;
2504 }
2505 else
2506 delete_insns_since (last);
2507 }
2508 }
2509 }
2510
2511 delete_insns_since (entry_last);
2512 return 0;
2513 }
2514
2515 /* Expand the two-valued library call indicated by BINOPTAB, but
2516 preserve only one of the values. If TARG0 is non-NULL, the first
2517 value is placed into TARG0; otherwise the second value is placed
2518 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2519 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2520 This routine assumes that the value returned by the library call is
2521 as if the return value were of an integral mode twice as wide as the
2522 mode of OP0. Returns 1 if the call was successful. */
2523
2524 bool
2525 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2526 rtx targ0, rtx targ1, enum rtx_code code)
2527 {
2528 enum machine_mode mode;
2529 enum machine_mode libval_mode;
2530 rtx libval;
2531 rtx insns;
2532 rtx libfunc;
2533
2534 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2535 gcc_assert (!targ0 != !targ1);
2536
2537 mode = GET_MODE (op0);
2538 libfunc = optab_libfunc (binoptab, mode);
2539 if (!libfunc)
2540 return false;
2541
2542 /* The value returned by the library function will have twice as
2543 many bits as the nominal MODE. */
2544 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2545 MODE_INT);
2546 start_sequence ();
2547 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2548 libval_mode, 2,
2549 op0, mode,
2550 op1, mode);
2551 /* Get the part of VAL containing the value that we want. */
2552 libval = simplify_gen_subreg (mode, libval, libval_mode,
2553 targ0 ? 0 : GET_MODE_SIZE (mode));
2554 insns = get_insns ();
2555 end_sequence ();
2556 /* Move the result into the desired location. */
2557 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2558 gen_rtx_fmt_ee (code, mode, op0, op1));
2559
2560 return true;
2561 }
2562
2563 \f
2564 /* Wrapper around expand_unop which takes an rtx code to specify
2565 the operation to perform, not an optab pointer. All other
2566 arguments are the same. */
2567 rtx
2568 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2569 rtx target, int unsignedp)
2570 {
2571 optab unop = code_to_optab[(int) code];
2572 gcc_assert (unop);
2573
2574 return expand_unop (mode, unop, op0, target, unsignedp);
2575 }
2576
2577 /* Try calculating
2578 (clz:narrow x)
2579 as
2580 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
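/* Added illustration, not part of the original source: e.g. a 16-bit clz
   computed in 32 bits, writing clz32 for a 32-bit count-leading-zeros and
   assuming x != 0 (clz is undefined at zero anyway):

     clz16 (x) == clz32 ((uint32_t) x) - (32 - 16)

   since zero extension contributes exactly 32 - 16 extra leading zeros.  */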
2581 static rtx
2582 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2583 {
2584 enum mode_class mclass = GET_MODE_CLASS (mode);
2585 if (CLASS_HAS_WIDER_MODES_P (mclass))
2586 {
2587 enum machine_mode wider_mode;
2588 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2589 wider_mode != VOIDmode;
2590 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2591 {
2592 if (optab_handler (clz_optab, wider_mode)->insn_code
2593 != CODE_FOR_nothing)
2594 {
2595 rtx xop0, temp, last;
2596
2597 last = get_last_insn ();
2598
2599 if (target == 0)
2600 target = gen_reg_rtx (mode);
2601 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2602 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2603 if (temp != 0)
2604 temp = expand_binop (wider_mode, sub_optab, temp,
2605 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2606 - GET_MODE_BITSIZE (mode)),
2607 target, true, OPTAB_DIRECT);
2608 if (temp == 0)
2609 delete_insns_since (last);
2610
2611 return temp;
2612 }
2613 }
2614 }
2615 return 0;
2616 }
2617
2618 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2619 quantities, choosing which based on whether the high word is nonzero. */
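/* Added illustration, not part of the original source: for a 64-bit value
   split into 32-bit words HI and LO, and writing clz32 for a word-sized
   count-leading-zeros,

     clz64 (x) == (HI != 0 ? clz32 (HI) : 32 + clz32 (LO))

   which is the compare-and-branch generated below; both sides are
   undefined at x == 0, as clz itself is.  */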
2620 static rtx
2621 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2622 {
2623 rtx xop0 = force_reg (mode, op0);
2624 rtx subhi = gen_highpart (word_mode, xop0);
2625 rtx sublo = gen_lowpart (word_mode, xop0);
2626 rtx hi0_label = gen_label_rtx ();
2627 rtx after_label = gen_label_rtx ();
2628 rtx seq, temp, result;
2629
2630 /* If we were not given a target, use a word_mode register, not a
2631 'mode' register. The result will fit, and nobody is expecting
2632 anything bigger (the return type of __builtin_clz* is int). */
2633 if (!target)
2634 target = gen_reg_rtx (word_mode);
2635
2636 /* In any case, write to a word_mode scratch in both branches of the
2637 conditional, so we can ensure there is a single move insn setting
2638 'target' to tag a REG_EQUAL note on. */
2639 result = gen_reg_rtx (word_mode);
2640
2641 start_sequence ();
2642
2643 /* If the high word is not equal to zero,
2644 then clz of the full value is clz of the high word. */
2645 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2646 word_mode, true, hi0_label);
2647
2648 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2649 if (!temp)
2650 goto fail;
2651
2652 if (temp != result)
2653 convert_move (result, temp, true);
2654
2655 emit_jump_insn (gen_jump (after_label));
2656 emit_barrier ();
2657
2658 /* Else clz of the full value is clz of the low word plus the number
2659 of bits in the high word. */
2660 emit_label (hi0_label);
2661
2662 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2663 if (!temp)
2664 goto fail;
2665 temp = expand_binop (word_mode, add_optab, temp,
2666 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2667 result, true, OPTAB_DIRECT);
2668 if (!temp)
2669 goto fail;
2670 if (temp != result)
2671 convert_move (result, temp, true);
2672
2673 emit_label (after_label);
2674 convert_move (target, result, true);
2675
2676 seq = get_insns ();
2677 end_sequence ();
2678
2679 add_equal_note (seq, target, CLZ, xop0, 0);
2680 emit_insn (seq);
2681 return target;
2682
2683 fail:
2684 end_sequence ();
2685 return 0;
2686 }
2687
2688 /* Try calculating
2689 (bswap:narrow x)
2690 as
2691 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
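/* Added illustration, not part of the original source: e.g. a 16-bit byte
   swap computed in 32 bits,

     bswap16 (x) == (uint16_t) (bswap32 ((uint32_t) x) >> 16)

   the interesting bytes of the zero-extended value land in the top half
   after the wide swap, and the logical right shift brings them back down.  */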
2692 static rtx
2693 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2694 {
2695 enum mode_class mclass = GET_MODE_CLASS (mode);
2696 enum machine_mode wider_mode;
2697 rtx x, last;
2698
2699 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2700 return NULL_RTX;
2701
2702 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2703 wider_mode != VOIDmode;
2704 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2705 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2706 goto found;
2707 return NULL_RTX;
2708
2709 found:
2710 last = get_last_insn ();
2711
2712 x = widen_operand (op0, wider_mode, mode, true, true);
2713 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2714
2715 if (x != 0)
2716 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2717 size_int (GET_MODE_BITSIZE (wider_mode)
2718 - GET_MODE_BITSIZE (mode)),
2719 NULL_RTX, true);
2720
2721 if (x != 0)
2722 {
2723 if (target == 0)
2724 target = gen_reg_rtx (mode);
2725 emit_move_insn (target, gen_lowpart (mode, x));
2726 }
2727 else
2728 delete_insns_since (last);
2729
2730 return target;
2731 }
2732
2733 /* Try calculating bswap as two bswaps of two word-sized operands. */
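/* Added illustration, not part of the original source: on 32-bit words,

     bswap64 (x) == ((uint64_t) bswap32 ((uint32_t) x) << 32)
                    | bswap32 ((uint32_t) (x >> 32))

   i.e. byte-swap each half and exchange the halves, which is what the two
   word-mode bswaps plus the crossed subword stores below implement.  */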
2734
2735 static rtx
2736 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2737 {
2738 rtx t0, t1;
2739
2740 t1 = expand_unop (word_mode, bswap_optab,
2741 operand_subword_force (op, 0, mode), NULL_RTX, true);
2742 t0 = expand_unop (word_mode, bswap_optab,
2743 operand_subword_force (op, 1, mode), NULL_RTX, true);
2744
2745 if (target == 0)
2746 target = gen_reg_rtx (mode);
2747 if (REG_P (target))
2748 emit_clobber (target);
2749 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2750 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2751
2752 return target;
2753 }
2754
2755 /* Try calculating (parity x) as (and (popcount x) 1), where
2756 popcount can also be done in a wider mode. */
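/* Added illustration, not part of the original source: on host integers
   this is simply

     parity (x) == __builtin_popcount (x) & 1

   the AND with 1 keeps only the low bit of the population count.  */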
2757 static rtx
2758 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2759 {
2760 enum mode_class mclass = GET_MODE_CLASS (mode);
2761 if (CLASS_HAS_WIDER_MODES_P (mclass))
2762 {
2763 enum machine_mode wider_mode;
2764 for (wider_mode = mode; wider_mode != VOIDmode;
2765 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2766 {
2767 if (optab_handler (popcount_optab, wider_mode)->insn_code
2768 != CODE_FOR_nothing)
2769 {
2770 rtx xop0, temp, last;
2771
2772 last = get_last_insn ();
2773
2774 if (target == 0)
2775 target = gen_reg_rtx (mode);
2776 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2777 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2778 true);
2779 if (temp != 0)
2780 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2781 target, true, OPTAB_DIRECT);
2782 if (temp == 0)
2783 delete_insns_since (last);
2784
2785 return temp;
2786 }
2787 }
2788 }
2789 return 0;
2790 }
2791
2792 /* Try calculating ctz(x) as K - clz(x & -x),
2793 where K is GET_MODE_BITSIZE(mode) - 1.
2794
2795 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2796 don't have to worry about what the hardware does in that case. (If
2797 the clz instruction produces the usual value at 0, which is K, the
2798 result of this code sequence will be -1; expand_ffs, below, relies
2799 on this. It might be nice to have it be K instead, for consistency
2800 with the (very few) processors that provide a ctz with a defined
2801 value, but that would take one more instruction, and it would be
2802 less convenient for expand_ffs anyway.) */
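/* Added illustration, not part of the original source: x & -x has exactly
   one bit set, at position ctz (x), so its clz is (W - 1) - ctz (x).
   E.g. with W == 32 and x == 0b101000: x & -x == 0b1000, whose clz is 28,
   and 31 - 28 == 3 == ctz (x).  */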
2803
2804 static rtx
2805 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2806 {
2807 rtx seq, temp;
2808
2809 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2810 return 0;
2811
2812 start_sequence ();
2813
2814 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2815 if (temp)
2816 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2817 true, OPTAB_DIRECT);
2818 if (temp)
2819 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2820 if (temp)
2821 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2822 temp, target,
2823 true, OPTAB_DIRECT);
2824 if (temp == 0)
2825 {
2826 end_sequence ();
2827 return 0;
2828 }
2829
2830 seq = get_insns ();
2831 end_sequence ();
2832
2833 add_equal_note (seq, temp, CTZ, op0, 0);
2834 emit_insn (seq);
2835 return temp;
2836 }
2837
2838
2839 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2840 else with the sequence used by expand_clz.
2841
2842 The ffs builtin promises to return zero for a zero value and ctz/clz
2843 may have an undefined value in that case. If they do not give us a
2844 convenient value, we have to generate a test and branch. */
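/* Added illustration, not part of the original source: the identity being
   targeted is

     ffs (x) == (x == 0 ? 0 : ctz (x) + 1)

   so if the ctz expansion yields -1 at zero (see expand_ctz above), the
   final "add 1" already gives 0 with no branch; otherwise a compare and
   branch forces the intermediate value to -1 first.  */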
2845 static rtx
2846 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2847 {
2848 HOST_WIDE_INT val = 0;
2849 bool defined_at_zero = false;
2850 rtx temp, seq;
2851
2852 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2853 {
2854 start_sequence ();
2855
2856 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2857 if (!temp)
2858 goto fail;
2859
2860 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2861 }
2862 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2863 {
2864 start_sequence ();
2865 temp = expand_ctz (mode, op0, 0);
2866 if (!temp)
2867 goto fail;
2868
2869 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2870 {
2871 defined_at_zero = true;
2872 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2873 }
2874 }
2875 else
2876 return 0;
2877
2878 if (defined_at_zero && val == -1)
2879 /* No correction needed at zero. */;
2880 else
2881 {
2882 /* We don't try to do anything clever with the situation found
2883 on some processors (e.g. Alpha) where ctz(0:mode) ==
2884 bitsize(mode). If someone can think of a way to send N to -1
2885 and leave alone all values in the range 0..N-1 (where N is a
2886 power of two), cheaper than this test-and-branch, please add it.
2887
2888 The test-and-branch is done after the operation itself, in case
2889 the operation sets condition codes that can be recycled for this.
2890 (This is true on i386, for instance.) */
2891
2892 rtx nonzero_label = gen_label_rtx ();
2893 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2894 mode, true, nonzero_label);
2895
2896 convert_move (temp, GEN_INT (-1), false);
2897 emit_label (nonzero_label);
2898 }
2899
2900 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2901 to produce a value in the range 0..bitsize. */
2902 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2903 target, false, OPTAB_DIRECT);
2904 if (!temp)
2905 goto fail;
2906
2907 seq = get_insns ();
2908 end_sequence ();
2909
2910 add_equal_note (seq, temp, FFS, op0, 0);
2911 emit_insn (seq);
2912 return temp;
2913
2914 fail:
2915 end_sequence ();
2916 return 0;
2917 }
2918
2919 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2920 conditions, VAL may already be a SUBREG against which we cannot generate
2921 a further SUBREG. In this case, we expect forcing the value into a
2922 register will work around the situation. */
2923
2924 static rtx
2925 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2926 enum machine_mode imode)
2927 {
2928 rtx ret;
2929 ret = lowpart_subreg (omode, val, imode);
2930 if (ret == NULL)
2931 {
2932 val = force_reg (imode, val);
2933 ret = lowpart_subreg (omode, val, imode);
2934 gcc_assert (ret != NULL);
2935 }
2936 return ret;
2937 }
2938
2939 /* Expand a floating point absolute value or negation operation via a
2940 logical operation on the sign bit. */
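/* Added illustration, not part of the original source: for the common
   IEEE binary32 layout the two operations reduce to

     fabs:  bits & 0x7fffffff    (clear the sign bit)
     fneg:  bits ^ 0x80000000    (flip the sign bit)

   the code below derives the equivalent mask from signbit_rw for whatever
   format and word layout the target actually uses.  */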
2941
2942 static rtx
2943 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2944 rtx op0, rtx target)
2945 {
2946 const struct real_format *fmt;
2947 int bitpos, word, nwords, i;
2948 enum machine_mode imode;
2949 double_int mask;
2950 rtx temp, insns;
2951
2952 /* The format has to have a simple sign bit. */
2953 fmt = REAL_MODE_FORMAT (mode);
2954 if (fmt == NULL)
2955 return NULL_RTX;
2956
2957 bitpos = fmt->signbit_rw;
2958 if (bitpos < 0)
2959 return NULL_RTX;
2960
2961 /* Don't create negative zeros if the format doesn't support them. */
2962 if (code == NEG && !fmt->has_signed_zero)
2963 return NULL_RTX;
2964
2965 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2966 {
2967 imode = int_mode_for_mode (mode);
2968 if (imode == BLKmode)
2969 return NULL_RTX;
2970 word = 0;
2971 nwords = 1;
2972 }
2973 else
2974 {
2975 imode = word_mode;
2976
2977 if (FLOAT_WORDS_BIG_ENDIAN)
2978 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2979 else
2980 word = bitpos / BITS_PER_WORD;
2981 bitpos = bitpos % BITS_PER_WORD;
2982 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2983 }
2984
2985 mask = double_int_setbit (double_int_zero, bitpos);
2986 if (code == ABS)
2987 mask = double_int_not (mask);
2988
2989 if (target == 0 || target == op0)
2990 target = gen_reg_rtx (mode);
2991
2992 if (nwords > 1)
2993 {
2994 start_sequence ();
2995
2996 for (i = 0; i < nwords; ++i)
2997 {
2998 rtx targ_piece = operand_subword (target, i, 1, mode);
2999 rtx op0_piece = operand_subword_force (op0, i, mode);
3000
3001 if (i == word)
3002 {
3003 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3004 op0_piece,
3005 immed_double_int_const (mask, imode),
3006 targ_piece, 1, OPTAB_LIB_WIDEN);
3007 if (temp != targ_piece)
3008 emit_move_insn (targ_piece, temp);
3009 }
3010 else
3011 emit_move_insn (targ_piece, op0_piece);
3012 }
3013
3014 insns = get_insns ();
3015 end_sequence ();
3016
3017 emit_insn (insns);
3018 }
3019 else
3020 {
3021 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3022 gen_lowpart (imode, op0),
3023 immed_double_int_const (mask, imode),
3024 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3025 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3026
3027 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3028 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3029 }
3030
3031 return target;
3032 }
3033
3034 /* As expand_unop, but will fail rather than attempt the operation in a
3035 different mode or with a libcall. */
3036 static rtx
3037 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3038 int unsignedp)
3039 {
3040 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3041 {
3042 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3043 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3044 rtx xop0 = op0;
3045 rtx last = get_last_insn ();
3046 rtx pat, temp;
3047
3048 if (target)
3049 temp = target;
3050 else
3051 temp = gen_reg_rtx (mode);
3052
3053 if (GET_MODE (xop0) != VOIDmode
3054 && GET_MODE (xop0) != mode0)
3055 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3056
3057 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3058
3059 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3060 xop0 = copy_to_mode_reg (mode0, xop0);
3061
3062 if (!insn_data[icode].operand[0].predicate (temp, mode))
3063 temp = gen_reg_rtx (mode);
3064
3065 pat = GEN_FCN (icode) (temp, xop0);
3066 if (pat)
3067 {
3068 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3069 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3070 {
3071 delete_insns_since (last);
3072 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3073 }
3074
3075 emit_insn (pat);
3076
3077 return temp;
3078 }
3079 else
3080 delete_insns_since (last);
3081 }
3082 return 0;
3083 }
3084
3085 /* Generate code to perform an operation specified by UNOPTAB
3086 on operand OP0, with result having machine-mode MODE.
3087
3088 UNSIGNEDP is for the case where we have to widen the operands
3089 to perform the operation. It says to use zero-extension.
3090
3091 If TARGET is nonzero, the value
3092 is generated there, if it is convenient to do so.
3093 In all cases an rtx is returned for the locus of the value;
3094 this may or may not be TARGET. */
3095
3096 rtx
3097 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3098 int unsignedp)
3099 {
3100 enum mode_class mclass = GET_MODE_CLASS (mode);
3101 enum machine_mode wider_mode;
3102 rtx temp;
3103 rtx libfunc;
3104
3105 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3106 if (temp)
3107 return temp;
3108
3109 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3110
3111 /* Widening (or narrowing) clz needs special treatment. */
3112 if (unoptab == clz_optab)
3113 {
3114 temp = widen_clz (mode, op0, target);
3115 if (temp)
3116 return temp;
3117
3118 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3119 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3120 {
3121 temp = expand_doubleword_clz (mode, op0, target);
3122 if (temp)
3123 return temp;
3124 }
3125
3126 goto try_libcall;
3127 }
3128
3129 /* Widening (or narrowing) bswap needs special treatment. */
3130 if (unoptab == bswap_optab)
3131 {
3132 temp = widen_bswap (mode, op0, target);
3133 if (temp)
3134 return temp;
3135
3136 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3137 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3138 {
3139 temp = expand_doubleword_bswap (mode, op0, target);
3140 if (temp)
3141 return temp;
3142 }
3143
3144 goto try_libcall;
3145 }
3146
3147 if (CLASS_HAS_WIDER_MODES_P (mclass))
3148 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3149 wider_mode != VOIDmode;
3150 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3151 {
3152 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3153 {
3154 rtx xop0 = op0;
3155 rtx last = get_last_insn ();
3156
3157 /* For certain operations, we need not actually extend
3158 the narrow operand, as long as we will truncate the
3159 results to the same narrowness. */
3160
3161 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3162 (unoptab == neg_optab
3163 || unoptab == one_cmpl_optab)
3164 && mclass == MODE_INT);
3165
3166 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3167 unsignedp);
3168
3169 if (temp)
3170 {
3171 if (mclass != MODE_INT
3172 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3173 GET_MODE_BITSIZE (wider_mode)))
3174 {
3175 if (target == 0)
3176 target = gen_reg_rtx (mode);
3177 convert_move (target, temp, 0);
3178 return target;
3179 }
3180 else
3181 return gen_lowpart (mode, temp);
3182 }
3183 else
3184 delete_insns_since (last);
3185 }
3186 }
3187
3188 /* These can be done a word at a time. */
3189 if (unoptab == one_cmpl_optab
3190 && mclass == MODE_INT
3191 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3192 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3193 {
3194 int i;
3195 rtx insns;
3196
3197 if (target == 0 || target == op0)
3198 target = gen_reg_rtx (mode);
3199
3200 start_sequence ();
3201
3202 /* Do the actual arithmetic. */
3203 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3204 {
3205 rtx target_piece = operand_subword (target, i, 1, mode);
3206 rtx x = expand_unop (word_mode, unoptab,
3207 operand_subword_force (op0, i, mode),
3208 target_piece, unsignedp);
3209
3210 if (target_piece != x)
3211 emit_move_insn (target_piece, x);
3212 }
3213
3214 insns = get_insns ();
3215 end_sequence ();
3216
3217 emit_insn (insns);
3218 return target;
3219 }
3220
3221 if (unoptab->code == NEG)
3222 {
3223 /* Try negating floating point values by flipping the sign bit. */
3224 if (SCALAR_FLOAT_MODE_P (mode))
3225 {
3226 temp = expand_absneg_bit (NEG, mode, op0, target);
3227 if (temp)
3228 return temp;
3229 }
3230
3231 /* If there is no negation pattern, and we have no negative zero,
3232 try subtracting from zero. */
3233 if (!HONOR_SIGNED_ZEROS (mode))
3234 {
3235 temp = expand_binop (mode, (unoptab == negv_optab
3236 ? subv_optab : sub_optab),
3237 CONST0_RTX (mode), op0, target,
3238 unsignedp, OPTAB_DIRECT);
3239 if (temp)
3240 return temp;
3241 }
3242 }
3243
3244 /* Try calculating parity (x) as popcount (x) % 2. */
3245 if (unoptab == parity_optab)
3246 {
3247 temp = expand_parity (mode, op0, target);
3248 if (temp)
3249 return temp;
3250 }
3251
3252 /* Try implementing ffs (x) in terms of clz (x). */
3253 if (unoptab == ffs_optab)
3254 {
3255 temp = expand_ffs (mode, op0, target);
3256 if (temp)
3257 return temp;
3258 }
3259
3260 /* Try implementing ctz (x) in terms of clz (x). */
3261 if (unoptab == ctz_optab)
3262 {
3263 temp = expand_ctz (mode, op0, target);
3264 if (temp)
3265 return temp;
3266 }
3267
3268 try_libcall:
3269 /* Now try a library call in this mode. */
3270 libfunc = optab_libfunc (unoptab, mode);
3271 if (libfunc)
3272 {
3273 rtx insns;
3274 rtx value;
3275 rtx eq_value;
3276 enum machine_mode outmode = mode;
3277
3278 /* All of these functions return small values. Thus we choose to
3279 have them return something that isn't a double-word. */
3280 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3281 || unoptab == popcount_optab || unoptab == parity_optab)
3282 outmode
3283 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3284 optab_libfunc (unoptab, mode)));
3285
3286 start_sequence ();
3287
3288 /* Pass 1 for NO_QUEUE so we don't lose any increments
3289 if the libcall is cse'd or moved. */
3290 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3291 1, op0, mode);
3292 insns = get_insns ();
3293 end_sequence ();
3294
3295 target = gen_reg_rtx (outmode);
3296 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3297 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3298 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3299 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3300 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3301 emit_libcall_block (insns, target, value, eq_value);
3302
3303 return target;
3304 }
3305
3306 /* It can't be done in this mode. Can we do it in a wider mode? */
3307
3308 if (CLASS_HAS_WIDER_MODES_P (mclass))
3309 {
3310 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3311 wider_mode != VOIDmode;
3312 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3313 {
3314 if ((optab_handler (unoptab, wider_mode)->insn_code
3315 != CODE_FOR_nothing)
3316 || optab_libfunc (unoptab, wider_mode))
3317 {
3318 rtx xop0 = op0;
3319 rtx last = get_last_insn ();
3320
3321 /* For certain operations, we need not actually extend
3322 the narrow operand, as long as we will truncate the
3323 results to the same narrowness. */
3324
3325 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3326 (unoptab == neg_optab
3327 || unoptab == one_cmpl_optab)
3328 && mclass == MODE_INT);
3329
3330 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3331 unsignedp);
3332
3333 /* If we are generating clz using a wider mode, adjust the
3334 result. */
3335 if (unoptab == clz_optab && temp != 0)
3336 temp = expand_binop (wider_mode, sub_optab, temp,
3337 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3338 - GET_MODE_BITSIZE (mode)),
3339 target, true, OPTAB_DIRECT);
3340
3341 if (temp)
3342 {
3343 if (mclass != MODE_INT)
3344 {
3345 if (target == 0)
3346 target = gen_reg_rtx (mode);
3347 convert_move (target, temp, 0);
3348 return target;
3349 }
3350 else
3351 return gen_lowpart (mode, temp);
3352 }
3353 else
3354 delete_insns_since (last);
3355 }
3356 }
3357 }
3358
3359 /* One final attempt at implementing negation via subtraction,
3360 this time allowing widening of the operand. */
3361 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3362 {
3363 rtx temp;
3364 temp = expand_binop (mode,
3365 unoptab == negv_optab ? subv_optab : sub_optab,
3366 CONST0_RTX (mode), op0,
3367 target, unsignedp, OPTAB_LIB_WIDEN);
3368 if (temp)
3369 return temp;
3370 }
3371
3372 return 0;
3373 }
3374 \f
3375 /* Emit code to compute the absolute value of OP0, with result to
3376 TARGET if convenient. (TARGET may be 0.) The return value says
3377 where the result actually is to be found.
3378
3379 MODE is the mode of the operand; the mode of the result is
3380 different but can be deduced from MODE.
3381
3382 */
3383
3384 rtx
3385 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3386 int result_unsignedp)
3387 {
3388 rtx temp;
3389
3390 if (! flag_trapv)
3391 result_unsignedp = 1;
3392
3393 /* First try to do it with a special abs instruction. */
3394 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3395 op0, target, 0);
3396 if (temp != 0)
3397 return temp;
3398
3399 /* For floating point modes, try clearing the sign bit. */
3400 if (SCALAR_FLOAT_MODE_P (mode))
3401 {
3402 temp = expand_absneg_bit (ABS, mode, op0, target);
3403 if (temp)
3404 return temp;
3405 }
3406
3407 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3408 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3409 && !HONOR_SIGNED_ZEROS (mode))
3410 {
3411 rtx last = get_last_insn ();
3412
3413 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3414 if (temp != 0)
3415 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3416 OPTAB_WIDEN);
3417
3418 if (temp != 0)
3419 return temp;
3420
3421 delete_insns_since (last);
3422 }
3423
3424 /* If this machine has expensive jumps, we can do integer absolute
3425 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3426 where W is the width of MODE. */
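/* Added illustration, not part of the original source: with W == 32 and
   x == -5, the arithmetic shift gives s == x >> 31 == -1 (all ones), so
   (s ^ x) - s == ~x + 1 == 5; for x >= 0, s is 0 and the expression is
   just x.  */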
3427
3428 if (GET_MODE_CLASS (mode) == MODE_INT
3429 && BRANCH_COST (optimize_insn_for_speed_p (),
3430 false) >= 2)
3431 {
3432 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3433 size_int (GET_MODE_BITSIZE (mode) - 1),
3434 NULL_RTX, 0);
3435
3436 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3437 OPTAB_LIB_WIDEN);
3438 if (temp != 0)
3439 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3440 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3441
3442 if (temp != 0)
3443 return temp;
3444 }
3445
3446 return NULL_RTX;
3447 }
3448
3449 rtx
3450 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3451 int result_unsignedp, int safe)
3452 {
3453 rtx temp, op1;
3454
3455 if (! flag_trapv)
3456 result_unsignedp = 1;
3457
3458 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3459 if (temp != 0)
3460 return temp;
3461
3462 /* If that does not win, use conditional jump and negate. */
3463
3464 /* It is safe to use the target if it is the same as the source,
3465 provided it is also a pseudo register. */
3466 if (op0 == target && REG_P (op0)
3467 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3468 safe = 1;
3469
3470 op1 = gen_label_rtx ();
3471 if (target == 0 || ! safe
3472 || GET_MODE (target) != mode
3473 || (MEM_P (target) && MEM_VOLATILE_P (target))
3474 || (REG_P (target)
3475 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3476 target = gen_reg_rtx (mode);
3477
3478 emit_move_insn (target, op0);
3479 NO_DEFER_POP;
3480
3481 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3482 NULL_RTX, NULL_RTX, op1, -1);
3483
3484 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3485 target, target, 0);
3486 if (op0 != target)
3487 emit_move_insn (target, op0);
3488 emit_label (op1);
3489 OK_DEFER_POP;
3490 return target;
3491 }
3492
3493 /* Emit code to compute the one's complement absolute value of OP0
3494 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3495 (TARGET may be NULL_RTX.) The return value says where the result
3496 actually is to be found.
3497
3498 MODE is the mode of the operand; the mode of the result is
3499 different but can be deduced from MODE. */
3500
3501 rtx
3502 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3503 {
3504 rtx temp;
3505
3506 /* Not applicable for floating point modes. */
3507 if (FLOAT_MODE_P (mode))
3508 return NULL_RTX;
3509
3510 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3511 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing)
3512 {
3513 rtx last = get_last_insn ();
3514
3515 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3516 if (temp != 0)
3517 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3518 OPTAB_WIDEN);
3519
3520 if (temp != 0)
3521 return temp;
3522
3523 delete_insns_since (last);
3524 }
3525
3526 /* If this machine has expensive jumps, we can do one's complement
3527 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3528
3529 if (GET_MODE_CLASS (mode) == MODE_INT
3530 && BRANCH_COST (optimize_insn_for_speed_p (),
3531 false) >= 2)
3532 {
3533 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3534 size_int (GET_MODE_BITSIZE (mode) - 1),
3535 NULL_RTX, 0);
3536
3537 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3538 OPTAB_LIB_WIDEN);
3539
3540 if (temp != 0)
3541 return temp;
3542 }
3543
3544 return NULL_RTX;
3545 }
3546
3547 /* A subroutine of expand_copysign, perform the copysign operation using the
3548 abs and neg primitives advertised to exist on the target. The assumption
3549 is that we have a split register file, and leaving op0 in fp registers,
3550 and not playing with subregs so much, will help the register allocator. */
3551
3552 static rtx
3553 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3554 int bitpos, bool op0_is_abs)
3555 {
3556 enum machine_mode imode;
3557 int icode;
3558 rtx sign, label;
3559
3560 if (target == op1)
3561 target = NULL_RTX;
3562
3563 /* Check if the back end provides an insn that handles signbit for the
3564 argument's mode. */
3565 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3566 if (icode != CODE_FOR_nothing)
3567 {
3568 imode = insn_data[icode].operand[0].mode;
3569 sign = gen_reg_rtx (imode);
3570 emit_unop_insn (icode, sign, op1, UNKNOWN);
3571 }
3572 else
3573 {
3574 double_int mask;
3575
3576 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3577 {
3578 imode = int_mode_for_mode (mode);
3579 if (imode == BLKmode)
3580 return NULL_RTX;
3581 op1 = gen_lowpart (imode, op1);
3582 }
3583 else
3584 {
3585 int word;
3586
3587 imode = word_mode;
3588 if (FLOAT_WORDS_BIG_ENDIAN)
3589 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3590 else
3591 word = bitpos / BITS_PER_WORD;
3592 bitpos = bitpos % BITS_PER_WORD;
3593 op1 = operand_subword_force (op1, word, mode);
3594 }
3595
3596 mask = double_int_setbit (double_int_zero, bitpos);
3597
3598 sign = expand_binop (imode, and_optab, op1,
3599 immed_double_int_const (mask, imode),
3600 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3601 }
3602
3603 if (!op0_is_abs)
3604 {
3605 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3606 if (op0 == NULL)
3607 return NULL_RTX;
3608 target = op0;
3609 }
3610 else
3611 {
3612 if (target == NULL_RTX)
3613 target = copy_to_reg (op0);
3614 else
3615 emit_move_insn (target, op0);
3616 }
3617
3618 label = gen_label_rtx ();
3619 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3620
3621 if (GET_CODE (op0) == CONST_DOUBLE)
3622 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3623 else
3624 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3625 if (op0 != target)
3626 emit_move_insn (target, op0);
3627
3628 emit_label (label);
3629
3630 return target;
3631 }
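
/* Editor's note: illustrative stand-alone sketch (not part of the compiler,
   excluded from compilation) of the abs/neg strategy used above: take the
   absolute value of X, then negate it when the sign bit of Y is set.  The
   helper name copysign_via_absneg is invented for this example only.  */
#if 0
#include <math.h>

static double
copysign_via_absneg (double x, double y)
{
  double r = fabs (x);     /* Corresponds to the abs_optab expansion.  */
  if (signbit (y))         /* Corresponds to the sign test and jump.  */
    r = -r;                /* Corresponds to the neg_optab expansion.  */
  return r;
}
#endif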
3632
3633
3634 /* A subroutine of expand_copysign, perform the entire copysign operation
3635 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3636 is true if op0 is known to have its sign bit clear. */
3637
3638 static rtx
3639 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3640 int bitpos, bool op0_is_abs)
3641 {
3642 enum machine_mode imode;
3643 double_int mask;
3644 int word, nwords, i;
3645 rtx temp, insns;
3646
3647 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3648 {
3649 imode = int_mode_for_mode (mode);
3650 if (imode == BLKmode)
3651 return NULL_RTX;
3652 word = 0;
3653 nwords = 1;
3654 }
3655 else
3656 {
3657 imode = word_mode;
3658
3659 if (FLOAT_WORDS_BIG_ENDIAN)
3660 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3661 else
3662 word = bitpos / BITS_PER_WORD;
3663 bitpos = bitpos % BITS_PER_WORD;
3664 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3665 }
3666
3667 mask = double_int_setbit (double_int_zero, bitpos);
3668
3669 if (target == 0 || target == op0 || target == op1)
3670 target = gen_reg_rtx (mode);
3671
3672 if (nwords > 1)
3673 {
3674 start_sequence ();
3675
3676 for (i = 0; i < nwords; ++i)
3677 {
3678 rtx targ_piece = operand_subword (target, i, 1, mode);
3679 rtx op0_piece = operand_subword_force (op0, i, mode);
3680
3681 if (i == word)
3682 {
3683 if (!op0_is_abs)
3684 op0_piece
3685 = expand_binop (imode, and_optab, op0_piece,
3686 immed_double_int_const (double_int_not (mask),
3687 imode),
3688 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3689
3690 op1 = expand_binop (imode, and_optab,
3691 operand_subword_force (op1, i, mode),
3692 immed_double_int_const (mask, imode),
3693 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3694
3695 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3696 targ_piece, 1, OPTAB_LIB_WIDEN);
3697 if (temp != targ_piece)
3698 emit_move_insn (targ_piece, temp);
3699 }
3700 else
3701 emit_move_insn (targ_piece, op0_piece);
3702 }
3703
3704 insns = get_insns ();
3705 end_sequence ();
3706
3707 emit_insn (insns);
3708 }
3709 else
3710 {
3711 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3712 immed_double_int_const (mask, imode),
3713 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3714
3715 op0 = gen_lowpart (imode, op0);
3716 if (!op0_is_abs)
3717 op0 = expand_binop (imode, and_optab, op0,
3718 immed_double_int_const (double_int_not (mask),
3719 imode),
3720 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3721
3722 temp = expand_binop (imode, ior_optab, op0, op1,
3723 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3724 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3725 }
3726
3727 return target;
3728 }
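
/* Editor's note: illustrative stand-alone sketch (not part of the compiler,
   excluded from compilation) of the single-word bitmask form used above,
   shown for IEEE single precision viewed through a 32-bit integer.  The
   union-free memcpy bit access and the fixed bit-31 mask are assumptions of
   the example; the code above computes the mask from the format's signbit
   position instead.  */
#if 0
#include <stdint.h>
#include <string.h>

static float
copysign_via_bits (float x, float y)
{
  uint32_t xi, yi, mask = UINT32_C (1) << 31;   /* Sign-bit mask.  */
  memcpy (&xi, &x, sizeof xi);
  memcpy (&yi, &y, sizeof yi);
  xi = (xi & ~mask) | (yi & mask);   /* (op0 & ~mask) | (op1 & mask).  */
  memcpy (&x, &xi, sizeof x);
  return x;
}
#endif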
3729
3730 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3731 scalar floating point mode. Return NULL if we do not know how to
3732 expand the operation inline. */
3733
3734 rtx
3735 expand_copysign (rtx op0, rtx op1, rtx target)
3736 {
3737 enum machine_mode mode = GET_MODE (op0);
3738 const struct real_format *fmt;
3739 bool op0_is_abs;
3740 rtx temp;
3741
3742 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3743 gcc_assert (GET_MODE (op1) == mode);
3744
3745 /* First try to do it with a special instruction. */
3746 temp = expand_binop (mode, copysign_optab, op0, op1,
3747 target, 0, OPTAB_DIRECT);
3748 if (temp)
3749 return temp;
3750
3751 fmt = REAL_MODE_FORMAT (mode);
3752 if (fmt == NULL || !fmt->has_signed_zero)
3753 return NULL_RTX;
3754
3755 op0_is_abs = false;
3756 if (GET_CODE (op0) == CONST_DOUBLE)
3757 {
3758 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3759 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3760 op0_is_abs = true;
3761 }
3762
3763 if (fmt->signbit_ro >= 0
3764 && (GET_CODE (op0) == CONST_DOUBLE
3765 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3766 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3767 {
3768 temp = expand_copysign_absneg (mode, op0, op1, target,
3769 fmt->signbit_ro, op0_is_abs);
3770 if (temp)
3771 return temp;
3772 }
3773
3774 if (fmt->signbit_rw < 0)
3775 return NULL_RTX;
3776 return expand_copysign_bit (mode, op0, op1, target,
3777 fmt->signbit_rw, op0_is_abs);
3778 }
3779 \f
3780 /* Generate an instruction whose insn-code is INSN_CODE,
3781 with two operands: an output TARGET and an input OP0.
3782 TARGET *must* be nonzero, and the output is always stored there.
3783 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3784 the value that is stored into TARGET.
3785
3786 Return false if expansion failed. */
3787
3788 bool
3789 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3790 {
3791 rtx temp;
3792 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3793 rtx pat;
3794 rtx last = get_last_insn ();
3795
3796 temp = target;
3797
3798 /* Now, if insn does not accept our operands, put them into pseudos. */
3799
3800 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3801 op0 = copy_to_mode_reg (mode0, op0);
3802
3803 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3804 temp = gen_reg_rtx (GET_MODE (temp));
3805
3806 pat = GEN_FCN (icode) (temp, op0);
3807 if (!pat)
3808 {
3809 delete_insns_since (last);
3810 return false;
3811 }
3812
3813 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3814 add_equal_note (pat, temp, code, op0, NULL_RTX);
3815
3816 emit_insn (pat);
3817
3818 if (temp != target)
3819 emit_move_insn (target, temp);
3820 return true;
3821 }
3822 /* Generate an instruction whose insn-code is INSN_CODE,
3823 with two operands: an output TARGET and an input OP0.
3824 TARGET *must* be nonzero, and the output is always stored there.
3825 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3826 the value that is stored into TARGET. */
3827
3828 void
3829 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3830 {
3831 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3832 gcc_assert (ok);
3833 }
3834 \f
3835 struct no_conflict_data
3836 {
3837 rtx target, first, insn;
3838 bool must_stay;
3839 };
3840
3841 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3842 the currently examined clobber / store has to stay in the list of
3843 insns that constitute the actual libcall block. */
3844 static void
3845 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3846 {
3847 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3848
3849 /* If this insn directly contributes to setting the target, it must stay. */
3850 if (reg_overlap_mentioned_p (p->target, dest))
3851 p->must_stay = true;
3852 /* If we haven't committed to keeping any other insns in the list yet,
3853 there is nothing more to check. */
3854 else if (p->insn == p->first)
3855 return;
3856 /* If this insn sets / clobbers a register that feeds one of the insns
3857 already in the list, this insn has to stay too. */
3858 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3859 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3860 || reg_used_between_p (dest, p->first, p->insn)
3861 /* Likewise if this insn depends on a register set by a previous
3862 insn in the list, or if it sets a result (presumably a hard
3863 register) that is set or clobbered by a previous insn.
3864 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3865 SET_DEST perform the former check on the address, and the latter
3866 check on the MEM. */
3867 || (GET_CODE (set) == SET
3868 && (modified_in_p (SET_SRC (set), p->first)
3869 || modified_in_p (SET_DEST (set), p->first)
3870 || modified_between_p (SET_SRC (set), p->first, p->insn)
3871 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3872 p->must_stay = true;
3873 }
3874
3875 \f
3876 /* Emit code to make a call to a constant function or a library call.
3877
3878 INSNS is a list containing all insns emitted in the call.
3879 These insns leave the result in RESULT. Our job is to copy RESULT
3880 to TARGET, which is logically equivalent to EQUIV.
3881
3882 We first emit any insns that set a pseudo on the assumption that these are
3883 loading constants into registers; doing so allows them to be safely cse'ed
3884 between blocks. Then we emit all the other insns in the block, followed by
3885 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3886 note with an operand of EQUIV. */
3887
3888 void
3889 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3890 {
3891 rtx final_dest = target;
3892 rtx next, last, insn;
3893
3894 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3895 into a MEM later. Protect the libcall block from this change. */
3896 if (! REG_P (target) || REG_USERVAR_P (target))
3897 target = gen_reg_rtx (GET_MODE (target));
3898
3899 /* If we're using non-call exceptions, a libcall corresponding to an
3900 operation that may trap may also trap. */
3901 /* ??? See the comment in front of make_reg_eh_region_note. */
3902 if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv))
3903 {
3904 for (insn = insns; insn; insn = NEXT_INSN (insn))
3905 if (CALL_P (insn))
3906 {
3907 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3908 if (note)
3909 {
3910 int lp_nr = INTVAL (XEXP (note, 0));
3911 if (lp_nr == 0 || lp_nr == INT_MIN)
3912 remove_note (insn, note);
3913 }
3914 }
3915 }
3916 else
3917 {
3918 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3919 reg note to indicate that this call cannot throw or execute a nonlocal
3920 goto (unless there is already a REG_EH_REGION note, in which case
3921 we update it). */
3922 for (insn = insns; insn; insn = NEXT_INSN (insn))
3923 if (CALL_P (insn))
3924 make_reg_eh_region_note_nothrow_nononlocal (insn);
3925 }
3926
3927 /* First emit all insns that set pseudos. Remove them from the list as
3928 we go. Avoid insns that set pseudos which were referenced in previous
3929 insns. These can be generated by move_by_pieces, for example,
3930 to update an address. Similarly, avoid insns that reference things
3931 set in previous insns. */
3932
3933 for (insn = insns; insn; insn = next)
3934 {
3935 rtx set = single_set (insn);
3936
3937 next = NEXT_INSN (insn);
3938
3939 if (set != 0 && REG_P (SET_DEST (set))
3940 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3941 {
3942 struct no_conflict_data data;
3943
3944 data.target = const0_rtx;
3945 data.first = insns;
3946 data.insn = insn;
3947 data.must_stay = 0;
3948 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3949 if (! data.must_stay)
3950 {
3951 if (PREV_INSN (insn))
3952 NEXT_INSN (PREV_INSN (insn)) = next;
3953 else
3954 insns = next;
3955
3956 if (next)
3957 PREV_INSN (next) = PREV_INSN (insn);
3958
3959 add_insn (insn);
3960 }
3961 }
3962
3963 /* Some ports use a loop to copy large arguments onto the stack.
3964 Don't move anything outside such a loop. */
3965 if (LABEL_P (insn))
3966 break;
3967 }
3968
3969 /* Write the remaining insns followed by the final copy. */
3970 for (insn = insns; insn; insn = next)
3971 {
3972 next = NEXT_INSN (insn);
3973
3974 add_insn (insn);
3975 }
3976
3977 last = emit_move_insn (target, result);
3978 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3979 != CODE_FOR_nothing)
3980 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3981
3982 if (final_dest != target)
3983 emit_move_insn (final_dest, target);
3984 }
3985 \f
3986 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3987 PURPOSE describes how this comparison will be used. CODE is the rtx
3988 comparison code we will be using.
3989
3990 ??? Actually, CODE is slightly weaker than that. A target is still
3991 required to implement all of the normal bcc operations, but not
3992 required to implement all (or any) of the unordered bcc operations. */
3993
3994 int
3995 can_compare_p (enum rtx_code code, enum machine_mode mode,
3996 enum can_compare_purpose purpose)
3997 {
3998 rtx test;
3999 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4000 do
4001 {
4002 int icode;
4003
4004 if (purpose == ccp_jump
4005 && (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
4006 && insn_data[icode].operand[0].predicate (test, mode))
4007 return 1;
4008 if (purpose == ccp_store_flag
4009 && (icode = optab_handler (cstore_optab, mode)->insn_code) != CODE_FOR_nothing
4010 && insn_data[icode].operand[1].predicate (test, mode))
4011 return 1;
4012 if (purpose == ccp_cmov
4013 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4014 return 1;
4015
4016 mode = GET_MODE_WIDER_MODE (mode);
4017 PUT_MODE (test, mode);
4018 }
4019 while (mode != VOIDmode);
4020
4021 return 0;
4022 }
4023
4024 /* This function is called when we are going to emit a compare instruction that
4025 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4026
4027 *PMODE is the mode of the inputs (in case they are const_int).
4028 *PUNSIGNEDP nonzero says that the operands are unsigned;
4029 this matters if they need to be widened (as given by METHODS).
4030
4031 If they have mode BLKmode, then SIZE specifies the size of both operands.
4032
4033 This function performs all the setup necessary so that the caller only has
4034 to emit a single comparison insn. This setup can involve doing a BLKmode
4035 comparison or emitting a library call to perform the comparison if no insn
4036 is available to handle it.
4037 The values which are passed in through pointers can be modified; the caller
4038 should perform the comparison on the modified values. Constant
4039 comparisons must have already been folded. */
4040
4041 static void
4042 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4043 int unsignedp, enum optab_methods methods,
4044 rtx *ptest, enum machine_mode *pmode)
4045 {
4046 enum machine_mode mode = *pmode;
4047 rtx libfunc, test;
4048 enum machine_mode cmp_mode;
4049 enum mode_class mclass;
4050
4051 /* The other methods are not needed. */
4052 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4053 || methods == OPTAB_LIB_WIDEN);
4054
4055 /* If we are optimizing, force expensive constants into a register. */
4056 if (CONSTANT_P (x) && optimize
4057 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
4058 > COSTS_N_INSNS (1)))
4059 x = force_reg (mode, x);
4060
4061 if (CONSTANT_P (y) && optimize
4062 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
4063 > COSTS_N_INSNS (1)))
4064 y = force_reg (mode, y);
4065
4066 #ifdef HAVE_cc0
4067 /* Make sure we have a canonical comparison. The RTL
4068 documentation states that canonical comparisons are required only
4069 for targets which have cc0. */
4070 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4071 #endif
4072
4073 /* Don't let both operands fail to indicate the mode. */
4074 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4075 x = force_reg (mode, x);
4076 if (mode == VOIDmode)
4077 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4078
4079 /* Handle all BLKmode compares. */
4080
4081 if (mode == BLKmode)
4082 {
4083 enum machine_mode result_mode;
4084 enum insn_code cmp_code;
4085 tree length_type;
4086 rtx libfunc;
4087 rtx result;
4088 rtx opalign
4089 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4090
4091 gcc_assert (size);
4092
4093 /* Try to use a memory block compare insn - either cmpstr
4094 or cmpmem will do. */
4095 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4096 cmp_mode != VOIDmode;
4097 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4098 {
4099 cmp_code = cmpmem_optab[cmp_mode];
4100 if (cmp_code == CODE_FOR_nothing)
4101 cmp_code = cmpstr_optab[cmp_mode];
4102 if (cmp_code == CODE_FOR_nothing)
4103 cmp_code = cmpstrn_optab[cmp_mode];
4104 if (cmp_code == CODE_FOR_nothing)
4105 continue;
4106
4107 /* Must make sure the size fits the insn's mode. */
4108 if ((CONST_INT_P (size)
4109 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4110 || (GET_MODE_BITSIZE (GET_MODE (size))
4111 > GET_MODE_BITSIZE (cmp_mode)))
4112 continue;
4113
4114 result_mode = insn_data[cmp_code].operand[0].mode;
4115 result = gen_reg_rtx (result_mode);
4116 size = convert_to_mode (cmp_mode, size, 1);
4117 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4118
4119 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4120 *pmode = result_mode;
4121 return;
4122 }
4123
4124 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4125 goto fail;
4126
4127 /* Otherwise call a library function, memcmp. */
4128 libfunc = memcmp_libfunc;
4129 length_type = sizetype;
4130 result_mode = TYPE_MODE (integer_type_node);
4131 cmp_mode = TYPE_MODE (length_type);
4132 size = convert_to_mode (TYPE_MODE (length_type), size,
4133 TYPE_UNSIGNED (length_type));
4134
4135 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4136 result_mode, 3,
4137 XEXP (x, 0), Pmode,
4138 XEXP (y, 0), Pmode,
4139 size, cmp_mode);
4140
4141 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4142 *pmode = result_mode;
4143 return;
4144 }
4145
4146 /* Don't allow operands to the compare to trap, as that can put the
4147 compare and branch in different basic blocks. */
4148 if (cfun->can_throw_non_call_exceptions)
4149 {
4150 if (may_trap_p (x))
4151 x = force_reg (mode, x);
4152 if (may_trap_p (y))
4153 y = force_reg (mode, y);
4154 }
4155
4156 if (GET_MODE_CLASS (mode) == MODE_CC)
4157 {
4158 gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
4159 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4160 return;
4161 }
4162
4163 mclass = GET_MODE_CLASS (mode);
4164 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4165 cmp_mode = mode;
4166 do
4167 {
4168 enum insn_code icode;
4169 icode = optab_handler (cbranch_optab, cmp_mode)->insn_code;
4170 if (icode != CODE_FOR_nothing
4171 && insn_data[icode].operand[0].predicate (test, VOIDmode))
4172 {
4173 rtx last = get_last_insn ();
4174 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4175 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4176 if (op0 && op1
4177 && insn_data[icode].operand[1].predicate
4178 (op0, insn_data[icode].operand[1].mode)
4179 && insn_data[icode].operand[2].predicate
4180 (op1, insn_data[icode].operand[2].mode))
4181 {
4182 XEXP (test, 0) = op0;
4183 XEXP (test, 1) = op1;
4184 *ptest = test;
4185 *pmode = cmp_mode;
4186 return;
4187 }
4188 delete_insns_since (last);
4189 }
4190
4191 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4192 break;
4193 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
4194 }
4195 while (cmp_mode != VOIDmode);
4196
4197 if (methods != OPTAB_LIB_WIDEN)
4198 goto fail;
4199
4200 if (!SCALAR_FLOAT_MODE_P (mode))
4201 {
4202 rtx result;
4203
4204 /* Handle a libcall just for the mode we are using. */
4205 libfunc = optab_libfunc (cmp_optab, mode);
4206 gcc_assert (libfunc);
4207
4208 /* If we want unsigned, and this mode has a distinct unsigned
4209 comparison routine, use that. */
4210 if (unsignedp)
4211 {
4212 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4213 if (ulibfunc)
4214 libfunc = ulibfunc;
4215 }
4216
4217 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4218 targetm.libgcc_cmp_return_mode (),
4219 2, x, mode, y, mode);
4220
4221 /* There are two kinds of comparison routines. Biased routines
4222 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4223 of gcc expect that the comparison operation is equivalent
4224 to the modified comparison. For signed comparisons compare the
4225 result against 1 in the biased case, and zero in the unbiased
4226 case. For unsigned comparisons always compare against 1 after
4227 biasing the unbiased result by adding 1. This gives us a way to
4228 represent LTU. */
4229 x = result;
4230 y = const1_rtx;
4231
4232 if (!TARGET_LIB_INT_CMP_BIASED)
4233 {
4234 if (unsignedp)
4235 x = plus_constant (result, 1);
4236 else
4237 y = const0_rtx;
4238 }
4239
4240 *pmode = word_mode;
4241 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4242 ptest, pmode);
4243 }
4244 else
4245 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4246
4247 return;
4248
4249 fail:
4250 *ptest = NULL_RTX;
4251 }
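
/* Editor's note: illustrative sketch (not part of the compiler, excluded
   from compilation) of the biased vs. unbiased libcall return conventions
   described in the function above.  The helpers below are invented for this
   example only; they mirror the constant the result is re-compared against
   (1 in the biased case, 0 for unbiased signed comparisons, and 1 for
   unsigned comparisons after adding 1 to an unbiased result).  */
#if 0
/* Biased routines return 0 (less), 1 (equal) or 2 (greater), so "a < b"
   becomes "result < 1".  Unbiased routines return -1/0/1; signed
   comparisons test against 0, while unsigned comparisons first add 1,
   turning -1/0/1 back into 0/1/2 so that LTU can still be expressed.  */
static int
lt_via_biased_cmp (int biased_result)
{
  return biased_result < 1;
}

static int
lt_via_unbiased_cmp (int unbiased_result, int unsignedp)
{
  return unsignedp ? (unbiased_result + 1) < 1 : unbiased_result < 0;
}
#endif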
4252
4253 /* Before emitting an insn with code ICODE, make sure that X, which is going
4254 to be used for operand OPNUM of the insn, is converted from mode MODE to
4255 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4256 that it is accepted by the operand predicate. Return the new value. */
4257
4258 rtx
4259 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4260 enum machine_mode wider_mode, int unsignedp)
4261 {
4262 if (mode != wider_mode)
4263 x = convert_modes (wider_mode, mode, x, unsignedp);
4264
4265 if (!insn_data[icode].operand[opnum].predicate
4266 (x, insn_data[icode].operand[opnum].mode))
4267 {
4268 if (reload_completed)
4269 return NULL_RTX;
4270 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4271 }
4272
4273 return x;
4274 }
4275
4276 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4277 we can do the branch. */
4278
4279 static void
4280 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4281 {
4282 enum machine_mode optab_mode;
4283 enum mode_class mclass;
4284 enum insn_code icode;
4285
4286 mclass = GET_MODE_CLASS (mode);
4287 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4288 icode = optab_handler (cbranch_optab, optab_mode)->insn_code;
4289
4290 gcc_assert (icode != CODE_FOR_nothing);
4291 gcc_assert (insn_data[icode].operand[0].predicate (test, VOIDmode));
4292 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4293 }
4294
4295 /* Generate code to compare X with Y so that the condition codes are
4296 set and to jump to LABEL if the condition is true. If X is a
4297 constant and Y is not a constant, then the comparison is swapped to
4298 ensure that the comparison RTL has the canonical form.
4299
4300 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4301 need to be widened. UNSIGNEDP is also used to select the proper
4302 branch condition code.
4303
4304 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4305
4306 MODE is the mode of the inputs (in case they are const_int).
4307
4308 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4309 It will be potentially converted into an unsigned variant based on
4310 UNSIGNEDP to select a proper jump instruction. */
4311
4312 void
4313 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4314 enum machine_mode mode, int unsignedp, rtx label)
4315 {
4316 rtx op0 = x, op1 = y;
4317 rtx test;
4318
4319 /* Swap operands and condition to ensure canonical RTL. */
4320 if (swap_commutative_operands_p (x, y)
4321 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4322 {
4323 op0 = y, op1 = x;
4324 comparison = swap_condition (comparison);
4325 }
4326
4327 /* If OP0 is still a constant, then both X and Y must be constants
4328 or the opposite comparison is not supported. Force X into a register
4329 to create canonical RTL. */
4330 if (CONSTANT_P (op0))
4331 op0 = force_reg (mode, op0);
4332
4333 if (unsignedp)
4334 comparison = unsigned_condition (comparison);
4335
4336 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4337 &test, &mode);
4338 emit_cmp_and_jump_insn_1 (test, mode, label);
4339 }
4340
4341 \f
4342 /* Emit a library call comparison between floating point X and Y.
4343 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4344
4345 static void
4346 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4347 rtx *ptest, enum machine_mode *pmode)
4348 {
4349 enum rtx_code swapped = swap_condition (comparison);
4350 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4351 enum machine_mode orig_mode = GET_MODE (x);
4352 enum machine_mode mode, cmp_mode;
4353 rtx true_rtx, false_rtx;
4354 rtx value, target, insns, equiv;
4355 rtx libfunc = 0;
4356 bool reversed_p = false;
4357 cmp_mode = targetm.libgcc_cmp_return_mode ();
4358
4359 for (mode = orig_mode;
4360 mode != VOIDmode;
4361 mode = GET_MODE_WIDER_MODE (mode))
4362 {
4363 if (code_to_optab[comparison]
4364 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4365 break;
4366
4367 if (code_to_optab[swapped]
4368 && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4369 {
4370 rtx tmp;
4371 tmp = x; x = y; y = tmp;
4372 comparison = swapped;
4373 break;
4374 }
4375
4376 if (code_to_optab[reversed]
4377 && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4378 {
4379 comparison = reversed;
4380 reversed_p = true;
4381 break;
4382 }
4383 }
4384
4385 gcc_assert (mode != VOIDmode);
4386
4387 if (mode != orig_mode)
4388 {
4389 x = convert_to_mode (mode, x, 0);
4390 y = convert_to_mode (mode, y, 0);
4391 }
4392
4393 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4394 the RTL. This allows the RTL optimizers to delete the libcall if the
4395 condition can be determined at compile-time. */
4396 if (comparison == UNORDERED
4397 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4398 {
4399 true_rtx = const_true_rtx;
4400 false_rtx = const0_rtx;
4401 }
4402 else
4403 {
4404 switch (comparison)
4405 {
4406 case EQ:
4407 true_rtx = const0_rtx;
4408 false_rtx = const_true_rtx;
4409 break;
4410
4411 case NE:
4412 true_rtx = const_true_rtx;
4413 false_rtx = const0_rtx;
4414 break;
4415
4416 case GT:
4417 true_rtx = const1_rtx;
4418 false_rtx = const0_rtx;
4419 break;
4420
4421 case GE:
4422 true_rtx = const0_rtx;
4423 false_rtx = constm1_rtx;
4424 break;
4425
4426 case LT:
4427 true_rtx = constm1_rtx;
4428 false_rtx = const0_rtx;
4429 break;
4430
4431 case LE:
4432 true_rtx = const0_rtx;
4433 false_rtx = const1_rtx;
4434 break;
4435
4436 default:
4437 gcc_unreachable ();
4438 }
4439 }
4440
4441 if (comparison == UNORDERED)
4442 {
4443 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4444 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4445 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4446 temp, const_true_rtx, equiv);
4447 }
4448 else
4449 {
4450 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4451 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4452 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4453 equiv, true_rtx, false_rtx);
4454 }
4455
4456 start_sequence ();
4457 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4458 cmp_mode, 2, x, mode, y, mode);
4459 insns = get_insns ();
4460 end_sequence ();
4461
4462 target = gen_reg_rtx (cmp_mode);
4463 emit_libcall_block (insns, target, value, equiv);
4464
4465 if (comparison == UNORDERED
4466 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4467 || reversed_p)
4468 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4469 else
4470 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4471
4472 *pmode = cmp_mode;
4473 }
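
/* Editor's note: illustrative sketch (not part of the compiler, excluded
   from compilation) of why the result of the float comparison libcall can
   simply be re-compared against zero with the original operator.  The
   routine below is a stand-in written for this example; the real routines
   live in libgcc, and their return values follow the true_rtx/false_rtx
   table built in the function above.  */
#if 0
/* A GE-style comparison helper: returns 0 when a >= b and -1 otherwise,
   matching true_rtx == const0_rtx and false_rtx == constm1_rtx above.
   The caller can then test "result >= 0", i.e. reuse the GE operator.  */
static int
example_gedf2 (double a, double b)
{
  return a >= b ? 0 : -1;
}

static int
use_example_gedf2 (double a, double b)
{
  return example_gedf2 (a, b) >= 0;   /* Same truth value as a >= b.  */
}
#endif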
4474 \f
4475 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4476
4477 void
4478 emit_indirect_jump (rtx loc)
4479 {
4480 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4481 (loc, Pmode))
4482 loc = copy_to_mode_reg (Pmode, loc);
4483
4484 emit_jump_insn (gen_indirect_jump (loc));
4485 emit_barrier ();
4486 }
4487 \f
4488 #ifdef HAVE_conditional_move
4489
4490 /* Emit a conditional move instruction if the machine supports one for that
4491 condition and machine mode.
4492
4493 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4494 the mode to use should they be constants. If it is VOIDmode, they cannot
4495 both be constants.
4496
4497 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4498 should be stored there. MODE is the mode to use should they be constants.
4499 If it is VOIDmode, they cannot both be constants.
4500
4501 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4502 is not supported. */
4503
4504 rtx
4505 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4506 enum machine_mode cmode, rtx op2, rtx op3,
4507 enum machine_mode mode, int unsignedp)
4508 {
4509 rtx tem, subtarget, comparison, insn;
4510 enum insn_code icode;
4511 enum rtx_code reversed;
4512
4513 /* If one operand is constant, make it the second one. Only do this
4514 if the other operand is not constant as well. */
4515
4516 if (swap_commutative_operands_p (op0, op1))
4517 {
4518 tem = op0;
4519 op0 = op1;
4520 op1 = tem;
4521 code = swap_condition (code);
4522 }
4523
4524 /* get_condition will prefer to generate LT and GT even if the old
4525 comparison was against zero, so undo that canonicalization here since
4526 comparisons against zero are cheaper. */
4527 if (code == LT && op1 == const1_rtx)
4528 code = LE, op1 = const0_rtx;
4529 else if (code == GT && op1 == constm1_rtx)
4530 code = GE, op1 = const0_rtx;
4531
4532 if (cmode == VOIDmode)
4533 cmode = GET_MODE (op0);
4534
4535 if (swap_commutative_operands_p (op2, op3)
4536 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4537 != UNKNOWN))
4538 {
4539 tem = op2;
4540 op2 = op3;
4541 op3 = tem;
4542 code = reversed;
4543 }
4544
4545 if (mode == VOIDmode)
4546 mode = GET_MODE (op2);
4547
4548 icode = movcc_gen_code[mode];
4549
4550 if (icode == CODE_FOR_nothing)
4551 return 0;
4552
4553 if (!target)
4554 target = gen_reg_rtx (mode);
4555
4556 subtarget = target;
4557
4558 /* If the insn doesn't accept these operands, put them in pseudos. */
4559
4560 if (!insn_data[icode].operand[0].predicate
4561 (subtarget, insn_data[icode].operand[0].mode))
4562 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4563
4564 if (!insn_data[icode].operand[2].predicate
4565 (op2, insn_data[icode].operand[2].mode))
4566 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4567
4568 if (!insn_data[icode].operand[3].predicate
4569 (op3, insn_data[icode].operand[3].mode))
4570 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4571
4572 /* Everything should now be in the suitable form. */
4573
4574 code = unsignedp ? unsigned_condition (code) : code;
4575 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4576
4577 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4578 return NULL and let the caller figure out how best to deal with this
4579 situation. */
4580 if (!COMPARISON_P (comparison))
4581 return NULL_RTX;
4582
4583 do_pending_stack_adjust ();
4584 start_sequence ();
4585 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4586 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4587 &comparison, &cmode);
4588 if (!comparison)
4589 insn = NULL_RTX;
4590 else
4591 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4592
4593 /* If that failed, then give up. */
4594 if (insn == 0)
4595 {
4596 end_sequence ();
4597 return 0;
4598 }
4599
4600 emit_insn (insn);
4601 insn = get_insns ();
4602 end_sequence ();
4603 emit_insn (insn);
4604 if (subtarget != target)
4605 convert_move (target, subtarget, 0);
4606
4607 return target;
4608 }
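
/* Editor's note: illustrative sketch (not part of the compiler, excluded
   from compilation) of what the expansion above computes, including the
   undoing of get_condition's canonicalization: "x < 1" is rewritten as
   "x <= 0" and "x > -1" as "x >= 0" because comparisons against zero are
   cheaper.  The helper names are invented for this example only.  */
#if 0
static int
conditional_move_example (int op0, int op1, int op2, int op3)
{
  /* target = (op0 OP op1) ? op2 : op3, shown here with OP == LT.  */
  return (op0 < op1) ? op2 : op3;
}

static int
canonicalized_against_zero (int x, int a, int b)
{
  /* (x < 1 ? a : b) is emitted as (x <= 0 ? a : b).  */
  return x <= 0 ? a : b;
}
#endif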
4609
4610 /* Return nonzero if a conditional move of mode MODE is supported.
4611
4612 This function is for combine so it can tell whether an insn that looks
4613 like a conditional move is actually supported by the hardware. If we
4614 guess wrong we lose a bit on optimization, but that's it. */
4615 /* ??? sparc64 supports conditionally moving integer values based on fp
4616 comparisons, and vice versa. How do we handle them? */
4617
4618 int
4619 can_conditionally_move_p (enum machine_mode mode)
4620 {
4621 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4622 return 1;
4623
4624 return 0;
4625 }
4626
4627 #endif /* HAVE_conditional_move */
4628
4629 /* Emit a conditional addition instruction if the machine supports one for that
4630 condition and machine mode.
4631
4632 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4633 the mode to use should they be constants. If it is VOIDmode, they cannot
4634 both be constants.
4635
4636 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4637 should be stored there. MODE is the mode to use should they be constants.
4638 If it is VOIDmode, they cannot both be constants.
4639
4640 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4641 is not supported. */
4642
4643 rtx
4644 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4645 enum machine_mode cmode, rtx op2, rtx op3,
4646 enum machine_mode mode, int unsignedp)
4647 {
4648 rtx tem, subtarget, comparison, insn;
4649 enum insn_code icode;
4650 enum rtx_code reversed;
4651
4652 /* If one operand is constant, make it the second one. Only do this
4653 if the other operand is not constant as well. */
4654
4655 if (swap_commutative_operands_p (op0, op1))
4656 {
4657 tem = op0;
4658 op0 = op1;
4659 op1 = tem;
4660 code = swap_condition (code);
4661 }
4662
4663 /* get_condition will prefer to generate LT and GT even if the old
4664 comparison was against zero, so undo that canonicalization here since
4665 comparisons against zero are cheaper. */
4666 if (code == LT && op1 == const1_rtx)
4667 code = LE, op1 = const0_rtx;
4668 else if (code == GT && op1 == constm1_rtx)
4669 code = GE, op1 = const0_rtx;
4670
4671 if (cmode == VOIDmode)
4672 cmode = GET_MODE (op0);
4673
4674 if (swap_commutative_operands_p (op2, op3)
4675 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4676 != UNKNOWN))
4677 {
4678 tem = op2;
4679 op2 = op3;
4680 op3 = tem;
4681 code = reversed;
4682 }
4683
4684 if (mode == VOIDmode)
4685 mode = GET_MODE (op2);
4686
4687 icode = optab_handler (addcc_optab, mode)->insn_code;
4688
4689 if (icode == CODE_FOR_nothing)
4690 return 0;
4691
4692 if (!target)
4693 target = gen_reg_rtx (mode);
4694
4695 /* If the insn doesn't accept these operands, put them in pseudos. */
4696
4697 if (!insn_data[icode].operand[0].predicate
4698 (target, insn_data[icode].operand[0].mode))
4699 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4700 else
4701 subtarget = target;
4702
4703 if (!insn_data[icode].operand[2].predicate
4704 (op2, insn_data[icode].operand[2].mode))
4705 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4706
4707 if (!insn_data[icode].operand[3].predicate
4708 (op3, insn_data[icode].operand[3].mode))
4709 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4710
4711 /* Everything should now be in the suitable form. */
4712
4713 code = unsignedp ? unsigned_condition (code) : code;
4714 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4715
4716 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4717 return NULL and let the caller figure out how best to deal with this
4718 situation. */
4719 if (!COMPARISON_P (comparison))
4720 return NULL_RTX;
4721
4722 do_pending_stack_adjust ();
4723 start_sequence ();
4724 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4725 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4726 &comparison, &cmode);
4727 if (!comparison)
4728 insn = NULL_RTX;
4729 else
4730 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4731
4732 /* If that failed, then give up. */
4733 if (insn == 0)
4734 {
4735 end_sequence ();
4736 return 0;
4737 }
4738
4739 emit_insn (insn);
4740 insn = get_insns ();
4741 end_sequence ();
4742 emit_insn (insn);
4743 if (subtarget != target)
4744 convert_move (target, subtarget, 0);
4745
4746 return target;
4747 }
4748 \f
4749 /* These functions attempt to generate an insn body, rather than
4750 emitting the insn, but if the gen function already emits them, we
4751 make no attempt to turn them back into naked patterns. */
4752
4753 /* Generate and return an insn body to add Y to X. */
4754
4755 rtx
4756 gen_add2_insn (rtx x, rtx y)
4757 {
4758 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4759
4760 gcc_assert (insn_data[icode].operand[0].predicate
4761 (x, insn_data[icode].operand[0].mode));
4762 gcc_assert (insn_data[icode].operand[1].predicate
4763 (x, insn_data[icode].operand[1].mode));
4764 gcc_assert (insn_data[icode].operand[2].predicate
4765 (y, insn_data[icode].operand[2].mode));
4766
4767 return GEN_FCN (icode) (x, x, y);
4768 }
4769
4770 /* Generate and return an insn body to add r1 and c,
4771 storing the result in r0. */
4772
4773 rtx
4774 gen_add3_insn (rtx r0, rtx r1, rtx c)
4775 {
4776 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4777
4778 if (icode == CODE_FOR_nothing
4779 || !(insn_data[icode].operand[0].predicate
4780 (r0, insn_data[icode].operand[0].mode))
4781 || !(insn_data[icode].operand[1].predicate
4782 (r1, insn_data[icode].operand[1].mode))
4783 || !(insn_data[icode].operand[2].predicate
4784 (c, insn_data[icode].operand[2].mode)))
4785 return NULL_RTX;
4786
4787 return GEN_FCN (icode) (r0, r1, c);
4788 }
4789
4790 int
4791 have_add2_insn (rtx x, rtx y)
4792 {
4793 int icode;
4794
4795 gcc_assert (GET_MODE (x) != VOIDmode);
4796
4797 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4798
4799 if (icode == CODE_FOR_nothing)
4800 return 0;
4801
4802 if (!(insn_data[icode].operand[0].predicate
4803 (x, insn_data[icode].operand[0].mode))
4804 || !(insn_data[icode].operand[1].predicate
4805 (x, insn_data[icode].operand[1].mode))
4806 || !(insn_data[icode].operand[2].predicate
4807 (y, insn_data[icode].operand[2].mode)))
4808 return 0;
4809
4810 return 1;
4811 }
4812
4813 /* Generate and return an insn body to subtract Y from X. */
4814
4815 rtx
4816 gen_sub2_insn (rtx x, rtx y)
4817 {
4818 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4819
4820 gcc_assert (insn_data[icode].operand[0].predicate
4821 (x, insn_data[icode].operand[0].mode));
4822 gcc_assert (insn_data[icode].operand[1].predicate
4823 (x, insn_data[icode].operand[1].mode));
4824 gcc_assert (insn_data[icode].operand[2].predicate
4825 (y, insn_data[icode].operand[2].mode));
4826
4827 return GEN_FCN (icode) (x, x, y);
4828 }
4829
4830 /* Generate and return an insn body to subtract c from r1,
4831 storing the result in r0. */
4832
4833 rtx
4834 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4835 {
4836 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4837
4838 if (icode == CODE_FOR_nothing
4839 || !(insn_data[icode].operand[0].predicate
4840 (r0, insn_data[icode].operand[0].mode))
4841 || !(insn_data[icode].operand[1].predicate
4842 (r1, insn_data[icode].operand[1].mode))
4843 || !(insn_data[icode].operand[2].predicate
4844 (c, insn_data[icode].operand[2].mode)))
4845 return NULL_RTX;
4846
4847 return GEN_FCN (icode) (r0, r1, c);
4848 }
4849
4850 int
4851 have_sub2_insn (rtx x, rtx y)
4852 {
4853 int icode;
4854
4855 gcc_assert (GET_MODE (x) != VOIDmode);
4856
4857 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4858
4859 if (icode == CODE_FOR_nothing)
4860 return 0;
4861
4862 if (!(insn_data[icode].operand[0].predicate
4863 (x, insn_data[icode].operand[0].mode))
4864 || !(insn_data[icode].operand[1].predicate
4865 (x, insn_data[icode].operand[1].mode))
4866 || !(insn_data[icode].operand[2].predicate
4867 (y, insn_data[icode].operand[2].mode)))
4868 return 0;
4869
4870 return 1;
4871 }
4872
4873 /* Generate the body of an instruction to copy Y into X.
4874 It may be a list of insns, if one insn isn't enough. */
4875
4876 rtx
4877 gen_move_insn (rtx x, rtx y)
4878 {
4879 rtx seq;
4880
4881 start_sequence ();
4882 emit_move_insn_1 (x, y);
4883 seq = get_insns ();
4884 end_sequence ();
4885 return seq;
4886 }
4887 \f
4888 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4889 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4890 no such operation exists, CODE_FOR_nothing will be returned. */
4891
4892 enum insn_code
4893 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4894 int unsignedp)
4895 {
4896 convert_optab tab;
4897 #ifdef HAVE_ptr_extend
4898 if (unsignedp < 0)
4899 return CODE_FOR_ptr_extend;
4900 #endif
4901
4902 tab = unsignedp ? zext_optab : sext_optab;
4903 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4904 }
4905
4906 /* Generate the body of an insn to extend Y (with mode MFROM)
4907 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4908
4909 rtx
4910 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4911 enum machine_mode mfrom, int unsignedp)
4912 {
4913 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4914 return GEN_FCN (icode) (x, y);
4915 }
4916 \f
4917 /* can_fix_p and can_float_p say whether the target machine
4918 can directly convert a given fixed point type to
4919 a given floating point type, or vice versa.
4920 The returned value is the CODE_FOR_... value to use,
4921 or CODE_FOR_nothing if these modes cannot be directly converted.
4922
4923 *TRUNCP_PTR is set to 1 if it is necessary to output
4924 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4925
4926 static enum insn_code
4927 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4928 int unsignedp, int *truncp_ptr)
4929 {
4930 convert_optab tab;
4931 enum insn_code icode;
4932
4933 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4934 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4935 if (icode != CODE_FOR_nothing)
4936 {
4937 *truncp_ptr = 0;
4938 return icode;
4939 }
4940
4941 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4942 for this to work. We need to rework the fix* and ftrunc* patterns
4943 and documentation. */
4944 tab = unsignedp ? ufix_optab : sfix_optab;
4945 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4946 if (icode != CODE_FOR_nothing
4947 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4948 {
4949 *truncp_ptr = 1;
4950 return icode;
4951 }
4952
4953 *truncp_ptr = 0;
4954 return CODE_FOR_nothing;
4955 }
4956
4957 static enum insn_code
4958 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4959 int unsignedp)
4960 {
4961 convert_optab tab;
4962
4963 tab = unsignedp ? ufloat_optab : sfloat_optab;
4964 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4965 }
4966 \f
4967 /* Generate code to convert FROM to floating point
4968 and store in TO. FROM must be fixed point and not VOIDmode.
4969 UNSIGNEDP nonzero means regard FROM as unsigned.
4970 Normally this is done by correcting the final value
4971 if it is negative. */
4972
4973 void
4974 expand_float (rtx to, rtx from, int unsignedp)
4975 {
4976 enum insn_code icode;
4977 rtx target = to;
4978 enum machine_mode fmode, imode;
4979 bool can_do_signed = false;
4980
4981 /* Crash now, because we won't be able to decide which mode to use. */
4982 gcc_assert (GET_MODE (from) != VOIDmode);
4983
4984 /* Look for an insn to do the conversion. Do it in the specified
4985 modes if possible; otherwise convert either input, output or both to
4986 wider mode. If the integer mode is wider than the mode of FROM,
4987 we can do the conversion signed even if the input is unsigned. */
4988
4989 for (fmode = GET_MODE (to); fmode != VOIDmode;
4990 fmode = GET_MODE_WIDER_MODE (fmode))
4991 for (imode = GET_MODE (from); imode != VOIDmode;
4992 imode = GET_MODE_WIDER_MODE (imode))
4993 {
4994 int doing_unsigned = unsignedp;
4995
4996 if (fmode != GET_MODE (to)
4997 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4998 continue;
4999
5000 icode = can_float_p (fmode, imode, unsignedp);
5001 if (icode == CODE_FOR_nothing && unsignedp)
5002 {
5003 enum insn_code scode = can_float_p (fmode, imode, 0);
5004 if (scode != CODE_FOR_nothing)
5005 can_do_signed = true;
5006 if (imode != GET_MODE (from))
5007 icode = scode, doing_unsigned = 0;
5008 }
5009
5010 if (icode != CODE_FOR_nothing)
5011 {
5012 if (imode != GET_MODE (from))
5013 from = convert_to_mode (imode, from, unsignedp);
5014
5015 if (fmode != GET_MODE (to))
5016 target = gen_reg_rtx (fmode);
5017
5018 emit_unop_insn (icode, target, from,
5019 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5020
5021 if (target != to)
5022 convert_move (to, target, 0);
5023 return;
5024 }
5025 }
5026
5027 /* Unsigned integer, and no way to convert directly. Convert as signed,
5028 then conditionally adjust the result. */
5029 if (unsignedp && can_do_signed)
5030 {
5031 rtx label = gen_label_rtx ();
5032 rtx temp;
5033 REAL_VALUE_TYPE offset;
5034
5035 /* Look for a usable floating mode FMODE wider than the source and at
5036 least as wide as the target. Using FMODE will avoid rounding woes
5037 with unsigned values greater than the signed maximum value. */
5038
5039 for (fmode = GET_MODE (to); fmode != VOIDmode;
5040 fmode = GET_MODE_WIDER_MODE (fmode))
5041 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5042 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5043 break;
5044
5045 if (fmode == VOIDmode)
5046 {
5047 /* There is no such mode. Pretend the target is wide enough. */
5048 fmode = GET_MODE (to);
5049
5050 /* Avoid double-rounding when TO is narrower than FROM. */
5051 if ((significand_size (fmode) + 1)
5052 < GET_MODE_BITSIZE (GET_MODE (from)))
5053 {
5054 rtx temp1;
5055 rtx neglabel = gen_label_rtx ();
5056
5057 /* Don't use TARGET if it isn't a register, is a hard register,
5058 or is the wrong mode. */
5059 if (!REG_P (target)
5060 || REGNO (target) < FIRST_PSEUDO_REGISTER
5061 || GET_MODE (target) != fmode)
5062 target = gen_reg_rtx (fmode);
5063
5064 imode = GET_MODE (from);
5065 do_pending_stack_adjust ();
5066
5067 /* Test whether the sign bit is set. */
5068 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5069 0, neglabel);
5070
5071 /* The sign bit is not set. Convert as signed. */
5072 expand_float (target, from, 0);
5073 emit_jump_insn (gen_jump (label));
5074 emit_barrier ();
5075
5076 /* The sign bit is set.
5077 Convert to a usable (positive signed) value by shifting right
5078 one bit, while remembering if a nonzero bit was shifted
5079 out; i.e., compute (from & 1) | (from >> 1). */
5080
5081 emit_label (neglabel);
5082 temp = expand_binop (imode, and_optab, from, const1_rtx,
5083 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5084 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5085 NULL_RTX, 1);
5086 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5087 OPTAB_LIB_WIDEN);
5088 expand_float (target, temp, 0);
5089
5090 /* Multiply by 2 to undo the shift above. */
5091 temp = expand_binop (fmode, add_optab, target, target,
5092 target, 0, OPTAB_LIB_WIDEN);
5093 if (temp != target)
5094 emit_move_insn (target, temp);
5095
5096 do_pending_stack_adjust ();
5097 emit_label (label);
5098 goto done;
5099 }
5100 }
5101
5102 /* If we are about to do some arithmetic to correct for an
5103 unsigned operand, do it in a pseudo-register. */
5104
5105 if (GET_MODE (to) != fmode
5106 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5107 target = gen_reg_rtx (fmode);
5108
5109 /* Convert as signed integer to floating. */
5110 expand_float (target, from, 0);
5111
5112 /* If FROM is negative (and therefore TO is negative),
5113 correct its value by 2**bitwidth. */
5114
5115 do_pending_stack_adjust ();
5116 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5117 0, label);
5118
5119
5120 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5121 temp = expand_binop (fmode, add_optab, target,
5122 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5123 target, 0, OPTAB_LIB_WIDEN);
5124 if (temp != target)
5125 emit_move_insn (target, temp);
5126
5127 do_pending_stack_adjust ();
5128 emit_label (label);
5129 goto done;
5130 }
5131
5132 /* No hardware instruction available; call a library routine. */
5133 {
5134 rtx libfunc;
5135 rtx insns;
5136 rtx value;
5137 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5138
5139 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5140 from = convert_to_mode (SImode, from, unsignedp);
5141
5142 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5143 gcc_assert (libfunc);
5144
5145 start_sequence ();
5146
5147 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5148 GET_MODE (to), 1, from,
5149 GET_MODE (from));
5150 insns = get_insns ();
5151 end_sequence ();
5152
5153 emit_libcall_block (insns, target, value,
5154 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5155 GET_MODE (to), from));
5156 }
5157
5158 done:
5159
5160 /* Copy result to requested destination
5161 if we have been computing in a temp location. */
5162
5163 if (target != to)
5164 {
5165 if (GET_MODE (target) == GET_MODE (to))
5166 emit_move_insn (to, target);
5167 else
5168 convert_move (to, target, 0);
5169 }
5170 }
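
/* Editor's note: illustrative stand-alone sketch (not part of the compiler,
   excluded from compilation) of the unsigned-to-float path above when no
   wider float mode is available: values with the sign bit set are halved as
   (from & 1) | (from >> 1), converted as signed, and then doubled, so only
   one rounding step is introduced.  The uint64_t/double types and the
   helper name are assumptions of the example, not of the code above.  */
#if 0
#include <stdint.h>

static double
unsigned_to_double_example (uint64_t from)
{
  if ((int64_t) from >= 0)
    return (double) (int64_t) from;     /* Sign bit clear: convert as signed.  */

  /* Sign bit set: compute (from & 1) | (from >> 1), convert, then double.  */
  uint64_t halved = (from & 1) | (from >> 1);
  double d = (double) (int64_t) halved; /* Positive as a signed value now.  */
  return d + d;                         /* Undo the shift by one bit.  */
}
#endif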
5171 \f
5172 /* Generate code to convert FROM to fixed point and store in TO. FROM
5173 must be floating point. */
5174
5175 void
5176 expand_fix (rtx to, rtx from, int unsignedp)
5177 {
5178 enum insn_code icode;
5179 rtx target = to;
5180 enum machine_mode fmode, imode;
5181 int must_trunc = 0;
5182
5183 /* We first try to find a pair of modes, one real and one integer, at
5184 least as wide as FROM and TO, respectively, in which we can open-code
5185 this conversion. If the integer mode is wider than the mode of TO,
5186 we can do the conversion either signed or unsigned. */
5187
5188 for (fmode = GET_MODE (from); fmode != VOIDmode;
5189 fmode = GET_MODE_WIDER_MODE (fmode))
5190 for (imode = GET_MODE (to); imode != VOIDmode;
5191 imode = GET_MODE_WIDER_MODE (imode))
5192 {
5193 int doing_unsigned = unsignedp;
5194
5195 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5196 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5197 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5198
5199 if (icode != CODE_FOR_nothing)
5200 {
5201 rtx last = get_last_insn ();
5202 if (fmode != GET_MODE (from))
5203 from = convert_to_mode (fmode, from, 0);
5204
5205 if (must_trunc)
5206 {
5207 rtx temp = gen_reg_rtx (GET_MODE (from));
5208 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5209 temp, 0);
5210 }
5211
5212 if (imode != GET_MODE (to))
5213 target = gen_reg_rtx (imode);
5214
5215 if (maybe_emit_unop_insn (icode, target, from,
5216 doing_unsigned ? UNSIGNED_FIX : FIX))
5217 {
5218 if (target != to)
5219 convert_move (to, target, unsignedp);
5220 return;
5221 }
5222 delete_insns_since (last);
5223 }
5224 }
5225
5226 /* For an unsigned conversion, there is one more way to do it.
5227 If we have an insn for the signed conversion, we generate code that
5228 compares the real value to the largest representable positive number.
5229 If it is smaller, the conversion is done normally. Otherwise, subtract
5230 one plus the highest signed number, convert, and add it back.
5231
5232 We only need to check all real modes, since we know we didn't find
5233 anything with a wider integer mode.
5234
5235 This code used to extend the FP value into a mode wider than the
5236 destination. That is needed for decimal float modes, which cannot
5237 accurately represent one plus the highest signed number of the same
5238 size, but not for binary modes. Consider, for instance, conversion
5239 from SFmode into DImode.
5240
5241 The hot path through the code deals with inputs smaller than 2^63 and
5242 does just the conversion, so there are no bits to lose.
5243
5244 In the other path we know the value is positive and in the range
5245 2^63..2^64-1 inclusive (for any other input, overflow happens and the
5246 result is undefined). So the most significant bit set in the mantissa
5247 corresponds to 2^63. The subtraction of 2^63 does not generate any
5248 rounding, as it simply clears out that bit. The rest is trivial. */
5249
5250 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5251 for (fmode = GET_MODE (from); fmode != VOIDmode;
5252 fmode = GET_MODE_WIDER_MODE (fmode))
5253 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5254 && (!DECIMAL_FLOAT_MODE_P (fmode)
5255 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5256 {
5257 int bitsize;
5258 REAL_VALUE_TYPE offset;
5259 rtx limit, lab1, lab2, insn;
5260
5261 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5262 real_2expN (&offset, bitsize - 1, fmode);
5263 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5264 lab1 = gen_label_rtx ();
5265 lab2 = gen_label_rtx ();
5266
5267 if (fmode != GET_MODE (from))
5268 from = convert_to_mode (fmode, from, 0);
5269
5270 /* See if we need to do the subtraction. */
5271 do_pending_stack_adjust ();
5272 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5273 0, lab1);
5274
5275 /* If not, do the signed "fix" and branch around fixup code. */
5276 expand_fix (to, from, 0);
5277 emit_jump_insn (gen_jump (lab2));
5278 emit_barrier ();
5279
5280 /* Otherwise, subtract 2**(N-1), convert to signed number,
5281 then add 2**(N-1). Do the addition using XOR since this
5282 will often generate better code. */
5283 emit_label (lab1);
5284 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5285 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5286 expand_fix (to, target, 0);
5287 target = expand_binop (GET_MODE (to), xor_optab, to,
5288 gen_int_mode
5289 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5290 GET_MODE (to)),
5291 to, 1, OPTAB_LIB_WIDEN);
5292
5293 if (target != to)
5294 emit_move_insn (to, target);
5295
5296 emit_label (lab2);
5297
5298 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5299 != CODE_FOR_nothing)
5300 {
5301 /* Make a place for a REG_NOTE and add it. */
5302 insn = emit_move_insn (to, to);
5303 set_unique_reg_note (insn,
5304 REG_EQUAL,
5305 gen_rtx_fmt_e (UNSIGNED_FIX,
5306 GET_MODE (to),
5307 copy_rtx (from)));
5308 }
5309
5310 return;
5311 }
5312
5313 /* We can't do it with an insn, so use a library call. But first ensure
5314 that the mode of TO is at least as wide as SImode, since those are the
5315 only library calls we know about. */
5316
5317 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5318 {
5319 target = gen_reg_rtx (SImode);
5320
5321 expand_fix (target, from, unsignedp);
5322 }
5323 else
5324 {
5325 rtx insns;
5326 rtx value;
5327 rtx libfunc;
5328
5329 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5330 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5331 gcc_assert (libfunc);
5332
5333 start_sequence ();
5334
5335 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5336 GET_MODE (to), 1, from,
5337 GET_MODE (from));
5338 insns = get_insns ();
5339 end_sequence ();
5340
5341 emit_libcall_block (insns, target, value,
5342 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5343 GET_MODE (to), from));
5344 }
5345
5346 if (target != to)
5347 {
5348 if (GET_MODE (to) == GET_MODE (target))
5349 emit_move_insn (to, target);
5350 else
5351 convert_move (to, target, 0);
5352 }
5353 }
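
/* Editor's note: illustrative stand-alone sketch (not part of the compiler,
   excluded from compilation) of the unsigned path in expand_fix above:
   inputs below 2^63 take the plain signed conversion; larger inputs have
   2^63 subtracted (exactly, since that only clears the top mantissa bit),
   are converted as signed, and have the top bit restored with XOR.  The
   double/uint64_t types and the helper name are assumptions of the example,
   not of the code above.  */
#if 0
#include <stdint.h>

static uint64_t
double_to_unsigned_example (double from)
{
  const double limit = 9223372036854775808.0;   /* 2^63 as a double.  */

  if (from < limit)
    return (uint64_t) (int64_t) from;            /* The signed "fix".  */

  /* Subtract 2^63, convert as signed, then restore 2^63 using XOR.  */
  int64_t fixed = (int64_t) (from - limit);
  return (uint64_t) fixed ^ (UINT64_C (1) << 63);
}
#endif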
5354
5355 /* Generate code to convert FROM to TO, where at least one of the two
5356 has a fixed-point mode. If UINTP is true, either TO or FROM is an
5357 unsigned integer. If SATP is true, we need to saturate the result. */
5358
5359 void
5360 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5361 {
5362 enum machine_mode to_mode = GET_MODE (to);
5363 enum machine_mode from_mode = GET_MODE (from);
5364 convert_optab tab;
5365 enum rtx_code this_code;
5366 enum insn_code code;
5367 rtx insns, value;
5368 rtx libfunc;
5369
5370 if (to_mode == from_mode)
5371 {
5372 emit_move_insn (to, from);
5373 return;
5374 }
5375
5376 if (uintp)
5377 {
5378 tab = satp ? satfractuns_optab : fractuns_optab;
5379 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5380 }
5381 else
5382 {
5383 tab = satp ? satfract_optab : fract_optab;
5384 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5385 }
5386 code = tab->handlers[to_mode][from_mode].insn_code;
5387 if (code != CODE_FOR_nothing)
5388 {
5389 emit_unop_insn (code, to, from, this_code);
5390 return;
5391 }
5392
5393 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5394 gcc_assert (libfunc);
5395
5396 start_sequence ();
5397 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5398 1, from, from_mode);
5399 insns = get_insns ();
5400 end_sequence ();
5401
5402 emit_libcall_block (insns, to, value,
5403 gen_rtx_fmt_e (tab->code, to_mode, from));
5404 }
5405
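/* For reference, the optab / rtx code pairs selected above are:

       uintp=0, satp=0:  fract_optab       / FRACT_CONVERT
       uintp=0, satp=1:  satfract_optab    / SAT_FRACT
       uintp=1, satp=0:  fractuns_optab    / UNSIGNED_FRACT_CONVERT
       uintp=1, satp=1:  satfractuns_optab / UNSIGNED_SAT_FRACT

   So, for example, a non-saturating SImode -> SQmode conversion with no
   insn pattern falls back to a libcall named per the fixed-point naming
   convention used later in this file, e.g. "__fractsisq".  */
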
5406 /* Generate code to convert FROM to fixed point and store in TO. FROM
5407 must be floating point, TO must be signed. Use the conversion optab
5408 TAB to do the conversion. */
5409
5410 bool
5411 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5412 {
5413 enum insn_code icode;
5414 rtx target = to;
5415 enum machine_mode fmode, imode;
5416
5417 /* We first try to find a pair of modes, one real and one integer, at
5418 least as wide as FROM and TO, respectively, in which we can open-code
5419 this conversion. If the integer mode is wider than the mode of TO,
5420 we can do the conversion either signed or unsigned. */
5421
5422 for (fmode = GET_MODE (from); fmode != VOIDmode;
5423 fmode = GET_MODE_WIDER_MODE (fmode))
5424 for (imode = GET_MODE (to); imode != VOIDmode;
5425 imode = GET_MODE_WIDER_MODE (imode))
5426 {
5427 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5428 if (icode != CODE_FOR_nothing)
5429 {
5430 rtx last = get_last_insn ();
5431 if (fmode != GET_MODE (from))
5432 from = convert_to_mode (fmode, from, 0);
5433
5434 if (imode != GET_MODE (to))
5435 target = gen_reg_rtx (imode);
5436
5437 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5438 {
5439 delete_insns_since (last);
5440 continue;
5441 }
5442 if (target != to)
5443 convert_move (to, target, 0);
5444 return true;
5445 }
5446 }
5447
5448 return false;
5449 }
5450 \f
5451 /* Report whether we have an instruction to perform the operation
5452 specified by CODE on operands of mode MODE. */
5453 int
5454 have_insn_for (enum rtx_code code, enum machine_mode mode)
5455 {
5456 return (code_to_optab[(int) code] != 0
5457 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5458 != CODE_FOR_nothing));
5459 }
5460
5461 /* Set all insn_code fields to CODE_FOR_nothing. */
5462
5463 static void
5464 init_insn_codes (void)
5465 {
5466 unsigned int i;
5467
5468 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5469 {
5470 unsigned int j;
5471 optab op;
5472
5473 op = &optab_table[i];
5474 for (j = 0; j < NUM_MACHINE_MODES; j++)
5475 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5476 }
5477 for (i = 0; i < (unsigned int) COI_MAX; i++)
5478 {
5479 unsigned int j, k;
5480 convert_optab op;
5481
5482 op = &convert_optab_table[i];
5483 for (j = 0; j < NUM_MACHINE_MODES; j++)
5484 for (k = 0; k < NUM_MACHINE_MODES; k++)
5485 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
5486 }
5487 }
5488
5489 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5490 static inline void
5491 init_optab (optab op, enum rtx_code code)
5492 {
5493 op->code = code;
5494 code_to_optab[(int) code] = op;
5495 }
5496
5497 /* Same, but fill in its code as CODE, and do _not_ write it into
5498 the code_to_optab table. */
5499 static inline void
5500 init_optabv (optab op, enum rtx_code code)
5501 {
5502 op->code = code;
5503 }
5504
5505 /* Conversion optabs never go in the code_to_optab table. */
5506 static void
5507 init_convert_optab (convert_optab op, enum rtx_code code)
5508 {
5509 op->code = code;
5510 }
5511
5512 /* Initialize the libfunc fields of an entire group of entries in some
5513 optab. Each entry is set equal to a string consisting of a leading
5514 pair of underscores followed by a generic operation name followed by
5515 a mode name (downshifted to lowercase) followed by a single character
5516 representing the number of operands for the given operation (which is
5517 usually one of the characters '2', '3', or '4').
5518
5519 OPTABLE is the table in which libfunc fields are to be initialized.
5520 OPNAME is the generic (string) name of the operation.
5521 SUFFIX is the character which specifies the number of operands for
5522 the given generic operation.
5523 MODE is the mode to generate for.
5524 */
5525
5526 static void
5527 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5528 {
5529 unsigned opname_len = strlen (opname);
5530 const char *mname = GET_MODE_NAME (mode);
5531 unsigned mname_len = strlen (mname);
5532 char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5533 char *p;
5534 const char *q;
5535
5536 p = libfunc_name;
5537 *p++ = '_';
5538 *p++ = '_';
5539 for (q = opname; *q; )
5540 *p++ = *q++;
5541 for (q = mname; *q; q++)
5542 *p++ = TOLOWER (*q);
5543 *p++ = suffix;
5544 *p = '\0';
5545
5546 set_optab_libfunc (optable, mode,
5547 ggc_alloc_string (libfunc_name, p - libfunc_name));
5548 }
5549
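/* For example, gen_libfunc (add_optab, "add", '3', SFmode) registers the
   libcall name "__addsf3", and gen_libfunc (udiv_optab, "udiv", '3', DImode)
   registers "__udivdi3".  A standalone sketch of the same string assembly
   (hypothetical helper, for illustration only):

       void
       example_libfunc_name (char *buf, size_t len,
                             const char *opname, const char *mode, char suffix)
       {
         snprintf (buf, len, "__%s%s%c", opname, mode, suffix);
       }

   called as example_libfunc_name (buf, sizeof buf, "add", "sf", '3'),
   which yields "__addsf3".  */
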
5550 /* Like gen_libfunc, but verify that an integer operation is involved. */
5551
5552 static void
5553 gen_int_libfunc (optab optable, const char *opname, char suffix,
5554 enum machine_mode mode)
5555 {
5556 int maxsize = 2 * BITS_PER_WORD;
5557
5558 if (GET_MODE_CLASS (mode) != MODE_INT)
5559 return;
5560 if (maxsize < LONG_LONG_TYPE_SIZE)
5561 maxsize = LONG_LONG_TYPE_SIZE;
5562 if (GET_MODE_CLASS (mode) != MODE_INT
5563 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5564 return;
5565 gen_libfunc (optable, opname, suffix, mode);
5566 }
5567
5568 /* Like gen_libfunc, but verify that an FP operation is involved and set the decimal prefix if needed. */
5569
5570 static void
5571 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5572 enum machine_mode mode)
5573 {
5574 char *dec_opname;
5575
5576 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5577 gen_libfunc (optable, opname, suffix, mode);
5578 if (DECIMAL_FLOAT_MODE_P (mode))
5579 {
5580 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5581 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5582 depending on the low level floating format used. */
5583 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5584 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5585 gen_libfunc (optable, dec_opname, suffix, mode);
5586 }
5587 }
5588
5589 /* Like gen_libfunc, but verify that a fixed-point operation is involved. */
5590
5591 static void
5592 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5593 enum machine_mode mode)
5594 {
5595 if (!ALL_FIXED_POINT_MODE_P (mode))
5596 return;
5597 gen_libfunc (optable, opname, suffix, mode);
5598 }
5599
5600 /* Like gen_libfunc, but verify that a signed fixed-point operation is
5601 involved. */
5602
5603 static void
5604 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5605 enum machine_mode mode)
5606 {
5607 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5608 return;
5609 gen_libfunc (optable, opname, suffix, mode);
5610 }
5611
5612 /* Like gen_libfunc, but verify that an unsigned fixed-point operation is
5613 involved. */
5614
5615 static void
5616 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5617 enum machine_mode mode)
5618 {
5619 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5620 return;
5621 gen_libfunc (optable, opname, suffix, mode);
5622 }
5623
5624 /* Like gen_libfunc, but verify that an FP or INT operation is involved. */
5625
5626 static void
5627 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5628 enum machine_mode mode)
5629 {
5630 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5631 gen_fp_libfunc (optable, name, suffix, mode);
5632 if (INTEGRAL_MODE_P (mode))
5633 gen_int_libfunc (optable, name, suffix, mode);
5634 }
5635
5636 /* Like gen_libfunc, but verify that an FP or INT operation is involved
5637 and add a 'v' suffix for the integer operation. */
5638
5639 static void
5640 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5641 enum machine_mode mode)
5642 {
5643 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5644 gen_fp_libfunc (optable, name, suffix, mode);
5645 if (GET_MODE_CLASS (mode) == MODE_INT)
5646 {
5647 int len = strlen (name);
5648 char *v_name = XALLOCAVEC (char, len + 2);
5649 strcpy (v_name, name);
5650 v_name[len] = 'v';
5651 v_name[len + 1] = 0;
5652 gen_int_libfunc (optable, v_name, suffix, mode);
5653 }
5654 }
5655
5656 /* Like gen_libfunc, but verify that an FP or INT or FIXED operation is
5657 involved. */
5658
5659 static void
5660 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5661 enum machine_mode mode)
5662 {
5663 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5664 gen_fp_libfunc (optable, name, suffix, mode);
5665 if (INTEGRAL_MODE_P (mode))
5666 gen_int_libfunc (optable, name, suffix, mode);
5667 if (ALL_FIXED_POINT_MODE_P (mode))
5668 gen_fixed_libfunc (optable, name, suffix, mode);
5669 }
5670
5671 /* Like gen_libfunc, but verify that an FP or INT or signed FIXED operation is
5672 involved. */
5673
5674 static void
5675 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5676 enum machine_mode mode)
5677 {
5678 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5679 gen_fp_libfunc (optable, name, suffix, mode);
5680 if (INTEGRAL_MODE_P (mode))
5681 gen_int_libfunc (optable, name, suffix, mode);
5682 if (SIGNED_FIXED_POINT_MODE_P (mode))
5683 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5684 }
5685
5686 /* Like gen_libfunc, but verify that an INT or FIXED operation is
5687 involved. */
5688
5689 static void
5690 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5691 enum machine_mode mode)
5692 {
5693 if (INTEGRAL_MODE_P (mode))
5694 gen_int_libfunc (optable, name, suffix, mode);
5695 if (ALL_FIXED_POINT_MODE_P (mode))
5696 gen_fixed_libfunc (optable, name, suffix, mode);
5697 }
5698
5699 /* Like gen_libfunc, but verify that an INT or signed FIXED operation is
5700 involved. */
5701
5702 static void
5703 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5704 enum machine_mode mode)
5705 {
5706 if (INTEGRAL_MODE_P (mode))
5707 gen_int_libfunc (optable, name, suffix, mode);
5708 if (SIGNED_FIXED_POINT_MODE_P (mode))
5709 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5710 }
5711
5712 /* Like gen_libfunc, but verify that an INT or unsigned FIXED operation is
5713 involved. */
5714
5715 static void
5716 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5717 enum machine_mode mode)
5718 {
5719 if (INTEGRAL_MODE_P (mode))
5720 gen_int_libfunc (optable, name, suffix, mode);
5721 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5722 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5723 }
5724
5725 /* Initialize the libfunc fields of an entire group of entries of an
5726 inter-mode-class conversion optab. The string formation rules are
5727 similar to the ones for init_libfuncs, above, but instead of having
5728 a mode name and an operand count these functions have two mode names
5729 and no operand count. */
5730
5731 static void
5732 gen_interclass_conv_libfunc (convert_optab tab,
5733 const char *opname,
5734 enum machine_mode tmode,
5735 enum machine_mode fmode)
5736 {
5737 size_t opname_len = strlen (opname);
5738 size_t mname_len = 0;
5739
5740 const char *fname, *tname;
5741 const char *q;
5742 char *libfunc_name, *suffix;
5743 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5744 char *p;
5745
5746 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5747 depends on which underlying decimal floating point format is used. */
5748 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5749
5750 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5751
5752 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5753 nondec_name[0] = '_';
5754 nondec_name[1] = '_';
5755 memcpy (&nondec_name[2], opname, opname_len);
5756 nondec_suffix = nondec_name + opname_len + 2;
5757
5758 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5759 dec_name[0] = '_';
5760 dec_name[1] = '_';
5761 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5762 memcpy (&dec_name[2+dec_len], opname, opname_len);
5763 dec_suffix = dec_name + dec_len + opname_len + 2;
5764
5765 fname = GET_MODE_NAME (fmode);
5766 tname = GET_MODE_NAME (tmode);
5767
5768 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5769 {
5770 libfunc_name = dec_name;
5771 suffix = dec_suffix;
5772 }
5773 else
5774 {
5775 libfunc_name = nondec_name;
5776 suffix = nondec_suffix;
5777 }
5778
5779 p = suffix;
5780 for (q = fname; *q; p++, q++)
5781 *p = TOLOWER (*q);
5782 for (q = tname; *q; p++, q++)
5783 *p = TOLOWER (*q);
5784
5785 *p = '\0';
5786
5787 set_conv_libfunc (tab, tmode, fmode,
5788 ggc_alloc_string (libfunc_name, p - libfunc_name));
5789 }
5790
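/* For example, with opname "fix", fmode SFmode and tmode SImode the name
   built above is "__fixsfsi"; with opname "float", fmode SImode and tmode
   DFmode it is "__floatsidf".  If either mode is a decimal float mode the
   DECIMAL_PREFIX is inserted after the underscores, giving names of the
   form "__bid_fixsdsi" or "__dpd_fixsdsi" depending on the underlying
   decimal format.  */
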
5791 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5792 an int->fp conversion. */
5793
5794 static void
5795 gen_int_to_fp_conv_libfunc (convert_optab tab,
5796 const char *opname,
5797 enum machine_mode tmode,
5798 enum machine_mode fmode)
5799 {
5800 if (GET_MODE_CLASS (fmode) != MODE_INT)
5801 return;
5802 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5803 return;
5804 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5805 }
5806
5807 /* ufloat_optab is special: it uses the "floatun" naming scheme for binary
5808 FP and the "floatuns" naming scheme for decimal FP. */
5809
5810 static void
5811 gen_ufloat_conv_libfunc (convert_optab tab,
5812 const char *opname ATTRIBUTE_UNUSED,
5813 enum machine_mode tmode,
5814 enum machine_mode fmode)
5815 {
5816 if (DECIMAL_FLOAT_MODE_P (tmode))
5817 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5818 else
5819 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5820 }
5821
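/* So, following the scheme above, an unsigned SImode -> SFmode conversion
   gets the libcall name "__floatunsisf" ("floatun" + "si" + "sf"), while an
   unsigned SImode -> SDmode conversion gets "__bid_floatunssisd" or
   "__dpd_floatunssisd" ("floatuns" plus the decimal prefix).  */
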
5822 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5823 an int->fp conversion with no decimal floating point involved. */
5824
5825 static void
5826 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5827 const char *opname,
5828 enum machine_mode tmode,
5829 enum machine_mode fmode)
5830 {
5831 if (GET_MODE_CLASS (fmode) != MODE_INT)
5832 return;
5833 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5834 return;
5835 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5836 }
5837
5838 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5839 an fp->int conversion. */
5840
5841 static void
5842 gen_fp_to_int_conv_libfunc (convert_optab tab,
5843 const char *opname,
5844 enum machine_mode tmode,
5845 enum machine_mode fmode)
5846 {
5847 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5848 return;
5849 if (GET_MODE_CLASS (tmode) != MODE_INT)
5850 return;
5851 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5852 }
5853
5854 /* Initialize the libfunc fields of an intra-mode-class conversion optab.
5855 The string formation rules are similar to the ones for init_libfuncs,
5856 above. */
5857
5858 static void
5859 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5860 enum machine_mode tmode, enum machine_mode fmode)
5861 {
5862 size_t opname_len = strlen (opname);
5863 size_t mname_len = 0;
5864
5865 const char *fname, *tname;
5866 const char *q;
5867 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5868 char *libfunc_name, *suffix;
5869 char *p;
5870
5871 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5872 depends on which underlying decimal floating point format is used. */
5873 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5874
5875 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5876
5877 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5878 nondec_name[0] = '_';
5879 nondec_name[1] = '_';
5880 memcpy (&nondec_name[2], opname, opname_len);
5881 nondec_suffix = nondec_name + opname_len + 2;
5882
5883 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5884 dec_name[0] = '_';
5885 dec_name[1] = '_';
5886 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5887 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5888 dec_suffix = dec_name + dec_len + opname_len + 2;
5889
5890 fname = GET_MODE_NAME (fmode);
5891 tname = GET_MODE_NAME (tmode);
5892
5893 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5894 {
5895 libfunc_name = dec_name;
5896 suffix = dec_suffix;
5897 }
5898 else
5899 {
5900 libfunc_name = nondec_name;
5901 suffix = nondec_suffix;
5902 }
5903
5904 p = suffix;
5905 for (q = fname; *q; p++, q++)
5906 *p = TOLOWER (*q);
5907 for (q = tname; *q; p++, q++)
5908 *p = TOLOWER (*q);
5909
5910 *p++ = '2';
5911 *p = '\0';
5912
5913 set_conv_libfunc (tab, tmode, fmode,
5914 ggc_alloc_string (libfunc_name, p - libfunc_name));
5915 }
5916
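/* For example, with opname "extend", fmode SFmode and tmode DFmode the
   name built above is "__extendsfdf2", and with opname "trunc", fmode
   DFmode and tmode SFmode it is "__truncdfsf2".  */
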
5917 /* Pick proper libcall for trunc_optab. We need to choose between
5918 truncation and extension, and between interclass and intraclass. */
5919
5920 static void
5921 gen_trunc_conv_libfunc (convert_optab tab,
5922 const char *opname,
5923 enum machine_mode tmode,
5924 enum machine_mode fmode)
5925 {
5926 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5927 return;
5928 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5929 return;
5930 if (tmode == fmode)
5931 return;
5932
5933 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5934 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5935 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5936
5937 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5938 return;
5939
5940 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5941 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5942 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5943 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5944 }
5945
5946 /* Pick proper libcall for extend_optab. We need to choose between
5947 truncation and extension, and between interclass and intraclass. */
5948
5949 static void
5950 gen_extend_conv_libfunc (convert_optab tab,
5951 const char *opname ATTRIBUTE_UNUSED,
5952 enum machine_mode tmode,
5953 enum machine_mode fmode)
5954 {
5955 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5956 return;
5957 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5958 return;
5959 if (tmode == fmode)
5960 return;
5961
5962 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5963 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5964 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5965
5966 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5967 return;
5968
5969 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5970 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5971 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5972 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5973 }
5974
5975 /* Pick proper libcall for fract_optab. We need to choose between
5976 interclass and intraclass. */
5977
5978 static void
5979 gen_fract_conv_libfunc (convert_optab tab,
5980 const char *opname,
5981 enum machine_mode tmode,
5982 enum machine_mode fmode)
5983 {
5984 if (tmode == fmode)
5985 return;
5986 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5987 return;
5988
5989 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5990 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5991 else
5992 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5993 }
5994
5995 /* Pick proper libcall for fractuns_optab. */
5996
5997 static void
5998 gen_fractuns_conv_libfunc (convert_optab tab,
5999 const char *opname,
6000 enum machine_mode tmode,
6001 enum machine_mode fmode)
6002 {
6003 if (tmode == fmode)
6004 return;
6005 /* One mode must be a fixed-point mode, and the other must be an integer
6006 mode. */
6007 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
6008 || (ALL_FIXED_POINT_MODE_P (fmode)
6009 && GET_MODE_CLASS (tmode) == MODE_INT)))
6010 return;
6011
6012 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6013 }
6014
6015 /* Pick proper libcall for satfract_optab. We need to choose between
6016 interclass and intraclass. */
6017
6018 static void
6019 gen_satfract_conv_libfunc (convert_optab tab,
6020 const char *opname,
6021 enum machine_mode tmode,
6022 enum machine_mode fmode)
6023 {
6024 if (tmode == fmode)
6025 return;
6026 /* TMODE must be a fixed-point mode. */
6027 if (!ALL_FIXED_POINT_MODE_P (tmode))
6028 return;
6029
6030 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6031 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6032 else
6033 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6034 }
6035
6036 /* Pick proper libcall for satfractuns_optab. */
6037
6038 static void
6039 gen_satfractuns_conv_libfunc (convert_optab tab,
6040 const char *opname,
6041 enum machine_mode tmode,
6042 enum machine_mode fmode)
6043 {
6044 if (tmode == fmode)
6045 return;
6046 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6047 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6048 return;
6049
6050 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6051 }
6052
6053 /* A table of previously-created libfuncs, hashed by name. */
6054 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
6055
6056 /* Hashtable callbacks for libfunc_decls. */
6057
6058 static hashval_t
6059 libfunc_decl_hash (const void *entry)
6060 {
6061 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
6062 }
6063
6064 static int
6065 libfunc_decl_eq (const void *entry1, const void *entry2)
6066 {
6067 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
6068 }
6069
6070 /* Build a decl for a libfunc named NAME. */
6071
6072 tree
6073 build_libfunc_function (const char *name)
6074 {
6075 tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
6076 get_identifier (name),
6077 build_function_type (integer_type_node, NULL_TREE));
6078 /* ??? We don't have any type information except that this is
6079 a function. Pretend this is "int foo()". */
6080 DECL_ARTIFICIAL (decl) = 1;
6081 DECL_EXTERNAL (decl) = 1;
6082 TREE_PUBLIC (decl) = 1;
6083 gcc_assert (DECL_ASSEMBLER_NAME (decl));
6084
6085 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6086 are the flags assigned by targetm.encode_section_info. */
6087 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6088
6089 return decl;
6090 }
6091
6092 rtx
6093 init_one_libfunc (const char *name)
6094 {
6095 tree id, decl;
6096 void **slot;
6097 hashval_t hash;
6098
6099 if (libfunc_decls == NULL)
6100 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6101 libfunc_decl_eq, NULL);
6102
6103 /* See if we have already created a libfunc decl for this function. */
6104 id = get_identifier (name);
6105 hash = IDENTIFIER_HASH_VALUE (id);
6106 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6107 decl = (tree) *slot;
6108 if (decl == NULL)
6109 {
6110 /* Create a new decl, so that it can be passed to
6111 targetm.encode_section_info. */
6112 decl = build_libfunc_function (name);
6113 *slot = decl;
6114 }
6115 return XEXP (DECL_RTL (decl), 0);
6116 }
6117
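/* An illustrative (hypothetical) use of the cache above: repeated requests
   for the same name return the same SYMBOL_REF, because the decl is found
   in libfunc_decls instead of being rebuilt:

       rtx a = init_one_libfunc ("memcpy");
       rtx b = init_one_libfunc ("memcpy");
       gcc_assert (a == b);   */
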
6118 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6119
6120 rtx
6121 set_user_assembler_libfunc (const char *name, const char *asmspec)
6122 {
6123 tree id, decl;
6124 void **slot;
6125 hashval_t hash;
6126
6127 id = get_identifier (name);
6128 hash = IDENTIFIER_HASH_VALUE (id);
6129 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6130 gcc_assert (slot);
6131 decl = (tree) *slot;
6132 set_user_assembler_name (decl, asmspec);
6133 return XEXP (DECL_RTL (decl), 0);
6134 }
6135
6136 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6137 MODE to NAME, which should be either 0 or a string constant. */
6138 void
6139 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6140 {
6141 rtx val;
6142 struct libfunc_entry e;
6143 struct libfunc_entry **slot;
6144 e.optab = (size_t) (optable - &optab_table[0]);
6145 e.mode1 = mode;
6146 e.mode2 = VOIDmode;
6147
6148 if (name)
6149 val = init_one_libfunc (name);
6150 else
6151 val = 0;
6152 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6153 if (*slot == NULL)
6154 *slot = ggc_alloc_libfunc_entry ();
6155 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6156 (*slot)->mode1 = mode;
6157 (*slot)->mode2 = VOIDmode;
6158 (*slot)->libfunc = val;
6159 }
6160
6161 /* Call this to reset the function entry for one conversion optab
6162 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6163 either 0 or a string constant. */
6164 void
6165 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6166 enum machine_mode fmode, const char *name)
6167 {
6168 rtx val;
6169 struct libfunc_entry e;
6170 struct libfunc_entry **slot;
6171 e.optab = (size_t) (optable - &convert_optab_table[0]);
6172 e.mode1 = tmode;
6173 e.mode2 = fmode;
6174
6175 if (name)
6176 val = init_one_libfunc (name);
6177 else
6178 val = 0;
6179 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6180 if (*slot == NULL)
6181 *slot = ggc_alloc_libfunc_entry ();
6182 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6183 (*slot)->mode1 = tmode;
6184 (*slot)->mode2 = fmode;
6185 (*slot)->libfunc = val;
6186 }
6187
6188 /* Call this to initialize the contents of the optabs
6189 appropriately for the current target machine. */
6190
6191 void
6192 init_optabs (void)
6193 {
6194 unsigned int i;
6195 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
6196 static bool reinit;
6197 #endif
6198
6199 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6200 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6201
6202 #ifdef HAVE_conditional_move
6203 for (i = 0; i < NUM_MACHINE_MODES; i++)
6204 movcc_gen_code[i] = CODE_FOR_nothing;
6205 #endif
6206
6207 for (i = 0; i < NUM_MACHINE_MODES; i++)
6208 {
6209 vcond_gen_code[i] = CODE_FOR_nothing;
6210 vcondu_gen_code[i] = CODE_FOR_nothing;
6211 }
6212
6213 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
6214 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
6215 if (reinit)
6216 init_insn_codes ();
6217 #else
6218 init_insn_codes ();
6219 #endif
6220
6221 init_optab (add_optab, PLUS);
6222 init_optabv (addv_optab, PLUS);
6223 init_optab (sub_optab, MINUS);
6224 init_optabv (subv_optab, MINUS);
6225 init_optab (ssadd_optab, SS_PLUS);
6226 init_optab (usadd_optab, US_PLUS);
6227 init_optab (sssub_optab, SS_MINUS);
6228 init_optab (ussub_optab, US_MINUS);
6229 init_optab (smul_optab, MULT);
6230 init_optab (ssmul_optab, SS_MULT);
6231 init_optab (usmul_optab, US_MULT);
6232 init_optabv (smulv_optab, MULT);
6233 init_optab (smul_highpart_optab, UNKNOWN);
6234 init_optab (umul_highpart_optab, UNKNOWN);
6235 init_optab (smul_widen_optab, UNKNOWN);
6236 init_optab (umul_widen_optab, UNKNOWN);
6237 init_optab (usmul_widen_optab, UNKNOWN);
6238 init_optab (smadd_widen_optab, UNKNOWN);
6239 init_optab (umadd_widen_optab, UNKNOWN);
6240 init_optab (ssmadd_widen_optab, UNKNOWN);
6241 init_optab (usmadd_widen_optab, UNKNOWN);
6242 init_optab (smsub_widen_optab, UNKNOWN);
6243 init_optab (umsub_widen_optab, UNKNOWN);
6244 init_optab (ssmsub_widen_optab, UNKNOWN);
6245 init_optab (usmsub_widen_optab, UNKNOWN);
6246 init_optab (sdiv_optab, DIV);
6247 init_optab (ssdiv_optab, SS_DIV);
6248 init_optab (usdiv_optab, US_DIV);
6249 init_optabv (sdivv_optab, DIV);
6250 init_optab (sdivmod_optab, UNKNOWN);
6251 init_optab (udiv_optab, UDIV);
6252 init_optab (udivmod_optab, UNKNOWN);
6253 init_optab (smod_optab, MOD);
6254 init_optab (umod_optab, UMOD);
6255 init_optab (fmod_optab, UNKNOWN);
6256 init_optab (remainder_optab, UNKNOWN);
6257 init_optab (ftrunc_optab, UNKNOWN);
6258 init_optab (and_optab, AND);
6259 init_optab (ior_optab, IOR);
6260 init_optab (xor_optab, XOR);
6261 init_optab (ashl_optab, ASHIFT);
6262 init_optab (ssashl_optab, SS_ASHIFT);
6263 init_optab (usashl_optab, US_ASHIFT);
6264 init_optab (ashr_optab, ASHIFTRT);
6265 init_optab (lshr_optab, LSHIFTRT);
6266 init_optab (rotl_optab, ROTATE);
6267 init_optab (rotr_optab, ROTATERT);
6268 init_optab (smin_optab, SMIN);
6269 init_optab (smax_optab, SMAX);
6270 init_optab (umin_optab, UMIN);
6271 init_optab (umax_optab, UMAX);
6272 init_optab (pow_optab, UNKNOWN);
6273 init_optab (atan2_optab, UNKNOWN);
6274
6275 /* These three have codes assigned exclusively for the sake of
6276 have_insn_for. */
6277 init_optab (mov_optab, SET);
6278 init_optab (movstrict_optab, STRICT_LOW_PART);
6279 init_optab (cbranch_optab, COMPARE);
6280
6281 init_optab (cmov_optab, UNKNOWN);
6282 init_optab (cstore_optab, UNKNOWN);
6283 init_optab (ctrap_optab, UNKNOWN);
6284
6285 init_optab (storent_optab, UNKNOWN);
6286
6287 init_optab (cmp_optab, UNKNOWN);
6288 init_optab (ucmp_optab, UNKNOWN);
6289
6290 init_optab (eq_optab, EQ);
6291 init_optab (ne_optab, NE);
6292 init_optab (gt_optab, GT);
6293 init_optab (ge_optab, GE);
6294 init_optab (lt_optab, LT);
6295 init_optab (le_optab, LE);
6296 init_optab (unord_optab, UNORDERED);
6297
6298 init_optab (neg_optab, NEG);
6299 init_optab (ssneg_optab, SS_NEG);
6300 init_optab (usneg_optab, US_NEG);
6301 init_optabv (negv_optab, NEG);
6302 init_optab (abs_optab, ABS);
6303 init_optabv (absv_optab, ABS);
6304 init_optab (addcc_optab, UNKNOWN);
6305 init_optab (one_cmpl_optab, NOT);
6306 init_optab (bswap_optab, BSWAP);
6307 init_optab (ffs_optab, FFS);
6308 init_optab (clz_optab, CLZ);
6309 init_optab (ctz_optab, CTZ);
6310 init_optab (popcount_optab, POPCOUNT);
6311 init_optab (parity_optab, PARITY);
6312 init_optab (sqrt_optab, SQRT);
6313 init_optab (floor_optab, UNKNOWN);
6314 init_optab (ceil_optab, UNKNOWN);
6315 init_optab (round_optab, UNKNOWN);
6316 init_optab (btrunc_optab, UNKNOWN);
6317 init_optab (nearbyint_optab, UNKNOWN);
6318 init_optab (rint_optab, UNKNOWN);
6319 init_optab (sincos_optab, UNKNOWN);
6320 init_optab (sin_optab, UNKNOWN);
6321 init_optab (asin_optab, UNKNOWN);
6322 init_optab (cos_optab, UNKNOWN);
6323 init_optab (acos_optab, UNKNOWN);
6324 init_optab (exp_optab, UNKNOWN);
6325 init_optab (exp10_optab, UNKNOWN);
6326 init_optab (exp2_optab, UNKNOWN);
6327 init_optab (expm1_optab, UNKNOWN);
6328 init_optab (ldexp_optab, UNKNOWN);
6329 init_optab (scalb_optab, UNKNOWN);
6330 init_optab (significand_optab, UNKNOWN);
6331 init_optab (logb_optab, UNKNOWN);
6332 init_optab (ilogb_optab, UNKNOWN);
6333 init_optab (log_optab, UNKNOWN);
6334 init_optab (log10_optab, UNKNOWN);
6335 init_optab (log2_optab, UNKNOWN);
6336 init_optab (log1p_optab, UNKNOWN);
6337 init_optab (tan_optab, UNKNOWN);
6338 init_optab (atan_optab, UNKNOWN);
6339 init_optab (copysign_optab, UNKNOWN);
6340 init_optab (signbit_optab, UNKNOWN);
6341
6342 init_optab (isinf_optab, UNKNOWN);
6343
6344 init_optab (strlen_optab, UNKNOWN);
6345 init_optab (push_optab, UNKNOWN);
6346
6347 init_optab (reduc_smax_optab, UNKNOWN);
6348 init_optab (reduc_umax_optab, UNKNOWN);
6349 init_optab (reduc_smin_optab, UNKNOWN);
6350 init_optab (reduc_umin_optab, UNKNOWN);
6351 init_optab (reduc_splus_optab, UNKNOWN);
6352 init_optab (reduc_uplus_optab, UNKNOWN);
6353
6354 init_optab (ssum_widen_optab, UNKNOWN);
6355 init_optab (usum_widen_optab, UNKNOWN);
6356 init_optab (sdot_prod_optab, UNKNOWN);
6357 init_optab (udot_prod_optab, UNKNOWN);
6358
6359 init_optab (vec_extract_optab, UNKNOWN);
6360 init_optab (vec_extract_even_optab, UNKNOWN);
6361 init_optab (vec_extract_odd_optab, UNKNOWN);
6362 init_optab (vec_interleave_high_optab, UNKNOWN);
6363 init_optab (vec_interleave_low_optab, UNKNOWN);
6364 init_optab (vec_set_optab, UNKNOWN);
6365 init_optab (vec_init_optab, UNKNOWN);
6366 init_optab (vec_shl_optab, UNKNOWN);
6367 init_optab (vec_shr_optab, UNKNOWN);
6368 init_optab (vec_realign_load_optab, UNKNOWN);
6369 init_optab (movmisalign_optab, UNKNOWN);
6370 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6371 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6372 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6373 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6374 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6375 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6376 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6377 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6378 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6379 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6380 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6381 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6382 init_optab (vec_pack_trunc_optab, UNKNOWN);
6383 init_optab (vec_pack_usat_optab, UNKNOWN);
6384 init_optab (vec_pack_ssat_optab, UNKNOWN);
6385 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6386 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6387
6388 init_optab (powi_optab, UNKNOWN);
6389
6390 /* Conversions. */
6391 init_convert_optab (sext_optab, SIGN_EXTEND);
6392 init_convert_optab (zext_optab, ZERO_EXTEND);
6393 init_convert_optab (trunc_optab, TRUNCATE);
6394 init_convert_optab (sfix_optab, FIX);
6395 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6396 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6397 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6398 init_convert_optab (sfloat_optab, FLOAT);
6399 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6400 init_convert_optab (lrint_optab, UNKNOWN);
6401 init_convert_optab (lround_optab, UNKNOWN);
6402 init_convert_optab (lfloor_optab, UNKNOWN);
6403 init_convert_optab (lceil_optab, UNKNOWN);
6404
6405 init_convert_optab (fract_optab, FRACT_CONVERT);
6406 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6407 init_convert_optab (satfract_optab, SAT_FRACT);
6408 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6409
6410 for (i = 0; i < NUM_MACHINE_MODES; i++)
6411 {
6412 movmem_optab[i] = CODE_FOR_nothing;
6413 cmpstr_optab[i] = CODE_FOR_nothing;
6414 cmpstrn_optab[i] = CODE_FOR_nothing;
6415 cmpmem_optab[i] = CODE_FOR_nothing;
6416 setmem_optab[i] = CODE_FOR_nothing;
6417
6418 sync_add_optab[i] = CODE_FOR_nothing;
6419 sync_sub_optab[i] = CODE_FOR_nothing;
6420 sync_ior_optab[i] = CODE_FOR_nothing;
6421 sync_and_optab[i] = CODE_FOR_nothing;
6422 sync_xor_optab[i] = CODE_FOR_nothing;
6423 sync_nand_optab[i] = CODE_FOR_nothing;
6424 sync_old_add_optab[i] = CODE_FOR_nothing;
6425 sync_old_sub_optab[i] = CODE_FOR_nothing;
6426 sync_old_ior_optab[i] = CODE_FOR_nothing;
6427 sync_old_and_optab[i] = CODE_FOR_nothing;
6428 sync_old_xor_optab[i] = CODE_FOR_nothing;
6429 sync_old_nand_optab[i] = CODE_FOR_nothing;
6430 sync_new_add_optab[i] = CODE_FOR_nothing;
6431 sync_new_sub_optab[i] = CODE_FOR_nothing;
6432 sync_new_ior_optab[i] = CODE_FOR_nothing;
6433 sync_new_and_optab[i] = CODE_FOR_nothing;
6434 sync_new_xor_optab[i] = CODE_FOR_nothing;
6435 sync_new_nand_optab[i] = CODE_FOR_nothing;
6436 sync_compare_and_swap[i] = CODE_FOR_nothing;
6437 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6438 sync_lock_release[i] = CODE_FOR_nothing;
6439
6440 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6441 }
6442
6443 /* Fill in the optabs with the insns we support. */
6444 init_all_optabs ();
6445
6446 /* Initialize the optabs with the names of the library functions. */
6447 add_optab->libcall_basename = "add";
6448 add_optab->libcall_suffix = '3';
6449 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6450 addv_optab->libcall_basename = "add";
6451 addv_optab->libcall_suffix = '3';
6452 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6453 ssadd_optab->libcall_basename = "ssadd";
6454 ssadd_optab->libcall_suffix = '3';
6455 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6456 usadd_optab->libcall_basename = "usadd";
6457 usadd_optab->libcall_suffix = '3';
6458 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6459 sub_optab->libcall_basename = "sub";
6460 sub_optab->libcall_suffix = '3';
6461 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6462 subv_optab->libcall_basename = "sub";
6463 subv_optab->libcall_suffix = '3';
6464 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6465 sssub_optab->libcall_basename = "sssub";
6466 sssub_optab->libcall_suffix = '3';
6467 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6468 ussub_optab->libcall_basename = "ussub";
6469 ussub_optab->libcall_suffix = '3';
6470 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6471 smul_optab->libcall_basename = "mul";
6472 smul_optab->libcall_suffix = '3';
6473 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6474 smulv_optab->libcall_basename = "mul";
6475 smulv_optab->libcall_suffix = '3';
6476 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6477 ssmul_optab->libcall_basename = "ssmul";
6478 ssmul_optab->libcall_suffix = '3';
6479 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6480 usmul_optab->libcall_basename = "usmul";
6481 usmul_optab->libcall_suffix = '3';
6482 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6483 sdiv_optab->libcall_basename = "div";
6484 sdiv_optab->libcall_suffix = '3';
6485 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6486 sdivv_optab->libcall_basename = "divv";
6487 sdivv_optab->libcall_suffix = '3';
6488 sdivv_optab->libcall_gen = gen_int_libfunc;
6489 ssdiv_optab->libcall_basename = "ssdiv";
6490 ssdiv_optab->libcall_suffix = '3';
6491 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6492 udiv_optab->libcall_basename = "udiv";
6493 udiv_optab->libcall_suffix = '3';
6494 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6495 usdiv_optab->libcall_basename = "usdiv";
6496 usdiv_optab->libcall_suffix = '3';
6497 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6498 sdivmod_optab->libcall_basename = "divmod";
6499 sdivmod_optab->libcall_suffix = '4';
6500 sdivmod_optab->libcall_gen = gen_int_libfunc;
6501 udivmod_optab->libcall_basename = "udivmod";
6502 udivmod_optab->libcall_suffix = '4';
6503 udivmod_optab->libcall_gen = gen_int_libfunc;
6504 smod_optab->libcall_basename = "mod";
6505 smod_optab->libcall_suffix = '3';
6506 smod_optab->libcall_gen = gen_int_libfunc;
6507 umod_optab->libcall_basename = "umod";
6508 umod_optab->libcall_suffix = '3';
6509 umod_optab->libcall_gen = gen_int_libfunc;
6510 ftrunc_optab->libcall_basename = "ftrunc";
6511 ftrunc_optab->libcall_suffix = '2';
6512 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6513 and_optab->libcall_basename = "and";
6514 and_optab->libcall_suffix = '3';
6515 and_optab->libcall_gen = gen_int_libfunc;
6516 ior_optab->libcall_basename = "ior";
6517 ior_optab->libcall_suffix = '3';
6518 ior_optab->libcall_gen = gen_int_libfunc;
6519 xor_optab->libcall_basename = "xor";
6520 xor_optab->libcall_suffix = '3';
6521 xor_optab->libcall_gen = gen_int_libfunc;
6522 ashl_optab->libcall_basename = "ashl";
6523 ashl_optab->libcall_suffix = '3';
6524 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6525 ssashl_optab->libcall_basename = "ssashl";
6526 ssashl_optab->libcall_suffix = '3';
6527 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6528 usashl_optab->libcall_basename = "usashl";
6529 usashl_optab->libcall_suffix = '3';
6530 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6531 ashr_optab->libcall_basename = "ashr";
6532 ashr_optab->libcall_suffix = '3';
6533 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6534 lshr_optab->libcall_basename = "lshr";
6535 lshr_optab->libcall_suffix = '3';
6536 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6537 smin_optab->libcall_basename = "min";
6538 smin_optab->libcall_suffix = '3';
6539 smin_optab->libcall_gen = gen_int_fp_libfunc;
6540 smax_optab->libcall_basename = "max";
6541 smax_optab->libcall_suffix = '3';
6542 smax_optab->libcall_gen = gen_int_fp_libfunc;
6543 umin_optab->libcall_basename = "umin";
6544 umin_optab->libcall_suffix = '3';
6545 umin_optab->libcall_gen = gen_int_libfunc;
6546 umax_optab->libcall_basename = "umax";
6547 umax_optab->libcall_suffix = '3';
6548 umax_optab->libcall_gen = gen_int_libfunc;
6549 neg_optab->libcall_basename = "neg";
6550 neg_optab->libcall_suffix = '2';
6551 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6552 ssneg_optab->libcall_basename = "ssneg";
6553 ssneg_optab->libcall_suffix = '2';
6554 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6555 usneg_optab->libcall_basename = "usneg";
6556 usneg_optab->libcall_suffix = '2';
6557 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6558 negv_optab->libcall_basename = "neg";
6559 negv_optab->libcall_suffix = '2';
6560 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6561 one_cmpl_optab->libcall_basename = "one_cmpl";
6562 one_cmpl_optab->libcall_suffix = '2';
6563 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6564 ffs_optab->libcall_basename = "ffs";
6565 ffs_optab->libcall_suffix = '2';
6566 ffs_optab->libcall_gen = gen_int_libfunc;
6567 clz_optab->libcall_basename = "clz";
6568 clz_optab->libcall_suffix = '2';
6569 clz_optab->libcall_gen = gen_int_libfunc;
6570 ctz_optab->libcall_basename = "ctz";
6571 ctz_optab->libcall_suffix = '2';
6572 ctz_optab->libcall_gen = gen_int_libfunc;
6573 popcount_optab->libcall_basename = "popcount";
6574 popcount_optab->libcall_suffix = '2';
6575 popcount_optab->libcall_gen = gen_int_libfunc;
6576 parity_optab->libcall_basename = "parity";
6577 parity_optab->libcall_suffix = '2';
6578 parity_optab->libcall_gen = gen_int_libfunc;
6579
6580 /* Comparison libcalls for integers MUST come in pairs,
6581 signed/unsigned. */
6582 cmp_optab->libcall_basename = "cmp";
6583 cmp_optab->libcall_suffix = '2';
6584 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6585 ucmp_optab->libcall_basename = "ucmp";
6586 ucmp_optab->libcall_suffix = '2';
6587 ucmp_optab->libcall_gen = gen_int_libfunc;
6588
6589 /* EQ etc are floating point only. */
6590 eq_optab->libcall_basename = "eq";
6591 eq_optab->libcall_suffix = '2';
6592 eq_optab->libcall_gen = gen_fp_libfunc;
6593 ne_optab->libcall_basename = "ne";
6594 ne_optab->libcall_suffix = '2';
6595 ne_optab->libcall_gen = gen_fp_libfunc;
6596 gt_optab->libcall_basename = "gt";
6597 gt_optab->libcall_suffix = '2';
6598 gt_optab->libcall_gen = gen_fp_libfunc;
6599 ge_optab->libcall_basename = "ge";
6600 ge_optab->libcall_suffix = '2';
6601 ge_optab->libcall_gen = gen_fp_libfunc;
6602 lt_optab->libcall_basename = "lt";
6603 lt_optab->libcall_suffix = '2';
6604 lt_optab->libcall_gen = gen_fp_libfunc;
6605 le_optab->libcall_basename = "le";
6606 le_optab->libcall_suffix = '2';
6607 le_optab->libcall_gen = gen_fp_libfunc;
6608 unord_optab->libcall_basename = "unord";
6609 unord_optab->libcall_suffix = '2';
6610 unord_optab->libcall_gen = gen_fp_libfunc;
6611
6612 powi_optab->libcall_basename = "powi";
6613 powi_optab->libcall_suffix = '2';
6614 powi_optab->libcall_gen = gen_fp_libfunc;
6615
6616 /* Conversions. */
6617 sfloat_optab->libcall_basename = "float";
6618 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6619 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6620 sfix_optab->libcall_basename = "fix";
6621 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6622 ufix_optab->libcall_basename = "fixuns";
6623 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6624 lrint_optab->libcall_basename = "lrint";
6625 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6626 lround_optab->libcall_basename = "lround";
6627 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6628 lfloor_optab->libcall_basename = "lfloor";
6629 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6630 lceil_optab->libcall_basename = "lceil";
6631 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6632
6633 /* sext_optab is also used for FLOAT_EXTEND; trunc_optab for FLOAT_TRUNCATE. */
6634 sext_optab->libcall_basename = "extend";
6635 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6636 trunc_optab->libcall_basename = "trunc";
6637 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6638
6639 /* Conversions for fixed-point modes and other modes. */
6640 fract_optab->libcall_basename = "fract";
6641 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6642 satfract_optab->libcall_basename = "satfract";
6643 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6644 fractuns_optab->libcall_basename = "fractuns";
6645 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6646 satfractuns_optab->libcall_basename = "satfractuns";
6647 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6648
6649 /* The ffs function operates on `int'. Fall back on it if we do not
6650 have a libgcc2 function for that width. */
6651 if (INT_TYPE_SIZE < BITS_PER_WORD)
6652 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6653 "ffs");
6654
6655 /* Explicitly initialize the bswap libfuncs since we need them to be
6656 valid for things other than word_mode. */
6657 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6658 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6659
6660 /* Use cabs for double complex abs, since systems generally have cabs.
6661 Don't define any libcall for float complex, so that cabs will be used. */
6662 if (complex_double_type_node)
6663 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6664
6665 abort_libfunc = init_one_libfunc ("abort");
6666 memcpy_libfunc = init_one_libfunc ("memcpy");
6667 memmove_libfunc = init_one_libfunc ("memmove");
6668 memcmp_libfunc = init_one_libfunc ("memcmp");
6669 memset_libfunc = init_one_libfunc ("memset");
6670 setbits_libfunc = init_one_libfunc ("__setbits");
6671
6672 #ifndef DONT_USE_BUILTIN_SETJMP
6673 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6674 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6675 #else
6676 setjmp_libfunc = init_one_libfunc ("setjmp");
6677 longjmp_libfunc = init_one_libfunc ("longjmp");
6678 #endif
6679 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6680 unwind_sjlj_unregister_libfunc
6681 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6682
6683 /* For function entry/exit instrumentation. */
6684 profile_function_entry_libfunc
6685 = init_one_libfunc ("__cyg_profile_func_enter");
6686 profile_function_exit_libfunc
6687 = init_one_libfunc ("__cyg_profile_func_exit");
6688
6689 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6690
6691 /* Allow the target to add more libcalls or rename some, etc. */
6692 targetm.init_libfuncs ();
6693
6694 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
6695 reinit = true;
6696 #endif
6697 }
6698
6699 /* Print information about the current contents of the optabs on
6700 STDERR. */
6701
6702 DEBUG_FUNCTION void
6703 debug_optab_libfuncs (void)
6704 {
6705 int i;
6706 int j;
6707 int k;
6708
6709 /* Dump the arithmetic optabs. */
6710 for (i = 0; i != (int) OTI_MAX; i++)
6711 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6712 {
6713 optab o;
6714 rtx l;
6715
6716 o = &optab_table[i];
6717 l = optab_libfunc (o, (enum machine_mode) j);
6718 if (l)
6719 {
6720 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6721 fprintf (stderr, "%s\t%s:\t%s\n",
6722 GET_RTX_NAME (o->code),
6723 GET_MODE_NAME (j),
6724 XSTR (l, 0));
6725 }
6726 }
6727
6728 /* Dump the conversion optabs. */
6729 for (i = 0; i < (int) COI_MAX; ++i)
6730 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6731 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6732 {
6733 convert_optab o;
6734 rtx l;
6735
6736 o = &convert_optab_table[i];
6737 l = convert_optab_libfunc (o, (enum machine_mode) j,
6738 (enum machine_mode) k);
6739 if (l)
6740 {
6741 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6742 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6743 GET_RTX_NAME (o->code),
6744 GET_MODE_NAME (j),
6745 GET_MODE_NAME (k),
6746 XSTR (l, 0));
6747 }
6748 }
6749 }
6750
6751 \f
6752 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6753 CODE. Return 0 on failure. */
6754
6755 rtx
6756 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6757 {
6758 enum machine_mode mode = GET_MODE (op1);
6759 enum insn_code icode;
6760 rtx insn;
6761 rtx trap_rtx;
6762
6763 if (mode == VOIDmode)
6764 return 0;
6765
6766 icode = optab_handler (ctrap_optab, mode)->insn_code;
6767 if (icode == CODE_FOR_nothing)
6768 return 0;
6769
6770 /* Some targets only accept a zero trap code. */
6771 if (insn_data[icode].operand[3].predicate
6772 && !insn_data[icode].operand[3].predicate (tcode, VOIDmode))
6773 return 0;
6774
6775 do_pending_stack_adjust ();
6776 start_sequence ();
6777 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6778 &trap_rtx, &mode);
6779 if (!trap_rtx)
6780 insn = NULL_RTX;
6781 else
6782 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6783 tcode);
6784
6785 /* If that failed, then give up. */
6786 if (insn == 0)
6787 {
6788 end_sequence ();
6789 return 0;
6790 }
6791
6792 emit_insn (insn);
6793 insn = get_insns ();
6794 end_sequence ();
6795 return insn;
6796 }
6797
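/* A hypothetical caller sketch (not from this file): emit a conditional
   trap when REG is zero, falling back silently if the target lacks a
   ctrap pattern or rejects the trap code:

       rtx seq = gen_cond_trap (EQ, reg, const0_rtx, const0_rtx);
       if (seq)
         emit_insn (seq);   */
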
6798 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6799 or unsigned operation code. */
6800
6801 static enum rtx_code
6802 get_rtx_code (enum tree_code tcode, bool unsignedp)
6803 {
6804 enum rtx_code code;
6805 switch (tcode)
6806 {
6807 case EQ_EXPR:
6808 code = EQ;
6809 break;
6810 case NE_EXPR:
6811 code = NE;
6812 break;
6813 case LT_EXPR:
6814 code = unsignedp ? LTU : LT;
6815 break;
6816 case LE_EXPR:
6817 code = unsignedp ? LEU : LE;
6818 break;
6819 case GT_EXPR:
6820 code = unsignedp ? GTU : GT;
6821 break;
6822 case GE_EXPR:
6823 code = unsignedp ? GEU : GE;
6824 break;
6825
6826 case UNORDERED_EXPR:
6827 code = UNORDERED;
6828 break;
6829 case ORDERED_EXPR:
6830 code = ORDERED;
6831 break;
6832 case UNLT_EXPR:
6833 code = UNLT;
6834 break;
6835 case UNLE_EXPR:
6836 code = UNLE;
6837 break;
6838 case UNGT_EXPR:
6839 code = UNGT;
6840 break;
6841 case UNGE_EXPR:
6842 code = UNGE;
6843 break;
6844 case UNEQ_EXPR:
6845 code = UNEQ;
6846 break;
6847 case LTGT_EXPR:
6848 code = LTGT;
6849 break;
6850
6851 default:
6852 gcc_unreachable ();
6853 }
6854 return code;
6855 }
6856
6857 /* Return a comparison rtx for COND. Use UNSIGNEDP to select signed or
6858 unsigned operators. Do not generate a compare instruction. */
6859
6860 static rtx
6861 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6862 {
6863 enum rtx_code rcode;
6864 tree t_op0, t_op1;
6865 rtx rtx_op0, rtx_op1;
6866
6867 /* This is unlikely to fail: while generating a VEC_COND_EXPR, the auto
6868 vectorizer ensures that the condition is a relational operation. */
6869 gcc_assert (COMPARISON_CLASS_P (cond));
6870
6871 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6872 t_op0 = TREE_OPERAND (cond, 0);
6873 t_op1 = TREE_OPERAND (cond, 1);
6874
6875 /* Expand operands. */
6876 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6877 EXPAND_STACK_PARM);
6878 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6879 EXPAND_STACK_PARM);
6880
6881 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6882 && GET_MODE (rtx_op0) != VOIDmode)
6883 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6884
6885 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6886 && GET_MODE (rtx_op1) != VOIDmode)
6887 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6888
6889 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6890 }
6891
6892 /* Return insn code for TYPE, the type of a VEC_COND_EXPR. */
6893
6894 static inline enum insn_code
6895 get_vcond_icode (tree type, enum machine_mode mode)
6896 {
6897 enum insn_code icode = CODE_FOR_nothing;
6898
6899 if (TYPE_UNSIGNED (type))
6900 icode = vcondu_gen_code[mode];
6901 else
6902 icode = vcond_gen_code[mode];
6903 return icode;
6904 }
6905
6906 /* Return TRUE iff appropriate vector insns are available
6907 for a vector cond expr with type TYPE in mode VMODE. */
6908
6909 bool
6910 expand_vec_cond_expr_p (tree type, enum machine_mode vmode)
6911 {
6912 if (get_vcond_icode (type, vmode) == CODE_FOR_nothing)
6913 return false;
6914 return true;
6915 }
6916
6917 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
6918 three operands. */
6919
6920 rtx
6921 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
6922 rtx target)
6923 {
6924 enum insn_code icode;
6925 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6926 enum machine_mode mode = TYPE_MODE (vec_cond_type);
6927 bool unsignedp = TYPE_UNSIGNED (vec_cond_type);
6928
6929 icode = get_vcond_icode (vec_cond_type, mode);
6930 if (icode == CODE_FOR_nothing)
6931 return 0;
6932
6933 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6934 target = gen_reg_rtx (mode);
6935
6936 /* Get comparison rtx. First expand both cond expr operands. */
6937 comparison = vector_compare_rtx (op0,
6938 unsignedp, icode);
6939 cc_op0 = XEXP (comparison, 0);
6940 cc_op1 = XEXP (comparison, 1);
6941 /* Expand both operands and force them into registers, if required. */
6942 rtx_op1 = expand_normal (op1);
6943 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6944 && mode != VOIDmode)
6945 rtx_op1 = force_reg (mode, rtx_op1);
6946
6947 rtx_op2 = expand_normal (op2);
6948 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6949 && mode != VOIDmode)
6950 rtx_op2 = force_reg (mode, rtx_op2);
6951
6952 /* Emit instruction! */
6953 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6954 comparison, cc_op0, cc_op1));
6955
6956 return target;
6957 }
6958
6959 \f
6960 /* This is an internal subroutine of the other compare_and_swap expanders.
6961 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6962 operation. TARGET is an optional place to store the value result of
6963 the operation. ICODE is the particular instruction to expand. Return
6964 the result of the operation. */
6965
6966 static rtx
6967 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6968 rtx target, enum insn_code icode)
6969 {
6970 enum machine_mode mode = GET_MODE (mem);
6971 rtx insn;
6972
6973 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6974 target = gen_reg_rtx (mode);
6975
6976 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6977 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6978 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6979 old_val = force_reg (mode, old_val);
6980
6981 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6982 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6983 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6984 new_val = force_reg (mode, new_val);
6985
6986 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6987 if (insn == NULL_RTX)
6988 return NULL_RTX;
6989 emit_insn (insn);
6990
6991 return target;
6992 }
6993
6994 /* Expand a compare-and-swap operation and return its value. */
6995
6996 rtx
6997 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6998 {
6999 enum machine_mode mode = GET_MODE (mem);
7000 enum insn_code icode = sync_compare_and_swap[mode];
7001
7002 if (icode == CODE_FOR_nothing)
7003 return NULL_RTX;
7004
7005 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
7006 }
7007
7008 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
7009 pattern. */
7010
7011 static void
7012 find_cc_set (rtx x, const_rtx pat, void *data)
7013 {
7014 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7015 && GET_CODE (pat) == SET)
7016 {
7017 rtx *p_cc_reg = (rtx *) data;
7018 gcc_assert (!*p_cc_reg);
7019 *p_cc_reg = x;
7020 }
7021 }
7022
7023 /* Expand a compare-and-swap operation and store true into the result if
7024 the operation was successful and false otherwise. Return the result.
7025 Unlike other routines, TARGET is not optional. */
7026
7027 rtx
7028 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
7029 {
7030 enum machine_mode mode = GET_MODE (mem);
7031 enum insn_code icode;
7032 rtx subtarget, seq, cc_reg;
7033
7034 /* If the target supports a compare-and-swap pattern that simultaneously
7035 sets some flag for success, then use it. Otherwise use the regular
7036 compare-and-swap and follow that immediately with a compare insn. */
7037 icode = sync_compare_and_swap[mode];
7038 if (icode == CODE_FOR_nothing)
7039 return NULL_RTX;
7040
7041 do
7042 {
7043 start_sequence ();
7044 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7045 NULL_RTX, icode);
7046 cc_reg = NULL_RTX;
7047 if (subtarget == NULL_RTX)
7048 {
7049 end_sequence ();
7050 return NULL_RTX;
7051 }
7052
7053 if (have_insn_for (COMPARE, CCmode))
7054 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7055 seq = get_insns ();
7056 end_sequence ();
7057
7058 /* OLD_VAL is a MEM that the compare below would re-read, possibly seeing a changed value.  Force it into a register and try again.  */
7059 if (!cc_reg && MEM_P (old_val))
7060 {
7061 seq = NULL_RTX;
7062 old_val = force_reg (mode, old_val);
7063 }
7064 }
7065 while (!seq);
7066
7067 emit_insn (seq);
7068 if (cc_reg)
7069 return emit_store_flag_force (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
7070 else
7071 return emit_store_flag_force (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
7072 }
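/* Note on the function above: the boolean result is produced in one of
   two ways.  If the compare-and-swap pattern also set a MODE_CC register
   (detected by find_cc_set), the success flag is read directly from that
   register with a store-flag insn; otherwise the value returned by the
   swap is compared for equality against OLD_VAL.  This restates the logic
   above rather than adding to it.  */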
7073
7074 /* This is a helper function for the other atomic operations. This function
7075 emits a loop that contains SEQ that iterates until a compare-and-swap
7076 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7077 a set of instructions that takes a value from OLD_REG as an input and
7078 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7079 set to the current contents of MEM. After SEQ, a compare-and-swap will
7080 attempt to update MEM with NEW_REG. The function returns true when the
7081 loop was generated successfully. */
7082
7083 static bool
7084 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7085 {
7086 enum machine_mode mode = GET_MODE (mem);
7087 enum insn_code icode;
7088 rtx label, cmp_reg, subtarget, cc_reg;
7089
7090 /* The loop we want to generate looks like
7091
7092 cmp_reg = mem;
7093 label:
7094 old_reg = cmp_reg;
7095 seq;
7096 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7097 if (cmp_reg != old_reg)
7098 goto label;
7099
7100 Note that we only do the plain load from memory once. Subsequent
7101 iterations use the value loaded by the compare-and-swap pattern. */
7102
7103 label = gen_label_rtx ();
7104 cmp_reg = gen_reg_rtx (mode);
7105
7106 emit_move_insn (cmp_reg, mem);
7107 emit_label (label);
7108 emit_move_insn (old_reg, cmp_reg);
7109 if (seq)
7110 emit_insn (seq);
7111
7112 /* If the target supports a compare-and-swap pattern that simultaneously
7113 sets some flag for success, then use it. Otherwise use the regular
7114 compare-and-swap and follow that immediately with a compare insn. */
7115 icode = sync_compare_and_swap[mode];
7116 if (icode == CODE_FOR_nothing)
7117 return false;
7118
7119 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7120 cmp_reg, icode);
7121 if (subtarget == NULL_RTX)
7122 return false;
7123
7124 cc_reg = NULL_RTX;
7125 if (have_insn_for (COMPARE, CCmode))
7126 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7127 if (cc_reg)
7128 {
7129 cmp_reg = cc_reg;
7130 old_reg = const0_rtx;
7131 }
7132 else
7133 {
7134 if (subtarget != cmp_reg)
7135 emit_move_insn (cmp_reg, subtarget);
7136 }
7137
7138 /* ??? Mark this jump predicted not taken? */
7139 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx, GET_MODE (cmp_reg), 1,
7140 label);
7141 return true;
7142 }
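/* A minimal sketch of how the callers below use this helper: the
   arithmetic that derives the new value from the old one is built as a
   detached insn sequence and handed in as SEQ, e.g. for an atomic add

       rtx old_reg = gen_reg_rtx (mode), new_reg, seq;
       start_sequence ();
       new_reg = expand_simple_binop (mode, PLUS, old_reg, val, NULL_RTX,
                                      true, OPTAB_LIB_WIDEN);
       seq = get_insns ();
       end_sequence ();
       ok = expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);

   MEM and VAL stand for the operands of the enclosing sync operation;
   this mirrors the PLUS case of expand_sync_operation below and is shown
   only as an illustration.  */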
7143
7144 /* This function generates the atomic operation MEM CODE= VAL. In this
7145 case, we do not care about any resulting value.  Returns const0_rtx on
7146 success and NULL_RTX if we cannot generate the operation.  */
7147
7148 rtx
7149 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7150 {
7151 enum machine_mode mode = GET_MODE (mem);
7152 enum insn_code icode;
7153 rtx insn;
7154
7155 /* Look to see if the target supports the operation directly. */
7156 switch (code)
7157 {
7158 case PLUS:
7159 icode = sync_add_optab[mode];
7160 break;
7161 case IOR:
7162 icode = sync_ior_optab[mode];
7163 break;
7164 case XOR:
7165 icode = sync_xor_optab[mode];
7166 break;
7167 case AND:
7168 icode = sync_and_optab[mode];
7169 break;
7170 case NOT:
7171 icode = sync_nand_optab[mode];
7172 break;
7173
7174 case MINUS:
7175 icode = sync_sub_optab[mode];
7176 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7177 {
7178 icode = sync_add_optab[mode];
7179 if (icode != CODE_FOR_nothing)
7180 {
7181 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7182 code = PLUS;
7183 }
7184 }
7185 break;
7186
7187 default:
7188 gcc_unreachable ();
7189 }
7190
7191 /* Generate the direct operation, if present. */
7192 if (icode != CODE_FOR_nothing)
7193 {
7194 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7195 val = convert_modes (mode, GET_MODE (val), val, 1);
7196 if (!insn_data[icode].operand[1].predicate (val, mode))
7197 val = force_reg (mode, val);
7198
7199 insn = GEN_FCN (icode) (mem, val);
7200 if (insn)
7201 {
7202 emit_insn (insn);
7203 return const0_rtx;
7204 }
7205 }
7206
7207 /* Failing that, generate a compare-and-swap loop in which we perform the
7208 operation with normal arithmetic instructions. */
7209 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7210 {
7211 rtx t0 = gen_reg_rtx (mode), t1;
7212
7213 start_sequence ();
7214
7215 t1 = t0;
7216 if (code == NOT)
7217 {
7218 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7219 true, OPTAB_LIB_WIDEN);
7220 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7221 }
7222 else
7223 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7224 true, OPTAB_LIB_WIDEN);
7225 insn = get_insns ();
7226 end_sequence ();
7227
7228 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7229 return const0_rtx;
7230 }
7231
7232 return NULL_RTX;
7233 }
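/* Note on the NOT case above: the rtx code NOT stands in for the NAND
   builtins, so the compare-and-swap fallback computes t1 = ~(t0 & val),
   i.e. an AND followed by a one's complement.  This matches the GCC 4.4
   and later definition of __sync_fetch_and_nand.  */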
7234
7235 /* This function generates the atomic operation MEM CODE= VAL. In this
7236 case, we do care about the resulting value: if AFTER is true then
7237 return the value MEM holds after the operation; if AFTER is false,
7238 return the value MEM holds before the operation.  TARGET is an
7239 optional place for the result value to be stored. */
7240
7241 rtx
7242 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7243 bool after, rtx target)
7244 {
7245 enum machine_mode mode = GET_MODE (mem);
7246 enum insn_code old_code, new_code, icode;
7247 bool compensate;
7248 rtx insn;
7249
7250 /* Look to see if the target supports the operation directly. */
7251 switch (code)
7252 {
7253 case PLUS:
7254 old_code = sync_old_add_optab[mode];
7255 new_code = sync_new_add_optab[mode];
7256 break;
7257 case IOR:
7258 old_code = sync_old_ior_optab[mode];
7259 new_code = sync_new_ior_optab[mode];
7260 break;
7261 case XOR:
7262 old_code = sync_old_xor_optab[mode];
7263 new_code = sync_new_xor_optab[mode];
7264 break;
7265 case AND:
7266 old_code = sync_old_and_optab[mode];
7267 new_code = sync_new_and_optab[mode];
7268 break;
7269 case NOT:
7270 old_code = sync_old_nand_optab[mode];
7271 new_code = sync_new_nand_optab[mode];
7272 break;
7273
7274 case MINUS:
7275 old_code = sync_old_sub_optab[mode];
7276 new_code = sync_new_sub_optab[mode];
7277 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7278 || CONST_INT_P (val))
7279 {
7280 old_code = sync_old_add_optab[mode];
7281 new_code = sync_new_add_optab[mode];
7282 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7283 {
7284 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7285 code = PLUS;
7286 }
7287 }
7288 break;
7289
7290 default:
7291 gcc_unreachable ();
7292 }
7293
7294 /* If the target supports the proper new/old operation, great.  But if
7295 we only support the opposite old/new operation, check to see if we
7296 can compensate.  When the old value is supported, we can always
7297 recover the new value by repeating the operation with normal
7298 arithmetic.  When only the new value is supported, we can compensate
7299 only if the operation is reversible.  */
7300 compensate = false;
7301 if (after)
7302 {
7303 icode = new_code;
7304 if (icode == CODE_FOR_nothing)
7305 {
7306 icode = old_code;
7307 if (icode != CODE_FOR_nothing)
7308 compensate = true;
7309 }
7310 }
7311 else
7312 {
7313 icode = old_code;
7314 if (icode == CODE_FOR_nothing
7315 && (code == PLUS || code == MINUS || code == XOR))
7316 {
7317 icode = new_code;
7318 if (icode != CODE_FOR_nothing)
7319 compensate = true;
7320 }
7321 }
7322
7323 /* If we found something supported, great. */
7324 if (icode != CODE_FOR_nothing)
7325 {
7326 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7327 target = gen_reg_rtx (mode);
7328
7329 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7330 val = convert_modes (mode, GET_MODE (val), val, 1);
7331 if (!insn_data[icode].operand[2].predicate (val, mode))
7332 val = force_reg (mode, val);
7333
7334 insn = GEN_FCN (icode) (target, mem, val);
7335 if (insn)
7336 {
7337 emit_insn (insn);
7338
7339 /* If we need to compensate for using an operation with the
7340 wrong return value, do so now. */
7341 if (compensate)
7342 {
7343 if (!after)
7344 {
7345 if (code == PLUS)
7346 code = MINUS;
7347 else if (code == MINUS)
7348 code = PLUS;
7349 }
7350
7351 if (code == NOT)
7352 {
7353 target = expand_simple_binop (mode, AND, target, val,
7354 NULL_RTX, true,
7355 OPTAB_LIB_WIDEN);
7356 target = expand_simple_unop (mode, code, target,
7357 NULL_RTX, true);
7358 }
7359 else
7360 target = expand_simple_binop (mode, code, target, val,
7361 NULL_RTX, true,
7362 OPTAB_LIB_WIDEN);
7363 }
7364
7365 return target;
7366 }
7367 }
7368
7369 /* Failing that, generate a compare-and-swap loop in which we perform the
7370 operation with normal arithmetic instructions. */
7371 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7372 {
7373 rtx t0 = gen_reg_rtx (mode), t1;
7374
7375 if (!target || !register_operand (target, mode))
7376 target = gen_reg_rtx (mode);
7377
7378 start_sequence ();
7379
7380 if (!after)
7381 emit_move_insn (target, t0);
7382 t1 = t0;
7383 if (code == NOT)
7384 {
7385 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7386 true, OPTAB_LIB_WIDEN);
7387 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7388 }
7389 else
7390 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7391 true, OPTAB_LIB_WIDEN);
7392 if (after)
7393 emit_move_insn (target, t1);
7394
7395 insn = get_insns ();
7396 end_sequence ();
7397
7398 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7399 return target;
7400 }
7401
7402 return NULL_RTX;
7403 }
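/* A worked example of the compensation logic above: suppose the target
   only provides the "new value" pattern for PLUS while the caller wants
   the old value (AFTER is false).  The code then flips PLUS to MINUS and
   recovers

       old value = new value - VAL

   with an ordinary subtraction after the atomic insn.  When only the
   "old value" pattern exists and AFTER is true, the operation is simply
   re-applied to the result; for NAND (code NOT) that means recomputing
   ~(target & VAL).  NAND is not reversible, so it is never compensated
   in the !AFTER direction.  */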
7404
7405 /* This function expands a test-and-set operation. Ideally we atomically
7406 store VAL in MEM and return the previous value in MEM. Some targets
7407 may not support this operation, and only support storing the constant 1 in VAL;
7408 in this case the return value will be 0/1, but the exact value
7409 stored in MEM is target defined.  TARGET is an optional place to stick
7410 the return value. */
7411
7412 rtx
7413 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7414 {
7415 enum machine_mode mode = GET_MODE (mem);
7416 enum insn_code icode;
7417 rtx insn;
7418
7419 /* If the target supports the test-and-set directly, great. */
7420 icode = sync_lock_test_and_set[mode];
7421 if (icode != CODE_FOR_nothing)
7422 {
7423 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7424 target = gen_reg_rtx (mode);
7425
7426 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7427 val = convert_modes (mode, GET_MODE (val), val, 1);
7428 if (!insn_data[icode].operand[2].predicate (val, mode))
7429 val = force_reg (mode, val);
7430
7431 insn = GEN_FCN (icode) (target, mem, val);
7432 if (insn)
7433 {
7434 emit_insn (insn);
7435 return target;
7436 }
7437 }
7438
7439 /* Otherwise, use a compare-and-swap loop for the exchange. */
7440 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7441 {
7442 if (!target || !register_operand (target, mode))
7443 target = gen_reg_rtx (mode);
7444 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7445 val = convert_modes (mode, GET_MODE (val), val, 1);
7446 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7447 return target;
7448 }
7449
7450 return NULL_RTX;
7451 }
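/* A hedged usage note: this routine backs the __sync_lock_test_and_set
   builtin, which the GCC documentation specifies as an atomic exchange
   with acquire semantics rather than a full barrier, and for which
   targets may support only the constant 1 as the stored value, as the
   function comment above already explains.  */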
7452
7453 #include "gt-optabs.h"