gcc/optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 #if GCC_VERSION >= 4000
58 __extension__ struct optab optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
60 = CODE_FOR_nothing };
61 #else
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab optab_table[OTI_MAX];
64 #endif
65
66 rtx libfunc_table[LTI_MAX];
67
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000
70 __extension__ struct convert_optab convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
73 = CODE_FOR_nothing };
74 #else
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab convert_optab_table[COI_MAX];
77 #endif
78
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
81
82 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
83 gives the gen_function to make a branch to test that condition. */
84
85 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
86
87 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
88 gives the insn code to make a store-condition insn
89 to test that condition. */
90
91 enum insn_code setcc_gen_code[NUM_RTX_CODE];
92
93 #ifdef HAVE_conditional_move
94 /* Indexed by the machine mode, gives the insn code to make a conditional
95 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
96 setcc_gen_code to cut down on the number of named patterns. Consider a day
97 when a lot more rtx codes are conditional (e.g. for the ARM). */
98
99 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
100 #endif
101
102 /* Indexed by the machine mode, gives the insn code for vector conditional
103 operation. */
104
105 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
106 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
107
108 /* The insn generating function cannot take an rtx_code argument.
109 TRAP_RTX is used as an rtx argument. Its code is replaced with
110 the code to be used in the trap insn and all other fields are ignored. */
111 static GTY(()) rtx trap_rtx;
112
113 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
114 enum machine_mode *, int *);
115 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
116
117 /* Debug facility for use in GDB. */
118 void debug_optab_libfuncs (void);
119
120 #ifndef HAVE_conditional_trap
121 #define HAVE_conditional_trap 0
122 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
123 #endif
124
125 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
126 #if ENABLE_DECIMAL_BID_FORMAT
127 #define DECIMAL_PREFIX "bid_"
128 #else
129 #define DECIMAL_PREFIX "dpd_"
130 #endif
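/* For illustration (assumption, not target documentation): the prefix chosen
   here is prepended when the decimal floating point libcall names are
   generated later in this file, so with BID selected a decimal addition
   libcall gets a name along the lines of "__bid_addsd3", while DPD would
   give a "dpd_"-prefixed name.  Exact names depend on the mode suffix and
   are only sketched here.  */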
131 \f
132
133 /* Info about a libfunc. We use the same hashtable for normal optabs and
134 conversion optabs. For a normal optab, mode2 is unused. */
135 struct libfunc_entry GTY(())
136 {
137 size_t optab;
138 enum machine_mode mode1, mode2;
139 rtx libfunc;
140 };
141
142 /* Hash table mapping optab/mode pairs to the corresponding libfunc. */
143 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
144
145 /* Hash function for libfunc_hash. */
146
147 static hashval_t
148 hash_libfunc (const void *p)
149 {
150 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
151
152 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
153 ^ e->optab);
154 }
155
156 /* Equality function for libfunc_hash. */
157
158 static int
159 eq_libfunc (const void *p, const void *q)
160 {
161 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
162 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
163
164 return (e1->optab == e2->optab
165 && e1->mode1 == e2->mode1
166 && e1->mode2 == e2->mode2);
167 }
168
169 /* Return the libfunc for the operation defined by OPTAB converting
170 from MODE2 to MODE1. Trigger lazy initialization if needed; return NULL
171 if no libfunc is available. */
172 rtx
173 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
174 enum machine_mode mode2)
175 {
176 struct libfunc_entry e;
177 struct libfunc_entry **slot;
178
179 e.optab = (size_t) (optab - &convert_optab_table[0]);
180 e.mode1 = mode1;
181 e.mode2 = mode2;
182 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
183 if (!slot)
184 {
185 if (optab->libcall_gen)
186 {
187 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
188 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
189 if (slot)
190 return (*slot)->libfunc;
191 else
192 return NULL;
193 }
194 return NULL;
195 }
196 return (*slot)->libfunc;
197 }
198
199 /* Return the libfunc for the operation defined by OPTAB in MODE.
200 Trigger lazy initialization if needed; return NULL if no libfunc is
201 available. */
202 rtx
203 optab_libfunc (optab optab, enum machine_mode mode)
204 {
205 struct libfunc_entry e;
206 struct libfunc_entry **slot;
207
208 e.optab = (size_t) (optab - &optab_table[0]);
209 e.mode1 = mode;
210 e.mode2 = VOIDmode;
211 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
212 if (!slot)
213 {
214 if (optab->libcall_gen)
215 {
216 optab->libcall_gen (optab, optab->libcall_basename,
217 optab->libcall_suffix, mode);
218 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
219 &e, NO_INSERT);
220 if (slot)
221 return (*slot)->libfunc;
222 else
223 return NULL;
224 }
225 return NULL;
226 }
227 return (*slot)->libfunc;
228 }
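/* For illustration (assuming a typical 32-bit target): a call such as
   optab_libfunc (smul_optab, DImode) triggers the lazy initialization the
   first time through and returns an rtx referring to "__muldi3", while a
   mode with no library support simply yields NULL.  Exact names depend on
   the target's libgcc configuration.  */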
229
230 \f
231 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
232 the result of operation CODE applied to OP0 (and OP1 if it is a binary
233 operation).
234
235 If the last insn does not set TARGET, don't do anything, but return 1.
236
237 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
238 don't add the REG_EQUAL note but return 0. Our caller can then try
239 again, ensuring that TARGET is not one of the operands. */
240
241 static int
242 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
243 {
244 rtx last_insn, insn, set;
245 rtx note;
246
247 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
248
249 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
250 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
251 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
252 && GET_RTX_CLASS (code) != RTX_COMPARE
253 && GET_RTX_CLASS (code) != RTX_UNARY)
254 return 1;
255
256 if (GET_CODE (target) == ZERO_EXTRACT)
257 return 1;
258
259 for (last_insn = insns;
260 NEXT_INSN (last_insn) != NULL_RTX;
261 last_insn = NEXT_INSN (last_insn))
262 ;
263
264 set = single_set (last_insn);
265 if (set == NULL_RTX)
266 return 1;
267
268 if (! rtx_equal_p (SET_DEST (set), target)
269 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
270 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
271 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
272 return 1;
273
274 /* If TARGET is in OP0 or OP1, check if anything in INSNS sets TARGET
275 besides the last insn. */
276 if (reg_overlap_mentioned_p (target, op0)
277 || (op1 && reg_overlap_mentioned_p (target, op1)))
278 {
279 insn = PREV_INSN (last_insn);
280 while (insn != NULL_RTX)
281 {
282 if (reg_set_p (target, insn))
283 return 0;
284
285 insn = PREV_INSN (insn);
286 }
287 }
288
289 if (GET_RTX_CLASS (code) == RTX_UNARY)
290 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
291 else
292 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
293
294 set_unique_reg_note (last_insn, REG_EQUAL, note);
295
296 return 1;
297 }
298 \f
299 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
300 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
301 not actually do a sign-extend or zero-extend, but can leave the
302 higher-order bits of the result rtx undefined, for example, in the case
303 of logical operations, but not right shifts. */
304
305 static rtx
306 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
307 int unsignedp, int no_extend)
308 {
309 rtx result;
310
311 /* If we don't have to extend and this is a constant, return it. */
312 if (no_extend && GET_MODE (op) == VOIDmode)
313 return op;
314
315 /* If we must extend, do so. If OP is a SUBREG for a promoted object, also
316 extend since it will be more efficient to do so unless the signedness of
317 a promoted object differs from our extension. */
318 if (! no_extend
319 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
320 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
321 return convert_modes (mode, oldmode, op, unsignedp);
322
323 /* If MODE is no wider than a single word, we return a paradoxical
324 SUBREG. */
325 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
326 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
327
328 /* Otherwise, get an object of MODE, clobber it, and set the low-order
329 part to OP. */
330
331 result = gen_reg_rtx (mode);
332 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
333 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
334 return result;
335 }
336 \f
337 /* Return the optab used for computing the operation given by
338 the tree code, CODE. This function is not always usable (for
339 example, it cannot give complete results for multiplication
340 or division) but probably ought to be relied on more widely
341 throughout the expander. */
342 optab
343 optab_for_tree_code (enum tree_code code, const_tree type)
344 {
345 bool trapv;
346 switch (code)
347 {
348 case BIT_AND_EXPR:
349 return and_optab;
350
351 case BIT_IOR_EXPR:
352 return ior_optab;
353
354 case BIT_NOT_EXPR:
355 return one_cmpl_optab;
356
357 case BIT_XOR_EXPR:
358 return xor_optab;
359
360 case TRUNC_MOD_EXPR:
361 case CEIL_MOD_EXPR:
362 case FLOOR_MOD_EXPR:
363 case ROUND_MOD_EXPR:
364 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
365
366 case RDIV_EXPR:
367 case TRUNC_DIV_EXPR:
368 case CEIL_DIV_EXPR:
369 case FLOOR_DIV_EXPR:
370 case ROUND_DIV_EXPR:
371 case EXACT_DIV_EXPR:
372 if (TYPE_SATURATING(type))
373 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
374 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
375
376 case LSHIFT_EXPR:
377 if (TYPE_SATURATING(type))
378 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
379 return ashl_optab;
380
381 case RSHIFT_EXPR:
382 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
383
384 case LROTATE_EXPR:
385 return rotl_optab;
386
387 case RROTATE_EXPR:
388 return rotr_optab;
389
390 case MAX_EXPR:
391 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
392
393 case MIN_EXPR:
394 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
395
396 case REALIGN_LOAD_EXPR:
397 return vec_realign_load_optab;
398
399 case WIDEN_SUM_EXPR:
400 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
401
402 case DOT_PROD_EXPR:
403 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
404
405 case REDUC_MAX_EXPR:
406 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
407
408 case REDUC_MIN_EXPR:
409 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
410
411 case REDUC_PLUS_EXPR:
412 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
413
414 case VEC_LSHIFT_EXPR:
415 return vec_shl_optab;
416
417 case VEC_RSHIFT_EXPR:
418 return vec_shr_optab;
419
420 case VEC_WIDEN_MULT_HI_EXPR:
421 return TYPE_UNSIGNED (type) ?
422 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
423
424 case VEC_WIDEN_MULT_LO_EXPR:
425 return TYPE_UNSIGNED (type) ?
426 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
427
428 case VEC_UNPACK_HI_EXPR:
429 return TYPE_UNSIGNED (type) ?
430 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
431
432 case VEC_UNPACK_LO_EXPR:
433 return TYPE_UNSIGNED (type) ?
434 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
435
436 case VEC_UNPACK_FLOAT_HI_EXPR:
437 /* The signedness is determined from the input operand. */
438 return TYPE_UNSIGNED (type) ?
439 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
440
441 case VEC_UNPACK_FLOAT_LO_EXPR:
442 /* The signedness is determined from the input operand. */
443 return TYPE_UNSIGNED (type) ?
444 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
445
446 case VEC_PACK_TRUNC_EXPR:
447 return vec_pack_trunc_optab;
448
449 case VEC_PACK_SAT_EXPR:
450 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
451
452 case VEC_PACK_FIX_TRUNC_EXPR:
453 /* The signedness is determined from the output operand. */
454 return TYPE_UNSIGNED (type) ?
455 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
456
457 default:
458 break;
459 }
460
461 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
462 switch (code)
463 {
464 case POINTER_PLUS_EXPR:
465 case PLUS_EXPR:
466 if (TYPE_SATURATING(type))
467 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
468 return trapv ? addv_optab : add_optab;
469
470 case MINUS_EXPR:
471 if (TYPE_SATURATING(type))
472 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
473 return trapv ? subv_optab : sub_optab;
474
475 case MULT_EXPR:
476 if (TYPE_SATURATING(type))
477 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
478 return trapv ? smulv_optab : smul_optab;
479
480 case NEGATE_EXPR:
481 if (TYPE_SATURATING(type))
482 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
483 return trapv ? negv_optab : neg_optab;
484
485 case ABS_EXPR:
486 return trapv ? absv_optab : abs_optab;
487
488 case VEC_EXTRACT_EVEN_EXPR:
489 return vec_extract_even_optab;
490
491 case VEC_EXTRACT_ODD_EXPR:
492 return vec_extract_odd_optab;
493
494 case VEC_INTERLEAVE_HIGH_EXPR:
495 return vec_interleave_high_optab;
496
497 case VEC_INTERLEAVE_LOW_EXPR:
498 return vec_interleave_low_optab;
499
500 default:
501 return NULL;
502 }
503 }
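/* For illustration: a PLUS_EXPR on an ordinary signed integer type maps to
   add_optab, or to addv_optab when the type traps on overflow; an unsigned
   saturating fixed-point type maps to usadd_optab instead, per the cases
   above.  */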
504 \f
505
506 /* Expand vector widening operations.
507
508 There are two different classes of operations handled here:
509 1) Operations whose result is wider than all the arguments to the operation.
510 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
511 In this case OP0 and optionally OP1 would be initialized,
512 but WIDE_OP wouldn't (not relevant for this case).
513 2) Operations whose result is of the same size as the last argument to the
514 operation, but wider than all the other arguments to the operation.
515 Examples: WIDEN_SUM_EXPR, DOT_PROD_EXPR.
516 In this case WIDE_OP, OP0 and optionally OP1 would be initialized.
517
518 E.g., when called to expand the following operations, this is how
519 the arguments will be initialized:
520 nops OP0 OP1 WIDE_OP
521 widening-sum 2 oprnd0 - oprnd1
522 widening-dot-product 3 oprnd0 oprnd1 oprnd2
523 widening-mult 2 oprnd0 oprnd1 -
524 type-promotion (vec-unpack) 1 oprnd0 - - */
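/* For illustration (an informal example): a VEC_WIDEN_MULT_HI_EXPR on two
   V8HImode operands arrives here with nops == 2, OP0 and OP1 set and
   WIDE_OP null, and the result is produced in the mode of operand 0 of the
   selected insn (typically V4SImode on targets providing the pattern).  */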
525
526 rtx
527 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
528 int unsignedp)
529 {
530 tree oprnd0, oprnd1, oprnd2;
531 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
532 optab widen_pattern_optab;
533 int icode;
534 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
535 rtx temp;
536 rtx pat;
537 rtx xop0, xop1, wxop;
538 int nops = TREE_OPERAND_LENGTH (exp);
539
540 oprnd0 = TREE_OPERAND (exp, 0);
541 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
542 widen_pattern_optab =
543 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
544 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
545 gcc_assert (icode != CODE_FOR_nothing);
546 xmode0 = insn_data[icode].operand[1].mode;
547
548 if (nops >= 2)
549 {
550 oprnd1 = TREE_OPERAND (exp, 1);
551 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
552 xmode1 = insn_data[icode].operand[2].mode;
553 }
554
555 /* The last operand is of a wider mode than the rest of the operands. */
556 if (nops == 2)
557 {
558 wmode = tmode1;
559 wxmode = xmode1;
560 }
561 else if (nops == 3)
562 {
563 gcc_assert (tmode1 == tmode0);
564 gcc_assert (op1);
565 oprnd2 = TREE_OPERAND (exp, 2);
566 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
567 wxmode = insn_data[icode].operand[3].mode;
568 }
569
570 if (!wide_op)
571 wmode = wxmode = insn_data[icode].operand[0].mode;
572
573 if (!target
574 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
575 temp = gen_reg_rtx (wmode);
576 else
577 temp = target;
578
579 xop0 = op0;
580 xop1 = op1;
581 wxop = wide_op;
582
583 /* In case the insn wants input operands in modes different from
584 those of the actual operands, convert the operands. It would
585 seem that we don't need to convert CONST_INTs, but we do, so
586 that they're properly zero-extended, sign-extended or truncated
587 for their mode. */
588
589 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
590 xop0 = convert_modes (xmode0,
591 GET_MODE (op0) != VOIDmode
592 ? GET_MODE (op0)
593 : tmode0,
594 xop0, unsignedp);
595
596 if (op1)
597 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
598 xop1 = convert_modes (xmode1,
599 GET_MODE (op1) != VOIDmode
600 ? GET_MODE (op1)
601 : tmode1,
602 xop1, unsignedp);
603
604 if (wide_op)
605 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
606 wxop = convert_modes (wxmode,
607 GET_MODE (wide_op) != VOIDmode
608 ? GET_MODE (wide_op)
609 : wmode,
610 wxop, unsignedp);
611
612 /* Now, if insn's predicates don't allow our operands, put them into
613 pseudo regs. */
614
615 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
616 && xmode0 != VOIDmode)
617 xop0 = copy_to_mode_reg (xmode0, xop0);
618
619 if (op1)
620 {
621 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
622 && xmode1 != VOIDmode)
623 xop1 = copy_to_mode_reg (xmode1, xop1);
624
625 if (wide_op)
626 {
627 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
628 && wxmode != VOIDmode)
629 wxop = copy_to_mode_reg (wxmode, wxop);
630
631 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
632 }
633 else
634 pat = GEN_FCN (icode) (temp, xop0, xop1);
635 }
636 else
637 {
638 if (wide_op)
639 {
640 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
641 && wxmode != VOIDmode)
642 wxop = copy_to_mode_reg (wxmode, wxop);
643
644 pat = GEN_FCN (icode) (temp, xop0, wxop);
645 }
646 else
647 pat = GEN_FCN (icode) (temp, xop0);
648 }
649
650 emit_insn (pat);
651 return temp;
652 }
653
654 /* Generate code to perform an operation specified by TERNARY_OPTAB
655 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
656
657 UNSIGNEDP is for the case where we have to widen the operands
658 to perform the operation. It says to use zero-extension.
659
660 If TARGET is nonzero, the value
661 is generated there, if it is convenient to do so.
662 In all cases an rtx is returned for the locus of the value;
663 this may or may not be TARGET. */
664
665 rtx
666 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
667 rtx op1, rtx op2, rtx target, int unsignedp)
668 {
669 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
670 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
671 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
672 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
673 rtx temp;
674 rtx pat;
675 rtx xop0 = op0, xop1 = op1, xop2 = op2;
676
677 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
678 != CODE_FOR_nothing);
679
680 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
681 temp = gen_reg_rtx (mode);
682 else
683 temp = target;
684
685 /* In case the insn wants input operands in modes different from
686 those of the actual operands, convert the operands. It would
687 seem that we don't need to convert CONST_INTs, but we do, so
688 that they're properly zero-extended, sign-extended or truncated
689 for their mode. */
690
691 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
692 xop0 = convert_modes (mode0,
693 GET_MODE (op0) != VOIDmode
694 ? GET_MODE (op0)
695 : mode,
696 xop0, unsignedp);
697
698 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
699 xop1 = convert_modes (mode1,
700 GET_MODE (op1) != VOIDmode
701 ? GET_MODE (op1)
702 : mode,
703 xop1, unsignedp);
704
705 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
706 xop2 = convert_modes (mode2,
707 GET_MODE (op2) != VOIDmode
708 ? GET_MODE (op2)
709 : mode,
710 xop2, unsignedp);
711
712 /* Now, if insn's predicates don't allow our operands, put them into
713 pseudo regs. */
714
715 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
716 && mode0 != VOIDmode)
717 xop0 = copy_to_mode_reg (mode0, xop0);
718
719 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
720 && mode1 != VOIDmode)
721 xop1 = copy_to_mode_reg (mode1, xop1);
722
723 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
724 && mode2 != VOIDmode)
725 xop2 = copy_to_mode_reg (mode2, xop2);
726
727 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
728
729 emit_insn (pat);
730 return temp;
731 }
732
733
734 /* Like expand_binop, but return a constant rtx if the result can be
735 calculated at compile time. The arguments and return value are
736 otherwise the same as for expand_binop. */
737
738 static rtx
739 simplify_expand_binop (enum machine_mode mode, optab binoptab,
740 rtx op0, rtx op1, rtx target, int unsignedp,
741 enum optab_methods methods)
742 {
743 if (CONSTANT_P (op0) && CONSTANT_P (op1))
744 {
745 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
746
747 if (x)
748 return x;
749 }
750
751 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
752 }
753
754 /* Like simplify_expand_binop, but always put the result in TARGET.
755 Return true if the expansion succeeded. */
756
757 bool
758 force_expand_binop (enum machine_mode mode, optab binoptab,
759 rtx op0, rtx op1, rtx target, int unsignedp,
760 enum optab_methods methods)
761 {
762 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
763 target, unsignedp, methods);
764 if (x == 0)
765 return false;
766 if (x != target)
767 emit_move_insn (target, x);
768 return true;
769 }
770
771 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
772
773 rtx
774 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
775 {
776 enum insn_code icode;
777 rtx rtx_op1, rtx_op2;
778 enum machine_mode mode1;
779 enum machine_mode mode2;
780 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
781 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
782 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
783 optab shift_optab;
784 rtx pat;
785
786 switch (TREE_CODE (vec_shift_expr))
787 {
788 case VEC_RSHIFT_EXPR:
789 shift_optab = vec_shr_optab;
790 break;
791 case VEC_LSHIFT_EXPR:
792 shift_optab = vec_shl_optab;
793 break;
794 default:
795 gcc_unreachable ();
796 }
797
798 icode = (int) optab_handler (shift_optab, mode)->insn_code;
799 gcc_assert (icode != CODE_FOR_nothing);
800
801 mode1 = insn_data[icode].operand[1].mode;
802 mode2 = insn_data[icode].operand[2].mode;
803
804 rtx_op1 = expand_normal (vec_oprnd);
805 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
806 && mode1 != VOIDmode)
807 rtx_op1 = force_reg (mode1, rtx_op1);
808
809 rtx_op2 = expand_normal (shift_oprnd);
810 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
811 && mode2 != VOIDmode)
812 rtx_op2 = force_reg (mode2, rtx_op2);
813
814 if (!target
815 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
816 target = gen_reg_rtx (mode);
817
818 /* Emit instruction */
819 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
820 gcc_assert (pat);
821 emit_insn (pat);
822
823 return target;
824 }
825
826 /* This subroutine of expand_doubleword_shift handles the cases in which
827 the effective shift value is >= BITS_PER_WORD. The arguments and return
828 value are the same as for the parent routine, except that SUPERWORD_OP1
829 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
830 INTO_TARGET may be null if the caller has decided to calculate it. */
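/* For illustration (assuming 32-bit words): for a 64-bit arithmetic right
   shift by a count known to be >= 32, OUTOF_INPUT is the high input word;
   INTO_TARGET receives OUTOF_INPUT shifted right by SUPERWORD_OP1
   (effectively count - 32), and OUTOF_TARGET is filled with copies of the
   sign bit (OUTOF_INPUT >> 31), or with zeros for a logical shift.  */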
831
832 static bool
833 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
834 rtx outof_target, rtx into_target,
835 int unsignedp, enum optab_methods methods)
836 {
837 if (into_target != 0)
838 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
839 into_target, unsignedp, methods))
840 return false;
841
842 if (outof_target != 0)
843 {
844 /* For a signed right shift, we must fill OUTOF_TARGET with copies
845 of the sign bit, otherwise we must fill it with zeros. */
846 if (binoptab != ashr_optab)
847 emit_move_insn (outof_target, CONST0_RTX (word_mode));
848 else
849 if (!force_expand_binop (word_mode, binoptab,
850 outof_input, GEN_INT (BITS_PER_WORD - 1),
851 outof_target, unsignedp, methods))
852 return false;
853 }
854 return true;
855 }
856
857 /* This subroutine of expand_doubleword_shift handles the cases in which
858 the effective shift value is < BITS_PER_WORD. The arguments and return
859 value are the same as for the parent routine. */
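/* For illustration (assuming 32-bit words): for a 64-bit left shift by a
   count OP1 in [1, 31], OUTOF_INPUT is the low input word and INTO_INPUT
   the high one.  The code below effectively computes
       carries      = OUTOF_INPUT >> (32 - OP1)
       INTO_TARGET  = (INTO_INPUT << OP1) | carries
       OUTOF_TARGET = OUTOF_INPUT << OP1
   with the (32 - OP1) value arranged so that a word shift by exactly 32 is
   never emitted.  */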
860
861 static bool
862 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
863 rtx outof_input, rtx into_input, rtx op1,
864 rtx outof_target, rtx into_target,
865 int unsignedp, enum optab_methods methods,
866 unsigned HOST_WIDE_INT shift_mask)
867 {
868 optab reverse_unsigned_shift, unsigned_shift;
869 rtx tmp, carries;
870
871 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
872 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
873
874 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
875 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
876 the opposite direction to BINOPTAB. */
877 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
878 {
879 carries = outof_input;
880 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
881 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
882 0, true, methods);
883 }
884 else
885 {
886 /* We must avoid shifting by BITS_PER_WORD bits since that is either
887 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
888 has unknown behavior. Do a single shift first, then shift by the
889 remainder. It's OK to use ~OP1 as the remainder if shift counts
890 are truncated to the mode size. */
891 carries = expand_binop (word_mode, reverse_unsigned_shift,
892 outof_input, const1_rtx, 0, unsignedp, methods);
893 if (shift_mask == BITS_PER_WORD - 1)
894 {
895 tmp = immed_double_const (-1, -1, op1_mode);
896 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
897 0, true, methods);
898 }
899 else
900 {
901 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
902 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
903 0, true, methods);
904 }
905 }
906 if (tmp == 0 || carries == 0)
907 return false;
908 carries = expand_binop (word_mode, reverse_unsigned_shift,
909 carries, tmp, 0, unsignedp, methods);
910 if (carries == 0)
911 return false;
912
913 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
914 so the result can go directly into INTO_TARGET if convenient. */
915 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
916 into_target, unsignedp, methods);
917 if (tmp == 0)
918 return false;
919
920 /* Now OR in the bits carried over from OUTOF_INPUT. */
921 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
922 into_target, unsignedp, methods))
923 return false;
924
925 /* Use a standard word_mode shift for the out-of half. */
926 if (outof_target != 0)
927 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
928 outof_target, unsignedp, methods))
929 return false;
930
931 return true;
932 }
933
934
935 #ifdef HAVE_conditional_move
936 /* Try implementing expand_doubleword_shift using conditional moves.
937 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
938 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
939 are the shift counts to use in the former and latter case. All other
940 arguments are the same as the parent routine. */
941
942 static bool
943 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
944 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
945 rtx outof_input, rtx into_input,
946 rtx subword_op1, rtx superword_op1,
947 rtx outof_target, rtx into_target,
948 int unsignedp, enum optab_methods methods,
949 unsigned HOST_WIDE_INT shift_mask)
950 {
951 rtx outof_superword, into_superword;
952
953 /* Put the superword version of the output into OUTOF_SUPERWORD and
954 INTO_SUPERWORD. */
955 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
956 if (outof_target != 0 && subword_op1 == superword_op1)
957 {
958 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
959 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
960 into_superword = outof_target;
961 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
962 outof_superword, 0, unsignedp, methods))
963 return false;
964 }
965 else
966 {
967 into_superword = gen_reg_rtx (word_mode);
968 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
969 outof_superword, into_superword,
970 unsignedp, methods))
971 return false;
972 }
973
974 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
975 if (!expand_subword_shift (op1_mode, binoptab,
976 outof_input, into_input, subword_op1,
977 outof_target, into_target,
978 unsignedp, methods, shift_mask))
979 return false;
980
981 /* Select between them. Do the INTO half first because INTO_SUPERWORD
982 might be the current value of OUTOF_TARGET. */
983 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
984 into_target, into_superword, word_mode, false))
985 return false;
986
987 if (outof_target != 0)
988 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
989 outof_target, outof_superword,
990 word_mode, false))
991 return false;
992
993 return true;
994 }
995 #endif
996
997 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
998 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
999 input operand; the shift moves bits in the direction OUTOF_INPUT->
1000 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1001 of the target. OP1 is the shift count and OP1_MODE is its mode.
1002 If OP1 is constant, it will have been truncated as appropriate
1003 and is known to be nonzero.
1004
1005 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1006 shift count is outside the range [0, BITS_PER_WORD). This routine must
1007 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1008
1009 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1010 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1011 fill with zeros or sign bits as appropriate.
1012
1013 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1014 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1015 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1016 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
1017 are undefined.
1018
1019 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1020 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1021 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1022 function wants to calculate it itself.
1023
1024 Return true if the shift could be successfully synthesized. */
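/* For illustration: on a target whose word shifts truncate their count to
   the low 5 bits (SHIFT_MASK == 31 with 32-bit words), the synthesized
   64-bit shift behaves as if the count were taken modulo 64, matching
   SHIFT_COUNT_TRUNCATED; with SHIFT_MASK == 0 the word-mode shift counts
   are instead always kept within [0, BITS_PER_WORD).  */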
1025
1026 static bool
1027 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1028 rtx outof_input, rtx into_input, rtx op1,
1029 rtx outof_target, rtx into_target,
1030 int unsignedp, enum optab_methods methods,
1031 unsigned HOST_WIDE_INT shift_mask)
1032 {
1033 rtx superword_op1, tmp, cmp1, cmp2;
1034 rtx subword_label, done_label;
1035 enum rtx_code cmp_code;
1036
1037 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1038 fill the result with sign or zero bits as appropriate. If so, the value
1039 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1040 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1041 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1042
1043 This isn't worthwhile for constant shifts since the optimizers will
1044 cope better with in-range shift counts. */
1045 if (shift_mask >= BITS_PER_WORD
1046 && outof_target != 0
1047 && !CONSTANT_P (op1))
1048 {
1049 if (!expand_doubleword_shift (op1_mode, binoptab,
1050 outof_input, into_input, op1,
1051 0, into_target,
1052 unsignedp, methods, shift_mask))
1053 return false;
1054 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1055 outof_target, unsignedp, methods))
1056 return false;
1057 return true;
1058 }
1059
1060 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1061 is true when the effective shift value is less than BITS_PER_WORD.
1062 Set SUPERWORD_OP1 to the shift count that should be used to shift
1063 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1064 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1065 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1066 {
1067 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1068 is a subword shift count. */
1069 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1070 0, true, methods);
1071 cmp2 = CONST0_RTX (op1_mode);
1072 cmp_code = EQ;
1073 superword_op1 = op1;
1074 }
1075 else
1076 {
1077 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1078 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1079 0, true, methods);
1080 cmp2 = CONST0_RTX (op1_mode);
1081 cmp_code = LT;
1082 superword_op1 = cmp1;
1083 }
1084 if (cmp1 == 0)
1085 return false;
1086
1087 /* If we can compute the condition at compile time, pick the
1088 appropriate subroutine. */
1089 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1090 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1091 {
1092 if (tmp == const0_rtx)
1093 return expand_superword_shift (binoptab, outof_input, superword_op1,
1094 outof_target, into_target,
1095 unsignedp, methods);
1096 else
1097 return expand_subword_shift (op1_mode, binoptab,
1098 outof_input, into_input, op1,
1099 outof_target, into_target,
1100 unsignedp, methods, shift_mask);
1101 }
1102
1103 #ifdef HAVE_conditional_move
1104 /* Try using conditional moves to generate straight-line code. */
1105 {
1106 rtx start = get_last_insn ();
1107 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1108 cmp_code, cmp1, cmp2,
1109 outof_input, into_input,
1110 op1, superword_op1,
1111 outof_target, into_target,
1112 unsignedp, methods, shift_mask))
1113 return true;
1114 delete_insns_since (start);
1115 }
1116 #endif
1117
1118 /* As a last resort, use branches to select the correct alternative. */
1119 subword_label = gen_label_rtx ();
1120 done_label = gen_label_rtx ();
1121
1122 NO_DEFER_POP;
1123 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1124 0, 0, subword_label);
1125 OK_DEFER_POP;
1126
1127 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1128 outof_target, into_target,
1129 unsignedp, methods))
1130 return false;
1131
1132 emit_jump_insn (gen_jump (done_label));
1133 emit_barrier ();
1134 emit_label (subword_label);
1135
1136 if (!expand_subword_shift (op1_mode, binoptab,
1137 outof_input, into_input, op1,
1138 outof_target, into_target,
1139 unsignedp, methods, shift_mask))
1140 return false;
1141
1142 emit_label (done_label);
1143 return true;
1144 }
1145 \f
1146 /* Subroutine of expand_binop. Perform a double word multiplication of
1147 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1148 as the target's word_mode. This function returns NULL_RTX if anything
1149 goes wrong, in which case it may have already emitted instructions
1150 which need to be deleted.
1151
1152 If we want to multiply two two-word values and have normal and widening
1153 multiplies of single-word values, we can do this with three smaller
1154 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1155 because we are not operating on one word at a time.
1156
1157 The multiplication proceeds as follows:
1158 _______________________
1159 [__op0_high_|__op0_low__]
1160 _______________________
1161 * [__op1_high_|__op1_low__]
1162 _______________________________________________
1163 _______________________
1164 (1) [__op0_low__*__op1_low__]
1165 _______________________
1166 (2a) [__op0_low__*__op1_high_]
1167 _______________________
1168 (2b) [__op0_high_*__op1_low__]
1169 _______________________
1170 (3) [__op0_high_*__op1_high_]
1171
1172
1173 This gives a 4-word result. Since we are only interested in the
1174 lower 2 words, partial result (3) and the upper words of (2a) and
1175 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1176 calculated using non-widening multiplication.
1177
1178 (1), however, needs to be calculated with an unsigned widening
1179 multiplication. If this operation is not directly supported we
1180 try using a signed widening multiplication and adjust the result.
1181 This adjustment works as follows:
1182
1183 If both operands are positive then no adjustment is needed.
1184
1185 If the operands have different signs, for example op0_low < 0 and
1186 op1_low >= 0, the instruction treats the most significant bit of
1187 op0_low as a sign bit instead of a bit with significance
1188 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1189 with 2**BITS_PER_WORD - op0_low, and two's complements the
1190 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1191 the result.
1192
1193 Similarly, if both operands are negative, we need to add
1194 (op0_low + op1_low) * 2**BITS_PER_WORD.
1195
1196 We use a trick to adjust quickly. We logically shift op0_low right
1197 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1198 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1199 logical shift exists, we do an arithmetic right shift and subtract
1200 the 0 or -1. */
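/* Informal numeric check of the adjustment described above, taking
   BITS_PER_WORD = 8 for brevity: with op0_low = 0xff and op1_low = 0x02
   the unsigned product is 0x01fe (510).  A signed widening multiply
   instead computes -1 * 2 = -2, i.e. 0xfffe modulo 2**16; adding
   op1_low * 2**8 = 0x200 gives 0x101fe, which is 0x01fe modulo 2**16,
   the desired unsigned product.  */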
1201
1202 static rtx
1203 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1204 bool umulp, enum optab_methods methods)
1205 {
1206 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1207 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1208 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1209 rtx product, adjust, product_high, temp;
1210
1211 rtx op0_high = operand_subword_force (op0, high, mode);
1212 rtx op0_low = operand_subword_force (op0, low, mode);
1213 rtx op1_high = operand_subword_force (op1, high, mode);
1214 rtx op1_low = operand_subword_force (op1, low, mode);
1215
1216 /* If we're using an unsigned multiply to directly compute the product
1217 of the low-order words of the operands and perform any required
1218 adjustments of the operands, we begin by trying two more multiplications
1219 and then computing the appropriate sum.
1220
1221 We have checked above that the required addition is provided.
1222 Full-word addition will normally always succeed, especially if
1223 it is provided at all, so we don't worry about its failure. The
1224 multiplication may well fail, however, so we do handle that. */
1225
1226 if (!umulp)
1227 {
1228 /* ??? This could be done with emit_store_flag where available. */
1229 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1230 NULL_RTX, 1, methods);
1231 if (temp)
1232 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1233 NULL_RTX, 0, OPTAB_DIRECT);
1234 else
1235 {
1236 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1237 NULL_RTX, 0, methods);
1238 if (!temp)
1239 return NULL_RTX;
1240 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1241 NULL_RTX, 0, OPTAB_DIRECT);
1242 }
1243
1244 if (!op0_high)
1245 return NULL_RTX;
1246 }
1247
1248 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1249 NULL_RTX, 0, OPTAB_DIRECT);
1250 if (!adjust)
1251 return NULL_RTX;
1252
1253 /* OP0_HIGH should now be dead. */
1254
1255 if (!umulp)
1256 {
1257 /* ??? This could be done with emit_store_flag where available. */
1258 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1259 NULL_RTX, 1, methods);
1260 if (temp)
1261 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1262 NULL_RTX, 0, OPTAB_DIRECT);
1263 else
1264 {
1265 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1266 NULL_RTX, 0, methods);
1267 if (!temp)
1268 return NULL_RTX;
1269 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1270 NULL_RTX, 0, OPTAB_DIRECT);
1271 }
1272
1273 if (!op1_high)
1274 return NULL_RTX;
1275 }
1276
1277 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1278 NULL_RTX, 0, OPTAB_DIRECT);
1279 if (!temp)
1280 return NULL_RTX;
1281
1282 /* OP1_HIGH should now be dead. */
1283
1284 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1285 adjust, 0, OPTAB_DIRECT);
1286
1287 if (target && !REG_P (target))
1288 target = NULL_RTX;
1289
1290 if (umulp)
1291 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1292 target, 1, OPTAB_DIRECT);
1293 else
1294 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1295 target, 1, OPTAB_DIRECT);
1296
1297 if (!product)
1298 return NULL_RTX;
1299
1300 product_high = operand_subword (product, high, 1, mode);
1301 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1302 REG_P (product_high) ? product_high : adjust,
1303 0, OPTAB_DIRECT);
1304 emit_move_insn (product_high, adjust);
1305 return product;
1306 }
1307 \f
1308 /* Wrapper around expand_binop which takes an rtx code to specify
1309 the operation to perform, not an optab pointer. All other
1310 arguments are the same. */
1311 rtx
1312 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1313 rtx op1, rtx target, int unsignedp,
1314 enum optab_methods methods)
1315 {
1316 optab binop = code_to_optab[(int) code];
1317 gcc_assert (binop);
1318
1319 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1320 }
1321
1322 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1323 binop. Order them according to commutative_operand_precedence and, if
1324 possible, try to put TARGET or a pseudo first. */
1325 static bool
1326 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1327 {
1328 int op0_prec = commutative_operand_precedence (op0);
1329 int op1_prec = commutative_operand_precedence (op1);
1330
1331 if (op0_prec < op1_prec)
1332 return true;
1333
1334 if (op0_prec > op1_prec)
1335 return false;
1336
1337 /* With equal precedence, both orders are ok, but it is better if the
1338 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1339 if (target == 0 || REG_P (target))
1340 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1341 else
1342 return rtx_equal_p (op1, target);
1343 }
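/* For illustration: with OP0 a CONST_INT and OP1 a REG,
   commutative_operand_precedence ranks the register higher, so this
   function returns true and the caller swaps the operands, putting the
   register (or TARGET itself) first and the constant last.  */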
1344
1345 /* Return true if BINOPTAB implements a shift operation. */
1346
1347 static bool
1348 shift_optab_p (optab binoptab)
1349 {
1350 switch (binoptab->code)
1351 {
1352 case ASHIFT:
1353 case SS_ASHIFT:
1354 case US_ASHIFT:
1355 case ASHIFTRT:
1356 case LSHIFTRT:
1357 case ROTATE:
1358 case ROTATERT:
1359 return true;
1360
1361 default:
1362 return false;
1363 }
1364 }
1365
1366 /* Return true if BINOPTAB implements a commutative binary operation. */
1367
1368 static bool
1369 commutative_optab_p (optab binoptab)
1370 {
1371 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1372 || binoptab == smul_widen_optab
1373 || binoptab == umul_widen_optab
1374 || binoptab == smul_highpart_optab
1375 || binoptab == umul_highpart_optab);
1376 }
1377
1378 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1379 optimizing, and if the operand is a constant that costs more than
1380 1 instruction, force the constant into a register and return that
1381 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1382
1383 static rtx
1384 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1385 rtx x, bool unsignedp)
1386 {
1387 if (mode != VOIDmode
1388 && optimize
1389 && CONSTANT_P (x)
1390 && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
1391 {
1392 if (GET_CODE (x) == CONST_INT)
1393 {
1394 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1395 if (intval != INTVAL (x))
1396 x = GEN_INT (intval);
1397 }
1398 else
1399 x = convert_modes (mode, VOIDmode, x, unsignedp);
1400 x = force_reg (mode, x);
1401 }
1402 return x;
1403 }
1404
1405 /* Helper function for expand_binop: handle the case where there
1406 is an insn that directly implements the indicated operation.
1407 Returns null if this is not possible. */
1408 static rtx
1409 expand_binop_directly (enum machine_mode mode, optab binoptab,
1410 rtx op0, rtx op1,
1411 rtx target, int unsignedp, enum optab_methods methods,
1412 rtx last)
1413 {
1414 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1415 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1416 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1417 enum machine_mode tmp_mode;
1418 bool commutative_p;
1419 rtx pat;
1420 rtx xop0 = op0, xop1 = op1;
1421 rtx temp;
1422 rtx swap;
1423
1424 if (target)
1425 temp = target;
1426 else
1427 temp = gen_reg_rtx (mode);
1428
1429 /* If it is a commutative operator and the modes would match
1430 if we would swap the operands, we can save the conversions. */
1431 commutative_p = commutative_optab_p (binoptab);
1432 if (commutative_p
1433 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1434 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode0)
1435 {
1436 swap = xop0;
1437 xop0 = xop1;
1438 xop1 = swap;
1439 }
1440
1441 /* If we are optimizing, force expensive constants into a register. */
1442 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1443 if (!shift_optab_p (binoptab))
1444 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1445
1446 /* In case the insn wants input operands in modes different from
1447 those of the actual operands, convert the operands. It would
1448 seem that we don't need to convert CONST_INTs, but we do, so
1449 that they're properly zero-extended, sign-extended or truncated
1450 for their mode. */
1451
1452 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1453 xop0 = convert_modes (mode0,
1454 GET_MODE (xop0) != VOIDmode
1455 ? GET_MODE (xop0)
1456 : mode,
1457 xop0, unsignedp);
1458
1459 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1460 xop1 = convert_modes (mode1,
1461 GET_MODE (xop1) != VOIDmode
1462 ? GET_MODE (xop1)
1463 : mode,
1464 xop1, unsignedp);
1465
1466 /* If operation is commutative,
1467 try to make the first operand a register.
1468 Even better, try to make it the same as the target.
1469 Also try to make the last operand a constant. */
1470 if (commutative_p
1471 && swap_commutative_operands_with_target (target, xop0, xop1))
1472 {
1473 swap = xop1;
1474 xop1 = xop0;
1475 xop0 = swap;
1476 }
1477
1478 /* Now, if insn's predicates don't allow our operands, put them into
1479 pseudo regs. */
1480
1481 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1482 && mode0 != VOIDmode)
1483 xop0 = copy_to_mode_reg (mode0, xop0);
1484
1485 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1486 && mode1 != VOIDmode)
1487 xop1 = copy_to_mode_reg (mode1, xop1);
1488
1489 if (binoptab == vec_pack_trunc_optab
1490 || binoptab == vec_pack_usat_optab
1491 || binoptab == vec_pack_ssat_optab
1492 || binoptab == vec_pack_ufix_trunc_optab
1493 || binoptab == vec_pack_sfix_trunc_optab)
1494 {
1495 /* The mode of the result is different from the mode of the
1496 arguments. */
1497 tmp_mode = insn_data[icode].operand[0].mode;
1498 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1499 return 0;
1500 }
1501 else
1502 tmp_mode = mode;
1503
1504 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1505 temp = gen_reg_rtx (tmp_mode);
1506
1507 pat = GEN_FCN (icode) (temp, xop0, xop1);
1508 if (pat)
1509 {
1510 /* If PAT is composed of more than one insn, try to add an appropriate
1511 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1512 operand, call expand_binop again, this time without a target. */
1513 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1514 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1515 {
1516 delete_insns_since (last);
1517 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1518 unsignedp, methods);
1519 }
1520
1521 emit_insn (pat);
1522 return temp;
1523 }
1524
1525 delete_insns_since (last);
1526 return NULL_RTX;
1527 }
1528
1529 /* Generate code to perform an operation specified by BINOPTAB
1530 on operands OP0 and OP1, with result having machine-mode MODE.
1531
1532 UNSIGNEDP is for the case where we have to widen the operands
1533 to perform the operation. It says to use zero-extension.
1534
1535 If TARGET is nonzero, the value
1536 is generated there, if it is convenient to do so.
1537 In all cases an rtx is returned for the locus of the value;
1538 this may or may not be TARGET. */
1539
1540 rtx
1541 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1542 rtx target, int unsignedp, enum optab_methods methods)
1543 {
1544 enum optab_methods next_methods
1545 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1546 ? OPTAB_WIDEN : methods);
1547 enum mode_class class;
1548 enum machine_mode wider_mode;
1549 rtx libfunc;
1550 rtx temp;
1551 rtx entry_last = get_last_insn ();
1552 rtx last;
1553
1554 class = GET_MODE_CLASS (mode);
1555
1556 /* If subtracting an integer constant, convert this into an addition of
1557 the negated constant. */
1558
1559 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1560 {
1561 op1 = negate_rtx (mode, op1);
1562 binoptab = add_optab;
1563 }
1564
1565 /* Record where to delete back to if we backtrack. */
1566 last = get_last_insn ();
1567
1568 /* If we can do it with a three-operand insn, do so. */
1569
1570 if (methods != OPTAB_MUST_WIDEN
1571 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1572 {
1573 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1574 unsignedp, methods, last);
1575 if (temp)
1576 return temp;
1577 }
1578
1579 /* If we were trying to rotate, and that didn't work, try rotating
1580 the other direction before falling back to shifts and bitwise-or. */
1581 if (((binoptab == rotl_optab
1582 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1583 || (binoptab == rotr_optab
1584 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1585 && class == MODE_INT)
1586 {
1587 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1588 rtx newop1;
1589 unsigned int bits = GET_MODE_BITSIZE (mode);
1590
1591 if (GET_CODE (op1) == CONST_INT)
1592 newop1 = GEN_INT (bits - INTVAL (op1));
1593 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1594 newop1 = negate_rtx (mode, op1);
1595 else
1596 newop1 = expand_binop (mode, sub_optab,
1597 GEN_INT (bits), op1,
1598 NULL_RTX, unsignedp, OPTAB_DIRECT);
1599
1600 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1601 target, unsignedp, methods, last);
1602 if (temp)
1603 return temp;
1604 }
1605
1606 /* If this is a multiply, see if we can do a widening operation that
1607 takes operands of this mode and makes a wider mode. */
1608
1609 if (binoptab == smul_optab
1610 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1611 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1612 GET_MODE_WIDER_MODE (mode))->insn_code)
1613 != CODE_FOR_nothing))
1614 {
1615 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1616 unsignedp ? umul_widen_optab : smul_widen_optab,
1617 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1618
1619 if (temp != 0)
1620 {
1621 if (GET_MODE_CLASS (mode) == MODE_INT
1622 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1623 GET_MODE_BITSIZE (GET_MODE (temp))))
1624 return gen_lowpart (mode, temp);
1625 else
1626 return convert_to_mode (mode, temp, unsignedp);
1627 }
1628 }
1629
1630 /* Look for a wider mode of the same class for which we think we
1631 can open-code the operation. Check for a widening multiply at the
1632 wider mode as well. */
1633
1634 if (CLASS_HAS_WIDER_MODES_P (class)
1635 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1636 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1637 wider_mode != VOIDmode;
1638 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1639 {
1640 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1641 || (binoptab == smul_optab
1642 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1643 && ((optab_handler ((unsignedp ? umul_widen_optab
1644 : smul_widen_optab),
1645 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1646 != CODE_FOR_nothing)))
1647 {
1648 rtx xop0 = op0, xop1 = op1;
1649 int no_extend = 0;
1650
1651 /* For certain integer operations, we need not actually extend
1652 the narrow operands, as long as we will truncate
1653 the results to the same narrowness. */
1654
1655 if ((binoptab == ior_optab || binoptab == and_optab
1656 || binoptab == xor_optab
1657 || binoptab == add_optab || binoptab == sub_optab
1658 || binoptab == smul_optab || binoptab == ashl_optab)
1659 && class == MODE_INT)
1660 {
1661 no_extend = 1;
1662 xop0 = avoid_expensive_constant (mode, binoptab,
1663 xop0, unsignedp);
1664 if (binoptab != ashl_optab)
1665 xop1 = avoid_expensive_constant (mode, binoptab,
1666 xop1, unsignedp);
1667 }
1668
1669 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1670
1671 /* The second operand of a shift must always be extended. */
1672 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1673 no_extend && binoptab != ashl_optab);
1674
1675 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1676 unsignedp, OPTAB_DIRECT);
1677 if (temp)
1678 {
1679 if (class != MODE_INT
1680 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1681 GET_MODE_BITSIZE (wider_mode)))
1682 {
1683 if (target == 0)
1684 target = gen_reg_rtx (mode);
1685 convert_move (target, temp, 0);
1686 return target;
1687 }
1688 else
1689 return gen_lowpart (mode, temp);
1690 }
1691 else
1692 delete_insns_since (last);
1693 }
1694 }
1695
1696 /* If operation is commutative,
1697 try to make the first operand a register.
1698 Even better, try to make it the same as the target.
1699 Also try to make the last operand a constant. */
1700 if (commutative_optab_p (binoptab)
1701 && swap_commutative_operands_with_target (target, op0, op1))
1702 {
1703 temp = op1;
1704 op1 = op0;
1705 op0 = temp;
1706 }
1707
1708 /* These can be done a word at a time. */
1709 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1710 && class == MODE_INT
1711 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1712 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1713 {
1714 int i;
1715 rtx insns;
1716 rtx equiv_value;
1717
1718 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1719 won't be accurate, so use a new target. */
1720 if (target == 0 || target == op0 || target == op1)
1721 target = gen_reg_rtx (mode);
1722
1723 start_sequence ();
1724
1725 /* Do the actual arithmetic. */
1726 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1727 {
1728 rtx target_piece = operand_subword (target, i, 1, mode);
1729 rtx x = expand_binop (word_mode, binoptab,
1730 operand_subword_force (op0, i, mode),
1731 operand_subword_force (op1, i, mode),
1732 target_piece, unsignedp, next_methods);
1733
1734 if (x == 0)
1735 break;
1736
1737 if (target_piece != x)
1738 emit_move_insn (target_piece, x);
1739 }
1740
1741 insns = get_insns ();
1742 end_sequence ();
1743
1744 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1745 {
1746 if (binoptab->code != UNKNOWN)
1747 equiv_value
1748 = gen_rtx_fmt_ee (binoptab->code, mode,
1749 copy_rtx (op0), copy_rtx (op1));
1750 else
1751 equiv_value = 0;
1752
1753 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1754 return target;
1755 }
1756 }
1757
1758 /* Synthesize double word shifts from single word shifts. */
1759 if ((binoptab == lshr_optab || binoptab == ashl_optab
1760 || binoptab == ashr_optab)
1761 && class == MODE_INT
1762 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1763 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1764 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1765 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1766 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1767 {
1768 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1769 enum machine_mode op1_mode;
1770
1771 double_shift_mask = targetm.shift_truncation_mask (mode);
1772 shift_mask = targetm.shift_truncation_mask (word_mode);
1773 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1774
1775 /* Apply the truncation to constant shifts. */
1776 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1777 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1778
1779 if (op1 == CONST0_RTX (op1_mode))
1780 return op0;
1781
1782 /* Make sure that this is a combination that expand_doubleword_shift
1783 can handle. See the comments there for details. */
1784 if (double_shift_mask == 0
1785 || (shift_mask == BITS_PER_WORD - 1
1786 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1787 {
1788 rtx insns, equiv_value;
1789 rtx into_target, outof_target;
1790 rtx into_input, outof_input;
1791 int left_shift, outof_word;
1792
1793 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1794 won't be accurate, so use a new target. */
1795 if (target == 0 || target == op0 || target == op1)
1796 target = gen_reg_rtx (mode);
1797
1798 start_sequence ();
1799
1800 /* OUTOF_* is the word we are shifting bits away from, and
1801 INTO_* is the word that we are shifting bits towards, thus
1802 they differ depending on the direction of the shift and
1803 WORDS_BIG_ENDIAN. */
1804
1805 left_shift = binoptab == ashl_optab;
1806 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1807
1808 outof_target = operand_subword (target, outof_word, 1, mode);
1809 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1810
1811 outof_input = operand_subword_force (op0, outof_word, mode);
1812 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1813
1814 if (expand_doubleword_shift (op1_mode, binoptab,
1815 outof_input, into_input, op1,
1816 outof_target, into_target,
1817 unsignedp, next_methods, shift_mask))
1818 {
1819 insns = get_insns ();
1820 end_sequence ();
1821
1822 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1823 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1824 return target;
1825 }
1826 end_sequence ();
1827 }
1828 }
1829
1830 /* Synthesize double word rotates from single word shifts. */
1831 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1832 && class == MODE_INT
1833 && GET_CODE (op1) == CONST_INT
1834 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1835 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1836 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1837 {
1838 rtx insns;
1839 rtx into_target, outof_target;
1840 rtx into_input, outof_input;
1841 rtx inter;
1842 int shift_count, left_shift, outof_word;
1843
1844 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1845 won't be accurate, so use a new target. Do this also if target is not
1846 a REG, first because having a register instead may open optimization
1847 opportunities, and second because if target and op0 happen to be MEMs
1848 designating the same location, we would risk clobbering it too early
1849 in the code sequence we generate below. */
1850 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1851 target = gen_reg_rtx (mode);
1852
1853 start_sequence ();
1854
1855 shift_count = INTVAL (op1);
1856
1857 /* OUTOF_* is the word we are shifting bits away from, and
1858 INTO_* is the word that we are shifting bits towards, thus
1859 they differ depending on the direction of the shift and
1860 WORDS_BIG_ENDIAN. */
1861
1862 left_shift = (binoptab == rotl_optab);
1863 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1864
1865 outof_target = operand_subword (target, outof_word, 1, mode);
1866 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1867
1868 outof_input = operand_subword_force (op0, outof_word, mode);
1869 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1870
1871 if (shift_count == BITS_PER_WORD)
1872 {
1873 /* This is just a word swap. */
1874 emit_move_insn (outof_target, into_input);
1875 emit_move_insn (into_target, outof_input);
1876 inter = const0_rtx;
1877 }
1878 else
1879 {
1880 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1881 rtx first_shift_count, second_shift_count;
1882 optab reverse_unsigned_shift, unsigned_shift;
1883
1884 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1885 ? lshr_optab : ashl_optab);
1886
1887 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1888 ? ashl_optab : lshr_optab);
1889
1890 if (shift_count > BITS_PER_WORD)
1891 {
1892 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1893 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1894 }
1895 else
1896 {
1897 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1898 second_shift_count = GEN_INT (shift_count);
1899 }
1900
1901 into_temp1 = expand_binop (word_mode, unsigned_shift,
1902 outof_input, first_shift_count,
1903 NULL_RTX, unsignedp, next_methods);
1904 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1905 into_input, second_shift_count,
1906 NULL_RTX, unsignedp, next_methods);
1907
1908 if (into_temp1 != 0 && into_temp2 != 0)
1909 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1910 into_target, unsignedp, next_methods);
1911 else
1912 inter = 0;
1913
1914 if (inter != 0 && inter != into_target)
1915 emit_move_insn (into_target, inter);
1916
1917 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1918 into_input, first_shift_count,
1919 NULL_RTX, unsignedp, next_methods);
1920 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1921 outof_input, second_shift_count,
1922 NULL_RTX, unsignedp, next_methods);
1923
1924 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1925 inter = expand_binop (word_mode, ior_optab,
1926 outof_temp1, outof_temp2,
1927 outof_target, unsignedp, next_methods);
1928
1929 if (inter != 0 && inter != outof_target)
1930 emit_move_insn (outof_target, inter);
1931 }
1932
1933 insns = get_insns ();
1934 end_sequence ();
1935
1936 if (inter != 0)
1937 {
1938 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1939 block to help the register allocator a bit. But a multi-word
1940 rotate will need all the input bits when setting the output
1941 bits, so there clearly is a conflict between the input and
1942 output registers. So we can't use a no-conflict block here. */
1943 emit_insn (insns);
1944 return target;
1945 }
1946 }
1947
1948 /* These can be done a word at a time by propagating carries. */
1949 if ((binoptab == add_optab || binoptab == sub_optab)
1950 && class == MODE_INT
1951 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1952 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1953 {
1954 unsigned int i;
1955 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1956 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1957 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1958 rtx xop0, xop1, xtarget;
1959
1960       /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG_VALUE
1961 	 is one of those, use it.  Otherwise, use 1 since it is the
1962 one easiest to get. */
1963 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1964 int normalizep = STORE_FLAG_VALUE;
1965 #else
1966 int normalizep = 1;
1967 #endif
1968
1969 /* Prepare the operands. */
1970 xop0 = force_reg (mode, op0);
1971 xop1 = force_reg (mode, op1);
1972
1973 xtarget = gen_reg_rtx (mode);
1974
1975 if (target == 0 || !REG_P (target))
1976 target = xtarget;
1977
1978 /* Indicate for flow that the entire target reg is being set. */
1979 if (REG_P (target))
1980 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1981
1982 /* Do the actual arithmetic. */
1983 for (i = 0; i < nwords; i++)
1984 {
1985 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1986 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1987 rtx op0_piece = operand_subword_force (xop0, index, mode);
1988 rtx op1_piece = operand_subword_force (xop1, index, mode);
1989 rtx x;
1990
1991 /* Main add/subtract of the input operands. */
1992 x = expand_binop (word_mode, binoptab,
1993 op0_piece, op1_piece,
1994 target_piece, unsignedp, next_methods);
1995 if (x == 0)
1996 break;
1997
1998 if (i + 1 < nwords)
1999 {
2000 /* Store carry from main add/subtract. */
2001 carry_out = gen_reg_rtx (word_mode);
2002 carry_out = emit_store_flag_force (carry_out,
2003 (binoptab == add_optab
2004 ? LT : GT),
2005 x, op0_piece,
2006 word_mode, 1, normalizep);
2007 }
2008
2009 if (i > 0)
2010 {
2011 rtx newx;
2012
2013 /* Add/subtract previous carry to main result. */
2014 newx = expand_binop (word_mode,
2015 normalizep == 1 ? binoptab : otheroptab,
2016 x, carry_in,
2017 NULL_RTX, 1, next_methods);
2018
2019 if (i + 1 < nwords)
2020 {
2021 /* Get out carry from adding/subtracting carry in. */
2022 rtx carry_tmp = gen_reg_rtx (word_mode);
2023 carry_tmp = emit_store_flag_force (carry_tmp,
2024 (binoptab == add_optab
2025 ? LT : GT),
2026 newx, x,
2027 word_mode, 1, normalizep);
2028
2029 		  /* Logical-ior the two possible carries together.  */
2030 carry_out = expand_binop (word_mode, ior_optab,
2031 carry_out, carry_tmp,
2032 carry_out, 0, next_methods);
2033 if (carry_out == 0)
2034 break;
2035 }
2036 emit_move_insn (target_piece, newx);
2037 }
2038 else
2039 {
2040 if (x != target_piece)
2041 emit_move_insn (target_piece, x);
2042 }
2043
2044 carry_in = carry_out;
2045 }
2046
2047 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2048 {
2049 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2050 || ! rtx_equal_p (target, xtarget))
2051 {
2052 rtx temp = emit_move_insn (target, xtarget);
2053
2054 set_unique_reg_note (temp,
2055 REG_EQUAL,
2056 gen_rtx_fmt_ee (binoptab->code, mode,
2057 copy_rtx (xop0),
2058 copy_rtx (xop1)));
2059 }
2060 else
2061 target = xtarget;
2062
2063 return target;
2064 }
2065
2066 else
2067 delete_insns_since (last);
2068 }
2069
2070 /* Attempt to synthesize double word multiplies using a sequence of word
2071 mode multiplications. We first attempt to generate a sequence using a
2072 more efficient unsigned widening multiply, and if that fails we then
2073 try using a signed widening multiply. */
2074
2075 if (binoptab == smul_optab
2076 && class == MODE_INT
2077 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2078 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2079 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2080 {
2081 rtx product = NULL_RTX;
2082
2083 if (optab_handler (umul_widen_optab, mode)->insn_code
2084 != CODE_FOR_nothing)
2085 {
2086 product = expand_doubleword_mult (mode, op0, op1, target,
2087 true, methods);
2088 if (!product)
2089 delete_insns_since (last);
2090 }
2091
2092 if (product == NULL_RTX
2093 && optab_handler (smul_widen_optab, mode)->insn_code
2094 != CODE_FOR_nothing)
2095 {
2096 product = expand_doubleword_mult (mode, op0, op1, target,
2097 false, methods);
2098 if (!product)
2099 delete_insns_since (last);
2100 }
2101
2102 if (product != NULL_RTX)
2103 {
2104 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2105 {
2106 temp = emit_move_insn (target ? target : product, product);
2107 set_unique_reg_note (temp,
2108 REG_EQUAL,
2109 gen_rtx_fmt_ee (MULT, mode,
2110 copy_rtx (op0),
2111 copy_rtx (op1)));
2112 }
2113 return product;
2114 }
2115 }
2116
2117 /* It can't be open-coded in this mode.
2118 Use a library call if one is available and caller says that's ok. */
2119
2120 libfunc = optab_libfunc (binoptab, mode);
2121 if (libfunc
2122 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2123 {
2124 rtx insns;
2125 rtx op1x = op1;
2126 enum machine_mode op1_mode = mode;
2127 rtx value;
2128
2129 start_sequence ();
2130
2131 if (shift_optab_p (binoptab))
2132 {
2133 op1_mode = targetm.libgcc_shift_count_mode ();
2134 /* Specify unsigned here,
2135 since negative shift counts are meaningless. */
2136 op1x = convert_to_mode (op1_mode, op1, 1);
2137 }
2138
2139 if (GET_MODE (op0) != VOIDmode
2140 && GET_MODE (op0) != mode)
2141 op0 = convert_to_mode (mode, op0, unsignedp);
2142
2143 /* Pass 1 for NO_QUEUE so we don't lose any increments
2144 if the libcall is cse'd or moved. */
2145 value = emit_library_call_value (libfunc,
2146 NULL_RTX, LCT_CONST, mode, 2,
2147 op0, mode, op1x, op1_mode);
2148
2149 insns = get_insns ();
2150 end_sequence ();
2151
2152 target = gen_reg_rtx (mode);
2153 emit_libcall_block (insns, target, value,
2154 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2155
2156 return target;
2157 }
2158
2159 delete_insns_since (last);
2160
2161 /* It can't be done in this mode. Can we do it in a wider mode? */
2162
2163 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2164 || methods == OPTAB_MUST_WIDEN))
2165 {
2166 /* Caller says, don't even try. */
2167 delete_insns_since (entry_last);
2168 return 0;
2169 }
2170
2171 /* Compute the value of METHODS to pass to recursive calls.
2172 Don't allow widening to be tried recursively. */
2173
2174 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2175
2176 /* Look for a wider mode of the same class for which it appears we can do
2177 the operation. */
2178
2179 if (CLASS_HAS_WIDER_MODES_P (class))
2180 {
2181 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2182 wider_mode != VOIDmode;
2183 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2184 {
2185 if ((optab_handler (binoptab, wider_mode)->insn_code
2186 != CODE_FOR_nothing)
2187 || (methods == OPTAB_LIB
2188 && optab_libfunc (binoptab, wider_mode)))
2189 {
2190 rtx xop0 = op0, xop1 = op1;
2191 int no_extend = 0;
2192
2193 /* For certain integer operations, we need not actually extend
2194 the narrow operands, as long as we will truncate
2195 the results to the same narrowness. */
2196
2197 if ((binoptab == ior_optab || binoptab == and_optab
2198 || binoptab == xor_optab
2199 || binoptab == add_optab || binoptab == sub_optab
2200 || binoptab == smul_optab || binoptab == ashl_optab)
2201 && class == MODE_INT)
2202 no_extend = 1;
2203
2204 xop0 = widen_operand (xop0, wider_mode, mode,
2205 unsignedp, no_extend);
2206
2207 /* The second operand of a shift must always be extended. */
2208 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2209 no_extend && binoptab != ashl_optab);
2210
2211 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2212 unsignedp, methods);
2213 if (temp)
2214 {
2215 if (class != MODE_INT
2216 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2217 GET_MODE_BITSIZE (wider_mode)))
2218 {
2219 if (target == 0)
2220 target = gen_reg_rtx (mode);
2221 convert_move (target, temp, 0);
2222 return target;
2223 }
2224 else
2225 return gen_lowpart (mode, temp);
2226 }
2227 else
2228 delete_insns_since (last);
2229 }
2230 }
2231 }
2232
2233 delete_insns_since (entry_last);
2234 return 0;
2235 }
2236 \f
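/* A minimal, self-contained sketch of the word-at-a-time carry propagation
   that expand_binop uses above for multiword addition, assuming 32-bit words
   and a two-word operand.  The function name and the use of plain C types
   are purely illustrative; nothing here is used by the expander itself.  */

static unsigned long long
sketch_doubleword_add (unsigned long long a, unsigned long long b)
{
  unsigned int a_lo = (unsigned int) a, a_hi = (unsigned int) (a >> 32);
  unsigned int b_lo = (unsigned int) b, b_hi = (unsigned int) (b >> 32);

  /* Main add of the low words.  The carry out is detected by an unsigned
     comparison of the result against one of the inputs, mirroring the
     emit_store_flag_force call above with the LT comparison.  */
  unsigned int r_lo = a_lo + b_lo;
  unsigned int carry = r_lo < a_lo;

  /* Add the high words plus the propagated carry.  */
  unsigned int r_hi = a_hi + b_hi + carry;

  return ((unsigned long long) r_hi << 32) | r_lo;
}
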
2237 /* Expand a binary operator which has both signed and unsigned forms.
2238 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2239 signed operations.
2240
2241 If we widen unsigned operands, we may use a signed wider operation instead
2242 of an unsigned wider operation, since the result would be the same. */
2243
2244 rtx
2245 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2246 rtx op0, rtx op1, rtx target, int unsignedp,
2247 enum optab_methods methods)
2248 {
2249 rtx temp;
2250 optab direct_optab = unsignedp ? uoptab : soptab;
2251 struct optab wide_soptab;
2252
2253 /* Do it without widening, if possible. */
2254 temp = expand_binop (mode, direct_optab, op0, op1, target,
2255 unsignedp, OPTAB_DIRECT);
2256 if (temp || methods == OPTAB_DIRECT)
2257 return temp;
2258
2259 /* Try widening to a signed int. Make a fake signed optab that
2260 hides any signed insn for direct use. */
2261 wide_soptab = *soptab;
2262 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2263 /* We don't want to generate new hash table entries from this fake
2264 optab. */
2265 wide_soptab.libcall_gen = NULL;
2266
2267 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2268 unsignedp, OPTAB_WIDEN);
2269
2270 /* For unsigned operands, try widening to an unsigned int. */
2271 if (temp == 0 && unsignedp)
2272 temp = expand_binop (mode, uoptab, op0, op1, target,
2273 unsignedp, OPTAB_WIDEN);
2274 if (temp || methods == OPTAB_WIDEN)
2275 return temp;
2276
2277   /* Use a libcall of the right width if one exists.  */
2278 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2279 if (temp || methods == OPTAB_LIB)
2280 return temp;
2281
2282   /* Must widen and use a libcall; use either the signed or the unsigned form.  */
2283 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2284 unsignedp, methods);
2285 if (temp != 0)
2286 return temp;
2287 if (unsignedp)
2288 return expand_binop (mode, uoptab, op0, op1, target,
2289 unsignedp, methods);
2290 return 0;
2291 }
2292 \f
2293 /* Generate code to perform an operation specified by UNOPPTAB
2294 on operand OP0, with two results to TARG0 and TARG1.
2295 We assume that the order of the operands for the instruction
2296 is TARG0, TARG1, OP0.
2297
2298 Either TARG0 or TARG1 may be zero, but what that means is that
2299 the result is not actually wanted. We will generate it into
2300 a dummy pseudo-reg and discard it. They may not both be zero.
2301
2302 Returns 1 if this operation can be performed; 0 if not. */
2303
2304 int
2305 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2306 int unsignedp)
2307 {
2308 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2309 enum mode_class class;
2310 enum machine_mode wider_mode;
2311 rtx entry_last = get_last_insn ();
2312 rtx last;
2313
2314 class = GET_MODE_CLASS (mode);
2315
2316 if (!targ0)
2317 targ0 = gen_reg_rtx (mode);
2318 if (!targ1)
2319 targ1 = gen_reg_rtx (mode);
2320
2321 /* Record where to go back to if we fail. */
2322 last = get_last_insn ();
2323
2324 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2325 {
2326 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2327 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2328 rtx pat;
2329 rtx xop0 = op0;
2330
2331 if (GET_MODE (xop0) != VOIDmode
2332 && GET_MODE (xop0) != mode0)
2333 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2334
2335 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2336 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2337 xop0 = copy_to_mode_reg (mode0, xop0);
2338
2339 /* We could handle this, but we should always be called with a pseudo
2340 for our targets and all insns should take them as outputs. */
2341 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2342 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2343
2344 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2345 if (pat)
2346 {
2347 emit_insn (pat);
2348 return 1;
2349 }
2350 else
2351 delete_insns_since (last);
2352 }
2353
2354 /* It can't be done in this mode. Can we do it in a wider mode? */
2355
2356 if (CLASS_HAS_WIDER_MODES_P (class))
2357 {
2358 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2359 wider_mode != VOIDmode;
2360 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2361 {
2362 if (optab_handler (unoptab, wider_mode)->insn_code
2363 != CODE_FOR_nothing)
2364 {
2365 rtx t0 = gen_reg_rtx (wider_mode);
2366 rtx t1 = gen_reg_rtx (wider_mode);
2367 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2368
2369 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2370 {
2371 convert_move (targ0, t0, unsignedp);
2372 convert_move (targ1, t1, unsignedp);
2373 return 1;
2374 }
2375 else
2376 delete_insns_since (last);
2377 }
2378 }
2379 }
2380
2381 delete_insns_since (entry_last);
2382 return 0;
2383 }
2384 \f
2385 /* Generate code to perform an operation specified by BINOPTAB
2386    on operands OP0 and OP1, with two results to TARG0 and TARG1.
2387 We assume that the order of the operands for the instruction
2388 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2389 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2390
2391 Either TARG0 or TARG1 may be zero, but what that means is that
2392 the result is not actually wanted. We will generate it into
2393 a dummy pseudo-reg and discard it. They may not both be zero.
2394
2395 Returns 1 if this operation can be performed; 0 if not. */
2396
2397 int
2398 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2399 int unsignedp)
2400 {
2401 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2402 enum mode_class class;
2403 enum machine_mode wider_mode;
2404 rtx entry_last = get_last_insn ();
2405 rtx last;
2406
2407 class = GET_MODE_CLASS (mode);
2408
2409 if (!targ0)
2410 targ0 = gen_reg_rtx (mode);
2411 if (!targ1)
2412 targ1 = gen_reg_rtx (mode);
2413
2414 /* Record where to go back to if we fail. */
2415 last = get_last_insn ();
2416
2417 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2418 {
2419 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2420 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2421 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2422 rtx pat;
2423 rtx xop0 = op0, xop1 = op1;
2424
2425 /* If we are optimizing, force expensive constants into a register. */
2426 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2427 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2428
2429 /* In case the insn wants input operands in modes different from
2430 those of the actual operands, convert the operands. It would
2431 seem that we don't need to convert CONST_INTs, but we do, so
2432 that they're properly zero-extended, sign-extended or truncated
2433 for their mode. */
2434
2435 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2436 xop0 = convert_modes (mode0,
2437 GET_MODE (op0) != VOIDmode
2438 ? GET_MODE (op0)
2439 : mode,
2440 xop0, unsignedp);
2441
2442 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2443 xop1 = convert_modes (mode1,
2444 GET_MODE (op1) != VOIDmode
2445 ? GET_MODE (op1)
2446 : mode,
2447 xop1, unsignedp);
2448
2449 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2450 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2451 xop0 = copy_to_mode_reg (mode0, xop0);
2452
2453 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2454 xop1 = copy_to_mode_reg (mode1, xop1);
2455
2456 /* We could handle this, but we should always be called with a pseudo
2457 for our targets and all insns should take them as outputs. */
2458 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2459 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2460
2461 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2462 if (pat)
2463 {
2464 emit_insn (pat);
2465 return 1;
2466 }
2467 else
2468 delete_insns_since (last);
2469 }
2470
2471 /* It can't be done in this mode. Can we do it in a wider mode? */
2472
2473 if (CLASS_HAS_WIDER_MODES_P (class))
2474 {
2475 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2476 wider_mode != VOIDmode;
2477 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2478 {
2479 if (optab_handler (binoptab, wider_mode)->insn_code
2480 != CODE_FOR_nothing)
2481 {
2482 rtx t0 = gen_reg_rtx (wider_mode);
2483 rtx t1 = gen_reg_rtx (wider_mode);
2484 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2485 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2486
2487 if (expand_twoval_binop (binoptab, cop0, cop1,
2488 t0, t1, unsignedp))
2489 {
2490 convert_move (targ0, t0, unsignedp);
2491 convert_move (targ1, t1, unsignedp);
2492 return 1;
2493 }
2494 else
2495 delete_insns_since (last);
2496 }
2497 }
2498 }
2499
2500 delete_insns_since (entry_last);
2501 return 0;
2502 }
2503
2504 /* Expand the two-valued library call indicated by BINOPTAB, but
2505 preserve only one of the values. If TARG0 is non-NULL, the first
2506 value is placed into TARG0; otherwise the second value is placed
2507 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2508 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2509 This routine assumes that the value returned by the library call is
2510 as if the return value was of an integral mode twice as wide as the
2511    mode of OP0.  Returns true if the call was successful.  */
2512
2513 bool
2514 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2515 rtx targ0, rtx targ1, enum rtx_code code)
2516 {
2517 enum machine_mode mode;
2518 enum machine_mode libval_mode;
2519 rtx libval;
2520 rtx insns;
2521 rtx libfunc;
2522
2523 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2524 gcc_assert (!targ0 != !targ1);
2525
2526 mode = GET_MODE (op0);
2527 libfunc = optab_libfunc (binoptab, mode);
2528 if (!libfunc)
2529 return false;
2530
2531 /* The value returned by the library function will have twice as
2532 many bits as the nominal MODE. */
2533 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2534 MODE_INT);
2535 start_sequence ();
2536 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2537 libval_mode, 2,
2538 op0, mode,
2539 op1, mode);
2540 /* Get the part of VAL containing the value that we want. */
2541 libval = simplify_gen_subreg (mode, libval, libval_mode,
2542 targ0 ? 0 : GET_MODE_SIZE (mode));
2543 insns = get_insns ();
2544 end_sequence ();
2545   /* Move the result into the desired location.  */
2546 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2547 gen_rtx_fmt_ee (code, mode, op0, op1));
2548
2549 return true;
2550 }
2551
2552 \f
2553 /* Wrapper around expand_unop which takes an rtx code to specify
2554 the operation to perform, not an optab pointer. All other
2555 arguments are the same. */
2556 rtx
2557 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2558 rtx target, int unsignedp)
2559 {
2560 optab unop = code_to_optab[(int) code];
2561 gcc_assert (unop);
2562
2563 return expand_unop (mode, unop, op0, target, unsignedp);
2564 }
2565
2566 /* Try calculating
2567 (clz:narrow x)
2568 as
2569 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2570 static rtx
2571 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2572 {
2573 enum mode_class class = GET_MODE_CLASS (mode);
2574 if (CLASS_HAS_WIDER_MODES_P (class))
2575 {
2576 enum machine_mode wider_mode;
2577 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2578 wider_mode != VOIDmode;
2579 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2580 {
2581 if (optab_handler (clz_optab, wider_mode)->insn_code
2582 != CODE_FOR_nothing)
2583 {
2584 rtx xop0, temp, last;
2585
2586 last = get_last_insn ();
2587
2588 if (target == 0)
2589 target = gen_reg_rtx (mode);
2590 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2591 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2592 if (temp != 0)
2593 temp = expand_binop (wider_mode, sub_optab, temp,
2594 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2595 - GET_MODE_BITSIZE (mode)),
2596 target, true, OPTAB_DIRECT);
2597 if (temp == 0)
2598 delete_insns_since (last);
2599
2600 return temp;
2601 }
2602 }
2603 }
2604 return 0;
2605 }
2606
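/* A minimal sketch of the widening identity used by widen_clz above,
   assuming a 32-bit unsigned int and a 16-bit unsigned short; the function
   name is illustrative only.  clz of the narrow value is clz of the
   zero-extended wide value minus the difference in bit widths.  */

static int
sketch_narrow_clz (unsigned short x)
{
  /* __builtin_clz operates on unsigned int; x is zero-extended by the
     usual integer promotion.  Undefined for x == 0, as for clz_optab.  */
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}
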
2607 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2608 quantities, choosing which based on whether the high word is nonzero. */
2609 static rtx
2610 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2611 {
2612 rtx xop0 = force_reg (mode, op0);
2613 rtx subhi = gen_highpart (word_mode, xop0);
2614 rtx sublo = gen_lowpart (word_mode, xop0);
2615 rtx hi0_label = gen_label_rtx ();
2616 rtx after_label = gen_label_rtx ();
2617 rtx seq, temp, result;
2618
2619 /* If we were not given a target, use a word_mode register, not a
2620 'mode' register. The result will fit, and nobody is expecting
2621 anything bigger (the return type of __builtin_clz* is int). */
2622 if (!target)
2623 target = gen_reg_rtx (word_mode);
2624
2625 /* In any case, write to a word_mode scratch in both branches of the
2626 conditional, so we can ensure there is a single move insn setting
2627 'target' to tag a REG_EQUAL note on. */
2628 result = gen_reg_rtx (word_mode);
2629
2630 start_sequence ();
2631
2632 /* If the high word is not equal to zero,
2633 then clz of the full value is clz of the high word. */
2634 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2635 word_mode, true, hi0_label);
2636
2637 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2638 if (!temp)
2639 goto fail;
2640
2641 if (temp != result)
2642 convert_move (result, temp, true);
2643
2644 emit_jump_insn (gen_jump (after_label));
2645 emit_barrier ();
2646
2647 /* Else clz of the full value is clz of the low word plus the number
2648 of bits in the high word. */
2649 emit_label (hi0_label);
2650
2651 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2652 if (!temp)
2653 goto fail;
2654 temp = expand_binop (word_mode, add_optab, temp,
2655 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2656 result, true, OPTAB_DIRECT);
2657 if (!temp)
2658 goto fail;
2659 if (temp != result)
2660 convert_move (result, temp, true);
2661
2662 emit_label (after_label);
2663 convert_move (target, result, true);
2664
2665 seq = get_insns ();
2666 end_sequence ();
2667
2668 add_equal_note (seq, target, CLZ, xop0, 0);
2669 emit_insn (seq);
2670 return target;
2671
2672 fail:
2673 end_sequence ();
2674 return 0;
2675 }
2676
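/* A minimal sketch of the two-halves scheme in expand_doubleword_clz,
   assuming 32-bit words; the name and types are illustrative.  If the high
   word is nonzero the answer is clz of the high word, otherwise it is clz
   of the low word plus the number of bits in a word.  Undefined for 0.  */

static int
sketch_doubleword_clz (unsigned long long x)
{
  unsigned int hi = (unsigned int) (x >> 32);
  unsigned int lo = (unsigned int) x;

  if (hi != 0)
    return __builtin_clz (hi);
  return __builtin_clz (lo) + 32;
}
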
2677 /* Try calculating
2678 (bswap:narrow x)
2679 as
2680 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2681 static rtx
2682 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2683 {
2684 enum mode_class class = GET_MODE_CLASS (mode);
2685 enum machine_mode wider_mode;
2686 rtx x, last;
2687
2688 if (!CLASS_HAS_WIDER_MODES_P (class))
2689 return NULL_RTX;
2690
2691 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2692 wider_mode != VOIDmode;
2693 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2694 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2695 goto found;
2696 return NULL_RTX;
2697
2698 found:
2699 last = get_last_insn ();
2700
2701 x = widen_operand (op0, wider_mode, mode, true, true);
2702 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2703
2704 if (x != 0)
2705 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2706 size_int (GET_MODE_BITSIZE (wider_mode)
2707 - GET_MODE_BITSIZE (mode)),
2708 NULL_RTX, true);
2709
2710 if (x != 0)
2711 {
2712 if (target == 0)
2713 target = gen_reg_rtx (mode);
2714 emit_move_insn (target, gen_lowpart (mode, x));
2715 }
2716 else
2717 delete_insns_since (last);
2718
2719 return target;
2720 }
2721
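/* A minimal sketch of the identity used by widen_bswap above, assuming a
   32-bit wide mode, a 16-bit narrow mode, and the availability of the
   __builtin_bswap32 builtin; the name is illustrative.  Byte-swapping in
   the wide mode and shifting right by the width difference leaves the
   swapped narrow value in the low part.  */

static unsigned short
sketch_narrow_bswap (unsigned short x)
{
  return (unsigned short) (__builtin_bswap32 ((unsigned int) x) >> 16);
}
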
2722 /* Try calculating bswap as two bswaps of two word-sized operands. */
2723
2724 static rtx
2725 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2726 {
2727 rtx t0, t1;
2728
2729 t1 = expand_unop (word_mode, bswap_optab,
2730 operand_subword_force (op, 0, mode), NULL_RTX, true);
2731 t0 = expand_unop (word_mode, bswap_optab,
2732 operand_subword_force (op, 1, mode), NULL_RTX, true);
2733
2734 if (target == 0)
2735 target = gen_reg_rtx (mode);
2736 if (REG_P (target))
2737 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2738 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2739 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2740
2741 return target;
2742 }
2743
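/* A minimal sketch of expand_doubleword_bswap above, assuming 32-bit words
   and the __builtin_bswap32 builtin; the name is illustrative.  Each half
   is byte-swapped on its own and the two halves are then exchanged.  */

static unsigned long long
sketch_doubleword_bswap (unsigned long long x)
{
  unsigned int lo = (unsigned int) x;
  unsigned int hi = (unsigned int) (x >> 32);

  return ((unsigned long long) __builtin_bswap32 (lo) << 32)
	 | __builtin_bswap32 (hi);
}
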
2744 /* Try calculating (parity x) as (and (popcount x) 1), where
2745 popcount can also be done in a wider mode. */
2746 static rtx
2747 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2748 {
2749 enum mode_class class = GET_MODE_CLASS (mode);
2750 if (CLASS_HAS_WIDER_MODES_P (class))
2751 {
2752 enum machine_mode wider_mode;
2753 for (wider_mode = mode; wider_mode != VOIDmode;
2754 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2755 {
2756 if (optab_handler (popcount_optab, wider_mode)->insn_code
2757 != CODE_FOR_nothing)
2758 {
2759 rtx xop0, temp, last;
2760
2761 last = get_last_insn ();
2762
2763 if (target == 0)
2764 target = gen_reg_rtx (mode);
2765 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2766 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2767 true);
2768 if (temp != 0)
2769 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2770 target, true, OPTAB_DIRECT);
2771 if (temp == 0)
2772 delete_insns_since (last);
2773
2774 return temp;
2775 }
2776 }
2777 }
2778 return 0;
2779 }
2780
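/* A minimal sketch of the (and (popcount x) 1) identity used by
   expand_parity above; the name is illustrative.  */

static int
sketch_parity (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}
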
2781 /* Try calculating ctz(x) as K - clz(x & -x),
2782 where K is GET_MODE_BITSIZE(mode) - 1.
2783
2784 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2785 don't have to worry about what the hardware does in that case. (If
2786 the clz instruction produces the usual value at 0, which is K, the
2787 result of this code sequence will be -1; expand_ffs, below, relies
2788 on this. It might be nice to have it be K instead, for consistency
2789 with the (very few) processors that provide a ctz with a defined
2790 value, but that would take one more instruction, and it would be
2791    less convenient for expand_ffs anyway.)  */
2792
2793 static rtx
2794 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2795 {
2796 rtx seq, temp;
2797
2798 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2799 return 0;
2800
2801 start_sequence ();
2802
2803 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2804 if (temp)
2805 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2806 true, OPTAB_DIRECT);
2807 if (temp)
2808 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2809 if (temp)
2810 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2811 temp, target,
2812 true, OPTAB_DIRECT);
2813 if (temp == 0)
2814 {
2815 end_sequence ();
2816 return 0;
2817 }
2818
2819 seq = get_insns ();
2820 end_sequence ();
2821
2822 add_equal_note (seq, temp, CTZ, op0, 0);
2823 emit_insn (seq);
2824 return temp;
2825 }
2826
2827
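/* A minimal sketch of the K - clz (x & -x) identity used by expand_ctz
   above, assuming a 32-bit unsigned int; the name is illustrative.
   x & -x isolates the lowest set bit, so its clz pins down the position
   of that bit from the other end.  Undefined for x == 0.  */

static int
sketch_ctz (unsigned int x)
{
  return (32 - 1) - __builtin_clz (x & -x);
}
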
2828 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2829    else with the clz-based sequence used by expand_ctz above.
2830
2831 The ffs builtin promises to return zero for a zero value and ctz/clz
2832 may have an undefined value in that case. If they do not give us a
2833 convenient value, we have to generate a test and branch. */
2834 static rtx
2835 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2836 {
2837 HOST_WIDE_INT val = 0;
2838 bool defined_at_zero = false;
2839 rtx temp, seq;
2840
2841 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2842 {
2843 start_sequence ();
2844
2845 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2846 if (!temp)
2847 goto fail;
2848
2849 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2850 }
2851 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2852 {
2853 start_sequence ();
2854 temp = expand_ctz (mode, op0, 0);
2855 if (!temp)
2856 goto fail;
2857
2858 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2859 {
2860 defined_at_zero = true;
2861 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2862 }
2863 }
2864 else
2865 return 0;
2866
2867 if (defined_at_zero && val == -1)
2868 /* No correction needed at zero. */;
2869 else
2870 {
2871 /* We don't try to do anything clever with the situation found
2872 	 on some processors (e.g. Alpha) where ctz(0:mode) ==
2873 bitsize(mode). If someone can think of a way to send N to -1
2874 and leave alone all values in the range 0..N-1 (where N is a
2875 power of two), cheaper than this test-and-branch, please add it.
2876
2877 The test-and-branch is done after the operation itself, in case
2878 the operation sets condition codes that can be recycled for this.
2879 (This is true on i386, for instance.) */
2880
2881 rtx nonzero_label = gen_label_rtx ();
2882 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2883 mode, true, nonzero_label);
2884
2885 convert_move (temp, GEN_INT (-1), false);
2886 emit_label (nonzero_label);
2887 }
2888
2889 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2890 to produce a value in the range 0..bitsize. */
2891 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2892 target, false, OPTAB_DIRECT);
2893 if (!temp)
2894 goto fail;
2895
2896 seq = get_insns ();
2897 end_sequence ();
2898
2899 add_equal_note (seq, temp, FFS, op0, 0);
2900 emit_insn (seq);
2901 return temp;
2902
2903 fail:
2904 end_sequence ();
2905 return 0;
2906 }
2907
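/* A minimal sketch of the correction performed by expand_ffs above,
   assuming a 32-bit unsigned int and a ctz that is undefined at zero;
   the name is illustrative.  The test-and-branch emitted by the expander
   corresponds to the conditional below, and the final add of 1 maps the
   ctz range 0..31 onto the ffs range 1..32.  */

static int
sketch_ffs (unsigned int x)
{
  if (x == 0)
    return 0;
  return __builtin_ctz (x) + 1;
}
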
2908 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2909 conditions, VAL may already be a SUBREG against which we cannot generate
2910 a further SUBREG. In this case, we expect forcing the value into a
2911 register will work around the situation. */
2912
2913 static rtx
2914 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2915 enum machine_mode imode)
2916 {
2917 rtx ret;
2918 ret = lowpart_subreg (omode, val, imode);
2919 if (ret == NULL)
2920 {
2921 val = force_reg (imode, val);
2922 ret = lowpart_subreg (omode, val, imode);
2923 gcc_assert (ret != NULL);
2924 }
2925 return ret;
2926 }
2927
2928 /* Expand a floating point absolute value or negation operation via a
2929 logical operation on the sign bit. */
2930
2931 static rtx
2932 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2933 rtx op0, rtx target)
2934 {
2935 const struct real_format *fmt;
2936 int bitpos, word, nwords, i;
2937 enum machine_mode imode;
2938 HOST_WIDE_INT hi, lo;
2939 rtx temp, insns;
2940
2941 /* The format has to have a simple sign bit. */
2942 fmt = REAL_MODE_FORMAT (mode);
2943 if (fmt == NULL)
2944 return NULL_RTX;
2945
2946 bitpos = fmt->signbit_rw;
2947 if (bitpos < 0)
2948 return NULL_RTX;
2949
2950 /* Don't create negative zeros if the format doesn't support them. */
2951 if (code == NEG && !fmt->has_signed_zero)
2952 return NULL_RTX;
2953
2954 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2955 {
2956 imode = int_mode_for_mode (mode);
2957 if (imode == BLKmode)
2958 return NULL_RTX;
2959 word = 0;
2960 nwords = 1;
2961 }
2962 else
2963 {
2964 imode = word_mode;
2965
2966 if (FLOAT_WORDS_BIG_ENDIAN)
2967 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2968 else
2969 word = bitpos / BITS_PER_WORD;
2970 bitpos = bitpos % BITS_PER_WORD;
2971 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2972 }
2973
2974 if (bitpos < HOST_BITS_PER_WIDE_INT)
2975 {
2976 hi = 0;
2977 lo = (HOST_WIDE_INT) 1 << bitpos;
2978 }
2979 else
2980 {
2981 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2982 lo = 0;
2983 }
2984 if (code == ABS)
2985 lo = ~lo, hi = ~hi;
2986
2987 if (target == 0 || target == op0)
2988 target = gen_reg_rtx (mode);
2989
2990 if (nwords > 1)
2991 {
2992 start_sequence ();
2993
2994 for (i = 0; i < nwords; ++i)
2995 {
2996 rtx targ_piece = operand_subword (target, i, 1, mode);
2997 rtx op0_piece = operand_subword_force (op0, i, mode);
2998
2999 if (i == word)
3000 {
3001 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3002 op0_piece,
3003 immed_double_const (lo, hi, imode),
3004 targ_piece, 1, OPTAB_LIB_WIDEN);
3005 if (temp != targ_piece)
3006 emit_move_insn (targ_piece, temp);
3007 }
3008 else
3009 emit_move_insn (targ_piece, op0_piece);
3010 }
3011
3012 insns = get_insns ();
3013 end_sequence ();
3014
3015 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
3016 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
3017 }
3018 else
3019 {
3020 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3021 gen_lowpart (imode, op0),
3022 immed_double_const (lo, hi, imode),
3023 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3024 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3025
3026 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3027 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3028 }
3029
3030 return target;
3031 }
3032
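/* A minimal sketch of the sign-bit trick used by expand_absneg_bit above,
   assuming IEEE single precision, a 32-bit unsigned int of the same size,
   and a sign bit at position 31; the names are illustrative.  Negation
   XORs the sign bit, absolute value ANDs it away.  */

static float
sketch_float_neg (float x)
{
  union { float f; unsigned int i; } u;

  u.f = x;
  u.i ^= 0x80000000u;	/* Flip the sign bit: NEG.  */
  return u.f;
}

static float
sketch_float_abs (float x)
{
  union { float f; unsigned int i; } u;

  u.f = x;
  u.i &= ~0x80000000u;	/* Clear the sign bit: ABS.  */
  return u.f;
}
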
3033 /* As expand_unop, but will fail rather than attempt the operation in a
3034 different mode or with a libcall. */
3035 static rtx
3036 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3037 int unsignedp)
3038 {
3039 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3040 {
3041 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3042 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3043 rtx xop0 = op0;
3044 rtx last = get_last_insn ();
3045 rtx pat, temp;
3046
3047 if (target)
3048 temp = target;
3049 else
3050 temp = gen_reg_rtx (mode);
3051
3052 if (GET_MODE (xop0) != VOIDmode
3053 && GET_MODE (xop0) != mode0)
3054 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3055
3056 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3057
3058 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3059 xop0 = copy_to_mode_reg (mode0, xop0);
3060
3061 if (!insn_data[icode].operand[0].predicate (temp, mode))
3062 temp = gen_reg_rtx (mode);
3063
3064 pat = GEN_FCN (icode) (temp, xop0);
3065 if (pat)
3066 {
3067 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3068 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3069 {
3070 delete_insns_since (last);
3071 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3072 }
3073
3074 emit_insn (pat);
3075
3076 return temp;
3077 }
3078 else
3079 delete_insns_since (last);
3080 }
3081 return 0;
3082 }
3083
3084 /* Generate code to perform an operation specified by UNOPTAB
3085 on operand OP0, with result having machine-mode MODE.
3086
3087 UNSIGNEDP is for the case where we have to widen the operands
3088 to perform the operation. It says to use zero-extension.
3089
3090 If TARGET is nonzero, the value
3091 is generated there, if it is convenient to do so.
3092 In all cases an rtx is returned for the locus of the value;
3093 this may or may not be TARGET. */
3094
3095 rtx
3096 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3097 int unsignedp)
3098 {
3099 enum mode_class class = GET_MODE_CLASS (mode);
3100 enum machine_mode wider_mode;
3101 rtx temp;
3102 rtx libfunc;
3103
3104 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3105 if (temp)
3106 return temp;
3107
3108 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3109
3110 /* Widening (or narrowing) clz needs special treatment. */
3111 if (unoptab == clz_optab)
3112 {
3113 temp = widen_clz (mode, op0, target);
3114 if (temp)
3115 return temp;
3116
3117 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3118 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3119 {
3120 temp = expand_doubleword_clz (mode, op0, target);
3121 if (temp)
3122 return temp;
3123 }
3124
3125 goto try_libcall;
3126 }
3127
3128 /* Widening (or narrowing) bswap needs special treatment. */
3129 if (unoptab == bswap_optab)
3130 {
3131 temp = widen_bswap (mode, op0, target);
3132 if (temp)
3133 return temp;
3134
3135 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3136 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3137 {
3138 temp = expand_doubleword_bswap (mode, op0, target);
3139 if (temp)
3140 return temp;
3141 }
3142
3143 goto try_libcall;
3144 }
3145
3146 if (CLASS_HAS_WIDER_MODES_P (class))
3147 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3148 wider_mode != VOIDmode;
3149 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3150 {
3151 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3152 {
3153 rtx xop0 = op0;
3154 rtx last = get_last_insn ();
3155
3156 /* For certain operations, we need not actually extend
3157 the narrow operand, as long as we will truncate the
3158 results to the same narrowness. */
3159
3160 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3161 (unoptab == neg_optab
3162 || unoptab == one_cmpl_optab)
3163 && class == MODE_INT);
3164
3165 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3166 unsignedp);
3167
3168 if (temp)
3169 {
3170 if (class != MODE_INT
3171 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3172 GET_MODE_BITSIZE (wider_mode)))
3173 {
3174 if (target == 0)
3175 target = gen_reg_rtx (mode);
3176 convert_move (target, temp, 0);
3177 return target;
3178 }
3179 else
3180 return gen_lowpart (mode, temp);
3181 }
3182 else
3183 delete_insns_since (last);
3184 }
3185 }
3186
3187 /* These can be done a word at a time. */
3188 if (unoptab == one_cmpl_optab
3189 && class == MODE_INT
3190 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3191 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3192 {
3193 int i;
3194 rtx insns;
3195
3196 if (target == 0 || target == op0)
3197 target = gen_reg_rtx (mode);
3198
3199 start_sequence ();
3200
3201 /* Do the actual arithmetic. */
3202 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3203 {
3204 rtx target_piece = operand_subword (target, i, 1, mode);
3205 rtx x = expand_unop (word_mode, unoptab,
3206 operand_subword_force (op0, i, mode),
3207 target_piece, unsignedp);
3208
3209 if (target_piece != x)
3210 emit_move_insn (target_piece, x);
3211 }
3212
3213 insns = get_insns ();
3214 end_sequence ();
3215
3216 emit_no_conflict_block (insns, target, op0, NULL_RTX,
3217 gen_rtx_fmt_e (unoptab->code, mode,
3218 copy_rtx (op0)));
3219 return target;
3220 }
3221
3222 if (unoptab->code == NEG)
3223 {
3224 /* Try negating floating point values by flipping the sign bit. */
3225 if (SCALAR_FLOAT_MODE_P (mode))
3226 {
3227 temp = expand_absneg_bit (NEG, mode, op0, target);
3228 if (temp)
3229 return temp;
3230 }
3231
3232 /* If there is no negation pattern, and we have no negative zero,
3233 try subtracting from zero. */
3234 if (!HONOR_SIGNED_ZEROS (mode))
3235 {
3236 temp = expand_binop (mode, (unoptab == negv_optab
3237 ? subv_optab : sub_optab),
3238 CONST0_RTX (mode), op0, target,
3239 unsignedp, OPTAB_DIRECT);
3240 if (temp)
3241 return temp;
3242 }
3243 }
3244
3245 /* Try calculating parity (x) as popcount (x) % 2. */
3246 if (unoptab == parity_optab)
3247 {
3248 temp = expand_parity (mode, op0, target);
3249 if (temp)
3250 return temp;
3251 }
3252
3253 /* Try implementing ffs (x) in terms of clz (x). */
3254 if (unoptab == ffs_optab)
3255 {
3256 temp = expand_ffs (mode, op0, target);
3257 if (temp)
3258 return temp;
3259 }
3260
3261 /* Try implementing ctz (x) in terms of clz (x). */
3262 if (unoptab == ctz_optab)
3263 {
3264 temp = expand_ctz (mode, op0, target);
3265 if (temp)
3266 return temp;
3267 }
3268
3269 try_libcall:
3270 /* Now try a library call in this mode. */
3271 libfunc = optab_libfunc (unoptab, mode);
3272 if (libfunc)
3273 {
3274 rtx insns;
3275 rtx value;
3276 enum machine_mode outmode = mode;
3277
3278 /* All of these functions return small values. Thus we choose to
3279 have them return something that isn't a double-word. */
3280 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3281 || unoptab == popcount_optab || unoptab == parity_optab)
3282 outmode
3283 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3284
3285 start_sequence ();
3286
3287 /* Pass 1 for NO_QUEUE so we don't lose any increments
3288 if the libcall is cse'd or moved. */
3289 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3290 1, op0, mode);
3291 insns = get_insns ();
3292 end_sequence ();
3293
3294 target = gen_reg_rtx (outmode);
3295 emit_libcall_block (insns, target, value,
3296 gen_rtx_fmt_e (unoptab->code, outmode, op0));
3297
3298 return target;
3299 }
3300
3301 /* It can't be done in this mode. Can we do it in a wider mode? */
3302
3303 if (CLASS_HAS_WIDER_MODES_P (class))
3304 {
3305 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3306 wider_mode != VOIDmode;
3307 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3308 {
3309 if ((optab_handler (unoptab, wider_mode)->insn_code
3310 != CODE_FOR_nothing)
3311 || optab_libfunc (unoptab, wider_mode))
3312 {
3313 rtx xop0 = op0;
3314 rtx last = get_last_insn ();
3315
3316 /* For certain operations, we need not actually extend
3317 the narrow operand, as long as we will truncate the
3318 results to the same narrowness. */
3319
3320 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3321 (unoptab == neg_optab
3322 || unoptab == one_cmpl_optab)
3323 && class == MODE_INT);
3324
3325 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3326 unsignedp);
3327
3328 /* If we are generating clz using wider mode, adjust the
3329 result. */
3330 if (unoptab == clz_optab && temp != 0)
3331 temp = expand_binop (wider_mode, sub_optab, temp,
3332 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3333 - GET_MODE_BITSIZE (mode)),
3334 target, true, OPTAB_DIRECT);
3335
3336 if (temp)
3337 {
3338 if (class != MODE_INT)
3339 {
3340 if (target == 0)
3341 target = gen_reg_rtx (mode);
3342 convert_move (target, temp, 0);
3343 return target;
3344 }
3345 else
3346 return gen_lowpart (mode, temp);
3347 }
3348 else
3349 delete_insns_since (last);
3350 }
3351 }
3352 }
3353
3354 /* One final attempt at implementing negation via subtraction,
3355 this time allowing widening of the operand. */
3356 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3357 {
3358 rtx temp;
3359 temp = expand_binop (mode,
3360 unoptab == negv_optab ? subv_optab : sub_optab,
3361 CONST0_RTX (mode), op0,
3362 target, unsignedp, OPTAB_LIB_WIDEN);
3363 if (temp)
3364 return temp;
3365 }
3366
3367 return 0;
3368 }
3369 \f
3370 /* Emit code to compute the absolute value of OP0, with result to
3371 TARGET if convenient. (TARGET may be 0.) The return value says
3372 where the result actually is to be found.
3373
3374 MODE is the mode of the operand; the mode of the result is
3375 different but can be deduced from MODE.
3376
3377 */
3378
3379 rtx
3380 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3381 int result_unsignedp)
3382 {
3383 rtx temp;
3384
3385 if (! flag_trapv)
3386 result_unsignedp = 1;
3387
3388 /* First try to do it with a special abs instruction. */
3389 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3390 op0, target, 0);
3391 if (temp != 0)
3392 return temp;
3393
3394 /* For floating point modes, try clearing the sign bit. */
3395 if (SCALAR_FLOAT_MODE_P (mode))
3396 {
3397 temp = expand_absneg_bit (ABS, mode, op0, target);
3398 if (temp)
3399 return temp;
3400 }
3401
3402 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3403 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3404 && !HONOR_SIGNED_ZEROS (mode))
3405 {
3406 rtx last = get_last_insn ();
3407
3408 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3409 if (temp != 0)
3410 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3411 OPTAB_WIDEN);
3412
3413 if (temp != 0)
3414 return temp;
3415
3416 delete_insns_since (last);
3417 }
3418
3419 /* If this machine has expensive jumps, we can do integer absolute
3420 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3421 where W is the width of MODE. */
3422
3423 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3424 {
3425 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3426 size_int (GET_MODE_BITSIZE (mode) - 1),
3427 NULL_RTX, 0);
3428
3429 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3430 OPTAB_LIB_WIDEN);
3431 if (temp != 0)
3432 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3433 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3434
3435 if (temp != 0)
3436 return temp;
3437 }
3438
3439 return NULL_RTX;
3440 }
3441
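/* A minimal sketch of the branch-free absolute value described above,
   assuming a 32-bit int and an arithmetic right shift; the name is
   illustrative.  The shift broadcasts the sign bit, the XOR conditionally
   complements, and the subtraction adds back 1 when x was negative
   (as with the expander's sequence, INT_MIN is left unchanged).  */

static int
sketch_branchless_abs (int x)
{
  int sign = x >> (32 - 1);	/* 0 if x >= 0, -1 if x < 0.  */

  return (x ^ sign) - sign;
}
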
3442 rtx
3443 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3444 int result_unsignedp, int safe)
3445 {
3446 rtx temp, op1;
3447
3448 if (! flag_trapv)
3449 result_unsignedp = 1;
3450
3451 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3452 if (temp != 0)
3453 return temp;
3454
3455 /* If that does not win, use conditional jump and negate. */
3456
3457   /* It is safe to use the target if it is the same
3458      as the source and is also a pseudo register.  */
3459 if (op0 == target && REG_P (op0)
3460 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3461 safe = 1;
3462
3463 op1 = gen_label_rtx ();
3464 if (target == 0 || ! safe
3465 || GET_MODE (target) != mode
3466 || (MEM_P (target) && MEM_VOLATILE_P (target))
3467 || (REG_P (target)
3468 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3469 target = gen_reg_rtx (mode);
3470
3471 emit_move_insn (target, op0);
3472 NO_DEFER_POP;
3473
3474 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3475 NULL_RTX, NULL_RTX, op1);
3476
3477 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3478 target, target, 0);
3479 if (op0 != target)
3480 emit_move_insn (target, op0);
3481 emit_label (op1);
3482 OK_DEFER_POP;
3483 return target;
3484 }
3485
3486 /* A subroutine of expand_copysign, perform the copysign operation using the
3487 abs and neg primitives advertised to exist on the target. The assumption
3488 is that we have a split register file, and leaving op0 in fp registers,
3489 and not playing with subregs so much, will help the register allocator. */
3490
3491 static rtx
3492 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3493 int bitpos, bool op0_is_abs)
3494 {
3495 enum machine_mode imode;
3496 int icode;
3497 rtx sign, label;
3498
3499 if (target == op1)
3500 target = NULL_RTX;
3501
3502 /* Check if the back end provides an insn that handles signbit for the
3503 argument's mode. */
3504 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3505 if (icode != CODE_FOR_nothing)
3506 {
3507 imode = insn_data[icode].operand[0].mode;
3508 sign = gen_reg_rtx (imode);
3509 emit_unop_insn (icode, sign, op1, UNKNOWN);
3510 }
3511 else
3512 {
3513 HOST_WIDE_INT hi, lo;
3514
3515 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3516 {
3517 imode = int_mode_for_mode (mode);
3518 if (imode == BLKmode)
3519 return NULL_RTX;
3520 op1 = gen_lowpart (imode, op1);
3521 }
3522 else
3523 {
3524 int word;
3525
3526 imode = word_mode;
3527 if (FLOAT_WORDS_BIG_ENDIAN)
3528 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3529 else
3530 word = bitpos / BITS_PER_WORD;
3531 bitpos = bitpos % BITS_PER_WORD;
3532 op1 = operand_subword_force (op1, word, mode);
3533 }
3534
3535 if (bitpos < HOST_BITS_PER_WIDE_INT)
3536 {
3537 hi = 0;
3538 lo = (HOST_WIDE_INT) 1 << bitpos;
3539 }
3540 else
3541 {
3542 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3543 lo = 0;
3544 }
3545
3546 sign = gen_reg_rtx (imode);
3547 sign = expand_binop (imode, and_optab, op1,
3548 immed_double_const (lo, hi, imode),
3549 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3550 }
3551
3552 if (!op0_is_abs)
3553 {
3554 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3555 if (op0 == NULL)
3556 return NULL_RTX;
3557 target = op0;
3558 }
3559 else
3560 {
3561 if (target == NULL_RTX)
3562 target = copy_to_reg (op0);
3563 else
3564 emit_move_insn (target, op0);
3565 }
3566
3567 label = gen_label_rtx ();
3568 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3569
3570 if (GET_CODE (op0) == CONST_DOUBLE)
3571 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3572 else
3573 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3574 if (op0 != target)
3575 emit_move_insn (target, op0);
3576
3577 emit_label (label);
3578
3579 return target;
3580 }
3581
3582
3583 /* A subroutine of expand_copysign, perform the entire copysign operation
3584 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3585 is true if op0 is known to have its sign bit clear. */
3586
3587 static rtx
3588 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3589 int bitpos, bool op0_is_abs)
3590 {
3591 enum machine_mode imode;
3592 HOST_WIDE_INT hi, lo;
3593 int word, nwords, i;
3594 rtx temp, insns;
3595
3596 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3597 {
3598 imode = int_mode_for_mode (mode);
3599 if (imode == BLKmode)
3600 return NULL_RTX;
3601 word = 0;
3602 nwords = 1;
3603 }
3604 else
3605 {
3606 imode = word_mode;
3607
3608 if (FLOAT_WORDS_BIG_ENDIAN)
3609 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3610 else
3611 word = bitpos / BITS_PER_WORD;
3612 bitpos = bitpos % BITS_PER_WORD;
3613 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3614 }
3615
3616 if (bitpos < HOST_BITS_PER_WIDE_INT)
3617 {
3618 hi = 0;
3619 lo = (HOST_WIDE_INT) 1 << bitpos;
3620 }
3621 else
3622 {
3623 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3624 lo = 0;
3625 }
3626
3627 if (target == 0 || target == op0 || target == op1)
3628 target = gen_reg_rtx (mode);
3629
3630 if (nwords > 1)
3631 {
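      /* Multi-word case: handle the value one word at a time inside a
	 no-conflict block.  Only the word holding the sign bit needs the
	 mask arithmetic; the other words are copied from OP0 unchanged.  */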
3632 start_sequence ();
3633
3634 for (i = 0; i < nwords; ++i)
3635 {
3636 rtx targ_piece = operand_subword (target, i, 1, mode);
3637 rtx op0_piece = operand_subword_force (op0, i, mode);
3638
3639 if (i == word)
3640 {
3641 if (!op0_is_abs)
3642 op0_piece = expand_binop (imode, and_optab, op0_piece,
3643 immed_double_const (~lo, ~hi, imode),
3644 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3645
3646 op1 = expand_binop (imode, and_optab,
3647 operand_subword_force (op1, i, mode),
3648 immed_double_const (lo, hi, imode),
3649 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3650
3651 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3652 targ_piece, 1, OPTAB_LIB_WIDEN);
3653 if (temp != targ_piece)
3654 emit_move_insn (targ_piece, temp);
3655 }
3656 else
3657 emit_move_insn (targ_piece, op0_piece);
3658 }
3659
3660 insns = get_insns ();
3661 end_sequence ();
3662
3663 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3664 }
3665 else
3666 {
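      /* Single-word case: compute (OP1 & MASK) | (OP0 & ~MASK) in the
	 corresponding integer mode, where MASK has only the sign bit set.  */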
3667 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3668 immed_double_const (lo, hi, imode),
3669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3670
3671 op0 = gen_lowpart (imode, op0);
3672 if (!op0_is_abs)
3673 op0 = expand_binop (imode, and_optab, op0,
3674 immed_double_const (~lo, ~hi, imode),
3675 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3676
3677 temp = expand_binop (imode, ior_optab, op0, op1,
3678 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3679 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3680 }
3681
3682 return target;
3683 }
3684
3685 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3686 scalar floating point mode. Return NULL if we do not know how to
3687 expand the operation inline. */
3688
3689 rtx
3690 expand_copysign (rtx op0, rtx op1, rtx target)
3691 {
3692 enum machine_mode mode = GET_MODE (op0);
3693 const struct real_format *fmt;
3694 bool op0_is_abs;
3695 rtx temp;
3696
3697 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3698 gcc_assert (GET_MODE (op1) == mode);
3699
3700 /* First try to do it with a special instruction. */
3701 temp = expand_binop (mode, copysign_optab, op0, op1,
3702 target, 0, OPTAB_DIRECT);
3703 if (temp)
3704 return temp;
3705
3706 fmt = REAL_MODE_FORMAT (mode);
3707 if (fmt == NULL || !fmt->has_signed_zero)
3708 return NULL_RTX;
3709
3710 op0_is_abs = false;
3711 if (GET_CODE (op0) == CONST_DOUBLE)
3712 {
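      /* Fold the absolute value of a constant at compile time; either way
	 the sign bit of OP0 is known to be clear afterwards.  */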
3713 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3714 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3715 op0_is_abs = true;
3716 }
3717
3718 if (fmt->signbit_ro >= 0
3719 && (GET_CODE (op0) == CONST_DOUBLE
3720 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3721 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3722 {
3723 temp = expand_copysign_absneg (mode, op0, op1, target,
3724 fmt->signbit_ro, op0_is_abs);
3725 if (temp)
3726 return temp;
3727 }
3728
3729 if (fmt->signbit_rw < 0)
3730 return NULL_RTX;
3731 return expand_copysign_bit (mode, op0, op1, target,
3732 fmt->signbit_rw, op0_is_abs);
3733 }
3734 \f
3735 /* Generate an instruction whose insn-code is INSN_CODE,
3736 with two operands: an output TARGET and an input OP0.
3737 TARGET *must* be nonzero, and the output is always stored there.
3738 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3739 the value that is stored into TARGET. */
3740
3741 void
3742 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3743 {
3744 rtx temp;
3745 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3746 rtx pat;
3747
3748 temp = target;
3749
3750 /* Now, if the insn does not accept our operands, put them into pseudos.  */
3751
3752 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3753 op0 = copy_to_mode_reg (mode0, op0);
3754
3755 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3756 temp = gen_reg_rtx (GET_MODE (temp));
3757
3758 pat = GEN_FCN (icode) (temp, op0);
3759
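  /* If the expander produced a sequence of more than one insn, attach a
     REG_EQUAL note describing the overall operation, provided CODE is
     known.  */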
3760 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3761 add_equal_note (pat, temp, code, op0, NULL_RTX);
3762
3763 emit_insn (pat);
3764
3765 if (temp != target)
3766 emit_move_insn (target, temp);
3767 }
3768 \f
3769 struct no_conflict_data
3770 {
3771 rtx target, first, insn;
3772 bool must_stay;
3773 };
3774
3775 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3776 Set P->must_stay if the currently examined clobber / store has to stay
3777 in the list of insns that constitute the actual no_conflict block /
3778 libcall block. */
3779 static void
3780 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3781 {
3782 struct no_conflict_data *p= p0;
3783
3784 /* If this insn directly contributes to setting the target, it must stay.  */
3785 if (reg_overlap_mentioned_p (p->target, dest))
3786 p->must_stay = true;
3787 /* If we haven't committed to keeping any other insns in the list yet,
3788 there is nothing more to check. */
3789 else if (p->insn == p->first)
3790 return;
3791 /* If this insn sets / clobbers a register that feeds one of the insns
3792 already in the list, this insn has to stay too. */
3793 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3794 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3795 || reg_used_between_p (dest, p->first, p->insn)
3796 /* Likewise if this insn depends on a register set by a previous
3797 insn in the list, or if it sets a result (presumably a hard
3798 register) that is set or clobbered by a previous insn.
3799 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3800 SET_DEST perform the former check on the address, and the latter
3801 check on the MEM. */
3802 || (GET_CODE (set) == SET
3803 && (modified_in_p (SET_SRC (set), p->first)
3804 || modified_in_p (SET_DEST (set), p->first)
3805 || modified_between_p (SET_SRC (set), p->first, p->insn)
3806 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3807 p->must_stay = true;
3808 }
3809
3810 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3811 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3812 is possible to do so. */
3813
3814 void
3815 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3816 {
3817 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3818 {
3819 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3820 encapsulated region would not be in one basic block, i.e. when
3821 there is a control_flow_insn_p insn between FIRST and LAST. */
3822 bool attach_libcall_retval_notes = true;
3823 rtx insn, next = NEXT_INSN (last);
3824
3825 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3826 if (control_flow_insn_p (insn))
3827 {
3828 attach_libcall_retval_notes = false;
3829 break;
3830 }
3831
3832 if (attach_libcall_retval_notes)
3833 {
3834 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3835 REG_NOTES (first));
3836 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3837 REG_NOTES (last));
3838 }
3839 }
3840 }
3841
3842 /* Emit code to perform a series of operations on a multi-word quantity, one
3843 word at a time.
3844
3845 Such a block is preceded by a CLOBBER of the output, consists of multiple
3846 insns, each setting one word of the output, and followed by a SET copying
3847 the output to itself.
3848
3849 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3850 note indicating that it doesn't conflict with the (also multi-word)
3851 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3852 notes.
3853
3854 INSNS is a block of code generated to perform the operation, not including
3855 the CLOBBER and final copy. All insns that compute intermediate values
3856 are first emitted, followed by the block as described above.
3857
3858 TARGET, OP0, and OP1 are the output and inputs of the operations,
3859 respectively. OP1 may be zero for a unary operation.
3860
3861 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3862 on the last insn.
3863
3864 If TARGET is not a register, INSNS is simply emitted with no special
3865 processing. Likewise if anything in INSNS is not an INSN or if
3866 there is a libcall block inside INSNS.
3867
3868 The final insn emitted is returned. */
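/* For illustration only (a rough sketch, not the exact RTL of any particular
   target), a two-word operation emitted through this function has the shape:

	(clobber TARGET)
	(set <word 0 of TARGET> ...)	; REG_NO_CONFLICT notes for OP0/OP1
	(set <word 1 of TARGET> ...)	; REG_NO_CONFLICT notes for OP0/OP1
	(set TARGET TARGET)

   with a REG_LIBCALL note on the first insn of the block and a REG_RETVAL
   note on the final copy (see maybe_encapsulate_block above).  */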
3869
3870 rtx
3871 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3872 {
3873 rtx prev, next, first, last, insn;
3874
3875 if (!REG_P (target) || reload_in_progress)
3876 return emit_insn (insns);
3877 else
3878 for (insn = insns; insn; insn = NEXT_INSN (insn))
3879 if (!NONJUMP_INSN_P (insn)
3880 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3881 return emit_insn (insns);
3882
3883 /* First emit all insns that do not store into words of the output and remove
3884 these from the list. */
3885 for (insn = insns; insn; insn = next)
3886 {
3887 rtx note;
3888 struct no_conflict_data data;
3889
3890 next = NEXT_INSN (insn);
3891
3892 /* Some ports (cris) create libcall regions on their own.  We must
3893 avoid any potential nesting of LIBCALLs. */
3894 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3895 remove_note (insn, note);
3896 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3897 remove_note (insn, note);
3898
3899 data.target = target;
3900 data.first = insns;
3901 data.insn = insn;
3902 data.must_stay = 0;
3903 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3904 if (! data.must_stay)
3905 {
3906 if (PREV_INSN (insn))
3907 NEXT_INSN (PREV_INSN (insn)) = next;
3908 else
3909 insns = next;
3910
3911 if (next)
3912 PREV_INSN (next) = PREV_INSN (insn);
3913
3914 add_insn (insn);
3915 }
3916 }
3917
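  /* Remember the last insn emitted so far; everything emitted from here on
     forms the block proper, whose first insn we will need below.  */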
3918 prev = get_last_insn ();
3919
3920 /* Now write the CLOBBER of the output, followed by the setting of each
3921 of the words, followed by the final copy. */
3922 if (target != op0 && target != op1)
3923 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3924
3925 for (insn = insns; insn; insn = next)
3926 {
3927 next = NEXT_INSN (insn);
3928 add_insn (insn);
3929
3930 if (op1 && REG_P (op1))
3931 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3932 REG_NOTES (insn));
3933
3934 if (op0 && REG_P (op0))
3935 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3936 REG_NOTES (insn));
3937 }
3938
3939 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3940 != CODE_FOR_nothing)
3941 {
3942 last = emit_move_insn (target, target);
3943 if (equiv)
3944 set_unique_reg_note (last, REG_EQUAL, equiv);
3945 }
3946 else
3947 {
3948 last = get_last_insn ();
3949
3950 /* Remove any existing REG_EQUAL note from "last", or else it will
3951 be mistaken for a note referring to the full contents of the
3952 alleged libcall value when found together with the REG_RETVAL
3953 note added below. An existing note can come from an insn
3954 expansion at "last". */
3955 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3956 }
3957
3958 if (prev == 0)
3959 first = get_insns ();
3960 else
3961 first = NEXT_INSN (prev);
3962
3963 maybe_encapsulate_block (first, last, equiv);
3964
3965 return last;
3966 }
3967 \f
3968 /* Emit code to make a call to a constant function or a library call.
3969
3970 INSNS is a list containing all insns emitted in the call.
3971 These insns leave the result in RESULT.  Our job is to copy RESULT
3972 to TARGET, which is logically equivalent to EQUIV.
3973
3974 We first emit any insns that set a pseudo on the assumption that these are
3975 loading constants into registers; doing so allows them to be safely cse'ed
3976 between blocks. Then we emit all the other insns in the block, followed by
3977 an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
3978 note with an operand of EQUIV.
3979
3980 Moving assignments to pseudos outside of the block is done to improve
3981 the generated code, but is not required to generate correct code,
3982 hence being unable to move an assignment is not grounds for not making
3983 a libcall block. There are two reasons why it is safe to leave these
3984 insns inside the block: First, we know that these pseudos cannot be
3985 used in generated RTL outside the block since they are created for
3986 temporary purposes within the block. Second, CSE will not record the
3987 values of anything set inside a libcall block, so we know they must
3988 be dead at the end of the block.
3989
3990 Except for the first group of insns (the ones setting pseudos), the
3991 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3992 void
3993 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3994 {
3995 rtx final_dest = target;
3996 rtx prev, next, first, last, insn;
3997
3998 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3999 into a MEM later. Protect the libcall block from this change. */
4000 if (! REG_P (target) || REG_USERVAR_P (target))
4001 target = gen_reg_rtx (GET_MODE (target));
4002
4003 /* If we're using non-call exceptions, a libcall corresponding to an
4004 operation that may trap may also trap. */
4005 if (flag_non_call_exceptions && may_trap_p (equiv))
4006 {
4007 for (insn = insns; insn; insn = NEXT_INSN (insn))
4008 if (CALL_P (insn))
4009 {
4010 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4011
4012 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
4013 remove_note (insn, note);
4014 }
4015 }
4016 else
4017 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4018 reg note to indicate that this call cannot throw or execute a nonlocal
4019 goto (unless there is already a REG_EH_REGION note, in which case
4020 we update it). */
4021 for (insn = insns; insn; insn = NEXT_INSN (insn))
4022 if (CALL_P (insn))
4023 {
4024 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4025
4026 if (note != 0)
4027 XEXP (note, 0) = constm1_rtx;
4028 else
4029 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
4030 REG_NOTES (insn));
4031 }
4032
4033 /* First emit all insns that set pseudos. Remove them from the list as
4034 we go. Avoid insns that set pseudos which were referenced in previous
4035 insns. These can be generated by move_by_pieces, for example,
4036 to update an address. Similarly, avoid insns that reference things
4037 set in previous insns. */
4038
4039 for (insn = insns; insn; insn = next)
4040 {
4041 rtx set = single_set (insn);
4042 rtx note;
4043
4044 /* Some ports (cris) create libcall regions on their own.  We must
4045 avoid any potential nesting of LIBCALLs. */
4046 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
4047 remove_note (insn, note);
4048 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
4049 remove_note (insn, note);
4050
4051 next = NEXT_INSN (insn);
4052
4053 if (set != 0 && REG_P (SET_DEST (set))
4054 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4055 {
4056 struct no_conflict_data data;
4057
4058 data.target = const0_rtx;
4059 data.first = insns;
4060 data.insn = insn;
4061 data.must_stay = 0;
4062 note_stores (PATTERN (insn), no_conflict_move_test, &data);
4063 if (! data.must_stay)
4064 {
4065 if (PREV_INSN (insn))
4066 NEXT_INSN (PREV_INSN (insn)) = next;
4067 else
4068 insns = next;
4069
4070 if (next)
4071 PREV_INSN (next) = PREV_INSN (insn);
4072
4073 add_insn (insn);
4074 }
4075 }
4076
4077 /* Some ports use a loop to copy large arguments onto the stack.
4078 Don't move anything outside such a loop. */
4079 if (LABEL_P (insn))
4080 break;
4081 }
4082
4083 prev = get_last_insn ();
4084
4085 /* Write the remaining insns followed by the final copy. */
4086
4087 for (insn = insns; insn; insn = next)
4088 {
4089 next = NEXT_INSN (insn);
4090
4091 add_insn (insn);
4092 }
4093
4094 last = emit_move_insn (target, result);
4095 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
4096 != CODE_FOR_nothing)
4097 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
4098 else
4099 {
4100 /* Remove any existing REG_EQUAL note from "last", or else it will
4101 be mistaken for a note referring to the full contents of the
4102 libcall value when found together with the REG_RETVAL note added
4103 below. An existing note can come from an insn expansion at
4104 "last". */
4105 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
4106 }
4107
4108 if (final_dest != target)
4109 emit_move_insn (final_dest, target);
4110
4111 if (prev == 0)
4112 first = get_insns ();
4113 else
4114 first = NEXT_INSN (prev);
4115
4116 maybe_encapsulate_block (first, last, equiv);
4117 }
4118 \f
4119 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4120 PURPOSE describes how this comparison will be used. CODE is the rtx
4121 comparison code we will be using.
4122
4123 ??? Actually, CODE is slightly weaker than that. A target is still
4124 required to implement all of the normal bcc operations, but not
4125 required to implement all (or any) of the unordered bcc operations. */
4126
4127 int
4128 can_compare_p (enum rtx_code code, enum machine_mode mode,
4129 enum can_compare_purpose purpose)
4130 {
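  /* Try the requested mode first, then successively wider modes, until one
     is found in which the comparison can be performed.  */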
4131 do
4132 {
4133 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
4134 {
4135 if (purpose == ccp_jump)
4136 return bcc_gen_fctn[(int) code] != NULL;
4137 else if (purpose == ccp_store_flag)
4138 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
4139 else
4140 /* There's only one cmov entry point, and it's allowed to fail. */
4141 return 1;
4142 }
4143 if (purpose == ccp_jump
4144 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
4145 return 1;
4146 if (purpose == ccp_cmov
4147 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4148 return 1;
4149 if (purpose == ccp_store_flag
4150 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
4151 return 1;
4152 mode = GET_MODE_WIDER_MODE (mode);
4153 }
4154 while (mode != VOIDmode);
4155
4156 return 0;
4157 }
4158
4159 /* This function is called when we are going to emit a compare instruction that
4160 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4161
4162 *PMODE is the mode of the inputs (in case they are const_int).
4163 *PUNSIGNEDP nonzero says that the operands are unsigned;
4164 this matters if they need to be widened.
4165
4166 If they have mode BLKmode, then SIZE specifies the size of both operands.
4167
4168 This function performs all the setup necessary so that the caller only has
4169 to emit a single comparison insn. This setup can involve doing a BLKmode
4170 comparison or emitting a library call to perform the comparison if no insn
4171 is available to handle it.
4172 The values which are passed in through pointers can be modified; the caller
4173 should perform the comparison on the modified values. Constant
4174 comparisons must have already been folded. */
4175
4176 static void
4177 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4178 enum machine_mode *pmode, int *punsignedp,
4179 enum can_compare_purpose purpose)
4180 {
4181 enum machine_mode mode = *pmode;
4182 rtx x = *px, y = *py;
4183 int unsignedp = *punsignedp;
4184 rtx libfunc;
4185
4186 /* If we are inside an appropriately-short loop and we are optimizing,
4187 force expensive constants into a register. */
4188 if (CONSTANT_P (x) && optimize
4189 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
4190 x = force_reg (mode, x);
4191
4192 if (CONSTANT_P (y) && optimize
4193 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
4194 y = force_reg (mode, y);
4195
4196 #ifdef HAVE_cc0
4197 /* Make sure we have a canonical comparison.  The RTL
4198 documentation states that canonical comparisons are required only
4199 for targets which have cc0. */
4200 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4201 #endif
4202
4203 /* Don't let both operands fail to indicate the mode. */
4204 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4205 x = force_reg (mode, x);
4206
4207 /* Handle all BLKmode compares. */
4208
4209 if (mode == BLKmode)
4210 {
4211 enum machine_mode cmp_mode, result_mode;
4212 enum insn_code cmp_code;
4213 tree length_type;
4214 rtx libfunc;
4215 rtx result;
4216 rtx opalign
4217 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4218
4219 gcc_assert (size);
4220
4221 /* Try to use a memory block compare insn - either cmpstr
4222 or cmpmem will do. */
4223 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4224 cmp_mode != VOIDmode;
4225 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4226 {
4227 cmp_code = cmpmem_optab[cmp_mode];
4228 if (cmp_code == CODE_FOR_nothing)
4229 cmp_code = cmpstr_optab[cmp_mode];
4230 if (cmp_code == CODE_FOR_nothing)
4231 cmp_code = cmpstrn_optab[cmp_mode];
4232 if (cmp_code == CODE_FOR_nothing)
4233 continue;
4234
4235 /* Must make sure the size fits the insn's mode. */
4236 if ((GET_CODE (size) == CONST_INT
4237 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4238 || (GET_MODE_BITSIZE (GET_MODE (size))
4239 > GET_MODE_BITSIZE (cmp_mode)))
4240 continue;
4241
4242 result_mode = insn_data[cmp_code].operand[0].mode;
4243 result = gen_reg_rtx (result_mode);
4244 size = convert_to_mode (cmp_mode, size, 1);
4245 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4246
4247 *px = result;
4248 *py = const0_rtx;
4249 *pmode = result_mode;
4250 return;
4251 }
4252
4253 /* Otherwise call a library function, memcmp. */
4254 libfunc = memcmp_libfunc;
4255 length_type = sizetype;
4256 result_mode = TYPE_MODE (integer_type_node);
4257 cmp_mode = TYPE_MODE (length_type);
4258 size = convert_to_mode (TYPE_MODE (length_type), size,
4259 TYPE_UNSIGNED (length_type));
4260
4261 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
4262 result_mode, 3,
4263 XEXP (x, 0), Pmode,
4264 XEXP (y, 0), Pmode,
4265 size, cmp_mode);
4266 *px = result;
4267 *py = const0_rtx;
4268 *pmode = result_mode;
4269 return;
4270 }
4271
4272 /* Don't allow operands to the compare to trap, as that can put the
4273 compare and branch in different basic blocks. */
4274 if (flag_non_call_exceptions)
4275 {
4276 if (may_trap_p (x))
4277 x = force_reg (mode, x);
4278 if (may_trap_p (y))
4279 y = force_reg (mode, y);
4280 }
4281
4282 *px = x;
4283 *py = y;
4284 if (can_compare_p (*pcomparison, mode, purpose))
4285 return;
4286
4287 /* Handle a lib call just for the mode we are using. */
4288
4289 libfunc = optab_libfunc (cmp_optab, mode);
4290 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4291 {
4292 rtx result;
4293
4294 /* If we want unsigned, and this mode has a distinct unsigned
4295 comparison routine, use that. */
4296 if (unsignedp)
4297 {
4298 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4299 if (ulibfunc)
4300 libfunc = ulibfunc;
4301 }
4302
4303 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
4304 targetm.libgcc_cmp_return_mode (),
4305 2, x, mode, y, mode);
4306
4307 /* There are two kinds of comparison routines. Biased routines
4308 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4309 of gcc expect that the comparison operation is equivalent
4310 to the modified comparison. For signed comparisons compare the
4311 result against 1 in the biased case, and zero in the unbiased
4312 case. For unsigned comparisons always compare against 1 after
4313 biasing the unbiased result by adding 1. This gives us a way to
4314 represent LTU. */
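      /* For example, a biased signed routine returns 0, 1 or 2 for "less",
	 "equal" and "greater" respectively, so comparing its result against 1
	 with the original signed condition gives the intended answer.  */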
4315 *px = result;
4316 *pmode = word_mode;
4317 *py = const1_rtx;
4318
4319 if (!TARGET_LIB_INT_CMP_BIASED)
4320 {
4321 if (*punsignedp)
4322 *px = plus_constant (result, 1);
4323 else
4324 *py = const0_rtx;
4325 }
4326 return;
4327 }
4328
4329 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4330 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4331 }
4332
4333 /* Before emitting an insn with code ICODE, make sure that X, which is going
4334 to be used for operand OPNUM of the insn, is converted from mode MODE to
4335 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4336 that it is accepted by the operand predicate. Return the new value. */
4337
4338 static rtx
4339 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4340 enum machine_mode wider_mode, int unsignedp)
4341 {
4342 if (mode != wider_mode)
4343 x = convert_modes (wider_mode, mode, x, unsignedp);
4344
4345 if (!insn_data[icode].operand[opnum].predicate
4346 (x, insn_data[icode].operand[opnum].mode))
4347 {
4348 if (reload_completed)
4349 return NULL_RTX;
4350 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4351 }
4352
4353 return x;
4354 }
4355
4356 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4357 we can do the comparison.
4358 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4359 be NULL_RTX which indicates that only a comparison is to be generated. */
4360
4361 static void
4362 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4363 enum rtx_code comparison, int unsignedp, rtx label)
4364 {
4365 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4366 enum mode_class class = GET_MODE_CLASS (mode);
4367 enum machine_mode wider_mode = mode;
4368
4369 /* Try combined insns first. */
4370 do
4371 {
4372 enum insn_code icode;
4373 PUT_MODE (test, wider_mode);
4374
4375 if (label)
4376 {
4377 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4378
4379 if (icode != CODE_FOR_nothing
4380 && insn_data[icode].operand[0].predicate (test, wider_mode))
4381 {
4382 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4383 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4384 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4385 return;
4386 }
4387 }
4388
4389 /* Handle some compares against zero. */
4390 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4391 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4392 {
4393 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4394 emit_insn (GEN_FCN (icode) (x));
4395 if (label)
4396 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4397 return;
4398 }
4399
4400 /* Handle compares for which there is a directly suitable insn. */
4401
4402 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4403 if (icode != CODE_FOR_nothing)
4404 {
4405 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4406 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4407 emit_insn (GEN_FCN (icode) (x, y));
4408 if (label)
4409 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4410 return;
4411 }
4412
4413 if (!CLASS_HAS_WIDER_MODES_P (class))
4414 break;
4415
4416 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4417 }
4418 while (wider_mode != VOIDmode);
4419
4420 gcc_unreachable ();
4421 }
4422
4423 /* Generate code to compare X with Y so that the condition codes are
4424 set and to jump to LABEL if the condition is true. If X is a
4425 constant and Y is not a constant, then the comparison is swapped to
4426 ensure that the comparison RTL has the canonical form.
4427
4428 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4429 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4430 the proper branch condition code.
4431
4432 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4433
4434 MODE is the mode of the inputs (in case they are const_int).
4435
4436 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4437 be passed unchanged to emit_cmp_insn, then potentially converted into an
4438 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4439
4440 void
4441 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4442 enum machine_mode mode, int unsignedp, rtx label)
4443 {
4444 rtx op0 = x, op1 = y;
4445
4446 /* Swap operands and condition to ensure canonical RTL. */
4447 if (swap_commutative_operands_p (x, y))
4448 {
4449 /* If we're not emitting a branch, callers are required to pass
4450 operands in an order conforming to canonical RTL. We relax this
4451 for commutative comparisons so callers using EQ don't need to do
4452 swapping by hand. */
4453 gcc_assert (label || (comparison == swap_condition (comparison)));
4454
4455 op0 = y, op1 = x;
4456 comparison = swap_condition (comparison);
4457 }
4458
4459 #ifdef HAVE_cc0
4460 /* If OP0 is still a constant, then both X and Y must be constants.
4461 Force X into a register to create canonical RTL. */
4462 if (CONSTANT_P (op0))
4463 op0 = force_reg (mode, op0);
4464 #endif
4465
4466 if (unsignedp)
4467 comparison = unsigned_condition (comparison);
4468
4469 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4470 ccp_jump);
4471 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4472 }
4473
4474 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4475
4476 void
4477 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4478 enum machine_mode mode, int unsignedp)
4479 {
4480 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4481 }
4482 \f
4483 /* Emit a library call comparison between floating point X and Y.
4484 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4485
4486 static void
4487 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4488 enum machine_mode *pmode, int *punsignedp)
4489 {
4490 enum rtx_code comparison = *pcomparison;
4491 enum rtx_code swapped = swap_condition (comparison);
4492 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4493 rtx x = *px;
4494 rtx y = *py;
4495 enum machine_mode orig_mode = GET_MODE (x);
4496 enum machine_mode mode, cmp_mode;
4497 rtx value, target, insns, equiv;
4498 rtx libfunc = 0;
4499 bool reversed_p = false;
4500 cmp_mode = targetm.libgcc_cmp_return_mode ();
4501
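  /* Search for a comparison library function, trying successively wider
     modes; if the direct condition has no libfunc, try the swapped and then
     the reversed condition.  */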
4502 for (mode = orig_mode;
4503 mode != VOIDmode;
4504 mode = GET_MODE_WIDER_MODE (mode))
4505 {
4506 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4507 break;
4508
4509 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4510 {
4511 rtx tmp;
4512 tmp = x; x = y; y = tmp;
4513 comparison = swapped;
4514 break;
4515 }
4516
4517 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4518 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4519 {
4520 comparison = reversed;
4521 reversed_p = true;
4522 break;
4523 }
4524 }
4525
4526 gcc_assert (mode != VOIDmode);
4527
4528 if (mode != orig_mode)
4529 {
4530 x = convert_to_mode (mode, x, 0);
4531 y = convert_to_mode (mode, y, 0);
4532 }
4533
4534 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4535 the RTL.  This allows the RTL optimizers to delete the libcall if the
4536 condition can be determined at compile-time. */
4537 if (comparison == UNORDERED)
4538 {
4539 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4540 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4541 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4542 temp, const_true_rtx, equiv);
4543 }
4544 else
4545 {
4546 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4547 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4548 {
4549 rtx true_rtx, false_rtx;
4550
4551 switch (comparison)
4552 {
4553 case EQ:
4554 true_rtx = const0_rtx;
4555 false_rtx = const_true_rtx;
4556 break;
4557
4558 case NE:
4559 true_rtx = const_true_rtx;
4560 false_rtx = const0_rtx;
4561 break;
4562
4563 case GT:
4564 true_rtx = const1_rtx;
4565 false_rtx = const0_rtx;
4566 break;
4567
4568 case GE:
4569 true_rtx = const0_rtx;
4570 false_rtx = constm1_rtx;
4571 break;
4572
4573 case LT:
4574 true_rtx = constm1_rtx;
4575 false_rtx = const0_rtx;
4576 break;
4577
4578 case LE:
4579 true_rtx = const0_rtx;
4580 false_rtx = const1_rtx;
4581 break;
4582
4583 default:
4584 gcc_unreachable ();
4585 }
4586 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4587 equiv, true_rtx, false_rtx);
4588 }
4589 }
4590
4591 start_sequence ();
4592 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4593 cmp_mode, 2, x, mode, y, mode);
4594 insns = get_insns ();
4595 end_sequence ();
4596
4597 target = gen_reg_rtx (cmp_mode);
4598 emit_libcall_block (insns, target, value, equiv);
4599
4600 if (comparison == UNORDERED
4601 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4602 comparison = reversed_p ? EQ : NE;
4603
4604 *px = target;
4605 *py = const0_rtx;
4606 *pmode = cmp_mode;
4607 *pcomparison = comparison;
4608 *punsignedp = 0;
4609 }
4610 \f
4611 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4612
4613 void
4614 emit_indirect_jump (rtx loc)
4615 {
4616 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4617 (loc, Pmode))
4618 loc = copy_to_mode_reg (Pmode, loc);
4619
4620 emit_jump_insn (gen_indirect_jump (loc));
4621 emit_barrier ();
4622 }
4623 \f
4624 #ifdef HAVE_conditional_move
4625
4626 /* Emit a conditional move instruction if the machine supports one for that
4627 condition and machine mode.
4628
4629 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4630 the mode to use should they be constants. If it is VOIDmode, they cannot
4631 both be constants.
4632
4633 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4634 should be stored there. MODE is the mode to use should they be constants.
4635 If it is VOIDmode, they cannot both be constants.
4636
4637 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4638 is not supported. */
4639
4640 rtx
4641 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4642 enum machine_mode cmode, rtx op2, rtx op3,
4643 enum machine_mode mode, int unsignedp)
4644 {
4645 rtx tem, subtarget, comparison, insn;
4646 enum insn_code icode;
4647 enum rtx_code reversed;
4648
4649 /* If one operand is constant, make it the second one. Only do this
4650 if the other operand is not constant as well. */
4651
4652 if (swap_commutative_operands_p (op0, op1))
4653 {
4654 tem = op0;
4655 op0 = op1;
4656 op1 = tem;
4657 code = swap_condition (code);
4658 }
4659
4660 /* get_condition will prefer to generate LT and GT even if the old
4661 comparison was against zero, so undo that canonicalization here since
4662 comparisons against zero are cheaper. */
4663 if (code == LT && op1 == const1_rtx)
4664 code = LE, op1 = const0_rtx;
4665 else if (code == GT && op1 == constm1_rtx)
4666 code = GE, op1 = const0_rtx;
4667
4668 if (cmode == VOIDmode)
4669 cmode = GET_MODE (op0);
4670
4671 if (swap_commutative_operands_p (op2, op3)
4672 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4673 != UNKNOWN))
4674 {
4675 tem = op2;
4676 op2 = op3;
4677 op3 = tem;
4678 code = reversed;
4679 }
4680
4681 if (mode == VOIDmode)
4682 mode = GET_MODE (op2);
4683
4684 icode = movcc_gen_code[mode];
4685
4686 if (icode == CODE_FOR_nothing)
4687 return 0;
4688
4689 if (!target)
4690 target = gen_reg_rtx (mode);
4691
4692 subtarget = target;
4693
4694 /* If the insn doesn't accept these operands, put them in pseudos. */
4695
4696 if (!insn_data[icode].operand[0].predicate
4697 (subtarget, insn_data[icode].operand[0].mode))
4698 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4699
4700 if (!insn_data[icode].operand[2].predicate
4701 (op2, insn_data[icode].operand[2].mode))
4702 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4703
4704 if (!insn_data[icode].operand[3].predicate
4705 (op3, insn_data[icode].operand[3].mode))
4706 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4707
4708 /* Everything should now be in the suitable form, so emit the compare insn
4709 and then the conditional move. */
4710
4711 comparison
4712 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4713
4714 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4715 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4716 return NULL and let the caller figure out how best to deal with this
4717 situation. */
4718 if (GET_CODE (comparison) != code)
4719 return NULL_RTX;
4720
4721 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4722
4723 /* If that failed, then give up. */
4724 if (insn == 0)
4725 return 0;
4726
4727 emit_insn (insn);
4728
4729 if (subtarget != target)
4730 convert_move (target, subtarget, 0);
4731
4732 return target;
4733 }
4734
4735 /* Return nonzero if a conditional move of mode MODE is supported.
4736
4737 This function is for combine so it can tell whether an insn that looks
4738 like a conditional move is actually supported by the hardware. If we
4739 guess wrong we lose a bit on optimization, but that's it. */
4740 /* ??? sparc64 supports conditionally moving integer values based on fp
4741 comparisons, and vice versa. How do we handle them? */
4742
4743 int
4744 can_conditionally_move_p (enum machine_mode mode)
4745 {
4746 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4747 return 1;
4748
4749 return 0;
4750 }
4751
4752 #endif /* HAVE_conditional_move */
4753
4754 /* Emit a conditional addition instruction if the machine supports one for that
4755 condition and machine mode.
4756
4757 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4758 the mode to use should they be constants. If it is VOIDmode, they cannot
4759 both be constants.
4760
4761 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4762 should be stored there. MODE is the mode to use should they be constants.
4763 If it is VOIDmode, they cannot both be constants.
4764
4765 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4766 is not supported. */
4767
4768 rtx
4769 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4770 enum machine_mode cmode, rtx op2, rtx op3,
4771 enum machine_mode mode, int unsignedp)
4772 {
4773 rtx tem, subtarget, comparison, insn;
4774 enum insn_code icode;
4775 enum rtx_code reversed;
4776
4777 /* If one operand is constant, make it the second one. Only do this
4778 if the other operand is not constant as well. */
4779
4780 if (swap_commutative_operands_p (op0, op1))
4781 {
4782 tem = op0;
4783 op0 = op1;
4784 op1 = tem;
4785 code = swap_condition (code);
4786 }
4787
4788 /* get_condition will prefer to generate LT and GT even if the old
4789 comparison was against zero, so undo that canonicalization here since
4790 comparisons against zero are cheaper. */
4791 if (code == LT && op1 == const1_rtx)
4792 code = LE, op1 = const0_rtx;
4793 else if (code == GT && op1 == constm1_rtx)
4794 code = GE, op1 = const0_rtx;
4795
4796 if (cmode == VOIDmode)
4797 cmode = GET_MODE (op0);
4798
4799 if (swap_commutative_operands_p (op2, op3)
4800 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4801 != UNKNOWN))
4802 {
4803 tem = op2;
4804 op2 = op3;
4805 op3 = tem;
4806 code = reversed;
4807 }
4808
4809 if (mode == VOIDmode)
4810 mode = GET_MODE (op2);
4811
4812 icode = optab_handler (addcc_optab, mode)->insn_code;
4813
4814 if (icode == CODE_FOR_nothing)
4815 return 0;
4816
4817 if (!target)
4818 target = gen_reg_rtx (mode);
4819
4820 /* If the insn doesn't accept these operands, put them in pseudos. */
4821
4822 if (!insn_data[icode].operand[0].predicate
4823 (target, insn_data[icode].operand[0].mode))
4824 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4825 else
4826 subtarget = target;
4827
4828 if (!insn_data[icode].operand[2].predicate
4829 (op2, insn_data[icode].operand[2].mode))
4830 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4831
4832 if (!insn_data[icode].operand[3].predicate
4833 (op3, insn_data[icode].operand[3].mode))
4834 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4835
4836 /* Everything should now be in the suitable form, so emit the compare insn
4837 and then the conditional move. */
4838
4839 comparison
4840 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4841
4842 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4843 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4844 return NULL and let the caller figure out how best to deal with this
4845 situation. */
4846 if (GET_CODE (comparison) != code)
4847 return NULL_RTX;
4848
4849 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4850
4851 /* If that failed, then give up. */
4852 if (insn == 0)
4853 return 0;
4854
4855 emit_insn (insn);
4856
4857 if (subtarget != target)
4858 convert_move (target, subtarget, 0);
4859
4860 return target;
4861 }
4862 \f
4863 /* These functions attempt to generate an insn body, rather than
4864 emitting the insn, but if the gen function already emits them, we
4865 make no attempt to turn them back into naked patterns. */
4866
4867 /* Generate and return an insn body to add Y to X. */
4868
4869 rtx
4870 gen_add2_insn (rtx x, rtx y)
4871 {
4872 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4873
4874 gcc_assert (insn_data[icode].operand[0].predicate
4875 (x, insn_data[icode].operand[0].mode));
4876 gcc_assert (insn_data[icode].operand[1].predicate
4877 (x, insn_data[icode].operand[1].mode));
4878 gcc_assert (insn_data[icode].operand[2].predicate
4879 (y, insn_data[icode].operand[2].mode));
4880
4881 return GEN_FCN (icode) (x, x, y);
4882 }
4883
4884 /* Generate and return an insn body to add r1 and c,
4885 storing the result in r0. */
4886
4887 rtx
4888 gen_add3_insn (rtx r0, rtx r1, rtx c)
4889 {
4890 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4891
4892 if (icode == CODE_FOR_nothing
4893 || !(insn_data[icode].operand[0].predicate
4894 (r0, insn_data[icode].operand[0].mode))
4895 || !(insn_data[icode].operand[1].predicate
4896 (r1, insn_data[icode].operand[1].mode))
4897 || !(insn_data[icode].operand[2].predicate
4898 (c, insn_data[icode].operand[2].mode)))
4899 return NULL_RTX;
4900
4901 return GEN_FCN (icode) (r0, r1, c);
4902 }
4903
4904 int
4905 have_add2_insn (rtx x, rtx y)
4906 {
4907 int icode;
4908
4909 gcc_assert (GET_MODE (x) != VOIDmode);
4910
4911 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4912
4913 if (icode == CODE_FOR_nothing)
4914 return 0;
4915
4916 if (!(insn_data[icode].operand[0].predicate
4917 (x, insn_data[icode].operand[0].mode))
4918 || !(insn_data[icode].operand[1].predicate
4919 (x, insn_data[icode].operand[1].mode))
4920 || !(insn_data[icode].operand[2].predicate
4921 (y, insn_data[icode].operand[2].mode)))
4922 return 0;
4923
4924 return 1;
4925 }
4926
4927 /* Generate and return an insn body to subtract Y from X. */
4928
4929 rtx
4930 gen_sub2_insn (rtx x, rtx y)
4931 {
4932 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4933
4934 gcc_assert (insn_data[icode].operand[0].predicate
4935 (x, insn_data[icode].operand[0].mode));
4936 gcc_assert (insn_data[icode].operand[1].predicate
4937 (x, insn_data[icode].operand[1].mode));
4938 gcc_assert (insn_data[icode].operand[2].predicate
4939 (y, insn_data[icode].operand[2].mode));
4940
4941 return GEN_FCN (icode) (x, x, y);
4942 }
4943
4944 /* Generate and return an insn body to subtract c from r1,
4945 storing the result in r0. */
4946
4947 rtx
4948 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4949 {
4950 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4951
4952 if (icode == CODE_FOR_nothing
4953 || !(insn_data[icode].operand[0].predicate
4954 (r0, insn_data[icode].operand[0].mode))
4955 || !(insn_data[icode].operand[1].predicate
4956 (r1, insn_data[icode].operand[1].mode))
4957 || !(insn_data[icode].operand[2].predicate
4958 (c, insn_data[icode].operand[2].mode)))
4959 return NULL_RTX;
4960
4961 return GEN_FCN (icode) (r0, r1, c);
4962 }
4963
4964 int
4965 have_sub2_insn (rtx x, rtx y)
4966 {
4967 int icode;
4968
4969 gcc_assert (GET_MODE (x) != VOIDmode);
4970
4971 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4972
4973 if (icode == CODE_FOR_nothing)
4974 return 0;
4975
4976 if (!(insn_data[icode].operand[0].predicate
4977 (x, insn_data[icode].operand[0].mode))
4978 || !(insn_data[icode].operand[1].predicate
4979 (x, insn_data[icode].operand[1].mode))
4980 || !(insn_data[icode].operand[2].predicate
4981 (y, insn_data[icode].operand[2].mode)))
4982 return 0;
4983
4984 return 1;
4985 }
4986
4987 /* Generate the body of an instruction to copy Y into X.
4988 It may be a list of insns, if one insn isn't enough. */
4989
4990 rtx
4991 gen_move_insn (rtx x, rtx y)
4992 {
4993 rtx seq;
4994
4995 start_sequence ();
4996 emit_move_insn_1 (x, y);
4997 seq = get_insns ();
4998 end_sequence ();
4999 return seq;
5000 }
5001 \f
5002 /* Return the insn code used to extend FROM_MODE to TO_MODE.
5003 UNSIGNEDP specifies zero-extension instead of sign-extension. If
5004 no such operation exists, CODE_FOR_nothing will be returned. */
5005
5006 enum insn_code
5007 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
5008 int unsignedp)
5009 {
5010 convert_optab tab;
5011 #ifdef HAVE_ptr_extend
5012 if (unsignedp < 0)
5013 return CODE_FOR_ptr_extend;
5014 #endif
5015
5016 tab = unsignedp ? zext_optab : sext_optab;
5017 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
5018 }
5019
5020 /* Generate the body of an insn to extend Y (with mode MFROM)
5021 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5022
5023 rtx
5024 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
5025 enum machine_mode mfrom, int unsignedp)
5026 {
5027 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
5028 return GEN_FCN (icode) (x, y);
5029 }
5030 \f
5031 /* can_fix_p and can_float_p say whether the target machine
5032 can directly convert a given fixed point type to
5033 a given floating point type, or vice versa.
5034 The returned value is the CODE_FOR_... value to use,
5035 or CODE_FOR_nothing if these modes cannot be directly converted.
5036
5037 *TRUNCP_PTR is set to 1 if it is necessary to output
5038 an explicit FTRUNC insn before the fix insn; otherwise 0. */
5039
5040 static enum insn_code
5041 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
5042 int unsignedp, int *truncp_ptr)
5043 {
5044 convert_optab tab;
5045 enum insn_code icode;
5046
5047 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
5048 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
5049 if (icode != CODE_FOR_nothing)
5050 {
5051 *truncp_ptr = 0;
5052 return icode;
5053 }
5054
5055 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
5056 for this to work. We need to rework the fix* and ftrunc* patterns
5057 and documentation. */
5058 tab = unsignedp ? ufix_optab : sfix_optab;
5059 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
5060 if (icode != CODE_FOR_nothing
5061 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
5062 {
5063 *truncp_ptr = 1;
5064 return icode;
5065 }
5066
5067 *truncp_ptr = 0;
5068 return CODE_FOR_nothing;
5069 }
5070
5071 static enum insn_code
5072 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
5073 int unsignedp)
5074 {
5075 convert_optab tab;
5076
5077 tab = unsignedp ? ufloat_optab : sfloat_optab;
5078 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
5079 }
5080 \f
5081 /* Generate code to convert FROM to floating point
5082 and store in TO. FROM must be fixed point and not VOIDmode.
5083 UNSIGNEDP nonzero means regard FROM as unsigned.
5084 Normally this is done by correcting the final value
5085 if it is negative. */
5086
5087 void
5088 expand_float (rtx to, rtx from, int unsignedp)
5089 {
5090 enum insn_code icode;
5091 rtx target = to;
5092 enum machine_mode fmode, imode;
5093 bool can_do_signed = false;
5094
5095 /* Crash now, because we won't be able to decide which mode to use. */
5096 gcc_assert (GET_MODE (from) != VOIDmode);
5097
5098 /* Look for an insn to do the conversion. Do it in the specified
5099 modes if possible; otherwise convert either input, output or both to
5100 wider mode. If the integer mode is wider than the mode of FROM,
5101 we can do the conversion signed even if the input is unsigned. */
5102
5103 for (fmode = GET_MODE (to); fmode != VOIDmode;
5104 fmode = GET_MODE_WIDER_MODE (fmode))
5105 for (imode = GET_MODE (from); imode != VOIDmode;
5106 imode = GET_MODE_WIDER_MODE (imode))
5107 {
5108 int doing_unsigned = unsignedp;
5109
5110 if (fmode != GET_MODE (to)
5111 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
5112 continue;
5113
5114 icode = can_float_p (fmode, imode, unsignedp);
5115 if (icode == CODE_FOR_nothing && unsignedp)
5116 {
5117 enum insn_code scode = can_float_p (fmode, imode, 0);
5118 if (scode != CODE_FOR_nothing)
5119 can_do_signed = true;
5120 if (imode != GET_MODE (from))
5121 icode = scode, doing_unsigned = 0;
5122 }
5123
5124 if (icode != CODE_FOR_nothing)
5125 {
5126 if (imode != GET_MODE (from))
5127 from = convert_to_mode (imode, from, unsignedp);
5128
5129 if (fmode != GET_MODE (to))
5130 target = gen_reg_rtx (fmode);
5131
5132 emit_unop_insn (icode, target, from,
5133 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5134
5135 if (target != to)
5136 convert_move (to, target, 0);
5137 return;
5138 }
5139 }
5140
5141 /* Unsigned integer, and no way to convert directly. Convert as signed,
5142 then unconditionally adjust the result. For decimal float values we
5143 do this only if we have already determined that a signed conversion
5144 provides sufficient accuracy. */
5145 if (unsignedp && (can_do_signed || !DECIMAL_FLOAT_MODE_P (GET_MODE (to))))
5146 {
5147 rtx label = gen_label_rtx ();
5148 rtx temp;
5149 REAL_VALUE_TYPE offset;
5150
5151 /* Look for a usable floating mode FMODE wider than the source and at
5152 least as wide as the target. Using FMODE will avoid rounding woes
5153 with unsigned values greater than the signed maximum value. */
5154
5155 for (fmode = GET_MODE (to); fmode != VOIDmode;
5156 fmode = GET_MODE_WIDER_MODE (fmode))
5157 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5158 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5159 break;
5160
5161 if (fmode == VOIDmode)
5162 {
5163 /* There is no such mode. Pretend the target is wide enough. */
5164 fmode = GET_MODE (to);
5165
5166 /* Avoid double-rounding when TO is narrower than FROM. */
5167 if ((significand_size (fmode) + 1)
5168 < GET_MODE_BITSIZE (GET_MODE (from)))
5169 {
5170 rtx temp1;
5171 rtx neglabel = gen_label_rtx ();
5172
5173 /* Don't use TARGET if it isn't a register, is a hard register,
5174 or is the wrong mode. */
5175 if (!REG_P (target)
5176 || REGNO (target) < FIRST_PSEUDO_REGISTER
5177 || GET_MODE (target) != fmode)
5178 target = gen_reg_rtx (fmode);
5179
5180 imode = GET_MODE (from);
5181 do_pending_stack_adjust ();
5182
5183 /* Test whether the sign bit is set. */
5184 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5185 0, neglabel);
5186
5187 /* The sign bit is not set. Convert as signed. */
5188 expand_float (target, from, 0);
5189 emit_jump_insn (gen_jump (label));
5190 emit_barrier ();
5191
5192 /* The sign bit is set.
5193 Convert to a usable (positive signed) value by shifting right
5194 one bit, while remembering if a nonzero bit was shifted
5195 out; i.e., compute (from & 1) | (from >> 1). */
5196
5197 emit_label (neglabel);
5198 temp = expand_binop (imode, and_optab, from, const1_rtx,
5199 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5200 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5201 NULL_RTX, 1);
5202 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5203 OPTAB_LIB_WIDEN);
5204 expand_float (target, temp, 0);
5205
5206 /* Multiply by 2 to undo the shift above. */
5207 temp = expand_binop (fmode, add_optab, target, target,
5208 target, 0, OPTAB_LIB_WIDEN);
5209 if (temp != target)
5210 emit_move_insn (target, temp);
5211
5212 do_pending_stack_adjust ();
5213 emit_label (label);
5214 goto done;
5215 }
5216 }
5217
5218 /* If we are about to do some arithmetic to correct for an
5219 unsigned operand, do it in a pseudo-register. */
5220
5221 if (GET_MODE (to) != fmode
5222 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5223 target = gen_reg_rtx (fmode);
5224
5225 /* Convert as signed integer to floating. */
5226 expand_float (target, from, 0);
5227
5228 /* If FROM is negative (and therefore TO is negative),
5229 correct its value by 2**bitwidth. */
5230
5231 do_pending_stack_adjust ();
5232 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5233 0, label);
5234
5235
5236 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5237 temp = expand_binop (fmode, add_optab, target,
5238 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5239 target, 0, OPTAB_LIB_WIDEN);
5240 if (temp != target)
5241 emit_move_insn (target, temp);
5242
5243 do_pending_stack_adjust ();
5244 emit_label (label);
5245 goto done;
5246 }
5247
5248 /* No hardware instruction available; call a library routine. */
5249 {
5250 rtx libfunc;
5251 rtx insns;
5252 rtx value;
5253 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5254
5255 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5256 from = convert_to_mode (SImode, from, unsignedp);
5257
5258 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5259 gcc_assert (libfunc);
5260
5261 start_sequence ();
5262
5263 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5264 GET_MODE (to), 1, from,
5265 GET_MODE (from));
5266 insns = get_insns ();
5267 end_sequence ();
5268
5269 emit_libcall_block (insns, target, value,
5270 gen_rtx_FLOAT (GET_MODE (to), from));
5271 }
5272
5273 done:
5274
5275 /* Copy result to requested destination
5276 if we have been computing in a temp location. */
5277
5278 if (target != to)
5279 {
5280 if (GET_MODE (target) == GET_MODE (to))
5281 emit_move_insn (to, target);
5282 else
5283 convert_move (to, target, 0);
5284 }
5285 }
5286 \f
5287 /* Generate code to convert FROM to fixed point and store in TO. FROM
5288 must be floating point. */
5289
5290 void
5291 expand_fix (rtx to, rtx from, int unsignedp)
5292 {
5293 enum insn_code icode;
5294 rtx target = to;
5295 enum machine_mode fmode, imode;
5296 int must_trunc = 0;
5297
5298 /* We first try to find a pair of modes, one real and one integer, at
5299 least as wide as FROM and TO, respectively, in which we can open-code
5300 this conversion. If the integer mode is wider than the mode of TO,
5301 we can do the conversion either signed or unsigned. */
5302
5303 for (fmode = GET_MODE (from); fmode != VOIDmode;
5304 fmode = GET_MODE_WIDER_MODE (fmode))
5305 for (imode = GET_MODE (to); imode != VOIDmode;
5306 imode = GET_MODE_WIDER_MODE (imode))
5307 {
5308 int doing_unsigned = unsignedp;
5309
5310 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5311 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5312 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5313
5314 if (icode != CODE_FOR_nothing)
5315 {
5316 if (fmode != GET_MODE (from))
5317 from = convert_to_mode (fmode, from, 0);
5318
5319 if (must_trunc)
5320 {
5321 rtx temp = gen_reg_rtx (GET_MODE (from));
5322 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5323 temp, 0);
5324 }
5325
5326 if (imode != GET_MODE (to))
5327 target = gen_reg_rtx (imode);
5328
5329 emit_unop_insn (icode, target, from,
5330 doing_unsigned ? UNSIGNED_FIX : FIX);
5331 if (target != to)
5332 convert_move (to, target, unsignedp);
5333 return;
5334 }
5335 }
5336
5337 /* For an unsigned conversion, there is one more way to do it.
5338 If we have a signed conversion, we generate code that compares
5339 the real value to the largest representable positive number. If it
5340 is smaller, the conversion is done normally. Otherwise, subtract
5341 one plus the highest signed number, convert, and add it back.
5342
5343 We only need to check all real modes, since we know we didn't find
5344 anything with a wider integer mode.
5345
5346 This code used to extend the FP value into a mode wider than the
5347 destination. This is needed for decimal float modes which cannot
5348 accurately represent one plus the highest signed number of the same
5349 size, but not for binary modes. Consider, for instance, conversion
5350 from SFmode into DImode.
5351
5352 The hot path through the code is dealing with inputs smaller than 2^63
5353 and doing just the conversion, so there are no bits to lose.
5354
5355 In the other path we know the value is positive in the range 2^63..2^64-1
5356 inclusive (for any other input, overflow happens and the result is
5357 undefined). So we know that the most significant bit set in the mantissa
5358 corresponds to 2^63. The subtraction of 2^63 should not generate any
5359 rounding as it simply clears out that bit. The rest is trivial. */
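/* Concretely, a DFmode to unsigned DImode conversion expands to roughly:

     if (x < 2**63)
       to = (signed DImode) x;
     else
       to = ((signed DImode) (x - 2**63)) ^ (1 << 63);

   where the xor re-adds the subtracted 2**63 without risking rounding. */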
5360
5361 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5362 for (fmode = GET_MODE (from); fmode != VOIDmode;
5363 fmode = GET_MODE_WIDER_MODE (fmode))
5364 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5365 && (!DECIMAL_FLOAT_MODE_P (fmode)
5366 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5367 {
5368 int bitsize;
5369 REAL_VALUE_TYPE offset;
5370 rtx limit, lab1, lab2, insn;
5371
5372 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5373 real_2expN (&offset, bitsize - 1, fmode);
5374 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5375 lab1 = gen_label_rtx ();
5376 lab2 = gen_label_rtx ();
5377
5378 if (fmode != GET_MODE (from))
5379 from = convert_to_mode (fmode, from, 0);
5380
5381 /* See if we need to do the subtraction. */
5382 do_pending_stack_adjust ();
5383 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5384 0, lab1);
5385
5386 /* If not, do the signed "fix" and branch around fixup code. */
5387 expand_fix (to, from, 0);
5388 emit_jump_insn (gen_jump (lab2));
5389 emit_barrier ();
5390
5391 /* Otherwise, subtract 2**(N-1), convert to signed number,
5392 then add 2**(N-1). Do the addition using XOR since this
5393 will often generate better code. */
5394 emit_label (lab1);
5395 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5396 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5397 expand_fix (to, target, 0);
5398 target = expand_binop (GET_MODE (to), xor_optab, to,
5399 gen_int_mode
5400 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5401 GET_MODE (to)),
5402 to, 1, OPTAB_LIB_WIDEN);
5403
5404 if (target != to)
5405 emit_move_insn (to, target);
5406
5407 emit_label (lab2);
5408
5409 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5410 != CODE_FOR_nothing)
5411 {
5412 /* Make a place for a REG_NOTE and add it. */
5413 insn = emit_move_insn (to, to);
5414 set_unique_reg_note (insn,
5415 REG_EQUAL,
5416 gen_rtx_fmt_e (UNSIGNED_FIX,
5417 GET_MODE (to),
5418 copy_rtx (from)));
5419 }
5420
5421 return;
5422 }
5423
5424 /* We can't do it with an insn, so use a library call. But first ensure
5425 that the mode of TO is at least as wide as SImode, since those are the
5426 only library calls we know about. */
5427
5428 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5429 {
5430 target = gen_reg_rtx (SImode);
5431
5432 expand_fix (target, from, unsignedp);
5433 }
5434 else
5435 {
5436 rtx insns;
5437 rtx value;
5438 rtx libfunc;
5439
5440 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5441 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5442 gcc_assert (libfunc);
5443
5444 start_sequence ();
5445
5446 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5447 GET_MODE (to), 1, from,
5448 GET_MODE (from));
5449 insns = get_insns ();
5450 end_sequence ();
5451
5452 emit_libcall_block (insns, target, value,
5453 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5454 GET_MODE (to), from));
5455 }
5456
5457 if (target != to)
5458 {
5459 if (GET_MODE (to) == GET_MODE (target))
5460 emit_move_insn (to, target);
5461 else
5462 convert_move (to, target, 0);
5463 }
5464 }
5465
5466 /* Generate code to convert FROM to or from a fixed-point mode, storing the
5467 result in TO. If UINTP is true, either TO or FROM is an unsigned integer.
5468 If SATP is true, we need to saturate the result. */
5469
5470 void
5471 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5472 {
5473 enum machine_mode to_mode = GET_MODE (to);
5474 enum machine_mode from_mode = GET_MODE (from);
5475 convert_optab tab;
5476 enum rtx_code this_code;
5477 enum insn_code code;
5478 rtx insns, value;
5479 rtx libfunc;
5480
5481 if (to_mode == from_mode)
5482 {
5483 emit_move_insn (to, from);
5484 return;
5485 }
5486
5487 if (uintp)
5488 {
5489 tab = satp ? satfractuns_optab : fractuns_optab;
5490 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5491 }
5492 else
5493 {
5494 tab = satp ? satfract_optab : fract_optab;
5495 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5496 }
5497 code = convert_optab_handler (tab, to_mode, from_mode)->insn_code;
5498 if (code != CODE_FOR_nothing)
5499 {
5500 emit_unop_insn (code, to, from, this_code);
5501 return;
5502 }
5503
5504 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5505 gcc_assert (libfunc);
5506
5507 start_sequence ();
5508 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5509 1, from, from_mode);
5510 insns = get_insns ();
5511 end_sequence ();
5512
5513 emit_libcall_block (insns, to, value,
5514 gen_rtx_fmt_e (tab->code, to_mode, from));
5515 }
5516
5517 /* Generate code to convert FROM to fixed point and store in TO. FROM
5518 must be floating point, TO must be signed. Use the conversion optab
5519 TAB to do the conversion. */
5520
5521 bool
5522 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5523 {
5524 enum insn_code icode;
5525 rtx target = to;
5526 enum machine_mode fmode, imode;
5527
5528 /* We first try to find a pair of modes, one real and one integer, at
5529 least as wide as FROM and TO, respectively, in which we can open-code
5530 this conversion. If the integer mode is wider than the mode of TO,
5531 we can do the conversion either signed or unsigned. */
5532
5533 for (fmode = GET_MODE (from); fmode != VOIDmode;
5534 fmode = GET_MODE_WIDER_MODE (fmode))
5535 for (imode = GET_MODE (to); imode != VOIDmode;
5536 imode = GET_MODE_WIDER_MODE (imode))
5537 {
5538 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5539 if (icode != CODE_FOR_nothing)
5540 {
5541 if (fmode != GET_MODE (from))
5542 from = convert_to_mode (fmode, from, 0);
5543
5544 if (imode != GET_MODE (to))
5545 target = gen_reg_rtx (imode);
5546
5547 emit_unop_insn (icode, target, from, UNKNOWN);
5548 if (target != to)
5549 convert_move (to, target, 0);
5550 return true;
5551 }
5552 }
5553
5554 return false;
5555 }
5556 \f
5557 /* Report whether we have an instruction to perform the operation
5558 specified by CODE on operands of mode MODE. */
5559 int
5560 have_insn_for (enum rtx_code code, enum machine_mode mode)
5561 {
5562 return (code_to_optab[(int) code] != 0
5563 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5564 != CODE_FOR_nothing));
5565 }
5566
5567 /* Set all insn_code fields to CODE_FOR_nothing. */
5568
5569 static void
5570 init_insn_codes (void)
5571 {
5572 unsigned int i;
5573
5574 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5575 {
5576 unsigned int j;
5577 optab op;
5578
5579 op = &optab_table[i];
5580 for (j = 0; j < NUM_MACHINE_MODES; j++)
5581 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5582 }
5583 for (i = 0; i < (unsigned int) COI_MAX; i++)
5584 {
5585 unsigned int j, k;
5586 convert_optab op;
5587
5588 op = &convert_optab_table[i];
5589 for (j = 0; j < NUM_MACHINE_MODES; j++)
5590 for (k = 0; k < NUM_MACHINE_MODES; k++)
5591 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
5592 }
5593 }
5594
5595 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5596 static inline void
5597 init_optab (optab op, enum rtx_code code)
5598 {
5599 op->code = code;
5600 code_to_optab[(int) code] = op;
5601 }
5602
5603 /* Same, but fill in its code as CODE, and do _not_ write it into
5604 the code_to_optab table. */
5605 static inline void
5606 init_optabv (optab op, enum rtx_code code)
5607 {
5608 op->code = code;
5609 }
5610
5611 /* Conversion optabs never go in the code_to_optab table. */
5612 static void
5613 init_convert_optab (convert_optab op, enum rtx_code code)
5614 {
5615 op->code = code;
5616 }
5617
5618 /* Initialize the libfunc fields of an entire group of entries in some
5619 optab. Each entry is set equal to a string consisting of a leading
5620 pair of underscores followed by a generic operation name followed by
5621 a mode name (downshifted to lowercase) followed by a single character
5622 representing the number of operands for the given operation (which is
5623 usually one of the characters '2', '3', or '4').
5624
5625 OPTABLE is the table in which libfunc fields are to be initialized.
5626 OPNAME is the generic (string) name of the operation.
5627 SUFFIX is the character which specifies the number of operands for
5628 the given generic operation.
5629 MODE is the mode to generate for.
5630 */
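/* For example, gen_libfunc (add_optab, "add", '3', SImode) registers the
libcall name "__addsi3" for add_optab in SImode. */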
5631
5632 static void
5633 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5634 {
5635 unsigned opname_len = strlen (opname);
5636 const char *mname = GET_MODE_NAME (mode);
5637 unsigned mname_len = strlen (mname);
5638 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5639 char *p;
5640 const char *q;
5641
5642 p = libfunc_name;
5643 *p++ = '_';
5644 *p++ = '_';
5645 for (q = opname; *q; )
5646 *p++ = *q++;
5647 for (q = mname; *q; q++)
5648 *p++ = TOLOWER (*q);
5649 *p++ = suffix;
5650 *p = '\0';
5651
5652 set_optab_libfunc (optable, mode,
5653 ggc_alloc_string (libfunc_name, p - libfunc_name));
5654 }
5655
5656 /* Like gen_libfunc, but verify that an integer operation is involved. */
5657
5658 static void
5659 gen_int_libfunc (optab optable, const char *opname, char suffix,
5660 enum machine_mode mode)
5661 {
5662 int maxsize = 2 * BITS_PER_WORD;
5663
5664 if (GET_MODE_CLASS (mode) != MODE_INT)
5665 return;
5666 if (maxsize < LONG_LONG_TYPE_SIZE)
5667 maxsize = LONG_LONG_TYPE_SIZE;
5668 if (GET_MODE_CLASS (mode) != MODE_INT
5669 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5670 return;
5671 gen_libfunc (optable, opname, suffix, mode);
5672 }
5673
5674 /* Like gen_libfunc, but verify that a floating-point operation is involved and set the decimal prefix if needed. */
5675
5676 static void
5677 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5678 enum machine_mode mode)
5679 {
5680 char *dec_opname;
5681
5682 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5683 gen_libfunc (optable, opname, suffix, mode);
5684 if (DECIMAL_FLOAT_MODE_P (mode))
5685 {
5686 dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5687 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5688 depending on the low level floating format used. */
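/* For example, with DECIMAL_PREFIX "bid_", addition in SDmode gets the
libcall name "__bid_addsd3". */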
5689 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5690 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5691 gen_libfunc (optable, dec_opname, suffix, mode);
5692 }
5693 }
5694
5695 /* Like gen_libfunc, but verify that a fixed-point operation is involved. */
5696
5697 static void
5698 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5699 enum machine_mode mode)
5700 {
5701 if (!ALL_FIXED_POINT_MODE_P (mode))
5702 return;
5703 gen_libfunc (optable, opname, suffix, mode);
5704 }
5705
5706 /* Like gen_libfunc, but verify that a signed fixed-point operation is
5707 involved. */
5708
5709 static void
5710 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5711 enum machine_mode mode)
5712 {
5713 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5714 return;
5715 gen_libfunc (optable, opname, suffix, mode);
5716 }
5717
5718 /* Like gen_libfunc, but verify that an unsigned fixed-point operation is
5719 involved. */
5720
5721 static void
5722 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5723 enum machine_mode mode)
5724 {
5725 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5726 return;
5727 gen_libfunc (optable, opname, suffix, mode);
5728 }
5729
5730 /* Like gen_libfunc, but verify that an FP or INT operation is involved. */
5731
5732 static void
5733 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5734 enum machine_mode mode)
5735 {
5736 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5737 gen_fp_libfunc (optable, name, suffix, mode);
5738 if (INTEGRAL_MODE_P (mode))
5739 gen_int_libfunc (optable, name, suffix, mode);
5740 }
5741
5742 /* Like gen_libfunc, but verify that an FP or INT operation is involved,
5743 and add a 'v' suffix for the integer variant. */
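/* For example, addv_optab in SImode is given "__addvsi3", while in SFmode
it is given the plain "__addsf3". */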
5744
5745 static void
5746 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5747 enum machine_mode mode)
5748 {
5749 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5750 gen_fp_libfunc (optable, name, suffix, mode);
5751 if (GET_MODE_CLASS (mode) == MODE_INT)
5752 {
5753 int len = strlen (name);
5754 char *v_name = alloca (len + 2);
5755 strcpy (v_name, name);
5756 v_name[len] = 'v';
5757 v_name[len + 1] = 0;
5758 gen_int_libfunc (optable, v_name, suffix, mode);
5759 }
5760 }
5761
5762 /* Like gen_libfunc, but verify that an FP, INT or FIXED operation is
5763 involved. */
5764
5765 static void
5766 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5767 enum machine_mode mode)
5768 {
5769 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5770 gen_fp_libfunc (optable, name, suffix, mode);
5771 if (INTEGRAL_MODE_P (mode))
5772 gen_int_libfunc (optable, name, suffix, mode);
5773 if (ALL_FIXED_POINT_MODE_P (mode))
5774 gen_fixed_libfunc (optable, name, suffix, mode);
5775 }
5776
5777 /* Like gen_libfunc, but verify that an FP, INT or signed FIXED operation is
5778 involved. */
5779
5780 static void
5781 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5782 enum machine_mode mode)
5783 {
5784 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5785 gen_fp_libfunc (optable, name, suffix, mode);
5786 if (INTEGRAL_MODE_P (mode))
5787 gen_int_libfunc (optable, name, suffix, mode);
5788 if (SIGNED_FIXED_POINT_MODE_P (mode))
5789 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5790 }
5791
5792 /* Like gen_libfunc, but verify that an INT or FIXED operation is
5793 involved. */
5794
5795 static void
5796 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5797 enum machine_mode mode)
5798 {
5799 if (INTEGRAL_MODE_P (mode))
5800 gen_int_libfunc (optable, name, suffix, mode);
5801 if (ALL_FIXED_POINT_MODE_P (mode))
5802 gen_fixed_libfunc (optable, name, suffix, mode);
5803 }
5804
5805 /* Like gen_libfunc, but verify that an INT or signed FIXED operation is
5806 involved. */
5807
5808 static void
5809 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5810 enum machine_mode mode)
5811 {
5812 if (INTEGRAL_MODE_P (mode))
5813 gen_int_libfunc (optable, name, suffix, mode);
5814 if (SIGNED_FIXED_POINT_MODE_P (mode))
5815 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5816 }
5817
5818 /* Like gen_libfunc, but verify that an INT or unsigned FIXED operation is
5819 involved. */
5820
5821 static void
5822 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5823 enum machine_mode mode)
5824 {
5825 if (INTEGRAL_MODE_P (mode))
5826 gen_int_libfunc (optable, name, suffix, mode);
5827 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5828 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5829 }
5830
5831 /* Initialize the libfunc fields of an entire group of entries of an
5832 inter-mode-class conversion optab. The string formation rules are
5833 similar to the ones for gen_libfunc, above, but instead of having
5834 a mode name and an operand count, these functions have two mode names
5835 and no operand count. */
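/* For example, ufix_optab (basename "fixuns") converting DFmode to SImode
is given the libcall name "__fixunsdfsi". */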
5836
5837 static void
5838 gen_interclass_conv_libfunc (convert_optab tab,
5839 const char *opname,
5840 enum machine_mode tmode,
5841 enum machine_mode fmode)
5842 {
5843 size_t opname_len = strlen (opname);
5844 size_t mname_len = 0;
5845
5846 const char *fname, *tname;
5847 const char *q;
5848 char *libfunc_name, *suffix;
5849 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5850 char *p;
5851
5852 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5853 depends on which underlying decimal floating point format is used. */
5854 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5855
5856 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5857
5858 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5859 nondec_name[0] = '_';
5860 nondec_name[1] = '_';
5861 memcpy (&nondec_name[2], opname, opname_len);
5862 nondec_suffix = nondec_name + opname_len + 2;
5863
5864 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5865 dec_name[0] = '_';
5866 dec_name[1] = '_';
5867 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5868 memcpy (&dec_name[2+dec_len], opname, opname_len);
5869 dec_suffix = dec_name + dec_len + opname_len + 2;
5870
5871 fname = GET_MODE_NAME (fmode);
5872 tname = GET_MODE_NAME (tmode);
5873
5874 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5875 {
5876 libfunc_name = dec_name;
5877 suffix = dec_suffix;
5878 }
5879 else
5880 {
5881 libfunc_name = nondec_name;
5882 suffix = nondec_suffix;
5883 }
5884
5885 p = suffix;
5886 for (q = fname; *q; p++, q++)
5887 *p = TOLOWER (*q);
5888 for (q = tname; *q; p++, q++)
5889 *p = TOLOWER (*q);
5890
5891 *p = '\0';
5892
5893 set_conv_libfunc (tab, tmode, fmode,
5894 ggc_alloc_string (libfunc_name, p - libfunc_name));
5895 }
5896
5897 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5898 int->fp conversion. */
5899
5900 static void
5901 gen_int_to_fp_conv_libfunc (convert_optab tab,
5902 const char *opname,
5903 enum machine_mode tmode,
5904 enum machine_mode fmode)
5905 {
5906 if (GET_MODE_CLASS (fmode) != MODE_INT)
5907 return;
5908 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5909 return;
5910 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5911 }
5912
5913 /* ufloat_optab is special: it uses the "floatun" basename for binary FP
5914 targets and the "floatuns" spelling for the decimal FP naming scheme. */
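/* For example, converting SImode to DFmode with the "floatun" basename
yields "__floatunsidf", matching libgcc's historical name, while the
decimal variants spell out "floatuns". */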
5915
5916 static void
5917 gen_ufloat_conv_libfunc (convert_optab tab,
5918 const char *opname ATTRIBUTE_UNUSED,
5919 enum machine_mode tmode,
5920 enum machine_mode fmode)
5921 {
5922 if (DECIMAL_FLOAT_MODE_P (tmode))
5923 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5924 else
5925 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5926 }
5927
5928 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5929 an int->fp conversion with no decimal floating point involved. */
5930
5931 static void
5932 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5933 const char *opname,
5934 enum machine_mode tmode,
5935 enum machine_mode fmode)
5936 {
5937 if (GET_MODE_CLASS (fmode) != MODE_INT)
5938 return;
5939 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5940 return;
5941 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5942 }
5943
5944 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5945 an fp->int conversion. */
5946
5947 static void
5948 gen_fp_to_int_conv_libfunc (convert_optab tab,
5949 const char *opname,
5950 enum machine_mode tmode,
5951 enum machine_mode fmode)
5952 {
5953 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5954 return;
5955 if (GET_MODE_CLASS (tmode) != MODE_INT)
5956 return;
5957 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5958 }
5959
5960 /* Initialize the libfunc fields of an intra-mode-class conversion optab.
5961 The string formation rules are similar to the ones for gen_libfunc,
5962 above, except that the name always ends with a '2' suffix. */
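/* For example, extending SFmode to DFmode through sext_optab (basename
"extend") yields the libcall name "__extendsfdf2". */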
5963
5964 static void
5965 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5966 enum machine_mode tmode, enum machine_mode fmode)
5967 {
5968 size_t opname_len = strlen (opname);
5969 size_t mname_len = 0;
5970
5971 const char *fname, *tname;
5972 const char *q;
5973 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5974 char *libfunc_name, *suffix;
5975 char *p;
5976
5977 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5978 depends on which underlying decimal floating point format is used. */
5979 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5980
5981 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5982
5983 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5984 nondec_name[0] = '_';
5985 nondec_name[1] = '_';
5986 memcpy (&nondec_name[2], opname, opname_len);
5987 nondec_suffix = nondec_name + opname_len + 2;
5988
5989 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5990 dec_name[0] = '_';
5991 dec_name[1] = '_';
5992 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5993 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5994 dec_suffix = dec_name + dec_len + opname_len + 2;
5995
5996 fname = GET_MODE_NAME (fmode);
5997 tname = GET_MODE_NAME (tmode);
5998
5999 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
6000 {
6001 libfunc_name = dec_name;
6002 suffix = dec_suffix;
6003 }
6004 else
6005 {
6006 libfunc_name = nondec_name;
6007 suffix = nondec_suffix;
6008 }
6009
6010 p = suffix;
6011 for (q = fname; *q; p++, q++)
6012 *p = TOLOWER (*q);
6013 for (q = tname; *q; p++, q++)
6014 *p = TOLOWER (*q);
6015
6016 *p++ = '2';
6017 *p = '\0';
6018
6019 set_conv_libfunc (tab, tmode, fmode,
6020 ggc_alloc_string (libfunc_name, p - libfunc_name));
6021 }
6022
6023 /* Pick the proper libcall for trunc_optab. We need to choose whether we do
6024 truncation or extension, and whether it is interclass or intraclass. */
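/* For example, truncating DFmode to SFmode stays within the binary FP
class and therefore gets the intraclass name "__truncdfsf2". */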
6025
6026 static void
6027 gen_trunc_conv_libfunc (convert_optab tab,
6028 const char *opname,
6029 enum machine_mode tmode,
6030 enum machine_mode fmode)
6031 {
6032 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
6033 return;
6034 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
6035 return;
6036 if (tmode == fmode)
6037 return;
6038
6039 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
6040 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
6041 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6042
6043 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
6044 return;
6045
6046 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
6047 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
6048 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
6049 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6050 }
6051
6052 /* Pick the proper libcall for extend_optab. We need to choose whether we do
6053 truncation or extension, and whether it is interclass or intraclass. */
6054
6055 static void
6056 gen_extend_conv_libfunc (convert_optab tab,
6057 const char *opname ATTRIBUTE_UNUSED,
6058 enum machine_mode tmode,
6059 enum machine_mode fmode)
6060 {
6061 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
6062 return;
6063 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
6064 return;
6065 if (tmode == fmode)
6066 return;
6067
6068 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
6069 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
6070 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6071
6072 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
6073 return;
6074
6075 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
6076 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
6077 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
6078 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6079 }
6080
6081 /* Pick the proper libcall for fract_optab. We need to choose whether the
6082 conversion is interclass or intraclass. */
6083
6084 static void
6085 gen_fract_conv_libfunc (convert_optab tab,
6086 const char *opname,
6087 enum machine_mode tmode,
6088 enum machine_mode fmode)
6089 {
6090 if (tmode == fmode)
6091 return;
6092 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
6093 return;
6094
6095 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6096 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6097 else
6098 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6099 }
6100
6101 /* Pick proper libcall for fractuns_optab. */
6102
6103 static void
6104 gen_fractuns_conv_libfunc (convert_optab tab,
6105 const char *opname,
6106 enum machine_mode tmode,
6107 enum machine_mode fmode)
6108 {
6109 if (tmode == fmode)
6110 return;
6111 /* One mode must be a fixed-point mode, and the other must be an integer
6112 mode. */
6113 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
6114 || (ALL_FIXED_POINT_MODE_P (fmode)
6115 && GET_MODE_CLASS (tmode) == MODE_INT)))
6116 return;
6117
6118 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6119 }
6120
6121 /* Pick the proper libcall for satfract_optab. We need to choose whether the
6122 conversion is interclass or intraclass. */
6123
6124 static void
6125 gen_satfract_conv_libfunc (convert_optab tab,
6126 const char *opname,
6127 enum machine_mode tmode,
6128 enum machine_mode fmode)
6129 {
6130 if (tmode == fmode)
6131 return;
6132 /* TMODE must be a fixed-point mode. */
6133 if (!ALL_FIXED_POINT_MODE_P (tmode))
6134 return;
6135
6136 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6137 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6138 else
6139 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6140 }
6141
6142 /* Pick proper libcall for satfractuns_optab. */
6143
6144 static void
6145 gen_satfractuns_conv_libfunc (convert_optab tab,
6146 const char *opname,
6147 enum machine_mode tmode,
6148 enum machine_mode fmode)
6149 {
6150 if (tmode == fmode)
6151 return;
6152 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6153 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6154 return;
6155
6156 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6157 }
6158
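/* Return a SYMBOL_REF rtx for the library function named NAME, with its
flags set up by targetm.encode_section_info. */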
6159 rtx
6160 init_one_libfunc (const char *name)
6161 {
6162 rtx symbol;
6163
6164 /* Create a FUNCTION_DECL that can be passed to
6165 targetm.encode_section_info. */
6166 /* ??? We don't have any type information except that this is
6167 a function. Pretend this is "int foo()". */
6168 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
6169 build_function_type (integer_type_node, NULL_TREE));
6170 DECL_ARTIFICIAL (decl) = 1;
6171 DECL_EXTERNAL (decl) = 1;
6172 TREE_PUBLIC (decl) = 1;
6173
6174 symbol = XEXP (DECL_RTL (decl), 0);
6175
6176 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6177 are the flags assigned by targetm.encode_section_info. */
6178 SET_SYMBOL_REF_DECL (symbol, 0);
6179
6180 return symbol;
6181 }
6182
6183 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6184 MODE to NAME, which should be either 0 or a string constant. */
6185 void
6186 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6187 {
6188 rtx val;
6189 struct libfunc_entry e;
6190 struct libfunc_entry **slot;
6191 e.optab = (size_t) (optable - &optab_table[0]);
6192 e.mode1 = mode;
6193 e.mode2 = VOIDmode;
6194
6195 if (name)
6196 val = init_one_libfunc (name);
6197 else
6198 val = 0;
6199 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6200 if (*slot == NULL)
6201 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6202 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6203 (*slot)->mode1 = mode;
6204 (*slot)->mode2 = VOIDmode;
6205 (*slot)->libfunc = val;
6206 }
6207
6208 /* Call this to reset the function entry for one conversion optab
6209 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6210 either 0 or a string constant. */
6211 void
6212 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6213 enum machine_mode fmode, const char *name)
6214 {
6215 rtx val;
6216 struct libfunc_entry e;
6217 struct libfunc_entry **slot;
6218 e.optab = (size_t) (optable - &convert_optab_table[0]);
6219 e.mode1 = tmode;
6220 e.mode2 = fmode;
6221
6222 if (name)
6223 val = init_one_libfunc (name);
6224 else
6225 val = 0;
6226 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6227 if (*slot == NULL)
6228 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6229 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6230 (*slot)->mode1 = tmode;
6231 (*slot)->mode2 = fmode;
6232 (*slot)->libfunc = val;
6233 }
6234
6235 /* Call this to initialize the contents of the optabs
6236 appropriately for the current target machine. */
6237
6238 void
6239 init_optabs (void)
6240 {
6241 unsigned int i;
6242 enum machine_mode int_mode;
6243 static bool reinit;
6244
6245 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6246 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6247
6248 for (i = 0; i < NUM_RTX_CODE; i++)
6249 setcc_gen_code[i] = CODE_FOR_nothing;
6250
6251 #ifdef HAVE_conditional_move
6252 for (i = 0; i < NUM_MACHINE_MODES; i++)
6253 movcc_gen_code[i] = CODE_FOR_nothing;
6254 #endif
6255
6256 for (i = 0; i < NUM_MACHINE_MODES; i++)
6257 {
6258 vcond_gen_code[i] = CODE_FOR_nothing;
6259 vcondu_gen_code[i] = CODE_FOR_nothing;
6260 }
6261
6262 #if GCC_VERSION >= 4000
6263 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
6264 if (reinit)
6265 init_insn_codes ();
6266 #else
6267 init_insn_codes ();
6268 #endif
6269
6270 init_optab (add_optab, PLUS);
6271 init_optabv (addv_optab, PLUS);
6272 init_optab (sub_optab, MINUS);
6273 init_optabv (subv_optab, MINUS);
6274 init_optab (ssadd_optab, SS_PLUS);
6275 init_optab (usadd_optab, US_PLUS);
6276 init_optab (sssub_optab, SS_MINUS);
6277 init_optab (ussub_optab, US_MINUS);
6278 init_optab (smul_optab, MULT);
6279 init_optab (ssmul_optab, SS_MULT);
6280 init_optab (usmul_optab, US_MULT);
6281 init_optabv (smulv_optab, MULT);
6282 init_optab (smul_highpart_optab, UNKNOWN);
6283 init_optab (umul_highpart_optab, UNKNOWN);
6284 init_optab (smul_widen_optab, UNKNOWN);
6285 init_optab (umul_widen_optab, UNKNOWN);
6286 init_optab (usmul_widen_optab, UNKNOWN);
6287 init_optab (smadd_widen_optab, UNKNOWN);
6288 init_optab (umadd_widen_optab, UNKNOWN);
6289 init_optab (ssmadd_widen_optab, UNKNOWN);
6290 init_optab (usmadd_widen_optab, UNKNOWN);
6291 init_optab (smsub_widen_optab, UNKNOWN);
6292 init_optab (umsub_widen_optab, UNKNOWN);
6293 init_optab (ssmsub_widen_optab, UNKNOWN);
6294 init_optab (usmsub_widen_optab, UNKNOWN);
6295 init_optab (sdiv_optab, DIV);
6296 init_optab (ssdiv_optab, SS_DIV);
6297 init_optab (usdiv_optab, US_DIV);
6298 init_optabv (sdivv_optab, DIV);
6299 init_optab (sdivmod_optab, UNKNOWN);
6300 init_optab (udiv_optab, UDIV);
6301 init_optab (udivmod_optab, UNKNOWN);
6302 init_optab (smod_optab, MOD);
6303 init_optab (umod_optab, UMOD);
6304 init_optab (fmod_optab, UNKNOWN);
6305 init_optab (remainder_optab, UNKNOWN);
6306 init_optab (ftrunc_optab, UNKNOWN);
6307 init_optab (and_optab, AND);
6308 init_optab (ior_optab, IOR);
6309 init_optab (xor_optab, XOR);
6310 init_optab (ashl_optab, ASHIFT);
6311 init_optab (ssashl_optab, SS_ASHIFT);
6312 init_optab (usashl_optab, US_ASHIFT);
6313 init_optab (ashr_optab, ASHIFTRT);
6314 init_optab (lshr_optab, LSHIFTRT);
6315 init_optab (rotl_optab, ROTATE);
6316 init_optab (rotr_optab, ROTATERT);
6317 init_optab (smin_optab, SMIN);
6318 init_optab (smax_optab, SMAX);
6319 init_optab (umin_optab, UMIN);
6320 init_optab (umax_optab, UMAX);
6321 init_optab (pow_optab, UNKNOWN);
6322 init_optab (atan2_optab, UNKNOWN);
6323
6324 /* These three have codes assigned exclusively for the sake of
6325 have_insn_for. */
6326 init_optab (mov_optab, SET);
6327 init_optab (movstrict_optab, STRICT_LOW_PART);
6328 init_optab (cmp_optab, COMPARE);
6329
6330 init_optab (storent_optab, UNKNOWN);
6331
6332 init_optab (ucmp_optab, UNKNOWN);
6333 init_optab (tst_optab, UNKNOWN);
6334
6335 init_optab (eq_optab, EQ);
6336 init_optab (ne_optab, NE);
6337 init_optab (gt_optab, GT);
6338 init_optab (ge_optab, GE);
6339 init_optab (lt_optab, LT);
6340 init_optab (le_optab, LE);
6341 init_optab (unord_optab, UNORDERED);
6342
6343 init_optab (neg_optab, NEG);
6344 init_optab (ssneg_optab, SS_NEG);
6345 init_optab (usneg_optab, US_NEG);
6346 init_optabv (negv_optab, NEG);
6347 init_optab (abs_optab, ABS);
6348 init_optabv (absv_optab, ABS);
6349 init_optab (addcc_optab, UNKNOWN);
6350 init_optab (one_cmpl_optab, NOT);
6351 init_optab (bswap_optab, BSWAP);
6352 init_optab (ffs_optab, FFS);
6353 init_optab (clz_optab, CLZ);
6354 init_optab (ctz_optab, CTZ);
6355 init_optab (popcount_optab, POPCOUNT);
6356 init_optab (parity_optab, PARITY);
6357 init_optab (sqrt_optab, SQRT);
6358 init_optab (floor_optab, UNKNOWN);
6359 init_optab (ceil_optab, UNKNOWN);
6360 init_optab (round_optab, UNKNOWN);
6361 init_optab (btrunc_optab, UNKNOWN);
6362 init_optab (nearbyint_optab, UNKNOWN);
6363 init_optab (rint_optab, UNKNOWN);
6364 init_optab (sincos_optab, UNKNOWN);
6365 init_optab (sin_optab, UNKNOWN);
6366 init_optab (asin_optab, UNKNOWN);
6367 init_optab (cos_optab, UNKNOWN);
6368 init_optab (acos_optab, UNKNOWN);
6369 init_optab (exp_optab, UNKNOWN);
6370 init_optab (exp10_optab, UNKNOWN);
6371 init_optab (exp2_optab, UNKNOWN);
6372 init_optab (expm1_optab, UNKNOWN);
6373 init_optab (ldexp_optab, UNKNOWN);
6374 init_optab (scalb_optab, UNKNOWN);
6375 init_optab (logb_optab, UNKNOWN);
6376 init_optab (ilogb_optab, UNKNOWN);
6377 init_optab (log_optab, UNKNOWN);
6378 init_optab (log10_optab, UNKNOWN);
6379 init_optab (log2_optab, UNKNOWN);
6380 init_optab (log1p_optab, UNKNOWN);
6381 init_optab (tan_optab, UNKNOWN);
6382 init_optab (atan_optab, UNKNOWN);
6383 init_optab (copysign_optab, UNKNOWN);
6384 init_optab (signbit_optab, UNKNOWN);
6385
6386 init_optab (isinf_optab, UNKNOWN);
6387
6388 init_optab (strlen_optab, UNKNOWN);
6389 init_optab (cbranch_optab, UNKNOWN);
6390 init_optab (cmov_optab, UNKNOWN);
6391 init_optab (cstore_optab, UNKNOWN);
6392 init_optab (push_optab, UNKNOWN);
6393
6394 init_optab (reduc_smax_optab, UNKNOWN);
6395 init_optab (reduc_umax_optab, UNKNOWN);
6396 init_optab (reduc_smin_optab, UNKNOWN);
6397 init_optab (reduc_umin_optab, UNKNOWN);
6398 init_optab (reduc_splus_optab, UNKNOWN);
6399 init_optab (reduc_uplus_optab, UNKNOWN);
6400
6401 init_optab (ssum_widen_optab, UNKNOWN);
6402 init_optab (usum_widen_optab, UNKNOWN);
6403 init_optab (sdot_prod_optab, UNKNOWN);
6404 init_optab (udot_prod_optab, UNKNOWN);
6405
6406 init_optab (vec_extract_optab, UNKNOWN);
6407 init_optab (vec_extract_even_optab, UNKNOWN);
6408 init_optab (vec_extract_odd_optab, UNKNOWN);
6409 init_optab (vec_interleave_high_optab, UNKNOWN);
6410 init_optab (vec_interleave_low_optab, UNKNOWN);
6411 init_optab (vec_set_optab, UNKNOWN);
6412 init_optab (vec_init_optab, UNKNOWN);
6413 init_optab (vec_shl_optab, UNKNOWN);
6414 init_optab (vec_shr_optab, UNKNOWN);
6415 init_optab (vec_realign_load_optab, UNKNOWN);
6416 init_optab (movmisalign_optab, UNKNOWN);
6417 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6418 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6419 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6420 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6421 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6422 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6423 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6424 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6425 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6426 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6427 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6428 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6429 init_optab (vec_pack_trunc_optab, UNKNOWN);
6430 init_optab (vec_pack_usat_optab, UNKNOWN);
6431 init_optab (vec_pack_ssat_optab, UNKNOWN);
6432 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6433 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6434
6435 init_optab (powi_optab, UNKNOWN);
6436
6437 /* Conversions. */
6438 init_convert_optab (sext_optab, SIGN_EXTEND);
6439 init_convert_optab (zext_optab, ZERO_EXTEND);
6440 init_convert_optab (trunc_optab, TRUNCATE);
6441 init_convert_optab (sfix_optab, FIX);
6442 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6443 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6444 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6445 init_convert_optab (sfloat_optab, FLOAT);
6446 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6447 init_convert_optab (lrint_optab, UNKNOWN);
6448 init_convert_optab (lround_optab, UNKNOWN);
6449 init_convert_optab (lfloor_optab, UNKNOWN);
6450 init_convert_optab (lceil_optab, UNKNOWN);
6451
6452 init_convert_optab (fract_optab, FRACT_CONVERT);
6453 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6454 init_convert_optab (satfract_optab, SAT_FRACT);
6455 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6456
6457 for (i = 0; i < NUM_MACHINE_MODES; i++)
6458 {
6459 movmem_optab[i] = CODE_FOR_nothing;
6460 cmpstr_optab[i] = CODE_FOR_nothing;
6461 cmpstrn_optab[i] = CODE_FOR_nothing;
6462 cmpmem_optab[i] = CODE_FOR_nothing;
6463 setmem_optab[i] = CODE_FOR_nothing;
6464
6465 sync_add_optab[i] = CODE_FOR_nothing;
6466 sync_sub_optab[i] = CODE_FOR_nothing;
6467 sync_ior_optab[i] = CODE_FOR_nothing;
6468 sync_and_optab[i] = CODE_FOR_nothing;
6469 sync_xor_optab[i] = CODE_FOR_nothing;
6470 sync_nand_optab[i] = CODE_FOR_nothing;
6471 sync_old_add_optab[i] = CODE_FOR_nothing;
6472 sync_old_sub_optab[i] = CODE_FOR_nothing;
6473 sync_old_ior_optab[i] = CODE_FOR_nothing;
6474 sync_old_and_optab[i] = CODE_FOR_nothing;
6475 sync_old_xor_optab[i] = CODE_FOR_nothing;
6476 sync_old_nand_optab[i] = CODE_FOR_nothing;
6477 sync_new_add_optab[i] = CODE_FOR_nothing;
6478 sync_new_sub_optab[i] = CODE_FOR_nothing;
6479 sync_new_ior_optab[i] = CODE_FOR_nothing;
6480 sync_new_and_optab[i] = CODE_FOR_nothing;
6481 sync_new_xor_optab[i] = CODE_FOR_nothing;
6482 sync_new_nand_optab[i] = CODE_FOR_nothing;
6483 sync_compare_and_swap[i] = CODE_FOR_nothing;
6484 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
6485 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6486 sync_lock_release[i] = CODE_FOR_nothing;
6487
6488 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6489 }
6490
6491 /* Fill in the optabs with the insns we support. */
6492 init_all_optabs ();
6493
6494 /* Initialize the optabs with the names of the library functions. */
6495 add_optab->libcall_basename = "add";
6496 add_optab->libcall_suffix = '3';
6497 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6498 addv_optab->libcall_basename = "add";
6499 addv_optab->libcall_suffix = '3';
6500 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6501 ssadd_optab->libcall_basename = "ssadd";
6502 ssadd_optab->libcall_suffix = '3';
6503 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6504 usadd_optab->libcall_basename = "usadd";
6505 usadd_optab->libcall_suffix = '3';
6506 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6507 sub_optab->libcall_basename = "sub";
6508 sub_optab->libcall_suffix = '3';
6509 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6510 subv_optab->libcall_basename = "sub";
6511 subv_optab->libcall_suffix = '3';
6512 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6513 sssub_optab->libcall_basename = "sssub";
6514 sssub_optab->libcall_suffix = '3';
6515 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6516 ussub_optab->libcall_basename = "ussub";
6517 ussub_optab->libcall_suffix = '3';
6518 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6519 smul_optab->libcall_basename = "mul";
6520 smul_optab->libcall_suffix = '3';
6521 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6522 smulv_optab->libcall_basename = "mul";
6523 smulv_optab->libcall_suffix = '3';
6524 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6525 ssmul_optab->libcall_basename = "ssmul";
6526 ssmul_optab->libcall_suffix = '3';
6527 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6528 usmul_optab->libcall_basename = "usmul";
6529 usmul_optab->libcall_suffix = '3';
6530 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6531 sdiv_optab->libcall_basename = "div";
6532 sdiv_optab->libcall_suffix = '3';
6533 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6534 sdivv_optab->libcall_basename = "divv";
6535 sdivv_optab->libcall_suffix = '3';
6536 sdivv_optab->libcall_gen = gen_int_libfunc;
6537 ssdiv_optab->libcall_basename = "ssdiv";
6538 ssdiv_optab->libcall_suffix = '3';
6539 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6540 udiv_optab->libcall_basename = "udiv";
6541 udiv_optab->libcall_suffix = '3';
6542 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6543 usdiv_optab->libcall_basename = "usdiv";
6544 usdiv_optab->libcall_suffix = '3';
6545 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6546 sdivmod_optab->libcall_basename = "divmod";
6547 sdivmod_optab->libcall_suffix = '4';
6548 sdivmod_optab->libcall_gen = gen_int_libfunc;
6549 udivmod_optab->libcall_basename = "udivmod";
6550 udivmod_optab->libcall_suffix = '4';
6551 udivmod_optab->libcall_gen = gen_int_libfunc;
6552 smod_optab->libcall_basename = "mod";
6553 smod_optab->libcall_suffix = '3';
6554 smod_optab->libcall_gen = gen_int_libfunc;
6555 umod_optab->libcall_basename = "umod";
6556 umod_optab->libcall_suffix = '3';
6557 umod_optab->libcall_gen = gen_int_libfunc;
6558 ftrunc_optab->libcall_basename = "ftrunc";
6559 ftrunc_optab->libcall_suffix = '2';
6560 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6561 and_optab->libcall_basename = "and";
6562 and_optab->libcall_suffix = '3';
6563 and_optab->libcall_gen = gen_int_libfunc;
6564 ior_optab->libcall_basename = "ior";
6565 ior_optab->libcall_suffix = '3';
6566 ior_optab->libcall_gen = gen_int_libfunc;
6567 xor_optab->libcall_basename = "xor";
6568 xor_optab->libcall_suffix = '3';
6569 xor_optab->libcall_gen = gen_int_libfunc;
6570 ashl_optab->libcall_basename = "ashl";
6571 ashl_optab->libcall_suffix = '3';
6572 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6573 ssashl_optab->libcall_basename = "ssashl";
6574 ssashl_optab->libcall_suffix = '3';
6575 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6576 usashl_optab->libcall_basename = "usashl";
6577 usashl_optab->libcall_suffix = '3';
6578 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6579 ashr_optab->libcall_basename = "ashr";
6580 ashr_optab->libcall_suffix = '3';
6581 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6582 lshr_optab->libcall_basename = "lshr";
6583 lshr_optab->libcall_suffix = '3';
6584 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6585 smin_optab->libcall_basename = "min";
6586 smin_optab->libcall_suffix = '3';
6587 smin_optab->libcall_gen = gen_int_fp_libfunc;
6588 smax_optab->libcall_basename = "max";
6589 smax_optab->libcall_suffix = '3';
6590 smax_optab->libcall_gen = gen_int_fp_libfunc;
6591 umin_optab->libcall_basename = "umin";
6592 umin_optab->libcall_suffix = '3';
6593 umin_optab->libcall_gen = gen_int_libfunc;
6594 umax_optab->libcall_basename = "umax";
6595 umax_optab->libcall_suffix = '3';
6596 umax_optab->libcall_gen = gen_int_libfunc;
6597 neg_optab->libcall_basename = "neg";
6598 neg_optab->libcall_suffix = '2';
6599 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6600 ssneg_optab->libcall_basename = "ssneg";
6601 ssneg_optab->libcall_suffix = '2';
6602 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6603 usneg_optab->libcall_basename = "usneg";
6604 usneg_optab->libcall_suffix = '2';
6605 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6606 negv_optab->libcall_basename = "neg";
6607 negv_optab->libcall_suffix = '2';
6608 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6609 one_cmpl_optab->libcall_basename = "one_cmpl";
6610 one_cmpl_optab->libcall_suffix = '2';
6611 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6612 ffs_optab->libcall_basename = "ffs";
6613 ffs_optab->libcall_suffix = '2';
6614 ffs_optab->libcall_gen = gen_int_libfunc;
6615 clz_optab->libcall_basename = "clz";
6616 clz_optab->libcall_suffix = '2';
6617 clz_optab->libcall_gen = gen_int_libfunc;
6618 ctz_optab->libcall_basename = "ctz";
6619 ctz_optab->libcall_suffix = '2';
6620 ctz_optab->libcall_gen = gen_int_libfunc;
6621 popcount_optab->libcall_basename = "popcount";
6622 popcount_optab->libcall_suffix = '2';
6623 popcount_optab->libcall_gen = gen_int_libfunc;
6624 parity_optab->libcall_basename = "parity";
6625 parity_optab->libcall_suffix = '2';
6626 parity_optab->libcall_gen = gen_int_libfunc;
6627
6628 /* Comparison libcalls for integers MUST come in pairs,
6629 signed/unsigned. */
6630 cmp_optab->libcall_basename = "cmp";
6631 cmp_optab->libcall_suffix = '2';
6632 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6633 ucmp_optab->libcall_basename = "ucmp";
6634 ucmp_optab->libcall_suffix = '2';
6635 ucmp_optab->libcall_gen = gen_int_libfunc;
6636
6637 /* EQ etc are floating point only. */
6638 eq_optab->libcall_basename = "eq";
6639 eq_optab->libcall_suffix = '2';
6640 eq_optab->libcall_gen = gen_fp_libfunc;
6641 ne_optab->libcall_basename = "ne";
6642 ne_optab->libcall_suffix = '2';
6643 ne_optab->libcall_gen = gen_fp_libfunc;
6644 gt_optab->libcall_basename = "gt";
6645 gt_optab->libcall_suffix = '2';
6646 gt_optab->libcall_gen = gen_fp_libfunc;
6647 ge_optab->libcall_basename = "ge";
6648 ge_optab->libcall_suffix = '2';
6649 ge_optab->libcall_gen = gen_fp_libfunc;
6650 lt_optab->libcall_basename = "lt";
6651 lt_optab->libcall_suffix = '2';
6652 lt_optab->libcall_gen = gen_fp_libfunc;
6653 le_optab->libcall_basename = "le";
6654 le_optab->libcall_suffix = '2';
6655 le_optab->libcall_gen = gen_fp_libfunc;
6656 unord_optab->libcall_basename = "unord";
6657 unord_optab->libcall_suffix = '2';
6658 unord_optab->libcall_gen = gen_fp_libfunc;
6659
6660 powi_optab->libcall_basename = "powi";
6661 powi_optab->libcall_suffix = '2';
6662 powi_optab->libcall_gen = gen_fp_libfunc;
6663
6664 /* Conversions. */
6665 sfloat_optab->libcall_basename = "float";
6666 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6667 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6668 sfix_optab->libcall_basename = "fix";
6669 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6670 ufix_optab->libcall_basename = "fixuns";
6671 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6672 lrint_optab->libcall_basename = "lrint";
6673 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6674 lround_optab->libcall_basename = "lround";
6675 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6676 lfloor_optab->libcall_basename = "lfloor";
6677 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6678 lceil_optab->libcall_basename = "lceil";
6679 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6680
6681 /* sext_optab is also used for FLOAT_EXTEND. */
6682 sext_optab->libcall_basename = "extend";
6683 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6684 trunc_optab->libcall_basename = "trunc";
6685 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6686
6687 /* Conversions for fixed-point modes and other modes. */
6688 fract_optab->libcall_basename = "fract";
6689 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6690 satfract_optab->libcall_basename = "satfract";
6691 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6692 fractuns_optab->libcall_basename = "fractuns";
6693 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6694 satfractuns_optab->libcall_basename = "satfractuns";
6695 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6696
6697 /* The ffs function operates on `int'. Fall back on it if we do not
6698 have a libgcc2 function for that width. */
6699 if (INT_TYPE_SIZE < BITS_PER_WORD)
6700 {
6701 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6702 set_optab_libfunc (ffs_optab, int_mode, "ffs");
6704 }
6705
6706 /* Explicitly initialize the bswap libfuncs since we need them to be
6707 valid for things other than word_mode. */
6708 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6709 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6710
6711 /* Use cabs for double complex abs, since systems generally have cabs.
6712 Don't define any libcall for float complex, so that cabs will be used. */
6713 if (complex_double_type_node)
6714 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6715
6716 abort_libfunc = init_one_libfunc ("abort");
6717 memcpy_libfunc = init_one_libfunc ("memcpy");
6718 memmove_libfunc = init_one_libfunc ("memmove");
6719 memcmp_libfunc = init_one_libfunc ("memcmp");
6720 memset_libfunc = init_one_libfunc ("memset");
6721 setbits_libfunc = init_one_libfunc ("__setbits");
6722
6723 #ifndef DONT_USE_BUILTIN_SETJMP
6724 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6725 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6726 #else
6727 setjmp_libfunc = init_one_libfunc ("setjmp");
6728 longjmp_libfunc = init_one_libfunc ("longjmp");
6729 #endif
6730 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6731 unwind_sjlj_unregister_libfunc
6732 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6733
6734 /* For function entry/exit instrumentation. */
6735 profile_function_entry_libfunc
6736 = init_one_libfunc ("__cyg_profile_func_enter");
6737 profile_function_exit_libfunc
6738 = init_one_libfunc ("__cyg_profile_func_exit");
6739
6740 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6741
6742 if (HAVE_conditional_trap)
6743 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6744
6745 /* Allow the target to add more libcalls or rename some, etc. */
6746 targetm.init_libfuncs ();
6747
6748 reinit = true;
6749 }
6750
6751 /* Print information about the current contents of the optabs on
6752 STDERR. */
6753
6754 void
6755 debug_optab_libfuncs (void)
6756 {
6757 int i;
6758 int j;
6759 int k;
6760
6761 /* Dump the arithmetic optabs. */
6762 for (i = 0; i != (int) OTI_MAX; i++)
6763 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6764 {
6765 optab o;
6766 rtx l;
6767
6768 o = &optab_table[i];
6769 l = optab_libfunc (o, j);
6770 if (l)
6771 {
6772 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6773 fprintf (stderr, "%s\t%s:\t%s\n",
6774 GET_RTX_NAME (o->code),
6775 GET_MODE_NAME (j),
6776 XSTR (l, 0));
6777 }
6778 }
6779
6780 /* Dump the conversion optabs. */
6781 for (i = 0; i < (int) COI_MAX; ++i)
6782 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6783 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6784 {
6785 convert_optab o;
6786 rtx l;
6787
6788 o = &convert_optab_table[i];
6789 l = convert_optab_libfunc (o, j, k);
6790 if (l)
6791 {
6792 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6793 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6794 GET_RTX_NAME (o->code),
6795 GET_MODE_NAME (j),
6796 GET_MODE_NAME (k),
6797 XSTR (l, 0));
6798 }
6799 }
6800 }
6801
6802 \f
6803 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6804 CODE. Return 0 on failure. */
6805
6806 rtx
6807 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6808 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6809 {
6810 enum machine_mode mode = GET_MODE (op1);
6811 enum insn_code icode;
6812 rtx insn;
6813
6814 if (!HAVE_conditional_trap)
6815 return 0;
6816
6817 if (mode == VOIDmode)
6818 return 0;
6819
6820 icode = optab_handler (cmp_optab, mode)->insn_code;
6821 if (icode == CODE_FOR_nothing)
6822 return 0;
6823
6824 start_sequence ();
6825 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6826 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6827 if (!op1 || !op2)
6828 {
6829 end_sequence ();
6830 return 0;
6831 }
6832 emit_insn (GEN_FCN (icode) (op1, op2));
6833
6834 PUT_CODE (trap_rtx, code);
6835 gcc_assert (HAVE_conditional_trap);
6836 insn = gen_conditional_trap (trap_rtx, tcode);
6837 if (insn)
6838 {
6839 emit_insn (insn);
6840 insn = get_insns ();
6841 }
6842 end_sequence ();
6843
6844 return insn;
6845 }
6846
6847 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6848 or unsigned operation code. */
6849
6850 static enum rtx_code
6851 get_rtx_code (enum tree_code tcode, bool unsignedp)
6852 {
6853 enum rtx_code code;
6854 switch (tcode)
6855 {
6856 case EQ_EXPR:
6857 code = EQ;
6858 break;
6859 case NE_EXPR:
6860 code = NE;
6861 break;
6862 case LT_EXPR:
6863 code = unsignedp ? LTU : LT;
6864 break;
6865 case LE_EXPR:
6866 code = unsignedp ? LEU : LE;
6867 break;
6868 case GT_EXPR:
6869 code = unsignedp ? GTU : GT;
6870 break;
6871 case GE_EXPR:
6872 code = unsignedp ? GEU : GE;
6873 break;
6874
6875 case UNORDERED_EXPR:
6876 code = UNORDERED;
6877 break;
6878 case ORDERED_EXPR:
6879 code = ORDERED;
6880 break;
6881 case UNLT_EXPR:
6882 code = UNLT;
6883 break;
6884 case UNLE_EXPR:
6885 code = UNLE;
6886 break;
6887 case UNGT_EXPR:
6888 code = UNGT;
6889 break;
6890 case UNGE_EXPR:
6891 code = UNGE;
6892 break;
6893 case UNEQ_EXPR:
6894 code = UNEQ;
6895 break;
6896 case LTGT_EXPR:
6897 code = LTGT;
6898 break;
6899
6900 default:
6901 gcc_unreachable ();
6902 }
6903 return code;
6904 }
6905
6906 /* Return a comparison rtx for COND. Use UNSIGNEDP to select signed or
6907 unsigned operators. Do not generate a compare instruction. */
6908
6909 static rtx
6910 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6911 {
6912 enum rtx_code rcode;
6913 tree t_op0, t_op1;
6914 rtx rtx_op0, rtx_op1;
6915
6916 /* The condition is expected to be a comparison: while generating a
6917 VEC_COND_EXPR, the auto-vectorizer ensures it is a relational operation. */
6918 gcc_assert (COMPARISON_CLASS_P (cond));
6919
6920 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6921 t_op0 = TREE_OPERAND (cond, 0);
6922 t_op1 = TREE_OPERAND (cond, 1);
6923
6924 /* Expand operands. */
6925 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6926 EXPAND_STACK_PARM);
6927 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6928 EXPAND_STACK_PARM);
6929
6930 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6931 && GET_MODE (rtx_op0) != VOIDmode)
6932 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6933
6934 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6935 && GET_MODE (rtx_op1) != VOIDmode)
6936 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6937
6938 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6939 }
6940
6941 /* Return insn code for VEC_COND_EXPR EXPR. */
6942
6943 static inline enum insn_code
6944 get_vcond_icode (tree expr, enum machine_mode mode)
6945 {
6946 enum insn_code icode = CODE_FOR_nothing;
6947
6948 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6949 icode = vcondu_gen_code[mode];
6950 else
6951 icode = vcond_gen_code[mode];
6952 return icode;
6953 }
6954
6955 /* Return TRUE iff appropriate vector insns are available
6956 for the vector cond expr EXPR in mode VMODE. */
6957
6958 bool
6959 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6960 {
6961 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6962 return false;
6963 return true;
6964 }
6965
6966 /* Generate insns for VEC_COND_EXPR. */
6967
6968 rtx
6969 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6970 {
6971 enum insn_code icode;
6972 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6973 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6974 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6975
6976 icode = get_vcond_icode (vec_cond_expr, mode);
6977 if (icode == CODE_FOR_nothing)
6978 return 0;
6979
6980 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6981 target = gen_reg_rtx (mode);
6982
6983 /* Get comparison rtx. First expand both cond expr operands. */
6984 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6985 unsignedp, icode);
6986 cc_op0 = XEXP (comparison, 0);
6987 cc_op1 = XEXP (comparison, 1);
6988 /* Expand both operands and force them in reg, if required. */
6989 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6990 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6991 && mode != VOIDmode)
6992 rtx_op1 = force_reg (mode, rtx_op1);
6993
6994 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6995 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6996 && mode != VOIDmode)
6997 rtx_op2 = force_reg (mode, rtx_op2);
6998
6999 /* Emit instruction! */
7000 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
7001 comparison, cc_op0, cc_op1));
7002
7003 return target;
7004 }
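
/* Illustrative note (an added sketch, not part of the original source): the
   GEN_FCN call above hands the target's vcondM/vconduM pattern six operands:

       operand 0    - the destination vector (TARGET)
       operand 1    - the "then" vector, operand 1 of the VEC_COND_EXPR
       operand 2    - the "else" vector, operand 2 of the VEC_COND_EXPR
       operand 3    - the comparison rtx built by vector_compare_rtx
       operands 4/5 - the two comparison operands

   so a tree such as VEC_COND_EXPR <a < b, c, d> selects, element-wise,
   c[i] where a[i] < b[i] holds and d[i] elsewhere.  */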
7005
7006 \f
7007 /* This is an internal subroutine of the other compare_and_swap expanders.
7008 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
7009 operation. TARGET is an optional place to store the value result of
7010 the operation. ICODE is the particular instruction to expand. Return
7011 the result of the operation. */
7012
7013 static rtx
7014 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
7015 rtx target, enum insn_code icode)
7016 {
7017 enum machine_mode mode = GET_MODE (mem);
7018 rtx insn;
7019
7020 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7021 target = gen_reg_rtx (mode);
7022
7023 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
7024 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
7025 if (!insn_data[icode].operand[2].predicate (old_val, mode))
7026 old_val = force_reg (mode, old_val);
7027
7028 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
7029 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
7030 if (!insn_data[icode].operand[3].predicate (new_val, mode))
7031 new_val = force_reg (mode, new_val);
7032
7033 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
7034 if (insn == NULL_RTX)
7035 return NULL_RTX;
7036 emit_insn (insn);
7037
7038 return target;
7039 }
7040
7041 /* Expand a compare-and-swap operation and return its value. */
7042
7043 rtx
7044 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
7045 {
7046 enum machine_mode mode = GET_MODE (mem);
7047 enum insn_code icode = sync_compare_and_swap[mode];
7048
7049 if (icode == CODE_FOR_nothing)
7050 return NULL_RTX;
7051
7052 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
7053 }
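
/* Usage sketch (an illustration, not part of the original source): this is
   the expander that the __sync_val_compare_and_swap_N builtins ultimately
   reach, so C source such as

       old = __sync_val_compare_and_swap (&word, expected, desired);

   arrives here with MEM = &word, OLD_VAL = expected and NEW_VAL = desired,
   provided the target defines a sync_compare_and_swap pattern for the mode
   of "word"; otherwise the routine returns NULL_RTX.  */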
7054
7055 /* Expand a compare-and-swap operation and store true into the result if
7056 the operation was successful and false otherwise. Return the result.
7057 Unlike other routines, TARGET is not optional. */
7058
7059 rtx
7060 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
7061 {
7062 enum machine_mode mode = GET_MODE (mem);
7063 enum insn_code icode;
7064 rtx subtarget, label0, label1;
7065
7066 /* If the target supports a compare-and-swap pattern that simultaneously
7067 sets some flag for success, then use it. Otherwise use the regular
7068 compare-and-swap and follow that immediately with a compare insn. */
7069 icode = sync_compare_and_swap_cc[mode];
7070 switch (icode)
7071 {
7072 default:
7073 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7074 NULL_RTX, icode);
7075 if (subtarget != NULL_RTX)
7076 break;
7077
7078 /* FALLTHRU */
7079 case CODE_FOR_nothing:
7080 icode = sync_compare_and_swap[mode];
7081 if (icode == CODE_FOR_nothing)
7082 return NULL_RTX;
7083
7084       /* Ensure that if old_val == mem, we're not comparing
7085 	 against an old value.  */
7086 if (MEM_P (old_val))
7087 old_val = force_reg (mode, old_val);
7088
7089 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7090 NULL_RTX, icode);
7091 if (subtarget == NULL_RTX)
7092 return NULL_RTX;
7093
7094 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
7095 }
7096
7097 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
7098 setcc instruction from the beginning. We don't work too hard here,
7099 but it's nice to not be stupid about initial code gen either. */
7100 if (STORE_FLAG_VALUE == 1)
7101 {
7102 icode = setcc_gen_code[EQ];
7103 if (icode != CODE_FOR_nothing)
7104 {
7105 enum machine_mode cmode = insn_data[icode].operand[0].mode;
7106 rtx insn;
7107
7108 subtarget = target;
7109 if (!insn_data[icode].operand[0].predicate (target, cmode))
7110 subtarget = gen_reg_rtx (cmode);
7111
7112 insn = GEN_FCN (icode) (subtarget);
7113 if (insn)
7114 {
7115 emit_insn (insn);
7116 if (GET_MODE (target) != GET_MODE (subtarget))
7117 {
7118 convert_move (target, subtarget, 1);
7119 subtarget = target;
7120 }
7121 return subtarget;
7122 }
7123 }
7124 }
7125
7126 /* Without an appropriate setcc instruction, use a set of branches to
7127 get 1 and 0 stored into target. Presumably if the target has a
7128 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
7129
7130 label0 = gen_label_rtx ();
7131 label1 = gen_label_rtx ();
7132
7133 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
7134 emit_move_insn (target, const0_rtx);
7135 emit_jump_insn (gen_jump (label1));
7136 emit_barrier ();
7137 emit_label (label0);
7138 emit_move_insn (target, const1_rtx);
7139 emit_label (label1);
7140
7141 return target;
7142 }
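
/* Usage sketch (an illustration, not part of the original source): this
   boolean variant backs C source of the form

       if (__sync_bool_compare_and_swap (&word, expected, desired))
         owner = self;   (hypothetical caller code)

   TARGET receives 1 on a successful swap and 0 otherwise, produced either by
   a setcc insn when STORE_FLAG_VALUE == 1 or by the two-label branch sequence
   emitted at the end of the function.  */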
7143
7144 /* This is a helper function for the other atomic operations. This function
7145 emits a loop that contains SEQ that iterates until a compare-and-swap
7146 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7147 a set of instructions that takes a value from OLD_REG as an input and
7148 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7149 set to the current contents of MEM. After SEQ, a compare-and-swap will
7150 attempt to update MEM with NEW_REG. The function returns true when the
7151 loop was generated successfully. */
7152
7153 static bool
7154 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7155 {
7156 enum machine_mode mode = GET_MODE (mem);
7157 enum insn_code icode;
7158 rtx label, cmp_reg, subtarget;
7159
7160 /* The loop we want to generate looks like
7161
7162 cmp_reg = mem;
7163 label:
7164 old_reg = cmp_reg;
7165 seq;
7166 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7167 if (cmp_reg != old_reg)
7168 goto label;
7169
7170 Note that we only do the plain load from memory once. Subsequent
7171 iterations use the value loaded by the compare-and-swap pattern. */
7172
7173 label = gen_label_rtx ();
7174 cmp_reg = gen_reg_rtx (mode);
7175
7176 emit_move_insn (cmp_reg, mem);
7177 emit_label (label);
7178 emit_move_insn (old_reg, cmp_reg);
7179 if (seq)
7180 emit_insn (seq);
7181
7182 /* If the target supports a compare-and-swap pattern that simultaneously
7183 sets some flag for success, then use it. Otherwise use the regular
7184 compare-and-swap and follow that immediately with a compare insn. */
7185 icode = sync_compare_and_swap_cc[mode];
7186 switch (icode)
7187 {
7188 default:
7189 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7190 cmp_reg, icode);
7191 if (subtarget != NULL_RTX)
7192 {
7193 gcc_assert (subtarget == cmp_reg);
7194 break;
7195 }
7196
7197 /* FALLTHRU */
7198 case CODE_FOR_nothing:
7199 icode = sync_compare_and_swap[mode];
7200 if (icode == CODE_FOR_nothing)
7201 return false;
7202
7203 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7204 cmp_reg, icode);
7205 if (subtarget == NULL_RTX)
7206 return false;
7207 if (subtarget != cmp_reg)
7208 emit_move_insn (cmp_reg, subtarget);
7209
7210 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
7211 }
7212
7213 /* ??? Mark this jump predicted not taken? */
7214 emit_jump_insn (bcc_gen_fctn[NE] (label));
7215
7216 return true;
7217 }
7218
7219 /* This function generates the atomic operation MEM CODE= VAL. In this
7220 case, we do not care about any resulting value. Returns NULL if we
7221 cannot generate the operation. */
7222
7223 rtx
7224 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7225 {
7226 enum machine_mode mode = GET_MODE (mem);
7227 enum insn_code icode;
7228 rtx insn;
7229
7230 /* Look to see if the target supports the operation directly. */
7231 switch (code)
7232 {
7233 case PLUS:
7234 icode = sync_add_optab[mode];
7235 break;
7236 case IOR:
7237 icode = sync_ior_optab[mode];
7238 break;
7239 case XOR:
7240 icode = sync_xor_optab[mode];
7241 break;
7242 case AND:
7243 icode = sync_and_optab[mode];
7244 break;
7245 case NOT:
7246 icode = sync_nand_optab[mode];
7247 break;
7248
7249 case MINUS:
7250 icode = sync_sub_optab[mode];
7251 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7252 {
7253 icode = sync_add_optab[mode];
7254 if (icode != CODE_FOR_nothing)
7255 {
7256 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7257 code = PLUS;
7258 }
7259 }
7260 break;
7261
7262 default:
7263 gcc_unreachable ();
7264 }
7265
7266 /* Generate the direct operation, if present. */
7267 if (icode != CODE_FOR_nothing)
7268 {
7269 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7270 val = convert_modes (mode, GET_MODE (val), val, 1);
7271 if (!insn_data[icode].operand[1].predicate (val, mode))
7272 val = force_reg (mode, val);
7273
7274 insn = GEN_FCN (icode) (mem, val);
7275 if (insn)
7276 {
7277 emit_insn (insn);
7278 return const0_rtx;
7279 }
7280 }
7281
7282 /* Failing that, generate a compare-and-swap loop in which we perform the
7283 operation with normal arithmetic instructions. */
7284 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7285 {
7286 rtx t0 = gen_reg_rtx (mode), t1;
7287
7288 start_sequence ();
7289
7290 t1 = t0;
7291 if (code == NOT)
7292 {
7293 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7294 code = AND;
7295 }
7296 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7297 true, OPTAB_LIB_WIDEN);
7298
7299 insn = get_insns ();
7300 end_sequence ();
7301
7302 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7303 return const0_rtx;
7304 }
7305
7306 return NULL_RTX;
7307 }
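
/* Usage sketch (an illustration, not part of the original source): this
   routine serves the __sync_* builtins whose result is ignored, e.g.

       __sync_fetch_and_add (&counter, 1);     (CODE = PLUS)
       __sync_fetch_and_and (&flags, ~mask);   (CODE = AND)

   Note that in the compare-and-swap fallback the NOT case first inverts the
   old value and then ANDs it with VAL, i.e. it computes ~old & val; the same
   transformation appears again in expand_sync_fetch_operation below.  */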
7308
7309 /* This function generates the atomic operation MEM CODE= VAL. In this
7310 case, we do care about the resulting value: if AFTER is true then
7311 return the value MEM holds after the operation, if AFTER is false
7312 then return the value MEM holds before the operation. TARGET is an
7313 optional place for the result value to be stored. */
7314
7315 rtx
7316 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7317 bool after, rtx target)
7318 {
7319 enum machine_mode mode = GET_MODE (mem);
7320 enum insn_code old_code, new_code, icode;
7321 bool compensate;
7322 rtx insn;
7323
7324 /* Look to see if the target supports the operation directly. */
7325 switch (code)
7326 {
7327 case PLUS:
7328 old_code = sync_old_add_optab[mode];
7329 new_code = sync_new_add_optab[mode];
7330 break;
7331 case IOR:
7332 old_code = sync_old_ior_optab[mode];
7333 new_code = sync_new_ior_optab[mode];
7334 break;
7335 case XOR:
7336 old_code = sync_old_xor_optab[mode];
7337 new_code = sync_new_xor_optab[mode];
7338 break;
7339 case AND:
7340 old_code = sync_old_and_optab[mode];
7341 new_code = sync_new_and_optab[mode];
7342 break;
7343 case NOT:
7344 old_code = sync_old_nand_optab[mode];
7345 new_code = sync_new_nand_optab[mode];
7346 break;
7347
7348 case MINUS:
7349 old_code = sync_old_sub_optab[mode];
7350 new_code = sync_new_sub_optab[mode];
7351 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7352 || CONST_INT_P (val))
7353 {
7354 old_code = sync_old_add_optab[mode];
7355 new_code = sync_new_add_optab[mode];
7356 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7357 {
7358 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7359 code = PLUS;
7360 }
7361 }
7362 break;
7363
7364 default:
7365 gcc_unreachable ();
7366 }
7367
7368   /* If the target supports the proper new/old operation, great.  But if we
7369      only support the opposite old/new operation, check to see if we can
7370      compensate.  When the old value is supported, we can always perform the
7371      operation again with normal arithmetic to obtain the new value.  When
7372      the new value is supported, we can only recover the old value if the
7373      operation is reversible.  */
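  /* For example (an added illustration): if only sync_old_add is available
     but the caller wants the value after the addition, we can emit
     sync_old_add and then add VAL to its result; conversely, if only
     sync_new_add is available but the caller wants the value before the
     addition, we can subtract VAL from the result.  An AND, by contrast,
     cannot be undone, so its old value cannot be recovered from the new
     one; hence the PLUS/MINUS/XOR restriction below.  */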
7374 compensate = false;
7375 if (after)
7376 {
7377 icode = new_code;
7378 if (icode == CODE_FOR_nothing)
7379 {
7380 icode = old_code;
7381 if (icode != CODE_FOR_nothing)
7382 compensate = true;
7383 }
7384 }
7385 else
7386 {
7387 icode = old_code;
7388 if (icode == CODE_FOR_nothing
7389 && (code == PLUS || code == MINUS || code == XOR))
7390 {
7391 icode = new_code;
7392 if (icode != CODE_FOR_nothing)
7393 compensate = true;
7394 }
7395 }
7396
7397 /* If we found something supported, great. */
7398 if (icode != CODE_FOR_nothing)
7399 {
7400 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7401 target = gen_reg_rtx (mode);
7402
7403 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7404 val = convert_modes (mode, GET_MODE (val), val, 1);
7405 if (!insn_data[icode].operand[2].predicate (val, mode))
7406 val = force_reg (mode, val);
7407
7408 insn = GEN_FCN (icode) (target, mem, val);
7409 if (insn)
7410 {
7411 emit_insn (insn);
7412
7413 /* If we need to compensate for using an operation with the
7414 wrong return value, do so now. */
7415 if (compensate)
7416 {
7417 if (!after)
7418 {
7419 if (code == PLUS)
7420 code = MINUS;
7421 else if (code == MINUS)
7422 code = PLUS;
7423 }
7424
7425 if (code == NOT)
7426 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
7427 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
7428 true, OPTAB_LIB_WIDEN);
7429 }
7430
7431 return target;
7432 }
7433 }
7434
7435 /* Failing that, generate a compare-and-swap loop in which we perform the
7436 operation with normal arithmetic instructions. */
7437 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7438 {
7439 rtx t0 = gen_reg_rtx (mode), t1;
7440
7441 if (!target || !register_operand (target, mode))
7442 target = gen_reg_rtx (mode);
7443
7444 start_sequence ();
7445
7446 if (!after)
7447 emit_move_insn (target, t0);
7448 t1 = t0;
7449 if (code == NOT)
7450 {
7451 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7452 code = AND;
7453 }
7454 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7455 true, OPTAB_LIB_WIDEN);
7456 if (after)
7457 emit_move_insn (target, t1);
7458
7459 insn = get_insns ();
7460 end_sequence ();
7461
7462 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7463 return target;
7464 }
7465
7466 return NULL_RTX;
7467 }
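
/* Usage sketch (an illustration, not part of the original source): AFTER
   distinguishes the two builtin families,

       old_val = __sync_fetch_and_add (&counter, n);   (AFTER == false)
       new_val = __sync_add_and_fetch (&counter, n);   (AFTER == true)

   Both forms reach this routine with CODE = PLUS; AFTER decides whether
   sync_old_add_optab or sync_new_add_optab is tried first, and the
   compensation logic above covers targets that only provide the other
   variant.  */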
7468
7469 /* This function expands a test-and-set operation.  Ideally we atomically
7470    store VAL in MEM and return the previous value in MEM.  Some targets
7471    may not support this operation and only support VAL with the constant 1;
7472    in this case the return value will be 0/1, but the exact value
7473    stored in MEM is target defined.  TARGET is an optional place to stick
7474    the return value.  */
7475
7476 rtx
7477 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7478 {
7479 enum machine_mode mode = GET_MODE (mem);
7480 enum insn_code icode;
7481 rtx insn;
7482
7483 /* If the target supports the test-and-set directly, great. */
7484 icode = sync_lock_test_and_set[mode];
7485 if (icode != CODE_FOR_nothing)
7486 {
7487 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7488 target = gen_reg_rtx (mode);
7489
7490 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7491 val = convert_modes (mode, GET_MODE (val), val, 1);
7492 if (!insn_data[icode].operand[2].predicate (val, mode))
7493 val = force_reg (mode, val);
7494
7495 insn = GEN_FCN (icode) (target, mem, val);
7496 if (insn)
7497 {
7498 emit_insn (insn);
7499 return target;
7500 }
7501 }
7502
7503 /* Otherwise, use a compare-and-swap loop for the exchange. */
7504 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7505 {
7506 if (!target || !register_operand (target, mode))
7507 target = gen_reg_rtx (mode);
7508 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7509 val = convert_modes (mode, GET_MODE (val), val, 1);
7510 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7511 return target;
7512 }
7513
7514 return NULL_RTX;
7515 }
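
/* Usage sketch (an illustration, not part of the original source): the
   classic consumer of this expander is a spinlock acquire loop,

       while (__sync_lock_test_and_set (&lock, 1))
         ;   (spin until the previous value was 0)

   which also works on targets that, as described in the comment above, only
   accept the constant 1 for VAL.  */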
7516
7517 #include "gt-optabs.h"