Change vec_perm checking and expansion level.
[gcc.git] / gcc / optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "target.h"
46
47 struct target_optabs default_target_optabs;
48 struct target_libfuncs default_target_libfuncs;
49 #if SWITCHABLE_TARGET
50 struct target_optabs *this_target_optabs = &default_target_optabs;
51 struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
52 #endif
53
54 #define libfunc_hash \
55 (this_target_libfuncs->x_libfunc_hash)
56
57 /* Contains the optab used for each rtx code. */
58 optab code_to_optab[NUM_RTX_CODE + 1];
59
60 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
61 enum machine_mode *);
62 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
63
64 /* Debug facility for use in GDB. */
65 void debug_optab_libfuncs (void);
66
67 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
68 #if ENABLE_DECIMAL_BID_FORMAT
69 #define DECIMAL_PREFIX "bid_"
70 #else
71 #define DECIMAL_PREFIX "dpd_"
72 #endif
73 \f
74 /* Used for libfunc_hash. */
75
76 static hashval_t
77 hash_libfunc (const void *p)
78 {
79 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
80
81 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
82 ^ e->optab);
83 }
84
85 /* Used for libfunc_hash. */
86
87 static int
88 eq_libfunc (const void *p, const void *q)
89 {
90 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
91 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
92
93 return (e1->optab == e2->optab
94 && e1->mode1 == e2->mode1
95 && e1->mode2 == e2->mode2);
96 }
97
98 /* Return the libfunc corresponding to the operation defined by OPTAB,
99 converting from MODE2 to MODE1. Trigger lazy initialization if needed;
100 return NULL if no libfunc is available. */
101 rtx
102 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
103 enum machine_mode mode2)
104 {
105 struct libfunc_entry e;
106 struct libfunc_entry **slot;
107
108 e.optab = (size_t) (optab - &convert_optab_table[0]);
109 e.mode1 = mode1;
110 e.mode2 = mode2;
111 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
112 if (!slot)
113 {
114 if (optab->libcall_gen)
115 {
116 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
117 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
118 if (slot)
119 return (*slot)->libfunc;
120 else
121 return NULL;
122 }
123 return NULL;
124 }
125 return (*slot)->libfunc;
126 }
127
128 /* Return the libfunc corresponding to the operation defined by OPTAB
129 in MODE. Trigger lazy initialization if needed; return NULL if no
130 libfunc is available. */
131 rtx
132 optab_libfunc (optab optab, enum machine_mode mode)
133 {
134 struct libfunc_entry e;
135 struct libfunc_entry **slot;
136
137 e.optab = (size_t) (optab - &optab_table[0]);
138 e.mode1 = mode;
139 e.mode2 = VOIDmode;
140 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
141 if (!slot)
142 {
143 if (optab->libcall_gen)
144 {
145 optab->libcall_gen (optab, optab->libcall_basename,
146 optab->libcall_suffix, mode);
147 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
148 &e, NO_INSERT);
149 if (slot)
150 return (*slot)->libfunc;
151 else
152 return NULL;
153 }
154 return NULL;
155 }
156 return (*slot)->libfunc;
157 }
158
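/* A rough usage sketch (mirroring the libcall fallback used later in
   expand_binop); the lookup itself never emits code, it only hands back
   the symbol to call, so a NULL result simply means the caller must pick
   another expansion strategy:

     rtx libfunc = optab_libfunc (add_optab, DImode);
     if (libfunc)
       result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                         DImode, 2, op0, DImode, op1, DImode);

   DImode and add_optab here are only illustrative choices.  */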
159 \f
160 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
161 the result of operation CODE applied to OP0 (and OP1 if it is a binary
162 operation).
163
164 If the last insn does not set TARGET, don't do anything, but return 1.
165
166 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
167 don't add the REG_EQUAL note but return 0. Our caller can then try
168 again, ensuring that TARGET is not one of the operands. */
169
170 static int
171 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
172 {
173 rtx last_insn, insn, set;
174 rtx note;
175
176 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
177
178 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
179 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
180 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
181 && GET_RTX_CLASS (code) != RTX_COMPARE
182 && GET_RTX_CLASS (code) != RTX_UNARY)
183 return 1;
184
185 if (GET_CODE (target) == ZERO_EXTRACT)
186 return 1;
187
188 for (last_insn = insns;
189 NEXT_INSN (last_insn) != NULL_RTX;
190 last_insn = NEXT_INSN (last_insn))
191 ;
192
193 set = single_set (last_insn);
194 if (set == NULL_RTX)
195 return 1;
196
197 if (! rtx_equal_p (SET_DEST (set), target)
198 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
199 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
200 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
201 return 1;
202
203 /* If TARGET is in OP0 or OP1, check if anything in INSNS sets TARGET
204 besides the last insn. */
205 if (reg_overlap_mentioned_p (target, op0)
206 || (op1 && reg_overlap_mentioned_p (target, op1)))
207 {
208 insn = PREV_INSN (last_insn);
209 while (insn != NULL_RTX)
210 {
211 if (reg_set_p (target, insn))
212 return 0;
213
214 insn = PREV_INSN (insn);
215 }
216 }
217
218 if (GET_RTX_CLASS (code) == RTX_UNARY)
219 switch (code)
220 {
221 case FFS:
222 case CLZ:
223 case CTZ:
224 case CLRSB:
225 case POPCOUNT:
226 case PARITY:
227 case BSWAP:
228 if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
229 {
230 note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
231 if (GET_MODE_SIZE (GET_MODE (op0))
232 > GET_MODE_SIZE (GET_MODE (target)))
233 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
234 note, GET_MODE (op0));
235 else
236 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
237 note, GET_MODE (op0));
238 break;
239 }
240 /* FALLTHRU */
241 default:
242 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
243 break;
244 }
245 else
246 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
247
248 set_unique_reg_note (last_insn, REG_EQUAL, note);
249
250 return 1;
251 }
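/* For example, when a two-insn sequence computes a multiplication whose
   last insn is (set (reg:SI 65) ...), a successful call here attaches

     (expr_list:REG_EQUAL (mult:SI (reg:SI 60) (reg:SI 61)) ...)

   to that last insn, letting later passes such as CSE and combine see
   the operation as a whole.  The register numbers are illustrative.  */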
252 \f
253 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
254 for a widening operation would be. In most cases this is the mode of OP0,
255 but if OP0 is a constant its mode is VOIDmode, which isn't useful. */
256
257 static enum machine_mode
258 widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
259 {
260 enum machine_mode m0 = GET_MODE (op0);
261 enum machine_mode m1 = GET_MODE (op1);
262 enum machine_mode result;
263
264 if (m0 == VOIDmode && m1 == VOIDmode)
265 return to_mode;
266 else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
267 result = m1;
268 else
269 result = m0;
270
271 if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
272 return to_mode;
273
274 return result;
275 }
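/* Example: for a widening multiply into DImode where OP0 is (reg:SI 60)
   and OP1 is (const_int 12), the constant's mode is VOIDmode, so this
   returns SImode; if both operands were constants it would fall back to
   TO_MODE itself.  */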
276 \f
277 /* Find a widening optab even if it doesn't widen as much as we want.
278 E.g. if from_mode is HImode, and to_mode is DImode, and there is no
279 direct HI->DI insn, then return SI->DI, if that exists.
280 If PERMIT_NON_WIDENING is non-zero then this can be used with
281 non-widening optabs also. */
282
283 enum insn_code
284 find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
285 enum machine_mode from_mode,
286 int permit_non_widening,
287 enum machine_mode *found_mode)
288 {
289 for (; (permit_non_widening || from_mode != to_mode)
290 && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
291 && from_mode != VOIDmode;
292 from_mode = GET_MODE_WIDER_MODE (from_mode))
293 {
294 enum insn_code handler = widening_optab_handler (op, to_mode,
295 from_mode);
296
297 if (handler != CODE_FOR_nothing)
298 {
299 if (found_mode)
300 *found_mode = from_mode;
301 return handler;
302 }
303 }
304
305 return CODE_FOR_nothing;
306 }
307 \f
308 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
309 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
310 not actually do a sign-extend or zero-extend, but can leave the
311 higher-order bits of the result rtx undefined, for example, in the case
312 of logical operations, but not right shifts. */
313
314 static rtx
315 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
316 int unsignedp, int no_extend)
317 {
318 rtx result;
319
320 /* If we don't have to extend and this is a constant, return it. */
321 if (no_extend && GET_MODE (op) == VOIDmode)
322 return op;
323
324 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
325 extend since it will be more efficient to do so unless the signedness of
326 a promoted object differs from our extension. */
327 if (! no_extend
328 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
329 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
330 return convert_modes (mode, oldmode, op, unsignedp);
331
332 /* If MODE is no wider than a single word, we return a paradoxical
333 SUBREG. */
334 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
335 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
336
337 /* Otherwise, get an object of MODE, clobber it, and set the low-order
338 part to OP. */
339
340 result = gen_reg_rtx (mode);
341 emit_clobber (result);
342 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
343 return result;
344 }
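/* Illustration, assuming a 32-bit word_mode: widening a QImode register
   to SImode for an AND may simply produce the paradoxical subreg

     (subreg:SI (reg:QI 60) 0)

   since the extra 24 bits of the wider result are discarded when it is
   truncated back, whereas the first operand of a right shift must really
   go through convert_modes, because its high bits flow into the result.  */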
345 \f
346 /* Return the optab used for computing the operation given by the tree code
347 CODE and the type TYPE. This function is not always usable (for example, it
348 cannot give complete results for multiplication or division) but probably
349 ought to be relied on more widely throughout the expander. */
350 optab
351 optab_for_tree_code (enum tree_code code, const_tree type,
352 enum optab_subtype subtype)
353 {
354 bool trapv;
355 switch (code)
356 {
357 case BIT_AND_EXPR:
358 return and_optab;
359
360 case BIT_IOR_EXPR:
361 return ior_optab;
362
363 case BIT_NOT_EXPR:
364 return one_cmpl_optab;
365
366 case BIT_XOR_EXPR:
367 return xor_optab;
368
369 case TRUNC_MOD_EXPR:
370 case CEIL_MOD_EXPR:
371 case FLOOR_MOD_EXPR:
372 case ROUND_MOD_EXPR:
373 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
374
375 case RDIV_EXPR:
376 case TRUNC_DIV_EXPR:
377 case CEIL_DIV_EXPR:
378 case FLOOR_DIV_EXPR:
379 case ROUND_DIV_EXPR:
380 case EXACT_DIV_EXPR:
381 if (TYPE_SATURATING(type))
382 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
383 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
384
385 case LSHIFT_EXPR:
386 if (TREE_CODE (type) == VECTOR_TYPE)
387 {
388 if (subtype == optab_vector)
389 return TYPE_SATURATING (type) ? NULL : vashl_optab;
390
391 gcc_assert (subtype == optab_scalar);
392 }
393 if (TYPE_SATURATING(type))
394 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
395 return ashl_optab;
396
397 case RSHIFT_EXPR:
398 if (TREE_CODE (type) == VECTOR_TYPE)
399 {
400 if (subtype == optab_vector)
401 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
402
403 gcc_assert (subtype == optab_scalar);
404 }
405 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
406
407 case LROTATE_EXPR:
408 if (TREE_CODE (type) == VECTOR_TYPE)
409 {
410 if (subtype == optab_vector)
411 return vrotl_optab;
412
413 gcc_assert (subtype == optab_scalar);
414 }
415 return rotl_optab;
416
417 case RROTATE_EXPR:
418 if (TREE_CODE (type) == VECTOR_TYPE)
419 {
420 if (subtype == optab_vector)
421 return vrotr_optab;
422
423 gcc_assert (subtype == optab_scalar);
424 }
425 return rotr_optab;
426
427 case MAX_EXPR:
428 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
429
430 case MIN_EXPR:
431 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
432
433 case REALIGN_LOAD_EXPR:
434 return vec_realign_load_optab;
435
436 case WIDEN_SUM_EXPR:
437 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
438
439 case DOT_PROD_EXPR:
440 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
441
442 case WIDEN_MULT_PLUS_EXPR:
443 return (TYPE_UNSIGNED (type)
444 ? (TYPE_SATURATING (type)
445 ? usmadd_widen_optab : umadd_widen_optab)
446 : (TYPE_SATURATING (type)
447 ? ssmadd_widen_optab : smadd_widen_optab));
448
449 case WIDEN_MULT_MINUS_EXPR:
450 return (TYPE_UNSIGNED (type)
451 ? (TYPE_SATURATING (type)
452 ? usmsub_widen_optab : umsub_widen_optab)
453 : (TYPE_SATURATING (type)
454 ? ssmsub_widen_optab : smsub_widen_optab));
455
456 case FMA_EXPR:
457 return fma_optab;
458
459 case REDUC_MAX_EXPR:
460 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
461
462 case REDUC_MIN_EXPR:
463 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
464
465 case REDUC_PLUS_EXPR:
466 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
467
468 case VEC_LSHIFT_EXPR:
469 return vec_shl_optab;
470
471 case VEC_RSHIFT_EXPR:
472 return vec_shr_optab;
473
474 case VEC_WIDEN_MULT_HI_EXPR:
475 return TYPE_UNSIGNED (type) ?
476 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
477
478 case VEC_WIDEN_MULT_LO_EXPR:
479 return TYPE_UNSIGNED (type) ?
480 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
481
482 case VEC_WIDEN_LSHIFT_HI_EXPR:
483 return TYPE_UNSIGNED (type) ?
484 vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;
485
486 case VEC_WIDEN_LSHIFT_LO_EXPR:
487 return TYPE_UNSIGNED (type) ?
488 vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;
489
490 case VEC_UNPACK_HI_EXPR:
491 return TYPE_UNSIGNED (type) ?
492 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
493
494 case VEC_UNPACK_LO_EXPR:
495 return TYPE_UNSIGNED (type) ?
496 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
497
498 case VEC_UNPACK_FLOAT_HI_EXPR:
499 /* The signedness is determined from input operand. */
500 return TYPE_UNSIGNED (type) ?
501 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
502
503 case VEC_UNPACK_FLOAT_LO_EXPR:
504 /* The signedness is determined from input operand. */
505 return TYPE_UNSIGNED (type) ?
506 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
507
508 case VEC_PACK_TRUNC_EXPR:
509 return vec_pack_trunc_optab;
510
511 case VEC_PACK_SAT_EXPR:
512 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
513
514 case VEC_PACK_FIX_TRUNC_EXPR:
515 /* The signedness is determined from output operand. */
516 return TYPE_UNSIGNED (type) ?
517 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
518
519 default:
520 break;
521 }
522
523 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
524 switch (code)
525 {
526 case POINTER_PLUS_EXPR:
527 case PLUS_EXPR:
528 if (TYPE_SATURATING(type))
529 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
530 return trapv ? addv_optab : add_optab;
531
532 case MINUS_EXPR:
533 if (TYPE_SATURATING(type))
534 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
535 return trapv ? subv_optab : sub_optab;
536
537 case MULT_EXPR:
538 if (TYPE_SATURATING(type))
539 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
540 return trapv ? smulv_optab : smul_optab;
541
542 case NEGATE_EXPR:
543 if (TYPE_SATURATING(type))
544 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
545 return trapv ? negv_optab : neg_optab;
546
547 case ABS_EXPR:
548 return trapv ? absv_optab : abs_optab;
549
550 case VEC_EXTRACT_EVEN_EXPR:
551 return vec_extract_even_optab;
552
553 case VEC_EXTRACT_ODD_EXPR:
554 return vec_extract_odd_optab;
555
556 case VEC_INTERLEAVE_HIGH_EXPR:
557 return vec_interleave_high_optab;
558
559 case VEC_INTERLEAVE_LOW_EXPR:
560 return vec_interleave_low_optab;
561
562 default:
563 return NULL;
564 }
565 }
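/* A minimal sketch of the usual calling pattern (the names are only
   illustrative):

     optab op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
     if (op == NULL
         || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
       ...fall back to a libcall or to lowering the operation...

   A non-NULL result only names the table entry; whether the target
   actually implements it still has to be checked per mode with
   optab_handler.  */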
566 \f
567
568 /* Expand vector widening operations.
569
570 There are two different classes of operations handled here:
571 1) Operations whose result is wider than all the arguments to the operation.
572 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
573 In this case OP0 and optionally OP1 would be initialized,
574 but WIDE_OP wouldn't (not relevant for this case).
575 2) Operations whose result is of the same size as the last argument to the
576 operation, but wider than all the other arguments to the operation.
577 Examples: WIDEN_SUM_EXPR, DOT_PROD_EXPR.
578 In this case WIDE_OP, OP0 and optionally OP1 would be initialized.
579
580 E.g, when called to expand the following operations, this is how
581 the arguments will be initialized:
582 nops OP0 OP1 WIDE_OP
583 widening-sum 2 oprnd0 - oprnd1
584 widening-dot-product 3 oprnd0 oprnd1 oprnd2
585 widening-mult 2 oprnd0 oprnd1 -
586 type-promotion (vec-unpack) 1 oprnd0 - - */
587
588 rtx
589 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
590 rtx target, int unsignedp)
591 {
592 struct expand_operand eops[4];
593 tree oprnd0, oprnd1, oprnd2;
594 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
595 optab widen_pattern_optab;
596 enum insn_code icode;
597 int nops = TREE_CODE_LENGTH (ops->code);
598 int op;
599
600 oprnd0 = ops->op0;
601 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
602 widen_pattern_optab =
603 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
604 if (ops->code == WIDEN_MULT_PLUS_EXPR
605 || ops->code == WIDEN_MULT_MINUS_EXPR)
606 icode = find_widening_optab_handler (widen_pattern_optab,
607 TYPE_MODE (TREE_TYPE (ops->op2)),
608 tmode0, 0);
609 else
610 icode = optab_handler (widen_pattern_optab, tmode0);
611 gcc_assert (icode != CODE_FOR_nothing);
612
613 if (nops >= 2)
614 {
615 oprnd1 = ops->op1;
616 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
617 }
618
619 /* The last operand is of a wider mode than the rest of the operands. */
620 if (nops == 2)
621 wmode = tmode1;
622 else if (nops == 3)
623 {
624 gcc_assert (tmode1 == tmode0);
625 gcc_assert (op1);
626 oprnd2 = ops->op2;
627 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
628 }
629
630 op = 0;
631 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
632 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
633 if (op1)
634 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
635 if (wide_op)
636 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
637 expand_insn (icode, op, eops);
638 return eops[0].value;
639 }
640
641 /* Generate code to perform an operation specified by TERNARY_OPTAB
642 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
643
644 UNSIGNEDP is for the case where we have to widen the operands
645 to perform the operation. It says to use zero-extension.
646
647 If TARGET is nonzero, the value
648 is generated there, if it is convenient to do so.
649 In all cases an rtx is returned for the locus of the value;
650 this may or may not be TARGET. */
651
652 rtx
653 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
654 rtx op1, rtx op2, rtx target, int unsignedp)
655 {
656 struct expand_operand ops[4];
657 enum insn_code icode = optab_handler (ternary_optab, mode);
658
659 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
660
661 create_output_operand (&ops[0], target, mode);
662 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
663 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
664 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
665 expand_insn (icode, 4, ops);
666 return ops[0].value;
667 }
668
669
670 /* Like expand_binop, but return a constant rtx if the result can be
671 calculated at compile time. The arguments and return value are
672 otherwise the same as for expand_binop. */
673
674 static rtx
675 simplify_expand_binop (enum machine_mode mode, optab binoptab,
676 rtx op0, rtx op1, rtx target, int unsignedp,
677 enum optab_methods methods)
678 {
679 if (CONSTANT_P (op0) && CONSTANT_P (op1))
680 {
681 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
682
683 if (x)
684 return x;
685 }
686
687 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
688 }
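/* E.g. simplify_expand_binop (SImode, add_optab, GEN_INT (2),
   GEN_INT (3), NULL_RTX, 1, OPTAB_LIB_WIDEN) folds directly to
   (const_int 5) without emitting any insns; with a non-constant
   operand it behaves exactly like expand_binop.  */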
689
690 /* Like simplify_expand_binop, but always put the result in TARGET.
691 Return true if the expansion succeeded. */
692
693 bool
694 force_expand_binop (enum machine_mode mode, optab binoptab,
695 rtx op0, rtx op1, rtx target, int unsignedp,
696 enum optab_methods methods)
697 {
698 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
699 target, unsignedp, methods);
700 if (x == 0)
701 return false;
702 if (x != target)
703 emit_move_insn (target, x);
704 return true;
705 }
706
707 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
708
709 rtx
710 expand_vec_shift_expr (sepops ops, rtx target)
711 {
712 struct expand_operand eops[3];
713 enum insn_code icode;
714 rtx rtx_op1, rtx_op2;
715 enum machine_mode mode = TYPE_MODE (ops->type);
716 tree vec_oprnd = ops->op0;
717 tree shift_oprnd = ops->op1;
718 optab shift_optab;
719
720 switch (ops->code)
721 {
722 case VEC_RSHIFT_EXPR:
723 shift_optab = vec_shr_optab;
724 break;
725 case VEC_LSHIFT_EXPR:
726 shift_optab = vec_shl_optab;
727 break;
728 default:
729 gcc_unreachable ();
730 }
731
732 icode = optab_handler (shift_optab, mode);
733 gcc_assert (icode != CODE_FOR_nothing);
734
735 rtx_op1 = expand_normal (vec_oprnd);
736 rtx_op2 = expand_normal (shift_oprnd);
737
738 create_output_operand (&eops[0], target, mode);
739 create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
740 create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
741 expand_insn (icode, 3, eops);
742
743 return eops[0].value;
744 }
745
746 /* Create a new vector value in VMODE with all elements set to OP. The
747 mode of OP must be the element mode of VMODE. If OP is a constant,
748 then the return value will be a constant. */
749
750 static rtx
751 expand_vector_broadcast (enum machine_mode vmode, rtx op)
752 {
753 enum insn_code icode;
754 rtvec vec;
755 rtx ret;
756 int i, n;
757
758 gcc_checking_assert (VECTOR_MODE_P (vmode));
759
760 n = GET_MODE_NUNITS (vmode);
761 vec = rtvec_alloc (n);
762 for (i = 0; i < n; ++i)
763 RTVEC_ELT (vec, i) = op;
764
765 if (CONSTANT_P (op))
766 return gen_rtx_CONST_VECTOR (vmode, vec);
767
768 /* ??? If the target doesn't have a vec_init, then we have no easy way
769 of performing this operation. Most of this sort of generic support
770 is hidden away in the vector lowering support in gimple. */
771 icode = optab_handler (vec_init_optab, vmode);
772 if (icode == CODE_FOR_nothing)
773 return NULL;
774
775 ret = gen_reg_rtx (vmode);
776 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
777
778 return ret;
779 }
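/* Example: broadcasting (const_int 7) into V4SImode yields
   (const_vector:V4SI [7 7 7 7]) immediately, whereas broadcasting a
   register only works if the target provides a vec_init pattern, which
   is then handed a (parallel [...]) of four copies of that register.  */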
780
781 /* This subroutine of expand_doubleword_shift handles the cases in which
782 the effective shift value is >= BITS_PER_WORD. The arguments and return
783 value are the same as for the parent routine, except that SUPERWORD_OP1
784 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
785 INTO_TARGET may be null if the caller has decided to calculate it. */
786
787 static bool
788 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
789 rtx outof_target, rtx into_target,
790 int unsignedp, enum optab_methods methods)
791 {
792 if (into_target != 0)
793 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
794 into_target, unsignedp, methods))
795 return false;
796
797 if (outof_target != 0)
798 {
799 /* For a signed right shift, we must fill OUTOF_TARGET with copies
800 of the sign bit, otherwise we must fill it with zeros. */
801 if (binoptab != ashr_optab)
802 emit_move_insn (outof_target, CONST0_RTX (word_mode));
803 else
804 if (!force_expand_binop (word_mode, binoptab,
805 outof_input, GEN_INT (BITS_PER_WORD - 1),
806 outof_target, unsignedp, methods))
807 return false;
808 }
809 return true;
810 }
811
812 /* This subroutine of expand_doubleword_shift handles the cases in which
813 the effective shift value is < BITS_PER_WORD. The arguments and return
814 value are the same as for the parent routine. */
815
816 static bool
817 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
818 rtx outof_input, rtx into_input, rtx op1,
819 rtx outof_target, rtx into_target,
820 int unsignedp, enum optab_methods methods,
821 unsigned HOST_WIDE_INT shift_mask)
822 {
823 optab reverse_unsigned_shift, unsigned_shift;
824 rtx tmp, carries;
825
826 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
827 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
828
829 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
830 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
831 the opposite direction to BINOPTAB. */
832 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
833 {
834 carries = outof_input;
835 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
836 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
837 0, true, methods);
838 }
839 else
840 {
841 /* We must avoid shifting by BITS_PER_WORD bits since that is either
842 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
843 has unknown behavior. Do a single shift first, then shift by the
844 remainder. It's OK to use ~OP1 as the remainder if shift counts
845 are truncated to the mode size. */
846 carries = expand_binop (word_mode, reverse_unsigned_shift,
847 outof_input, const1_rtx, 0, unsignedp, methods);
848 if (shift_mask == BITS_PER_WORD - 1)
849 {
850 tmp = immed_double_const (-1, -1, op1_mode);
851 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
852 0, true, methods);
853 }
854 else
855 {
856 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
857 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
858 0, true, methods);
859 }
860 }
861 if (tmp == 0 || carries == 0)
862 return false;
863 carries = expand_binop (word_mode, reverse_unsigned_shift,
864 carries, tmp, 0, unsignedp, methods);
865 if (carries == 0)
866 return false;
867
868 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
869 so the result can go directly into INTO_TARGET if convenient. */
870 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
871 into_target, unsignedp, methods);
872 if (tmp == 0)
873 return false;
874
875 /* Now OR in the bits carried over from OUTOF_INPUT. */
876 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
877 into_target, unsignedp, methods))
878 return false;
879
880 /* Use a standard word_mode shift for the out-of half. */
881 if (outof_target != 0)
882 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
883 outof_target, unsignedp, methods))
884 return false;
885
886 return true;
887 }
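/* Worked example, assuming BITS_PER_WORD == 32 and a left shift by a
   variable OP1 known to be < 32: the out-of half of the result is just
   OUTOF_INPUT << OP1, while the into half is

     (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1))

   where the second term is CARRIES above: the top OP1 bits of
   OUTOF_INPUT, moved to the bottom of the into word with the reverse
   (logical) shift.  */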
888
889
890 #ifdef HAVE_conditional_move
891 /* Try implementing expand_doubleword_shift using conditional moves.
892 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
893 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
894 are the shift counts to use in the former and latter case. All other
895 arguments are the same as the parent routine. */
896
897 static bool
898 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
899 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
900 rtx outof_input, rtx into_input,
901 rtx subword_op1, rtx superword_op1,
902 rtx outof_target, rtx into_target,
903 int unsignedp, enum optab_methods methods,
904 unsigned HOST_WIDE_INT shift_mask)
905 {
906 rtx outof_superword, into_superword;
907
908 /* Put the superword version of the output into OUTOF_SUPERWORD and
909 INTO_SUPERWORD. */
910 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
911 if (outof_target != 0 && subword_op1 == superword_op1)
912 {
913 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
914 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
915 into_superword = outof_target;
916 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
917 outof_superword, 0, unsignedp, methods))
918 return false;
919 }
920 else
921 {
922 into_superword = gen_reg_rtx (word_mode);
923 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
924 outof_superword, into_superword,
925 unsignedp, methods))
926 return false;
927 }
928
929 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
930 if (!expand_subword_shift (op1_mode, binoptab,
931 outof_input, into_input, subword_op1,
932 outof_target, into_target,
933 unsignedp, methods, shift_mask))
934 return false;
935
936 /* Select between them. Do the INTO half first because INTO_SUPERWORD
937 might be the current value of OUTOF_TARGET. */
938 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
939 into_target, into_superword, word_mode, false))
940 return false;
941
942 if (outof_target != 0)
943 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
944 outof_target, outof_superword,
945 word_mode, false))
946 return false;
947
948 return true;
949 }
950 #endif
951
952 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
953 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
954 input operand; the shift moves bits in the direction OUTOF_INPUT->
955 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
956 of the target. OP1 is the shift count and OP1_MODE is its mode.
957 If OP1 is constant, it will have been truncated as appropriate
958 and is known to be nonzero.
959
960 If SHIFT_MASK is zero, the result of word shifts is undefined when the
961 shift count is outside the range [0, BITS_PER_WORD). This routine must
962 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
963
964 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
965 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
966 fill with zeros or sign bits as appropriate.
967
968 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
969 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
970 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
971 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
972 are undefined.
973
974 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
975 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
976 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
977 function wants to calculate it itself.
978
979 Return true if the shift could be successfully synthesized. */
980
981 static bool
982 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
983 rtx outof_input, rtx into_input, rtx op1,
984 rtx outof_target, rtx into_target,
985 int unsignedp, enum optab_methods methods,
986 unsigned HOST_WIDE_INT shift_mask)
987 {
988 rtx superword_op1, tmp, cmp1, cmp2;
989 rtx subword_label, done_label;
990 enum rtx_code cmp_code;
991
992 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
993 fill the result with sign or zero bits as appropriate. If so, the value
994 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
995 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
996 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
997
998 This isn't worthwhile for constant shifts since the optimizers will
999 cope better with in-range shift counts. */
1000 if (shift_mask >= BITS_PER_WORD
1001 && outof_target != 0
1002 && !CONSTANT_P (op1))
1003 {
1004 if (!expand_doubleword_shift (op1_mode, binoptab,
1005 outof_input, into_input, op1,
1006 0, into_target,
1007 unsignedp, methods, shift_mask))
1008 return false;
1009 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1010 outof_target, unsignedp, methods))
1011 return false;
1012 return true;
1013 }
1014
1015 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1016 is true when the effective shift value is less than BITS_PER_WORD.
1017 Set SUPERWORD_OP1 to the shift count that should be used to shift
1018 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1019 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1020 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1021 {
1022 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1023 is a subword shift count. */
1024 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1025 0, true, methods);
1026 cmp2 = CONST0_RTX (op1_mode);
1027 cmp_code = EQ;
1028 superword_op1 = op1;
1029 }
1030 else
1031 {
1032 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1033 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1034 0, true, methods);
1035 cmp2 = CONST0_RTX (op1_mode);
1036 cmp_code = LT;
1037 superword_op1 = cmp1;
1038 }
1039 if (cmp1 == 0)
1040 return false;
1041
1042 /* If we can compute the condition at compile time, pick the
1043 appropriate subroutine. */
1044 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1045 if (tmp != 0 && CONST_INT_P (tmp))
1046 {
1047 if (tmp == const0_rtx)
1048 return expand_superword_shift (binoptab, outof_input, superword_op1,
1049 outof_target, into_target,
1050 unsignedp, methods);
1051 else
1052 return expand_subword_shift (op1_mode, binoptab,
1053 outof_input, into_input, op1,
1054 outof_target, into_target,
1055 unsignedp, methods, shift_mask);
1056 }
1057
1058 #ifdef HAVE_conditional_move
1059 /* Try using conditional moves to generate straight-line code. */
1060 {
1061 rtx start = get_last_insn ();
1062 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1063 cmp_code, cmp1, cmp2,
1064 outof_input, into_input,
1065 op1, superword_op1,
1066 outof_target, into_target,
1067 unsignedp, methods, shift_mask))
1068 return true;
1069 delete_insns_since (start);
1070 }
1071 #endif
1072
1073 /* As a last resort, use branches to select the correct alternative. */
1074 subword_label = gen_label_rtx ();
1075 done_label = gen_label_rtx ();
1076
1077 NO_DEFER_POP;
1078 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1079 0, 0, subword_label, -1);
1080 OK_DEFER_POP;
1081
1082 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1083 outof_target, into_target,
1084 unsignedp, methods))
1085 return false;
1086
1087 emit_jump_insn (gen_jump (done_label));
1088 emit_barrier ();
1089 emit_label (subword_label);
1090
1091 if (!expand_subword_shift (op1_mode, binoptab,
1092 outof_input, into_input, op1,
1093 outof_target, into_target,
1094 unsignedp, methods, shift_mask))
1095 return false;
1096
1097 emit_label (done_label);
1098 return true;
1099 }
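/* Concrete instance of the dispatch above, with BITS_PER_WORD == 32:
   on a target whose word shifter truncates counts (shift_mask == 31),
   CMP1 is OP1 & 32, the test is CMP1 == 0 and SUPERWORD_OP1 is OP1
   itself, relying on that truncation.  With shift_mask == 0, CMP1 is
   OP1 - 32 and doubles as SUPERWORD_OP1, so a count of 40 becomes a
   superword shift by 8.  */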
1100 \f
1101 /* Subroutine of expand_binop. Perform a double word multiplication of
1102 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1103 as the target's word_mode. This function returns NULL_RTX if anything
1104 goes wrong, in which case it may have already emitted instructions
1105 which need to be deleted.
1106
1107 If we want to multiply two two-word values and have normal and widening
1108 multiplies of single-word values, we can do this with three smaller
1109 multiplications.
1110
1111 The multiplication proceeds as follows:
1112 _______________________
1113 [__op0_high_|__op0_low__]
1114 _______________________
1115 * [__op1_high_|__op1_low__]
1116 _______________________________________________
1117 _______________________
1118 (1) [__op0_low__*__op1_low__]
1119 _______________________
1120 (2a) [__op0_low__*__op1_high_]
1121 _______________________
1122 (2b) [__op0_high_*__op1_low__]
1123 _______________________
1124 (3) [__op0_high_*__op1_high_]
1125
1126
1127 This gives a 4-word result. Since we are only interested in the
1128 lower 2 words, partial result (3) and the upper words of (2a) and
1129 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1130 calculated using non-widening multiplication.
1131
1132 (1), however, needs to be calculated with an unsigned widening
1133 multiplication. If this operation is not directly supported we
1134 try using a signed widening multiplication and adjust the result.
1135 This adjustment works as follows:
1136
1137 If both operands are positive then no adjustment is needed.
1138
1139 If the operands have different signs, for example op0_low < 0 and
1140 op1_low >= 0, the instruction treats the most significant bit of
1141 op0_low as a sign bit instead of a bit with significance
1142 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1143 with 2**BITS_PER_WORD - op0_low, and two's complements the
1144 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1145 the result.
1146
1147 Similarly, if both operands are negative, we need to add
1148 (op0_low + op1_low) * 2**BITS_PER_WORD.
1149
1150 We use a trick to adjust quickly. We logically shift op0_low right
1151 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1152 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1153 logical shift exists, we do an arithmetic right shift and subtract
1154 the 0 or -1. */
1155
1156 static rtx
1157 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1158 bool umulp, enum optab_methods methods)
1159 {
1160 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1161 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1162 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1163 rtx product, adjust, product_high, temp;
1164
1165 rtx op0_high = operand_subword_force (op0, high, mode);
1166 rtx op0_low = operand_subword_force (op0, low, mode);
1167 rtx op1_high = operand_subword_force (op1, high, mode);
1168 rtx op1_low = operand_subword_force (op1, low, mode);
1169
1170 /* If we're using an unsigned multiply to directly compute the product
1171 of the low-order words of the operands and perform any required
1172 adjustments of the operands, we begin by trying two more multiplications
1173 and then computing the appropriate sum.
1174
1175 We have checked above that the required addition is provided.
1176 Full-word addition will normally always succeed, especially if
1177 it is provided at all, so we don't worry about its failure. The
1178 multiplication may well fail, however, so we do handle that. */
1179
1180 if (!umulp)
1181 {
1182 /* ??? This could be done with emit_store_flag where available. */
1183 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1184 NULL_RTX, 1, methods);
1185 if (temp)
1186 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1187 NULL_RTX, 0, OPTAB_DIRECT);
1188 else
1189 {
1190 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1191 NULL_RTX, 0, methods);
1192 if (!temp)
1193 return NULL_RTX;
1194 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1195 NULL_RTX, 0, OPTAB_DIRECT);
1196 }
1197
1198 if (!op0_high)
1199 return NULL_RTX;
1200 }
1201
1202 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1203 NULL_RTX, 0, OPTAB_DIRECT);
1204 if (!adjust)
1205 return NULL_RTX;
1206
1207 /* OP0_HIGH should now be dead. */
1208
1209 if (!umulp)
1210 {
1211 /* ??? This could be done with emit_store_flag where available. */
1212 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1213 NULL_RTX, 1, methods);
1214 if (temp)
1215 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1216 NULL_RTX, 0, OPTAB_DIRECT);
1217 else
1218 {
1219 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1220 NULL_RTX, 0, methods);
1221 if (!temp)
1222 return NULL_RTX;
1223 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1224 NULL_RTX, 0, OPTAB_DIRECT);
1225 }
1226
1227 if (!op1_high)
1228 return NULL_RTX;
1229 }
1230
1231 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1232 NULL_RTX, 0, OPTAB_DIRECT);
1233 if (!temp)
1234 return NULL_RTX;
1235
1236 /* OP1_HIGH should now be dead. */
1237
1238 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1239 NULL_RTX, 0, OPTAB_DIRECT);
1240
1241 if (target && !REG_P (target))
1242 target = NULL_RTX;
1243
1244 if (umulp)
1245 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1246 target, 1, OPTAB_DIRECT);
1247 else
1248 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1249 target, 1, OPTAB_DIRECT);
1250
1251 if (!product)
1252 return NULL_RTX;
1253
1254 product_high = operand_subword (product, high, 1, mode);
1255 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1256 NULL_RTX, 0, OPTAB_DIRECT);
1257 emit_move_insn (product_high, adjust);
1258 return product;
1259 }
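/* Numeric illustration of the signedness fix-up described before
   expand_doubleword_mult, using 8-bit "words" for brevity: with
   op0_low = 0x90 and op1_low = 0x03, a signed widening multiply gives
   (-0x70) * 3 = 0xFEB0, while the desired unsigned product is
   0x90 * 3 = 0x01B0.  The difference is exactly 0x0300, i.e.
   op1_low * 2**BITS_PER_WORD, and it is supplied by adding
   (op0_low >> 7) = 1 into op0_high before partial product (2b) is
   formed, which contributes op1_low once at the high word.  */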
1260 \f
1261 /* Wrapper around expand_binop which takes an rtx code to specify
1262 the operation to perform, not an optab pointer. All other
1263 arguments are the same. */
1264 rtx
1265 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1266 rtx op1, rtx target, int unsignedp,
1267 enum optab_methods methods)
1268 {
1269 optab binop = code_to_optab[(int) code];
1270 gcc_assert (binop);
1271
1272 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1273 }
1274
1275 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1276 binop. Order them according to commutative_operand_precedence and, if
1277 possible, try to put TARGET or a pseudo first. */
1278 static bool
1279 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1280 {
1281 int op0_prec = commutative_operand_precedence (op0);
1282 int op1_prec = commutative_operand_precedence (op1);
1283
1284 if (op0_prec < op1_prec)
1285 return true;
1286
1287 if (op0_prec > op1_prec)
1288 return false;
1289
1290 /* With equal precedence, both orders are ok, but it is better if the
1291 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1292 if (target == 0 || REG_P (target))
1293 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1294 else
1295 return rtx_equal_p (op1, target);
1296 }
1297
1298 /* Return true if BINOPTAB implements a shift operation. */
1299
1300 static bool
1301 shift_optab_p (optab binoptab)
1302 {
1303 switch (binoptab->code)
1304 {
1305 case ASHIFT:
1306 case SS_ASHIFT:
1307 case US_ASHIFT:
1308 case ASHIFTRT:
1309 case LSHIFTRT:
1310 case ROTATE:
1311 case ROTATERT:
1312 return true;
1313
1314 default:
1315 return false;
1316 }
1317 }
1318
1319 /* Return true if BINOPTAB implements a commutative binary operation. */
1320
1321 static bool
1322 commutative_optab_p (optab binoptab)
1323 {
1324 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1325 || binoptab == smul_widen_optab
1326 || binoptab == umul_widen_optab
1327 || binoptab == smul_highpart_optab
1328 || binoptab == umul_highpart_optab);
1329 }
1330
1331 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1332 optimizing, and if the operand is a constant that costs more than
1333 1 instruction, force the constant into a register and return that
1334 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1335
1336 static rtx
1337 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1338 int opn, rtx x, bool unsignedp)
1339 {
1340 bool speed = optimize_insn_for_speed_p ();
1341
1342 if (mode != VOIDmode
1343 && optimize
1344 && CONSTANT_P (x)
1345 && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
1346 {
1347 if (CONST_INT_P (x))
1348 {
1349 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1350 if (intval != INTVAL (x))
1351 x = GEN_INT (intval);
1352 }
1353 else
1354 x = convert_modes (mode, VOIDmode, x, unsignedp);
1355 x = force_reg (mode, x);
1356 }
1357 return x;
1358 }
1359
1360 /* Helper function for expand_binop: handle the case where there
1361 is an insn that directly implements the indicated operation.
1362 Returns null if this is not possible. */
1363 static rtx
1364 expand_binop_directly (enum machine_mode mode, optab binoptab,
1365 rtx op0, rtx op1,
1366 rtx target, int unsignedp, enum optab_methods methods,
1367 rtx last)
1368 {
1369 enum machine_mode from_mode = widened_mode (mode, op0, op1);
1370 enum insn_code icode = find_widening_optab_handler (binoptab, mode,
1371 from_mode, 1);
1372 enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1373 enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1374 enum machine_mode mode0, mode1, tmp_mode;
1375 struct expand_operand ops[3];
1376 bool commutative_p;
1377 rtx pat;
1378 rtx xop0 = op0, xop1 = op1;
1379 rtx swap;
1380
1381 /* If it is a commutative operator and the modes would match
1382 if we would swap the operands, we can save the conversions. */
1383 commutative_p = commutative_optab_p (binoptab);
1384 if (commutative_p
1385 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1386 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
1387 {
1388 swap = xop0;
1389 xop0 = xop1;
1390 xop1 = swap;
1391 }
1392
1393 /* If we are optimizing, force expensive constants into a register. */
1394 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1395 if (!shift_optab_p (binoptab))
1396 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1397
1398 /* In case the insn wants input operands in modes different from
1399 those of the actual operands, convert the operands. It would
1400 seem that we don't need to convert CONST_INTs, but we do, so
1401 that they're properly zero-extended, sign-extended or truncated
1402 for their mode. */
1403
1404 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1405 if (xmode0 != VOIDmode && xmode0 != mode0)
1406 {
1407 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1408 mode0 = xmode0;
1409 }
1410
1411 mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
1412 if (xmode1 != VOIDmode && xmode1 != mode1)
1413 {
1414 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1415 mode1 = xmode1;
1416 }
1417
1418 /* If operation is commutative,
1419 try to make the first operand a register.
1420 Even better, try to make it the same as the target.
1421 Also try to make the last operand a constant. */
1422 if (commutative_p
1423 && swap_commutative_operands_with_target (target, xop0, xop1))
1424 {
1425 swap = xop1;
1426 xop1 = xop0;
1427 xop0 = swap;
1428 }
1429
1430 /* Now, if insn's predicates don't allow our operands, put them into
1431 pseudo regs. */
1432
1433 if (binoptab == vec_pack_trunc_optab
1434 || binoptab == vec_pack_usat_optab
1435 || binoptab == vec_pack_ssat_optab
1436 || binoptab == vec_pack_ufix_trunc_optab
1437 || binoptab == vec_pack_sfix_trunc_optab)
1438 {
1439 /* The mode of the result is different from the mode of the
1440 arguments. */
1441 tmp_mode = insn_data[(int) icode].operand[0].mode;
1442 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1443 {
1444 delete_insns_since (last);
1445 return NULL_RTX;
1446 }
1447 }
1448 else
1449 tmp_mode = mode;
1450
1451 create_output_operand (&ops[0], target, tmp_mode);
1452 create_input_operand (&ops[1], xop0, mode0);
1453 create_input_operand (&ops[2], xop1, mode1);
1454 pat = maybe_gen_insn (icode, 3, ops);
1455 if (pat)
1456 {
1457 /* If PAT is composed of more than one insn, try to add an appropriate
1458 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1459 operand, call expand_binop again, this time without a target. */
1460 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1461 && ! add_equal_note (pat, ops[0].value, binoptab->code,
1462 ops[1].value, ops[2].value))
1463 {
1464 delete_insns_since (last);
1465 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1466 unsignedp, methods);
1467 }
1468
1469 emit_insn (pat);
1470 return ops[0].value;
1471 }
1472 delete_insns_since (last);
1473 return NULL_RTX;
1474 }
1475
1476 /* Generate code to perform an operation specified by BINOPTAB
1477 on operands OP0 and OP1, with result having machine-mode MODE.
1478
1479 UNSIGNEDP is for the case where we have to widen the operands
1480 to perform the operation. It says to use zero-extension.
1481
1482 If TARGET is nonzero, the value
1483 is generated there, if it is convenient to do so.
1484 In all cases an rtx is returned for the locus of the value;
1485 this may or may not be TARGET. */
1486
1487 rtx
1488 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1489 rtx target, int unsignedp, enum optab_methods methods)
1490 {
1491 enum optab_methods next_methods
1492 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1493 ? OPTAB_WIDEN : methods);
1494 enum mode_class mclass;
1495 enum machine_mode wider_mode;
1496 rtx libfunc;
1497 rtx temp;
1498 rtx entry_last = get_last_insn ();
1499 rtx last;
1500
1501 mclass = GET_MODE_CLASS (mode);
1502
1503 /* If subtracting an integer constant, convert this into an addition of
1504 the negated constant. */
1505
1506 if (binoptab == sub_optab && CONST_INT_P (op1))
1507 {
1508 op1 = negate_rtx (mode, op1);
1509 binoptab = add_optab;
1510 }
1511
1512 /* Record where to delete back to if we backtrack. */
1513 last = get_last_insn ();
1514
1515 /* If we can do it with a three-operand insn, do so. */
1516
1517 if (methods != OPTAB_MUST_WIDEN
1518 && find_widening_optab_handler (binoptab, mode,
1519 widened_mode (mode, op0, op1), 1)
1520 != CODE_FOR_nothing)
1521 {
1522 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1523 unsignedp, methods, last);
1524 if (temp)
1525 return temp;
1526 }
1527
1528 /* If we were trying to rotate, and that didn't work, try rotating
1529 the other direction before falling back to shifts and bitwise-or. */
1530 if (((binoptab == rotl_optab
1531 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1532 || (binoptab == rotr_optab
1533 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1534 && mclass == MODE_INT)
1535 {
1536 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1537 rtx newop1;
1538 unsigned int bits = GET_MODE_PRECISION (mode);
1539
1540 if (CONST_INT_P (op1))
1541 newop1 = GEN_INT (bits - INTVAL (op1));
1542 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1543 newop1 = negate_rtx (GET_MODE (op1), op1);
1544 else
1545 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1546 GEN_INT (bits), op1,
1547 NULL_RTX, unsignedp, OPTAB_DIRECT);
1548
1549 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1550 target, unsignedp, methods, last);
1551 if (temp)
1552 return temp;
1553 }
1554
1555 /* If this is a multiply, see if we can do a widening operation that
1556 takes operands of this mode and makes a wider mode. */
1557
1558 if (binoptab == smul_optab
1559 && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1560 && (widening_optab_handler ((unsignedp ? umul_widen_optab
1561 : smul_widen_optab),
1562 GET_MODE_2XWIDER_MODE (mode), mode)
1563 != CODE_FOR_nothing))
1564 {
1565 temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
1566 unsignedp ? umul_widen_optab : smul_widen_optab,
1567 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1568
1569 if (temp != 0)
1570 {
1571 if (GET_MODE_CLASS (mode) == MODE_INT
1572 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1573 return gen_lowpart (mode, temp);
1574 else
1575 return convert_to_mode (mode, temp, unsignedp);
1576 }
1577 }
1578
1579 /* If this is a vector shift by a scalar, see if we can do a vector
1580 shift by a vector. If so, broadcast the scalar into a vector. */
1581 if (mclass == MODE_VECTOR_INT)
1582 {
1583 optab otheroptab = NULL;
1584
1585 if (binoptab == ashl_optab)
1586 otheroptab = vashl_optab;
1587 else if (binoptab == ashr_optab)
1588 otheroptab = vashr_optab;
1589 else if (binoptab == lshr_optab)
1590 otheroptab = vlshr_optab;
1591 else if (binoptab == rotl_optab)
1592 otheroptab = vrotl_optab;
1593 else if (binoptab == rotr_optab)
1594 otheroptab = vrotr_optab;
1595
1596 if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1597 {
1598 rtx vop1 = expand_vector_broadcast (mode, op1);
1599 if (vop1)
1600 {
1601 temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1602 target, unsignedp, methods, last);
1603 if (temp)
1604 return temp;
1605 }
1606 }
1607 }
1608
1609 /* Look for a wider mode of the same class for which we think we
1610 can open-code the operation. Check for a widening multiply at the
1611 wider mode as well. */
1612
1613 if (CLASS_HAS_WIDER_MODES_P (mclass)
1614 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1615 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1616 wider_mode != VOIDmode;
1617 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1618 {
1619 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1620 || (binoptab == smul_optab
1621 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1622 && (find_widening_optab_handler ((unsignedp
1623 ? umul_widen_optab
1624 : smul_widen_optab),
1625 GET_MODE_WIDER_MODE (wider_mode),
1626 mode, 0)
1627 != CODE_FOR_nothing)))
1628 {
1629 rtx xop0 = op0, xop1 = op1;
1630 int no_extend = 0;
1631
1632 /* For certain integer operations, we need not actually extend
1633 the narrow operands, as long as we will truncate
1634 the results to the same narrowness. */
1635
1636 if ((binoptab == ior_optab || binoptab == and_optab
1637 || binoptab == xor_optab
1638 || binoptab == add_optab || binoptab == sub_optab
1639 || binoptab == smul_optab || binoptab == ashl_optab)
1640 && mclass == MODE_INT)
1641 {
1642 no_extend = 1;
1643 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1644 xop0, unsignedp);
1645 if (binoptab != ashl_optab)
1646 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1647 xop1, unsignedp);
1648 }
1649
1650 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1651
1652 /* The second operand of a shift must always be extended. */
1653 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1654 no_extend && binoptab != ashl_optab);
1655
1656 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1657 unsignedp, OPTAB_DIRECT);
1658 if (temp)
1659 {
1660 if (mclass != MODE_INT
1661 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1662 {
1663 if (target == 0)
1664 target = gen_reg_rtx (mode);
1665 convert_move (target, temp, 0);
1666 return target;
1667 }
1668 else
1669 return gen_lowpart (mode, temp);
1670 }
1671 else
1672 delete_insns_since (last);
1673 }
1674 }
1675
1676 /* If operation is commutative,
1677 try to make the first operand a register.
1678 Even better, try to make it the same as the target.
1679 Also try to make the last operand a constant. */
1680 if (commutative_optab_p (binoptab)
1681 && swap_commutative_operands_with_target (target, op0, op1))
1682 {
1683 temp = op1;
1684 op1 = op0;
1685 op0 = temp;
1686 }
1687
1688 /* These can be done a word at a time. */
1689 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1690 && mclass == MODE_INT
1691 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1692 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1693 {
1694 int i;
1695 rtx insns;
1696
1697 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1698 won't be accurate, so use a new target. */
1699 if (target == 0
1700 || target == op0
1701 || target == op1
1702 || !valid_multiword_target_p (target))
1703 target = gen_reg_rtx (mode);
1704
1705 start_sequence ();
1706
1707 /* Do the actual arithmetic. */
1708 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1709 {
1710 rtx target_piece = operand_subword (target, i, 1, mode);
1711 rtx x = expand_binop (word_mode, binoptab,
1712 operand_subword_force (op0, i, mode),
1713 operand_subword_force (op1, i, mode),
1714 target_piece, unsignedp, next_methods);
1715
1716 if (x == 0)
1717 break;
1718
1719 if (target_piece != x)
1720 emit_move_insn (target_piece, x);
1721 }
1722
1723 insns = get_insns ();
1724 end_sequence ();
1725
1726 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1727 {
1728 emit_insn (insns);
1729 return target;
1730 }
1731 }
1732
1733 /* Synthesize double word shifts from single word shifts. */
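  /* As a rough illustration with 32-bit words, a 64-bit left shift by a
     constant C with 0 < C < 32 becomes
	 hi' = (hi << C) | (lo >> (32 - C));  lo' = lo << C;
     expand_doubleword_shift also handles variable counts and counts of
     BITS_PER_WORD or more, emitting the necessary compare/select logic.  */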
1734 if ((binoptab == lshr_optab || binoptab == ashl_optab
1735 || binoptab == ashr_optab)
1736 && mclass == MODE_INT
1737 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1738 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1739 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
1740 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1741 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1742 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1743 {
1744 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1745 enum machine_mode op1_mode;
1746
1747 double_shift_mask = targetm.shift_truncation_mask (mode);
1748 shift_mask = targetm.shift_truncation_mask (word_mode);
1749 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1750
1751 /* Apply the truncation to constant shifts. */
1752 if (double_shift_mask > 0 && CONST_INT_P (op1))
1753 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1754
1755 if (op1 == CONST0_RTX (op1_mode))
1756 return op0;
1757
1758 /* Make sure that this is a combination that expand_doubleword_shift
1759 can handle. See the comments there for details. */
1760 if (double_shift_mask == 0
1761 || (shift_mask == BITS_PER_WORD - 1
1762 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1763 {
1764 rtx insns;
1765 rtx into_target, outof_target;
1766 rtx into_input, outof_input;
1767 int left_shift, outof_word;
1768
1769 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1770 won't be accurate, so use a new target. */
1771 if (target == 0
1772 || target == op0
1773 || target == op1
1774 || !valid_multiword_target_p (target))
1775 target = gen_reg_rtx (mode);
1776
1777 start_sequence ();
1778
1779 /* OUTOF_* is the word we are shifting bits away from, and
1780 INTO_* is the word that we are shifting bits towards, thus
1781 they differ depending on the direction of the shift and
1782 WORDS_BIG_ENDIAN. */
1783
1784 left_shift = binoptab == ashl_optab;
1785 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1786
1787 outof_target = operand_subword (target, outof_word, 1, mode);
1788 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1789
1790 outof_input = operand_subword_force (op0, outof_word, mode);
1791 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1792
1793 if (expand_doubleword_shift (op1_mode, binoptab,
1794 outof_input, into_input, op1,
1795 outof_target, into_target,
1796 unsignedp, next_methods, shift_mask))
1797 {
1798 insns = get_insns ();
1799 end_sequence ();
1800
1801 emit_insn (insns);
1802 return target;
1803 }
1804 end_sequence ();
1805 }
1806 }
1807
1808 /* Synthesize double word rotates from single word shifts. */
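  /* For example, with 32-bit words a 64-bit rotate-left by C, 0 < C < 32, is
	 new_hi = (hi << C) | (lo >> (32 - C));
	 new_lo = (lo << C) | (hi >> (32 - C));
     C == 32 is a plain word swap, and 32 < C < 64 is the mirror-image case,
     handled below by choosing the two shift counts accordingly.  */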
1809 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1810 && mclass == MODE_INT
1811 && CONST_INT_P (op1)
1812 && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
1813 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1814 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1815 {
1816 rtx insns;
1817 rtx into_target, outof_target;
1818 rtx into_input, outof_input;
1819 rtx inter;
1820 int shift_count, left_shift, outof_word;
1821
1822 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1823 won't be accurate, so use a new target. Do this also if target is not
1824 a REG, first because having a register instead may open optimization
1825 opportunities, and second because if target and op0 happen to be MEMs
1826 designating the same location, we would risk clobbering it too early
1827 in the code sequence we generate below. */
1828 if (target == 0
1829 || target == op0
1830 || target == op1
1831 || !REG_P (target)
1832 || !valid_multiword_target_p (target))
1833 target = gen_reg_rtx (mode);
1834
1835 start_sequence ();
1836
1837 shift_count = INTVAL (op1);
1838
1839 /* OUTOF_* is the word we are shifting bits away from, and
1840 INTO_* is the word that we are shifting bits towards, thus
1841 they differ depending on the direction of the shift and
1842 WORDS_BIG_ENDIAN. */
1843
1844 left_shift = (binoptab == rotl_optab);
1845 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1846
1847 outof_target = operand_subword (target, outof_word, 1, mode);
1848 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1849
1850 outof_input = operand_subword_force (op0, outof_word, mode);
1851 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1852
1853 if (shift_count == BITS_PER_WORD)
1854 {
1855 /* This is just a word swap. */
1856 emit_move_insn (outof_target, into_input);
1857 emit_move_insn (into_target, outof_input);
1858 inter = const0_rtx;
1859 }
1860 else
1861 {
1862 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1863 rtx first_shift_count, second_shift_count;
1864 optab reverse_unsigned_shift, unsigned_shift;
1865
1866 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1867 ? lshr_optab : ashl_optab);
1868
1869 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1870 ? ashl_optab : lshr_optab);
1871
1872 if (shift_count > BITS_PER_WORD)
1873 {
1874 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1875 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1876 }
1877 else
1878 {
1879 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1880 second_shift_count = GEN_INT (shift_count);
1881 }
1882
1883 into_temp1 = expand_binop (word_mode, unsigned_shift,
1884 outof_input, first_shift_count,
1885 NULL_RTX, unsignedp, next_methods);
1886 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1887 into_input, second_shift_count,
1888 NULL_RTX, unsignedp, next_methods);
1889
1890 if (into_temp1 != 0 && into_temp2 != 0)
1891 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1892 into_target, unsignedp, next_methods);
1893 else
1894 inter = 0;
1895
1896 if (inter != 0 && inter != into_target)
1897 emit_move_insn (into_target, inter);
1898
1899 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1900 into_input, first_shift_count,
1901 NULL_RTX, unsignedp, next_methods);
1902 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1903 outof_input, second_shift_count,
1904 NULL_RTX, unsignedp, next_methods);
1905
1906 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1907 inter = expand_binop (word_mode, ior_optab,
1908 outof_temp1, outof_temp2,
1909 outof_target, unsignedp, next_methods);
1910
1911 if (inter != 0 && inter != outof_target)
1912 emit_move_insn (outof_target, inter);
1913 }
1914
1915 insns = get_insns ();
1916 end_sequence ();
1917
1918 if (inter != 0)
1919 {
1920 emit_insn (insns);
1921 return target;
1922 }
1923 }
1924
1925 /* These can be done a word at a time by propagating carries. */
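  /* Per-word carry/borrow detection: for addition, an unsigned overflow
     occurred iff the word result is less than either input word, hence the
     LT comparison of X against OP0_PIECE below; for subtraction, a borrow
     occurred iff the result is greater than the minuend, hence GT.  */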
1926 if ((binoptab == add_optab || binoptab == sub_optab)
1927 && mclass == MODE_INT
1928 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1929 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1930 {
1931 unsigned int i;
1932 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1933 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1934 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1935 rtx xop0, xop1, xtarget;
1936
1937       /* We can handle either a 1 or -1 value for the carry.  If
1938          STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
1939          it is the one easiest to get.  */
1940 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1941 int normalizep = STORE_FLAG_VALUE;
1942 #else
1943 int normalizep = 1;
1944 #endif
1945
1946 /* Prepare the operands. */
1947 xop0 = force_reg (mode, op0);
1948 xop1 = force_reg (mode, op1);
1949
1950 xtarget = gen_reg_rtx (mode);
1951
1952 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1953 target = xtarget;
1954
1955 /* Indicate for flow that the entire target reg is being set. */
1956 if (REG_P (target))
1957 emit_clobber (xtarget);
1958
1959 /* Do the actual arithmetic. */
1960 for (i = 0; i < nwords; i++)
1961 {
1962 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1963 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1964 rtx op0_piece = operand_subword_force (xop0, index, mode);
1965 rtx op1_piece = operand_subword_force (xop1, index, mode);
1966 rtx x;
1967
1968 /* Main add/subtract of the input operands. */
1969 x = expand_binop (word_mode, binoptab,
1970 op0_piece, op1_piece,
1971 target_piece, unsignedp, next_methods);
1972 if (x == 0)
1973 break;
1974
1975 if (i + 1 < nwords)
1976 {
1977 /* Store carry from main add/subtract. */
1978 carry_out = gen_reg_rtx (word_mode);
1979 carry_out = emit_store_flag_force (carry_out,
1980 (binoptab == add_optab
1981 ? LT : GT),
1982 x, op0_piece,
1983 word_mode, 1, normalizep);
1984 }
1985
1986 if (i > 0)
1987 {
1988 rtx newx;
1989
1990 /* Add/subtract previous carry to main result. */
1991 newx = expand_binop (word_mode,
1992 normalizep == 1 ? binoptab : otheroptab,
1993 x, carry_in,
1994 NULL_RTX, 1, next_methods);
1995
1996 if (i + 1 < nwords)
1997 {
1998 		  /* Get the carry out from adding/subtracting the carry in.  */
1999 rtx carry_tmp = gen_reg_rtx (word_mode);
2000 carry_tmp = emit_store_flag_force (carry_tmp,
2001 (binoptab == add_optab
2002 ? LT : GT),
2003 newx, x,
2004 word_mode, 1, normalizep);
2005
2006 		  /* Logical-ior the two possible carries together.  */
2007 carry_out = expand_binop (word_mode, ior_optab,
2008 carry_out, carry_tmp,
2009 carry_out, 0, next_methods);
2010 if (carry_out == 0)
2011 break;
2012 }
2013 emit_move_insn (target_piece, newx);
2014 }
2015 else
2016 {
2017 if (x != target_piece)
2018 emit_move_insn (target_piece, x);
2019 }
2020
2021 carry_in = carry_out;
2022 }
2023
2024 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2025 {
2026 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2027 || ! rtx_equal_p (target, xtarget))
2028 {
2029 rtx temp = emit_move_insn (target, xtarget);
2030
2031 set_unique_reg_note (temp,
2032 REG_EQUAL,
2033 gen_rtx_fmt_ee (binoptab->code, mode,
2034 copy_rtx (xop0),
2035 copy_rtx (xop1)));
2036 }
2037 else
2038 target = xtarget;
2039
2040 return target;
2041 }
2042
2043 else
2044 delete_insns_since (last);
2045 }
2046
2047 /* Attempt to synthesize double word multiplies using a sequence of word
2048 mode multiplications. We first attempt to generate a sequence using a
2049 more efficient unsigned widening multiply, and if that fails we then
2050 try using a signed widening multiply. */
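  /* The underlying identity, modulo 2**(2*W) where W is BITS_PER_WORD, is
     the schoolbook decomposition
	 (hi0:lo0) * (hi1:lo1) == widen (lo0) * widen (lo1)
				  + ((lo0 * hi1 + hi0 * lo1) << W);
     expand_doubleword_mult (defined earlier in this file) emits roughly
     this, using one widening multiply for the low product and word_mode
     multiplies and adds for the cross terms.  */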
2051
2052 if (binoptab == smul_optab
2053 && mclass == MODE_INT
2054 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2055 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2056 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2057 {
2058 rtx product = NULL_RTX;
2059 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2060 != CODE_FOR_nothing)
2061 {
2062 product = expand_doubleword_mult (mode, op0, op1, target,
2063 true, methods);
2064 if (!product)
2065 delete_insns_since (last);
2066 }
2067
2068 if (product == NULL_RTX
2069 && widening_optab_handler (smul_widen_optab, mode, word_mode)
2070 != CODE_FOR_nothing)
2071 {
2072 product = expand_doubleword_mult (mode, op0, op1, target,
2073 false, methods);
2074 if (!product)
2075 delete_insns_since (last);
2076 }
2077
2078 if (product != NULL_RTX)
2079 {
2080 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2081 {
2082 temp = emit_move_insn (target ? target : product, product);
2083 set_unique_reg_note (temp,
2084 REG_EQUAL,
2085 gen_rtx_fmt_ee (MULT, mode,
2086 copy_rtx (op0),
2087 copy_rtx (op1)));
2088 }
2089 return product;
2090 }
2091 }
2092
2093 /* It can't be open-coded in this mode.
2094 Use a library call if one is available and caller says that's ok. */
2095
2096 libfunc = optab_libfunc (binoptab, mode);
2097 if (libfunc
2098 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2099 {
2100 rtx insns;
2101 rtx op1x = op1;
2102 enum machine_mode op1_mode = mode;
2103 rtx value;
2104
2105 start_sequence ();
2106
2107 if (shift_optab_p (binoptab))
2108 {
2109 op1_mode = targetm.libgcc_shift_count_mode ();
2110 /* Specify unsigned here,
2111 since negative shift counts are meaningless. */
2112 op1x = convert_to_mode (op1_mode, op1, 1);
2113 }
2114
2115 if (GET_MODE (op0) != VOIDmode
2116 && GET_MODE (op0) != mode)
2117 op0 = convert_to_mode (mode, op0, unsignedp);
2118
2119 /* Pass 1 for NO_QUEUE so we don't lose any increments
2120 if the libcall is cse'd or moved. */
2121 value = emit_library_call_value (libfunc,
2122 NULL_RTX, LCT_CONST, mode, 2,
2123 op0, mode, op1x, op1_mode);
2124
2125 insns = get_insns ();
2126 end_sequence ();
2127
2128 target = gen_reg_rtx (mode);
2129 emit_libcall_block (insns, target, value,
2130 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2131
2132 return target;
2133 }
2134
2135 delete_insns_since (last);
2136
2137 /* It can't be done in this mode. Can we do it in a wider mode? */
2138
2139 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2140 || methods == OPTAB_MUST_WIDEN))
2141 {
2142 /* Caller says, don't even try. */
2143 delete_insns_since (entry_last);
2144 return 0;
2145 }
2146
2147 /* Compute the value of METHODS to pass to recursive calls.
2148 Don't allow widening to be tried recursively. */
2149
2150 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2151
2152 /* Look for a wider mode of the same class for which it appears we can do
2153 the operation. */
2154
2155 if (CLASS_HAS_WIDER_MODES_P (mclass))
2156 {
2157 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2158 wider_mode != VOIDmode;
2159 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2160 {
2161 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2162 != CODE_FOR_nothing
2163 || (methods == OPTAB_LIB
2164 && optab_libfunc (binoptab, wider_mode)))
2165 {
2166 rtx xop0 = op0, xop1 = op1;
2167 int no_extend = 0;
2168
2169 /* For certain integer operations, we need not actually extend
2170 the narrow operands, as long as we will truncate
2171 the results to the same narrowness. */
2172
2173 if ((binoptab == ior_optab || binoptab == and_optab
2174 || binoptab == xor_optab
2175 || binoptab == add_optab || binoptab == sub_optab
2176 || binoptab == smul_optab || binoptab == ashl_optab)
2177 && mclass == MODE_INT)
2178 no_extend = 1;
2179
2180 xop0 = widen_operand (xop0, wider_mode, mode,
2181 unsignedp, no_extend);
2182
2183 /* The second operand of a shift must always be extended. */
2184 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2185 no_extend && binoptab != ashl_optab);
2186
2187 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2188 unsignedp, methods);
2189 if (temp)
2190 {
2191 if (mclass != MODE_INT
2192 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2193 {
2194 if (target == 0)
2195 target = gen_reg_rtx (mode);
2196 convert_move (target, temp, 0);
2197 return target;
2198 }
2199 else
2200 return gen_lowpart (mode, temp);
2201 }
2202 else
2203 delete_insns_since (last);
2204 }
2205 }
2206 }
2207
2208 delete_insns_since (entry_last);
2209 return 0;
2210 }
2211 \f
2212 /* Expand a binary operator which has both signed and unsigned forms.
2213 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2214 signed operations.
2215
2216 If we widen unsigned operands, we may use a signed wider operation instead
2217 of an unsigned wider operation, since the result would be the same. */
2218
2219 rtx
2220 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2221 rtx op0, rtx op1, rtx target, int unsignedp,
2222 enum optab_methods methods)
2223 {
2224 rtx temp;
2225 optab direct_optab = unsignedp ? uoptab : soptab;
2226 struct optab_d wide_soptab;
2227
2228 /* Do it without widening, if possible. */
2229 temp = expand_binop (mode, direct_optab, op0, op1, target,
2230 unsignedp, OPTAB_DIRECT);
2231 if (temp || methods == OPTAB_DIRECT)
2232 return temp;
2233
2234 /* Try widening to a signed int. Make a fake signed optab that
2235 hides any signed insn for direct use. */
2236 wide_soptab = *soptab;
2237 set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing);
2238 /* We don't want to generate new hash table entries from this fake
2239 optab. */
2240 wide_soptab.libcall_gen = NULL;
2241
2242 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2243 unsignedp, OPTAB_WIDEN);
2244
2245 /* For unsigned operands, try widening to an unsigned int. */
2246 if (temp == 0 && unsignedp)
2247 temp = expand_binop (mode, uoptab, op0, op1, target,
2248 unsignedp, OPTAB_WIDEN);
2249 if (temp || methods == OPTAB_WIDEN)
2250 return temp;
2251
2252 /* Use the right width libcall if that exists. */
2253 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2254 if (temp || methods == OPTAB_LIB)
2255 return temp;
2256
2257 /* Must widen and use a libcall, use either signed or unsigned. */
2258 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2259 unsignedp, methods);
2260 if (temp != 0)
2261 return temp;
2262 if (unsignedp)
2263 return expand_binop (mode, uoptab, op0, op1, target,
2264 unsignedp, methods);
2265 return 0;
2266 }
2267 \f
2268 /* Generate code to perform an operation specified by UNOPTAB
2269 on operand OP0, with two results to TARG0 and TARG1.
2270 We assume that the order of the operands for the instruction
2271 is TARG0, TARG1, OP0.
2272
2273 Either TARG0 or TARG1 may be zero, but what that means is that
2274 the result is not actually wanted. We will generate it into
2275 a dummy pseudo-reg and discard it. They may not both be zero.
2276
2277 Returns 1 if this operation can be performed; 0 if not. */
2278
2279 int
2280 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2281 int unsignedp)
2282 {
2283 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2284 enum mode_class mclass;
2285 enum machine_mode wider_mode;
2286 rtx entry_last = get_last_insn ();
2287 rtx last;
2288
2289 mclass = GET_MODE_CLASS (mode);
2290
2291 if (!targ0)
2292 targ0 = gen_reg_rtx (mode);
2293 if (!targ1)
2294 targ1 = gen_reg_rtx (mode);
2295
2296 /* Record where to go back to if we fail. */
2297 last = get_last_insn ();
2298
2299 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2300 {
2301 struct expand_operand ops[3];
2302 enum insn_code icode = optab_handler (unoptab, mode);
2303
2304 create_fixed_operand (&ops[0], targ0);
2305 create_fixed_operand (&ops[1], targ1);
2306 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2307 if (maybe_expand_insn (icode, 3, ops))
2308 return 1;
2309 }
2310
2311 /* It can't be done in this mode. Can we do it in a wider mode? */
2312
2313 if (CLASS_HAS_WIDER_MODES_P (mclass))
2314 {
2315 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2316 wider_mode != VOIDmode;
2317 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2318 {
2319 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2320 {
2321 rtx t0 = gen_reg_rtx (wider_mode);
2322 rtx t1 = gen_reg_rtx (wider_mode);
2323 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2324
2325 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2326 {
2327 convert_move (targ0, t0, unsignedp);
2328 convert_move (targ1, t1, unsignedp);
2329 return 1;
2330 }
2331 else
2332 delete_insns_since (last);
2333 }
2334 }
2335 }
2336
2337 delete_insns_since (entry_last);
2338 return 0;
2339 }
2340 \f
2341 /* Generate code to perform an operation specified by BINOPTAB
2342    on operands OP0 and OP1, with two results to TARG0 and TARG1.
2343 We assume that the order of the operands for the instruction
2344 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2345 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2346
2347 Either TARG0 or TARG1 may be zero, but what that means is that
2348 the result is not actually wanted. We will generate it into
2349 a dummy pseudo-reg and discard it. They may not both be zero.
2350
2351 Returns 1 if this operation can be performed; 0 if not. */
2352
2353 int
2354 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2355 int unsignedp)
2356 {
2357 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2358 enum mode_class mclass;
2359 enum machine_mode wider_mode;
2360 rtx entry_last = get_last_insn ();
2361 rtx last;
2362
2363 mclass = GET_MODE_CLASS (mode);
2364
2365 if (!targ0)
2366 targ0 = gen_reg_rtx (mode);
2367 if (!targ1)
2368 targ1 = gen_reg_rtx (mode);
2369
2370 /* Record where to go back to if we fail. */
2371 last = get_last_insn ();
2372
2373 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2374 {
2375 struct expand_operand ops[4];
2376 enum insn_code icode = optab_handler (binoptab, mode);
2377 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2378 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2379 rtx xop0 = op0, xop1 = op1;
2380
2381 /* If we are optimizing, force expensive constants into a register. */
2382 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2383 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2384
2385 create_fixed_operand (&ops[0], targ0);
2386       create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2387       create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2388 create_fixed_operand (&ops[3], targ1);
2389 if (maybe_expand_insn (icode, 4, ops))
2390 return 1;
2391 delete_insns_since (last);
2392 }
2393
2394 /* It can't be done in this mode. Can we do it in a wider mode? */
2395
2396 if (CLASS_HAS_WIDER_MODES_P (mclass))
2397 {
2398 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2399 wider_mode != VOIDmode;
2400 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2401 {
2402 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2403 {
2404 rtx t0 = gen_reg_rtx (wider_mode);
2405 rtx t1 = gen_reg_rtx (wider_mode);
2406 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2407 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2408
2409 if (expand_twoval_binop (binoptab, cop0, cop1,
2410 t0, t1, unsignedp))
2411 {
2412 convert_move (targ0, t0, unsignedp);
2413 convert_move (targ1, t1, unsignedp);
2414 return 1;
2415 }
2416 else
2417 delete_insns_since (last);
2418 }
2419 }
2420 }
2421
2422 delete_insns_since (entry_last);
2423 return 0;
2424 }
2425
2426 /* Expand the two-valued library call indicated by BINOPTAB, but
2427 preserve only one of the values. If TARG0 is non-NULL, the first
2428 value is placed into TARG0; otherwise the second value is placed
2429 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2430 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2431 This routine assumes that the value returned by the library call is
2432 as if the return value was of an integral mode twice as wide as the
2433 mode of OP0. Returns 1 if the call was successful. */
2434
2435 bool
2436 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2437 rtx targ0, rtx targ1, enum rtx_code code)
2438 {
2439 enum machine_mode mode;
2440 enum machine_mode libval_mode;
2441 rtx libval;
2442 rtx insns;
2443 rtx libfunc;
2444
2445 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2446 gcc_assert (!targ0 != !targ1);
2447
2448 mode = GET_MODE (op0);
2449 libfunc = optab_libfunc (binoptab, mode);
2450 if (!libfunc)
2451 return false;
2452
2453 /* The value returned by the library function will have twice as
2454 many bits as the nominal MODE. */
2455 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2456 MODE_INT);
2457 start_sequence ();
2458 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2459 libval_mode, 2,
2460 op0, mode,
2461 op1, mode);
2462 /* Get the part of VAL containing the value that we want. */
2463 libval = simplify_gen_subreg (mode, libval, libval_mode,
2464 targ0 ? 0 : GET_MODE_SIZE (mode));
2465 insns = get_insns ();
2466 end_sequence ();
2467   /* Move the result into the desired location.  */
2468 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2469 gen_rtx_fmt_ee (code, mode, op0, op1));
2470
2471 return true;
2472 }
2473
2474 \f
2475 /* Wrapper around expand_unop which takes an rtx code to specify
2476 the operation to perform, not an optab pointer. All other
2477 arguments are the same. */
2478 rtx
2479 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2480 rtx target, int unsignedp)
2481 {
2482 optab unop = code_to_optab[(int) code];
2483 gcc_assert (unop);
2484
2485 return expand_unop (mode, unop, op0, target, unsignedp);
2486 }
2487
2488 /* Try calculating
2489 (clz:narrow x)
2490 as
2491 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2492
2493 A similar operation can be used for clrsb. UNOPTAB says which operation
2494 we are trying to expand. */
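/* For example, a QImode clz can be computed in SImode as
       (clz:SI (zero_extend:SI x)) - (32 - 8).
   For clrsb the operand is sign-extended instead, which is what the
   unoptab != clrsb_optab argument passed to widen_operand below selects.  */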
2495 static rtx
2496 widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
2497 {
2498 enum mode_class mclass = GET_MODE_CLASS (mode);
2499 if (CLASS_HAS_WIDER_MODES_P (mclass))
2500 {
2501 enum machine_mode wider_mode;
2502 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2503 wider_mode != VOIDmode;
2504 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2505 {
2506 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2507 {
2508 rtx xop0, temp, last;
2509
2510 last = get_last_insn ();
2511
2512 if (target == 0)
2513 target = gen_reg_rtx (mode);
2514 xop0 = widen_operand (op0, wider_mode, mode,
2515 unoptab != clrsb_optab, false);
2516 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2517 unoptab != clrsb_optab);
2518 if (temp != 0)
2519 temp = expand_binop (wider_mode, sub_optab, temp,
2520 GEN_INT (GET_MODE_PRECISION (wider_mode)
2521 - GET_MODE_PRECISION (mode)),
2522 target, true, OPTAB_DIRECT);
2523 if (temp == 0)
2524 delete_insns_since (last);
2525
2526 return temp;
2527 }
2528 }
2529 }
2530 return 0;
2531 }
2532
2533 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2534 quantities, choosing which based on whether the high word is nonzero. */
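/* That is, clz ([hi:lo]) is clz (hi) when the high word is nonzero, and
   BITS_PER_WORD + clz (lo) otherwise.  */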
2535 static rtx
2536 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2537 {
2538 rtx xop0 = force_reg (mode, op0);
2539 rtx subhi = gen_highpart (word_mode, xop0);
2540 rtx sublo = gen_lowpart (word_mode, xop0);
2541 rtx hi0_label = gen_label_rtx ();
2542 rtx after_label = gen_label_rtx ();
2543 rtx seq, temp, result;
2544
2545 /* If we were not given a target, use a word_mode register, not a
2546 'mode' register. The result will fit, and nobody is expecting
2547 anything bigger (the return type of __builtin_clz* is int). */
2548 if (!target)
2549 target = gen_reg_rtx (word_mode);
2550
2551 /* In any case, write to a word_mode scratch in both branches of the
2552 conditional, so we can ensure there is a single move insn setting
2553 'target' to tag a REG_EQUAL note on. */
2554 result = gen_reg_rtx (word_mode);
2555
2556 start_sequence ();
2557
2558 /* If the high word is not equal to zero,
2559 then clz of the full value is clz of the high word. */
2560 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2561 word_mode, true, hi0_label);
2562
2563 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2564 if (!temp)
2565 goto fail;
2566
2567 if (temp != result)
2568 convert_move (result, temp, true);
2569
2570 emit_jump_insn (gen_jump (after_label));
2571 emit_barrier ();
2572
2573 /* Else clz of the full value is clz of the low word plus the number
2574 of bits in the high word. */
2575 emit_label (hi0_label);
2576
2577 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2578 if (!temp)
2579 goto fail;
2580 temp = expand_binop (word_mode, add_optab, temp,
2581 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2582 result, true, OPTAB_DIRECT);
2583 if (!temp)
2584 goto fail;
2585 if (temp != result)
2586 convert_move (result, temp, true);
2587
2588 emit_label (after_label);
2589 convert_move (target, result, true);
2590
2591 seq = get_insns ();
2592 end_sequence ();
2593
2594 add_equal_note (seq, target, CLZ, xop0, 0);
2595 emit_insn (seq);
2596 return target;
2597
2598 fail:
2599 end_sequence ();
2600 return 0;
2601 }
2602
2603 /* Try calculating
2604 (bswap:narrow x)
2605 as
2606 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2607 static rtx
2608 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2609 {
2610 enum mode_class mclass = GET_MODE_CLASS (mode);
2611 enum machine_mode wider_mode;
2612 rtx x, last;
2613
2614 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2615 return NULL_RTX;
2616
2617 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2618 wider_mode != VOIDmode;
2619 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2620 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2621 goto found;
2622 return NULL_RTX;
2623
2624 found:
2625 last = get_last_insn ();
2626
2627 x = widen_operand (op0, wider_mode, mode, true, true);
2628 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2629
2630 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2631 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2632 if (x != 0)
2633 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2634 GET_MODE_BITSIZE (wider_mode)
2635 - GET_MODE_BITSIZE (mode),
2636 NULL_RTX, true);
2637
2638 if (x != 0)
2639 {
2640 if (target == 0)
2641 target = gen_reg_rtx (mode);
2642 emit_move_insn (target, gen_lowpart (mode, x));
2643 }
2644 else
2645 delete_insns_since (last);
2646
2647 return target;
2648 }
2649
2650 /* Try calculating bswap as two bswaps of two word-sized operands. */
2651
2652 static rtx
2653 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2654 {
2655 rtx t0, t1;
2656
2657 t1 = expand_unop (word_mode, bswap_optab,
2658 operand_subword_force (op, 0, mode), NULL_RTX, true);
2659 t0 = expand_unop (word_mode, bswap_optab,
2660 operand_subword_force (op, 1, mode), NULL_RTX, true);
2661
2662 if (target == 0 || !valid_multiword_target_p (target))
2663 target = gen_reg_rtx (mode);
2664 if (REG_P (target))
2665 emit_clobber (target);
2666 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2667 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2668
2669 return target;
2670 }
2671
2672 /* Try calculating (parity x) as (and (popcount x) 1), where
2673 popcount can also be done in a wider mode. */
2674 static rtx
2675 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2676 {
2677 enum mode_class mclass = GET_MODE_CLASS (mode);
2678 if (CLASS_HAS_WIDER_MODES_P (mclass))
2679 {
2680 enum machine_mode wider_mode;
2681 for (wider_mode = mode; wider_mode != VOIDmode;
2682 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2683 {
2684 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2685 {
2686 rtx xop0, temp, last;
2687
2688 last = get_last_insn ();
2689
2690 if (target == 0)
2691 target = gen_reg_rtx (mode);
2692 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2693 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2694 true);
2695 if (temp != 0)
2696 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2697 target, true, OPTAB_DIRECT);
2698 if (temp == 0)
2699 delete_insns_since (last);
2700
2701 return temp;
2702 }
2703 }
2704 }
2705 return 0;
2706 }
2707
2708 /* Try calculating ctz(x) as K - clz(x & -x),
2709 where K is GET_MODE_PRECISION(mode) - 1.
2710
2711 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2712 don't have to worry about what the hardware does in that case. (If
2713 the clz instruction produces the usual value at 0, which is K, the
2714 result of this code sequence will be -1; expand_ffs, below, relies
2715 on this. It might be nice to have it be K instead, for consistency
2716 with the (very few) processors that provide a ctz with a defined
2717 value, but that would take one more instruction, and it would be
2718    less convenient for expand_ffs anyway.)  */
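/* Worked example in SImode (K = 31): for x = 8, x & -x == 8,
   clz (8) == 28, and 31 - 28 == 3 == ctz (x).  */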
2719
2720 static rtx
2721 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2722 {
2723 rtx seq, temp;
2724
2725 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2726 return 0;
2727
2728 start_sequence ();
2729
2730 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2731 if (temp)
2732 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2733 true, OPTAB_DIRECT);
2734 if (temp)
2735 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2736 if (temp)
2737 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_PRECISION (mode) - 1),
2738 temp, target,
2739 true, OPTAB_DIRECT);
2740 if (temp == 0)
2741 {
2742 end_sequence ();
2743 return 0;
2744 }
2745
2746 seq = get_insns ();
2747 end_sequence ();
2748
2749 add_equal_note (seq, temp, CTZ, op0, 0);
2750 emit_insn (seq);
2751 return temp;
2752 }
2753
2754
2755 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2756    else with the sequence used by expand_ctz.
2757
2758 The ffs builtin promises to return zero for a zero value and ctz/clz
2759 may have an undefined value in that case. If they do not give us a
2760 convenient value, we have to generate a test and branch. */
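/* The relation used below is ffs (x) == ctz (x) + 1 for x != 0 and
   ffs (0) == 0; hence the final add of 1, preceded when necessary by a
   test and branch that forces the x == 0 case to -1 first.  */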
2761 static rtx
2762 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2763 {
2764 HOST_WIDE_INT val = 0;
2765 bool defined_at_zero = false;
2766 rtx temp, seq;
2767
2768 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2769 {
2770 start_sequence ();
2771
2772 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2773 if (!temp)
2774 goto fail;
2775
2776 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2777 }
2778 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2779 {
2780 start_sequence ();
2781 temp = expand_ctz (mode, op0, 0);
2782 if (!temp)
2783 goto fail;
2784
2785 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2786 {
2787 defined_at_zero = true;
2788 val = (GET_MODE_PRECISION (mode) - 1) - val;
2789 }
2790 }
2791 else
2792 return 0;
2793
2794 if (defined_at_zero && val == -1)
2795 /* No correction needed at zero. */;
2796 else
2797 {
2798 /* We don't try to do anything clever with the situation found
2799 	 on some processors (e.g. Alpha) where ctz(0:mode) ==
2800 bitsize(mode). If someone can think of a way to send N to -1
2801 and leave alone all values in the range 0..N-1 (where N is a
2802 power of two), cheaper than this test-and-branch, please add it.
2803
2804 The test-and-branch is done after the operation itself, in case
2805 the operation sets condition codes that can be recycled for this.
2806 (This is true on i386, for instance.) */
2807
2808 rtx nonzero_label = gen_label_rtx ();
2809 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2810 mode, true, nonzero_label);
2811
2812 convert_move (temp, GEN_INT (-1), false);
2813 emit_label (nonzero_label);
2814 }
2815
2816 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2817 to produce a value in the range 0..bitsize. */
2818 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2819 target, false, OPTAB_DIRECT);
2820 if (!temp)
2821 goto fail;
2822
2823 seq = get_insns ();
2824 end_sequence ();
2825
2826 add_equal_note (seq, temp, FFS, op0, 0);
2827 emit_insn (seq);
2828 return temp;
2829
2830 fail:
2831 end_sequence ();
2832 return 0;
2833 }
2834
2835 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2836 conditions, VAL may already be a SUBREG against which we cannot generate
2837 a further SUBREG. In this case, we expect forcing the value into a
2838 register will work around the situation. */
2839
2840 static rtx
2841 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2842 enum machine_mode imode)
2843 {
2844 rtx ret;
2845 ret = lowpart_subreg (omode, val, imode);
2846 if (ret == NULL)
2847 {
2848 val = force_reg (imode, val);
2849 ret = lowpart_subreg (omode, val, imode);
2850 gcc_assert (ret != NULL);
2851 }
2852 return ret;
2853 }
2854
2855 /* Expand a floating point absolute value or negation operation via a
2856 logical operation on the sign bit. */
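/* For a format with a single sign bit (e.g. IEEE binary32, whose sign mask
   is 0x80000000), negation is an XOR with the sign mask and absolute value
   is an AND with its complement; MASK below is built from fmt->signbit_rw
   accordingly.  */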
2857
2858 static rtx
2859 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2860 rtx op0, rtx target)
2861 {
2862 const struct real_format *fmt;
2863 int bitpos, word, nwords, i;
2864 enum machine_mode imode;
2865 double_int mask;
2866 rtx temp, insns;
2867
2868 /* The format has to have a simple sign bit. */
2869 fmt = REAL_MODE_FORMAT (mode);
2870 if (fmt == NULL)
2871 return NULL_RTX;
2872
2873 bitpos = fmt->signbit_rw;
2874 if (bitpos < 0)
2875 return NULL_RTX;
2876
2877 /* Don't create negative zeros if the format doesn't support them. */
2878 if (code == NEG && !fmt->has_signed_zero)
2879 return NULL_RTX;
2880
2881 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2882 {
2883 imode = int_mode_for_mode (mode);
2884 if (imode == BLKmode)
2885 return NULL_RTX;
2886 word = 0;
2887 nwords = 1;
2888 }
2889 else
2890 {
2891 imode = word_mode;
2892
2893 if (FLOAT_WORDS_BIG_ENDIAN)
2894 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2895 else
2896 word = bitpos / BITS_PER_WORD;
2897 bitpos = bitpos % BITS_PER_WORD;
2898 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2899 }
2900
2901 mask = double_int_setbit (double_int_zero, bitpos);
2902 if (code == ABS)
2903 mask = double_int_not (mask);
2904
2905 if (target == 0
2906 || target == op0
2907 || (nwords > 1 && !valid_multiword_target_p (target)))
2908 target = gen_reg_rtx (mode);
2909
2910 if (nwords > 1)
2911 {
2912 start_sequence ();
2913
2914 for (i = 0; i < nwords; ++i)
2915 {
2916 rtx targ_piece = operand_subword (target, i, 1, mode);
2917 rtx op0_piece = operand_subword_force (op0, i, mode);
2918
2919 if (i == word)
2920 {
2921 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2922 op0_piece,
2923 immed_double_int_const (mask, imode),
2924 targ_piece, 1, OPTAB_LIB_WIDEN);
2925 if (temp != targ_piece)
2926 emit_move_insn (targ_piece, temp);
2927 }
2928 else
2929 emit_move_insn (targ_piece, op0_piece);
2930 }
2931
2932 insns = get_insns ();
2933 end_sequence ();
2934
2935 emit_insn (insns);
2936 }
2937 else
2938 {
2939 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2940 gen_lowpart (imode, op0),
2941 immed_double_int_const (mask, imode),
2942 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2943 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2944
2945 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2946 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2947 }
2948
2949 return target;
2950 }
2951
2952 /* As expand_unop, but will fail rather than attempt the operation in a
2953 different mode or with a libcall. */
2954 static rtx
2955 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2956 int unsignedp)
2957 {
2958 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2959 {
2960 struct expand_operand ops[2];
2961 enum insn_code icode = optab_handler (unoptab, mode);
2962 rtx last = get_last_insn ();
2963 rtx pat;
2964
2965 create_output_operand (&ops[0], target, mode);
2966 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2967 pat = maybe_gen_insn (icode, 2, ops);
2968 if (pat)
2969 {
2970 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2971 && ! add_equal_note (pat, ops[0].value, unoptab->code,
2972 ops[1].value, NULL_RTX))
2973 {
2974 delete_insns_since (last);
2975 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2976 }
2977
2978 emit_insn (pat);
2979
2980 return ops[0].value;
2981 }
2982 }
2983 return 0;
2984 }
2985
2986 /* Generate code to perform an operation specified by UNOPTAB
2987 on operand OP0, with result having machine-mode MODE.
2988
2989 UNSIGNEDP is for the case where we have to widen the operands
2990 to perform the operation. It says to use zero-extension.
2991
2992 If TARGET is nonzero, the value
2993 is generated there, if it is convenient to do so.
2994 In all cases an rtx is returned for the locus of the value;
2995 this may or may not be TARGET. */
2996
2997 rtx
2998 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2999 int unsignedp)
3000 {
3001 enum mode_class mclass = GET_MODE_CLASS (mode);
3002 enum machine_mode wider_mode;
3003 rtx temp;
3004 rtx libfunc;
3005
3006 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3007 if (temp)
3008 return temp;
3009
3010 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3011
3012 /* Widening (or narrowing) clz needs special treatment. */
3013 if (unoptab == clz_optab)
3014 {
3015 temp = widen_leading (mode, op0, target, unoptab);
3016 if (temp)
3017 return temp;
3018
3019 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3020 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3021 {
3022 temp = expand_doubleword_clz (mode, op0, target);
3023 if (temp)
3024 return temp;
3025 }
3026
3027 goto try_libcall;
3028 }
3029
3030 if (unoptab == clrsb_optab)
3031 {
3032 temp = widen_leading (mode, op0, target, unoptab);
3033 if (temp)
3034 return temp;
3035 goto try_libcall;
3036 }
3037
3038 /* Widening (or narrowing) bswap needs special treatment. */
3039 if (unoptab == bswap_optab)
3040 {
3041 temp = widen_bswap (mode, op0, target);
3042 if (temp)
3043 return temp;
3044
3045 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3046 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3047 {
3048 temp = expand_doubleword_bswap (mode, op0, target);
3049 if (temp)
3050 return temp;
3051 }
3052
3053 goto try_libcall;
3054 }
3055
3056 if (CLASS_HAS_WIDER_MODES_P (mclass))
3057 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3058 wider_mode != VOIDmode;
3059 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3060 {
3061 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3062 {
3063 rtx xop0 = op0;
3064 rtx last = get_last_insn ();
3065
3066 /* For certain operations, we need not actually extend
3067 the narrow operand, as long as we will truncate the
3068 results to the same narrowness. */
3069
3070 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3071 (unoptab == neg_optab
3072 || unoptab == one_cmpl_optab)
3073 && mclass == MODE_INT);
3074
3075 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3076 unsignedp);
3077
3078 if (temp)
3079 {
3080 if (mclass != MODE_INT
3081 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3082 {
3083 if (target == 0)
3084 target = gen_reg_rtx (mode);
3085 convert_move (target, temp, 0);
3086 return target;
3087 }
3088 else
3089 return gen_lowpart (mode, temp);
3090 }
3091 else
3092 delete_insns_since (last);
3093 }
3094 }
3095
3096 /* These can be done a word at a time. */
3097 if (unoptab == one_cmpl_optab
3098 && mclass == MODE_INT
3099 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3100 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3101 {
3102 int i;
3103 rtx insns;
3104
3105 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
3106 target = gen_reg_rtx (mode);
3107
3108 start_sequence ();
3109
3110 /* Do the actual arithmetic. */
3111 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3112 {
3113 rtx target_piece = operand_subword (target, i, 1, mode);
3114 rtx x = expand_unop (word_mode, unoptab,
3115 operand_subword_force (op0, i, mode),
3116 target_piece, unsignedp);
3117
3118 if (target_piece != x)
3119 emit_move_insn (target_piece, x);
3120 }
3121
3122 insns = get_insns ();
3123 end_sequence ();
3124
3125 emit_insn (insns);
3126 return target;
3127 }
3128
3129 if (unoptab->code == NEG)
3130 {
3131 /* Try negating floating point values by flipping the sign bit. */
3132 if (SCALAR_FLOAT_MODE_P (mode))
3133 {
3134 temp = expand_absneg_bit (NEG, mode, op0, target);
3135 if (temp)
3136 return temp;
3137 }
3138
3139 /* If there is no negation pattern, and we have no negative zero,
3140 try subtracting from zero. */
3141 if (!HONOR_SIGNED_ZEROS (mode))
3142 {
3143 temp = expand_binop (mode, (unoptab == negv_optab
3144 ? subv_optab : sub_optab),
3145 CONST0_RTX (mode), op0, target,
3146 unsignedp, OPTAB_DIRECT);
3147 if (temp)
3148 return temp;
3149 }
3150 }
3151
3152 /* Try calculating parity (x) as popcount (x) % 2. */
3153 if (unoptab == parity_optab)
3154 {
3155 temp = expand_parity (mode, op0, target);
3156 if (temp)
3157 return temp;
3158 }
3159
3160 /* Try implementing ffs (x) in terms of clz (x). */
3161 if (unoptab == ffs_optab)
3162 {
3163 temp = expand_ffs (mode, op0, target);
3164 if (temp)
3165 return temp;
3166 }
3167
3168 /* Try implementing ctz (x) in terms of clz (x). */
3169 if (unoptab == ctz_optab)
3170 {
3171 temp = expand_ctz (mode, op0, target);
3172 if (temp)
3173 return temp;
3174 }
3175
3176 try_libcall:
3177 /* Now try a library call in this mode. */
3178 libfunc = optab_libfunc (unoptab, mode);
3179 if (libfunc)
3180 {
3181 rtx insns;
3182 rtx value;
3183 rtx eq_value;
3184 enum machine_mode outmode = mode;
3185
3186 /* All of these functions return small values. Thus we choose to
3187 have them return something that isn't a double-word. */
3188 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3189 || unoptab == clrsb_optab || unoptab == popcount_optab
3190 || unoptab == parity_optab)
3191 outmode
3192 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3193 optab_libfunc (unoptab, mode)));
3194
3195 start_sequence ();
3196
3197 /* Pass 1 for NO_QUEUE so we don't lose any increments
3198 if the libcall is cse'd or moved. */
3199 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3200 1, op0, mode);
3201 insns = get_insns ();
3202 end_sequence ();
3203
3204 target = gen_reg_rtx (outmode);
3205 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3206 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3207 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3208 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3209 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3210 emit_libcall_block (insns, target, value, eq_value);
3211
3212 return target;
3213 }
3214
3215 /* It can't be done in this mode. Can we do it in a wider mode? */
3216
3217 if (CLASS_HAS_WIDER_MODES_P (mclass))
3218 {
3219 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3220 wider_mode != VOIDmode;
3221 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3222 {
3223 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3224 || optab_libfunc (unoptab, wider_mode))
3225 {
3226 rtx xop0 = op0;
3227 rtx last = get_last_insn ();
3228
3229 /* For certain operations, we need not actually extend
3230 the narrow operand, as long as we will truncate the
3231 results to the same narrowness. */
3232
3233 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3234 (unoptab == neg_optab
3235 || unoptab == one_cmpl_optab)
3236 && mclass == MODE_INT);
3237
3238 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3239 unsignedp);
3240
3241 /* If we are generating clz using wider mode, adjust the
3242 result. Similarly for clrsb. */
3243 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3244 && temp != 0)
3245 temp = expand_binop (wider_mode, sub_optab, temp,
3246 GEN_INT (GET_MODE_PRECISION (wider_mode)
3247 - GET_MODE_PRECISION (mode)),
3248 target, true, OPTAB_DIRECT);
3249
3250 if (temp)
3251 {
3252 if (mclass != MODE_INT)
3253 {
3254 if (target == 0)
3255 target = gen_reg_rtx (mode);
3256 convert_move (target, temp, 0);
3257 return target;
3258 }
3259 else
3260 return gen_lowpart (mode, temp);
3261 }
3262 else
3263 delete_insns_since (last);
3264 }
3265 }
3266 }
3267
3268 /* One final attempt at implementing negation via subtraction,
3269 this time allowing widening of the operand. */
3270 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3271 {
3272 rtx temp;
3273 temp = expand_binop (mode,
3274 unoptab == negv_optab ? subv_optab : sub_optab,
3275 CONST0_RTX (mode), op0,
3276 target, unsignedp, OPTAB_LIB_WIDEN);
3277 if (temp)
3278 return temp;
3279 }
3280
3281 return 0;
3282 }
3283 \f
3284 /* Emit code to compute the absolute value of OP0, with result to
3285 TARGET if convenient. (TARGET may be 0.) The return value says
3286 where the result actually is to be found.
3287
3288 MODE is the mode of the operand; the mode of the result is
3289    different but can be deduced from MODE.  */
3292
3293 rtx
3294 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3295 int result_unsignedp)
3296 {
3297 rtx temp;
3298
3299 if (! flag_trapv)
3300 result_unsignedp = 1;
3301
3302 /* First try to do it with a special abs instruction. */
3303 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3304 op0, target, 0);
3305 if (temp != 0)
3306 return temp;
3307
3308 /* For floating point modes, try clearing the sign bit. */
3309 if (SCALAR_FLOAT_MODE_P (mode))
3310 {
3311 temp = expand_absneg_bit (ABS, mode, op0, target);
3312 if (temp)
3313 return temp;
3314 }
3315
3316 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3317 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3318 && !HONOR_SIGNED_ZEROS (mode))
3319 {
3320 rtx last = get_last_insn ();
3321
3322 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3323 if (temp != 0)
3324 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3325 OPTAB_WIDEN);
3326
3327 if (temp != 0)
3328 return temp;
3329
3330 delete_insns_since (last);
3331 }
3332
3333 /* If this machine has expensive jumps, we can do integer absolute
3334 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3335 where W is the width of MODE. */
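  /* E.g. for W == 32 and x == -5: the arithmetic shift gives -1,
     x ^ -1 == 4, and 4 - (-1) == 5; for x >= 0 the shift gives 0 and the
     expression reduces to x.  */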
3336
3337 if (GET_MODE_CLASS (mode) == MODE_INT
3338 && BRANCH_COST (optimize_insn_for_speed_p (),
3339 false) >= 2)
3340 {
3341 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3342 GET_MODE_PRECISION (mode) - 1,
3343 NULL_RTX, 0);
3344
3345 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3346 OPTAB_LIB_WIDEN);
3347 if (temp != 0)
3348 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3349 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3350
3351 if (temp != 0)
3352 return temp;
3353 }
3354
3355 return NULL_RTX;
3356 }
3357
3358 rtx
3359 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3360 int result_unsignedp, int safe)
3361 {
3362 rtx temp, op1;
3363
3364 if (! flag_trapv)
3365 result_unsignedp = 1;
3366
3367 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3368 if (temp != 0)
3369 return temp;
3370
3371 /* If that does not win, use conditional jump and negate. */
3372
3373   /* It is safe to use the target if it is the same
3374      as the source and is also a pseudo register.  */
3375 if (op0 == target && REG_P (op0)
3376 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3377 safe = 1;
3378
3379 op1 = gen_label_rtx ();
3380 if (target == 0 || ! safe
3381 || GET_MODE (target) != mode
3382 || (MEM_P (target) && MEM_VOLATILE_P (target))
3383 || (REG_P (target)
3384 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3385 target = gen_reg_rtx (mode);
3386
3387 emit_move_insn (target, op0);
3388 NO_DEFER_POP;
3389
3390 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3391 NULL_RTX, NULL_RTX, op1, -1);
3392
3393 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3394 target, target, 0);
3395 if (op0 != target)
3396 emit_move_insn (target, op0);
3397 emit_label (op1);
3398 OK_DEFER_POP;
3399 return target;
3400 }
3401
3402 /* Emit code to compute the one's complement absolute value of OP0
3403 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3404 (TARGET may be NULL_RTX.) The return value says where the result
3405 actually is to be found.
3406
3407 MODE is the mode of the operand; the mode of the result is
3408 different but can be deduced from MODE. */
3409
3410 rtx
3411 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3412 {
3413 rtx temp;
3414
3415 /* Not applicable for floating point modes. */
3416 if (FLOAT_MODE_P (mode))
3417 return NULL_RTX;
3418
3419 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3420 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3421 {
3422 rtx last = get_last_insn ();
3423
3424 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3425 if (temp != 0)
3426 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3427 OPTAB_WIDEN);
3428
3429 if (temp != 0)
3430 return temp;
3431
3432 delete_insns_since (last);
3433 }
3434
3435 /* If this machine has expensive jumps, we can do one's complement
3436 absolute value of X as (((signed) x >> (W-1)) ^ x). */
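  /* For x < 0 the shift yields -1 and the XOR therefore computes ~x;
     for x >= 0 it yields 0 and leaves x unchanged.  */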
3437
3438 if (GET_MODE_CLASS (mode) == MODE_INT
3439 && BRANCH_COST (optimize_insn_for_speed_p (),
3440 false) >= 2)
3441 {
3442 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3443 GET_MODE_PRECISION (mode) - 1,
3444 NULL_RTX, 0);
3445
3446 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3447 OPTAB_LIB_WIDEN);
3448
3449 if (temp != 0)
3450 return temp;
3451 }
3452
3453 return NULL_RTX;
3454 }
3455
3456 /* A subroutine of expand_copysign, perform the copysign operation using the
3457 abs and neg primitives advertised to exist on the target. The assumption
3458 is that we have a split register file, and leaving op0 in fp registers,
3459 and not playing with subregs so much, will help the register allocator. */
3460
3461 static rtx
3462 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3463 int bitpos, bool op0_is_abs)
3464 {
3465 enum machine_mode imode;
3466 enum insn_code icode;
3467 rtx sign, label;
3468
3469 if (target == op1)
3470 target = NULL_RTX;
3471
3472 /* Check if the back end provides an insn that handles signbit for the
3473 argument's mode. */
3474 icode = optab_handler (signbit_optab, mode);
3475 if (icode != CODE_FOR_nothing)
3476 {
3477 imode = insn_data[(int) icode].operand[0].mode;
3478 sign = gen_reg_rtx (imode);
3479 emit_unop_insn (icode, sign, op1, UNKNOWN);
3480 }
3481 else
3482 {
3483 double_int mask;
3484
3485 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3486 {
3487 imode = int_mode_for_mode (mode);
3488 if (imode == BLKmode)
3489 return NULL_RTX;
3490 op1 = gen_lowpart (imode, op1);
3491 }
3492 else
3493 {
3494 int word;
3495
3496 imode = word_mode;
3497 if (FLOAT_WORDS_BIG_ENDIAN)
3498 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3499 else
3500 word = bitpos / BITS_PER_WORD;
3501 bitpos = bitpos % BITS_PER_WORD;
3502 op1 = operand_subword_force (op1, word, mode);
3503 }
3504
3505 mask = double_int_setbit (double_int_zero, bitpos);
3506
3507 sign = expand_binop (imode, and_optab, op1,
3508 immed_double_int_const (mask, imode),
3509 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3510 }
3511
3512 if (!op0_is_abs)
3513 {
3514 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3515 if (op0 == NULL)
3516 return NULL_RTX;
3517 target = op0;
3518 }
3519 else
3520 {
3521 if (target == NULL_RTX)
3522 target = copy_to_reg (op0);
3523 else
3524 emit_move_insn (target, op0);
3525 }
3526
3527 label = gen_label_rtx ();
3528 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3529
3530 if (GET_CODE (op0) == CONST_DOUBLE)
3531 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3532 else
3533 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3534 if (op0 != target)
3535 emit_move_insn (target, op0);
3536
3537 emit_label (label);
3538
3539 return target;
3540 }
3541
3542
3543 /* A subroutine of expand_copysign, perform the entire copysign operation
3544 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3545 is true if op0 is known to have its sign bit clear. */
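/* The computation performed on the integer image of the operands is
       (op0 & ~SIGN_MASK) | (op1 & SIGN_MASK),
   e.g. with SIGN_MASK == 0x80000000 for IEEE binary32; when OP0_IS_ABS
   the first AND can be omitted.  */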
3546
3547 static rtx
3548 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3549 int bitpos, bool op0_is_abs)
3550 {
3551 enum machine_mode imode;
3552 double_int mask;
3553 int word, nwords, i;
3554 rtx temp, insns;
3555
3556 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3557 {
3558 imode = int_mode_for_mode (mode);
3559 if (imode == BLKmode)
3560 return NULL_RTX;
3561 word = 0;
3562 nwords = 1;
3563 }
3564 else
3565 {
3566 imode = word_mode;
3567
3568 if (FLOAT_WORDS_BIG_ENDIAN)
3569 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3570 else
3571 word = bitpos / BITS_PER_WORD;
3572 bitpos = bitpos % BITS_PER_WORD;
3573 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3574 }
3575
3576 mask = double_int_setbit (double_int_zero, bitpos);
3577
3578 if (target == 0
3579 || target == op0
3580 || target == op1
3581 || (nwords > 1 && !valid_multiword_target_p (target)))
3582 target = gen_reg_rtx (mode);
3583
3584 if (nwords > 1)
3585 {
3586 start_sequence ();
3587
3588 for (i = 0; i < nwords; ++i)
3589 {
3590 rtx targ_piece = operand_subword (target, i, 1, mode);
3591 rtx op0_piece = operand_subword_force (op0, i, mode);
3592
3593 if (i == word)
3594 {
3595 if (!op0_is_abs)
3596 op0_piece
3597 = expand_binop (imode, and_optab, op0_piece,
3598 immed_double_int_const (double_int_not (mask),
3599 imode),
3600 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3601
3602 op1 = expand_binop (imode, and_optab,
3603 operand_subword_force (op1, i, mode),
3604 immed_double_int_const (mask, imode),
3605 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3606
3607 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3608 targ_piece, 1, OPTAB_LIB_WIDEN);
3609 if (temp != targ_piece)
3610 emit_move_insn (targ_piece, temp);
3611 }
3612 else
3613 emit_move_insn (targ_piece, op0_piece);
3614 }
3615
3616 insns = get_insns ();
3617 end_sequence ();
3618
3619 emit_insn (insns);
3620 }
3621 else
3622 {
3623 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3624 immed_double_int_const (mask, imode),
3625 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3626
3627 op0 = gen_lowpart (imode, op0);
3628 if (!op0_is_abs)
3629 op0 = expand_binop (imode, and_optab, op0,
3630 immed_double_int_const (double_int_not (mask),
3631 imode),
3632 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3633
3634 temp = expand_binop (imode, ior_optab, op0, op1,
3635 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3636 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3637 }
3638
3639 return target;
3640 }
3641
3642 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3643 scalar floating point mode. Return NULL if we do not know how to
3644 expand the operation inline. */
3645
3646 rtx
3647 expand_copysign (rtx op0, rtx op1, rtx target)
3648 {
3649 enum machine_mode mode = GET_MODE (op0);
3650 const struct real_format *fmt;
3651 bool op0_is_abs;
3652 rtx temp;
3653
3654 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3655 gcc_assert (GET_MODE (op1) == mode);
3656
3657 /* First try to do it with a special instruction. */
3658 temp = expand_binop (mode, copysign_optab, op0, op1,
3659 target, 0, OPTAB_DIRECT);
3660 if (temp)
3661 return temp;
3662
3663 fmt = REAL_MODE_FORMAT (mode);
3664 if (fmt == NULL || !fmt->has_signed_zero)
3665 return NULL_RTX;
3666
3667 op0_is_abs = false;
3668 if (GET_CODE (op0) == CONST_DOUBLE)
3669 {
3670 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3671 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3672 op0_is_abs = true;
3673 }
3674
3675 if (fmt->signbit_ro >= 0
3676 && (GET_CODE (op0) == CONST_DOUBLE
3677 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3678 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3679 {
3680 temp = expand_copysign_absneg (mode, op0, op1, target,
3681 fmt->signbit_ro, op0_is_abs);
3682 if (temp)
3683 return temp;
3684 }
3685
3686 if (fmt->signbit_rw < 0)
3687 return NULL_RTX;
3688 return expand_copysign_bit (mode, op0, op1, target,
3689 fmt->signbit_rw, op0_is_abs);
3690 }
3691 \f
3692 /* Generate an instruction whose insn-code is INSN_CODE,
3693 with two operands: an output TARGET and an input OP0.
3694 TARGET *must* be nonzero, and the output is always stored there.
3695 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3696 the value that is stored into TARGET.
3697
3698 Return false if expansion failed. */
3699
3700 bool
3701 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3702 enum rtx_code code)
3703 {
3704 struct expand_operand ops[2];
3705 rtx pat;
3706
3707 create_output_operand (&ops[0], target, GET_MODE (target));
3708 create_input_operand (&ops[1], op0, GET_MODE (op0));
3709 pat = maybe_gen_insn (icode, 2, ops);
3710 if (!pat)
3711 return false;
3712
3713 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3714 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3715
3716 emit_insn (pat);
3717
3718 if (ops[0].value != target)
3719 emit_move_insn (target, ops[0].value);
3720 return true;
3721 }
3722 /* Generate an instruction whose insn-code is INSN_CODE,
3723 with two operands: an output TARGET and an input OP0.
3724 TARGET *must* be nonzero, and the output is always stored there.
3725 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3726 the value that is stored into TARGET. */
3727
3728 void
3729 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3730 {
3731 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3732 gcc_assert (ok);
3733 }
3734 \f
3735 struct no_conflict_data
3736 {
3737 rtx target, first, insn;
3738 bool must_stay;
3739 };
3740
3741 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3742 the currently examined clobber / store has to stay in the list of
3743 insns that constitute the actual libcall block. */
3744 static void
3745 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3746 {
3747 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3748
3749 /* If this insn directly contributes to setting the target, it must stay. */
3750 if (reg_overlap_mentioned_p (p->target, dest))
3751 p->must_stay = true;
3752 /* If we haven't committed to keeping any other insns in the list yet,
3753 there is nothing more to check. */
3754 else if (p->insn == p->first)
3755 return;
3756 /* If this insn sets / clobbers a register that feeds one of the insns
3757 already in the list, this insn has to stay too. */
3758 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3759 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3760 || reg_used_between_p (dest, p->first, p->insn)
3761 /* Likewise if this insn depends on a register set by a previous
3762 insn in the list, or if it sets a result (presumably a hard
3763 register) that is set or clobbered by a previous insn.
3764 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3765 SET_DEST perform the former check on the address, and the latter
3766 check on the MEM. */
3767 || (GET_CODE (set) == SET
3768 && (modified_in_p (SET_SRC (set), p->first)
3769 || modified_in_p (SET_DEST (set), p->first)
3770 || modified_between_p (SET_SRC (set), p->first, p->insn)
3771 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3772 p->must_stay = true;
3773 }
3774
3775 \f
3776 /* Emit code to make a call to a constant function or a library call.
3777
3778 INSNS is a list containing all insns emitted in the call.
3779 These insns leave the result in RESULT. Our job is to copy RESULT
3780 to TARGET, which is logically equivalent to EQUIV.
3781
3782 We first emit any insns that set a pseudo on the assumption that these are
3783 loading constants into registers; doing so allows them to be safely cse'ed
3784 between blocks. Then we emit all the other insns in the block, followed by
3785 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3786 note with an operand of EQUIV. */
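/* As an illustration (not tied to any particular target): when a DImode
   multiply is expanded as a libcall, the final move of RESULT into TARGET
   carries a REG_EQUAL note of the form (mult:DI x y), which is what lets
   later RTL passes CSE or delete the whole block as if it were that single
   operation.  */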
3787
3788 void
3789 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3790 {
3791 rtx final_dest = target;
3792 rtx next, last, insn;
3793
3794 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3795 into a MEM later. Protect the libcall block from this change. */
3796 if (! REG_P (target) || REG_USERVAR_P (target))
3797 target = gen_reg_rtx (GET_MODE (target));
3798
3799 /* If we're using non-call exceptions, a libcall corresponding to an
3800 operation that may trap may also trap. */
3801 /* ??? See the comment in front of make_reg_eh_region_note. */
3802 if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv))
3803 {
3804 for (insn = insns; insn; insn = NEXT_INSN (insn))
3805 if (CALL_P (insn))
3806 {
3807 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3808 if (note)
3809 {
3810 int lp_nr = INTVAL (XEXP (note, 0));
3811 if (lp_nr == 0 || lp_nr == INT_MIN)
3812 remove_note (insn, note);
3813 }
3814 }
3815 }
3816 else
3817 {
3818 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3819 reg note to indicate that this call cannot throw or execute a nonlocal
3820 goto (unless there is already a REG_EH_REGION note, in which case
3821 we update it). */
3822 for (insn = insns; insn; insn = NEXT_INSN (insn))
3823 if (CALL_P (insn))
3824 make_reg_eh_region_note_nothrow_nononlocal (insn);
3825 }
3826
3827 /* First emit all insns that set pseudos. Remove them from the list as
3828 we go. Avoid insns that set pseudos which were referenced in previous
3829 insns. These can be generated by move_by_pieces, for example,
3830 to update an address. Similarly, avoid insns that reference things
3831 set in previous insns. */
3832
3833 for (insn = insns; insn; insn = next)
3834 {
3835 rtx set = single_set (insn);
3836
3837 next = NEXT_INSN (insn);
3838
3839 if (set != 0 && REG_P (SET_DEST (set))
3840 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3841 {
3842 struct no_conflict_data data;
3843
3844 data.target = const0_rtx;
3845 data.first = insns;
3846 data.insn = insn;
3847 data.must_stay = 0;
3848 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3849 if (! data.must_stay)
3850 {
3851 if (PREV_INSN (insn))
3852 NEXT_INSN (PREV_INSN (insn)) = next;
3853 else
3854 insns = next;
3855
3856 if (next)
3857 PREV_INSN (next) = PREV_INSN (insn);
3858
3859 add_insn (insn);
3860 }
3861 }
3862
3863 /* Some ports use a loop to copy large arguments onto the stack.
3864 Don't move anything outside such a loop. */
3865 if (LABEL_P (insn))
3866 break;
3867 }
3868
3869 /* Write the remaining insns followed by the final copy. */
3870 for (insn = insns; insn; insn = next)
3871 {
3872 next = NEXT_INSN (insn);
3873
3874 add_insn (insn);
3875 }
3876
3877 last = emit_move_insn (target, result);
3878 if (optab_handler (mov_optab, GET_MODE (target)) != CODE_FOR_nothing)
3879 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3880
3881 if (final_dest != target)
3882 emit_move_insn (final_dest, target);
3883 }
3884 \f
3885 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3886 PURPOSE describes how this comparison will be used. CODE is the rtx
3887 comparison code we will be using.
3888
3889 ??? Actually, CODE is slightly weaker than that. A target is still
3890 required to implement all of the normal bcc operations, but not
3891 required to implement all (or any) of the unordered bcc operations. */
3892
3893 int
3894 can_compare_p (enum rtx_code code, enum machine_mode mode,
3895 enum can_compare_purpose purpose)
3896 {
3897 rtx test;
3898 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3899 do
3900 {
3901 enum insn_code icode;
3902
3903 if (purpose == ccp_jump
3904 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3905 && insn_operand_matches (icode, 0, test))
3906 return 1;
3907 if (purpose == ccp_store_flag
3908 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3909 && insn_operand_matches (icode, 1, test))
3910 return 1;
3911 if (purpose == ccp_cmov
3912 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3913 return 1;
3914
3915 mode = GET_MODE_WIDER_MODE (mode);
3916 PUT_MODE (test, mode);
3917 }
3918 while (mode != VOIDmode);
3919
3920 return 0;
3921 }
3922
3923 /* This function is called when we are going to emit a compare instruction that
3924 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3925
3926 *PMODE is the mode of the inputs (in case they are const_int).
3927 *PUNSIGNEDP nonzero says that the operands are unsigned;
3928 this matters if they need to be widened (as given by METHODS).
3929
3930 If they have mode BLKmode, then SIZE specifies the size of both operands.
3931
3932 This function performs all the setup necessary so that the caller only has
3933 to emit a single comparison insn. This setup can involve doing a BLKmode
3934 comparison or emitting a library call to perform the comparison if no insn
3935 is available to handle it.
3936 The values which are passed in through pointers can be modified; the caller
3937 should perform the comparison on the modified values. Constant
3938 comparisons must have already been folded. */
3939
3940 static void
3941 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3942 int unsignedp, enum optab_methods methods,
3943 rtx *ptest, enum machine_mode *pmode)
3944 {
3945 enum machine_mode mode = *pmode;
3946 rtx libfunc, test;
3947 enum machine_mode cmp_mode;
3948 enum mode_class mclass;
3949
3950 /* The other methods are not needed. */
3951 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3952 || methods == OPTAB_LIB_WIDEN);
3953
3954 /* If we are optimizing, force expensive constants into a register. */
3955 if (CONSTANT_P (x) && optimize
3956 && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
3957 > COSTS_N_INSNS (1)))
3958 x = force_reg (mode, x);
3959
3960 if (CONSTANT_P (y) && optimize
3961 && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
3962 > COSTS_N_INSNS (1)))
3963 y = force_reg (mode, y);
3964
3965 #ifdef HAVE_cc0
3966 /* Make sure we have a canonical comparison. The RTL
3967 documentation states that canonical comparisons are required only
3968 for targets which have cc0. */
3969 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3970 #endif
3971
3972 /* Don't let both operands fail to indicate the mode. */
3973 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3974 x = force_reg (mode, x);
3975 if (mode == VOIDmode)
3976 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3977
3978 /* Handle all BLKmode compares. */
3979
3980 if (mode == BLKmode)
3981 {
3982 enum machine_mode result_mode;
3983 enum insn_code cmp_code;
3984 tree length_type;
3985 rtx libfunc;
3986 rtx result;
3987 rtx opalign
3988 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3989
3990 gcc_assert (size);
3991
3992 /* Try to use a memory block compare insn - either cmpstr
3993 or cmpmem will do. */
3994 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3995 cmp_mode != VOIDmode;
3996 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3997 {
3998 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3999 if (cmp_code == CODE_FOR_nothing)
4000 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4001 if (cmp_code == CODE_FOR_nothing)
4002 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4003 if (cmp_code == CODE_FOR_nothing)
4004 continue;
4005
4006 /* Must make sure the size fits the insn's mode. */
4007 if ((CONST_INT_P (size)
4008 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4009 || (GET_MODE_BITSIZE (GET_MODE (size))
4010 > GET_MODE_BITSIZE (cmp_mode)))
4011 continue;
4012
4013 result_mode = insn_data[cmp_code].operand[0].mode;
4014 result = gen_reg_rtx (result_mode);
4015 size = convert_to_mode (cmp_mode, size, 1);
4016 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4017
4018 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4019 *pmode = result_mode;
4020 return;
4021 }
4022
4023 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4024 goto fail;
4025
4026 /* Otherwise call a library function, memcmp. */
4027 libfunc = memcmp_libfunc;
4028 length_type = sizetype;
4029 result_mode = TYPE_MODE (integer_type_node);
4030 cmp_mode = TYPE_MODE (length_type);
4031 size = convert_to_mode (TYPE_MODE (length_type), size,
4032 TYPE_UNSIGNED (length_type));
4033
4034 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4035 result_mode, 3,
4036 XEXP (x, 0), Pmode,
4037 XEXP (y, 0), Pmode,
4038 size, cmp_mode);
4039
4040 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4041 *pmode = result_mode;
4042 return;
4043 }
4044
4045 /* Don't allow operands to the compare to trap, as that can put the
4046 compare and branch in different basic blocks. */
4047 if (cfun->can_throw_non_call_exceptions)
4048 {
4049 if (may_trap_p (x))
4050 x = force_reg (mode, x);
4051 if (may_trap_p (y))
4052 y = force_reg (mode, y);
4053 }
4054
4055 if (GET_MODE_CLASS (mode) == MODE_CC)
4056 {
4057 gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
4058 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4059 return;
4060 }
4061
4062 mclass = GET_MODE_CLASS (mode);
4063 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4064 cmp_mode = mode;
4065 do
4066 {
4067 enum insn_code icode;
4068 icode = optab_handler (cbranch_optab, cmp_mode);
4069 if (icode != CODE_FOR_nothing
4070 && insn_operand_matches (icode, 0, test))
4071 {
4072 rtx last = get_last_insn ();
4073 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4074 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4075 if (op0 && op1
4076 && insn_operand_matches (icode, 1, op0)
4077 && insn_operand_matches (icode, 2, op1))
4078 {
4079 XEXP (test, 0) = op0;
4080 XEXP (test, 1) = op1;
4081 *ptest = test;
4082 *pmode = cmp_mode;
4083 return;
4084 }
4085 delete_insns_since (last);
4086 }
4087
4088 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4089 break;
4090 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
4091 }
4092 while (cmp_mode != VOIDmode);
4093
4094 if (methods != OPTAB_LIB_WIDEN)
4095 goto fail;
4096
4097 if (!SCALAR_FLOAT_MODE_P (mode))
4098 {
4099 rtx result;
4100
4101 /* Handle a libcall just for the mode we are using. */
4102 libfunc = optab_libfunc (cmp_optab, mode);
4103 gcc_assert (libfunc);
4104
4105 /* If we want unsigned, and this mode has a distinct unsigned
4106 comparison routine, use that. */
4107 if (unsignedp)
4108 {
4109 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4110 if (ulibfunc)
4111 libfunc = ulibfunc;
4112 }
4113
4114 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4115 targetm.libgcc_cmp_return_mode (),
4116 2, x, mode, y, mode);
4117
4118 /* There are two kinds of comparison routines. Biased routines
4119 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4120 of gcc expect that the comparison operation is equivalent
4121 to the modified comparison. For signed comparisons compare the
4122 result against 1 in the biased case, and zero in the unbiased
4123 case. For unsigned comparisons always compare against 1 after
4124 biasing the unbiased result by adding 1. This gives us a way to
4125 represent LTU.
4126 The comparisons in the fixed-point helper library are always
4127 biased. */
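/* A hedged illustration with generic values (individual libgcc routines
   may differ): a biased helper returns 0, 1 or 2 for "<", "==" or ">",
   so LT, EQ and GT map onto result < 1, result == 1 and result > 1.
   An unbiased helper returns -1, 0 or 1; adding 1 rebiases that to
   0/1/2, so the same unsigned compare against 1 works and LTU becomes
   representable.  */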
4128 x = result;
4129 y = const1_rtx;
4130
4131 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4132 {
4133 if (unsignedp)
4134 x = plus_constant (result, 1);
4135 else
4136 y = const0_rtx;
4137 }
4138
4139 *pmode = word_mode;
4140 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4141 ptest, pmode);
4142 }
4143 else
4144 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4145
4146 return;
4147
4148 fail:
4149 *ptest = NULL_RTX;
4150 }
4151
4152 /* Before emitting an insn with code ICODE, make sure that X, which is going
4153 to be used for operand OPNUM of the insn, is converted from mode MODE to
4154 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4155 that it is accepted by the operand predicate. Return the new value. */
4156
4157 rtx
4158 prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
4159 enum machine_mode wider_mode, int unsignedp)
4160 {
4161 if (mode != wider_mode)
4162 x = convert_modes (wider_mode, mode, x, unsignedp);
4163
4164 if (!insn_operand_matches (icode, opnum, x))
4165 {
4166 if (reload_completed)
4167 return NULL_RTX;
4168 x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
4169 }
4170
4171 return x;
4172 }
4173
4174 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4175 we can do the branch. */
4176
4177 static void
4178 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4179 {
4180 enum machine_mode optab_mode;
4181 enum mode_class mclass;
4182 enum insn_code icode;
4183
4184 mclass = GET_MODE_CLASS (mode);
4185 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4186 icode = optab_handler (cbranch_optab, optab_mode);
4187
4188 gcc_assert (icode != CODE_FOR_nothing);
4189 gcc_assert (insn_operand_matches (icode, 0, test));
4190 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4191 }
4192
4193 /* Generate code to compare X with Y so that the condition codes are
4194 set and to jump to LABEL if the condition is true. If X is a
4195 constant and Y is not a constant, then the comparison is swapped to
4196 ensure that the comparison RTL has the canonical form.
4197
4198 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4199 need to be widened. UNSIGNEDP is also used to select the proper
4200 branch condition code.
4201
4202 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4203
4204 MODE is the mode of the inputs (in case they are const_int).
4205
4206 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4207 It will be potentially converted into an unsigned variant based on
4208 UNSIGNEDP to select a proper jump instruction. */
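/* A minimal usage sketch, mirroring calls made elsewhere in this file:

     emit_cmp_and_jump_insns (x, const0_rtx, GE, NULL_RTX, GET_MODE (x),
                              0, label);

   emits a signed "x >= 0" compare and branches to LABEL when it holds.  */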
4209
4210 void
4211 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4212 enum machine_mode mode, int unsignedp, rtx label)
4213 {
4214 rtx op0 = x, op1 = y;
4215 rtx test;
4216
4217 /* Swap operands and condition to ensure canonical RTL. */
4218 if (swap_commutative_operands_p (x, y)
4219 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4220 {
4221 op0 = y, op1 = x;
4222 comparison = swap_condition (comparison);
4223 }
4224
4225 /* If OP0 is still a constant, then both X and Y must be constants
4226 or the opposite comparison is not supported. Force X into a register
4227 to create canonical RTL. */
4228 if (CONSTANT_P (op0))
4229 op0 = force_reg (mode, op0);
4230
4231 if (unsignedp)
4232 comparison = unsigned_condition (comparison);
4233
4234 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4235 &test, &mode);
4236 emit_cmp_and_jump_insn_1 (test, mode, label);
4237 }
4238
4239 \f
4240 /* Emit a library call comparison between floating point X and Y.
4241 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4242
4243 static void
4244 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4245 rtx *ptest, enum machine_mode *pmode)
4246 {
4247 enum rtx_code swapped = swap_condition (comparison);
4248 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4249 enum machine_mode orig_mode = GET_MODE (x);
4250 enum machine_mode mode, cmp_mode;
4251 rtx true_rtx, false_rtx;
4252 rtx value, target, insns, equiv;
4253 rtx libfunc = 0;
4254 bool reversed_p = false;
4255 cmp_mode = targetm.libgcc_cmp_return_mode ();
4256
4257 for (mode = orig_mode;
4258 mode != VOIDmode;
4259 mode = GET_MODE_WIDER_MODE (mode))
4260 {
4261 if (code_to_optab[comparison]
4262 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4263 break;
4264
4265 if (code_to_optab[swapped]
4266 && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4267 {
4268 rtx tmp;
4269 tmp = x; x = y; y = tmp;
4270 comparison = swapped;
4271 break;
4272 }
4273
4274 if (code_to_optab[reversed]
4275 && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4276 {
4277 comparison = reversed;
4278 reversed_p = true;
4279 break;
4280 }
4281 }
4282
4283 gcc_assert (mode != VOIDmode);
4284
4285 if (mode != orig_mode)
4286 {
4287 x = convert_to_mode (mode, x, 0);
4288 y = convert_to_mode (mode, y, 0);
4289 }
4290
4291 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4292 the RTL. This allows the RTL optimizers to delete the libcall if the
4293 condition can be determined at compile-time. */
4294 if (comparison == UNORDERED
4295 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4296 {
4297 true_rtx = const_true_rtx;
4298 false_rtx = const0_rtx;
4299 }
4300 else
4301 {
4302 switch (comparison)
4303 {
4304 case EQ:
4305 true_rtx = const0_rtx;
4306 false_rtx = const_true_rtx;
4307 break;
4308
4309 case NE:
4310 true_rtx = const_true_rtx;
4311 false_rtx = const0_rtx;
4312 break;
4313
4314 case GT:
4315 true_rtx = const1_rtx;
4316 false_rtx = const0_rtx;
4317 break;
4318
4319 case GE:
4320 true_rtx = const0_rtx;
4321 false_rtx = constm1_rtx;
4322 break;
4323
4324 case LT:
4325 true_rtx = constm1_rtx;
4326 false_rtx = const0_rtx;
4327 break;
4328
4329 case LE:
4330 true_rtx = const0_rtx;
4331 false_rtx = const1_rtx;
4332 break;
4333
4334 default:
4335 gcc_unreachable ();
4336 }
4337 }
4338
4339 if (comparison == UNORDERED)
4340 {
4341 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4342 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4343 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4344 temp, const_true_rtx, equiv);
4345 }
4346 else
4347 {
4348 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4349 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4350 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4351 equiv, true_rtx, false_rtx);
4352 }
4353
4354 start_sequence ();
4355 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4356 cmp_mode, 2, x, mode, y, mode);
4357 insns = get_insns ();
4358 end_sequence ();
4359
4360 target = gen_reg_rtx (cmp_mode);
4361 emit_libcall_block (insns, target, value, equiv);
4362
4363 if (comparison == UNORDERED
4364 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4365 || reversed_p)
4366 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4367 else
4368 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4369
4370 *pmode = cmp_mode;
4371 }
4372 \f
4373 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4374
4375 void
4376 emit_indirect_jump (rtx loc)
4377 {
4378 struct expand_operand ops[1];
4379
4380 create_address_operand (&ops[0], loc);
4381 expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
4382 emit_barrier ();
4383 }
4384 \f
4385 #ifdef HAVE_conditional_move
4386
4387 /* Emit a conditional move instruction if the machine supports one for that
4388 condition and machine mode.
4389
4390 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4391 the mode to use should they be constants. If it is VOIDmode, they cannot
4392 both be constants.
4393
4394 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4395 should be stored there. MODE is the mode to use should they be constants.
4396 If it is VOIDmode, they cannot both be constants.
4397
4398 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4399 is not supported. */
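/* A hedged usage sketch (the operands are illustrative only):

     rtx r = emit_conditional_move (target, GT, a, b, SImode,
                                    x, y, SImode, 0);

   requests "target = (a > b) ? x : y" and yields NULL_RTX when no usable
   movcc pattern exists for SImode.  */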
4400
4401 rtx
4402 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4403 enum machine_mode cmode, rtx op2, rtx op3,
4404 enum machine_mode mode, int unsignedp)
4405 {
4406 rtx tem, comparison, last;
4407 enum insn_code icode;
4408 enum rtx_code reversed;
4409
4410 /* If one operand is constant, make it the second one. Only do this
4411 if the other operand is not constant as well. */
4412
4413 if (swap_commutative_operands_p (op0, op1))
4414 {
4415 tem = op0;
4416 op0 = op1;
4417 op1 = tem;
4418 code = swap_condition (code);
4419 }
4420
4421 /* get_condition will prefer to generate LT and GT even if the old
4422 comparison was against zero, so undo that canonicalization here since
4423 comparisons against zero are cheaper. */
4424 if (code == LT && op1 == const1_rtx)
4425 code = LE, op1 = const0_rtx;
4426 else if (code == GT && op1 == constm1_rtx)
4427 code = GE, op1 = const0_rtx;
4428
4429 if (cmode == VOIDmode)
4430 cmode = GET_MODE (op0);
4431
4432 if (swap_commutative_operands_p (op2, op3)
4433 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4434 != UNKNOWN))
4435 {
4436 tem = op2;
4437 op2 = op3;
4438 op3 = tem;
4439 code = reversed;
4440 }
4441
4442 if (mode == VOIDmode)
4443 mode = GET_MODE (op2);
4444
4445 icode = direct_optab_handler (movcc_optab, mode);
4446
4447 if (icode == CODE_FOR_nothing)
4448 return 0;
4449
4450 if (!target)
4451 target = gen_reg_rtx (mode);
4452
4453 code = unsignedp ? unsigned_condition (code) : code;
4454 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4455
4456 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4457 return NULL and let the caller figure out how best to deal with this
4458 situation. */
4459 if (!COMPARISON_P (comparison))
4460 return NULL_RTX;
4461
4462 do_pending_stack_adjust ();
4463 last = get_last_insn ();
4464 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4465 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4466 &comparison, &cmode);
4467 if (comparison)
4468 {
4469 struct expand_operand ops[4];
4470
4471 create_output_operand (&ops[0], target, mode);
4472 create_fixed_operand (&ops[1], comparison);
4473 create_input_operand (&ops[2], op2, mode);
4474 create_input_operand (&ops[3], op3, mode);
4475 if (maybe_expand_insn (icode, 4, ops))
4476 {
4477 if (ops[0].value != target)
4478 convert_move (target, ops[0].value, false);
4479 return target;
4480 }
4481 }
4482 delete_insns_since (last);
4483 return NULL_RTX;
4484 }
4485
4486 /* Return nonzero if a conditional move of mode MODE is supported.
4487
4488 This function is for combine so it can tell whether an insn that looks
4489 like a conditional move is actually supported by the hardware. If we
4490 guess wrong we lose a bit on optimization, but that's it. */
4491 /* ??? sparc64 supports conditionally moving integer values based on fp
4492 comparisons, and vice versa. How do we handle them? */
4493
4494 int
4495 can_conditionally_move_p (enum machine_mode mode)
4496 {
4497 if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4498 return 1;
4499
4500 return 0;
4501 }
4502
4503 #endif /* HAVE_conditional_move */
4504
4505 /* Emit a conditional addition instruction if the machine supports one for that
4506 condition and machine mode.
4507
4508 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4509 the mode to use should they be constants. If it is VOIDmode, they cannot
4510 both be constants.
4511
4512 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4513 should be stored there. MODE is the mode to use should they be constants.
4514 If it is VOIDmode, they cannot both be constants.
4515
4516 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4517 is not supported. */
4518
4519 rtx
4520 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4521 enum machine_mode cmode, rtx op2, rtx op3,
4522 enum machine_mode mode, int unsignedp)
4523 {
4524 rtx tem, comparison, last;
4525 enum insn_code icode;
4526 enum rtx_code reversed;
4527
4528 /* If one operand is constant, make it the second one. Only do this
4529 if the other operand is not constant as well. */
4530
4531 if (swap_commutative_operands_p (op0, op1))
4532 {
4533 tem = op0;
4534 op0 = op1;
4535 op1 = tem;
4536 code = swap_condition (code);
4537 }
4538
4539 /* get_condition will prefer to generate LT and GT even if the old
4540 comparison was against zero, so undo that canonicalization here since
4541 comparisons against zero are cheaper. */
4542 if (code == LT && op1 == const1_rtx)
4543 code = LE, op1 = const0_rtx;
4544 else if (code == GT && op1 == constm1_rtx)
4545 code = GE, op1 = const0_rtx;
4546
4547 if (cmode == VOIDmode)
4548 cmode = GET_MODE (op0);
4549
4550 if (swap_commutative_operands_p (op2, op3)
4551 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4552 != UNKNOWN))
4553 {
4554 tem = op2;
4555 op2 = op3;
4556 op3 = tem;
4557 code = reversed;
4558 }
4559
4560 if (mode == VOIDmode)
4561 mode = GET_MODE (op2);
4562
4563 icode = optab_handler (addcc_optab, mode);
4564
4565 if (icode == CODE_FOR_nothing)
4566 return 0;
4567
4568 if (!target)
4569 target = gen_reg_rtx (mode);
4570
4571 code = unsignedp ? unsigned_condition (code) : code;
4572 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4573
4574 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4575 return NULL and let the caller figure out how best to deal with this
4576 situation. */
4577 if (!COMPARISON_P (comparison))
4578 return NULL_RTX;
4579
4580 do_pending_stack_adjust ();
4581 last = get_last_insn ();
4582 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4583 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4584 &comparison, &cmode);
4585 if (comparison)
4586 {
4587 struct expand_operand ops[4];
4588
4589 create_output_operand (&ops[0], target, mode);
4590 create_fixed_operand (&ops[1], comparison);
4591 create_input_operand (&ops[2], op2, mode);
4592 create_input_operand (&ops[3], op3, mode);
4593 if (maybe_expand_insn (icode, 4, ops))
4594 {
4595 if (ops[0].value != target)
4596 convert_move (target, ops[0].value, false);
4597 return target;
4598 }
4599 }
4600 delete_insns_since (last);
4601 return NULL_RTX;
4602 }
4603 \f
4604 /* These functions attempt to generate an insn body, rather than
4605 emitting the insn, but if the gen function already emits them, we
4606 make no attempt to turn them back into naked patterns. */
4607
4608 /* Generate and return an insn body to add Y to X. */
4609
4610 rtx
4611 gen_add2_insn (rtx x, rtx y)
4612 {
4613 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4614
4615 gcc_assert (insn_operand_matches (icode, 0, x));
4616 gcc_assert (insn_operand_matches (icode, 1, x));
4617 gcc_assert (insn_operand_matches (icode, 2, y));
4618
4619 return GEN_FCN (icode) (x, x, y);
4620 }
4621
4622 /* Generate and return an insn body to add r1 and c,
4623 storing the result in r0. */
4624
4625 rtx
4626 gen_add3_insn (rtx r0, rtx r1, rtx c)
4627 {
4628 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4629
4630 if (icode == CODE_FOR_nothing
4631 || !insn_operand_matches (icode, 0, r0)
4632 || !insn_operand_matches (icode, 1, r1)
4633 || !insn_operand_matches (icode, 2, c))
4634 return NULL_RTX;
4635
4636 return GEN_FCN (icode) (r0, r1, c);
4637 }
4638
4639 int
4640 have_add2_insn (rtx x, rtx y)
4641 {
4642 enum insn_code icode;
4643
4644 gcc_assert (GET_MODE (x) != VOIDmode);
4645
4646 icode = optab_handler (add_optab, GET_MODE (x));
4647
4648 if (icode == CODE_FOR_nothing)
4649 return 0;
4650
4651 if (!insn_operand_matches (icode, 0, x)
4652 || !insn_operand_matches (icode, 1, x)
4653 || !insn_operand_matches (icode, 2, y))
4654 return 0;
4655
4656 return 1;
4657 }
4658
4659 /* Generate and return an insn body to subtract Y from X. */
4660
4661 rtx
4662 gen_sub2_insn (rtx x, rtx y)
4663 {
4664 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4665
4666 gcc_assert (insn_operand_matches (icode, 0, x));
4667 gcc_assert (insn_operand_matches (icode, 1, x));
4668 gcc_assert (insn_operand_matches (icode, 2, y));
4669
4670 return GEN_FCN (icode) (x, x, y);
4671 }
4672
4673 /* Generate and return an insn body to subtract r1 and c,
4674 storing the result in r0. */
4675
4676 rtx
4677 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4678 {
4679 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4680
4681 if (icode == CODE_FOR_nothing
4682 || !insn_operand_matches (icode, 0, r0)
4683 || !insn_operand_matches (icode, 1, r1)
4684 || !insn_operand_matches (icode, 2, c))
4685 return NULL_RTX;
4686
4687 return GEN_FCN (icode) (r0, r1, c);
4688 }
4689
4690 int
4691 have_sub2_insn (rtx x, rtx y)
4692 {
4693 enum insn_code icode;
4694
4695 gcc_assert (GET_MODE (x) != VOIDmode);
4696
4697 icode = optab_handler (sub_optab, GET_MODE (x));
4698
4699 if (icode == CODE_FOR_nothing)
4700 return 0;
4701
4702 if (!insn_operand_matches (icode, 0, x)
4703 || !insn_operand_matches (icode, 1, x)
4704 || !insn_operand_matches (icode, 2, y))
4705 return 0;
4706
4707 return 1;
4708 }
4709
4710 /* Generate the body of an instruction to copy Y into X.
4711 It may be a list of insns, if one insn isn't enough. */
4712
4713 rtx
4714 gen_move_insn (rtx x, rtx y)
4715 {
4716 rtx seq;
4717
4718 start_sequence ();
4719 emit_move_insn_1 (x, y);
4720 seq = get_insns ();
4721 end_sequence ();
4722 return seq;
4723 }
4724 \f
4725 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4726 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4727 no such operation exists, CODE_FOR_nothing will be returned. */
4728
4729 enum insn_code
4730 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4731 int unsignedp)
4732 {
4733 convert_optab tab;
4734 #ifdef HAVE_ptr_extend
4735 if (unsignedp < 0)
4736 return CODE_FOR_ptr_extend;
4737 #endif
4738
4739 tab = unsignedp ? zext_optab : sext_optab;
4740 return convert_optab_handler (tab, to_mode, from_mode);
4741 }
4742
4743 /* Generate the body of an insn to extend Y (with mode MFROM)
4744 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4745
4746 rtx
4747 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4748 enum machine_mode mfrom, int unsignedp)
4749 {
4750 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4751 return GEN_FCN (icode) (x, y);
4752 }
4753 \f
4754 /* can_fix_p and can_float_p say whether the target machine
4755 can directly convert a given fixed point type to
4756 a given floating point type, or vice versa.
4757 The returned value is the CODE_FOR_... value to use,
4758 or CODE_FOR_nothing if these modes cannot be directly converted.
4759
4760 *TRUNCP_PTR is set to 1 if it is necessary to output
4761 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4762
4763 static enum insn_code
4764 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4765 int unsignedp, int *truncp_ptr)
4766 {
4767 convert_optab tab;
4768 enum insn_code icode;
4769
4770 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4771 icode = convert_optab_handler (tab, fixmode, fltmode);
4772 if (icode != CODE_FOR_nothing)
4773 {
4774 *truncp_ptr = 0;
4775 return icode;
4776 }
4777
4778 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4779 for this to work. We need to rework the fix* and ftrunc* patterns
4780 and documentation. */
4781 tab = unsignedp ? ufix_optab : sfix_optab;
4782 icode = convert_optab_handler (tab, fixmode, fltmode);
4783 if (icode != CODE_FOR_nothing
4784 && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4785 {
4786 *truncp_ptr = 1;
4787 return icode;
4788 }
4789
4790 *truncp_ptr = 0;
4791 return CODE_FOR_nothing;
4792 }
4793
4794 enum insn_code
4795 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4796 int unsignedp)
4797 {
4798 convert_optab tab;
4799
4800 tab = unsignedp ? ufloat_optab : sfloat_optab;
4801 return convert_optab_handler (tab, fltmode, fixmode);
4802 }
4803 \f
4804 /* Generate code to convert FROM to floating point
4805 and store in TO. FROM must be fixed point and not VOIDmode.
4806 UNSIGNEDP nonzero means regard FROM as unsigned.
4807 Normally this is done by correcting the final value
4808 if it is negative. */
4809
4810 void
4811 expand_float (rtx to, rtx from, int unsignedp)
4812 {
4813 enum insn_code icode;
4814 rtx target = to;
4815 enum machine_mode fmode, imode;
4816 bool can_do_signed = false;
4817
4818 /* Crash now, because we won't be able to decide which mode to use. */
4819 gcc_assert (GET_MODE (from) != VOIDmode);
4820
4821 /* Look for an insn to do the conversion. Do it in the specified
4822 modes if possible; otherwise convert either input, output or both to
4823 wider mode. If the integer mode is wider than the mode of FROM,
4824 we can do the conversion signed even if the input is unsigned. */
4825
4826 for (fmode = GET_MODE (to); fmode != VOIDmode;
4827 fmode = GET_MODE_WIDER_MODE (fmode))
4828 for (imode = GET_MODE (from); imode != VOIDmode;
4829 imode = GET_MODE_WIDER_MODE (imode))
4830 {
4831 int doing_unsigned = unsignedp;
4832
4833 if (fmode != GET_MODE (to)
4834 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4835 continue;
4836
4837 icode = can_float_p (fmode, imode, unsignedp);
4838 if (icode == CODE_FOR_nothing && unsignedp)
4839 {
4840 enum insn_code scode = can_float_p (fmode, imode, 0);
4841 if (scode != CODE_FOR_nothing)
4842 can_do_signed = true;
4843 if (imode != GET_MODE (from))
4844 icode = scode, doing_unsigned = 0;
4845 }
4846
4847 if (icode != CODE_FOR_nothing)
4848 {
4849 if (imode != GET_MODE (from))
4850 from = convert_to_mode (imode, from, unsignedp);
4851
4852 if (fmode != GET_MODE (to))
4853 target = gen_reg_rtx (fmode);
4854
4855 emit_unop_insn (icode, target, from,
4856 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4857
4858 if (target != to)
4859 convert_move (to, target, 0);
4860 return;
4861 }
4862 }
4863
4864 /* Unsigned integer, and no way to convert directly. Convert as signed,
4865 then conditionally adjust the result. */
4866 if (unsignedp && can_do_signed)
4867 {
4868 rtx label = gen_label_rtx ();
4869 rtx temp;
4870 REAL_VALUE_TYPE offset;
4871
4872 /* Look for a usable floating mode FMODE wider than the source and at
4873 least as wide as the target. Using FMODE will avoid rounding woes
4874 with unsigned values greater than the signed maximum value. */
4875
4876 for (fmode = GET_MODE (to); fmode != VOIDmode;
4877 fmode = GET_MODE_WIDER_MODE (fmode))
4878 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4879 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4880 break;
4881
4882 if (fmode == VOIDmode)
4883 {
4884 /* There is no such mode. Pretend the target is wide enough. */
4885 fmode = GET_MODE (to);
4886
4887 /* Avoid double-rounding when TO is narrower than FROM. */
4888 if ((significand_size (fmode) + 1)
4889 < GET_MODE_PRECISION (GET_MODE (from)))
4890 {
4891 rtx temp1;
4892 rtx neglabel = gen_label_rtx ();
4893
4894 /* Don't use TARGET if it isn't a register, is a hard register,
4895 or is the wrong mode. */
4896 if (!REG_P (target)
4897 || REGNO (target) < FIRST_PSEUDO_REGISTER
4898 || GET_MODE (target) != fmode)
4899 target = gen_reg_rtx (fmode);
4900
4901 imode = GET_MODE (from);
4902 do_pending_stack_adjust ();
4903
4904 /* Test whether the sign bit is set. */
4905 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4906 0, neglabel);
4907
4908 /* The sign bit is not set. Convert as signed. */
4909 expand_float (target, from, 0);
4910 emit_jump_insn (gen_jump (label));
4911 emit_barrier ();
4912
4913 /* The sign bit is set.
4914 Convert to a usable (positive signed) value by shifting right
4915 one bit, while remembering if a nonzero bit was shifted
4916 out; i.e., compute (from & 1) | (from >> 1). */
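/* The OR of the shifted-out bit acts as a sticky bit: halving FROM drops
   its lowest bit, and without folding that bit back in, the final doubling
   could round to a different value than a direct conversion would.  As a
   sketch in terms of the local variables below:

     temp = (from & 1) | (from >> 1);   then convert temp and double it.  */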
4917
4918 emit_label (neglabel);
4919 temp = expand_binop (imode, and_optab, from, const1_rtx,
4920 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4921 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4922 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4923 OPTAB_LIB_WIDEN);
4924 expand_float (target, temp, 0);
4925
4926 /* Multiply by 2 to undo the shift above. */
4927 temp = expand_binop (fmode, add_optab, target, target,
4928 target, 0, OPTAB_LIB_WIDEN);
4929 if (temp != target)
4930 emit_move_insn (target, temp);
4931
4932 do_pending_stack_adjust ();
4933 emit_label (label);
4934 goto done;
4935 }
4936 }
4937
4938 /* If we are about to do some arithmetic to correct for an
4939 unsigned operand, do it in a pseudo-register. */
4940
4941 if (GET_MODE (to) != fmode
4942 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4943 target = gen_reg_rtx (fmode);
4944
4945 /* Convert as signed integer to floating. */
4946 expand_float (target, from, 0);
4947
4948 /* If FROM is negative (and therefore TO is negative),
4949 correct its value by 2**bitwidth. */
4950
4951 do_pending_stack_adjust ();
4952 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4953 0, label);
4954
4955
4956 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4957 temp = expand_binop (fmode, add_optab, target,
4958 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4959 target, 0, OPTAB_LIB_WIDEN);
4960 if (temp != target)
4961 emit_move_insn (target, temp);
4962
4963 do_pending_stack_adjust ();
4964 emit_label (label);
4965 goto done;
4966 }
4967
4968 /* No hardware instruction available; call a library routine. */
4969 {
4970 rtx libfunc;
4971 rtx insns;
4972 rtx value;
4973 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4974
4975 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4976 from = convert_to_mode (SImode, from, unsignedp);
4977
4978 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4979 gcc_assert (libfunc);
4980
4981 start_sequence ();
4982
4983 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4984 GET_MODE (to), 1, from,
4985 GET_MODE (from));
4986 insns = get_insns ();
4987 end_sequence ();
4988
4989 emit_libcall_block (insns, target, value,
4990 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4991 GET_MODE (to), from));
4992 }
4993
4994 done:
4995
4996 /* Copy result to requested destination
4997 if we have been computing in a temp location. */
4998
4999 if (target != to)
5000 {
5001 if (GET_MODE (target) == GET_MODE (to))
5002 emit_move_insn (to, target);
5003 else
5004 convert_move (to, target, 0);
5005 }
5006 }
5007 \f
5008 /* Generate code to convert FROM to fixed point and store in TO. FROM
5009 must be floating point. */
5010
5011 void
5012 expand_fix (rtx to, rtx from, int unsignedp)
5013 {
5014 enum insn_code icode;
5015 rtx target = to;
5016 enum machine_mode fmode, imode;
5017 int must_trunc = 0;
5018
5019 /* We first try to find a pair of modes, one real and one integer, at
5020 least as wide as FROM and TO, respectively, in which we can open-code
5021 this conversion. If the integer mode is wider than the mode of TO,
5022 we can do the conversion either signed or unsigned. */
5023
5024 for (fmode = GET_MODE (from); fmode != VOIDmode;
5025 fmode = GET_MODE_WIDER_MODE (fmode))
5026 for (imode = GET_MODE (to); imode != VOIDmode;
5027 imode = GET_MODE_WIDER_MODE (imode))
5028 {
5029 int doing_unsigned = unsignedp;
5030
5031 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5032 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5033 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5034
5035 if (icode != CODE_FOR_nothing)
5036 {
5037 rtx last = get_last_insn ();
5038 if (fmode != GET_MODE (from))
5039 from = convert_to_mode (fmode, from, 0);
5040
5041 if (must_trunc)
5042 {
5043 rtx temp = gen_reg_rtx (GET_MODE (from));
5044 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5045 temp, 0);
5046 }
5047
5048 if (imode != GET_MODE (to))
5049 target = gen_reg_rtx (imode);
5050
5051 if (maybe_emit_unop_insn (icode, target, from,
5052 doing_unsigned ? UNSIGNED_FIX : FIX))
5053 {
5054 if (target != to)
5055 convert_move (to, target, unsignedp);
5056 return;
5057 }
5058 delete_insns_since (last);
5059 }
5060 }
5061
5062 /* For an unsigned conversion, there is one more way to do it.
5063 If we have a signed conversion, we generate code that compares
5064 the real value to the largest representable positive number. If it
5065 is smaller, the conversion is done normally. Otherwise, subtract
5066 one plus the highest signed number, convert, and add it back.
5067
5068 We only need to check all real modes, since we know we didn't find
5069 anything with a wider integer mode.
5070
5071 This code used to extend the FP value into a mode wider than the destination.
5072 This is needed for decimal float modes which cannot accurately
5073 represent one plus the highest signed number of the same size, but
5074 not for binary modes. Consider, for instance, conversion from SFmode
5075 into DImode.
5076
5077 The hot path through the code is dealing with inputs smaller than 2^63
5078 and doing just the conversion, so there are no bits to lose.
5079
5080 In the other path we know the value is positive and in the range 2^63..2^64-1
5081 inclusive (for any other input, overflow happens and the result is undefined).
5082 So we know that the most significant bit set in the mantissa corresponds to
5083 2^63. The subtraction of 2^63 should not generate any rounding as it
5084 simply clears out that bit. The rest is trivial. */
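/* A numerical sketch of that slow path (hypothetical values, DImode
   destination): for an input of 2^63 + 5 the generated code computes
   (2^63 + 5) - 2^63 == 5, converts 5 with the signed fix, and XORs the
   result with (HOST_WIDE_INT) 1 << 63.  The signed conversion of a value
   below 2^63 leaves that bit clear, so the XOR is equivalent to adding
   2^63 back, giving 2^63 + 5 again.  */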
5085
5086 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5087 for (fmode = GET_MODE (from); fmode != VOIDmode;
5088 fmode = GET_MODE_WIDER_MODE (fmode))
5089 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5090 && (!DECIMAL_FLOAT_MODE_P (fmode)
5091 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
5092 {
5093 int bitsize;
5094 REAL_VALUE_TYPE offset;
5095 rtx limit, lab1, lab2, insn;
5096
5097 bitsize = GET_MODE_PRECISION (GET_MODE (to));
5098 real_2expN (&offset, bitsize - 1, fmode);
5099 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5100 lab1 = gen_label_rtx ();
5101 lab2 = gen_label_rtx ();
5102
5103 if (fmode != GET_MODE (from))
5104 from = convert_to_mode (fmode, from, 0);
5105
5106 /* See if we need to do the subtraction. */
5107 do_pending_stack_adjust ();
5108 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5109 0, lab1);
5110
5111 /* If not, do the signed "fix" and branch around fixup code. */
5112 expand_fix (to, from, 0);
5113 emit_jump_insn (gen_jump (lab2));
5114 emit_barrier ();
5115
5116 /* Otherwise, subtract 2**(N-1), convert to signed number,
5117 then add 2**(N-1). Do the addition using XOR since this
5118 will often generate better code. */
5119 emit_label (lab1);
5120 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5121 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5122 expand_fix (to, target, 0);
5123 target = expand_binop (GET_MODE (to), xor_optab, to,
5124 gen_int_mode
5125 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5126 GET_MODE (to)),
5127 to, 1, OPTAB_LIB_WIDEN);
5128
5129 if (target != to)
5130 emit_move_insn (to, target);
5131
5132 emit_label (lab2);
5133
5134 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
5135 {
5136 /* Make a place for a REG_NOTE and add it. */
5137 insn = emit_move_insn (to, to);
5138 set_unique_reg_note (insn,
5139 REG_EQUAL,
5140 gen_rtx_fmt_e (UNSIGNED_FIX,
5141 GET_MODE (to),
5142 copy_rtx (from)));
5143 }
5144
5145 return;
5146 }
5147
5148 /* We can't do it with an insn, so use a library call. But first ensure
5149 that the mode of TO is at least as wide as SImode, since those are the
5150 only library calls we know about. */
5151
5152 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5153 {
5154 target = gen_reg_rtx (SImode);
5155
5156 expand_fix (target, from, unsignedp);
5157 }
5158 else
5159 {
5160 rtx insns;
5161 rtx value;
5162 rtx libfunc;
5163
5164 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5165 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5166 gcc_assert (libfunc);
5167
5168 start_sequence ();
5169
5170 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5171 GET_MODE (to), 1, from,
5172 GET_MODE (from));
5173 insns = get_insns ();
5174 end_sequence ();
5175
5176 emit_libcall_block (insns, target, value,
5177 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5178 GET_MODE (to), from));
5179 }
5180
5181 if (target != to)
5182 {
5183 if (GET_MODE (to) == GET_MODE (target))
5184 emit_move_insn (to, target);
5185 else
5186 convert_move (to, target, 0);
5187 }
5188 }
5189
5190 /* Generate code to convert FROM to TO, at least one of which is fixed-point.
5191 If UINTP is true, either TO or FROM is an unsigned integer.
5192 If SATP is true, we need to saturate the result. */
5193
5194 void
5195 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5196 {
5197 enum machine_mode to_mode = GET_MODE (to);
5198 enum machine_mode from_mode = GET_MODE (from);
5199 convert_optab tab;
5200 enum rtx_code this_code;
5201 enum insn_code code;
5202 rtx insns, value;
5203 rtx libfunc;
5204
5205 if (to_mode == from_mode)
5206 {
5207 emit_move_insn (to, from);
5208 return;
5209 }
5210
5211 if (uintp)
5212 {
5213 tab = satp ? satfractuns_optab : fractuns_optab;
5214 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5215 }
5216 else
5217 {
5218 tab = satp ? satfract_optab : fract_optab;
5219 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5220 }
5221 code = convert_optab_handler (tab, to_mode, from_mode);
5222 if (code != CODE_FOR_nothing)
5223 {
5224 emit_unop_insn (code, to, from, this_code);
5225 return;
5226 }
5227
5228 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5229 gcc_assert (libfunc);
5230
5231 start_sequence ();
5232 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5233 1, from, from_mode);
5234 insns = get_insns ();
5235 end_sequence ();
5236
5237 emit_libcall_block (insns, to, value,
5238 gen_rtx_fmt_e (tab->code, to_mode, from));
5239 }
5240
5241 /* Generate code to convert FROM to fixed point and store in TO. FROM
5242 must be floating point, TO must be signed. Use the conversion optab
5243 TAB to do the conversion. */
5244
5245 bool
5246 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5247 {
5248 enum insn_code icode;
5249 rtx target = to;
5250 enum machine_mode fmode, imode;
5251
5252 /* We first try to find a pair of modes, one real and one integer, at
5253 least as wide as FROM and TO, respectively, in which we can open-code
5254 this conversion. If the integer mode is wider than the mode of TO,
5255 we can do the conversion either signed or unsigned. */
5256
5257 for (fmode = GET_MODE (from); fmode != VOIDmode;
5258 fmode = GET_MODE_WIDER_MODE (fmode))
5259 for (imode = GET_MODE (to); imode != VOIDmode;
5260 imode = GET_MODE_WIDER_MODE (imode))
5261 {
5262 icode = convert_optab_handler (tab, imode, fmode);
5263 if (icode != CODE_FOR_nothing)
5264 {
5265 rtx last = get_last_insn ();
5266 if (fmode != GET_MODE (from))
5267 from = convert_to_mode (fmode, from, 0);
5268
5269 if (imode != GET_MODE (to))
5270 target = gen_reg_rtx (imode);
5271
5272 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5273 {
5274 delete_insns_since (last);
5275 continue;
5276 }
5277 if (target != to)
5278 convert_move (to, target, 0);
5279 return true;
5280 }
5281 }
5282
5283 return false;
5284 }
5285 \f
5286 /* Report whether we have an instruction to perform the operation
5287 specified by CODE on operands of mode MODE. */
5288 int
5289 have_insn_for (enum rtx_code code, enum machine_mode mode)
5290 {
5291 return (code_to_optab[(int) code] != 0
5292 && (optab_handler (code_to_optab[(int) code], mode)
5293 != CODE_FOR_nothing));
5294 }
5295
5296 /* Set all insn_code fields to CODE_FOR_nothing. */
5297
5298 static void
5299 init_insn_codes (void)
5300 {
5301 memset (optab_table, 0, sizeof (optab_table));
5302 memset (convert_optab_table, 0, sizeof (convert_optab_table));
5303 memset (direct_optab_table, 0, sizeof (direct_optab_table));
5304 }
5305
5306 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5307 static inline void
5308 init_optab (optab op, enum rtx_code code)
5309 {
5310 op->code = code;
5311 code_to_optab[(int) code] = op;
5312 }
5313
5314 /* Same, but fill in its code as CODE, and do _not_ write it into
5315 the code_to_optab table. */
5316 static inline void
5317 init_optabv (optab op, enum rtx_code code)
5318 {
5319 op->code = code;
5320 }
5321
5322 /* Conversion optabs never go in the code_to_optab table. */
5323 static void
5324 init_convert_optab (convert_optab op, enum rtx_code code)
5325 {
5326 op->code = code;
5327 }
5328
5329 /* Initialize the libfunc fields of an entire group of entries in some
5330 optab. Each entry is set equal to a string consisting of a leading
5331 pair of underscores followed by a generic operation name followed by
5332 a mode name (downshifted to lowercase) followed by a single character
5333 representing the number of operands for the given operation (which is
5334 usually one of the characters '2', '3', or '4').
5335
5336 OPTABLE is the table in which libfunc fields are to be initialized.
5337 OPNAME is the generic (string) name of the operation.
5338 SUFFIX is the character which specifies the number of operands for
5339 the given generic operation.
5340 MODE is the mode to generate for.
5341 */
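/* For example (assuming the usual libgcc naming and no target override):
   add_optab with suffix '3' in SImode yields "__addsi3", or "__gnu_addsi3"
   when targetm.libfunc_gnu_prefix is set.  */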
5342
5343 static void
5344 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5345 {
5346 unsigned opname_len = strlen (opname);
5347 const char *mname = GET_MODE_NAME (mode);
5348 unsigned mname_len = strlen (mname);
5349 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5350 int len = prefix_len + opname_len + mname_len + 1 + 1;
5351 char *libfunc_name = XALLOCAVEC (char, len);
5352 char *p;
5353 const char *q;
5354
5355 p = libfunc_name;
5356 *p++ = '_';
5357 *p++ = '_';
5358 if (targetm.libfunc_gnu_prefix)
5359 {
5360 *p++ = 'g';
5361 *p++ = 'n';
5362 *p++ = 'u';
5363 *p++ = '_';
5364 }
5365 for (q = opname; *q; )
5366 *p++ = *q++;
5367 for (q = mname; *q; q++)
5368 *p++ = TOLOWER (*q);
5369 *p++ = suffix;
5370 *p = '\0';
5371
5372 set_optab_libfunc (optable, mode,
5373 ggc_alloc_string (libfunc_name, p - libfunc_name));
5374 }
5375
5376 /* Like gen_libfunc, but verify that integer operation is involved. */
5377
5378 static void
5379 gen_int_libfunc (optab optable, const char *opname, char suffix,
5380 enum machine_mode mode)
5381 {
5382 int maxsize = 2 * BITS_PER_WORD;
5383
5384 if (GET_MODE_CLASS (mode) != MODE_INT)
5385 return;
5386 if (maxsize < LONG_LONG_TYPE_SIZE)
5387 maxsize = LONG_LONG_TYPE_SIZE;
5388 if (GET_MODE_CLASS (mode) != MODE_INT
5389 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5390 return;
5391 gen_libfunc (optable, opname, suffix, mode);
5392 }
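/* Editor's note (illustration only): on a typical 32-bit target
   (BITS_PER_WORD == 32, LONG_LONG_TYPE_SIZE == 64) the checks above
   accept only SImode and DImode, so names such as "__mulsi3" and
   "__muldi3" are registered while no QImode or HImode variants are.  */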
5393
5394 /* Like gen_libfunc, but verify that an FP operation is involved and set the decimal prefix if needed. */
5395
5396 static void
5397 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5398 enum machine_mode mode)
5399 {
5400 char *dec_opname;
5401
5402 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5403 gen_libfunc (optable, opname, suffix, mode);
5404 if (DECIMAL_FLOAT_MODE_P (mode))
5405 {
5406 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5407 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5408 depending on the low level floating format used. */
5409 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5410 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5411 gen_libfunc (optable, dec_opname, suffix, mode);
5412 }
5413 }
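/* Example (editor's sketch): for binary floating point,

     gen_fp_libfunc (add_optab, "add", '3', DFmode);

   yields "__adddf3", while a decimal mode such as DDmode yields the
   prefixed name "__bid_adddd3" or "__dpd_adddd3", depending on which
   decimal format the target uses.  */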
5414
5415 /* Like gen_libfunc, but verify that a fixed-point operation is involved. */
5416
5417 static void
5418 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5419 enum machine_mode mode)
5420 {
5421 if (!ALL_FIXED_POINT_MODE_P (mode))
5422 return;
5423 gen_libfunc (optable, opname, suffix, mode);
5424 }
5425
5426 /* Like gen_libfunc, but verify that a signed fixed-point operation is
5427 involved. */
5428
5429 static void
5430 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5431 enum machine_mode mode)
5432 {
5433 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5434 return;
5435 gen_libfunc (optable, opname, suffix, mode);
5436 }
5437
5438 /* Like gen_libfunc, but verify that an unsigned fixed-point operation is
5439 involved. */
5440
5441 static void
5442 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5443 enum machine_mode mode)
5444 {
5445 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5446 return;
5447 gen_libfunc (optable, opname, suffix, mode);
5448 }
5449
5450 /* Like gen_libfunc, but verify that an FP or INT operation is involved. */
5451
5452 static void
5453 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5454 enum machine_mode mode)
5455 {
5456 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5457 gen_fp_libfunc (optable, name, suffix, mode);
5458 if (INTEGRAL_MODE_P (mode))
5459 gen_int_libfunc (optable, name, suffix, mode);
5460 }
5461
5462 /* Like gen_libfunc, but verify that an FP or INT operation is involved
5463 and add a 'v' suffix for the integer variant. */
5464
5465 static void
5466 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5467 enum machine_mode mode)
5468 {
5469 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5470 gen_fp_libfunc (optable, name, suffix, mode);
5471 if (GET_MODE_CLASS (mode) == MODE_INT)
5472 {
5473 int len = strlen (name);
5474 char *v_name = XALLOCAVEC (char, len + 2);
5475 strcpy (v_name, name);
5476 v_name[len] = 'v';
5477 v_name[len + 1] = 0;
5478 gen_int_libfunc (optable, v_name, suffix, mode);
5479 }
5480 }
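/* Editor's note: the 'v' marks the overflow-trapping integer variants,
   so for example addv_optab in SImode ends up with the libgcc routine
   "__addvsi3" while its DFmode entry is the plain "__adddf3".  */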
5481
5482 /* Like gen_libfunc, but verify that an FP, INT or FIXED operation is
5483 involved. */
5484
5485 static void
5486 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5487 enum machine_mode mode)
5488 {
5489 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5490 gen_fp_libfunc (optable, name, suffix, mode);
5491 if (INTEGRAL_MODE_P (mode))
5492 gen_int_libfunc (optable, name, suffix, mode);
5493 if (ALL_FIXED_POINT_MODE_P (mode))
5494 gen_fixed_libfunc (optable, name, suffix, mode);
5495 }
5496
5497 /* Like gen_libfunc, but verify that an FP, INT or signed FIXED operation is
5498 involved. */
5499
5500 static void
5501 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5502 enum machine_mode mode)
5503 {
5504 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5505 gen_fp_libfunc (optable, name, suffix, mode);
5506 if (INTEGRAL_MODE_P (mode))
5507 gen_int_libfunc (optable, name, suffix, mode);
5508 if (SIGNED_FIXED_POINT_MODE_P (mode))
5509 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5510 }
5511
5512 /* Like gen_libfunc, but verify that an INT or FIXED operation is
5513 involved. */
5514
5515 static void
5516 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5517 enum machine_mode mode)
5518 {
5519 if (INTEGRAL_MODE_P (mode))
5520 gen_int_libfunc (optable, name, suffix, mode);
5521 if (ALL_FIXED_POINT_MODE_P (mode))
5522 gen_fixed_libfunc (optable, name, suffix, mode);
5523 }
5524
5525 /* Like gen_libfunc, but verify that an INT or signed FIXED operation is
5526 involved. */
5527
5528 static void
5529 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5530 enum machine_mode mode)
5531 {
5532 if (INTEGRAL_MODE_P (mode))
5533 gen_int_libfunc (optable, name, suffix, mode);
5534 if (SIGNED_FIXED_POINT_MODE_P (mode))
5535 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5536 }
5537
5538 /* Like gen_libfunc, but verify that an INT or unsigned FIXED operation is
5539 involved. */
5540
5541 static void
5542 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5543 enum machine_mode mode)
5544 {
5545 if (INTEGRAL_MODE_P (mode))
5546 gen_int_libfunc (optable, name, suffix, mode);
5547 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5548 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5549 }
5550
5551 /* Initialize the libfunc fields of an entire group of entries of an
5552 inter-mode-class conversion optab. The string formation rules are
5553 similar to the ones for gen_libfunc, above, but instead of having
5554 a mode name and an operand count these functions have two mode names
5555 and no operand count. */
5556
5557 static void
5558 gen_interclass_conv_libfunc (convert_optab tab,
5559 const char *opname,
5560 enum machine_mode tmode,
5561 enum machine_mode fmode)
5562 {
5563 size_t opname_len = strlen (opname);
5564 size_t mname_len = 0;
5565
5566 const char *fname, *tname;
5567 const char *q;
5568 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5569 char *libfunc_name, *suffix;
5570 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5571 char *p;
5572
5573 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5574 depends on which underlying decimal floating point format is used. */
5575 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5576
5577 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5578
5579 nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
5580 nondec_name[0] = '_';
5581 nondec_name[1] = '_';
5582 if (targetm.libfunc_gnu_prefix)
5583 {
5584 nondec_name[2] = 'g';
5585 nondec_name[3] = 'n';
5586 nondec_name[4] = 'u';
5587 nondec_name[5] = '_';
5588 }
5589
5590 memcpy (&nondec_name[prefix_len], opname, opname_len);
5591 nondec_suffix = nondec_name + opname_len + prefix_len;
5592
5593 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5594 dec_name[0] = '_';
5595 dec_name[1] = '_';
5596 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5597 memcpy (&dec_name[2+dec_len], opname, opname_len);
5598 dec_suffix = dec_name + dec_len + opname_len + 2;
5599
5600 fname = GET_MODE_NAME (fmode);
5601 tname = GET_MODE_NAME (tmode);
5602
5603 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5604 {
5605 libfunc_name = dec_name;
5606 suffix = dec_suffix;
5607 }
5608 else
5609 {
5610 libfunc_name = nondec_name;
5611 suffix = nondec_suffix;
5612 }
5613
5614 p = suffix;
5615 for (q = fname; *q; p++, q++)
5616 *p = TOLOWER (*q);
5617 for (q = tname; *q; p++, q++)
5618 *p = TOLOWER (*q);
5619
5620 *p = '\0';
5621
5622 set_conv_libfunc (tab, tmode, fmode,
5623 ggc_alloc_string (libfunc_name, p - libfunc_name));
5624 }
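/* Worked example (editor's sketch, not in the original source): the
   suffix is built from the "from" mode followed by the "to" mode, so
   sfloat_optab (basename "float") converting SImode to DFmode gets
   "__floatsidf"; the decimal counterpart for SImode to DDmode would be
   "__bid_floatsidd" (or the dpd_-prefixed equivalent).  */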
5625
5626 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5627 an int->fp conversion. */
5628
5629 static void
5630 gen_int_to_fp_conv_libfunc (convert_optab tab,
5631 const char *opname,
5632 enum machine_mode tmode,
5633 enum machine_mode fmode)
5634 {
5635 if (GET_MODE_CLASS (fmode) != MODE_INT)
5636 return;
5637 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5638 return;
5639 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5640 }
5641
5642 /* ufloat_optab is special: it uses the "floatun" naming scheme for
5643 binary FP and "floatuns" for decimal FP. */
5644
5645 static void
5646 gen_ufloat_conv_libfunc (convert_optab tab,
5647 const char *opname ATTRIBUTE_UNUSED,
5648 enum machine_mode tmode,
5649 enum machine_mode fmode)
5650 {
5651 if (DECIMAL_FLOAT_MODE_P (tmode))
5652 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5653 else
5654 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5655 }
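/* Example (editor's sketch): an unsigned SImode to DFmode conversion is
   registered as "__floatunsidf", while an unsigned SImode to DDmode
   conversion uses the longer "floatuns" stem, e.g. "__bid_floatunssidd"
   under the BID format.  */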
5656
5657 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5658 an int->fp conversion with no decimal floating point involved. */
5659
5660 static void
5661 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5662 const char *opname,
5663 enum machine_mode tmode,
5664 enum machine_mode fmode)
5665 {
5666 if (GET_MODE_CLASS (fmode) != MODE_INT)
5667 return;
5668 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5669 return;
5670 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5671 }
5672
5673 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5674 an fp->int conversion. */
5675
5676 static void
5677 gen_fp_to_int_conv_libfunc (convert_optab tab,
5678 const char *opname,
5679 enum machine_mode tmode,
5680 enum machine_mode fmode)
5681 {
5682 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5683 return;
5684 if (GET_MODE_CLASS (tmode) != MODE_INT)
5685 return;
5686 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5687 }
5688
5689 /* Initialize the libfunc fields of an intra-mode-class conversion optab.
5690 The string formation rules are similar to the ones for gen_libfunc,
5691 above. */
5692
5693 static void
5694 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5695 enum machine_mode tmode, enum machine_mode fmode)
5696 {
5697 size_t opname_len = strlen (opname);
5698 size_t mname_len = 0;
5699
5700 const char *fname, *tname;
5701 const char *q;
5702 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5703 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5704 char *libfunc_name, *suffix;
5705 char *p;
5706
5707 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5708 depends on which underlying decimal floating point format is used. */
5709 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5710
5711 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5712
5713 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5714 nondec_name[0] = '_';
5715 nondec_name[1] = '_';
5716 if (targetm.libfunc_gnu_prefix)
5717 {
5718 nondec_name[2] = 'g';
5719 nondec_name[3] = 'n';
5720 nondec_name[4] = 'u';
5721 nondec_name[5] = '_';
5722 }
5723 memcpy (&nondec_name[prefix_len], opname, opname_len);
5724 nondec_suffix = nondec_name + opname_len + prefix_len;
5725
5726 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5727 dec_name[0] = '_';
5728 dec_name[1] = '_';
5729 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5730 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5731 dec_suffix = dec_name + dec_len + opname_len + 2;
5732
5733 fname = GET_MODE_NAME (fmode);
5734 tname = GET_MODE_NAME (tmode);
5735
5736 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5737 {
5738 libfunc_name = dec_name;
5739 suffix = dec_suffix;
5740 }
5741 else
5742 {
5743 libfunc_name = nondec_name;
5744 suffix = nondec_suffix;
5745 }
5746
5747 p = suffix;
5748 for (q = fname; *q; p++, q++)
5749 *p = TOLOWER (*q);
5750 for (q = tname; *q; p++, q++)
5751 *p = TOLOWER (*q);
5752
5753 *p++ = '2';
5754 *p = '\0';
5755
5756 set_conv_libfunc (tab, tmode, fmode,
5757 ggc_alloc_string (libfunc_name, p - libfunc_name));
5758 }
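/* Worked example (editor's sketch): intraclass conversions keep the
   trailing '2', so trunc_optab from DFmode to SFmode becomes
   "__truncdfsf2" and sext_optab from SFmode to DFmode becomes
   "__extendsfdf2", matching the usual libgcc soft-float routines.  */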
5759
5760 /* Pick the proper libcall for trunc_optab. We need to choose whether we
5761 do truncation or extension, and whether it is interclass or intraclass. */
5762
5763 static void
5764 gen_trunc_conv_libfunc (convert_optab tab,
5765 const char *opname,
5766 enum machine_mode tmode,
5767 enum machine_mode fmode)
5768 {
5769 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5770 return;
5771 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5772 return;
5773 if (tmode == fmode)
5774 return;
5775
5776 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5777 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5778 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5779
5780 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5781 return;
5782
5783 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5784 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5785 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5786 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5787 }
5788
5789 /* Pick the proper libcall for extend_optab. We need to choose whether we
5790 do truncation or extension, and whether it is interclass or intraclass. */
5791
5792 static void
5793 gen_extend_conv_libfunc (convert_optab tab,
5794 const char *opname ATTRIBUTE_UNUSED,
5795 enum machine_mode tmode,
5796 enum machine_mode fmode)
5797 {
5798 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5799 return;
5800 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5801 return;
5802 if (tmode == fmode)
5803 return;
5804
5805 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5806 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5807 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5808
5809 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5810 return;
5811
5812 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5813 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5814 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5815 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5816 }
5817
5818 /* Pick the proper libcall for fract_optab. We need to choose whether the
5819 conversion is interclass or intraclass. */
5820
5821 static void
5822 gen_fract_conv_libfunc (convert_optab tab,
5823 const char *opname,
5824 enum machine_mode tmode,
5825 enum machine_mode fmode)
5826 {
5827 if (tmode == fmode)
5828 return;
5829 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5830 return;
5831
5832 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5833 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5834 else
5835 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5836 }
5837
5838 /* Pick the proper libcall for fractuns_optab. */
5839
5840 static void
5841 gen_fractuns_conv_libfunc (convert_optab tab,
5842 const char *opname,
5843 enum machine_mode tmode,
5844 enum machine_mode fmode)
5845 {
5846 if (tmode == fmode)
5847 return;
5848 /* One mode must be a fixed-point mode, and the other must be an integer
5849 mode. */
5850 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5851 || (ALL_FIXED_POINT_MODE_P (fmode)
5852 && GET_MODE_CLASS (tmode) == MODE_INT)))
5853 return;
5854
5855 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5856 }
5857
5858 /* Pick the proper libcall for satfract_optab. We need to choose whether
5859 the conversion is interclass or intraclass. */
5860
5861 static void
5862 gen_satfract_conv_libfunc (convert_optab tab,
5863 const char *opname,
5864 enum machine_mode tmode,
5865 enum machine_mode fmode)
5866 {
5867 if (tmode == fmode)
5868 return;
5869 /* TMODE must be a fixed-point mode. */
5870 if (!ALL_FIXED_POINT_MODE_P (tmode))
5871 return;
5872
5873 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5874 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5875 else
5876 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5877 }
5878
5879 /* Pick the proper libcall for satfractuns_optab. */
5880
5881 static void
5882 gen_satfractuns_conv_libfunc (convert_optab tab,
5883 const char *opname,
5884 enum machine_mode tmode,
5885 enum machine_mode fmode)
5886 {
5887 if (tmode == fmode)
5888 return;
5889 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5890 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
5891 return;
5892
5893 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5894 }
5895
5896 /* A table of previously-created libfuncs, hashed by name. */
5897 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
5898
5899 /* Hashtable callbacks for libfunc_decls. */
5900
5901 static hashval_t
5902 libfunc_decl_hash (const void *entry)
5903 {
5904 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
5905 }
5906
5907 static int
5908 libfunc_decl_eq (const void *entry1, const void *entry2)
5909 {
5910 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
5911 }
5912
5913 /* Build a decl for a libfunc named NAME. */
5914
5915 tree
5916 build_libfunc_function (const char *name)
5917 {
5918 tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
5919 get_identifier (name),
5920 build_function_type (integer_type_node, NULL_TREE));
5921 /* ??? We don't have any type information except that this is
5922 a function. Pretend this is "int foo()". */
5923 DECL_ARTIFICIAL (decl) = 1;
5924 DECL_EXTERNAL (decl) = 1;
5925 TREE_PUBLIC (decl) = 1;
5926 gcc_assert (DECL_ASSEMBLER_NAME (decl));
5927
5928 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5929 are the flags assigned by targetm.encode_section_info. */
5930 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
5931
5932 return decl;
5933 }
5934
5935 rtx
5936 init_one_libfunc (const char *name)
5937 {
5938 tree id, decl;
5939 void **slot;
5940 hashval_t hash;
5941
5942 if (libfunc_decls == NULL)
5943 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
5944 libfunc_decl_eq, NULL);
5945
5946 /* See if we have already created a libfunc decl for this function. */
5947 id = get_identifier (name);
5948 hash = IDENTIFIER_HASH_VALUE (id);
5949 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
5950 decl = (tree) *slot;
5951 if (decl == NULL)
5952 {
5953 /* Create a new decl, so that it can be passed to
5954 targetm.encode_section_info. */
5955 decl = build_libfunc_function (name);
5956 *slot = decl;
5957 }
5958 return XEXP (DECL_RTL (decl), 0);
5959 }
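/* Usage note (editor's addition): callers hand this a plain assembler
   name and get back the SYMBOL_REF for it, with the decl created on
   first use.  The libfunc initialization further down does exactly
   that, e.g.

     memcpy_libfunc = init_one_libfunc ("memcpy");

   and set_optab_libfunc / set_conv_libfunc route through it as well.  */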
5960
5961 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
5962
5963 rtx
5964 set_user_assembler_libfunc (const char *name, const char *asmspec)
5965 {
5966 tree id, decl;
5967 void **slot;
5968 hashval_t hash;
5969
5970 id = get_identifier (name);
5971 hash = IDENTIFIER_HASH_VALUE (id);
5972 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
5973 gcc_assert (slot);
5974 decl = (tree) *slot;
5975 set_user_assembler_name (decl, asmspec);
5976 return XEXP (DECL_RTL (decl), 0);
5977 }
5978
5979 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5980 MODE to NAME, which should be either 0 or a string constant. */
5981 void
5982 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5983 {
5984 rtx val;
5985 struct libfunc_entry e;
5986 struct libfunc_entry **slot;
5987 e.optab = (size_t) (optable - &optab_table[0]);
5988 e.mode1 = mode;
5989 e.mode2 = VOIDmode;
5990
5991 if (name)
5992 val = init_one_libfunc (name);
5993 else
5994 val = 0;
5995 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
5996 if (*slot == NULL)
5997 *slot = ggc_alloc_libfunc_entry ();
5998 (*slot)->optab = (size_t) (optable - &optab_table[0]);
5999 (*slot)->mode1 = mode;
6000 (*slot)->mode2 = VOIDmode;
6001 (*slot)->libfunc = val;
6002 }
6003
6004 /* Call this to reset the function entry for one conversion optab
6005 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6006 either 0 or a string constant. */
6007 void
6008 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6009 enum machine_mode fmode, const char *name)
6010 {
6011 rtx val;
6012 struct libfunc_entry e;
6013 struct libfunc_entry **slot;
6014 e.optab = (size_t) (optable - &convert_optab_table[0]);
6015 e.mode1 = tmode;
6016 e.mode2 = fmode;
6017
6018 if (name)
6019 val = init_one_libfunc (name);
6020 else
6021 val = 0;
6022 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6023 if (*slot == NULL)
6024 *slot = ggc_alloc_libfunc_entry ();
6025 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6026 (*slot)->mode1 = tmode;
6027 (*slot)->mode2 = fmode;
6028 (*slot)->libfunc = val;
6029 }
6030
6031 /* Call this to initialize the contents of the optabs
6032 appropriately for the current target machine. */
6033
6034 void
6035 init_optabs (void)
6036 {
6037 if (libfunc_hash)
6038 {
6039 htab_empty (libfunc_hash);
6040 /* We statically initialize the insn_codes with the equivalent of
6041 CODE_FOR_nothing. Repeat the process if reinitialising. */
6042 init_insn_codes ();
6043 }
6044 else
6045 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6046
6047 init_optab (add_optab, PLUS);
6048 init_optabv (addv_optab, PLUS);
6049 init_optab (sub_optab, MINUS);
6050 init_optabv (subv_optab, MINUS);
6051 init_optab (ssadd_optab, SS_PLUS);
6052 init_optab (usadd_optab, US_PLUS);
6053 init_optab (sssub_optab, SS_MINUS);
6054 init_optab (ussub_optab, US_MINUS);
6055 init_optab (smul_optab, MULT);
6056 init_optab (ssmul_optab, SS_MULT);
6057 init_optab (usmul_optab, US_MULT);
6058 init_optabv (smulv_optab, MULT);
6059 init_optab (smul_highpart_optab, UNKNOWN);
6060 init_optab (umul_highpart_optab, UNKNOWN);
6061 init_optab (smul_widen_optab, UNKNOWN);
6062 init_optab (umul_widen_optab, UNKNOWN);
6063 init_optab (usmul_widen_optab, UNKNOWN);
6064 init_optab (smadd_widen_optab, UNKNOWN);
6065 init_optab (umadd_widen_optab, UNKNOWN);
6066 init_optab (ssmadd_widen_optab, UNKNOWN);
6067 init_optab (usmadd_widen_optab, UNKNOWN);
6068 init_optab (smsub_widen_optab, UNKNOWN);
6069 init_optab (umsub_widen_optab, UNKNOWN);
6070 init_optab (ssmsub_widen_optab, UNKNOWN);
6071 init_optab (usmsub_widen_optab, UNKNOWN);
6072 init_optab (sdiv_optab, DIV);
6073 init_optab (ssdiv_optab, SS_DIV);
6074 init_optab (usdiv_optab, US_DIV);
6075 init_optabv (sdivv_optab, DIV);
6076 init_optab (sdivmod_optab, UNKNOWN);
6077 init_optab (udiv_optab, UDIV);
6078 init_optab (udivmod_optab, UNKNOWN);
6079 init_optab (smod_optab, MOD);
6080 init_optab (umod_optab, UMOD);
6081 init_optab (fmod_optab, UNKNOWN);
6082 init_optab (remainder_optab, UNKNOWN);
6083 init_optab (ftrunc_optab, UNKNOWN);
6084 init_optab (and_optab, AND);
6085 init_optab (ior_optab, IOR);
6086 init_optab (xor_optab, XOR);
6087 init_optab (ashl_optab, ASHIFT);
6088 init_optab (ssashl_optab, SS_ASHIFT);
6089 init_optab (usashl_optab, US_ASHIFT);
6090 init_optab (ashr_optab, ASHIFTRT);
6091 init_optab (lshr_optab, LSHIFTRT);
6092 init_optabv (vashl_optab, ASHIFT);
6093 init_optabv (vashr_optab, ASHIFTRT);
6094 init_optabv (vlshr_optab, LSHIFTRT);
6095 init_optab (rotl_optab, ROTATE);
6096 init_optab (rotr_optab, ROTATERT);
6097 init_optab (smin_optab, SMIN);
6098 init_optab (smax_optab, SMAX);
6099 init_optab (umin_optab, UMIN);
6100 init_optab (umax_optab, UMAX);
6101 init_optab (pow_optab, UNKNOWN);
6102 init_optab (atan2_optab, UNKNOWN);
6103 init_optab (fma_optab, FMA);
6104 init_optab (fms_optab, UNKNOWN);
6105 init_optab (fnma_optab, UNKNOWN);
6106 init_optab (fnms_optab, UNKNOWN);
6107
6108 /* These three have codes assigned exclusively for the sake of
6109 have_insn_for. */
6110 init_optab (mov_optab, SET);
6111 init_optab (movstrict_optab, STRICT_LOW_PART);
6112 init_optab (cbranch_optab, COMPARE);
6113
6114 init_optab (cmov_optab, UNKNOWN);
6115 init_optab (cstore_optab, UNKNOWN);
6116 init_optab (ctrap_optab, UNKNOWN);
6117
6118 init_optab (storent_optab, UNKNOWN);
6119
6120 init_optab (cmp_optab, UNKNOWN);
6121 init_optab (ucmp_optab, UNKNOWN);
6122
6123 init_optab (eq_optab, EQ);
6124 init_optab (ne_optab, NE);
6125 init_optab (gt_optab, GT);
6126 init_optab (ge_optab, GE);
6127 init_optab (lt_optab, LT);
6128 init_optab (le_optab, LE);
6129 init_optab (unord_optab, UNORDERED);
6130
6131 init_optab (neg_optab, NEG);
6132 init_optab (ssneg_optab, SS_NEG);
6133 init_optab (usneg_optab, US_NEG);
6134 init_optabv (negv_optab, NEG);
6135 init_optab (abs_optab, ABS);
6136 init_optabv (absv_optab, ABS);
6137 init_optab (addcc_optab, UNKNOWN);
6138 init_optab (one_cmpl_optab, NOT);
6139 init_optab (bswap_optab, BSWAP);
6140 init_optab (ffs_optab, FFS);
6141 init_optab (clz_optab, CLZ);
6142 init_optab (ctz_optab, CTZ);
6143 init_optab (clrsb_optab, CLRSB);
6144 init_optab (popcount_optab, POPCOUNT);
6145 init_optab (parity_optab, PARITY);
6146 init_optab (sqrt_optab, SQRT);
6147 init_optab (floor_optab, UNKNOWN);
6148 init_optab (ceil_optab, UNKNOWN);
6149 init_optab (round_optab, UNKNOWN);
6150 init_optab (btrunc_optab, UNKNOWN);
6151 init_optab (nearbyint_optab, UNKNOWN);
6152 init_optab (rint_optab, UNKNOWN);
6153 init_optab (sincos_optab, UNKNOWN);
6154 init_optab (sin_optab, UNKNOWN);
6155 init_optab (asin_optab, UNKNOWN);
6156 init_optab (cos_optab, UNKNOWN);
6157 init_optab (acos_optab, UNKNOWN);
6158 init_optab (exp_optab, UNKNOWN);
6159 init_optab (exp10_optab, UNKNOWN);
6160 init_optab (exp2_optab, UNKNOWN);
6161 init_optab (expm1_optab, UNKNOWN);
6162 init_optab (ldexp_optab, UNKNOWN);
6163 init_optab (scalb_optab, UNKNOWN);
6164 init_optab (significand_optab, UNKNOWN);
6165 init_optab (logb_optab, UNKNOWN);
6166 init_optab (ilogb_optab, UNKNOWN);
6167 init_optab (log_optab, UNKNOWN);
6168 init_optab (log10_optab, UNKNOWN);
6169 init_optab (log2_optab, UNKNOWN);
6170 init_optab (log1p_optab, UNKNOWN);
6171 init_optab (tan_optab, UNKNOWN);
6172 init_optab (atan_optab, UNKNOWN);
6173 init_optab (copysign_optab, UNKNOWN);
6174 init_optab (signbit_optab, UNKNOWN);
6175
6176 init_optab (isinf_optab, UNKNOWN);
6177
6178 init_optab (strlen_optab, UNKNOWN);
6179 init_optab (push_optab, UNKNOWN);
6180
6181 init_optab (reduc_smax_optab, UNKNOWN);
6182 init_optab (reduc_umax_optab, UNKNOWN);
6183 init_optab (reduc_smin_optab, UNKNOWN);
6184 init_optab (reduc_umin_optab, UNKNOWN);
6185 init_optab (reduc_splus_optab, UNKNOWN);
6186 init_optab (reduc_uplus_optab, UNKNOWN);
6187
6188 init_optab (ssum_widen_optab, UNKNOWN);
6189 init_optab (usum_widen_optab, UNKNOWN);
6190 init_optab (sdot_prod_optab, UNKNOWN);
6191 init_optab (udot_prod_optab, UNKNOWN);
6192
6193 init_optab (vec_extract_optab, UNKNOWN);
6194 init_optab (vec_extract_even_optab, UNKNOWN);
6195 init_optab (vec_extract_odd_optab, UNKNOWN);
6196 init_optab (vec_interleave_high_optab, UNKNOWN);
6197 init_optab (vec_interleave_low_optab, UNKNOWN);
6198 init_optab (vec_set_optab, UNKNOWN);
6199 init_optab (vec_init_optab, UNKNOWN);
6200 init_optab (vec_shl_optab, UNKNOWN);
6201 init_optab (vec_shr_optab, UNKNOWN);
6202 init_optab (vec_realign_load_optab, UNKNOWN);
6203 init_optab (movmisalign_optab, UNKNOWN);
6204 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6205 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6206 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6207 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6208 init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
6209 init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
6210 init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
6211 init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
6212 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6213 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6214 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6215 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6216 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6217 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6218 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6219 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6220 init_optab (vec_pack_trunc_optab, UNKNOWN);
6221 init_optab (vec_pack_usat_optab, UNKNOWN);
6222 init_optab (vec_pack_ssat_optab, UNKNOWN);
6223 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6224 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6225
6226 init_optab (powi_optab, UNKNOWN);
6227
6228 /* Conversions. */
6229 init_convert_optab (sext_optab, SIGN_EXTEND);
6230 init_convert_optab (zext_optab, ZERO_EXTEND);
6231 init_convert_optab (trunc_optab, TRUNCATE);
6232 init_convert_optab (sfix_optab, FIX);
6233 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6234 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6235 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6236 init_convert_optab (sfloat_optab, FLOAT);
6237 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6238 init_convert_optab (lrint_optab, UNKNOWN);
6239 init_convert_optab (lround_optab, UNKNOWN);
6240 init_convert_optab (lfloor_optab, UNKNOWN);
6241 init_convert_optab (lceil_optab, UNKNOWN);
6242
6243 init_convert_optab (fract_optab, FRACT_CONVERT);
6244 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6245 init_convert_optab (satfract_optab, SAT_FRACT);
6246 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6247
6248 /* Fill in the optabs with the insns we support. */
6249 init_all_optabs ();
6250
6251 /* Initialize the optabs with the names of the library functions. */
6252 add_optab->libcall_basename = "add";
6253 add_optab->libcall_suffix = '3';
6254 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6255 addv_optab->libcall_basename = "add";
6256 addv_optab->libcall_suffix = '3';
6257 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6258 ssadd_optab->libcall_basename = "ssadd";
6259 ssadd_optab->libcall_suffix = '3';
6260 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6261 usadd_optab->libcall_basename = "usadd";
6262 usadd_optab->libcall_suffix = '3';
6263 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6264 sub_optab->libcall_basename = "sub";
6265 sub_optab->libcall_suffix = '3';
6266 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6267 subv_optab->libcall_basename = "sub";
6268 subv_optab->libcall_suffix = '3';
6269 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6270 sssub_optab->libcall_basename = "sssub";
6271 sssub_optab->libcall_suffix = '3';
6272 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6273 ussub_optab->libcall_basename = "ussub";
6274 ussub_optab->libcall_suffix = '3';
6275 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6276 smul_optab->libcall_basename = "mul";
6277 smul_optab->libcall_suffix = '3';
6278 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6279 smulv_optab->libcall_basename = "mul";
6280 smulv_optab->libcall_suffix = '3';
6281 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6282 ssmul_optab->libcall_basename = "ssmul";
6283 ssmul_optab->libcall_suffix = '3';
6284 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6285 usmul_optab->libcall_basename = "usmul";
6286 usmul_optab->libcall_suffix = '3';
6287 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6288 sdiv_optab->libcall_basename = "div";
6289 sdiv_optab->libcall_suffix = '3';
6290 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6291 sdivv_optab->libcall_basename = "divv";
6292 sdivv_optab->libcall_suffix = '3';
6293 sdivv_optab->libcall_gen = gen_int_libfunc;
6294 ssdiv_optab->libcall_basename = "ssdiv";
6295 ssdiv_optab->libcall_suffix = '3';
6296 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6297 udiv_optab->libcall_basename = "udiv";
6298 udiv_optab->libcall_suffix = '3';
6299 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6300 usdiv_optab->libcall_basename = "usdiv";
6301 usdiv_optab->libcall_suffix = '3';
6302 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6303 sdivmod_optab->libcall_basename = "divmod";
6304 sdivmod_optab->libcall_suffix = '4';
6305 sdivmod_optab->libcall_gen = gen_int_libfunc;
6306 udivmod_optab->libcall_basename = "udivmod";
6307 udivmod_optab->libcall_suffix = '4';
6308 udivmod_optab->libcall_gen = gen_int_libfunc;
6309 smod_optab->libcall_basename = "mod";
6310 smod_optab->libcall_suffix = '3';
6311 smod_optab->libcall_gen = gen_int_libfunc;
6312 umod_optab->libcall_basename = "umod";
6313 umod_optab->libcall_suffix = '3';
6314 umod_optab->libcall_gen = gen_int_libfunc;
6315 ftrunc_optab->libcall_basename = "ftrunc";
6316 ftrunc_optab->libcall_suffix = '2';
6317 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6318 and_optab->libcall_basename = "and";
6319 and_optab->libcall_suffix = '3';
6320 and_optab->libcall_gen = gen_int_libfunc;
6321 ior_optab->libcall_basename = "ior";
6322 ior_optab->libcall_suffix = '3';
6323 ior_optab->libcall_gen = gen_int_libfunc;
6324 xor_optab->libcall_basename = "xor";
6325 xor_optab->libcall_suffix = '3';
6326 xor_optab->libcall_gen = gen_int_libfunc;
6327 ashl_optab->libcall_basename = "ashl";
6328 ashl_optab->libcall_suffix = '3';
6329 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6330 ssashl_optab->libcall_basename = "ssashl";
6331 ssashl_optab->libcall_suffix = '3';
6332 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6333 usashl_optab->libcall_basename = "usashl";
6334 usashl_optab->libcall_suffix = '3';
6335 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6336 ashr_optab->libcall_basename = "ashr";
6337 ashr_optab->libcall_suffix = '3';
6338 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6339 lshr_optab->libcall_basename = "lshr";
6340 lshr_optab->libcall_suffix = '3';
6341 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6342 smin_optab->libcall_basename = "min";
6343 smin_optab->libcall_suffix = '3';
6344 smin_optab->libcall_gen = gen_int_fp_libfunc;
6345 smax_optab->libcall_basename = "max";
6346 smax_optab->libcall_suffix = '3';
6347 smax_optab->libcall_gen = gen_int_fp_libfunc;
6348 umin_optab->libcall_basename = "umin";
6349 umin_optab->libcall_suffix = '3';
6350 umin_optab->libcall_gen = gen_int_libfunc;
6351 umax_optab->libcall_basename = "umax";
6352 umax_optab->libcall_suffix = '3';
6353 umax_optab->libcall_gen = gen_int_libfunc;
6354 neg_optab->libcall_basename = "neg";
6355 neg_optab->libcall_suffix = '2';
6356 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6357 ssneg_optab->libcall_basename = "ssneg";
6358 ssneg_optab->libcall_suffix = '2';
6359 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6360 usneg_optab->libcall_basename = "usneg";
6361 usneg_optab->libcall_suffix = '2';
6362 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6363 negv_optab->libcall_basename = "neg";
6364 negv_optab->libcall_suffix = '2';
6365 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6366 one_cmpl_optab->libcall_basename = "one_cmpl";
6367 one_cmpl_optab->libcall_suffix = '2';
6368 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6369 ffs_optab->libcall_basename = "ffs";
6370 ffs_optab->libcall_suffix = '2';
6371 ffs_optab->libcall_gen = gen_int_libfunc;
6372 clz_optab->libcall_basename = "clz";
6373 clz_optab->libcall_suffix = '2';
6374 clz_optab->libcall_gen = gen_int_libfunc;
6375 ctz_optab->libcall_basename = "ctz";
6376 ctz_optab->libcall_suffix = '2';
6377 ctz_optab->libcall_gen = gen_int_libfunc;
6378 clrsb_optab->libcall_basename = "clrsb";
6379 clrsb_optab->libcall_suffix = '2';
6380 clrsb_optab->libcall_gen = gen_int_libfunc;
6381 popcount_optab->libcall_basename = "popcount";
6382 popcount_optab->libcall_suffix = '2';
6383 popcount_optab->libcall_gen = gen_int_libfunc;
6384 parity_optab->libcall_basename = "parity";
6385 parity_optab->libcall_suffix = '2';
6386 parity_optab->libcall_gen = gen_int_libfunc;
6387
6388 /* Comparison libcalls for integers MUST come in pairs,
6389 signed/unsigned. */
6390 cmp_optab->libcall_basename = "cmp";
6391 cmp_optab->libcall_suffix = '2';
6392 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6393 ucmp_optab->libcall_basename = "ucmp";
6394 ucmp_optab->libcall_suffix = '2';
6395 ucmp_optab->libcall_gen = gen_int_libfunc;
6396
6397 /* EQ etc are floating point only. */
6398 eq_optab->libcall_basename = "eq";
6399 eq_optab->libcall_suffix = '2';
6400 eq_optab->libcall_gen = gen_fp_libfunc;
6401 ne_optab->libcall_basename = "ne";
6402 ne_optab->libcall_suffix = '2';
6403 ne_optab->libcall_gen = gen_fp_libfunc;
6404 gt_optab->libcall_basename = "gt";
6405 gt_optab->libcall_suffix = '2';
6406 gt_optab->libcall_gen = gen_fp_libfunc;
6407 ge_optab->libcall_basename = "ge";
6408 ge_optab->libcall_suffix = '2';
6409 ge_optab->libcall_gen = gen_fp_libfunc;
6410 lt_optab->libcall_basename = "lt";
6411 lt_optab->libcall_suffix = '2';
6412 lt_optab->libcall_gen = gen_fp_libfunc;
6413 le_optab->libcall_basename = "le";
6414 le_optab->libcall_suffix = '2';
6415 le_optab->libcall_gen = gen_fp_libfunc;
6416 unord_optab->libcall_basename = "unord";
6417 unord_optab->libcall_suffix = '2';
6418 unord_optab->libcall_gen = gen_fp_libfunc;
6419
6420 powi_optab->libcall_basename = "powi";
6421 powi_optab->libcall_suffix = '2';
6422 powi_optab->libcall_gen = gen_fp_libfunc;
6423
6424 /* Conversions. */
6425 sfloat_optab->libcall_basename = "float";
6426 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6427 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6428 sfix_optab->libcall_basename = "fix";
6429 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6430 ufix_optab->libcall_basename = "fixuns";
6431 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6432 lrint_optab->libcall_basename = "lrint";
6433 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6434 lround_optab->libcall_basename = "lround";
6435 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6436 lfloor_optab->libcall_basename = "lfloor";
6437 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6438 lceil_optab->libcall_basename = "lceil";
6439 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6440
6441 /* sext_optab is also used for FLOAT_EXTEND. */
6442 sext_optab->libcall_basename = "extend";
6443 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6444 trunc_optab->libcall_basename = "trunc";
6445 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6446
6447 /* Conversions for fixed-point modes and other modes. */
6448 fract_optab->libcall_basename = "fract";
6449 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6450 satfract_optab->libcall_basename = "satfract";
6451 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6452 fractuns_optab->libcall_basename = "fractuns";
6453 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6454 satfractuns_optab->libcall_basename = "satfractuns";
6455 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6456
6457 /* The ffs function operates on `int'. Fall back on it if we do not
6458 have a libgcc2 function for that width. */
6459 if (INT_TYPE_SIZE < BITS_PER_WORD)
6460 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6461 "ffs");
6462
6463 /* Explicitly initialize the bswap libfuncs since we need them to be
6464 valid for things other than word_mode. */
6465 if (targetm.libfunc_gnu_prefix)
6466 {
6467 set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
6468 set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
6469 }
6470 else
6471 {
6472 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6473 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6474 }
6475
6476 /* Use cabs for double complex abs, since systems generally have cabs.
6477 Don't define any libcall for float complex, so that cabs will be used. */
6478 if (complex_double_type_node)
6479 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6480
6481 abort_libfunc = init_one_libfunc ("abort");
6482 memcpy_libfunc = init_one_libfunc ("memcpy");
6483 memmove_libfunc = init_one_libfunc ("memmove");
6484 memcmp_libfunc = init_one_libfunc ("memcmp");
6485 memset_libfunc = init_one_libfunc ("memset");
6486 setbits_libfunc = init_one_libfunc ("__setbits");
6487
6488 #ifndef DONT_USE_BUILTIN_SETJMP
6489 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6490 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6491 #else
6492 setjmp_libfunc = init_one_libfunc ("setjmp");
6493 longjmp_libfunc = init_one_libfunc ("longjmp");
6494 #endif
6495 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6496 unwind_sjlj_unregister_libfunc
6497 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6498
6499 /* For function entry/exit instrumentation. */
6500 profile_function_entry_libfunc
6501 = init_one_libfunc ("__cyg_profile_func_enter");
6502 profile_function_exit_libfunc
6503 = init_one_libfunc ("__cyg_profile_func_exit");
6504
6505 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6506
6507 /* Allow the target to add more libcalls or rename some, etc. */
6508 targetm.init_libfuncs ();
6509 }
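/* Editor's note: the targetm.init_libfuncs hook above is where a port
   swaps in ABI-specific names.  As a hedged example (the exact names
   below belong to the ARM EABI port, not to this file), a target might
   do

     set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
     set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

   to replace the default "__divsi3" / "__udivsi3" entries.  */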
6510
6511 /* Print information about the current contents of the optabs on
6512 STDERR. */
6513
6514 DEBUG_FUNCTION void
6515 debug_optab_libfuncs (void)
6516 {
6517 int i;
6518 int j;
6519 int k;
6520
6521 /* Dump the arithmetic optabs. */
6522 for (i = 0; i != (int) OTI_MAX; i++)
6523 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6524 {
6525 optab o;
6526 rtx l;
6527
6528 o = &optab_table[i];
6529 l = optab_libfunc (o, (enum machine_mode) j);
6530 if (l)
6531 {
6532 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6533 fprintf (stderr, "%s\t%s:\t%s\n",
6534 GET_RTX_NAME (o->code),
6535 GET_MODE_NAME (j),
6536 XSTR (l, 0));
6537 }
6538 }
6539
6540 /* Dump the conversion optabs. */
6541 for (i = 0; i < (int) COI_MAX; ++i)
6542 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6543 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6544 {
6545 convert_optab o;
6546 rtx l;
6547
6548 o = &convert_optab_table[i];
6549 l = convert_optab_libfunc (o, (enum machine_mode) j,
6550 (enum machine_mode) k);
6551 if (l)
6552 {
6553 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6554 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6555 GET_RTX_NAME (o->code),
6556 GET_MODE_NAME (j),
6557 GET_MODE_NAME (k),
6558 XSTR (l, 0));
6559 }
6560 }
6561 }
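/* Editor's note: each registered libfunc comes out as one line, e.g.
   (assuming the default names) something like

     plus      SF:   __addsf3
     truncate  SF    DF:   __truncdfsf2

   where the first column is GET_RTX_NAME of the optab's code.  */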
6562
6563 \f
6564 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6565 CODE. Return 0 on failure. */
6566
6567 rtx
6568 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6569 {
6570 enum machine_mode mode = GET_MODE (op1);
6571 enum insn_code icode;
6572 rtx insn;
6573 rtx trap_rtx;
6574
6575 if (mode == VOIDmode)
6576 return 0;
6577
6578 icode = optab_handler (ctrap_optab, mode);
6579 if (icode == CODE_FOR_nothing)
6580 return 0;
6581
6582 /* Some targets only accept a zero trap code. */
6583 if (!insn_operand_matches (icode, 3, tcode))
6584 return 0;
6585
6586 do_pending_stack_adjust ();
6587 start_sequence ();
6588 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6589 &trap_rtx, &mode);
6590 if (!trap_rtx)
6591 insn = NULL_RTX;
6592 else
6593 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6594 tcode);
6595
6596 /* If that failed, then give up. */
6597 if (insn == 0)
6598 {
6599 end_sequence ();
6600 return 0;
6601 }
6602
6603 emit_insn (insn);
6604 insn = get_insns ();
6605 end_sequence ();
6606 return insn;
6607 }
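/* Example use (editor's sketch, OP0 is a hypothetical operand): a caller
   wanting to trap when OP0 is zero, with a zero trap code (the only code
   some targets accept, as noted above), would do roughly

     rtx seq = gen_cond_trap (EQ, op0, const0_rtx, const0_rtx);

   and emit SEQ when it is non-null, falling back to an explicit compare,
   branch and trap otherwise.  */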
6608
6609 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6610 or unsigned operation code. */
6611
6612 static enum rtx_code
6613 get_rtx_code (enum tree_code tcode, bool unsignedp)
6614 {
6615 enum rtx_code code;
6616 switch (tcode)
6617 {
6618 case EQ_EXPR:
6619 code = EQ;
6620 break;
6621 case NE_EXPR:
6622 code = NE;
6623 break;
6624 case LT_EXPR:
6625 code = unsignedp ? LTU : LT;
6626 break;
6627 case LE_EXPR:
6628 code = unsignedp ? LEU : LE;
6629 break;
6630 case GT_EXPR:
6631 code = unsignedp ? GTU : GT;
6632 break;
6633 case GE_EXPR:
6634 code = unsignedp ? GEU : GE;
6635 break;
6636
6637 case UNORDERED_EXPR:
6638 code = UNORDERED;
6639 break;
6640 case ORDERED_EXPR:
6641 code = ORDERED;
6642 break;
6643 case UNLT_EXPR:
6644 code = UNLT;
6645 break;
6646 case UNLE_EXPR:
6647 code = UNLE;
6648 break;
6649 case UNGT_EXPR:
6650 code = UNGT;
6651 break;
6652 case UNGE_EXPR:
6653 code = UNGE;
6654 break;
6655 case UNEQ_EXPR:
6656 code = UNEQ;
6657 break;
6658 case LTGT_EXPR:
6659 code = LTGT;
6660 break;
6661
6662 default:
6663 gcc_unreachable ();
6664 }
6665 return code;
6666 }
6667
6668 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6669 unsigned operators. Do not generate compare instruction. */
6670
6671 static rtx
6672 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6673 {
6674 struct expand_operand ops[2];
6675 enum rtx_code rcode;
6676 tree t_op0, t_op1;
6677 rtx rtx_op0, rtx_op1;
6678
6679 /* This assert is unlikely to fire: while generating a VEC_COND_EXPR, the
6680 auto-vectorizer ensures that the condition is a relational operation. */
6681 gcc_assert (COMPARISON_CLASS_P (cond));
6682
6683 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6684 t_op0 = TREE_OPERAND (cond, 0);
6685 t_op1 = TREE_OPERAND (cond, 1);
6686
6687 /* Expand operands. */
6688 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6689 EXPAND_STACK_PARM);
6690 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6691 EXPAND_STACK_PARM);
6692
6693 create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
6694 create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
6695 if (!maybe_legitimize_operands (icode, 4, 2, ops))
6696 gcc_unreachable ();
6697 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
6698 }
6699
6700 /* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
6701 of the CPU. SEL may be NULL, which stands for an unknown constant. */
6702
6703 bool
6704 can_vec_perm_p (enum machine_mode mode, bool variable,
6705 const unsigned char *sel)
6706 {
6707 enum machine_mode qimode;
6708
6709 /* If the target doesn't implement a vector mode for the vector type,
6710 then no operations are supported. */
6711 if (!VECTOR_MODE_P (mode))
6712 return false;
6713
6714 if (!variable)
6715 {
6716 if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
6717 && (sel == NULL
6718 || targetm.vectorize.vec_perm_const_ok == NULL
6719 || targetm.vectorize.vec_perm_const_ok (mode, sel)))
6720 return true;
6721 }
6722
6723 if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
6724 return true;
6725
6726 /* We allow fallback to a QI vector mode, and adjust the mask. */
6727 if (GET_MODE_INNER (mode) == QImode)
6728 return false;
6729 qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
6730 if (!VECTOR_MODE_P (qimode))
6731 return false;
6732
6733 /* ??? For completeness, we ought to check the QImode version of
6734 vec_perm_const_optab. But all users of this implicit lowering
6735 feature implement the variable vec_perm_optab. */
6736 if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
6737 return false;
6738
6739 /* In order to support the lowering of variable permutations,
6740 we need to support shifts and adds. */
6741 if (variable)
6742 {
6743 if (GET_MODE_UNIT_SIZE (mode) > 2
6744 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
6745 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
6746 return false;
6747 if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
6748 return false;
6749 }
6750
6751 return true;
6752 }
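/* Example use (editor's sketch, assuming the target provides V16QImode):
   asking whether a byte reversal can be done as a constant permute, and
   whether a run-time selector is supported at all:

     static const unsigned char rev[16]
       = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
     bool const_ok = can_vec_perm_p (V16QImode, false, rev);
     bool var_ok = can_vec_perm_p (V16QImode, true, NULL);  */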
6753
6754 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
6755
6756 static rtx
6757 expand_vec_perm_1 (enum insn_code icode, rtx target,
6758 rtx v0, rtx v1, rtx sel)
6759 {
6760 enum machine_mode tmode = GET_MODE (target);
6761 enum machine_mode smode = GET_MODE (sel);
6762 struct expand_operand ops[4];
6763
6764 create_output_operand (&ops[0], target, tmode);
6765 create_input_operand (&ops[3], sel, smode);
6766
6767 /* Make an effort to preserve v0 == v1. The target expander is able to
6768 rely on this to determine if we're permuting a single input operand. */
6769 if (rtx_equal_p (v0, v1))
6770 {
6771 if (!insn_operand_matches (icode, 1, v0))
6772 v0 = force_reg (tmode, v0);
6773 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6774 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6775
6776 create_fixed_operand (&ops[1], v0);
6777 create_fixed_operand (&ops[2], v0);
6778 }
6779 else
6780 {
6781 create_input_operand (&ops[1], v0, tmode);
6782 create_input_operand (&ops[2], v1, tmode);
6783 }
6784
6785 if (maybe_expand_insn (icode, 4, ops))
6786 return ops[0].value;
6787 return NULL_RTX;
6788 }
6789
6790 /* Generate instructions for vec_perm optab given its mode
6791 and three operands. */
6792
6793 rtx
6794 expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6795 {
6796 enum insn_code icode;
6797 enum machine_mode qimode;
6798 unsigned int i, w, e, u;
6799 rtx tmp, sel_qi;
6800 rtvec vec;
6801
6802 if (!target || GET_MODE (target) != mode)
6803 target = gen_reg_rtx (mode);
6804
6805 w = GET_MODE_SIZE (mode);
6806 e = GET_MODE_NUNITS (mode);
6807 u = GET_MODE_UNIT_SIZE (mode);
6808
6809 /* Set QIMODE to a different vector mode with byte elements.
6810 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6811 qimode = VOIDmode;
6812 if (GET_MODE_INNER (mode) != QImode)
6813 {
6814 qimode = mode_for_vector (QImode, w);
6815 if (!VECTOR_MODE_P (qimode))
6816 qimode = VOIDmode;
6817 }
6818
6819 /* If the input is a constant, expand it specially. */
6820 if (CONSTANT_P (sel))
6821 {
6822 icode = direct_optab_handler (vec_perm_const_optab, mode);
6823 if (icode != CODE_FOR_nothing)
6824 {
6825 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6826 if (tmp)
6827 return tmp;
6828 }
6829
6830 /* Fall back to a constant byte-based permutation. */
6831 if (qimode != VOIDmode)
6832 {
6833 icode = direct_optab_handler (vec_perm_const_optab, qimode);
6834 if (icode != CODE_FOR_nothing)
6835 {
6836 vec = rtvec_alloc (w);
6837 for (i = 0; i < e; ++i)
6838 {
6839 unsigned int j, this_e;
6840
6841 this_e = INTVAL (XVECEXP (sel, 0, i));
6842 this_e &= 2 * e - 1;
6843 this_e *= u;
6844
6845 for (j = 0; j < u; ++j)
6846 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
6847 }
6848 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
6849
6850 tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
6851 gen_lowpart (qimode, v0),
6852 gen_lowpart (qimode, v1), sel_qi);
6853 if (tmp)
6854 return gen_lowpart (mode, tmp);
6855 }
6856 }
6857 }
6858
6859 /* Otherwise expand as a fully variable permutation. */
6860 icode = direct_optab_handler (vec_perm_optab, mode);
6861 if (icode != CODE_FOR_nothing)
6862 {
6863 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6864 if (tmp)
6865 return tmp;
6866 }
6867
6868 /* As a special case to aid several targets, lower the element-based
6869 permutation to a byte-based permutation and try again. */
6870 if (qimode == VOIDmode)
6871 return NULL_RTX;
6872 icode = direct_optab_handler (vec_perm_optab, qimode);
6873 if (icode == CODE_FOR_nothing)
6874 return NULL_RTX;
6875
6876 /* Multiply each element by its byte size. */
6877 if (u == 2)
6878 sel = expand_simple_binop (mode, PLUS, sel, sel, sel, 0, OPTAB_DIRECT);
6879 else
6880 sel = expand_simple_binop (mode, ASHIFT, sel, GEN_INT (exact_log2 (u)),
6881 sel, 0, OPTAB_DIRECT);
6882 gcc_assert (sel != NULL);
6883
6884 /* Broadcast the low byte of each element into each of its bytes. */
6885 vec = rtvec_alloc (w);
6886 for (i = 0; i < w; ++i)
6887 {
6888 int this_e = i / u * u;
6889 if (BYTES_BIG_ENDIAN)
6890 this_e += u - 1;
6891 RTVEC_ELT (vec, i) = GEN_INT (this_e);
6892 }
6893 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
6894 sel = gen_lowpart (qimode, sel);
6895 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
6896 gcc_assert (sel != NULL);
6897
6898 /* Add the byte offset to each byte element. */
6899 /* Note that the definition of the indices here is memory ordering,
6900 so there should be no difference between big and little endian. */
6901 vec = rtvec_alloc (w);
6902 for (i = 0; i < w; ++i)
6903 RTVEC_ELT (vec, i) = GEN_INT (i % u);
6904 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
6905 sel = expand_simple_binop (qimode, PLUS, sel, tmp, sel, 0, OPTAB_DIRECT);
6906 gcc_assert (sel != NULL);
6907
6908 tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
6909 gen_lowpart (qimode, v0),
6910 gen_lowpart (qimode, v1), sel);
6911 if (tmp)
6912 tmp = gen_lowpart (mode, tmp);
6913 return tmp;
6914 }
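/* Worked example of the byte-based lowering above (editor's addition):
   for V4SImode (w = 16, e = 4, u = 4) and the variable selector
   { 2, 0, 3, 1 }, the three steps give, in memory order,

     scale by u         ->  { 8, 0, 12, 4 }
     broadcast low byte ->  { 8,8,8,8,  0,0,0,0,  12,12,12,12,  4,4,4,4 }
     add i % u          ->  { 8,9,10,11,  0,1,2,3,  12,13,14,15,  4,5,6,7 }

   which is the equivalent V16QImode selector for the same permutation.  */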
6915
6916
6917 /* Return insn code for a conditional operator with a comparison in
6918 mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE. */
6919
6920 static inline enum insn_code
6921 get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
6922 {
6923 enum insn_code icode = CODE_FOR_nothing;
6924 if (uns)
6925 icode = convert_optab_handler (vcondu_optab, vmode, cmode);
6926 else
6927 icode = convert_optab_handler (vcond_optab, vmode, cmode);
6928 return icode;
6929 }
6930
6931 /* Return TRUE iff appropriate vector insns are available
6932 for a vector cond expr with vector type VALUE_TYPE and a comparison
6933 with operand vector types in CMP_OP_TYPE. */
6934
6935 bool
6936 expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
6937 {
6938 enum machine_mode value_mode = TYPE_MODE (value_type);
6939 enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
6940 if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
6941 || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
6942 || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
6943 TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
6944 return false;
6945 return true;
6946 }
6947
6948 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
6949 three operands. */
6950
6951 rtx
6952 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
6953 rtx target)
6954 {
6955 struct expand_operand ops[6];
6956 enum insn_code icode;
6957 rtx comparison, rtx_op1, rtx_op2;
6958 enum machine_mode mode = TYPE_MODE (vec_cond_type);
6959 enum machine_mode cmp_op_mode;
6960 bool unsignedp;
6961
6962 gcc_assert (COMPARISON_CLASS_P (op0));
6963
6964 unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
6965 cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));
6966
6967 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
6968 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
6969
6970 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
6971 if (icode == CODE_FOR_nothing)
6972 return 0;
6973
6974 comparison = vector_compare_rtx (op0, unsignedp, icode);
6975 rtx_op1 = expand_normal (op1);
6976 rtx_op2 = expand_normal (op2);
6977
6978 create_output_operand (&ops[0], target, mode);
6979 create_input_operand (&ops[1], rtx_op1, mode);
6980 create_input_operand (&ops[2], rtx_op2, mode);
6981 create_fixed_operand (&ops[3], comparison);
6982 create_fixed_operand (&ops[4], XEXP (comparison, 0));
6983 create_fixed_operand (&ops[5], XEXP (comparison, 1));
6984 expand_insn (icode, 6, ops);
6985 return ops[0].value;
6986 }
6987
6988 \f
6989 /* This is an internal subroutine of the other compare_and_swap expanders.
6990 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6991 operation. TARGET is an optional place to store the value result of
6992 the operation. ICODE is the particular instruction to expand. Return
6993 the result of the operation. */
6994
6995 static rtx
6996 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6997 rtx target, enum insn_code icode)
6998 {
6999 struct expand_operand ops[4];
7000 enum machine_mode mode = GET_MODE (mem);
7001
7002 create_output_operand (&ops[0], target, mode);
7003 create_fixed_operand (&ops[1], mem);
7004 /* OLD_VAL and NEW_VAL may have been promoted to a wider mode.
7005 Shrink them if so. */
7006 create_convert_operand_to (&ops[2], old_val, mode, true);
7007 create_convert_operand_to (&ops[3], new_val, mode, true);
7008 if (maybe_expand_insn (icode, 4, ops))
7009 return ops[0].value;
7010 return NULL_RTX;
7011 }
7012
7013 /* Expand a compare-and-swap operation and return its value. */
7014
7015 rtx
7016 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
7017 {
7018 enum machine_mode mode = GET_MODE (mem);
7019 enum insn_code icode
7020 = direct_optab_handler (sync_compare_and_swap_optab, mode);
7021
7022 if (icode == CODE_FOR_nothing)
7023 return NULL_RTX;
7024
7025 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
7026 }
7027
7028 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
7029 pattern. */
7030
7031 static void
7032 find_cc_set (rtx x, const_rtx pat, void *data)
7033 {
7034 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7035 && GET_CODE (pat) == SET)
7036 {
7037 rtx *p_cc_reg = (rtx *) data;
7038 gcc_assert (!*p_cc_reg);
7039 *p_cc_reg = x;
7040 }
7041 }
7042
7043 /* Expand a compare-and-swap operation and store true into the result if
7044 the operation was successful and false otherwise. Return the result.
7045 Unlike other routines, TARGET is not optional. */
7046
7047 rtx
7048 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
7049 {
7050 enum machine_mode mode = GET_MODE (mem);
7051 enum insn_code icode;
7052 rtx subtarget, seq, cc_reg;
7053
7054 /* If the target supports a compare-and-swap pattern that simultaneously
7055 sets some flag for success, then use it. Otherwise use the regular
7056 compare-and-swap and follow that immediately with a compare insn. */
7057 icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
7058 if (icode == CODE_FOR_nothing)
7059 return NULL_RTX;
7060
7061 do_pending_stack_adjust ();
7062 do
7063 {
7064 start_sequence ();
7065 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7066 NULL_RTX, icode);
7067 cc_reg = NULL_RTX;
7068 if (subtarget == NULL_RTX)
7069 {
7070 end_sequence ();
7071 return NULL_RTX;
7072 }
7073
7074 if (have_insn_for (COMPARE, CCmode))
7075 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7076 seq = get_insns ();
7077 end_sequence ();
7078
7079 /* We might be comparing against an old value. Try again. :-( */
7080 if (!cc_reg && MEM_P (old_val))
7081 {
7082 seq = NULL_RTX;
7083 old_val = force_reg (mode, old_val);
7084 }
7085 }
7086 while (!seq);
7087
7088 emit_insn (seq);
7089 if (cc_reg)
7090 return emit_store_flag_force (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
7091 else
7092 return emit_store_flag_force (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
7093 }
7094
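/* Illustrative usage of the boolean form (a hedged sketch in terms of
   the corresponding builtin, not of this function's RTL interface):

     if (__sync_bool_compare_and_swap (ptr, expected, desired))
       success ();

   The 0/1 result comes either from a CC flag set by the pattern or
   from comparing the returned value with OLD_VAL, as above.  */
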
7095 /* This is a helper function for the other atomic operations. It emits
7096 a loop containing SEQ that iterates until a compare-and-swap
7097 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7098 a set of instructions that takes a value from OLD_REG as an input and
7099 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7100 set to the current contents of MEM. After SEQ, a compare-and-swap will
7101 attempt to update MEM with NEW_REG. The function returns true when the
7102 loop was generated successfully. */
7103
7104 static bool
7105 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7106 {
7107 enum machine_mode mode = GET_MODE (mem);
7108 enum insn_code icode;
7109 rtx label, cmp_reg, subtarget, cc_reg;
7110
7111 /* The loop we want to generate looks like
7112
7113 cmp_reg = mem;
7114 label:
7115 old_reg = cmp_reg;
7116 seq;
7117 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7118 if (cmp_reg != old_reg)
7119 goto label;
7120
7121 Note that we only do the plain load from memory once. Subsequent
7122 iterations use the value loaded by the compare-and-swap pattern. */
7123
7124 label = gen_label_rtx ();
7125 cmp_reg = gen_reg_rtx (mode);
7126
7127 emit_move_insn (cmp_reg, mem);
7128 emit_label (label);
7129 emit_move_insn (old_reg, cmp_reg);
7130 if (seq)
7131 emit_insn (seq);
7132
7133 /* If the target supports a compare-and-swap pattern that simultaneously
7134 sets some flag for success, then use it. Otherwise use the regular
7135 compare-and-swap and follow that immediately with a compare insn. */
7136 icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
7137 if (icode == CODE_FOR_nothing)
7138 return false;
7139
7140 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7141 cmp_reg, icode);
7142 if (subtarget == NULL_RTX)
7143 return false;
7144
7145 cc_reg = NULL_RTX;
7146 if (have_insn_for (COMPARE, CCmode))
7147 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7148 if (cc_reg)
7149 {
7150 cmp_reg = cc_reg;
7151 old_reg = const0_rtx;
7152 }
7153 else
7154 {
7155 if (subtarget != cmp_reg)
7156 emit_move_insn (cmp_reg, subtarget);
7157 }
7158
7159 /* ??? Mark this jump predicted not taken? */
7160 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx, GET_MODE (cmp_reg), 1,
7161 label);
7162 return true;
7163 }
7164
7165 /* This function generates the atomic operation MEM CODE= VAL. In this
7166 case, we do not care about any resulting value. Returns const0_rtx on
7167 success and NULL_RTX if we cannot generate the operation. */
7168
7169 rtx
7170 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7171 {
7172 enum machine_mode mode = GET_MODE (mem);
7173 enum insn_code icode;
7174 rtx insn;
7175
7176 /* Look to see if the target supports the operation directly. */
7177 switch (code)
7178 {
7179 case PLUS:
7180 icode = direct_optab_handler (sync_add_optab, mode);
7181 break;
7182 case IOR:
7183 icode = direct_optab_handler (sync_ior_optab, mode);
7184 break;
7185 case XOR:
7186 icode = direct_optab_handler (sync_xor_optab, mode);
7187 break;
7188 case AND:
7189 icode = direct_optab_handler (sync_and_optab, mode);
7190 break;
7191 case NOT:
7192 icode = direct_optab_handler (sync_nand_optab, mode);
7193 break;
7194
7195 case MINUS:
7196 icode = direct_optab_handler (sync_sub_optab, mode);
7197 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7198 {
7199 icode = direct_optab_handler (sync_add_optab, mode);
7200 if (icode != CODE_FOR_nothing)
7201 {
7202 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7203 code = PLUS;
7204 }
7205 }
7206 break;
7207
7208 default:
7209 gcc_unreachable ();
7210 }
7211
7212 /* Generate the direct operation, if present. */
7213 if (icode != CODE_FOR_nothing)
7214 {
7215 struct expand_operand ops[2];
7216
7217 create_fixed_operand (&ops[0], mem);
7218 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7219 create_convert_operand_to (&ops[1], val, mode, true);
7220 if (maybe_expand_insn (icode, 2, ops))
7221 return const0_rtx;
7222 }
7223
7224 /* Failing that, generate a compare-and-swap loop in which we perform the
7225 operation with normal arithmetic instructions. */
7226 if (direct_optab_handler (sync_compare_and_swap_optab, mode)
7227 != CODE_FOR_nothing)
7228 {
7229 rtx t0 = gen_reg_rtx (mode), t1;
7230
7231 start_sequence ();
7232
7233 t1 = t0;
7234 if (code == NOT)
7235 {
7236 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7237 true, OPTAB_LIB_WIDEN);
7238 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7239 }
7240 else
7241 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7242 true, OPTAB_LIB_WIDEN);
7243 insn = get_insns ();
7244 end_sequence ();
7245
7246 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7247 return const0_rtx;
7248 }
7249
7250 return NULL_RTX;
7251 }
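
/* Conceptual C equivalent of the compare-and-swap fallback above (a
   sketch only; the real code emits RTL via
   expand_compare_and_swap_loop):

     do
       {
         old = *mem;
         new = (code == NOT ? ~(old & val) : old <op> val);
       }
     while (!compare_and_swap (mem, old, new));

   Note that NOT here denotes the nand operation ~(old & val).  */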
7252
7253 /* This function generates the atomic operation MEM CODE= VAL. In this
7254 case, we do care about the resulting value: if AFTER is true then
7255 return the value MEM holds after the operation, if AFTER is false
7256 then return the value MEM holds before the operation. TARGET is an
7257 optional place for the result value to be stored. */
7258
7259 rtx
7260 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7261 bool after, rtx target)
7262 {
7263 enum machine_mode mode = GET_MODE (mem);
7264 enum insn_code old_code, new_code, icode;
7265 bool compensate;
7266 rtx insn;
7267
7268 /* Look to see if the target supports the operation directly. */
7269 switch (code)
7270 {
7271 case PLUS:
7272 old_code = direct_optab_handler (sync_old_add_optab, mode);
7273 new_code = direct_optab_handler (sync_new_add_optab, mode);
7274 break;
7275 case IOR:
7276 old_code = direct_optab_handler (sync_old_ior_optab, mode);
7277 new_code = direct_optab_handler (sync_new_ior_optab, mode);
7278 break;
7279 case XOR:
7280 old_code = direct_optab_handler (sync_old_xor_optab, mode);
7281 new_code = direct_optab_handler (sync_new_xor_optab, mode);
7282 break;
7283 case AND:
7284 old_code = direct_optab_handler (sync_old_and_optab, mode);
7285 new_code = direct_optab_handler (sync_new_and_optab, mode);
7286 break;
7287 case NOT:
7288 old_code = direct_optab_handler (sync_old_nand_optab, mode);
7289 new_code = direct_optab_handler (sync_new_nand_optab, mode);
7290 break;
7291
7292 case MINUS:
7293 old_code = direct_optab_handler (sync_old_sub_optab, mode);
7294 new_code = direct_optab_handler (sync_new_sub_optab, mode);
7295 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7296 || CONST_INT_P (val))
7297 {
7298 old_code = direct_optab_handler (sync_old_add_optab, mode);
7299 new_code = direct_optab_handler (sync_new_add_optab, mode);
7300 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7301 {
7302 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7303 code = PLUS;
7304 }
7305 }
7306 break;
7307
7308 default:
7309 gcc_unreachable ();
7310 }
7311
7312 /* If the target supports the proper new/old operation, great. But
7313 if we only support the opposite old/new operation, check to see if we
7314 can compensate. When the old value is supported, we can always
7315 perform the operation again with normal arithmetic. When only the
7316 new value is supported, we can handle this only if the operation
7317 is reversible. */
7318 compensate = false;
7319 if (after)
7320 {
7321 icode = new_code;
7322 if (icode == CODE_FOR_nothing)
7323 {
7324 icode = old_code;
7325 if (icode != CODE_FOR_nothing)
7326 compensate = true;
7327 }
7328 }
7329 else
7330 {
7331 icode = old_code;
7332 if (icode == CODE_FOR_nothing
7333 && (code == PLUS || code == MINUS || code == XOR))
7334 {
7335 icode = new_code;
7336 if (icode != CODE_FOR_nothing)
7337 compensate = true;
7338 }
7339 }
7340
7341 /* If we found something supported, great. */
7342 if (icode != CODE_FOR_nothing)
7343 {
7344 struct expand_operand ops[3];
7345
7346 create_output_operand (&ops[0], target, mode);
7347 create_fixed_operand (&ops[1], mem);
7348 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7349 create_convert_operand_to (&ops[2], val, mode, true);
7350 if (maybe_expand_insn (icode, 3, ops))
7351 {
7352 target = ops[0].value;
7353 val = ops[2].value;
7354 /* If we need to compensate for using an operation with the
7355 wrong return value, do so now. */
7356 if (compensate)
7357 {
7358 if (!after)
7359 {
7360 if (code == PLUS)
7361 code = MINUS;
7362 else if (code == MINUS)
7363 code = PLUS;
7364 }
7365
7366 if (code == NOT)
7367 {
7368 target = expand_simple_binop (mode, AND, target, val,
7369 NULL_RTX, true,
7370 OPTAB_LIB_WIDEN);
7371 target = expand_simple_unop (mode, code, target,
7372 NULL_RTX, true);
7373 }
7374 else
7375 target = expand_simple_binop (mode, code, target, val,
7376 NULL_RTX, true,
7377 OPTAB_LIB_WIDEN);
7378 }
7379
7380 return target;
7381 }
7382 }
7383
7384 /* Failing that, generate a compare-and-swap loop in which we perform the
7385 operation with normal arithmetic instructions. */
7386 if (direct_optab_handler (sync_compare_and_swap_optab, mode)
7387 != CODE_FOR_nothing)
7388 {
7389 rtx t0 = gen_reg_rtx (mode), t1;
7390
7391 if (!target || !register_operand (target, mode))
7392 target = gen_reg_rtx (mode);
7393
7394 start_sequence ();
7395
7396 if (!after)
7397 emit_move_insn (target, t0);
7398 t1 = t0;
7399 if (code == NOT)
7400 {
7401 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7402 true, OPTAB_LIB_WIDEN);
7403 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7404 }
7405 else
7406 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7407 true, OPTAB_LIB_WIDEN);
7408 if (after)
7409 emit_move_insn (target, t1);
7410
7411 insn = get_insns ();
7412 end_sequence ();
7413
7414 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7415 return target;
7416 }
7417
7418 return NULL_RTX;
7419 }
7420
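/* Illustrative mapping (hedged; these are the usual builtin
   counterparts, see builtins.c): with CODE == PLUS,

     AFTER == false  ~  old = __sync_fetch_and_add (ptr, val);
     AFTER == true   ~  new = __sync_add_and_fetch (ptr, val);

   i.e. AFTER selects whether the value before or after the operation
   is returned.  */
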
7421 /* This function expands a test-and-set operation. Ideally we atomically
7422 store VAL in MEM and return the previous value in MEM. Some targets
7423 may not support this operation and only support VAL with the constant 1;
7424 in this case the return value will still be 0/1, but the exact value
7425 stored in MEM is target defined. TARGET is an optional place to store
7426 the return value. */
7427
7428 rtx
7429 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7430 {
7431 enum machine_mode mode = GET_MODE (mem);
7432 enum insn_code icode;
7433
7434 /* If the target supports the test-and-set directly, great. */
7435 icode = direct_optab_handler (sync_lock_test_and_set_optab, mode);
7436 if (icode != CODE_FOR_nothing)
7437 {
7438 struct expand_operand ops[3];
7439
7440 create_output_operand (&ops[0], target, mode);
7441 create_fixed_operand (&ops[1], mem);
7442 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7443 create_convert_operand_to (&ops[2], val, mode, true);
7444 if (maybe_expand_insn (icode, 3, ops))
7445 return ops[0].value;
7446 }
7447
7448 /* Otherwise, use a compare-and-swap loop for the exchange. */
7449 if (direct_optab_handler (sync_compare_and_swap_optab, mode)
7450 != CODE_FOR_nothing)
7451 {
7452 if (!target || !register_operand (target, mode))
7453 target = gen_reg_rtx (mode);
7454 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7455 val = convert_modes (mode, GET_MODE (val), val, 1);
7456 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7457 return target;
7458 }
7459
7460 return NULL_RTX;
7461 }
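
/* Illustrative usage (a hedged sketch in terms of the usual builtin):

     while (__sync_lock_test_and_set (&lock, 1))
       continue;

   relies only on the 0/1 behaviour described above when the target
   supports just the constant 1.  */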
7462 \f
7463 /* Return true if OPERAND is suitable for operand number OPNO of
7464 instruction ICODE. */
7465
7466 bool
7467 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7468 {
7469 return (!insn_data[(int) icode].operand[opno].predicate
7470 || (insn_data[(int) icode].operand[opno].predicate
7471 (operand, insn_data[(int) icode].operand[opno].mode)));
7472 }
7473 \f
7474 /* TARGET is a target of a multiword operation that we are going to
7475 implement as a series of word-mode operations. Return true if
7476 TARGET is suitable for this purpose. */
7477
7478 bool
7479 valid_multiword_target_p (rtx target)
7480 {
7481 enum machine_mode mode;
7482 int i;
7483
7484 mode = GET_MODE (target);
7485 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
7486 if (!validate_subreg (word_mode, mode, target, i))
7487 return false;
7488 return true;
7489 }
7490
7491 /* Like maybe_legitimize_operand, but do not change the code of the
7492 current rtx value. */
7493
7494 static bool
7495 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7496 struct expand_operand *op)
7497 {
7498 /* See if the operand matches in its current form. */
7499 if (insn_operand_matches (icode, opno, op->value))
7500 return true;
7501
7502 /* If the operand is a memory whose address has no side effects,
7503 try forcing the address into a register. The check for side
7504 effects is important because force_reg cannot handle things
7505 like auto-modified addresses. */
7506 if (insn_data[(int) icode].operand[opno].allows_mem
7507 && MEM_P (op->value)
7508 && !side_effects_p (XEXP (op->value, 0)))
7509 {
7510 rtx addr, mem, last;
7511
7512 last = get_last_insn ();
7513 addr = force_reg (Pmode, XEXP (op->value, 0));
7514 mem = replace_equiv_address (op->value, addr);
7515 if (insn_operand_matches (icode, opno, mem))
7516 {
7517 op->value = mem;
7518 return true;
7519 }
7520 delete_insns_since (last);
7521 }
7522
7523 return false;
7524 }
7525
7526 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7527 on success, storing the new operand value back in OP. */
7528
7529 static bool
7530 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7531 struct expand_operand *op)
7532 {
7533 enum machine_mode mode, imode;
7534 bool old_volatile_ok, result;
7535
7536 mode = op->mode;
7537 switch (op->type)
7538 {
7539 case EXPAND_FIXED:
7540 old_volatile_ok = volatile_ok;
7541 volatile_ok = true;
7542 result = maybe_legitimize_operand_same_code (icode, opno, op);
7543 volatile_ok = old_volatile_ok;
7544 return result;
7545
7546 case EXPAND_OUTPUT:
7547 gcc_assert (mode != VOIDmode);
7548 if (op->value
7549 && op->value != const0_rtx
7550 && GET_MODE (op->value) == mode
7551 && maybe_legitimize_operand_same_code (icode, opno, op))
7552 return true;
7553
7554 op->value = gen_reg_rtx (mode);
7555 break;
7556
7557 case EXPAND_INPUT:
7558 input:
7559 gcc_assert (mode != VOIDmode);
7560 gcc_assert (GET_MODE (op->value) == VOIDmode
7561 || GET_MODE (op->value) == mode);
7562 if (maybe_legitimize_operand_same_code (icode, opno, op))
7563 return true;
7564
7565 op->value = copy_to_mode_reg (mode, op->value);
7566 break;
7567
7568 case EXPAND_CONVERT_TO:
7569 gcc_assert (mode != VOIDmode);
7570 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7571 goto input;
7572
7573 case EXPAND_CONVERT_FROM:
7574 if (GET_MODE (op->value) != VOIDmode)
7575 mode = GET_MODE (op->value);
7576 else
7577 /* The caller must tell us what mode this value has. */
7578 gcc_assert (mode != VOIDmode);
7579
7580 imode = insn_data[(int) icode].operand[opno].mode;
7581 if (imode != VOIDmode && imode != mode)
7582 {
7583 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7584 mode = imode;
7585 }
7586 goto input;
7587
7588 case EXPAND_ADDRESS:
7589 gcc_assert (mode != VOIDmode);
7590 op->value = convert_memory_address (mode, op->value);
7591 goto input;
7592
7593 case EXPAND_INTEGER:
7594 mode = insn_data[(int) icode].operand[opno].mode;
7595 if (mode != VOIDmode && const_int_operand (op->value, mode))
7596 goto input;
7597 break;
7598 }
7599 return insn_operand_matches (icode, opno, op->value);
7600 }
7601
7602 /* Make OP describe an input operand that should have the same value
7603 as VALUE, after any mode conversion that the target might request.
7604 TYPE is the type of VALUE. */
7605
7606 void
7607 create_convert_operand_from_type (struct expand_operand *op,
7608 rtx value, tree type)
7609 {
7610 create_convert_operand_from (op, value, TYPE_MODE (type),
7611 TYPE_UNSIGNED (type));
7612 }
7613
7614 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7615 of instruction ICODE. Return true on success, leaving the new operand
7616 values in the OPS themselves. Emit no code on failure. */
7617
7618 bool
7619 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7620 unsigned int nops, struct expand_operand *ops)
7621 {
7622 rtx last;
7623 unsigned int i;
7624
7625 last = get_last_insn ();
7626 for (i = 0; i < nops; i++)
7627 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7628 {
7629 delete_insns_since (last);
7630 return false;
7631 }
7632 return true;
7633 }
7634
7635 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7636 as its operands. Return the instruction pattern on success,
7637 and emit any necessary set-up code. Return null and emit no
7638 code on failure. */
7639
7640 rtx
7641 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7642 struct expand_operand *ops)
7643 {
7644 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7645 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7646 return NULL_RTX;
7647
7648 switch (nops)
7649 {
7650 case 1:
7651 return GEN_FCN (icode) (ops[0].value);
7652 case 2:
7653 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7654 case 3:
7655 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7656 case 4:
7657 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7658 ops[3].value);
7659 case 5:
7660 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7661 ops[3].value, ops[4].value);
7662 case 6:
7663 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7664 ops[3].value, ops[4].value, ops[5].value);
7665 }
7666 gcc_unreachable ();
7667 }
7668
7669 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7670 as its operands. Return true on success and emit no code on failure. */
7671
7672 bool
7673 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7674 struct expand_operand *ops)
7675 {
7676 rtx pat = maybe_gen_insn (icode, nops, ops);
7677 if (pat)
7678 {
7679 emit_insn (pat);
7680 return true;
7681 }
7682 return false;
7683 }
7684
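/* Typical usage of the expand_operand interface (a conceptual sketch;
   real uses appear earlier in this file, e.g. in
   expand_sync_lock_test_and_set):

     struct expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_fixed_operand (&ops[1], mem);
     create_convert_operand_to (&ops[2], val, mode, true);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;

   On failure no code is emitted.  */
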
7685 /* Like maybe_expand_insn, but for jumps. */
7686
7687 bool
7688 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7689 struct expand_operand *ops)
7690 {
7691 rtx pat = maybe_gen_insn (icode, nops, ops);
7692 if (pat)
7693 {
7694 emit_jump_insn (pat);
7695 return true;
7696 }
7697 return false;
7698 }
7699
7700 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7701 as its operands. */
7702
7703 void
7704 expand_insn (enum insn_code icode, unsigned int nops,
7705 struct expand_operand *ops)
7706 {
7707 if (!maybe_expand_insn (icode, nops, ops))
7708 gcc_unreachable ();
7709 }
7710
7711 /* Like expand_insn, but for jumps. */
7712
7713 void
7714 expand_jump_insn (enum insn_code icode, unsigned int nops,
7715 struct expand_operand *ops)
7716 {
7717 if (!maybe_expand_jump_insn (icode, nops, ops))
7718 gcc_unreachable ();
7719 }
7720
7721 #include "gt-optabs.h"