1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 optab optab_table[OTI_MAX];
58
59 rtx libfunc_table[LTI_MAX];
60
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[CTI_MAX];
63
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
66
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
68 gives the gen_function to make a branch to test that condition. */
69
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
71
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
75
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
77
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (e.g. for the ARM). */
83
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
86
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
89
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
92
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
97
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
127
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
132 \f
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
136
137 If the last insn does not set TARGET, don't do anything, but return 1.
138
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
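/* For instance (illustrative sketch of the behavior implemented below):
   if INSNS ends with an insn that sets TARGET to (plus:SI X Y), the note
   attached is a (plus:SI ...) built from copies of OP0 and OP1 in the mode
   of TARGET, so later passes can see TARGET's value without rescanning the
   whole sequence. */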
142
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
145 {
146 rtx last_insn, insn, set;
147 rtx note;
148
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
150
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
157
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
160
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
164 ;
165
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
169
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
175
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
180 {
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
183 {
184 if (reg_set_p (target, insn))
185 return 0;
186
187 insn = PREV_INSN (insn);
188 }
189 }
190
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
195
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
197
198 return 1;
199 }
200 \f
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
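/* As a rough example of the NO_EXTEND case: widening a QImode value to
   SImode for an AND or IOR can simply reuse the value in a wider
   (paradoxical) SUBREG, because the extra high-order bits never reach the
   narrow result. A right shift, by contrast, pulls those high bits down
   into the result, so a real sign- or zero-extension is required.
   (Illustrative note; the cases are handled below.) */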
206
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
210 {
211 rtx result;
212
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
216
217 /* If we must extend, do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
224
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
229
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
232
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
237 }
238 \f
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
246 {
247 bool trapv;
248 switch (code)
249 {
250 case BIT_AND_EXPR:
251 return and_optab;
252
253 case BIT_IOR_EXPR:
254 return ior_optab;
255
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
258
259 case BIT_XOR_EXPR:
260 return xor_optab;
261
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
267
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
275
276 case LSHIFT_EXPR:
277 return ashl_optab;
278
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
281
282 case LROTATE_EXPR:
283 return rotl_optab;
284
285 case RROTATE_EXPR:
286 return rotr_optab;
287
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
290
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
293
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
296
297 case REDUC_MAX_EXPR:
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
299
300 case REDUC_MIN_EXPR:
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
302
303 case REDUC_PLUS_EXPR:
304 return reduc_plus_optab;
305
306 default:
307 break;
308 }
309
310 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
311 switch (code)
312 {
313 case PLUS_EXPR:
314 return trapv ? addv_optab : add_optab;
315
316 case MINUS_EXPR:
317 return trapv ? subv_optab : sub_optab;
318
319 case MULT_EXPR:
320 return trapv ? smulv_optab : smul_optab;
321
322 case NEGATE_EXPR:
323 return trapv ? negv_optab : neg_optab;
324
325 case ABS_EXPR:
326 return trapv ? absv_optab : abs_optab;
327
328 default:
329 return NULL;
330 }
331 }
332 \f
333
334 /* Generate code to perform an operation specified by TERNARY_OPTAB
335 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
336
337 UNSIGNEDP is for the case where we have to widen the operands
338 to perform the operation. It says to use zero-extension.
339
340 If TARGET is nonzero, the value
341 is generated there, if it is convenient to do so.
342 In all cases an rtx is returned for the locus of the value;
343 this may or may not be TARGET. */
344
345 rtx
346 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
347 rtx op1, rtx op2, rtx target, int unsignedp)
348 {
349 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
350 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
351 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
352 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
353 rtx temp;
354 rtx pat;
355 rtx xop0 = op0, xop1 = op1, xop2 = op2;
356
357 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
358 != CODE_FOR_nothing);
359
360 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
361 temp = gen_reg_rtx (mode);
362 else
363 temp = target;
364
365 /* In case the insn wants input operands in modes different from
366 those of the actual operands, convert the operands. It would
367 seem that we don't need to convert CONST_INTs, but we do, so
368 that they're properly zero-extended, sign-extended or truncated
369 for their mode. */
370
371 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
372 xop0 = convert_modes (mode0,
373 GET_MODE (op0) != VOIDmode
374 ? GET_MODE (op0)
375 : mode,
376 xop0, unsignedp);
377
378 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
379 xop1 = convert_modes (mode1,
380 GET_MODE (op1) != VOIDmode
381 ? GET_MODE (op1)
382 : mode,
383 xop1, unsignedp);
384
385 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
386 xop2 = convert_modes (mode2,
387 GET_MODE (op2) != VOIDmode
388 ? GET_MODE (op2)
389 : mode,
390 xop2, unsignedp);
391
392 /* Now, if insn's predicates don't allow our operands, put them into
393 pseudo regs. */
394
395 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
396 && mode0 != VOIDmode)
397 xop0 = copy_to_mode_reg (mode0, xop0);
398
399 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
400 && mode1 != VOIDmode)
401 xop1 = copy_to_mode_reg (mode1, xop1);
402
403 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
404 && mode2 != VOIDmode)
405 xop2 = copy_to_mode_reg (mode2, xop2);
406
407 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
408
409 emit_insn (pat);
410 return temp;
411 }
412
413
414 /* Like expand_binop, but return a constant rtx if the result can be
415 calculated at compile time. The arguments and return value are
416 otherwise the same as for expand_binop. */
417
418 static rtx
419 simplify_expand_binop (enum machine_mode mode, optab binoptab,
420 rtx op0, rtx op1, rtx target, int unsignedp,
421 enum optab_methods methods)
422 {
423 if (CONSTANT_P (op0) && CONSTANT_P (op1))
424 return simplify_gen_binary (binoptab->code, mode, op0, op1);
425 else
426 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
427 }
428
429 /* Like simplify_expand_binop, but always put the result in TARGET.
430 Return true if the expansion succeeded. */
431
432 bool
433 force_expand_binop (enum machine_mode mode, optab binoptab,
434 rtx op0, rtx op1, rtx target, int unsignedp,
435 enum optab_methods methods)
436 {
437 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
438 target, unsignedp, methods);
439 if (x == 0)
440 return false;
441 if (x != target)
442 emit_move_insn (target, x);
443 return true;
444 }
445
446 /* This subroutine of expand_doubleword_shift handles the cases in which
447 the effective shift value is >= BITS_PER_WORD. The arguments and return
448 value are the same as for the parent routine, except that SUPERWORD_OP1
449 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
450 INTO_TARGET may be null if the caller has decided to calculate it. */
451
452 static bool
453 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
454 rtx outof_target, rtx into_target,
455 int unsignedp, enum optab_methods methods)
456 {
457 if (into_target != 0)
458 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
459 into_target, unsignedp, methods))
460 return false;
461
462 if (outof_target != 0)
463 {
464 /* For a signed right shift, we must fill OUTOF_TARGET with copies
465 of the sign bit, otherwise we must fill it with zeros. */
466 if (binoptab != ashr_optab)
467 emit_move_insn (outof_target, CONST0_RTX (word_mode));
468 else
469 if (!force_expand_binop (word_mode, binoptab,
470 outof_input, GEN_INT (BITS_PER_WORD - 1),
471 outof_target, unsignedp, methods))
472 return false;
473 }
474 return true;
475 }
476
477 /* This subroutine of expand_doubleword_shift handles the cases in which
478 the effective shift value is < BITS_PER_WORD. The arguments and return
479 value are the same as for the parent routine. */
480
481 static bool
482 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
483 rtx outof_input, rtx into_input, rtx op1,
484 rtx outof_target, rtx into_target,
485 int unsignedp, enum optab_methods methods,
486 unsigned HOST_WIDE_INT shift_mask)
487 {
488 optab reverse_unsigned_shift, unsigned_shift;
489 rtx tmp, carries;
490
491 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
492 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
493
494 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
495 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
496 the opposite direction to BINOPTAB. */
497 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
498 {
499 carries = outof_input;
500 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
501 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
502 0, true, methods);
503 }
504 else
505 {
506 /* We must avoid shifting by BITS_PER_WORD bits since that is either
507 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
508 has unknown behavior. Do a single shift first, then shift by the
509 remainder. It's OK to use ~OP1 as the remainder if shift counts
510 are truncated to the mode size. */
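 /* Worked example (illustrative): with BITS_PER_WORD == 32 and OP1 == 5,
 we first shift the carries by 1 and then by ~5, which a truncating
 target reduces to 26 == (32 - 1) - 5, giving the required total
 of 27 == 32 - 5. */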
511 carries = expand_binop (word_mode, reverse_unsigned_shift,
512 outof_input, const1_rtx, 0, unsignedp, methods);
513 if (shift_mask == BITS_PER_WORD - 1)
514 {
515 tmp = immed_double_const (-1, -1, op1_mode);
516 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
517 0, true, methods);
518 }
519 else
520 {
521 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
522 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
523 0, true, methods);
524 }
525 }
526 if (tmp == 0 || carries == 0)
527 return false;
528 carries = expand_binop (word_mode, reverse_unsigned_shift,
529 carries, tmp, 0, unsignedp, methods);
530 if (carries == 0)
531 return false;
532
533 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
534 so the result can go directly into INTO_TARGET if convenient. */
535 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
536 into_target, unsignedp, methods);
537 if (tmp == 0)
538 return false;
539
540 /* Now OR in the bits carried over from OUTOF_INPUT. */
541 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
542 into_target, unsignedp, methods))
543 return false;
544
545 /* Use a standard word_mode shift for the out-of half. */
546 if (outof_target != 0)
547 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
548 outof_target, unsignedp, methods))
549 return false;
550
551 return true;
552 }
553
554
555 #ifdef HAVE_conditional_move
556 /* Try implementing expand_doubleword_shift using conditional moves.
557 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
558 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
559 are the shift counts to use in the former and latter case. All other
560 arguments are the same as the parent routine. */
561
562 static bool
563 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
564 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
565 rtx outof_input, rtx into_input,
566 rtx subword_op1, rtx superword_op1,
567 rtx outof_target, rtx into_target,
568 int unsignedp, enum optab_methods methods,
569 unsigned HOST_WIDE_INT shift_mask)
570 {
571 rtx outof_superword, into_superword;
572
573 /* Put the superword version of the output into OUTOF_SUPERWORD and
574 INTO_SUPERWORD. */
575 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
576 if (outof_target != 0 && subword_op1 == superword_op1)
577 {
578 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
579 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
580 into_superword = outof_target;
581 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
582 outof_superword, 0, unsignedp, methods))
583 return false;
584 }
585 else
586 {
587 into_superword = gen_reg_rtx (word_mode);
588 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
589 outof_superword, into_superword,
590 unsignedp, methods))
591 return false;
592 }
593
594 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
595 if (!expand_subword_shift (op1_mode, binoptab,
596 outof_input, into_input, subword_op1,
597 outof_target, into_target,
598 unsignedp, methods, shift_mask))
599 return false;
600
601 /* Select between them. Do the INTO half first because INTO_SUPERWORD
602 might be the current value of OUTOF_TARGET. */
603 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
604 into_target, into_superword, word_mode, false))
605 return false;
606
607 if (outof_target != 0)
608 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
609 outof_target, outof_superword,
610 word_mode, false))
611 return false;
612
613 return true;
614 }
615 #endif
616
617 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
618 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
619 input operand; the shift moves bits in the direction OUTOF_INPUT->
620 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
621 of the target. OP1 is the shift count and OP1_MODE is its mode.
622 If OP1 is constant, it will have been truncated as appropriate
623 and is known to be nonzero.
624
625 If SHIFT_MASK is zero, the result of word shifts is undefined when the
626 shift count is outside the range [0, BITS_PER_WORD). This routine must
627 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
628
629 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
630 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
631 fill with zeros or sign bits as appropriate.
632
633 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
634 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
635 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
636 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
637 are undefined.
638
639 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
640 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
641 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
642 function wants to calculate it itself.
643
644 Return true if the shift could be successfully synthesized. */
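/* Sketch of the expansion on a 32-bit-word target shifting a 64-bit value
   left by N (illustrative only):

     if N >= 32:  high = low_in << (N - 32);                low = 0;
     else:        high = (high_in << N) | (low_in >> (32 - N));
                  low = low_in << N;

   Right shifts are the mirror image, with zeros or sign copies filling the
   vacated half in the N >= 32 case. The implementation below is careful
   never to shift a single word by exactly BITS_PER_WORD itself. */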
645
646 static bool
647 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
648 rtx outof_input, rtx into_input, rtx op1,
649 rtx outof_target, rtx into_target,
650 int unsignedp, enum optab_methods methods,
651 unsigned HOST_WIDE_INT shift_mask)
652 {
653 rtx superword_op1, tmp, cmp1, cmp2;
654 rtx subword_label, done_label;
655 enum rtx_code cmp_code;
656
657 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
658 fill the result with sign or zero bits as appropriate. If so, the value
659 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
660 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
661 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
662
663 This isn't worthwhile for constant shifts since the optimizers will
664 cope better with in-range shift counts. */
665 if (shift_mask >= BITS_PER_WORD
666 && outof_target != 0
667 && !CONSTANT_P (op1))
668 {
669 if (!expand_doubleword_shift (op1_mode, binoptab,
670 outof_input, into_input, op1,
671 0, into_target,
672 unsignedp, methods, shift_mask))
673 return false;
674 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
675 outof_target, unsignedp, methods))
676 return false;
677 return true;
678 }
679
680 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
681 is true when the effective shift value is less than BITS_PER_WORD.
682 Set SUPERWORD_OP1 to the shift count that should be used to shift
683 OUTOF_INPUT into INTO_TARGET when the condition is false. */
684 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
685 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
686 {
687 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
688 is a subword shift count. */
689 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
690 0, true, methods);
691 cmp2 = CONST0_RTX (op1_mode);
692 cmp_code = EQ;
693 superword_op1 = op1;
694 }
695 else
696 {
697 /* Set CMP1 to OP1 - BITS_PER_WORD. */
698 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
699 0, true, methods);
700 cmp2 = CONST0_RTX (op1_mode);
701 cmp_code = LT;
702 superword_op1 = cmp1;
703 }
704 if (cmp1 == 0)
705 return false;
706
707 /* If we can compute the condition at compile time, pick the
708 appropriate subroutine. */
709 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
710 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
711 {
712 if (tmp == const0_rtx)
713 return expand_superword_shift (binoptab, outof_input, superword_op1,
714 outof_target, into_target,
715 unsignedp, methods);
716 else
717 return expand_subword_shift (op1_mode, binoptab,
718 outof_input, into_input, op1,
719 outof_target, into_target,
720 unsignedp, methods, shift_mask);
721 }
722
723 #ifdef HAVE_conditional_move
724 /* Try using conditional moves to generate straight-line code. */
725 {
726 rtx start = get_last_insn ();
727 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
728 cmp_code, cmp1, cmp2,
729 outof_input, into_input,
730 op1, superword_op1,
731 outof_target, into_target,
732 unsignedp, methods, shift_mask))
733 return true;
734 delete_insns_since (start);
735 }
736 #endif
737
738 /* As a last resort, use branches to select the correct alternative. */
739 subword_label = gen_label_rtx ();
740 done_label = gen_label_rtx ();
741
742 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
743 0, 0, subword_label);
744
745 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
746 outof_target, into_target,
747 unsignedp, methods))
748 return false;
749
750 emit_jump_insn (gen_jump (done_label));
751 emit_barrier ();
752 emit_label (subword_label);
753
754 if (!expand_subword_shift (op1_mode, binoptab,
755 outof_input, into_input, op1,
756 outof_target, into_target,
757 unsignedp, methods, shift_mask))
758 return false;
759
760 emit_label (done_label);
761 return true;
762 }
763 \f
764 /* Subroutine of expand_binop. Perform a double word multiplication of
765 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
766 as the target's word_mode. This function returns NULL_RTX if anything
767 goes wrong, in which case it may have already emitted instructions
768 which need to be deleted.
769
770 If we want to multiply two two-word values and have normal and widening
771 multiplies of single-word values, we can do this with three smaller
772 multiplications. Note that we do not make a REG_NO_CONFLICT block here
773 because we are not operating on one word at a time.
774
775 The multiplication proceeds as follows:
776 _______________________
777 [__op0_high_|__op0_low__]
778 _______________________
779 * [__op1_high_|__op1_low__]
780 _______________________________________________
781 _______________________
782 (1) [__op0_low__*__op1_low__]
783 _______________________
784 (2a) [__op0_low__*__op1_high_]
785 _______________________
786 (2b) [__op0_high_*__op1_low__]
787 _______________________
788 (3) [__op0_high_*__op1_high_]
789
790
791 This gives a 4-word result. Since we are only interested in the
792 lower 2 words, partial result (3) and the upper words of (2a) and
793 (2b) don't need to be calculated. Hence (2a) and (2b) can be
794 calculated using non-widening multiplication.
795
796 (1), however, needs to be calculated with an unsigned widening
797 multiplication. If this operation is not directly supported we
798 try using a signed widening multiplication and adjust the result.
799 This adjustment works as follows:
800
801 If both operands are positive then no adjustment is needed.
802
803 If the operands have different signs, for example op0_low < 0 and
804 op1_low >= 0, the instruction treats the most significant bit of
805 op0_low as a sign bit instead of a bit with significance
806 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
807 with 2**BITS_PER_WORD - op0_low, and two's complements the
808 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
809 the result.
810
811 Similarly, if both operands are negative, we need to add
812 (op0_low + op1_low) * 2**BITS_PER_WORD.
813
814 We use a trick to adjust quickly. We logically shift op0_low right
815 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
816 op0_high (op1_high) before it is used to calculate 2b (2a). If no
817 logical shift exists, we do an arithmetic right shift and subtract
818 the 0 or -1. */
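/* Worked example (illustrative, with BITS_PER_WORD == 8): 0x1234 * 0x5678.
   The unsigned widening multiply of the low words gives
   0x34 * 0x78 = 0x1860. The two non-widening multiplies give
   (0x12 * 0x78) & 0xff = 0x70 and (0x56 * 0x34) & 0xff = 0x78, so
   ADJUST = 0xe8. Adding ADJUST into the high word of 0x1860 gives
   0x18 + 0xe8 = 0x00 (mod 0x100), so the final doubleword result is
   0x0060 -- the low 16 bits of the full product 0x06260060. */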
819
820 static rtx
821 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
822 bool umulp, enum optab_methods methods)
823 {
824 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
825 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
826 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
827 rtx product, adjust, product_high, temp;
828
829 rtx op0_high = operand_subword_force (op0, high, mode);
830 rtx op0_low = operand_subword_force (op0, low, mode);
831 rtx op1_high = operand_subword_force (op1, high, mode);
832 rtx op1_low = operand_subword_force (op1, low, mode);
833
834 /* If we're using an unsigned multiply to directly compute the product
835 of the low-order words of the operands and perform any required
836 adjustments of the operands, we begin by trying two more multiplications
837 and then computing the appropriate sum.
838
839 We have checked above that the required addition is provided.
840 Full-word addition will normally always succeed, especially if
841 it is provided at all, so we don't worry about its failure. The
842 multiplication may well fail, however, so we do handle that. */
843
844 if (!umulp)
845 {
846 /* ??? This could be done with emit_store_flag where available. */
847 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
848 NULL_RTX, 1, methods);
849 if (temp)
850 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
851 NULL_RTX, 0, OPTAB_DIRECT);
852 else
853 {
854 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
855 NULL_RTX, 0, methods);
856 if (!temp)
857 return NULL_RTX;
858 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
859 NULL_RTX, 0, OPTAB_DIRECT);
860 }
861
862 if (!op0_high)
863 return NULL_RTX;
864 }
865
866 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
867 NULL_RTX, 0, OPTAB_DIRECT);
868 if (!adjust)
869 return NULL_RTX;
870
871 /* OP0_HIGH should now be dead. */
872
873 if (!umulp)
874 {
875 /* ??? This could be done with emit_store_flag where available. */
876 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
877 NULL_RTX, 1, methods);
878 if (temp)
879 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
880 NULL_RTX, 0, OPTAB_DIRECT);
881 else
882 {
883 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
884 NULL_RTX, 0, methods);
885 if (!temp)
886 return NULL_RTX;
887 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
888 NULL_RTX, 0, OPTAB_DIRECT);
889 }
890
891 if (!op1_high)
892 return NULL_RTX;
893 }
894
895 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
896 NULL_RTX, 0, OPTAB_DIRECT);
897 if (!temp)
898 return NULL_RTX;
899
900 /* OP1_HIGH should now be dead. */
901
902 adjust = expand_binop (word_mode, add_optab, adjust, temp,
903 adjust, 0, OPTAB_DIRECT);
904
905 if (target && !REG_P (target))
906 target = NULL_RTX;
907
908 if (umulp)
909 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
910 target, 1, OPTAB_DIRECT);
911 else
912 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
913 target, 1, OPTAB_DIRECT);
914
915 if (!product)
916 return NULL_RTX;
917
918 product_high = operand_subword (product, high, 1, mode);
919 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
920 REG_P (product_high) ? product_high : adjust,
921 0, OPTAB_DIRECT);
922 emit_move_insn (product_high, adjust);
923 return product;
924 }
925 \f
926 /* Wrapper around expand_binop which takes an rtx code to specify
927 the operation to perform, not an optab pointer. All other
928 arguments are the same. */
929 rtx
930 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
931 rtx op1, rtx target, int unsignedp,
932 enum optab_methods methods)
933 {
934 optab binop = code_to_optab[(int) code];
935 gcc_assert (binop);
936
937 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
938 }
939
940 /* Generate code to perform an operation specified by BINOPTAB
941 on operands OP0 and OP1, with result having machine-mode MODE.
942
943 UNSIGNEDP is for the case where we have to widen the operands
944 to perform the operation. It says to use zero-extension.
945
946 If TARGET is nonzero, the value
947 is generated there, if it is convenient to do so.
948 In all cases an rtx is returned for the locus of the value;
949 this may or may not be TARGET. */
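/* A typical call, sketched for illustration only (op0_rtx, op1_rtx and
   target_rtx stand for whatever rtl the caller already has):

     temp = expand_binop (SImode, add_optab, op0_rtx, op1_rtx,
                          target_rtx, 0, OPTAB_LIB_WIDEN);

   The caller must use the returned rtx, which may or may not be
   target_rtx, as the result. */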
950
951 rtx
952 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
953 rtx target, int unsignedp, enum optab_methods methods)
954 {
955 enum optab_methods next_methods
956 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
957 ? OPTAB_WIDEN : methods);
958 enum mode_class class;
959 enum machine_mode wider_mode;
960 rtx temp;
961 int commutative_op = 0;
962 int shift_op = (binoptab->code == ASHIFT
963 || binoptab->code == ASHIFTRT
964 || binoptab->code == LSHIFTRT
965 || binoptab->code == ROTATE
966 || binoptab->code == ROTATERT);
967 rtx entry_last = get_last_insn ();
968 rtx last;
969
970 class = GET_MODE_CLASS (mode);
971
972 if (flag_force_mem)
973 {
974 /* Load duplicate non-volatile operands once. */
975 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
976 {
977 op0 = force_not_mem (op0);
978 op1 = op0;
979 }
980 else
981 {
982 op0 = force_not_mem (op0);
983 op1 = force_not_mem (op1);
984 }
985 }
986
987 /* If subtracting an integer constant, convert this into an addition of
988 the negated constant. */
989
990 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
991 {
992 op1 = negate_rtx (mode, op1);
993 binoptab = add_optab;
994 }
995
996 /* If we are inside an appropriately-short loop and we are optimizing,
997 force expensive constants into a register. */
998 if (CONSTANT_P (op0) && optimize
999 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1000 {
1001 if (GET_MODE (op0) != VOIDmode)
1002 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1003 op0 = force_reg (mode, op0);
1004 }
1005
1006 if (CONSTANT_P (op1) && optimize
1007 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1008 {
1009 if (GET_MODE (op1) != VOIDmode)
1010 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1011 op1 = force_reg (mode, op1);
1012 }
1013
1014 /* Record where to delete back to if we backtrack. */
1015 last = get_last_insn ();
1016
1017 /* If operation is commutative,
1018 try to make the first operand a register.
1019 Even better, try to make it the same as the target.
1020 Also try to make the last operand a constant. */
1021 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1022 || binoptab == smul_widen_optab
1023 || binoptab == umul_widen_optab
1024 || binoptab == smul_highpart_optab
1025 || binoptab == umul_highpart_optab)
1026 {
1027 commutative_op = 1;
1028
1029 if (((target == 0 || REG_P (target))
1030 ? ((REG_P (op1)
1031 && !REG_P (op0))
1032 || target == op1)
1033 : rtx_equal_p (op1, target))
1034 || GET_CODE (op0) == CONST_INT)
1035 {
1036 temp = op1;
1037 op1 = op0;
1038 op0 = temp;
1039 }
1040 }
1041
1042 /* If we can do it with a three-operand insn, do so. */
1043
1044 if (methods != OPTAB_MUST_WIDEN
1045 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1046 {
1047 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1048 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1049 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1050 rtx pat;
1051 rtx xop0 = op0, xop1 = op1;
1052
1053 if (target)
1054 temp = target;
1055 else
1056 temp = gen_reg_rtx (mode);
1057
1058 /* If it is a commutative operator and the modes would match
1059 if we would swap the operands, we can save the conversions. */
1060 if (commutative_op)
1061 {
1062 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1063 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1064 {
1065 rtx tmp;
1066
1067 tmp = op0; op0 = op1; op1 = tmp;
1068 tmp = xop0; xop0 = xop1; xop1 = tmp;
1069 }
1070 }
1071
1072 /* In case the insn wants input operands in modes different from
1073 those of the actual operands, convert the operands. It would
1074 seem that we don't need to convert CONST_INTs, but we do, so
1075 that they're properly zero-extended, sign-extended or truncated
1076 for their mode. */
1077
1078 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1079 xop0 = convert_modes (mode0,
1080 GET_MODE (op0) != VOIDmode
1081 ? GET_MODE (op0)
1082 : mode,
1083 xop0, unsignedp);
1084
1085 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1086 xop1 = convert_modes (mode1,
1087 GET_MODE (op1) != VOIDmode
1088 ? GET_MODE (op1)
1089 : mode,
1090 xop1, unsignedp);
1091
1092 /* Now, if insn's predicates don't allow our operands, put them into
1093 pseudo regs. */
1094
1095 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1096 && mode0 != VOIDmode)
1097 xop0 = copy_to_mode_reg (mode0, xop0);
1098
1099 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1100 && mode1 != VOIDmode)
1101 xop1 = copy_to_mode_reg (mode1, xop1);
1102
1103 if (!insn_data[icode].operand[0].predicate (temp, mode))
1104 temp = gen_reg_rtx (mode);
1105
1106 pat = GEN_FCN (icode) (temp, xop0, xop1);
1107 if (pat)
1108 {
1109 /* If PAT is composed of more than one insn, try to add an appropriate
1110 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1111 operand, call ourselves again, this time without a target. */
1112 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1113 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1114 {
1115 delete_insns_since (last);
1116 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1117 unsignedp, methods);
1118 }
1119
1120 emit_insn (pat);
1121 return temp;
1122 }
1123 else
1124 delete_insns_since (last);
1125 }
1126
1127 /* If this is a multiply, see if we can do a widening operation that
1128 takes operands of this mode and makes a wider mode. */
1129
1130 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1131 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1132 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1133 != CODE_FOR_nothing))
1134 {
1135 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1136 unsignedp ? umul_widen_optab : smul_widen_optab,
1137 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1138
1139 if (temp != 0)
1140 {
1141 if (GET_MODE_CLASS (mode) == MODE_INT)
1142 return gen_lowpart (mode, temp);
1143 else
1144 return convert_to_mode (mode, temp, unsignedp);
1145 }
1146 }
1147
1148 /* Look for a wider mode of the same class for which we think we
1149 can open-code the operation. Check for a widening multiply at the
1150 wider mode as well. */
1151
1152 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1153 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1154 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1155 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1156 {
1157 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1158 || (binoptab == smul_optab
1159 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1160 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1161 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1162 != CODE_FOR_nothing)))
1163 {
1164 rtx xop0 = op0, xop1 = op1;
1165 int no_extend = 0;
1166
1167 /* For certain integer operations, we need not actually extend
1168 the narrow operands, as long as we will truncate
1169 the results to the same narrowness. */
1170
1171 if ((binoptab == ior_optab || binoptab == and_optab
1172 || binoptab == xor_optab
1173 || binoptab == add_optab || binoptab == sub_optab
1174 || binoptab == smul_optab || binoptab == ashl_optab)
1175 && class == MODE_INT)
1176 no_extend = 1;
1177
1178 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1179
1180 /* The second operand of a shift must always be extended. */
1181 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1182 no_extend && binoptab != ashl_optab);
1183
1184 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1185 unsignedp, OPTAB_DIRECT);
1186 if (temp)
1187 {
1188 if (class != MODE_INT)
1189 {
1190 if (target == 0)
1191 target = gen_reg_rtx (mode);
1192 convert_move (target, temp, 0);
1193 return target;
1194 }
1195 else
1196 return gen_lowpart (mode, temp);
1197 }
1198 else
1199 delete_insns_since (last);
1200 }
1201 }
1202
1203 /* These can be done a word at a time. */
1204 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1205 && class == MODE_INT
1206 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1207 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1208 {
1209 int i;
1210 rtx insns;
1211 rtx equiv_value;
1212
1213 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1214 won't be accurate, so use a new target. */
1215 if (target == 0 || target == op0 || target == op1)
1216 target = gen_reg_rtx (mode);
1217
1218 start_sequence ();
1219
1220 /* Do the actual arithmetic. */
1221 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1222 {
1223 rtx target_piece = operand_subword (target, i, 1, mode);
1224 rtx x = expand_binop (word_mode, binoptab,
1225 operand_subword_force (op0, i, mode),
1226 operand_subword_force (op1, i, mode),
1227 target_piece, unsignedp, next_methods);
1228
1229 if (x == 0)
1230 break;
1231
1232 if (target_piece != x)
1233 emit_move_insn (target_piece, x);
1234 }
1235
1236 insns = get_insns ();
1237 end_sequence ();
1238
1239 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1240 {
1241 if (binoptab->code != UNKNOWN)
1242 equiv_value
1243 = gen_rtx_fmt_ee (binoptab->code, mode,
1244 copy_rtx (op0), copy_rtx (op1));
1245 else
1246 equiv_value = 0;
1247
1248 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1249 return target;
1250 }
1251 }
1252
1253 /* Synthesize double word shifts from single word shifts. */
1254 if ((binoptab == lshr_optab || binoptab == ashl_optab
1255 || binoptab == ashr_optab)
1256 && class == MODE_INT
1257 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1258 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1259 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1260 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1261 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1262 {
1263 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1264 enum machine_mode op1_mode;
1265
1266 double_shift_mask = targetm.shift_truncation_mask (mode);
1267 shift_mask = targetm.shift_truncation_mask (word_mode);
1268 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1269
1270 /* Apply the truncation to constant shifts. */
1271 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1272 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1273
1274 if (op1 == CONST0_RTX (op1_mode))
1275 return op0;
1276
1277 /* Make sure that this is a combination that expand_doubleword_shift
1278 can handle. See the comments there for details. */
1279 if (double_shift_mask == 0
1280 || (shift_mask == BITS_PER_WORD - 1
1281 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1282 {
1283 rtx insns, equiv_value;
1284 rtx into_target, outof_target;
1285 rtx into_input, outof_input;
1286 int left_shift, outof_word;
1287
1288 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1289 won't be accurate, so use a new target. */
1290 if (target == 0 || target == op0 || target == op1)
1291 target = gen_reg_rtx (mode);
1292
1293 start_sequence ();
1294
1295 /* OUTOF_* is the word we are shifting bits away from, and
1296 INTO_* is the word that we are shifting bits towards, thus
1297 they differ depending on the direction of the shift and
1298 WORDS_BIG_ENDIAN. */
1299
1300 left_shift = binoptab == ashl_optab;
1301 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1302
1303 outof_target = operand_subword (target, outof_word, 1, mode);
1304 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1305
1306 outof_input = operand_subword_force (op0, outof_word, mode);
1307 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1308
1309 if (expand_doubleword_shift (op1_mode, binoptab,
1310 outof_input, into_input, op1,
1311 outof_target, into_target,
1312 unsignedp, methods, shift_mask))
1313 {
1314 insns = get_insns ();
1315 end_sequence ();
1316
1317 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1318 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1319 return target;
1320 }
1321 end_sequence ();
1322 }
1323 }
1324
1325 /* Synthesize double word rotates from single word shifts. */
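 /* Roughly (illustrative): a doubleword rotate by C splits into word
 shifts of the two halves by C and BITS_PER_WORD - C (or by
 C - BITS_PER_WORD and 2 * BITS_PER_WORD - C when C > BITS_PER_WORD),
 OR'd together; a rotate by exactly BITS_PER_WORD is a plain word swap,
 handled separately below. */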
1326 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1327 && class == MODE_INT
1328 && GET_CODE (op1) == CONST_INT
1329 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1330 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1331 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1332 {
1333 rtx insns, equiv_value;
1334 rtx into_target, outof_target;
1335 rtx into_input, outof_input;
1336 rtx inter;
1337 int shift_count, left_shift, outof_word;
1338
1339 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1340 won't be accurate, so use a new target. Do this also if target is not
1341 a REG, first because having a register instead may open optimization
1342 opportunities, and second because if target and op0 happen to be MEMs
1343 designating the same location, we would risk clobbering it too early
1344 in the code sequence we generate below. */
1345 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1346 target = gen_reg_rtx (mode);
1347
1348 start_sequence ();
1349
1350 shift_count = INTVAL (op1);
1351
1352 /* OUTOF_* is the word we are shifting bits away from, and
1353 INTO_* is the word that we are shifting bits towards, thus
1354 they differ depending on the direction of the shift and
1355 WORDS_BIG_ENDIAN. */
1356
1357 left_shift = (binoptab == rotl_optab);
1358 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1359
1360 outof_target = operand_subword (target, outof_word, 1, mode);
1361 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1362
1363 outof_input = operand_subword_force (op0, outof_word, mode);
1364 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1365
1366 if (shift_count == BITS_PER_WORD)
1367 {
1368 /* This is just a word swap. */
1369 emit_move_insn (outof_target, into_input);
1370 emit_move_insn (into_target, outof_input);
1371 inter = const0_rtx;
1372 }
1373 else
1374 {
1375 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1376 rtx first_shift_count, second_shift_count;
1377 optab reverse_unsigned_shift, unsigned_shift;
1378
1379 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1380 ? lshr_optab : ashl_optab);
1381
1382 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1383 ? ashl_optab : lshr_optab);
1384
1385 if (shift_count > BITS_PER_WORD)
1386 {
1387 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1388 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1389 }
1390 else
1391 {
1392 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1393 second_shift_count = GEN_INT (shift_count);
1394 }
1395
1396 into_temp1 = expand_binop (word_mode, unsigned_shift,
1397 outof_input, first_shift_count,
1398 NULL_RTX, unsignedp, next_methods);
1399 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1400 into_input, second_shift_count,
1401 NULL_RTX, unsignedp, next_methods);
1402
1403 if (into_temp1 != 0 && into_temp2 != 0)
1404 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1405 into_target, unsignedp, next_methods);
1406 else
1407 inter = 0;
1408
1409 if (inter != 0 && inter != into_target)
1410 emit_move_insn (into_target, inter);
1411
1412 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1413 into_input, first_shift_count,
1414 NULL_RTX, unsignedp, next_methods);
1415 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1416 outof_input, second_shift_count,
1417 NULL_RTX, unsignedp, next_methods);
1418
1419 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1420 inter = expand_binop (word_mode, ior_optab,
1421 outof_temp1, outof_temp2,
1422 outof_target, unsignedp, next_methods);
1423
1424 if (inter != 0 && inter != outof_target)
1425 emit_move_insn (outof_target, inter);
1426 }
1427
1428 insns = get_insns ();
1429 end_sequence ();
1430
1431 if (inter != 0)
1432 {
1433 if (binoptab->code != UNKNOWN)
1434 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1435 else
1436 equiv_value = 0;
1437
1438 /* We can't make this a no conflict block if this is a word swap,
1439 because the word swap case fails if the input and output values
1440 are in the same register. */
1441 if (shift_count != BITS_PER_WORD)
1442 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1443 else
1444 emit_insn (insns);
1445
1446
1447 return target;
1448 }
1449 }
1450
1451 /* These can be done a word at a time by propagating carries. */
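 /* A sketch of the idea (illustrative): for each word, least significant
 first, compute T[i] = A[i] + B[i] and detect carry-out with an unsigned
 comparison (the sum is smaller than an operand exactly when the
 addition wrapped); then add the carry-in from the previous word, again
 checking for wrap-around, and OR the two carries together to get the
 carry-in for the next word. Subtraction propagates borrows the same
 way with the comparison reversed. */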
1452 if ((binoptab == add_optab || binoptab == sub_optab)
1453 && class == MODE_INT
1454 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1455 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1456 {
1457 unsigned int i;
1458 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1459 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1460 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1461 rtx xop0, xop1, xtarget;
1462
1463 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1464 value is one of those, use it. Otherwise, use 1 since it is the
1465 one easiest to get. */
1466 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1467 int normalizep = STORE_FLAG_VALUE;
1468 #else
1469 int normalizep = 1;
1470 #endif
1471
1472 /* Prepare the operands. */
1473 xop0 = force_reg (mode, op0);
1474 xop1 = force_reg (mode, op1);
1475
1476 xtarget = gen_reg_rtx (mode);
1477
1478 if (target == 0 || !REG_P (target))
1479 target = xtarget;
1480
1481 /* Indicate for flow that the entire target reg is being set. */
1482 if (REG_P (target))
1483 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1484
1485 /* Do the actual arithmetic. */
1486 for (i = 0; i < nwords; i++)
1487 {
1488 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1489 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1490 rtx op0_piece = operand_subword_force (xop0, index, mode);
1491 rtx op1_piece = operand_subword_force (xop1, index, mode);
1492 rtx x;
1493
1494 /* Main add/subtract of the input operands. */
1495 x = expand_binop (word_mode, binoptab,
1496 op0_piece, op1_piece,
1497 target_piece, unsignedp, next_methods);
1498 if (x == 0)
1499 break;
1500
1501 if (i + 1 < nwords)
1502 {
1503 /* Store carry from main add/subtract. */
1504 carry_out = gen_reg_rtx (word_mode);
1505 carry_out = emit_store_flag_force (carry_out,
1506 (binoptab == add_optab
1507 ? LT : GT),
1508 x, op0_piece,
1509 word_mode, 1, normalizep);
1510 }
1511
1512 if (i > 0)
1513 {
1514 rtx newx;
1515
1516 /* Add/subtract previous carry to main result. */
1517 newx = expand_binop (word_mode,
1518 normalizep == 1 ? binoptab : otheroptab,
1519 x, carry_in,
1520 NULL_RTX, 1, next_methods);
1521
1522 if (i + 1 < nwords)
1523 {
1524 /* Get out carry from adding/subtracting carry in. */
1525 rtx carry_tmp = gen_reg_rtx (word_mode);
1526 carry_tmp = emit_store_flag_force (carry_tmp,
1527 (binoptab == add_optab
1528 ? LT : GT),
1529 newx, x,
1530 word_mode, 1, normalizep);
1531
1532 /* Logical-ior the two possible carries together. */
1533 carry_out = expand_binop (word_mode, ior_optab,
1534 carry_out, carry_tmp,
1535 carry_out, 0, next_methods);
1536 if (carry_out == 0)
1537 break;
1538 }
1539 emit_move_insn (target_piece, newx);
1540 }
1541 else
1542 {
1543 if (x != target_piece)
1544 emit_move_insn (target_piece, x);
1545 }
1546
1547 carry_in = carry_out;
1548 }
1549
1550 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1551 {
1552 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1553 || ! rtx_equal_p (target, xtarget))
1554 {
1555 rtx temp = emit_move_insn (target, xtarget);
1556
1557 set_unique_reg_note (temp,
1558 REG_EQUAL,
1559 gen_rtx_fmt_ee (binoptab->code, mode,
1560 copy_rtx (xop0),
1561 copy_rtx (xop1)));
1562 }
1563 else
1564 target = xtarget;
1565
1566 return target;
1567 }
1568
1569 else
1570 delete_insns_since (last);
1571 }
1572
1573 /* Attempt to synthesize double word multiplies using a sequence of word
1574 mode multiplications. We first attempt to generate a sequence using a
1575 more efficient unsigned widening multiply, and if that fails we then
1576 try using a signed widening multiply. */
1577
1578 if (binoptab == smul_optab
1579 && class == MODE_INT
1580 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1581 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1582 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1583 {
1584 rtx product = NULL_RTX;
1585
1586 if (umul_widen_optab->handlers[(int) mode].insn_code
1587 != CODE_FOR_nothing)
1588 {
1589 product = expand_doubleword_mult (mode, op0, op1, target,
1590 true, methods);
1591 if (!product)
1592 delete_insns_since (last);
1593 }
1594
1595 if (product == NULL_RTX
1596 && smul_widen_optab->handlers[(int) mode].insn_code
1597 != CODE_FOR_nothing)
1598 {
1599 product = expand_doubleword_mult (mode, op0, op1, target,
1600 false, methods);
1601 if (!product)
1602 delete_insns_since (last);
1603 }
1604
1605 if (product != NULL_RTX)
1606 {
1607 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1608 {
1609 temp = emit_move_insn (target ? target : product, product);
1610 set_unique_reg_note (temp,
1611 REG_EQUAL,
1612 gen_rtx_fmt_ee (MULT, mode,
1613 copy_rtx (op0),
1614 copy_rtx (op1)));
1615 }
1616 return product;
1617 }
1618 }
1619
1620 /* It can't be open-coded in this mode.
1621 Use a library call if one is available and caller says that's ok. */
1622
1623 if (binoptab->handlers[(int) mode].libfunc
1624 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1625 {
1626 rtx insns;
1627 rtx op1x = op1;
1628 enum machine_mode op1_mode = mode;
1629 rtx value;
1630
1631 start_sequence ();
1632
1633 if (shift_op)
1634 {
1635 op1_mode = word_mode;
1636 /* Specify unsigned here,
1637 since negative shift counts are meaningless. */
1638 op1x = convert_to_mode (word_mode, op1, 1);
1639 }
1640
1641 if (GET_MODE (op0) != VOIDmode
1642 && GET_MODE (op0) != mode)
1643 op0 = convert_to_mode (mode, op0, unsignedp);
1644
1645 /* Pass 1 for NO_QUEUE so we don't lose any increments
1646 if the libcall is cse'd or moved. */
1647 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1648 NULL_RTX, LCT_CONST, mode, 2,
1649 op0, mode, op1x, op1_mode);
1650
1651 insns = get_insns ();
1652 end_sequence ();
1653
1654 target = gen_reg_rtx (mode);
1655 emit_libcall_block (insns, target, value,
1656 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1657
1658 return target;
1659 }
1660
1661 delete_insns_since (last);
1662
1663 /* It can't be done in this mode. Can we do it in a wider mode? */
1664
1665 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1666 || methods == OPTAB_MUST_WIDEN))
1667 {
1668 /* Caller says, don't even try. */
1669 delete_insns_since (entry_last);
1670 return 0;
1671 }
1672
1673 /* Compute the value of METHODS to pass to recursive calls.
1674 Don't allow widening to be tried recursively. */
1675
1676 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1677
1678 /* Look for a wider mode of the same class for which it appears we can do
1679 the operation. */
1680
1681 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1682 {
1683 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1684 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1685 {
1686 if ((binoptab->handlers[(int) wider_mode].insn_code
1687 != CODE_FOR_nothing)
1688 || (methods == OPTAB_LIB
1689 && binoptab->handlers[(int) wider_mode].libfunc))
1690 {
1691 rtx xop0 = op0, xop1 = op1;
1692 int no_extend = 0;
1693
1694 /* For certain integer operations, we need not actually extend
1695 the narrow operands, as long as we will truncate
1696 the results to the same narrowness. */
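	      /* Illustrative aside, not compiler code: for these operations
		 the low-order bits of the result depend only on the
		 low-order bits of the inputs.  E.g. in C, for uint32_t
		 values A and B, whatever garbage sits in the upper bits
		 of A,

		   (uint8_t) ((a | 0xffffff00u) + b) == (uint8_t) (a + b)

		 so the narrow value can be fed to the wide operation
		 without performing a real extension.  */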
1697
1698 if ((binoptab == ior_optab || binoptab == and_optab
1699 || binoptab == xor_optab
1700 || binoptab == add_optab || binoptab == sub_optab
1701 || binoptab == smul_optab || binoptab == ashl_optab)
1702 && class == MODE_INT)
1703 no_extend = 1;
1704
1705 xop0 = widen_operand (xop0, wider_mode, mode,
1706 unsignedp, no_extend);
1707
1708 /* The second operand of a shift must always be extended. */
1709 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1710 no_extend && binoptab != ashl_optab);
1711
1712 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1713 unsignedp, methods);
1714 if (temp)
1715 {
1716 if (class != MODE_INT)
1717 {
1718 if (target == 0)
1719 target = gen_reg_rtx (mode);
1720 convert_move (target, temp, 0);
1721 return target;
1722 }
1723 else
1724 return gen_lowpart (mode, temp);
1725 }
1726 else
1727 delete_insns_since (last);
1728 }
1729 }
1730 }
1731
1732 delete_insns_since (entry_last);
1733 return 0;
1734 }
1735 \f
1736 /* Expand a binary operator which has both signed and unsigned forms.
1737 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1738 signed operations.
1739
1740 If we widen unsigned operands, we may use a signed wider operation instead
1741 of an unsigned wider operation, since the result would be the same. */
1742
1743 rtx
1744 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1745 rtx op0, rtx op1, rtx target, int unsignedp,
1746 enum optab_methods methods)
1747 {
1748 rtx temp;
1749 optab direct_optab = unsignedp ? uoptab : soptab;
1750 struct optab wide_soptab;
1751
1752 /* Do it without widening, if possible. */
1753 temp = expand_binop (mode, direct_optab, op0, op1, target,
1754 unsignedp, OPTAB_DIRECT);
1755 if (temp || methods == OPTAB_DIRECT)
1756 return temp;
1757
1758 /* Try widening to a signed int. Make a fake signed optab that
1759 hides any signed insn for direct use. */
1760 wide_soptab = *soptab;
1761 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1762 wide_soptab.handlers[(int) mode].libfunc = 0;
1763
1764 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1765 unsignedp, OPTAB_WIDEN);
1766
1767 /* For unsigned operands, try widening to an unsigned int. */
1768 if (temp == 0 && unsignedp)
1769 temp = expand_binop (mode, uoptab, op0, op1, target,
1770 unsignedp, OPTAB_WIDEN);
1771 if (temp || methods == OPTAB_WIDEN)
1772 return temp;
1773
1774 /* Use the right width lib call if that exists. */
1775 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1776 if (temp || methods == OPTAB_LIB)
1777 return temp;
1778
 1779 /* Must widen and use a lib call; use either signed or unsigned. */
1780 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1781 unsignedp, methods);
1782 if (temp != 0)
1783 return temp;
1784 if (unsignedp)
1785 return expand_binop (mode, uoptab, op0, op1, target,
1786 unsignedp, methods);
1787 return 0;
1788 }
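/* Illustrative aside, not compiler code: a signed wider operation can
   stand in for an unsigned one above because, for operations such as
   PLUS, MINUS and MULT, the truncated result depends only on the
   low-order bits of the operands.  A minimal C sketch, assuming 8-bit
   operands widened to 32 bits and the <stdint.h>/<assert.h> headers:

     uint8_t a = 200, b = 100;
     int32_t  s = (int32_t) a * (int32_t) b;
     uint32_t u = (uint32_t) a * (uint32_t) b;
     assert ((uint8_t) s == (uint8_t) u);

   Both products agree in their low byte, so either widening flavour
   yields the same narrowed result.  */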
1789 \f
1790 /* Generate code to perform an operation specified by UNOPPTAB
1791 on operand OP0, with two results to TARG0 and TARG1.
1792 We assume that the order of the operands for the instruction
1793 is TARG0, TARG1, OP0.
1794
1795 Either TARG0 or TARG1 may be zero, but what that means is that
1796 the result is not actually wanted. We will generate it into
1797 a dummy pseudo-reg and discard it. They may not both be zero.
1798
1799 Returns 1 if this operation can be performed; 0 if not. */
1800
1801 int
1802 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1803 int unsignedp)
1804 {
1805 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1806 enum mode_class class;
1807 enum machine_mode wider_mode;
1808 rtx entry_last = get_last_insn ();
1809 rtx last;
1810
1811 class = GET_MODE_CLASS (mode);
1812
1813 if (flag_force_mem)
1814 op0 = force_not_mem (op0);
1815
1816 if (!targ0)
1817 targ0 = gen_reg_rtx (mode);
1818 if (!targ1)
1819 targ1 = gen_reg_rtx (mode);
1820
1821 /* Record where to go back to if we fail. */
1822 last = get_last_insn ();
1823
1824 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1825 {
1826 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1827 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1828 rtx pat;
1829 rtx xop0 = op0;
1830
1831 if (GET_MODE (xop0) != VOIDmode
1832 && GET_MODE (xop0) != mode0)
1833 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1834
1835 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1836 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1837 xop0 = copy_to_mode_reg (mode0, xop0);
1838
1839 /* We could handle this, but we should always be called with a pseudo
1840 for our targets and all insns should take them as outputs. */
1841 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1842 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1843
1844 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1845 if (pat)
1846 {
1847 emit_insn (pat);
1848 return 1;
1849 }
1850 else
1851 delete_insns_since (last);
1852 }
1853
1854 /* It can't be done in this mode. Can we do it in a wider mode? */
1855
1856 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1857 {
1858 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1859 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1860 {
1861 if (unoptab->handlers[(int) wider_mode].insn_code
1862 != CODE_FOR_nothing)
1863 {
1864 rtx t0 = gen_reg_rtx (wider_mode);
1865 rtx t1 = gen_reg_rtx (wider_mode);
1866 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1867
1868 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1869 {
1870 convert_move (targ0, t0, unsignedp);
1871 convert_move (targ1, t1, unsignedp);
1872 return 1;
1873 }
1874 else
1875 delete_insns_since (last);
1876 }
1877 }
1878 }
1879
1880 delete_insns_since (entry_last);
1881 return 0;
1882 }
1883 \f
1884 /* Generate code to perform an operation specified by BINOPTAB
 1885 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1886 We assume that the order of the operands for the instruction
1887 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1888 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1889
1890 Either TARG0 or TARG1 may be zero, but what that means is that
1891 the result is not actually wanted. We will generate it into
1892 a dummy pseudo-reg and discard it. They may not both be zero.
1893
1894 Returns 1 if this operation can be performed; 0 if not. */
1895
1896 int
1897 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1898 int unsignedp)
1899 {
1900 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1901 enum mode_class class;
1902 enum machine_mode wider_mode;
1903 rtx entry_last = get_last_insn ();
1904 rtx last;
1905
1906 class = GET_MODE_CLASS (mode);
1907
1908 if (flag_force_mem)
1909 {
1910 op0 = force_not_mem (op0);
1911 op1 = force_not_mem (op1);
1912 }
1913
1914 /* If we are inside an appropriately-short loop and we are optimizing,
1915 force expensive constants into a register. */
1916 if (CONSTANT_P (op0) && optimize
1917 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1918 op0 = force_reg (mode, op0);
1919
1920 if (CONSTANT_P (op1) && optimize
1921 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1922 op1 = force_reg (mode, op1);
1923
1924 if (!targ0)
1925 targ0 = gen_reg_rtx (mode);
1926 if (!targ1)
1927 targ1 = gen_reg_rtx (mode);
1928
1929 /* Record where to go back to if we fail. */
1930 last = get_last_insn ();
1931
1932 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1933 {
1934 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1935 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1936 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1937 rtx pat;
1938 rtx xop0 = op0, xop1 = op1;
1939
1940 /* In case the insn wants input operands in modes different from
1941 those of the actual operands, convert the operands. It would
1942 seem that we don't need to convert CONST_INTs, but we do, so
1943 that they're properly zero-extended, sign-extended or truncated
1944 for their mode. */
1945
1946 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1947 xop0 = convert_modes (mode0,
1948 GET_MODE (op0) != VOIDmode
1949 ? GET_MODE (op0)
1950 : mode,
1951 xop0, unsignedp);
1952
1953 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1954 xop1 = convert_modes (mode1,
1955 GET_MODE (op1) != VOIDmode
1956 ? GET_MODE (op1)
1957 : mode,
1958 xop1, unsignedp);
1959
1960 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1961 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
1962 xop0 = copy_to_mode_reg (mode0, xop0);
1963
1964 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
1965 xop1 = copy_to_mode_reg (mode1, xop1);
1966
1967 /* We could handle this, but we should always be called with a pseudo
1968 for our targets and all insns should take them as outputs. */
1969 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1970 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
1971
1972 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
1973 if (pat)
1974 {
1975 emit_insn (pat);
1976 return 1;
1977 }
1978 else
1979 delete_insns_since (last);
1980 }
1981
1982 /* It can't be done in this mode. Can we do it in a wider mode? */
1983
1984 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1985 {
1986 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1987 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1988 {
1989 if (binoptab->handlers[(int) wider_mode].insn_code
1990 != CODE_FOR_nothing)
1991 {
1992 rtx t0 = gen_reg_rtx (wider_mode);
1993 rtx t1 = gen_reg_rtx (wider_mode);
1994 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1995 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
1996
1997 if (expand_twoval_binop (binoptab, cop0, cop1,
1998 t0, t1, unsignedp))
1999 {
2000 convert_move (targ0, t0, unsignedp);
2001 convert_move (targ1, t1, unsignedp);
2002 return 1;
2003 }
2004 else
2005 delete_insns_since (last);
2006 }
2007 }
2008 }
2009
2010 delete_insns_since (entry_last);
2011 return 0;
2012 }
2013
2014 /* Expand the two-valued library call indicated by BINOPTAB, but
2015 preserve only one of the values. If TARG0 is non-NULL, the first
2016 value is placed into TARG0; otherwise the second value is placed
2017 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2018 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2019 This routine assumes that the value returned by the library call is
2020 as if the return value was of an integral mode twice as wide as the
2021 mode of OP0. Returns 1 if the call was successful. */
2022
2023 bool
2024 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2025 rtx targ0, rtx targ1, enum rtx_code code)
2026 {
2027 enum machine_mode mode;
2028 enum machine_mode libval_mode;
2029 rtx libval;
2030 rtx insns;
2031
2032 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2033 gcc_assert (!targ0 != !targ1);
2034
2035 mode = GET_MODE (op0);
2036 if (!binoptab->handlers[(int) mode].libfunc)
2037 return false;
2038
2039 /* The value returned by the library function will have twice as
2040 many bits as the nominal MODE. */
2041 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2042 MODE_INT);
2043 start_sequence ();
2044 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2045 NULL_RTX, LCT_CONST,
2046 libval_mode, 2,
2047 op0, mode,
2048 op1, mode);
 2049 /* Get the part of LIBVAL containing the value that we want. */
2050 libval = simplify_gen_subreg (mode, libval, libval_mode,
2051 targ0 ? 0 : GET_MODE_SIZE (mode));
2052 insns = get_insns ();
2053 end_sequence ();
 2054 /* Move the result into the desired location. */
2055 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2056 gen_rtx_fmt_ee (code, mode, op0, op1));
2057
2058 return true;
2059 }
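/* Illustrative aside, not compiler code: the "twice as wide" convention
   above means the library routine packs both results into one double-wide
   integer and the caller peels off the half it wants via a subreg.  A
   rough C picture, assuming a 32-bit MODE, the <stdint.h> types, and a
   hypothetical helper that happens to pack the low half first:

     uint64_t packed = hypothetical_libcall (op0, op1);
     uint32_t half_at_offset_0 = (uint32_t) packed;
     uint32_t other_half       = (uint32_t) (packed >> 32);

   Which half corresponds to TARG0 depends on the target's subreg offset
   rules, which is why the code uses simplify_gen_subreg rather than
   explicit shifts.  */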
2060
2061 \f
2062 /* Wrapper around expand_unop which takes an rtx code to specify
2063 the operation to perform, not an optab pointer. All other
2064 arguments are the same. */
2065 rtx
2066 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2067 rtx target, int unsignedp)
2068 {
2069 optab unop = code_to_optab[(int) code];
2070 gcc_assert (unop);
2071
2072 return expand_unop (mode, unop, op0, target, unsignedp);
2073 }
2074
2075 /* Try calculating
2076 (clz:narrow x)
2077 as
2078 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2079 static rtx
2080 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2081 {
2082 enum mode_class class = GET_MODE_CLASS (mode);
2083 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2084 {
2085 enum machine_mode wider_mode;
2086 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2087 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2088 {
2089 if (clz_optab->handlers[(int) wider_mode].insn_code
2090 != CODE_FOR_nothing)
2091 {
2092 rtx xop0, temp, last;
2093
2094 last = get_last_insn ();
2095
2096 if (target == 0)
2097 target = gen_reg_rtx (mode);
2098 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2099 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2100 if (temp != 0)
2101 temp = expand_binop (wider_mode, sub_optab, temp,
2102 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2103 - GET_MODE_BITSIZE (mode)),
2104 target, true, OPTAB_DIRECT);
2105 if (temp == 0)
2106 delete_insns_since (last);
2107
2108 return temp;
2109 }
2110 }
2111 }
2112 return 0;
2113 }
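/* Illustrative aside, not compiler code: the subtraction above is the
   usual narrowing identity for count-leading-zeros.  In C, for a nonzero
   16-bit X counted in a 32-bit mode:

     int clz16 = __builtin_clz ((uint32_t) x) - (32 - 16);

   Zero-extension contributes exactly (width wide) - (width narrow) extra
   leading zeros, which the subtraction removes.  (__builtin_clz is
   undefined for a zero argument.)  */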
2114
2115 /* Try calculating (parity x) as (and (popcount x) 1), where
2116 popcount can also be done in a wider mode. */
2117 static rtx
2118 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2119 {
2120 enum mode_class class = GET_MODE_CLASS (mode);
2121 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2122 {
2123 enum machine_mode wider_mode;
2124 for (wider_mode = mode; wider_mode != VOIDmode;
2125 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2126 {
2127 if (popcount_optab->handlers[(int) wider_mode].insn_code
2128 != CODE_FOR_nothing)
2129 {
2130 rtx xop0, temp, last;
2131
2132 last = get_last_insn ();
2133
2134 if (target == 0)
2135 target = gen_reg_rtx (mode);
2136 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2137 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2138 true);
2139 if (temp != 0)
2140 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2141 target, true, OPTAB_DIRECT);
2142 if (temp == 0)
2143 delete_insns_since (last);
2144
2145 return temp;
2146 }
2147 }
2148 }
2149 return 0;
2150 }
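/* Illustrative aside, not compiler code: in C the same reduction is
   simply

     unsigned int parity = __builtin_popcount (x) & 1;

   i.e. parity is the low bit of the population count, which is why any
   mode wide enough to hold the zero-extended operand works equally
   well.  */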
2151
2152 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2153 conditions, VAL may already be a SUBREG against which we cannot generate
2154 a further SUBREG. In this case, we expect forcing the value into a
2155 register will work around the situation. */
2156
2157 static rtx
2158 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2159 enum machine_mode imode)
2160 {
2161 rtx ret;
2162 ret = lowpart_subreg (omode, val, imode);
2163 if (ret == NULL)
2164 {
2165 val = force_reg (imode, val);
2166 ret = lowpart_subreg (omode, val, imode);
2167 gcc_assert (ret != NULL);
2168 }
2169 return ret;
2170 }
2171
2172 /* Expand a floating point absolute value or negation operation via a
2173 logical operation on the sign bit. */
2174
2175 static rtx
2176 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2177 rtx op0, rtx target)
2178 {
2179 const struct real_format *fmt;
2180 int bitpos, word, nwords, i;
2181 enum machine_mode imode;
2182 HOST_WIDE_INT hi, lo;
2183 rtx temp, insns;
2184
2185 /* The format has to have a simple sign bit. */
2186 fmt = REAL_MODE_FORMAT (mode);
2187 if (fmt == NULL)
2188 return NULL_RTX;
2189
2190 bitpos = fmt->signbit_rw;
2191 if (bitpos < 0)
2192 return NULL_RTX;
2193
2194 /* Don't create negative zeros if the format doesn't support them. */
2195 if (code == NEG && !fmt->has_signed_zero)
2196 return NULL_RTX;
2197
2198 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2199 {
2200 imode = int_mode_for_mode (mode);
2201 if (imode == BLKmode)
2202 return NULL_RTX;
2203 word = 0;
2204 nwords = 1;
2205 }
2206 else
2207 {
2208 imode = word_mode;
2209
2210 if (FLOAT_WORDS_BIG_ENDIAN)
2211 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2212 else
2213 word = bitpos / BITS_PER_WORD;
2214 bitpos = bitpos % BITS_PER_WORD;
2215 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2216 }
2217
2218 if (bitpos < HOST_BITS_PER_WIDE_INT)
2219 {
2220 hi = 0;
2221 lo = (HOST_WIDE_INT) 1 << bitpos;
2222 }
2223 else
2224 {
2225 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2226 lo = 0;
2227 }
2228 if (code == ABS)
2229 lo = ~lo, hi = ~hi;
2230
2231 if (target == 0 || target == op0)
2232 target = gen_reg_rtx (mode);
2233
2234 if (nwords > 1)
2235 {
2236 start_sequence ();
2237
2238 for (i = 0; i < nwords; ++i)
2239 {
2240 rtx targ_piece = operand_subword (target, i, 1, mode);
2241 rtx op0_piece = operand_subword_force (op0, i, mode);
2242
2243 if (i == word)
2244 {
2245 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2246 op0_piece,
2247 immed_double_const (lo, hi, imode),
2248 targ_piece, 1, OPTAB_LIB_WIDEN);
2249 if (temp != targ_piece)
2250 emit_move_insn (targ_piece, temp);
2251 }
2252 else
2253 emit_move_insn (targ_piece, op0_piece);
2254 }
2255
2256 insns = get_insns ();
2257 end_sequence ();
2258
2259 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2260 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2261 }
2262 else
2263 {
2264 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2265 gen_lowpart (imode, op0),
2266 immed_double_const (lo, hi, imode),
2267 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2268 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2269
2270 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2271 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2272 }
2273
2274 return target;
2275 }
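/* Illustrative aside, not compiler code: the sign-bit trick above
   corresponds to this C sketch, assuming an IEEE-754 binary32 value F
   and the <stdint.h>/<string.h> declarations:

     uint32_t bits;
     memcpy (&bits, &f, sizeof bits);
     if (code == ABS)
       bits &= 0x7fffffffu;
     else
       bits ^= 0x80000000u;
     memcpy (&f, &bits, sizeof f);

   For ABS the sign bit is ANDed away; for NEG it is XORed.  For
   multi-word modes only the word holding the sign bit is modified; the
   remaining words are copied through unchanged, as the loop above
   does.  */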
2276
2277 /* Generate code to perform an operation specified by UNOPTAB
2278 on operand OP0, with result having machine-mode MODE.
2279
2280 UNSIGNEDP is for the case where we have to widen the operands
2281 to perform the operation. It says to use zero-extension.
2282
2283 If TARGET is nonzero, the value
2284 is generated there, if it is convenient to do so.
2285 In all cases an rtx is returned for the locus of the value;
2286 this may or may not be TARGET. */
2287
2288 rtx
2289 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2290 int unsignedp)
2291 {
2292 enum mode_class class;
2293 enum machine_mode wider_mode;
2294 rtx temp;
2295 rtx last = get_last_insn ();
2296 rtx pat;
2297
2298 class = GET_MODE_CLASS (mode);
2299
2300 if (flag_force_mem)
2301 op0 = force_not_mem (op0);
2302
2303 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2304 {
2305 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2306 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2307 rtx xop0 = op0;
2308
2309 if (target)
2310 temp = target;
2311 else
2312 temp = gen_reg_rtx (mode);
2313
2314 if (GET_MODE (xop0) != VOIDmode
2315 && GET_MODE (xop0) != mode0)
2316 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2317
2318 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2319
2320 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2321 xop0 = copy_to_mode_reg (mode0, xop0);
2322
2323 if (!insn_data[icode].operand[0].predicate (temp, mode))
2324 temp = gen_reg_rtx (mode);
2325
2326 pat = GEN_FCN (icode) (temp, xop0);
2327 if (pat)
2328 {
2329 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2330 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2331 {
2332 delete_insns_since (last);
2333 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2334 }
2335
2336 emit_insn (pat);
2337
2338 return temp;
2339 }
2340 else
2341 delete_insns_since (last);
2342 }
2343
2344 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2345
2346 /* Widening clz needs special treatment. */
2347 if (unoptab == clz_optab)
2348 {
2349 temp = widen_clz (mode, op0, target);
2350 if (temp)
2351 return temp;
2352 else
2353 goto try_libcall;
2354 }
2355
2356 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2357 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2358 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2359 {
2360 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2361 {
2362 rtx xop0 = op0;
2363
2364 /* For certain operations, we need not actually extend
2365 the narrow operand, as long as we will truncate the
2366 results to the same narrowness. */
2367
2368 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2369 (unoptab == neg_optab
2370 || unoptab == one_cmpl_optab)
2371 && class == MODE_INT);
2372
2373 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2374 unsignedp);
2375
2376 if (temp)
2377 {
2378 if (class != MODE_INT)
2379 {
2380 if (target == 0)
2381 target = gen_reg_rtx (mode);
2382 convert_move (target, temp, 0);
2383 return target;
2384 }
2385 else
2386 return gen_lowpart (mode, temp);
2387 }
2388 else
2389 delete_insns_since (last);
2390 }
2391 }
2392
2393 /* These can be done a word at a time. */
2394 if (unoptab == one_cmpl_optab
2395 && class == MODE_INT
2396 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2397 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2398 {
2399 int i;
2400 rtx insns;
2401
2402 if (target == 0 || target == op0)
2403 target = gen_reg_rtx (mode);
2404
2405 start_sequence ();
2406
2407 /* Do the actual arithmetic. */
2408 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2409 {
2410 rtx target_piece = operand_subword (target, i, 1, mode);
2411 rtx x = expand_unop (word_mode, unoptab,
2412 operand_subword_force (op0, i, mode),
2413 target_piece, unsignedp);
2414
2415 if (target_piece != x)
2416 emit_move_insn (target_piece, x);
2417 }
2418
2419 insns = get_insns ();
2420 end_sequence ();
2421
2422 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2423 gen_rtx_fmt_e (unoptab->code, mode,
2424 copy_rtx (op0)));
2425 return target;
2426 }
2427
2428 if (unoptab->code == NEG)
2429 {
2430 /* Try negating floating point values by flipping the sign bit. */
2431 if (class == MODE_FLOAT)
2432 {
2433 temp = expand_absneg_bit (NEG, mode, op0, target);
2434 if (temp)
2435 return temp;
2436 }
2437
2438 /* If there is no negation pattern, and we have no negative zero,
2439 try subtracting from zero. */
2440 if (!HONOR_SIGNED_ZEROS (mode))
2441 {
2442 temp = expand_binop (mode, (unoptab == negv_optab
2443 ? subv_optab : sub_optab),
2444 CONST0_RTX (mode), op0, target,
2445 unsignedp, OPTAB_DIRECT);
2446 if (temp)
2447 return temp;
2448 }
2449 }
2450
2451 /* Try calculating parity (x) as popcount (x) % 2. */
2452 if (unoptab == parity_optab)
2453 {
2454 temp = expand_parity (mode, op0, target);
2455 if (temp)
2456 return temp;
2457 }
2458
2459 try_libcall:
2460 /* Now try a library call in this mode. */
2461 if (unoptab->handlers[(int) mode].libfunc)
2462 {
2463 rtx insns;
2464 rtx value;
2465 enum machine_mode outmode = mode;
2466
2467 /* All of these functions return small values. Thus we choose to
2468 have them return something that isn't a double-word. */
2469 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2470 || unoptab == popcount_optab || unoptab == parity_optab)
2471 outmode
2472 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2473
2474 start_sequence ();
2475
2476 /* Pass 1 for NO_QUEUE so we don't lose any increments
2477 if the libcall is cse'd or moved. */
2478 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2479 NULL_RTX, LCT_CONST, outmode,
2480 1, op0, mode);
2481 insns = get_insns ();
2482 end_sequence ();
2483
2484 target = gen_reg_rtx (outmode);
2485 emit_libcall_block (insns, target, value,
2486 gen_rtx_fmt_e (unoptab->code, mode, op0));
2487
2488 return target;
2489 }
2490
2491 /* It can't be done in this mode. Can we do it in a wider mode? */
2492
2493 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2494 {
2495 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2496 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2497 {
2498 if ((unoptab->handlers[(int) wider_mode].insn_code
2499 != CODE_FOR_nothing)
2500 || unoptab->handlers[(int) wider_mode].libfunc)
2501 {
2502 rtx xop0 = op0;
2503
2504 /* For certain operations, we need not actually extend
2505 the narrow operand, as long as we will truncate the
2506 results to the same narrowness. */
2507
2508 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2509 (unoptab == neg_optab
2510 || unoptab == one_cmpl_optab)
2511 && class == MODE_INT);
2512
2513 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2514 unsignedp);
2515
2516 /* If we are generating clz using wider mode, adjust the
2517 result. */
2518 if (unoptab == clz_optab && temp != 0)
2519 temp = expand_binop (wider_mode, sub_optab, temp,
2520 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2521 - GET_MODE_BITSIZE (mode)),
2522 target, true, OPTAB_DIRECT);
2523
2524 if (temp)
2525 {
2526 if (class != MODE_INT)
2527 {
2528 if (target == 0)
2529 target = gen_reg_rtx (mode);
2530 convert_move (target, temp, 0);
2531 return target;
2532 }
2533 else
2534 return gen_lowpart (mode, temp);
2535 }
2536 else
2537 delete_insns_since (last);
2538 }
2539 }
2540 }
2541
2542 /* One final attempt at implementing negation via subtraction,
2543 this time allowing widening of the operand. */
2544 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2545 {
2546 rtx temp;
2547 temp = expand_binop (mode,
2548 unoptab == negv_optab ? subv_optab : sub_optab,
2549 CONST0_RTX (mode), op0,
2550 target, unsignedp, OPTAB_LIB_WIDEN);
2551 if (temp)
2552 return temp;
2553 }
2554
2555 return 0;
2556 }
2557 \f
2558 /* Emit code to compute the absolute value of OP0, with result to
2559 TARGET if convenient. (TARGET may be 0.) The return value says
2560 where the result actually is to be found.
2561
2562 MODE is the mode of the operand; the mode of the result is
2563 different but can be deduced from MODE.
2564
2565 */
2566
2567 rtx
2568 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2569 int result_unsignedp)
2570 {
2571 rtx temp;
2572
2573 if (! flag_trapv)
2574 result_unsignedp = 1;
2575
2576 /* First try to do it with a special abs instruction. */
2577 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2578 op0, target, 0);
2579 if (temp != 0)
2580 return temp;
2581
2582 /* For floating point modes, try clearing the sign bit. */
2583 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2584 {
2585 temp = expand_absneg_bit (ABS, mode, op0, target);
2586 if (temp)
2587 return temp;
2588 }
2589
2590 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2591 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2592 && !HONOR_SIGNED_ZEROS (mode))
2593 {
2594 rtx last = get_last_insn ();
2595
2596 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2597 if (temp != 0)
2598 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2599 OPTAB_WIDEN);
2600
2601 if (temp != 0)
2602 return temp;
2603
2604 delete_insns_since (last);
2605 }
2606
2607 /* If this machine has expensive jumps, we can do integer absolute
2608 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2609 where W is the width of MODE. */
2610
2611 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2612 {
2613 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2614 size_int (GET_MODE_BITSIZE (mode) - 1),
2615 NULL_RTX, 0);
2616
2617 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2618 OPTAB_LIB_WIDEN);
2619 if (temp != 0)
2620 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2621 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2622
2623 if (temp != 0)
2624 return temp;
2625 }
2626
2627 return NULL_RTX;
2628 }
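/* Illustrative aside, not compiler code: the branch-free sequence above
   is the classic shift/xor/subtract form.  In C, for a 32-bit int,
   assuming an arithmetic right shift and ignoring the INT_MIN overflow
   case (as ABS itself does):

     int mask = x >> 31;
     int abs_x = (x ^ mask) - mask;

   For non-negative X the mask is zero and nothing changes; for negative X
   the xor gives ~x and subtracting -1 adds one, yielding -x.  */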
2629
2630 rtx
2631 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2632 int result_unsignedp, int safe)
2633 {
2634 rtx temp, op1;
2635
2636 if (! flag_trapv)
2637 result_unsignedp = 1;
2638
2639 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2640 if (temp != 0)
2641 return temp;
2642
2643 /* If that does not win, use conditional jump and negate. */
2644
 2645 /* It is safe to use the target if it is the same as the source,
 2646 provided it is also a pseudo register. */
2647 if (op0 == target && REG_P (op0)
2648 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2649 safe = 1;
2650
2651 op1 = gen_label_rtx ();
2652 if (target == 0 || ! safe
2653 || GET_MODE (target) != mode
2654 || (MEM_P (target) && MEM_VOLATILE_P (target))
2655 || (REG_P (target)
2656 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2657 target = gen_reg_rtx (mode);
2658
2659 emit_move_insn (target, op0);
2660 NO_DEFER_POP;
2661
2662 /* If this mode is an integer too wide to compare properly,
2663 compare word by word. Rely on CSE to optimize constant cases. */
2664 if (GET_MODE_CLASS (mode) == MODE_INT
2665 && ! can_compare_p (GE, mode, ccp_jump))
2666 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2667 NULL_RTX, op1);
2668 else
2669 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2670 NULL_RTX, NULL_RTX, op1);
2671
2672 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2673 target, target, 0);
2674 if (op0 != target)
2675 emit_move_insn (target, op0);
2676 emit_label (op1);
2677 OK_DEFER_POP;
2678 return target;
2679 }
2680
2681 /* A subroutine of expand_copysign, perform the copysign operation using the
 2682 abs and neg primitives advertised to exist on the target. The assumption
 2683 is that we have a split register file, and that leaving op0 in fp registers,
 2684 rather than playing with subregs so much, will help the register allocator. */
2685
2686 static rtx
2687 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2688 int bitpos, bool op0_is_abs)
2689 {
2690 enum machine_mode imode;
2691 HOST_WIDE_INT hi, lo;
2692 int word;
2693 rtx label;
2694
2695 if (target == op1)
2696 target = NULL_RTX;
2697
2698 if (!op0_is_abs)
2699 {
2700 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2701 if (op0 == NULL)
2702 return NULL_RTX;
2703 target = op0;
2704 }
2705 else
2706 {
2707 if (target == NULL_RTX)
2708 target = copy_to_reg (op0);
2709 else
2710 emit_move_insn (target, op0);
2711 }
2712
2713 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2714 {
2715 imode = int_mode_for_mode (mode);
2716 if (imode == BLKmode)
2717 return NULL_RTX;
2718 op1 = gen_lowpart (imode, op1);
2719 }
2720 else
2721 {
2722 imode = word_mode;
2723 if (FLOAT_WORDS_BIG_ENDIAN)
2724 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2725 else
2726 word = bitpos / BITS_PER_WORD;
2727 bitpos = bitpos % BITS_PER_WORD;
2728 op1 = operand_subword_force (op1, word, mode);
2729 }
2730
2731 if (bitpos < HOST_BITS_PER_WIDE_INT)
2732 {
2733 hi = 0;
2734 lo = (HOST_WIDE_INT) 1 << bitpos;
2735 }
2736 else
2737 {
2738 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2739 lo = 0;
2740 }
2741
2742 op1 = expand_binop (imode, and_optab, op1,
2743 immed_double_const (lo, hi, imode),
2744 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2745
2746 label = gen_label_rtx ();
2747 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2748
2749 if (GET_CODE (op0) == CONST_DOUBLE)
2750 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2751 else
2752 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2753 if (op0 != target)
2754 emit_move_insn (target, op0);
2755
2756 emit_label (label);
2757
2758 return target;
2759 }
2760
2761
2762 /* A subroutine of expand_copysign, perform the entire copysign operation
2763 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2764 is true if op0 is known to have its sign bit clear. */
2765
2766 static rtx
2767 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2768 int bitpos, bool op0_is_abs)
2769 {
2770 enum machine_mode imode;
2771 HOST_WIDE_INT hi, lo;
2772 int word, nwords, i;
2773 rtx temp, insns;
2774
2775 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2776 {
2777 imode = int_mode_for_mode (mode);
2778 if (imode == BLKmode)
2779 return NULL_RTX;
2780 word = 0;
2781 nwords = 1;
2782 }
2783 else
2784 {
2785 imode = word_mode;
2786
2787 if (FLOAT_WORDS_BIG_ENDIAN)
2788 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2789 else
2790 word = bitpos / BITS_PER_WORD;
2791 bitpos = bitpos % BITS_PER_WORD;
2792 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2793 }
2794
2795 if (bitpos < HOST_BITS_PER_WIDE_INT)
2796 {
2797 hi = 0;
2798 lo = (HOST_WIDE_INT) 1 << bitpos;
2799 }
2800 else
2801 {
2802 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2803 lo = 0;
2804 }
2805
2806 if (target == 0 || target == op0 || target == op1)
2807 target = gen_reg_rtx (mode);
2808
2809 if (nwords > 1)
2810 {
2811 start_sequence ();
2812
2813 for (i = 0; i < nwords; ++i)
2814 {
2815 rtx targ_piece = operand_subword (target, i, 1, mode);
2816 rtx op0_piece = operand_subword_force (op0, i, mode);
2817
2818 if (i == word)
2819 {
2820 if (!op0_is_abs)
2821 op0_piece = expand_binop (imode, and_optab, op0_piece,
2822 immed_double_const (~lo, ~hi, imode),
2823 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2824
2825 op1 = expand_binop (imode, and_optab,
2826 operand_subword_force (op1, i, mode),
2827 immed_double_const (lo, hi, imode),
2828 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2829
2830 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2831 targ_piece, 1, OPTAB_LIB_WIDEN);
2832 if (temp != targ_piece)
2833 emit_move_insn (targ_piece, temp);
2834 }
2835 else
2836 emit_move_insn (targ_piece, op0_piece);
2837 }
2838
2839 insns = get_insns ();
2840 end_sequence ();
2841
2842 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2843 }
2844 else
2845 {
2846 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2847 immed_double_const (lo, hi, imode),
2848 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2849
2850 op0 = gen_lowpart (imode, op0);
2851 if (!op0_is_abs)
2852 op0 = expand_binop (imode, and_optab, op0,
2853 immed_double_const (~lo, ~hi, imode),
2854 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2855
2856 temp = expand_binop (imode, ior_optab, op0, op1,
2857 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2858 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2859 }
2860
2861 return target;
2862 }
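/* Illustrative aside, not compiler code: the bitmask form of copysign
   corresponds to this C sketch, assuming IEEE-754 binary32 values X and Y
   viewed through 32-bit unsigned integers:

     uint32_t a, b;
     memcpy (&a, &x, sizeof a);
     memcpy (&b, &y, sizeof b);
     a = (a & 0x7fffffffu) | (b & 0x80000000u);
     memcpy (&x, &a, sizeof x);

   i.e. keep the magnitude of X and graft on the sign of Y.  When OP0 is
   already known to be an absolute value (op0_is_abs), the first AND can
   be skipped, which is exactly the shortcut taken above.  */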
2863
2864 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2865 scalar floating point mode. Return NULL if we do not know how to
2866 expand the operation inline. */
2867
2868 rtx
2869 expand_copysign (rtx op0, rtx op1, rtx target)
2870 {
2871 enum machine_mode mode = GET_MODE (op0);
2872 const struct real_format *fmt;
2873 bool op0_is_abs;
2874 rtx temp;
2875
2876 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2877 gcc_assert (GET_MODE (op1) == mode);
2878
2879 /* First try to do it with a special instruction. */
2880 temp = expand_binop (mode, copysign_optab, op0, op1,
2881 target, 0, OPTAB_DIRECT);
2882 if (temp)
2883 return temp;
2884
2885 fmt = REAL_MODE_FORMAT (mode);
2886 if (fmt == NULL || !fmt->has_signed_zero)
2887 return NULL_RTX;
2888
2889 op0_is_abs = false;
2890 if (GET_CODE (op0) == CONST_DOUBLE)
2891 {
2892 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2893 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2894 op0_is_abs = true;
2895 }
2896
2897 if (fmt->signbit_ro >= 0
2898 && (GET_CODE (op0) == CONST_DOUBLE
2899 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2900 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2901 {
2902 temp = expand_copysign_absneg (mode, op0, op1, target,
2903 fmt->signbit_ro, op0_is_abs);
2904 if (temp)
2905 return temp;
2906 }
2907
2908 if (fmt->signbit_rw < 0)
2909 return NULL_RTX;
2910 return expand_copysign_bit (mode, op0, op1, target,
2911 fmt->signbit_rw, op0_is_abs);
2912 }
2913 \f
 2914 /* Generate an instruction whose insn-code is ICODE,
2915 with two operands: an output TARGET and an input OP0.
2916 TARGET *must* be nonzero, and the output is always stored there.
2917 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2918 the value that is stored into TARGET. */
2919
2920 void
2921 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2922 {
2923 rtx temp;
2924 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2925 rtx pat;
2926
2927 temp = target;
2928
2929 /* Sign and zero extension from memory is often done specially on
2930 RISC machines, so forcing into a register here can pessimize
2931 code. */
2932 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
2933 op0 = force_not_mem (op0);
2934
2935 /* Now, if insn does not accept our operands, put them into pseudos. */
2936
2937 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2938 op0 = copy_to_mode_reg (mode0, op0);
2939
2940 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
2941 || (flag_force_mem && MEM_P (temp)))
2942 temp = gen_reg_rtx (GET_MODE (temp));
2943
2944 pat = GEN_FCN (icode) (temp, op0);
2945
2946 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
2947 add_equal_note (pat, temp, code, op0, NULL_RTX);
2948
2949 emit_insn (pat);
2950
2951 if (temp != target)
2952 emit_move_insn (target, temp);
2953 }
2954 \f
2955 struct no_conflict_data
2956 {
2957 rtx target, first, insn;
2958 bool must_stay;
2959 };
2960
2961 /* Called via note_stores by emit_no_conflict_block. Set P->must_stay
2962 if the currently examined clobber / store has to stay in the list of
2963 insns that constitute the actual no_conflict block. */
2964 static void
2965 no_conflict_move_test (rtx dest, rtx set, void *p0)
2966 {
 2967 struct no_conflict_data *p = p0;
 2968
 2969 /* If this insn directly contributes to setting the target, it must stay. */
2970 if (reg_overlap_mentioned_p (p->target, dest))
2971 p->must_stay = true;
2972 /* If we haven't committed to keeping any other insns in the list yet,
2973 there is nothing more to check. */
2974 else if (p->insn == p->first)
2975 return;
2976 /* If this insn sets / clobbers a register that feeds one of the insns
2977 already in the list, this insn has to stay too. */
2978 else if (reg_mentioned_p (dest, PATTERN (p->first))
2979 || reg_used_between_p (dest, p->first, p->insn)
2980 /* Likewise if this insn depends on a register set by a previous
2981 insn in the list. */
2982 || (GET_CODE (set) == SET
2983 && (modified_in_p (SET_SRC (set), p->first)
2984 || modified_between_p (SET_SRC (set), p->first, p->insn))))
2985 p->must_stay = true;
2986 }
2987
2988 /* Emit code to perform a series of operations on a multi-word quantity, one
2989 word at a time.
2990
2991 Such a block is preceded by a CLOBBER of the output, consists of multiple
2992 insns, each setting one word of the output, and followed by a SET copying
2993 the output to itself.
2994
2995 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2996 note indicating that it doesn't conflict with the (also multi-word)
2997 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2998 notes.
2999
3000 INSNS is a block of code generated to perform the operation, not including
3001 the CLOBBER and final copy. All insns that compute intermediate values
3002 are first emitted, followed by the block as described above.
3003
3004 TARGET, OP0, and OP1 are the output and inputs of the operations,
3005 respectively. OP1 may be zero for a unary operation.
3006
3007 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3008 on the last insn.
3009
3010 If TARGET is not a register, INSNS is simply emitted with no special
3011 processing. Likewise if anything in INSNS is not an INSN or if
3012 there is a libcall block inside INSNS.
3013
3014 The final insn emitted is returned. */
3015
3016 rtx
3017 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3018 {
3019 rtx prev, next, first, last, insn;
3020
3021 if (!REG_P (target) || reload_in_progress)
3022 return emit_insn (insns);
3023 else
3024 for (insn = insns; insn; insn = NEXT_INSN (insn))
3025 if (!NONJUMP_INSN_P (insn)
3026 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3027 return emit_insn (insns);
3028
3029 /* First emit all insns that do not store into words of the output and remove
3030 these from the list. */
3031 for (insn = insns; insn; insn = next)
3032 {
3033 rtx note;
3034 struct no_conflict_data data;
3035
3036 next = NEXT_INSN (insn);
3037
 3038 /* Some ports (cris) create libcall regions of their own. We must
3039 avoid any potential nesting of LIBCALLs. */
3040 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3041 remove_note (insn, note);
3042 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3043 remove_note (insn, note);
3044
3045 data.target = target;
3046 data.first = insns;
3047 data.insn = insn;
3048 data.must_stay = 0;
3049 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3050 if (! data.must_stay)
3051 {
3052 if (PREV_INSN (insn))
3053 NEXT_INSN (PREV_INSN (insn)) = next;
3054 else
3055 insns = next;
3056
3057 if (next)
3058 PREV_INSN (next) = PREV_INSN (insn);
3059
3060 add_insn (insn);
3061 }
3062 }
3063
3064 prev = get_last_insn ();
3065
3066 /* Now write the CLOBBER of the output, followed by the setting of each
3067 of the words, followed by the final copy. */
3068 if (target != op0 && target != op1)
3069 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3070
3071 for (insn = insns; insn; insn = next)
3072 {
3073 next = NEXT_INSN (insn);
3074 add_insn (insn);
3075
3076 if (op1 && REG_P (op1))
3077 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3078 REG_NOTES (insn));
3079
3080 if (op0 && REG_P (op0))
3081 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3082 REG_NOTES (insn));
3083 }
3084
3085 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3086 != CODE_FOR_nothing)
3087 {
3088 last = emit_move_insn (target, target);
3089 if (equiv)
3090 set_unique_reg_note (last, REG_EQUAL, equiv);
3091 }
3092 else
3093 {
3094 last = get_last_insn ();
3095
3096 /* Remove any existing REG_EQUAL note from "last", or else it will
3097 be mistaken for a note referring to the full contents of the
3098 alleged libcall value when found together with the REG_RETVAL
3099 note added below. An existing note can come from an insn
3100 expansion at "last". */
3101 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3102 }
3103
3104 if (prev == 0)
3105 first = get_insns ();
3106 else
3107 first = NEXT_INSN (prev);
3108
3109 /* Encapsulate the block so it gets manipulated as a unit. */
3110 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3111 REG_NOTES (first));
3112 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3113
3114 return last;
3115 }
3116 \f
3117 /* Emit code to make a call to a constant function or a library call.
3118
3119 INSNS is a list containing all insns emitted in the call.
 3120 These insns leave the result in RESULT. Our job is to copy RESULT
3121 to TARGET, which is logically equivalent to EQUIV.
3122
3123 We first emit any insns that set a pseudo on the assumption that these are
3124 loading constants into registers; doing so allows them to be safely cse'ed
3125 between blocks. Then we emit all the other insns in the block, followed by
 3126 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3127 note with an operand of EQUIV.
3128
3129 Moving assignments to pseudos outside of the block is done to improve
3130 the generated code, but is not required to generate correct code,
3131 hence being unable to move an assignment is not grounds for not making
3132 a libcall block. There are two reasons why it is safe to leave these
3133 insns inside the block: First, we know that these pseudos cannot be
3134 used in generated RTL outside the block since they are created for
3135 temporary purposes within the block. Second, CSE will not record the
3136 values of anything set inside a libcall block, so we know they must
3137 be dead at the end of the block.
3138
3139 Except for the first group of insns (the ones setting pseudos), the
3140 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3141
3142 void
3143 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3144 {
3145 rtx final_dest = target;
3146 rtx prev, next, first, last, insn;
3147
3148 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3149 into a MEM later. Protect the libcall block from this change. */
3150 if (! REG_P (target) || REG_USERVAR_P (target))
3151 target = gen_reg_rtx (GET_MODE (target));
3152
3153 /* If we're using non-call exceptions, a libcall corresponding to an
3154 operation that may trap may also trap. */
3155 if (flag_non_call_exceptions && may_trap_p (equiv))
3156 {
3157 for (insn = insns; insn; insn = NEXT_INSN (insn))
3158 if (CALL_P (insn))
3159 {
3160 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3161
3162 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3163 remove_note (insn, note);
3164 }
3165 }
3166 else
 3167 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3168 reg note to indicate that this call cannot throw or execute a nonlocal
3169 goto (unless there is already a REG_EH_REGION note, in which case
3170 we update it). */
3171 for (insn = insns; insn; insn = NEXT_INSN (insn))
3172 if (CALL_P (insn))
3173 {
3174 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3175
3176 if (note != 0)
3177 XEXP (note, 0) = constm1_rtx;
3178 else
3179 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3180 REG_NOTES (insn));
3181 }
3182
3183 /* First emit all insns that set pseudos. Remove them from the list as
3184 we go. Avoid insns that set pseudos which were referenced in previous
3185 insns. These can be generated by move_by_pieces, for example,
3186 to update an address. Similarly, avoid insns that reference things
3187 set in previous insns. */
3188
3189 for (insn = insns; insn; insn = next)
3190 {
3191 rtx set = single_set (insn);
3192 rtx note;
3193
 3194 /* Some ports (cris) create libcall regions of their own. We must
3195 avoid any potential nesting of LIBCALLs. */
3196 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3197 remove_note (insn, note);
3198 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3199 remove_note (insn, note);
3200
3201 next = NEXT_INSN (insn);
3202
3203 if (set != 0 && REG_P (SET_DEST (set))
3204 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3205 && (insn == insns
3206 || ((! INSN_P(insns)
3207 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3208 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3209 && ! modified_in_p (SET_SRC (set), insns)
3210 && ! modified_between_p (SET_SRC (set), insns, insn))))
3211 {
3212 if (PREV_INSN (insn))
3213 NEXT_INSN (PREV_INSN (insn)) = next;
3214 else
3215 insns = next;
3216
3217 if (next)
3218 PREV_INSN (next) = PREV_INSN (insn);
3219
3220 add_insn (insn);
3221 }
3222
3223 /* Some ports use a loop to copy large arguments onto the stack.
3224 Don't move anything outside such a loop. */
3225 if (LABEL_P (insn))
3226 break;
3227 }
3228
3229 prev = get_last_insn ();
3230
3231 /* Write the remaining insns followed by the final copy. */
3232
3233 for (insn = insns; insn; insn = next)
3234 {
3235 next = NEXT_INSN (insn);
3236
3237 add_insn (insn);
3238 }
3239
3240 last = emit_move_insn (target, result);
3241 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3242 != CODE_FOR_nothing)
3243 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3244 else
3245 {
3246 /* Remove any existing REG_EQUAL note from "last", or else it will
3247 be mistaken for a note referring to the full contents of the
3248 libcall value when found together with the REG_RETVAL note added
3249 below. An existing note can come from an insn expansion at
3250 "last". */
3251 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3252 }
3253
3254 if (final_dest != target)
3255 emit_move_insn (final_dest, target);
3256
3257 if (prev == 0)
3258 first = get_insns ();
3259 else
3260 first = NEXT_INSN (prev);
3261
3262 /* Encapsulate the block so it gets manipulated as a unit. */
3263 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3264 {
3265 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3266 when the encapsulated region would not be in one basic block,
3267 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3268 */
3269 bool attach_libcall_retval_notes = true;
3270 next = NEXT_INSN (last);
3271 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3272 if (control_flow_insn_p (insn))
3273 {
3274 attach_libcall_retval_notes = false;
3275 break;
3276 }
3277
3278 if (attach_libcall_retval_notes)
3279 {
3280 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3281 REG_NOTES (first));
3282 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3283 REG_NOTES (last));
3284 }
3285 }
3286 }
3287 \f
3288 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3289 PURPOSE describes how this comparison will be used. CODE is the rtx
3290 comparison code we will be using.
3291
3292 ??? Actually, CODE is slightly weaker than that. A target is still
3293 required to implement all of the normal bcc operations, but not
3294 required to implement all (or any) of the unordered bcc operations. */
3295
3296 int
3297 can_compare_p (enum rtx_code code, enum machine_mode mode,
3298 enum can_compare_purpose purpose)
3299 {
3300 do
3301 {
3302 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3303 {
3304 if (purpose == ccp_jump)
3305 return bcc_gen_fctn[(int) code] != NULL;
3306 else if (purpose == ccp_store_flag)
3307 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3308 else
3309 /* There's only one cmov entry point, and it's allowed to fail. */
3310 return 1;
3311 }
3312 if (purpose == ccp_jump
3313 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3314 return 1;
3315 if (purpose == ccp_cmov
3316 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3317 return 1;
3318 if (purpose == ccp_store_flag
3319 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3320 return 1;
3321 mode = GET_MODE_WIDER_MODE (mode);
3322 }
3323 while (mode != VOIDmode);
3324
3325 return 0;
3326 }
3327
3328 /* This function is called when we are going to emit a compare instruction that
3329 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3330
3331 *PMODE is the mode of the inputs (in case they are const_int).
3332 *PUNSIGNEDP nonzero says that the operands are unsigned;
3333 this matters if they need to be widened.
3334
3335 If they have mode BLKmode, then SIZE specifies the size of both operands.
3336
3337 This function performs all the setup necessary so that the caller only has
3338 to emit a single comparison insn. This setup can involve doing a BLKmode
3339 comparison or emitting a library call to perform the comparison if no insn
3340 is available to handle it.
3341 The values which are passed in through pointers can be modified; the caller
3342 should perform the comparison on the modified values. Constant
3343 comparisons must have already been folded. */
3344
3345 static void
3346 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3347 enum machine_mode *pmode, int *punsignedp,
3348 enum can_compare_purpose purpose)
3349 {
3350 enum machine_mode mode = *pmode;
3351 rtx x = *px, y = *py;
3352 int unsignedp = *punsignedp;
3353 enum mode_class class;
3354
3355 class = GET_MODE_CLASS (mode);
3356
3357 if (mode != BLKmode && flag_force_mem)
3358 {
3359 /* Load duplicate non-volatile operands once. */
3360 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3361 {
3362 x = force_not_mem (x);
3363 y = x;
3364 }
3365 else
3366 {
3367 x = force_not_mem (x);
3368 y = force_not_mem (y);
3369 }
3370 }
3371
3372 /* If we are inside an appropriately-short loop and we are optimizing,
3373 force expensive constants into a register. */
3374 if (CONSTANT_P (x) && optimize
3375 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3376 x = force_reg (mode, x);
3377
3378 if (CONSTANT_P (y) && optimize
3379 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3380 y = force_reg (mode, y);
3381
3382 #ifdef HAVE_cc0
 3383 /* Make sure we have a canonical comparison. The RTL
3384 documentation states that canonical comparisons are required only
3385 for targets which have cc0. */
3386 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3387 #endif
3388
3389 /* Don't let both operands fail to indicate the mode. */
3390 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3391 x = force_reg (mode, x);
3392
3393 /* Handle all BLKmode compares. */
3394
3395 if (mode == BLKmode)
3396 {
3397 enum machine_mode cmp_mode, result_mode;
3398 enum insn_code cmp_code;
3399 tree length_type;
3400 rtx libfunc;
3401 rtx result;
3402 rtx opalign
3403 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3404
3405 gcc_assert (size);
3406
3407 /* Try to use a memory block compare insn - either cmpstr
3408 or cmpmem will do. */
3409 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3410 cmp_mode != VOIDmode;
3411 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3412 {
3413 cmp_code = cmpmem_optab[cmp_mode];
3414 if (cmp_code == CODE_FOR_nothing)
3415 cmp_code = cmpstr_optab[cmp_mode];
3416 if (cmp_code == CODE_FOR_nothing)
3417 continue;
3418
3419 /* Must make sure the size fits the insn's mode. */
3420 if ((GET_CODE (size) == CONST_INT
3421 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3422 || (GET_MODE_BITSIZE (GET_MODE (size))
3423 > GET_MODE_BITSIZE (cmp_mode)))
3424 continue;
3425
3426 result_mode = insn_data[cmp_code].operand[0].mode;
3427 result = gen_reg_rtx (result_mode);
3428 size = convert_to_mode (cmp_mode, size, 1);
3429 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3430
3431 *px = result;
3432 *py = const0_rtx;
3433 *pmode = result_mode;
3434 return;
3435 }
3436
3437 /* Otherwise call a library function, memcmp. */
3438 libfunc = memcmp_libfunc;
3439 length_type = sizetype;
3440 result_mode = TYPE_MODE (integer_type_node);
3441 cmp_mode = TYPE_MODE (length_type);
3442 size = convert_to_mode (TYPE_MODE (length_type), size,
3443 TYPE_UNSIGNED (length_type));
3444
3445 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3446 result_mode, 3,
3447 XEXP (x, 0), Pmode,
3448 XEXP (y, 0), Pmode,
3449 size, cmp_mode);
3450 *px = result;
3451 *py = const0_rtx;
3452 *pmode = result_mode;
3453 return;
3454 }
3455
3456 /* Don't allow operands to the compare to trap, as that can put the
3457 compare and branch in different basic blocks. */
3458 if (flag_non_call_exceptions)
3459 {
3460 if (may_trap_p (x))
3461 x = force_reg (mode, x);
3462 if (may_trap_p (y))
3463 y = force_reg (mode, y);
3464 }
3465
3466 *px = x;
3467 *py = y;
3468 if (can_compare_p (*pcomparison, mode, purpose))
3469 return;
3470
3471 /* Handle a lib call just for the mode we are using. */
3472
3473 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3474 {
3475 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3476 rtx result;
3477
3478 /* If we want unsigned, and this mode has a distinct unsigned
3479 comparison routine, use that. */
3480 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3481 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3482
3483 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3484 word_mode, 2, x, mode, y, mode);
3485
3486 *px = result;
3487 *pmode = word_mode;
3488 if (TARGET_LIB_INT_CMP_BIASED)
3489 /* Integer comparison returns a result that must be compared
3490 against 1, so that even if we do an unsigned compare
3491 afterward, there is still a value that can represent the
3492 result "less than". */
3493 *py = const1_rtx;
3494 else
3495 {
3496 *py = const0_rtx;
3497 *punsignedp = 1;
3498 }
3499 return;
3500 }
3501
3502 gcc_assert (class == MODE_FLOAT);
3503 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3504 }
3505
3506 /* Before emitting an insn with code ICODE, make sure that X, which is going
3507 to be used for operand OPNUM of the insn, is converted from mode MODE to
3508 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3509 that it is accepted by the operand predicate. Return the new value. */
3510
3511 static rtx
3512 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3513 enum machine_mode wider_mode, int unsignedp)
3514 {
3515 if (mode != wider_mode)
3516 x = convert_modes (wider_mode, mode, x, unsignedp);
3517
3518 if (!insn_data[icode].operand[opnum].predicate
3519 (x, insn_data[icode].operand[opnum].mode))
3520 {
3521 if (no_new_pseudos)
3522 return NULL_RTX;
3523 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3524 }
3525
3526 return x;
3527 }
3528
3529 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3530 we can do the comparison.
3531 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3532 be NULL_RTX which indicates that only a comparison is to be generated. */
3533
3534 static void
3535 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3536 enum rtx_code comparison, int unsignedp, rtx label)
3537 {
3538 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3539 enum mode_class class = GET_MODE_CLASS (mode);
3540 enum machine_mode wider_mode = mode;
3541
3542 /* Try combined insns first. */
3543 do
3544 {
3545 enum insn_code icode;
3546 PUT_MODE (test, wider_mode);
3547
3548 if (label)
3549 {
3550 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3551
3552 if (icode != CODE_FOR_nothing
3553 && insn_data[icode].operand[0].predicate (test, wider_mode))
3554 {
3555 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3556 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3557 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3558 return;
3559 }
3560 }
3561
3562 /* Handle some compares against zero. */
3563 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3564 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3565 {
3566 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3567 emit_insn (GEN_FCN (icode) (x));
3568 if (label)
3569 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3570 return;
3571 }
3572
3573 /* Handle compares for which there is a directly suitable insn. */
3574
3575 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3576 if (icode != CODE_FOR_nothing)
3577 {
3578 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3579 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3580 emit_insn (GEN_FCN (icode) (x, y));
3581 if (label)
3582 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3583 return;
3584 }
3585
3586 if (class != MODE_INT && class != MODE_FLOAT
3587 && class != MODE_COMPLEX_FLOAT)
3588 break;
3589
3590 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3591 }
3592 while (wider_mode != VOIDmode);
3593
3594 gcc_unreachable ();
3595 }
3596
3597 /* Generate code to compare X with Y so that the condition codes are
3598 set and to jump to LABEL if the condition is true. If X is a
3599 constant and Y is not a constant, then the comparison is swapped to
3600 ensure that the comparison RTL has the canonical form.
3601
3602 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3603 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3604 the proper branch condition code.
3605
3606 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3607
3608 MODE is the mode of the inputs (in case they are const_int).
3609
3610 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3611 be passed unchanged to emit_cmp_insn, then potentially converted into an
3612 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
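/* Illustrative sketch (not part of the original source): a caller that
   wants "if (a < b) goto label" for two signed SImode values might use

     emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 0, label);

   where A, B and LABEL are hypothetical rtx placeholders.  */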
3613
3614 void
3615 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3616 enum machine_mode mode, int unsignedp, rtx label)
3617 {
3618 rtx op0 = x, op1 = y;
3619
3620 /* Swap operands and condition to ensure canonical RTL. */
3621 if (swap_commutative_operands_p (x, y))
3622 {
3623 /* If we're not emitting a branch, this means some caller
3624 is out of sync. */
3625 gcc_assert (label);
3626
3627 op0 = y, op1 = x;
3628 comparison = swap_condition (comparison);
3629 }
3630
3631 #ifdef HAVE_cc0
3632 /* If OP0 is still a constant, then both X and Y must be constants.
3633 Force X into a register to create canonical RTL. */
3634 if (CONSTANT_P (op0))
3635 op0 = force_reg (mode, op0);
3636 #endif
3637
3638 if (unsignedp)
3639 comparison = unsigned_condition (comparison);
3640
3641 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3642 ccp_jump);
3643 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3644 }
3645
3646 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3647
3648 void
3649 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3650 enum machine_mode mode, int unsignedp)
3651 {
3652 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3653 }
3654 \f
3655 /* Emit a library call comparison between floating point X and Y.
3656 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3657
3658 static void
3659 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3660 enum machine_mode *pmode, int *punsignedp)
3661 {
3662 enum rtx_code comparison = *pcomparison;
3663 enum rtx_code swapped = swap_condition (comparison);
3664 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3665 rtx x = *px;
3666 rtx y = *py;
3667 enum machine_mode orig_mode = GET_MODE (x);
3668 enum machine_mode mode;
3669 rtx value, target, insns, equiv;
3670 rtx libfunc = 0;
3671 bool reversed_p = false;
3672
3673 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3674 {
3675 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3676 break;
3677
3678 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3679 {
3680 rtx tmp;
3681 tmp = x; x = y; y = tmp;
3682 comparison = swapped;
3683 break;
3684 }
3685
3686 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3687 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3688 {
3689 comparison = reversed;
3690 reversed_p = true;
3691 break;
3692 }
3693 }
3694
3695 gcc_assert (mode != VOIDmode);
3696
3697 if (mode != orig_mode)
3698 {
3699 x = convert_to_mode (mode, x, 0);
3700 y = convert_to_mode (mode, y, 0);
3701 }
3702
3703 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3704 the RTL. This allows the RTL optimizers to delete the libcall if the
3705 condition can be determined at compile-time. */
3706 if (comparison == UNORDERED)
3707 {
3708 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3709 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3710 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3711 temp, const_true_rtx, equiv);
3712 }
3713 else
3714 {
3715 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3716 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3717 {
3718 rtx true_rtx, false_rtx;
3719
3720 switch (comparison)
3721 {
3722 case EQ:
3723 true_rtx = const0_rtx;
3724 false_rtx = const_true_rtx;
3725 break;
3726
3727 case NE:
3728 true_rtx = const_true_rtx;
3729 false_rtx = const0_rtx;
3730 break;
3731
3732 case GT:
3733 true_rtx = const1_rtx;
3734 false_rtx = const0_rtx;
3735 break;
3736
3737 case GE:
3738 true_rtx = const0_rtx;
3739 false_rtx = constm1_rtx;
3740 break;
3741
3742 case LT:
3743 true_rtx = constm1_rtx;
3744 false_rtx = const0_rtx;
3745 break;
3746
3747 case LE:
3748 true_rtx = const0_rtx;
3749 false_rtx = const1_rtx;
3750 break;
3751
3752 default:
3753 gcc_unreachable ();
3754 }
3755 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3756 equiv, true_rtx, false_rtx);
3757 }
3758 }
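/* Note (not part of the original source): the representative values
   chosen above are intended to match the usual libgcc soft-float
   convention, e.g. __ltXf2 returns a value less than zero when the
   operands compare less and __eqXf2 returns zero when they compare
   equal, so the REG_EQUAL note only needs a value with the right sign
   or zeroness.  */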
3759
3760 start_sequence ();
3761 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3762 word_mode, 2, x, mode, y, mode);
3763 insns = get_insns ();
3764 end_sequence ();
3765
3766 target = gen_reg_rtx (word_mode);
3767 emit_libcall_block (insns, target, value, equiv);
3768
3769 if (comparison == UNORDERED
3770 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3771 comparison = reversed_p ? EQ : NE;
3772
3773 *px = target;
3774 *py = const0_rtx;
3775 *pmode = word_mode;
3776 *pcomparison = comparison;
3777 *punsignedp = 0;
3778 }
3779 \f
3780 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3781
3782 void
3783 emit_indirect_jump (rtx loc)
3784 {
3785 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3786 (loc, Pmode))
3787 loc = copy_to_mode_reg (Pmode, loc);
3788
3789 emit_jump_insn (gen_indirect_jump (loc));
3790 emit_barrier ();
3791 }
3792 \f
3793 #ifdef HAVE_conditional_move
3794
3795 /* Emit a conditional move instruction if the machine supports one for that
3796 condition and machine mode.
3797
3798 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3799 the mode to use should they be constants. If it is VOIDmode, they cannot
3800 both be constants.
3801
3802 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3803 should be stored there. MODE is the mode to use should they be constants.
3804 If it is VOIDmode, they cannot both be constants.
3805
3806 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3807 is not supported. */
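/* Illustrative sketch (not part of the original source): to compute
   "target = (a < b) ? c : d" for signed SImode values, a caller might try

     rtx r = emit_conditional_move (target, LT, a, b, SImode,
                                    c, d, SImode, 0);

   and fall back to a compare-and-branch sequence if R is NULL_RTX.
   The names A, B, C, D and TARGET are hypothetical placeholders.  */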
3808
3809 rtx
3810 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3811 enum machine_mode cmode, rtx op2, rtx op3,
3812 enum machine_mode mode, int unsignedp)
3813 {
3814 rtx tem, subtarget, comparison, insn;
3815 enum insn_code icode;
3816 enum rtx_code reversed;
3817
3818 /* If one operand is constant, make it the second one. Only do this
3819 if the other operand is not constant as well. */
3820
3821 if (swap_commutative_operands_p (op0, op1))
3822 {
3823 tem = op0;
3824 op0 = op1;
3825 op1 = tem;
3826 code = swap_condition (code);
3827 }
3828
3829 /* get_condition will prefer to generate LT and GT even if the old
3830 comparison was against zero, so undo that canonicalization here since
3831 comparisons against zero are cheaper. */
3832 if (code == LT && op1 == const1_rtx)
3833 code = LE, op1 = const0_rtx;
3834 else if (code == GT && op1 == constm1_rtx)
3835 code = GE, op1 = const0_rtx;
3836
3837 if (cmode == VOIDmode)
3838 cmode = GET_MODE (op0);
3839
3840 if (swap_commutative_operands_p (op2, op3)
3841 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3842 != UNKNOWN))
3843 {
3844 tem = op2;
3845 op2 = op3;
3846 op3 = tem;
3847 code = reversed;
3848 }
3849
3850 if (mode == VOIDmode)
3851 mode = GET_MODE (op2);
3852
3853 icode = movcc_gen_code[mode];
3854
3855 if (icode == CODE_FOR_nothing)
3856 return 0;
3857
3858 if (flag_force_mem)
3859 {
3860 op2 = force_not_mem (op2);
3861 op3 = force_not_mem (op3);
3862 }
3863
3864 if (!target)
3865 target = gen_reg_rtx (mode);
3866
3867 subtarget = target;
3868
3869 /* If the insn doesn't accept these operands, put them in pseudos. */
3870
3871 if (!insn_data[icode].operand[0].predicate
3872 (subtarget, insn_data[icode].operand[0].mode))
3873 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3874
3875 if (!insn_data[icode].operand[2].predicate
3876 (op2, insn_data[icode].operand[2].mode))
3877 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3878
3879 if (!insn_data[icode].operand[3].predicate
3880 (op3, insn_data[icode].operand[3].mode))
3881 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3882
3883 /* Everything should now be in the suitable form, so emit the compare insn
3884 and then the conditional move. */
3885
3886 comparison
3887 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3888
3889 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3890 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3891 return NULL and let the caller figure out how best to deal with this
3892 situation. */
3893 if (GET_CODE (comparison) != code)
3894 return NULL_RTX;
3895
3896 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3897
3898 /* If that failed, then give up. */
3899 if (insn == 0)
3900 return 0;
3901
3902 emit_insn (insn);
3903
3904 if (subtarget != target)
3905 convert_move (target, subtarget, 0);
3906
3907 return target;
3908 }
3909
3910 /* Return nonzero if a conditional move of mode MODE is supported.
3911
3912 This function is for combine so it can tell whether an insn that looks
3913 like a conditional move is actually supported by the hardware. If we
3914 guess wrong we lose a bit on optimization, but that's it. */
3915 /* ??? sparc64 supports conditionally moving integer values based on fp
3916 comparisons, and vice versa. How do we handle them? */
3917
3918 int
3919 can_conditionally_move_p (enum machine_mode mode)
3920 {
3921 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3922 return 1;
3923
3924 return 0;
3925 }
3926
3927 #endif /* HAVE_conditional_move */
3928
3929 /* Emit a conditional addition instruction if the machine supports one for that
3930 condition and machine mode.
3931
3932 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3933 the mode to use should they be constants. If it is VOIDmode, they cannot
3934 both be constants.
3935
3936 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3937 should be stored there. MODE is the mode to use should they be constants.
3938 If it is VOIDmode, they cannot both be constants.
3939
3940 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3941 is not supported. */
3942
3943 rtx
3944 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3945 enum machine_mode cmode, rtx op2, rtx op3,
3946 enum machine_mode mode, int unsignedp)
3947 {
3948 rtx tem, subtarget, comparison, insn;
3949 enum insn_code icode;
3950 enum rtx_code reversed;
3951
3952 /* If one operand is constant, make it the second one. Only do this
3953 if the other operand is not constant as well. */
3954
3955 if (swap_commutative_operands_p (op0, op1))
3956 {
3957 tem = op0;
3958 op0 = op1;
3959 op1 = tem;
3960 code = swap_condition (code);
3961 }
3962
3963 /* get_condition will prefer to generate LT and GT even if the old
3964 comparison was against zero, so undo that canonicalization here since
3965 comparisons against zero are cheaper. */
3966 if (code == LT && op1 == const1_rtx)
3967 code = LE, op1 = const0_rtx;
3968 else if (code == GT && op1 == constm1_rtx)
3969 code = GE, op1 = const0_rtx;
3970
3971 if (cmode == VOIDmode)
3972 cmode = GET_MODE (op0);
3973
3974 if (swap_commutative_operands_p (op2, op3)
3975 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3976 != UNKNOWN))
3977 {
3978 tem = op2;
3979 op2 = op3;
3980 op3 = tem;
3981 code = reversed;
3982 }
3983
3984 if (mode == VOIDmode)
3985 mode = GET_MODE (op2);
3986
3987 icode = addcc_optab->handlers[(int) mode].insn_code;
3988
3989 if (icode == CODE_FOR_nothing)
3990 return 0;
3991
3992 if (flag_force_mem)
3993 {
3994 op2 = force_not_mem (op2);
3995 op3 = force_not_mem (op3);
3996 }
3997
3998 if (!target)
3999 target = gen_reg_rtx (mode);
4000
4001 /* If the insn doesn't accept these operands, put them in pseudos. */
4002
4003 if (!insn_data[icode].operand[0].predicate
4004 (target, insn_data[icode].operand[0].mode))
4005 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4006 else
4007 subtarget = target;
4008
4009 if (!insn_data[icode].operand[2].predicate
4010 (op2, insn_data[icode].operand[2].mode))
4011 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4012
4013 if (!insn_data[icode].operand[3].predicate
4014 (op3, insn_data[icode].operand[3].mode))
4015 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4016
4017 /* Everything should now be in the suitable form, so emit the compare insn
4018 and then the conditional move. */
4019
4020 comparison
4021 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4022
4023 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4024 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4025 return NULL and let the caller figure out how best to deal with this
4026 situation. */
4027 if (GET_CODE (comparison) != code)
4028 return NULL_RTX;
4029
4030 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4031
4032 /* If that failed, then give up. */
4033 if (insn == 0)
4034 return 0;
4035
4036 emit_insn (insn);
4037
4038 if (subtarget != target)
4039 convert_move (target, subtarget, 0);
4040
4041 return target;
4042 }
4043 \f
4044 /* These functions attempt to generate an insn body, rather than
4045 emitting the insn, but if the gen function already emits them, we
4046 make no attempt to turn them back into naked patterns. */
4047
4048 /* Generate and return an insn body to add Y to X. */
4049
4050 rtx
4051 gen_add2_insn (rtx x, rtx y)
4052 {
4053 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4054
4055 gcc_assert (insn_data[icode].operand[0].predicate
4056 (x, insn_data[icode].operand[0].mode));
4057 gcc_assert (insn_data[icode].operand[1].predicate
4058 (x, insn_data[icode].operand[1].mode));
4059 gcc_assert (insn_data[icode].operand[2].predicate
4060 (y, insn_data[icode].operand[2].mode));
4061
4062 return GEN_FCN (icode) (x, x, y);
4063 }
4064
4065 /* Generate and return an insn body to add r1 and c,
4066 storing the result in r0. */
4067 rtx
4068 gen_add3_insn (rtx r0, rtx r1, rtx c)
4069 {
4070 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4071
4072 if (icode == CODE_FOR_nothing
4073 || !(insn_data[icode].operand[0].predicate
4074 (r0, insn_data[icode].operand[0].mode))
4075 || !(insn_data[icode].operand[1].predicate
4076 (r1, insn_data[icode].operand[1].mode))
4077 || !(insn_data[icode].operand[2].predicate
4078 (c, insn_data[icode].operand[2].mode)))
4079 return NULL_RTX;
4080
4081 return GEN_FCN (icode) (r0, r1, c);
4082 }
4083
4084 int
4085 have_add2_insn (rtx x, rtx y)
4086 {
4087 int icode;
4088
4089 gcc_assert (GET_MODE (x) != VOIDmode);
4090
4091 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4092
4093 if (icode == CODE_FOR_nothing)
4094 return 0;
4095
4096 if (!(insn_data[icode].operand[0].predicate
4097 (x, insn_data[icode].operand[0].mode))
4098 || !(insn_data[icode].operand[1].predicate
4099 (x, insn_data[icode].operand[1].mode))
4100 || !(insn_data[icode].operand[2].predicate
4101 (y, insn_data[icode].operand[2].mode)))
4102 return 0;
4103
4104 return 1;
4105 }
4106
4107 /* Generate and return an insn body to subtract Y from X. */
4108
4109 rtx
4110 gen_sub2_insn (rtx x, rtx y)
4111 {
4112 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4113
4114 gcc_assert (insn_data[icode].operand[0].predicate
4115 (x, insn_data[icode].operand[0].mode));
4116 gcc_assert (insn_data[icode].operand[1].predicate
4117 (x, insn_data[icode].operand[1].mode));
4118 gcc_assert (insn_data[icode].operand[2].predicate
4119 (y, insn_data[icode].operand[2].mode));
4120
4121 return GEN_FCN (icode) (x, x, y);
4122 }
4123
4124 /* Generate and return an insn body to subtract c from r1,
4125 storing the result in r0. */
4126 rtx
4127 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4128 {
4129 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4130
4131 if (icode == CODE_FOR_nothing
4132 || !(insn_data[icode].operand[0].predicate
4133 (r0, insn_data[icode].operand[0].mode))
4134 || !(insn_data[icode].operand[1].predicate
4135 (r1, insn_data[icode].operand[1].mode))
4136 || !(insn_data[icode].operand[2].predicate
4137 (c, insn_data[icode].operand[2].mode)))
4138 return NULL_RTX;
4139
4140 return GEN_FCN (icode) (r0, r1, c);
4141 }
4142
4143 int
4144 have_sub2_insn (rtx x, rtx y)
4145 {
4146 int icode;
4147
4148 gcc_assert (GET_MODE (x) != VOIDmode);
4149
4150 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4151
4152 if (icode == CODE_FOR_nothing)
4153 return 0;
4154
4155 if (!(insn_data[icode].operand[0].predicate
4156 (x, insn_data[icode].operand[0].mode))
4157 || !(insn_data[icode].operand[1].predicate
4158 (x, insn_data[icode].operand[1].mode))
4159 || !(insn_data[icode].operand[2].predicate
4160 (y, insn_data[icode].operand[2].mode)))
4161 return 0;
4162
4163 return 1;
4164 }
4165
4166 /* Generate the body of an instruction to copy Y into X.
4167 It may be a list of insns, if one insn isn't enough. */
4168
4169 rtx
4170 gen_move_insn (rtx x, rtx y)
4171 {
4172 rtx seq;
4173
4174 start_sequence ();
4175 emit_move_insn_1 (x, y);
4176 seq = get_insns ();
4177 end_sequence ();
4178 return seq;
4179 }
4180 \f
4181 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4182 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4183 no such operation exists, CODE_FOR_nothing will be returned. */
4184
4185 enum insn_code
4186 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4187 int unsignedp)
4188 {
4189 convert_optab tab;
4190 #ifdef HAVE_ptr_extend
4191 if (unsignedp < 0)
4192 return CODE_FOR_ptr_extend;
4193 #endif
4194
4195 tab = unsignedp ? zext_optab : sext_optab;
4196 return tab->handlers[to_mode][from_mode].insn_code;
4197 }
4198
4199 /* Generate the body of an insn to extend Y (with mode MFROM)
4200 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4201
4202 rtx
4203 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4204 enum machine_mode mfrom, int unsignedp)
4205 {
4206 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4207 return GEN_FCN (icode) (x, y);
4208 }
4209 \f
4210 /* can_fix_p and can_float_p say whether the target machine
4211 can directly convert a given fixed point type to
4212 a given floating point type, or vice versa.
4213 The returned value is the CODE_FOR_... value to use,
4214 or CODE_FOR_nothing if these modes cannot be directly converted.
4215
4216 *TRUNCP_PTR is set to 1 if it is necessary to output
4217 an explicit FTRUNC insn before the fix insn; otherwise 0. */
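/* Note (not part of the original source): plain fix patterns are only
   well defined when their input already has an integral value, whereas
   fix_trunc patterns truncate any input toward zero themselves.  That
   is why, when only a plain fix pattern is found, *TRUNCP_PTR is set
   and the caller first applies ftrunc to the operand.  */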
4218
4219 static enum insn_code
4220 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4221 int unsignedp, int *truncp_ptr)
4222 {
4223 convert_optab tab;
4224 enum insn_code icode;
4225
4226 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4227 icode = tab->handlers[fixmode][fltmode].insn_code;
4228 if (icode != CODE_FOR_nothing)
4229 {
4230 *truncp_ptr = 0;
4231 return icode;
4232 }
4233
4234 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4235 for this to work. We need to rework the fix* and ftrunc* patterns
4236 and documentation. */
4237 tab = unsignedp ? ufix_optab : sfix_optab;
4238 icode = tab->handlers[fixmode][fltmode].insn_code;
4239 if (icode != CODE_FOR_nothing
4240 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4241 {
4242 *truncp_ptr = 1;
4243 return icode;
4244 }
4245
4246 *truncp_ptr = 0;
4247 return CODE_FOR_nothing;
4248 }
4249
4250 static enum insn_code
4251 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4252 int unsignedp)
4253 {
4254 convert_optab tab;
4255
4256 tab = unsignedp ? ufloat_optab : sfloat_optab;
4257 return tab->handlers[fltmode][fixmode].insn_code;
4258 }
4259 \f
4260 /* Generate code to convert FROM to floating point
4261 and store in TO. FROM must be fixed point and not VOIDmode.
4262 UNSIGNEDP nonzero means regard FROM as unsigned.
4263 Normally this is done by correcting the final value
4264 if it is negative. */
4265
4266 void
4267 expand_float (rtx to, rtx from, int unsignedp)
4268 {
4269 enum insn_code icode;
4270 rtx target = to;
4271 enum machine_mode fmode, imode;
4272
4273 /* Crash now, because we won't be able to decide which mode to use. */
4274 gcc_assert (GET_MODE (from) != VOIDmode);
4275
4276 /* Look for an insn to do the conversion. Do it in the specified
4277 modes if possible; otherwise convert either input, output or both to
4278 wider mode. If the integer mode is wider than the mode of FROM,
4279 we can do the conversion signed even if the input is unsigned. */
4280
4281 for (fmode = GET_MODE (to); fmode != VOIDmode;
4282 fmode = GET_MODE_WIDER_MODE (fmode))
4283 for (imode = GET_MODE (from); imode != VOIDmode;
4284 imode = GET_MODE_WIDER_MODE (imode))
4285 {
4286 int doing_unsigned = unsignedp;
4287
4288 if (fmode != GET_MODE (to)
4289 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4290 continue;
4291
4292 icode = can_float_p (fmode, imode, unsignedp);
4293 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4294 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4295
4296 if (icode != CODE_FOR_nothing)
4297 {
4298 if (imode != GET_MODE (from))
4299 from = convert_to_mode (imode, from, unsignedp);
4300
4301 if (fmode != GET_MODE (to))
4302 target = gen_reg_rtx (fmode);
4303
4304 emit_unop_insn (icode, target, from,
4305 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4306
4307 if (target != to)
4308 convert_move (to, target, 0);
4309 return;
4310 }
4311 }
4312
4313 /* Unsigned integer, and no way to convert directly.
4314 Convert as signed, then conditionally adjust the result. */
4315 if (unsignedp)
4316 {
4317 rtx label = gen_label_rtx ();
4318 rtx temp;
4319 REAL_VALUE_TYPE offset;
4320
4321 if (flag_force_mem)
4322 from = force_not_mem (from);
4323
4324 /* Look for a usable floating mode FMODE wider than the source and at
4325 least as wide as the target. Using FMODE will avoid rounding woes
4326 with unsigned values greater than the signed maximum value. */
4327
4328 for (fmode = GET_MODE (to); fmode != VOIDmode;
4329 fmode = GET_MODE_WIDER_MODE (fmode))
4330 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4331 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4332 break;
4333
4334 if (fmode == VOIDmode)
4335 {
4336 /* There is no such mode. Pretend the target is wide enough. */
4337 fmode = GET_MODE (to);
4338
4339 /* Avoid double-rounding when TO is narrower than FROM. */
4340 if ((significand_size (fmode) + 1)
4341 < GET_MODE_BITSIZE (GET_MODE (from)))
4342 {
4343 rtx temp1;
4344 rtx neglabel = gen_label_rtx ();
4345
4346 /* Don't use TARGET if it isn't a register, is a hard register,
4347 or is the wrong mode. */
4348 if (!REG_P (target)
4349 || REGNO (target) < FIRST_PSEUDO_REGISTER
4350 || GET_MODE (target) != fmode)
4351 target = gen_reg_rtx (fmode);
4352
4353 imode = GET_MODE (from);
4354 do_pending_stack_adjust ();
4355
4356 /* Test whether the sign bit is set. */
4357 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4358 0, neglabel);
4359
4360 /* The sign bit is not set. Convert as signed. */
4361 expand_float (target, from, 0);
4362 emit_jump_insn (gen_jump (label));
4363 emit_barrier ();
4364
4365 /* The sign bit is set.
4366 Convert to a usable (positive signed) value by shifting right
4367 one bit, while remembering if a nonzero bit was shifted
4368 out; i.e., compute (from & 1) | (from >> 1). */
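/* Note (not part of the original source): this path is taken only when
   FMODE has fewer significand bits than FROM is wide, so the low bit of
   FROM could not be represented exactly anyway.  OR-ing it into the
   shifted value keeps it as a "sticky" bit so the float conversion
   rounds in the right direction, and the final doubling is exact.  */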
4369
4370 emit_label (neglabel);
4371 temp = expand_binop (imode, and_optab, from, const1_rtx,
4372 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4373 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4374 NULL_RTX, 1);
4375 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4376 OPTAB_LIB_WIDEN);
4377 expand_float (target, temp, 0);
4378
4379 /* Multiply by 2 to undo the shift above. */
4380 temp = expand_binop (fmode, add_optab, target, target,
4381 target, 0, OPTAB_LIB_WIDEN);
4382 if (temp != target)
4383 emit_move_insn (target, temp);
4384
4385 do_pending_stack_adjust ();
4386 emit_label (label);
4387 goto done;
4388 }
4389 }
4390
4391 /* If we are about to do some arithmetic to correct for an
4392 unsigned operand, do it in a pseudo-register. */
4393
4394 if (GET_MODE (to) != fmode
4395 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4396 target = gen_reg_rtx (fmode);
4397
4398 /* Convert as signed integer to floating. */
4399 expand_float (target, from, 0);
4400
4401 /* If FROM is negative (and therefore TO is negative),
4402 correct its value by 2**bitwidth. */
4403
4404 do_pending_stack_adjust ();
4405 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4406 0, label);
4407
4408
4409 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4410 temp = expand_binop (fmode, add_optab, target,
4411 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4412 target, 0, OPTAB_LIB_WIDEN);
4413 if (temp != target)
4414 emit_move_insn (target, temp);
4415
4416 do_pending_stack_adjust ();
4417 emit_label (label);
4418 goto done;
4419 }
4420
4421 /* No hardware instruction available; call a library routine. */
4422 {
4423 rtx libfunc;
4424 rtx insns;
4425 rtx value;
4426 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4427
4428 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4429 from = convert_to_mode (SImode, from, unsignedp);
4430
4431 if (flag_force_mem)
4432 from = force_not_mem (from);
4433
4434 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4435 gcc_assert (libfunc);
4436
4437 start_sequence ();
4438
4439 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4440 GET_MODE (to), 1, from,
4441 GET_MODE (from));
4442 insns = get_insns ();
4443 end_sequence ();
4444
4445 emit_libcall_block (insns, target, value,
4446 gen_rtx_FLOAT (GET_MODE (to), from));
4447 }
4448
4449 done:
4450
4451 /* Copy result to requested destination
4452 if we have been computing in a temp location. */
4453
4454 if (target != to)
4455 {
4456 if (GET_MODE (target) == GET_MODE (to))
4457 emit_move_insn (to, target);
4458 else
4459 convert_move (to, target, 0);
4460 }
4461 }
4462 \f
4463 /* Generate code to convert FROM to fixed point and store in TO. FROM
4464 must be floating point. */
4465
4466 void
4467 expand_fix (rtx to, rtx from, int unsignedp)
4468 {
4469 enum insn_code icode;
4470 rtx target = to;
4471 enum machine_mode fmode, imode;
4472 int must_trunc = 0;
4473
4474 /* We first try to find a pair of modes, one real and one integer, at
4475 least as wide as FROM and TO, respectively, in which we can open-code
4476 this conversion. If the integer mode is wider than the mode of TO,
4477 we can do the conversion either signed or unsigned. */
4478
4479 for (fmode = GET_MODE (from); fmode != VOIDmode;
4480 fmode = GET_MODE_WIDER_MODE (fmode))
4481 for (imode = GET_MODE (to); imode != VOIDmode;
4482 imode = GET_MODE_WIDER_MODE (imode))
4483 {
4484 int doing_unsigned = unsignedp;
4485
4486 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4487 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4488 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4489
4490 if (icode != CODE_FOR_nothing)
4491 {
4492 if (fmode != GET_MODE (from))
4493 from = convert_to_mode (fmode, from, 0);
4494
4495 if (must_trunc)
4496 {
4497 rtx temp = gen_reg_rtx (GET_MODE (from));
4498 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4499 temp, 0);
4500 }
4501
4502 if (imode != GET_MODE (to))
4503 target = gen_reg_rtx (imode);
4504
4505 emit_unop_insn (icode, target, from,
4506 doing_unsigned ? UNSIGNED_FIX : FIX);
4507 if (target != to)
4508 convert_move (to, target, unsignedp);
4509 return;
4510 }
4511 }
4512
4513 /* For an unsigned conversion, there is one more way to do it.
4514 If we have a signed conversion, we generate code that compares
4515 the real value to the largest representable positive number. If it
4516 is smaller, the conversion is done normally. Otherwise, subtract
4517 one plus the highest signed number, convert, and add it back.
4518
4519 We only need to check all real modes, since we know we didn't find
4520 anything with a wider integer mode.
4521
4522 This code used to extend the FP value into a mode wider than the destination.
4523 This is not needed. Consider, for instance, conversion from SFmode
4524 into DImode.
4525
4526 The hot path through the code handles inputs smaller than 2^63
4527 and does just the conversion, so there are no bits to lose.
4528
4529 In the other path we know the value is positive and in the range 2^63..2^64-1
4530 inclusive (for any other input, overflow happens and the result is undefined).
4531 So we know that the most significant bit set in the mantissa corresponds to
4532 2^63. Subtracting 2^63 does not cause any rounding, as it
4533 simply clears out that bit. The rest is trivial. */
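/* Worked example (not part of the original source): converting DFmode X
   to unsigned DImode when only signed fix insns exist.  If X < 2^63 the
   signed fix gives the result directly.  Otherwise the code computes
   fix (X - 2^63), which lies in [0, 2^63), and XORs the result with
   1 << 63; since that bit is known to be clear, the XOR is the same as
   adding 2^63 back.  */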
4534
4535 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4536 for (fmode = GET_MODE (from); fmode != VOIDmode;
4537 fmode = GET_MODE_WIDER_MODE (fmode))
4538 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4539 &must_trunc))
4540 {
4541 int bitsize;
4542 REAL_VALUE_TYPE offset;
4543 rtx limit, lab1, lab2, insn;
4544
4545 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4546 real_2expN (&offset, bitsize - 1);
4547 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4548 lab1 = gen_label_rtx ();
4549 lab2 = gen_label_rtx ();
4550
4551 if (flag_force_mem)
4552 from = force_not_mem (from);
4553
4554 if (fmode != GET_MODE (from))
4555 from = convert_to_mode (fmode, from, 0);
4556
4557 /* See if we need to do the subtraction. */
4558 do_pending_stack_adjust ();
4559 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4560 0, lab1);
4561
4562 /* If not, do the signed "fix" and branch around fixup code. */
4563 expand_fix (to, from, 0);
4564 emit_jump_insn (gen_jump (lab2));
4565 emit_barrier ();
4566
4567 /* Otherwise, subtract 2**(N-1), convert to signed number,
4568 then add 2**(N-1). Do the addition using XOR since this
4569 will often generate better code. */
4570 emit_label (lab1);
4571 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4572 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4573 expand_fix (to, target, 0);
4574 target = expand_binop (GET_MODE (to), xor_optab, to,
4575 gen_int_mode
4576 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4577 GET_MODE (to)),
4578 to, 1, OPTAB_LIB_WIDEN);
4579
4580 if (target != to)
4581 emit_move_insn (to, target);
4582
4583 emit_label (lab2);
4584
4585 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4586 != CODE_FOR_nothing)
4587 {
4588 /* Make a place for a REG_NOTE and add it. */
4589 insn = emit_move_insn (to, to);
4590 set_unique_reg_note (insn,
4591 REG_EQUAL,
4592 gen_rtx_fmt_e (UNSIGNED_FIX,
4593 GET_MODE (to),
4594 copy_rtx (from)));
4595 }
4596
4597 return;
4598 }
4599
4600 /* We can't do it with an insn, so use a library call. But first ensure
4601 that the mode of TO is at least as wide as SImode, since those are the
4602 only library calls we know about. */
4603
4604 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4605 {
4606 target = gen_reg_rtx (SImode);
4607
4608 expand_fix (target, from, unsignedp);
4609 }
4610 else
4611 {
4612 rtx insns;
4613 rtx value;
4614 rtx libfunc;
4615
4616 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4617 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4618 gcc_assert (libfunc);
4619
4620 if (flag_force_mem)
4621 from = force_not_mem (from);
4622
4623 start_sequence ();
4624
4625 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4626 GET_MODE (to), 1, from,
4627 GET_MODE (from));
4628 insns = get_insns ();
4629 end_sequence ();
4630
4631 emit_libcall_block (insns, target, value,
4632 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4633 GET_MODE (to), from));
4634 }
4635
4636 if (target != to)
4637 {
4638 if (GET_MODE (to) == GET_MODE (target))
4639 emit_move_insn (to, target);
4640 else
4641 convert_move (to, target, 0);
4642 }
4643 }
4644 \f
4645 /* Report whether we have an instruction to perform the operation
4646 specified by CODE on operands of mode MODE. */
4647 int
4648 have_insn_for (enum rtx_code code, enum machine_mode mode)
4649 {
4650 return (code_to_optab[(int) code] != 0
4651 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4652 != CODE_FOR_nothing));
4653 }
4654
4655 /* Create a blank optab. */
4656 static optab
4657 new_optab (void)
4658 {
4659 int i;
4660 optab op = ggc_alloc (sizeof (struct optab));
4661 for (i = 0; i < NUM_MACHINE_MODES; i++)
4662 {
4663 op->handlers[i].insn_code = CODE_FOR_nothing;
4664 op->handlers[i].libfunc = 0;
4665 }
4666
4667 return op;
4668 }
4669
4670 static convert_optab
4671 new_convert_optab (void)
4672 {
4673 int i, j;
4674 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4675 for (i = 0; i < NUM_MACHINE_MODES; i++)
4676 for (j = 0; j < NUM_MACHINE_MODES; j++)
4677 {
4678 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4679 op->handlers[i][j].libfunc = 0;
4680 }
4681 return op;
4682 }
4683
4684 /* Same, but fill in its code as CODE, and write it into the
4685 code_to_optab table. */
4686 static inline optab
4687 init_optab (enum rtx_code code)
4688 {
4689 optab op = new_optab ();
4690 op->code = code;
4691 code_to_optab[(int) code] = op;
4692 return op;
4693 }
4694
4695 /* Same, but fill in its code as CODE, and do _not_ write it into
4696 the code_to_optab table. */
4697 static inline optab
4698 init_optabv (enum rtx_code code)
4699 {
4700 optab op = new_optab ();
4701 op->code = code;
4702 return op;
4703 }
4704
4705 /* Conversion optabs never go in the code_to_optab table. */
4706 static inline convert_optab
4707 init_convert_optab (enum rtx_code code)
4708 {
4709 convert_optab op = new_convert_optab ();
4710 op->code = code;
4711 return op;
4712 }
4713
4714 /* Initialize the libfunc fields of an entire group of entries in some
4715 optab. Each entry is set equal to a string consisting of a leading
4716 pair of underscores followed by a generic operation name followed by
4717 a mode name (downshifted to lowercase) followed by a single character
4718 representing the number of operands for the given operation (which is
4719 usually one of the characters '2', '3', or '4').
4720
4721 OPTABLE is the table in which libfunc fields are to be initialized.
4722 FIRST_MODE is the first machine mode index in the given optab to
4723 initialize.
4724 LAST_MODE is the last machine mode index in the given optab to
4725 initialize.
4726 OPNAME is the generic (string) name of the operation.
4727 SUFFIX is the character which specifies the number of operands for
4728 the given generic operation.
4729 */
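/* Example (not part of the original source): for add_optab with OPNAME
   "add" and SUFFIX '3', the SFmode entry gets the name "__addsf3"; for
   smul_optab with "mul" and '3', the DImode entry gets "__muldi3".  */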
4730
4731 static void
4732 init_libfuncs (optab optable, int first_mode, int last_mode,
4733 const char *opname, int suffix)
4734 {
4735 int mode;
4736 unsigned opname_len = strlen (opname);
4737
4738 for (mode = first_mode; (int) mode <= (int) last_mode;
4739 mode = (enum machine_mode) ((int) mode + 1))
4740 {
4741 const char *mname = GET_MODE_NAME (mode);
4742 unsigned mname_len = strlen (mname);
4743 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4744 char *p;
4745 const char *q;
4746
4747 p = libfunc_name;
4748 *p++ = '_';
4749 *p++ = '_';
4750 for (q = opname; *q; )
4751 *p++ = *q++;
4752 for (q = mname; *q; q++)
4753 *p++ = TOLOWER (*q);
4754 *p++ = suffix;
4755 *p = '\0';
4756
4757 optable->handlers[(int) mode].libfunc
4758 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4759 }
4760 }
4761
4762 /* Initialize the libfunc fields of an entire group of entries in some
4763 optab which correspond to all integer mode operations. The parameters
4764 have the same meaning as similarly named ones for the `init_libfuncs'
4765 routine. (See above). */
4766
4767 static void
4768 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4769 {
4770 int maxsize = 2*BITS_PER_WORD;
4771 if (maxsize < LONG_LONG_TYPE_SIZE)
4772 maxsize = LONG_LONG_TYPE_SIZE;
4773 init_libfuncs (optable, word_mode,
4774 mode_for_size (maxsize, MODE_INT, 0),
4775 opname, suffix);
4776 }
4777
4778 /* Initialize the libfunc fields of an entire group of entries in some
4779 optab which correspond to all real mode operations. The parameters
4780 have the same meaning as similarly named ones for the `init_libfuncs'
4781 routine. (See above). */
4782
4783 static void
4784 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4785 {
4786 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4787 }
4788
4789 /* Initialize the libfunc fields of an entire group of entries of an
4790 inter-mode-class conversion optab. The string formation rules are
4791 similar to the ones for init_libfuncs, above, but instead of having
4792 a mode name and an operand count these functions have two mode names
4793 and no operand count. */
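/* Example (not part of the original source): for sfloat_optab with
   OPNAME "float", converting SImode to SFmode yields the name
   "__floatsisf"; for sfix_optab with "fix", DFmode to SImode yields
   "__fixdfsi", and the unsigned variant "fixuns" yields
   "__fixunsdfsi".  */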
4794 static void
4795 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4796 enum mode_class from_class,
4797 enum mode_class to_class)
4798 {
4799 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4800 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4801 size_t opname_len = strlen (opname);
4802 size_t max_mname_len = 0;
4803
4804 enum machine_mode fmode, tmode;
4805 const char *fname, *tname;
4806 const char *q;
4807 char *libfunc_name, *suffix;
4808 char *p;
4809
4810 for (fmode = first_from_mode;
4811 fmode != VOIDmode;
4812 fmode = GET_MODE_WIDER_MODE (fmode))
4813 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4814
4815 for (tmode = first_to_mode;
4816 tmode != VOIDmode;
4817 tmode = GET_MODE_WIDER_MODE (tmode))
4818 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4819
4820 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4821 libfunc_name[0] = '_';
4822 libfunc_name[1] = '_';
4823 memcpy (&libfunc_name[2], opname, opname_len);
4824 suffix = libfunc_name + opname_len + 2;
4825
4826 for (fmode = first_from_mode; fmode != VOIDmode;
4827 fmode = GET_MODE_WIDER_MODE (fmode))
4828 for (tmode = first_to_mode; tmode != VOIDmode;
4829 tmode = GET_MODE_WIDER_MODE (tmode))
4830 {
4831 fname = GET_MODE_NAME (fmode);
4832 tname = GET_MODE_NAME (tmode);
4833
4834 p = suffix;
4835 for (q = fname; *q; p++, q++)
4836 *p = TOLOWER (*q);
4837 for (q = tname; *q; p++, q++)
4838 *p = TOLOWER (*q);
4839
4840 *p = '\0';
4841
4842 tab->handlers[tmode][fmode].libfunc
4843 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4844 p - libfunc_name));
4845 }
4846 }
4847
4848 /* Initialize the libfunc fields of an entire group of entries of an
4849 intra-mode-class conversion optab. The string formation rules are
4850 similar to the ones for init_libfuncs, above. WIDENING says whether
4851 the optab goes from narrow to wide modes or vice versa. These functions
4852 have two mode names _and_ an operand count. */
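/* Example (not part of the original source): for sext_optab with OPNAME
   "extend" and WIDENING true, SFmode to DFmode yields "__extendsfdf2";
   for trunc_optab with "trunc" and WIDENING false, DFmode to SFmode
   yields "__truncdfsf2".  */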
4853 static void
4854 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4855 enum mode_class class, bool widening)
4856 {
4857 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4858 size_t opname_len = strlen (opname);
4859 size_t max_mname_len = 0;
4860
4861 enum machine_mode nmode, wmode;
4862 const char *nname, *wname;
4863 const char *q;
4864 char *libfunc_name, *suffix;
4865 char *p;
4866
4867 for (nmode = first_mode; nmode != VOIDmode;
4868 nmode = GET_MODE_WIDER_MODE (nmode))
4869 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4870
4871 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4872 libfunc_name[0] = '_';
4873 libfunc_name[1] = '_';
4874 memcpy (&libfunc_name[2], opname, opname_len);
4875 suffix = libfunc_name + opname_len + 2;
4876
4877 for (nmode = first_mode; nmode != VOIDmode;
4878 nmode = GET_MODE_WIDER_MODE (nmode))
4879 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4880 wmode = GET_MODE_WIDER_MODE (wmode))
4881 {
4882 nname = GET_MODE_NAME (nmode);
4883 wname = GET_MODE_NAME (wmode);
4884
4885 p = suffix;
4886 for (q = widening ? nname : wname; *q; p++, q++)
4887 *p = TOLOWER (*q);
4888 for (q = widening ? wname : nname; *q; p++, q++)
4889 *p = TOLOWER (*q);
4890
4891 *p++ = '2';
4892 *p = '\0';
4893
4894 tab->handlers[widening ? wmode : nmode]
4895 [widening ? nmode : wmode].libfunc
4896 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4897 p - libfunc_name));
4898 }
4899 }
4900
4901
4902 rtx
4903 init_one_libfunc (const char *name)
4904 {
4905 rtx symbol;
4906
4907 /* Create a FUNCTION_DECL that can be passed to
4908 targetm.encode_section_info. */
4909 /* ??? We don't have any type information except that this is
4910 a function. Pretend this is "int foo()". */
4911 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4912 build_function_type (integer_type_node, NULL_TREE));
4913 DECL_ARTIFICIAL (decl) = 1;
4914 DECL_EXTERNAL (decl) = 1;
4915 TREE_PUBLIC (decl) = 1;
4916
4917 symbol = XEXP (DECL_RTL (decl), 0);
4918
4919 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4920 are the flags assigned by targetm.encode_section_info. */
4921 SYMBOL_REF_DECL (symbol) = 0;
4922
4923 return symbol;
4924 }
4925
4926 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4927 MODE to NAME, which should be either 0 or a string constant. */
4928 void
4929 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4930 {
4931 if (name)
4932 optable->handlers[mode].libfunc = init_one_libfunc (name);
4933 else
4934 optable->handlers[mode].libfunc = 0;
4935 }
4936
4937 /* Call this to reset the function entry for one conversion optab
4938 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4939 either 0 or a string constant. */
4940 void
4941 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4942 enum machine_mode fmode, const char *name)
4943 {
4944 if (name)
4945 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4946 else
4947 optable->handlers[tmode][fmode].libfunc = 0;
4948 }
4949
4950 /* Call this once to initialize the contents of the optabs
4951 appropriately for the current target machine. */
4952
4953 void
4954 init_optabs (void)
4955 {
4956 unsigned int i;
4957
4958 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4959
4960 for (i = 0; i < NUM_RTX_CODE; i++)
4961 setcc_gen_code[i] = CODE_FOR_nothing;
4962
4963 #ifdef HAVE_conditional_move
4964 for (i = 0; i < NUM_MACHINE_MODES; i++)
4965 movcc_gen_code[i] = CODE_FOR_nothing;
4966 #endif
4967
4968 for (i = 0; i < NUM_MACHINE_MODES; i++)
4969 {
4970 vcond_gen_code[i] = CODE_FOR_nothing;
4971 vcondu_gen_code[i] = CODE_FOR_nothing;
4972 }
4973
4974 add_optab = init_optab (PLUS);
4975 addv_optab = init_optabv (PLUS);
4976 sub_optab = init_optab (MINUS);
4977 subv_optab = init_optabv (MINUS);
4978 smul_optab = init_optab (MULT);
4979 smulv_optab = init_optabv (MULT);
4980 smul_highpart_optab = init_optab (UNKNOWN);
4981 umul_highpart_optab = init_optab (UNKNOWN);
4982 smul_widen_optab = init_optab (UNKNOWN);
4983 umul_widen_optab = init_optab (UNKNOWN);
4984 sdiv_optab = init_optab (DIV);
4985 sdivv_optab = init_optabv (DIV);
4986 sdivmod_optab = init_optab (UNKNOWN);
4987 udiv_optab = init_optab (UDIV);
4988 udivmod_optab = init_optab (UNKNOWN);
4989 smod_optab = init_optab (MOD);
4990 umod_optab = init_optab (UMOD);
4991 fmod_optab = init_optab (UNKNOWN);
4992 drem_optab = init_optab (UNKNOWN);
4993 ftrunc_optab = init_optab (UNKNOWN);
4994 and_optab = init_optab (AND);
4995 ior_optab = init_optab (IOR);
4996 xor_optab = init_optab (XOR);
4997 ashl_optab = init_optab (ASHIFT);
4998 ashr_optab = init_optab (ASHIFTRT);
4999 lshr_optab = init_optab (LSHIFTRT);
5000 rotl_optab = init_optab (ROTATE);
5001 rotr_optab = init_optab (ROTATERT);
5002 smin_optab = init_optab (SMIN);
5003 smax_optab = init_optab (SMAX);
5004 umin_optab = init_optab (UMIN);
5005 umax_optab = init_optab (UMAX);
5006 pow_optab = init_optab (UNKNOWN);
5007 atan2_optab = init_optab (UNKNOWN);
5008
5009 /* These three have codes assigned exclusively for the sake of
5010 have_insn_for. */
5011 mov_optab = init_optab (SET);
5012 movstrict_optab = init_optab (STRICT_LOW_PART);
5013 cmp_optab = init_optab (COMPARE);
5014
5015 ucmp_optab = init_optab (UNKNOWN);
5016 tst_optab = init_optab (UNKNOWN);
5017
5018 eq_optab = init_optab (EQ);
5019 ne_optab = init_optab (NE);
5020 gt_optab = init_optab (GT);
5021 ge_optab = init_optab (GE);
5022 lt_optab = init_optab (LT);
5023 le_optab = init_optab (LE);
5024 unord_optab = init_optab (UNORDERED);
5025
5026 neg_optab = init_optab (NEG);
5027 negv_optab = init_optabv (NEG);
5028 abs_optab = init_optab (ABS);
5029 absv_optab = init_optabv (ABS);
5030 addcc_optab = init_optab (UNKNOWN);
5031 one_cmpl_optab = init_optab (NOT);
5032 ffs_optab = init_optab (FFS);
5033 clz_optab = init_optab (CLZ);
5034 ctz_optab = init_optab (CTZ);
5035 popcount_optab = init_optab (POPCOUNT);
5036 parity_optab = init_optab (PARITY);
5037 sqrt_optab = init_optab (SQRT);
5038 floor_optab = init_optab (UNKNOWN);
5039 lfloor_optab = init_optab (UNKNOWN);
5040 ceil_optab = init_optab (UNKNOWN);
5041 lceil_optab = init_optab (UNKNOWN);
5042 round_optab = init_optab (UNKNOWN);
5043 btrunc_optab = init_optab (UNKNOWN);
5044 nearbyint_optab = init_optab (UNKNOWN);
5045 rint_optab = init_optab (UNKNOWN);
5046 lrint_optab = init_optab (UNKNOWN);
5047 sincos_optab = init_optab (UNKNOWN);
5048 sin_optab = init_optab (UNKNOWN);
5049 asin_optab = init_optab (UNKNOWN);
5050 cos_optab = init_optab (UNKNOWN);
5051 acos_optab = init_optab (UNKNOWN);
5052 exp_optab = init_optab (UNKNOWN);
5053 exp10_optab = init_optab (UNKNOWN);
5054 exp2_optab = init_optab (UNKNOWN);
5055 expm1_optab = init_optab (UNKNOWN);
5056 ldexp_optab = init_optab (UNKNOWN);
5057 logb_optab = init_optab (UNKNOWN);
5058 ilogb_optab = init_optab (UNKNOWN);
5059 log_optab = init_optab (UNKNOWN);
5060 log10_optab = init_optab (UNKNOWN);
5061 log2_optab = init_optab (UNKNOWN);
5062 log1p_optab = init_optab (UNKNOWN);
5063 tan_optab = init_optab (UNKNOWN);
5064 atan_optab = init_optab (UNKNOWN);
5065 copysign_optab = init_optab (UNKNOWN);
5066
5067 strlen_optab = init_optab (UNKNOWN);
5068 cbranch_optab = init_optab (UNKNOWN);
5069 cmov_optab = init_optab (UNKNOWN);
5070 cstore_optab = init_optab (UNKNOWN);
5071 push_optab = init_optab (UNKNOWN);
5072
5073 reduc_smax_optab = init_optab (UNKNOWN);
5074 reduc_umax_optab = init_optab (UNKNOWN);
5075 reduc_smin_optab = init_optab (UNKNOWN);
5076 reduc_umin_optab = init_optab (UNKNOWN);
5077 reduc_plus_optab = init_optab (UNKNOWN);
5078
5079 vec_extract_optab = init_optab (UNKNOWN);
5080 vec_set_optab = init_optab (UNKNOWN);
5081 vec_init_optab = init_optab (UNKNOWN);
5082 vec_realign_load_optab = init_optab (UNKNOWN);
5083 movmisalign_optab = init_optab (UNKNOWN);
5084
5085 powi_optab = init_optab (UNKNOWN);
5086
5087 /* Conversions. */
5088 sext_optab = init_convert_optab (SIGN_EXTEND);
5089 zext_optab = init_convert_optab (ZERO_EXTEND);
5090 trunc_optab = init_convert_optab (TRUNCATE);
5091 sfix_optab = init_convert_optab (FIX);
5092 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5093 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5094 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5095 sfloat_optab = init_convert_optab (FLOAT);
5096 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5097
5098 for (i = 0; i < NUM_MACHINE_MODES; i++)
5099 {
5100 movmem_optab[i] = CODE_FOR_nothing;
5101 clrmem_optab[i] = CODE_FOR_nothing;
5102 cmpstr_optab[i] = CODE_FOR_nothing;
5103 cmpmem_optab[i] = CODE_FOR_nothing;
5104
5105 sync_add_optab[i] = CODE_FOR_nothing;
5106 sync_sub_optab[i] = CODE_FOR_nothing;
5107 sync_ior_optab[i] = CODE_FOR_nothing;
5108 sync_and_optab[i] = CODE_FOR_nothing;
5109 sync_xor_optab[i] = CODE_FOR_nothing;
5110 sync_nand_optab[i] = CODE_FOR_nothing;
5111 sync_old_add_optab[i] = CODE_FOR_nothing;
5112 sync_old_sub_optab[i] = CODE_FOR_nothing;
5113 sync_old_ior_optab[i] = CODE_FOR_nothing;
5114 sync_old_and_optab[i] = CODE_FOR_nothing;
5115 sync_old_xor_optab[i] = CODE_FOR_nothing;
5116 sync_old_nand_optab[i] = CODE_FOR_nothing;
5117 sync_new_add_optab[i] = CODE_FOR_nothing;
5118 sync_new_sub_optab[i] = CODE_FOR_nothing;
5119 sync_new_ior_optab[i] = CODE_FOR_nothing;
5120 sync_new_and_optab[i] = CODE_FOR_nothing;
5121 sync_new_xor_optab[i] = CODE_FOR_nothing;
5122 sync_new_nand_optab[i] = CODE_FOR_nothing;
5123 sync_compare_and_swap[i] = CODE_FOR_nothing;
5124 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5125 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5126 sync_lock_release[i] = CODE_FOR_nothing;
5127
5128 #ifdef HAVE_SECONDARY_RELOADS
5129 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5130 #endif
5131 }
5132
5133 /* Fill in the optabs with the insns we support. */
5134 init_all_optabs ();
5135
5136 /* Initialize the optabs with the names of the library functions. */
5137 init_integral_libfuncs (add_optab, "add", '3');
5138 init_floating_libfuncs (add_optab, "add", '3');
5139 init_integral_libfuncs (addv_optab, "addv", '3');
5140 init_floating_libfuncs (addv_optab, "add", '3');
5141 init_integral_libfuncs (sub_optab, "sub", '3');
5142 init_floating_libfuncs (sub_optab, "sub", '3');
5143 init_integral_libfuncs (subv_optab, "subv", '3');
5144 init_floating_libfuncs (subv_optab, "sub", '3');
5145 init_integral_libfuncs (smul_optab, "mul", '3');
5146 init_floating_libfuncs (smul_optab, "mul", '3');
5147 init_integral_libfuncs (smulv_optab, "mulv", '3');
5148 init_floating_libfuncs (smulv_optab, "mul", '3');
5149 init_integral_libfuncs (sdiv_optab, "div", '3');
5150 init_floating_libfuncs (sdiv_optab, "div", '3');
5151 init_integral_libfuncs (sdivv_optab, "divv", '3');
5152 init_integral_libfuncs (udiv_optab, "udiv", '3');
5153 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5154 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5155 init_integral_libfuncs (smod_optab, "mod", '3');
5156 init_integral_libfuncs (umod_optab, "umod", '3');
5157 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5158 init_integral_libfuncs (and_optab, "and", '3');
5159 init_integral_libfuncs (ior_optab, "ior", '3');
5160 init_integral_libfuncs (xor_optab, "xor", '3');
5161 init_integral_libfuncs (ashl_optab, "ashl", '3');
5162 init_integral_libfuncs (ashr_optab, "ashr", '3');
5163 init_integral_libfuncs (lshr_optab, "lshr", '3');
5164 init_integral_libfuncs (smin_optab, "min", '3');
5165 init_floating_libfuncs (smin_optab, "min", '3');
5166 init_integral_libfuncs (smax_optab, "max", '3');
5167 init_floating_libfuncs (smax_optab, "max", '3');
5168 init_integral_libfuncs (umin_optab, "umin", '3');
5169 init_integral_libfuncs (umax_optab, "umax", '3');
5170 init_integral_libfuncs (neg_optab, "neg", '2');
5171 init_floating_libfuncs (neg_optab, "neg", '2');
5172 init_integral_libfuncs (negv_optab, "negv", '2');
5173 init_floating_libfuncs (negv_optab, "neg", '2');
5174 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5175 init_integral_libfuncs (ffs_optab, "ffs", '2');
5176 init_integral_libfuncs (clz_optab, "clz", '2');
5177 init_integral_libfuncs (ctz_optab, "ctz", '2');
5178 init_integral_libfuncs (popcount_optab, "popcount", '2');
5179 init_integral_libfuncs (parity_optab, "parity", '2');
5180
5181 /* Comparison libcalls for integers MUST come in pairs,
5182 signed/unsigned. */
5183 init_integral_libfuncs (cmp_optab, "cmp", '2');
5184 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5185 init_floating_libfuncs (cmp_optab, "cmp", '2');
5186
5187 /* EQ etc are floating point only. */
5188 init_floating_libfuncs (eq_optab, "eq", '2');
5189 init_floating_libfuncs (ne_optab, "ne", '2');
5190 init_floating_libfuncs (gt_optab, "gt", '2');
5191 init_floating_libfuncs (ge_optab, "ge", '2');
5192 init_floating_libfuncs (lt_optab, "lt", '2');
5193 init_floating_libfuncs (le_optab, "le", '2');
5194 init_floating_libfuncs (unord_optab, "unord", '2');
5195
5196 init_floating_libfuncs (powi_optab, "powi", '2');
5197
5198 /* Conversions. */
5199 init_interclass_conv_libfuncs (sfloat_optab, "float",
5200 MODE_INT, MODE_FLOAT);
5201 init_interclass_conv_libfuncs (sfix_optab, "fix",
5202 MODE_FLOAT, MODE_INT);
5203 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5204 MODE_FLOAT, MODE_INT);
5205
5206 /* sext_optab is also used for FLOAT_EXTEND. */
5207 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5208 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5209
5210 /* Use cabs for double complex abs, since systems generally have cabs.
5211 Don't define any libcall for float complex, so that cabs will be used. */
5212 if (complex_double_type_node)
5213 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5214 = init_one_libfunc ("cabs");
5215
5216 /* The ffs function operates on `int'. */
5217 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5218 = init_one_libfunc ("ffs");
5219
5220 abort_libfunc = init_one_libfunc ("abort");
5221 memcpy_libfunc = init_one_libfunc ("memcpy");
5222 memmove_libfunc = init_one_libfunc ("memmove");
5223 memcmp_libfunc = init_one_libfunc ("memcmp");
5224 memset_libfunc = init_one_libfunc ("memset");
5225 setbits_libfunc = init_one_libfunc ("__setbits");
5226
5227 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5228 ? "_Unwind_SjLj_Resume"
5229 : "_Unwind_Resume");
5230 #ifndef DONT_USE_BUILTIN_SETJMP
5231 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5232 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5233 #else
5234 setjmp_libfunc = init_one_libfunc ("setjmp");
5235 longjmp_libfunc = init_one_libfunc ("longjmp");
5236 #endif
5237 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5238 unwind_sjlj_unregister_libfunc
5239 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5240
5241 /* For function entry/exit instrumentation. */
5242 profile_function_entry_libfunc
5243 = init_one_libfunc ("__cyg_profile_func_enter");
5244 profile_function_exit_libfunc
5245 = init_one_libfunc ("__cyg_profile_func_exit");
5246
5247 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5248
5249 if (HAVE_conditional_trap)
5250 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5251
5252 /* Allow the target to add more libcalls or rename some, etc. */
5253 targetm.init_libfuncs ();
5254 }
5255
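/* A target's init_libfuncs hook (reached through targetm.init_libfuncs
   above) can override individual entries with the same direct-assignment
   pattern used for "cabs".  A minimal sketch, in which the hook name and
   the "__example_divsf3" symbol are purely hypothetical:

     static void
     example_init_libfuncs (void)
     {
       sdiv_optab->handlers[(int) SFmode].libfunc
         = init_one_libfunc ("__example_divsf3");
     }
*/
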
5256 #ifdef DEBUG
5257
5258 /* Print information about the current contents of the optabs on
5259 STDERR. */
5260
5261 static void
5262 debug_optab_libfuncs (void)
5263 {
5264 int i;
5265 int j;
5266 int k;
5267
5268 /* Dump the arithmetic optabs. */
5269 for (i = 0; i != (int) OTI_MAX; i++)
5270 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5271 {
5272 optab o;
5273 struct optab_handlers *h;
5274
5275 o = optab_table[i];
5276 h = &o->handlers[j];
5277 if (h->libfunc)
5278 {
5279 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5280 fprintf (stderr, "%s\t%s:\t%s\n",
5281 GET_RTX_NAME (o->code),
5282 GET_MODE_NAME (j),
5283 XSTR (h->libfunc, 0));
5284 }
5285 }
5286
5287 /* Dump the conversion optabs. */
5288 for (i = 0; i < (int) CTI_MAX; ++i)
5289 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5290 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5291 {
5292 convert_optab o;
5293 struct optab_handlers *h;
5294
5295 o = convert_optab_table[i];
5296 h = &o->handlers[j][k];
5297 if (h->libfunc)
5298 {
5299 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5300 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5301 GET_RTX_NAME (o->code),
5302 GET_MODE_NAME (j),
5303 GET_MODE_NAME (k),
5304 XSTR (h->libfunc, 0));
5305 }
5306 }
5307 }
5308
5309 #endif /* DEBUG */
5310
5311 \f
5312 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5313 CODE. Return 0 on failure. */
5314
5315 rtx
5316 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5317 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5318 {
5319 enum machine_mode mode = GET_MODE (op1);
5320 enum insn_code icode;
5321 rtx insn;
5322
5323 if (!HAVE_conditional_trap)
5324 return 0;
5325
5326 if (mode == VOIDmode)
5327 return 0;
5328
5329 icode = cmp_optab->handlers[(int) mode].insn_code;
5330 if (icode == CODE_FOR_nothing)
5331 return 0;
5332
5333 start_sequence ();
5334 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5335 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5336 if (!op1 || !op2)
5337 {
5338 end_sequence ();
5339 return 0;
5340 }
5341 emit_insn (GEN_FCN (icode) (op1, op2));
5342
5343 PUT_CODE (trap_rtx, code);
5344 gcc_assert (HAVE_conditional_trap);
5345 insn = gen_conditional_trap (trap_rtx, tcode);
5346 if (insn)
5347 {
5348 emit_insn (insn);
5349 insn = get_insns ();
5350 }
5351 end_sequence ();
5352
5353 return insn;
5354 }
5355
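/* A rough sketch of how a caller (if-conversion, say) might use
   gen_cond_trap; the operand names and the zero trap code below are
   assumptions for the example, not taken from a particular caller:

     rtx seq = gen_cond_trap (GET_CODE (cond), XEXP (cond, 0),
                              XEXP (cond, 1), GEN_INT (0));
     if (seq != NULL_RTX)
       emit_insn (seq);

   A zero return means the target has no usable conditional-trap pattern
   for this comparison, and the caller must do something else.  */
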
5356 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5357 or unsigned operation code. */
5358
5359 static enum rtx_code
5360 get_rtx_code (enum tree_code tcode, bool unsignedp)
5361 {
5362 enum rtx_code code;
5363 switch (tcode)
5364 {
5365 case EQ_EXPR:
5366 code = EQ;
5367 break;
5368 case NE_EXPR:
5369 code = NE;
5370 break;
5371 case LT_EXPR:
5372 code = unsignedp ? LTU : LT;
5373 break;
5374 case LE_EXPR:
5375 code = unsignedp ? LEU : LE;
5376 break;
5377 case GT_EXPR:
5378 code = unsignedp ? GTU : GT;
5379 break;
5380 case GE_EXPR:
5381 code = unsignedp ? GEU : GE;
5382 break;
5383
5384 case UNORDERED_EXPR:
5385 code = UNORDERED;
5386 break;
5387 case ORDERED_EXPR:
5388 code = ORDERED;
5389 break;
5390 case UNLT_EXPR:
5391 code = UNLT;
5392 break;
5393 case UNLE_EXPR:
5394 code = UNLE;
5395 break;
5396 case UNGT_EXPR:
5397 code = UNGT;
5398 break;
5399 case UNGE_EXPR:
5400 code = UNGE;
5401 break;
5402 case UNEQ_EXPR:
5403 code = UNEQ;
5404 break;
5405 case LTGT_EXPR:
5406 code = LTGT;
5407 break;
5408
5409 default:
5410 gcc_unreachable ();
5411 }
5412 return code;
5413 }
5414
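/* For example, get_rtx_code (LT_EXPR, true) yields LTU while
   get_rtx_code (LT_EXPR, false) yields LT; the unordered codes such as
   UNLT_EXPR do not depend on UNSIGNEDP.  */
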
5415 /* Return a comparison rtx for COND. Use UNSIGNEDP to select signed or
5416 unsigned operators. Do not generate a compare instruction. */
5417
5418 static rtx
5419 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5420 {
5421 enum rtx_code rcode;
5422 tree t_op0, t_op1;
5423 rtx rtx_op0, rtx_op1;
5424
5425 /* This is unlikely: while generating a VEC_COND_EXPR, the auto-vectorizer
5426 ensures that the condition is a relational operation. */
5427 gcc_assert (COMPARISON_CLASS_P (cond));
5428
5429 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5430 t_op0 = TREE_OPERAND (cond, 0);
5431 t_op1 = TREE_OPERAND (cond, 1);
5432
5433 /* Expand operands. */
5434 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5435 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5436
5437 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5438 && GET_MODE (rtx_op0) != VOIDmode)
5439 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5440
5441 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5442 && GET_MODE (rtx_op1) != VOIDmode)
5443 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5444
5445 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5446 }
5447
5448 /* Return insn code for VEC_COND_EXPR EXPR. */
5449
5450 static inline enum insn_code
5451 get_vcond_icode (tree expr, enum machine_mode mode)
5452 {
5453 enum insn_code icode = CODE_FOR_nothing;
5454
5455 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5456 icode = vcondu_gen_code[mode];
5457 else
5458 icode = vcond_gen_code[mode];
5459 return icode;
5460 }
5461
5462 /* Return TRUE iff appropriate vector insns are available
5463 for the vector cond expr EXPR in mode VMODE. */
5464
5465 bool
5466 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5467 {
5468 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5469 return false;
5470 return true;
5471 }
5472
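/* A sketch of the expected calling pattern (the surrounding code is
   hypothetical): a producer of VEC_COND_EXPRs checks availability first,

     if (expand_vec_cond_expr_p (expr, TYPE_MODE (TREE_TYPE (expr))))
       ... create or expand the VEC_COND_EXPR ...

   so that expand_vec_cond_expr below only sees supported modes.  */
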
5473 /* Generate insns for VEC_COND_EXPR. */
5474
5475 rtx
5476 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5477 {
5478 enum insn_code icode;
5479 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5480 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5481 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5482
5483 icode = get_vcond_icode (vec_cond_expr, mode);
5484 if (icode == CODE_FOR_nothing)
5485 return 0;
5486
5487 if (!target)
5488 target = gen_reg_rtx (mode);
5489
5490 /* Get comparison rtx. First expand both cond expr operands. */
5491 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5492 unsignedp, icode);
5493 cc_op0 = XEXP (comparison, 0);
5494 cc_op1 = XEXP (comparison, 1);
5495 /* Expand both operands and force them into registers, if required. */
5496 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5497 NULL_RTX, VOIDmode, 1);
5498 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5499 && mode != VOIDmode)
5500 rtx_op1 = force_reg (mode, rtx_op1);
5501
5502 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5503 NULL_RTX, VOIDmode, 1);
5504 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5505 && mode != VOIDmode)
5506 rtx_op2 = force_reg (mode, rtx_op2);
5507
5508 /* Emit the instruction. */
5509 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5510 comparison, cc_op0, cc_op1));
5511
5512 return target;
5513 }
5514
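/* Note on the GEN_FCN call above: the vcond/vcondu patterns are expected
   to take six operands, in the order (target, true value, false value,
   comparison, cc_op0, cc_op1), which is why vector_compare_rtx checks the
   predicates of operands 4 and 5 for the comparison operands.  */
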
5515 \f
5516 /* This is an internal subroutine of the other compare_and_swap expanders.
5517 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5518 operation. TARGET is an optional place to store the value result of
5519 the operation. ICODE is the particular instruction to expand. Return
5520 the result of the operation. */
5521
5522 static rtx
5523 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5524 rtx target, enum insn_code icode)
5525 {
5526 enum machine_mode mode = GET_MODE (mem);
5527 rtx insn;
5528
5529 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5530 target = gen_reg_rtx (mode);
5531
5532 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5533 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5534 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5535 old_val = force_reg (mode, old_val);
5536
5537 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5538 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5539 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5540 new_val = force_reg (mode, new_val);
5541
5542 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5543 if (insn == NULL_RTX)
5544 return NULL_RTX;
5545 emit_insn (insn);
5546
5547 return target;
5548 }
5549
5550 /* Expand a compare-and-swap operation and return its value. */
5551
5552 rtx
5553 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5554 {
5555 enum machine_mode mode = GET_MODE (mem);
5556 enum insn_code icode = sync_compare_and_swap[mode];
5557
5558 if (icode == CODE_FOR_nothing)
5559 return NULL_RTX;
5560
5561 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5562 }
5563
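/* A usage sketch, assuming a hypothetical builtin expander; MEM is a MEM
   rtx of the desired mode and TARGET may be NULL_RTX:

     rtx ret = expand_val_compare_and_swap (mem, expected, desired, target);
     if (ret == NULL_RTX)
       ... fall back to a library call ...

   When TARGET is NULL_RTX or unsuitable, a fresh pseudo is returned.  */
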
5564 /* Expand a compare-and-swap operation and store true into the result if
5565 the operation was successful and false otherwise. Return the result.
5566 Unlike other routines, TARGET is not optional. */
5567
5568 rtx
5569 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5570 {
5571 enum machine_mode mode = GET_MODE (mem);
5572 enum insn_code icode;
5573 rtx subtarget, label0, label1;
5574
5575 /* If the target supports a compare-and-swap pattern that simultaneously
5576 sets some flag for success, then use it. Otherwise use the regular
5577 compare-and-swap and follow that immediately with a compare insn. */
5578 icode = sync_compare_and_swap_cc[mode];
5579 switch (icode)
5580 {
5581 default:
5582 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5583 NULL_RTX, icode);
5584 if (subtarget != NULL_RTX)
5585 break;
5586
5587 /* FALLTHRU */
5588 case CODE_FOR_nothing:
5589 icode = sync_compare_and_swap[mode];
5590 if (icode == CODE_FOR_nothing)
5591 return NULL_RTX;
5592
5593 /* Ensure that if old_val == mem, we are not comparing
5594 against an old value. */
5595 if (MEM_P (old_val))
5596 old_val = force_reg (mode, old_val);
5597
5598 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5599 NULL_RTX, icode);
5600 if (subtarget == NULL_RTX)
5601 return NULL_RTX;
5602
5603 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5604 }
5605
5606 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5607 setcc instruction from the beginning. We don't work too hard here,
5608 but it's nice not to generate needlessly poor initial code either. */
5609 if (STORE_FLAG_VALUE == 1)
5610 {
5611 icode = setcc_gen_code[EQ];
5612 if (icode != CODE_FOR_nothing)
5613 {
5614 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5615 rtx insn;
5616
5617 subtarget = target;
5618 if (!insn_data[icode].operand[0].predicate (target, cmode))
5619 subtarget = gen_reg_rtx (cmode);
5620
5621 insn = GEN_FCN (icode) (subtarget);
5622 if (insn)
5623 {
5624 emit_insn (insn);
5625 if (GET_MODE (target) != GET_MODE (subtarget))
5626 {
5627 convert_move (target, subtarget, 1);
5628 subtarget = target;
5629 }
5630 return subtarget;
5631 }
5632 }
5633 }
5634
5635 /* Without an appropriate setcc instruction, use a set of branches to
5636 get 1 and 0 stored into target. Presumably if the target has a
5637 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5638
5639 label0 = gen_label_rtx ();
5640 label1 = gen_label_rtx ();
5641
5642 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5643 emit_move_insn (target, const0_rtx);
5644 emit_jump_insn (gen_jump (label1));
5645 emit_barrier ();
5646 emit_label (label0);
5647 emit_move_insn (target, const1_rtx);
5648 emit_label (label1);
5649
5650 return target;
5651 }
5652
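/* A usage sketch for the boolean form, with a hypothetical caller; unlike
   the value form, TARGET must be supplied:

     rtx target = gen_reg_rtx (SImode);
     if (expand_bool_compare_and_swap (mem, expected, desired, target)
         == NULL_RTX)
       ... fall back to a library call ...

   On success TARGET ends up holding 1 if the swap happened and 0 if not.  */
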
5653 /* This is a helper function for the other atomic operations. It emits
5654 a loop containing SEQ that iterates until the compare-and-swap operation
5655 at the end succeeds. MEM is the memory to be modified. SEQ is
5656 a set of instructions that takes a value from OLD_REG as an input and
5657 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5658 set to the current contents of MEM. After SEQ, a compare-and-swap will
5659 attempt to update MEM with NEW_REG. The function returns true when the
5660 loop was generated successfully. */
5661
5662 static bool
5663 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5664 {
5665 enum machine_mode mode = GET_MODE (mem);
5666 enum insn_code icode;
5667 rtx label, cmp_reg, subtarget;
5668
5669 /* The loop we want to generate looks like
5670
5671 cmp_reg = mem;
5672 label:
5673 old_reg = cmp_reg;
5674 seq;
5675 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5676 if (cmp_reg != old_reg)
5677 goto label;
5678
5679 Note that we only do the plain load from memory once. Subsequent
5680 iterations use the value loaded by the compare-and-swap pattern. */
5681
5682 label = gen_label_rtx ();
5683 cmp_reg = gen_reg_rtx (mode);
5684
5685 emit_move_insn (cmp_reg, mem);
5686 emit_label (label);
5687 emit_move_insn (old_reg, cmp_reg);
5688 if (seq)
5689 emit_insn (seq);
5690
5691 /* If the target supports a compare-and-swap pattern that simultaneously
5692 sets some flag for success, then use it. Otherwise use the regular
5693 compare-and-swap and follow that immediately with a compare insn. */
5694 icode = sync_compare_and_swap_cc[mode];
5695 switch (icode)
5696 {
5697 default:
5698 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5699 cmp_reg, icode);
5700 if (subtarget != NULL_RTX)
5701 {
5702 gcc_assert (subtarget == cmp_reg);
5703 break;
5704 }
5705
5706 /* FALLTHRU */
5707 case CODE_FOR_nothing:
5708 icode = sync_compare_and_swap[mode];
5709 if (icode == CODE_FOR_nothing)
5710 return false;
5711
5712 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5713 cmp_reg, icode);
5714 if (subtarget == NULL_RTX)
5715 return false;
5716 if (subtarget != cmp_reg)
5717 emit_move_insn (cmp_reg, subtarget);
5718
5719 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5720 }
5721
5722 /* ??? Mark this jump predicted not taken? */
5723 emit_jump_insn (bcc_gen_fctn[NE] (label));
5724
5725 return true;
5726 }
5727
5728 /* This function generates the atomic operation MEM CODE= VAL. In this
5729 case, we do not care about any resulting value. Returns NULL if we
5730 cannot generate the operation. */
5731
5732 rtx
5733 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5734 {
5735 enum machine_mode mode = GET_MODE (mem);
5736 enum insn_code icode;
5737 rtx insn;
5738
5739 /* Look to see if the target supports the operation directly. */
5740 switch (code)
5741 {
5742 case PLUS:
5743 icode = sync_add_optab[mode];
5744 break;
5745 case IOR:
5746 icode = sync_ior_optab[mode];
5747 break;
5748 case XOR:
5749 icode = sync_xor_optab[mode];
5750 break;
5751 case AND:
5752 icode = sync_and_optab[mode];
5753 break;
5754 case NOT:
5755 icode = sync_nand_optab[mode];
5756 break;
5757
5758 case MINUS:
5759 icode = sync_sub_optab[mode];
5760 if (icode == CODE_FOR_nothing)
5761 {
5762 icode = sync_add_optab[mode];
5763 if (icode != CODE_FOR_nothing)
5764 {
5765 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5766 code = PLUS;
5767 }
5768 }
5769 break;
5770
5771 default:
5772 gcc_unreachable ();
5773 }
5774
5775 /* Generate the direct operation, if present. */
5776 if (icode != CODE_FOR_nothing)
5777 {
5778 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5779 val = convert_modes (mode, GET_MODE (val), val, 1);
5780 if (!insn_data[icode].operand[1].predicate (val, mode))
5781 val = force_reg (mode, val);
5782
5783 insn = GEN_FCN (icode) (mem, val);
5784 if (insn)
5785 {
5786 emit_insn (insn);
5787 return const0_rtx;
5788 }
5789 }
5790
5791 /* Failing that, generate a compare-and-swap loop in which we perform the
5792 operation with normal arithmetic instructions. */
5793 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5794 {
5795 rtx t0 = gen_reg_rtx (mode), t1;
5796
5797 start_sequence ();
5798
5799 t1 = t0;
5800 if (code == NOT)
5801 {
5802 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5803 code = AND;
5804 }
5805 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5806 true, OPTAB_LIB_WIDEN);
5807
5808 insn = get_insns ();
5809 end_sequence ();
5810
5811 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5812 return const0_rtx;
5813 }
5814
5815 return NULL_RTX;
5816 }
5817
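/* A usage sketch, assuming a hypothetical builtin expander for a
   fetch-and-op whose result is ignored (e.g. __sync_fetch_and_and with an
   unused return value):

     if (expand_sync_operation (mem, val, AND) == NULL_RTX)
       ... fall back to a library call ...

   A non-null return (const0_rtx) only means the insns were emitted; the
   value itself carries no information.  */
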
5818 /* This function generates the atomic operation MEM CODE= VAL. In this
5819 case, we do care about the resulting value: if AFTER is true then
5820 return the value MEM holds after the operation; if AFTER is false,
5821 then return the value MEM holds before the operation. TARGET is an
5822 optional place for the result value to be stored. */
5823
5824 rtx
5825 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5826 bool after, rtx target)
5827 {
5828 enum machine_mode mode = GET_MODE (mem);
5829 enum insn_code old_code, new_code, icode;
5830 bool compensate;
5831 rtx insn;
5832
5833 /* Look to see if the target supports the operation directly. */
5834 switch (code)
5835 {
5836 case PLUS:
5837 old_code = sync_old_add_optab[mode];
5838 new_code = sync_new_add_optab[mode];
5839 break;
5840 case IOR:
5841 old_code = sync_old_ior_optab[mode];
5842 new_code = sync_new_ior_optab[mode];
5843 break;
5844 case XOR:
5845 old_code = sync_old_xor_optab[mode];
5846 new_code = sync_new_xor_optab[mode];
5847 break;
5848 case AND:
5849 old_code = sync_old_and_optab[mode];
5850 new_code = sync_new_and_optab[mode];
5851 break;
5852 case NOT:
5853 old_code = sync_old_nand_optab[mode];
5854 new_code = sync_new_nand_optab[mode];
5855 break;
5856
5857 case MINUS:
5858 old_code = sync_old_sub_optab[mode];
5859 new_code = sync_new_sub_optab[mode];
5860 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5861 {
5862 old_code = sync_old_add_optab[mode];
5863 new_code = sync_new_add_optab[mode];
5864 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5865 {
5866 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5867 code = PLUS;
5868 }
5869 }
5870 break;
5871
5872 default:
5873 gcc_unreachable ();
5874 }
5875
5876 /* If the target supports the proper new/old operation, great. But if we
5877 only support the opposite old/new operation, check to see if we can
5878 compensate. When the old value is supported, we can always recover the
5879 new value by repeating the operation with normal arithmetic. When only
5880 the new value is supported, we can compensate only if the operation is
5881 reversible. */
5882 compensate = false;
5883 if (after)
5884 {
5885 icode = new_code;
5886 if (icode == CODE_FOR_nothing)
5887 {
5888 icode = old_code;
5889 if (icode != CODE_FOR_nothing)
5890 compensate = true;
5891 }
5892 }
5893 else
5894 {
5895 icode = old_code;
5896 if (icode == CODE_FOR_nothing
5897 && (code == PLUS || code == MINUS || code == XOR))
5898 {
5899 icode = new_code;
5900 if (icode != CODE_FOR_nothing)
5901 compensate = true;
5902 }
5903 }
5904
5905 /* If we found something supported, great. */
5906 if (icode != CODE_FOR_nothing)
5907 {
5908 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5909 target = gen_reg_rtx (mode);
5910
5911 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5912 val = convert_modes (mode, GET_MODE (val), val, 1);
5913 if (!insn_data[icode].operand[2].predicate (val, mode))
5914 val = force_reg (mode, val);
5915
5916 insn = GEN_FCN (icode) (target, mem, val);
5917 if (insn)
5918 {
5919 emit_insn (insn);
5920
5921 /* If we need to compensate for using an operation with the
5922 wrong return value, do so now. */
5923 if (compensate)
5924 {
5925 if (!after)
5926 {
5927 if (code == PLUS)
5928 code = MINUS;
5929 else if (code == MINUS)
5930 code = PLUS;
5931 }
5932
5933 if (code == NOT)
5934 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5935 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5936 true, OPTAB_LIB_WIDEN);
5937 }
5938
5939 return target;
5940 }
5941 }
5942
5943 /* Failing that, generate a compare-and-swap loop in which we perform the
5944 operation with normal arithmetic instructions. */
5945 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5946 {
5947 rtx t0 = gen_reg_rtx (mode), t1;
5948
5949 if (!target || !register_operand (target, mode))
5950 target = gen_reg_rtx (mode);
5951
5952 start_sequence ();
5953
5954 if (!after)
5955 emit_move_insn (target, t0);
5956 t1 = t0;
5957 if (code == NOT)
5958 {
5959 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5960 code = AND;
5961 }
5962 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5963 true, OPTAB_LIB_WIDEN);
5964 if (after)
5965 emit_move_insn (target, t1);
5966
5967 insn = get_insns ();
5968 end_sequence ();
5969
5970 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5971 return target;
5972 }
5973
5974 return NULL_RTX;
5975 }
5976
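/* A sketch of how the AFTER flag is meant to be used, with hypothetical
   callers: an expander wanting the old value (fetch-and-add style) would
   call

     expand_sync_fetch_operation (mem, val, PLUS, false, target);

   while one wanting the new value (add-and-fetch style) would call

     expand_sync_fetch_operation (mem, val, PLUS, true, target);

   and in both cases a NULL_RTX result means "use a library call".  */
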
5977 /* This function expands a test-and-set operation. Ideally we atomically
5978 store VAL in MEM and return the previous value in MEM. Some targets
5979 may only support this operation when VAL is the constant 1;
5980 in this case the return value will be 0/1, but the exact value
5981 stored in MEM is target-defined. TARGET is an optional place to store
5982 the return value. */
5983
5984 rtx
5985 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
5986 {
5987 enum machine_mode mode = GET_MODE (mem);
5988 enum insn_code icode;
5989 rtx insn;
5990
5991 /* If the target supports the test-and-set directly, great. */
5992 icode = sync_lock_test_and_set[mode];
5993 if (icode != CODE_FOR_nothing)
5994 {
5995 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5996 target = gen_reg_rtx (mode);
5997
5998 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5999 val = convert_modes (mode, GET_MODE (val), val, 1);
6000 if (!insn_data[icode].operand[2].predicate (val, mode))
6001 val = force_reg (mode, val);
6002
6003 insn = GEN_FCN (icode) (target, mem, val);
6004 if (insn)
6005 {
6006 emit_insn (insn);
6007 return target;
6008 }
6009 }
6010
6011 /* Otherwise, use a compare-and-swap loop for the exchange. */
6012 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6013 {
6014 if (!target || !register_operand (target, mode))
6015 target = gen_reg_rtx (mode);
6016 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6017 val = convert_modes (mode, GET_MODE (val), val, 1);
6018 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6019 return target;
6020 }
6021
6022 return NULL_RTX;
6023 }
6024
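/* A usage sketch with a hypothetical caller; remember that some targets
   only implement the VAL == 1 case:

     rtx ret = expand_sync_lock_test_and_set (mem, val, target);
     if (ret == NULL_RTX)
       ... fall back to a library call ...  */
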
6025 #include "gt-optabs.h"