1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 optab optab_table[OTI_MAX];
58
59 rtx libfunc_table[LTI_MAX];
60
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[CTI_MAX];
63
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
66
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
69
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
71
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
75
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
77
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (e.g. for the ARM). */
83
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
86
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
89
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
92
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
97
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
127
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
132 \f
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
136
137 If the last insn does not set TARGET, don't do anything, but return 1.
138
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
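/* For instance, if INSNS computes TARGET = OP0 + OP1 and CODE is PLUS, the
   note attached to the final insn is (plus:M OP0 OP1); later passes such as
   CSE can then treat TARGET as equivalent to that expression without
   rescanning the whole sequence.  */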
142
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
145 {
146 rtx last_insn, insn, set;
147 rtx note;
148
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
150
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
157
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
160
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
164 ;
165
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
169
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
175
176 /* If TARGET is in OP0 or OP1, check if anything in INSNS sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
180 {
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
183 {
184 if (reg_set_p (target, insn))
185 return 0;
186
187 insn = PREV_INSN (insn);
188 }
189 }
190
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
195
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
197
198 return 1;
199 }
200 \f
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, as is the case for
205 logical operations but not for right shifts. */
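/* For example, when a QImode value is widened to SImode for an AND whose
   result will immediately be truncated back to QImode, the upper 24 bits of
   the widened operand are irrelevant and a cheap paradoxical SUBREG will do;
   a right shift, by contrast, needs the bits supplied by a real sign- or
   zero-extension.  */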
206
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
210 {
211 rtx result;
212
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
216
217 /* If we must extend, do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
224
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
229
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
232
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
237 }
238 \f
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
246 {
247 bool trapv;
248 switch (code)
249 {
250 case BIT_AND_EXPR:
251 return and_optab;
252
253 case BIT_IOR_EXPR:
254 return ior_optab;
255
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
258
259 case BIT_XOR_EXPR:
260 return xor_optab;
261
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
267
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
275
276 case LSHIFT_EXPR:
277 return ashl_optab;
278
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
281
282 case LROTATE_EXPR:
283 return rotl_optab;
284
285 case RROTATE_EXPR:
286 return rotr_optab;
287
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
290
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
293
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
296
297 default:
298 break;
299 }
300
301 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
302 switch (code)
303 {
304 case PLUS_EXPR:
305 return trapv ? addv_optab : add_optab;
306
307 case MINUS_EXPR:
308 return trapv ? subv_optab : sub_optab;
309
310 case MULT_EXPR:
311 return trapv ? smulv_optab : smul_optab;
312
313 case NEGATE_EXPR:
314 return trapv ? negv_optab : neg_optab;
315
316 case ABS_EXPR:
317 return trapv ? absv_optab : abs_optab;
318
319 default:
320 return NULL;
321 }
322 }
323 \f
324
325 /* Generate code to perform an operation specified by TERNARY_OPTAB
326 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
327
328 UNSIGNEDP is for the case where we have to widen the operands
329 to perform the operation. It says to use zero-extension.
330
331 If TARGET is nonzero, the value
332 is generated there, if it is convenient to do so.
333 In all cases an rtx is returned for the locus of the value;
334 this may or may not be TARGET. */
335
336 rtx
337 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
338 rtx op1, rtx op2, rtx target, int unsignedp)
339 {
340 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
341 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
342 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
343 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
344 rtx temp;
345 rtx pat;
346 rtx xop0 = op0, xop1 = op1, xop2 = op2;
347
348 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
349 != CODE_FOR_nothing);
350
351 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
352 temp = gen_reg_rtx (mode);
353 else
354 temp = target;
355
356 /* In case the insn wants input operands in modes different from
357 those of the actual operands, convert the operands. It would
358 seem that we don't need to convert CONST_INTs, but we do, so
359 that they're properly zero-extended, sign-extended or truncated
360 for their mode. */
361
362 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
363 xop0 = convert_modes (mode0,
364 GET_MODE (op0) != VOIDmode
365 ? GET_MODE (op0)
366 : mode,
367 xop0, unsignedp);
368
369 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
370 xop1 = convert_modes (mode1,
371 GET_MODE (op1) != VOIDmode
372 ? GET_MODE (op1)
373 : mode,
374 xop1, unsignedp);
375
376 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
377 xop2 = convert_modes (mode2,
378 GET_MODE (op2) != VOIDmode
379 ? GET_MODE (op2)
380 : mode,
381 xop2, unsignedp);
382
383 /* Now, if insn's predicates don't allow our operands, put them into
384 pseudo regs. */
385
386 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
387 && mode0 != VOIDmode)
388 xop0 = copy_to_mode_reg (mode0, xop0);
389
390 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
391 && mode1 != VOIDmode)
392 xop1 = copy_to_mode_reg (mode1, xop1);
393
394 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
395 && mode2 != VOIDmode)
396 xop2 = copy_to_mode_reg (mode2, xop2);
397
398 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
399
400 emit_insn (pat);
401 return temp;
402 }
403
404
405 /* Like expand_binop, but return a constant rtx if the result can be
406 calculated at compile time. The arguments and return value are
407 otherwise the same as for expand_binop. */
408
409 static rtx
410 simplify_expand_binop (enum machine_mode mode, optab binoptab,
411 rtx op0, rtx op1, rtx target, int unsignedp,
412 enum optab_methods methods)
413 {
414 if (CONSTANT_P (op0) && CONSTANT_P (op1))
415 return simplify_gen_binary (binoptab->code, mode, op0, op1);
416 else
417 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
418 }
419
420 /* Like simplify_expand_binop, but always put the result in TARGET.
421 Return true if the expansion succeeded. */
422
423 bool
424 force_expand_binop (enum machine_mode mode, optab binoptab,
425 rtx op0, rtx op1, rtx target, int unsignedp,
426 enum optab_methods methods)
427 {
428 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
429 target, unsignedp, methods);
430 if (x == 0)
431 return false;
432 if (x != target)
433 emit_move_insn (target, x);
434 return true;
435 }
436
437 /* This subroutine of expand_doubleword_shift handles the cases in which
438 the effective shift value is >= BITS_PER_WORD. The arguments and return
439 value are the same as for the parent routine, except that SUPERWORD_OP1
440 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
441 INTO_TARGET may be null if the caller has decided to calculate it. */
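/* For example, in a left shift by a count known to be at least
   BITS_PER_WORD, the out-of word of the result is simply zero, while the
   into word is OUTOF_INPUT shifted by SUPERWORD_OP1 (typically the count
   minus BITS_PER_WORD, or the count itself on targets that truncate shift
   counts); an arithmetic right shift instead fills the out-of word with
   copies of OUTOF_INPUT's sign bit.  */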
442
443 static bool
444 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
445 rtx outof_target, rtx into_target,
446 int unsignedp, enum optab_methods methods)
447 {
448 if (into_target != 0)
449 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
450 into_target, unsignedp, methods))
451 return false;
452
453 if (outof_target != 0)
454 {
455 /* For a signed right shift, we must fill OUTOF_TARGET with copies
456 of the sign bit; otherwise we must fill it with zeros. */
457 if (binoptab != ashr_optab)
458 emit_move_insn (outof_target, CONST0_RTX (word_mode));
459 else
460 if (!force_expand_binop (word_mode, binoptab,
461 outof_input, GEN_INT (BITS_PER_WORD - 1),
462 outof_target, unsignedp, methods))
463 return false;
464 }
465 return true;
466 }
467
468 /* This subroutine of expand_doubleword_shift handles the cases in which
469 the effective shift value is < BITS_PER_WORD. The arguments and return
470 value are the same as for the parent routine. */
471
472 static bool
473 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
474 rtx outof_input, rtx into_input, rtx op1,
475 rtx outof_target, rtx into_target,
476 int unsignedp, enum optab_methods methods,
477 unsigned HOST_WIDE_INT shift_mask)
478 {
479 optab reverse_unsigned_shift, unsigned_shift;
480 rtx tmp, carries;
481
482 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
483 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
484
485 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
486 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
487 the opposite direction to BINOPTAB. */
488 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
489 {
490 carries = outof_input;
491 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
492 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
493 0, true, methods);
494 }
495 else
496 {
497 /* We must avoid shifting by BITS_PER_WORD bits since that is either
498 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
499 has unknown behavior. Do a single shift first, then shift by the
500 remainder. It's OK to use ~OP1 as the remainder if shift counts
501 are truncated to the mode size. */
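/* Concretely, with BITS_PER_WORD == 32 and OP1 == 5, the code below shifts
   OUTOF_INPUT by 1 and then by a further 26 (computed either as 31 - OP1
   or, when shift counts are truncated, as ~OP1), giving the required total
   of 27 == 32 - 5 without ever using a shift count of 32.  */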
502 carries = expand_binop (word_mode, reverse_unsigned_shift,
503 outof_input, const1_rtx, 0, unsignedp, methods);
504 if (shift_mask == BITS_PER_WORD - 1)
505 {
506 tmp = immed_double_const (-1, -1, op1_mode);
507 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
508 0, true, methods);
509 }
510 else
511 {
512 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
513 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
514 0, true, methods);
515 }
516 }
517 if (tmp == 0 || carries == 0)
518 return false;
519 carries = expand_binop (word_mode, reverse_unsigned_shift,
520 carries, tmp, 0, unsignedp, methods);
521 if (carries == 0)
522 return false;
523
524 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
525 so the result can go directly into INTO_TARGET if convenient. */
526 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
527 into_target, unsignedp, methods);
528 if (tmp == 0)
529 return false;
530
531 /* Now OR in the bits carried over from OUTOF_INPUT. */
532 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
533 into_target, unsignedp, methods))
534 return false;
535
536 /* Use a standard word_mode shift for the out-of half. */
537 if (outof_target != 0)
538 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
539 outof_target, unsignedp, methods))
540 return false;
541
542 return true;
543 }
544
545
546 #ifdef HAVE_conditional_move
547 /* Try implementing expand_doubleword_shift using conditional moves.
548 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
549 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
550 are the shift counts to use in the former and latter case. All other
551 arguments are the same as the parent routine. */
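/* That is, the superword results are normally computed into scratch
   registers, the subword results are computed directly into OUTOF_TARGET
   and INTO_TARGET, and conditional moves then overwrite the targets with
   the superword values when the shift count turns out to be
   >= BITS_PER_WORD.  */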
552
553 static bool
554 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
555 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
556 rtx outof_input, rtx into_input,
557 rtx subword_op1, rtx superword_op1,
558 rtx outof_target, rtx into_target,
559 int unsignedp, enum optab_methods methods,
560 unsigned HOST_WIDE_INT shift_mask)
561 {
562 rtx outof_superword, into_superword;
563
564 /* Put the superword version of the output into OUTOF_SUPERWORD and
565 INTO_SUPERWORD. */
566 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
567 if (outof_target != 0 && subword_op1 == superword_op1)
568 {
569 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
570 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
571 into_superword = outof_target;
572 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
573 outof_superword, 0, unsignedp, methods))
574 return false;
575 }
576 else
577 {
578 into_superword = gen_reg_rtx (word_mode);
579 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
580 outof_superword, into_superword,
581 unsignedp, methods))
582 return false;
583 }
584
585 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
586 if (!expand_subword_shift (op1_mode, binoptab,
587 outof_input, into_input, subword_op1,
588 outof_target, into_target,
589 unsignedp, methods, shift_mask))
590 return false;
591
592 /* Select between them. Do the INTO half first because INTO_SUPERWORD
593 might be the current value of OUTOF_TARGET. */
594 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
595 into_target, into_superword, word_mode, false))
596 return false;
597
598 if (outof_target != 0)
599 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
600 outof_target, outof_superword,
601 word_mode, false))
602 return false;
603
604 return true;
605 }
606 #endif
607
608 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
609 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
610 input operand; the shift moves bits in the direction OUTOF_INPUT->
611 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
612 of the target. OP1 is the shift count and OP1_MODE is its mode.
613 If OP1 is constant, it will have been truncated as appropriate
614 and is known to be nonzero.
615
616 If SHIFT_MASK is zero, the result of word shifts is undefined when the
617 shift count is outside the range [0, BITS_PER_WORD). This routine must
618 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
619
620 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
621 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
622 fill with zeros or sign bits as appropriate.
623
624 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
625 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
626 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
627 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
628 are undefined.
629
630 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
631 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
632 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
633 function wants to calculate it itself.
634
635 Return true if the shift could be successfully synthesized. */
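/* As a rough illustration (ignoring the zero-count and truncation
   subtleties handled below), a variable left shift of a two-word value on a
   target with 32-bit words behaves like

	if (op1 < 32)
	  {
	    into_target = (into_input << op1) | (outof_input >> (32 - op1));
	    outof_target = outof_input << op1;
	  }
	else
	  {
	    into_target = outof_input << (op1 - 32);
	    outof_target = 0;
	  }

   where, for a left shift, OUTOF_INPUT is the low input word and
   INTO_TARGET is the high word of the result.  */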
636
637 static bool
638 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
639 rtx outof_input, rtx into_input, rtx op1,
640 rtx outof_target, rtx into_target,
641 int unsignedp, enum optab_methods methods,
642 unsigned HOST_WIDE_INT shift_mask)
643 {
644 rtx superword_op1, tmp, cmp1, cmp2;
645 rtx subword_label, done_label;
646 enum rtx_code cmp_code;
647
648 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
649 fill the result with sign or zero bits as appropriate. If so, the value
650 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
651 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
652 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
653
654 This isn't worthwhile for constant shifts since the optimizers will
655 cope better with in-range shift counts. */
656 if (shift_mask >= BITS_PER_WORD
657 && outof_target != 0
658 && !CONSTANT_P (op1))
659 {
660 if (!expand_doubleword_shift (op1_mode, binoptab,
661 outof_input, into_input, op1,
662 0, into_target,
663 unsignedp, methods, shift_mask))
664 return false;
665 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
666 outof_target, unsignedp, methods))
667 return false;
668 return true;
669 }
670
671 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
672 is true when the effective shift value is less than BITS_PER_WORD.
673 Set SUPERWORD_OP1 to the shift count that should be used to shift
674 OUTOF_INPUT into INTO_TARGET when the condition is false. */
675 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
676 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
677 {
678 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
679 is a subword shift count. */
680 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
681 0, true, methods);
682 cmp2 = CONST0_RTX (op1_mode);
683 cmp_code = EQ;
684 superword_op1 = op1;
685 }
686 else
687 {
688 /* Set CMP1 to OP1 - BITS_PER_WORD. */
689 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
690 0, true, methods);
691 cmp2 = CONST0_RTX (op1_mode);
692 cmp_code = LT;
693 superword_op1 = cmp1;
694 }
695 if (cmp1 == 0)
696 return false;
697
698 /* If we can compute the condition at compile time, pick the
699 appropriate subroutine. */
700 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
701 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
702 {
703 if (tmp == const0_rtx)
704 return expand_superword_shift (binoptab, outof_input, superword_op1,
705 outof_target, into_target,
706 unsignedp, methods);
707 else
708 return expand_subword_shift (op1_mode, binoptab,
709 outof_input, into_input, op1,
710 outof_target, into_target,
711 unsignedp, methods, shift_mask);
712 }
713
714 #ifdef HAVE_conditional_move
715 /* Try using conditional moves to generate straight-line code. */
716 {
717 rtx start = get_last_insn ();
718 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
719 cmp_code, cmp1, cmp2,
720 outof_input, into_input,
721 op1, superword_op1,
722 outof_target, into_target,
723 unsignedp, methods, shift_mask))
724 return true;
725 delete_insns_since (start);
726 }
727 #endif
728
729 /* As a last resort, use branches to select the correct alternative. */
730 subword_label = gen_label_rtx ();
731 done_label = gen_label_rtx ();
732
733 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
734 0, 0, subword_label);
735
736 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
737 outof_target, into_target,
738 unsignedp, methods))
739 return false;
740
741 emit_jump_insn (gen_jump (done_label));
742 emit_barrier ();
743 emit_label (subword_label);
744
745 if (!expand_subword_shift (op1_mode, binoptab,
746 outof_input, into_input, op1,
747 outof_target, into_target,
748 unsignedp, methods, shift_mask))
749 return false;
750
751 emit_label (done_label);
752 return true;
753 }
754 \f
755 /* Subroutine of expand_binop. Perform a double word multiplication of
756 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
757 as the target's word_mode. This function returns NULL_RTX if anything
758 goes wrong, in which case it may have already emitted instructions
759 which need to be deleted.
760
761 If we want to multiply two two-word values and have normal and widening
762 multiplies of single-word values, we can do this with three smaller
763 multiplications. Note that we do not make a REG_NO_CONFLICT block here
764 because we are not operating on one word at a time.
765
766 The multiplication proceeds as follows:
767                            _______________________
768                           [__op0_high_|__op0_low__]
769                            _______________________
770                         * [__op1_high_|__op1_low__]
771     _______________________________________________
772                            _______________________
773   (1)                     [__op0_low__*__op1_low__]
774                _______________________
775   (2a)        [__op0_low__*__op1_high_]
776                _______________________
777   (2b)        [__op0_high_*__op1_low__]
778      _______________________
779 (3) [__op0_high_*__op1_high_]
780
781
782 This gives a 4-word result. Since we are only interested in the
783 lower 2 words, partial result (3) and the upper words of (2a) and
784 (2b) don't need to be calculated. Hence (2a) and (2b) can be
785 calculated using non-widening multiplication.
786
787 (1), however, needs to be calculated with an unsigned widening
788 multiplication. If this operation is not directly supported we
789 try using a signed widening multiplication and adjust the result.
790 This adjustment works as follows:
791
792 If both operands are positive then no adjustment is needed.
793
794 If the operands have different signs, for example op0_low < 0 and
795 op1_low >= 0, the instruction treats the most significant bit of
796 op0_low as a sign bit instead of a bit with significance
797 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
798 with 2**BITS_PER_WORD - op0_low, and two's complements the
799 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
800 the result.
801
802 Similarly, if both operands are negative, we need to add
803 (op0_low + op1_low) * 2**BITS_PER_WORD.
804
805 We use a trick to adjust quickly. We logically shift op0_low right
806 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
807 op0_high (op1_high) before it is used to calculate 2b (2a). If no
808 logical shift exists, we do an arithmetic right shift and subtract
809 the 0 or -1. */
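/* In equation form, writing B for BITS_PER_WORD, the low 2*B bits of the
   product are

	op0_low * op1_low
	+ ((op0_low * op1_high + op0_high * op1_low) << B)

   and the signed-widening adjustment amounts to adding op1_low << B when
   op0_low has its sign bit set (and, symmetrically, op0_low << B when
   op1_low does), which is exactly what incrementing op0_high (op1_high) by
   the shifted-out sign bit achieves before forming (2b) ((2a)).  */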
810
811 static rtx
812 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
813 bool umulp, enum optab_methods methods)
814 {
815 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
816 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
817 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
818 rtx product, adjust, product_high, temp;
819
820 rtx op0_high = operand_subword_force (op0, high, mode);
821 rtx op0_low = operand_subword_force (op0, low, mode);
822 rtx op1_high = operand_subword_force (op1, high, mode);
823 rtx op1_low = operand_subword_force (op1, low, mode);
824
825 /* If we're using an unsigned multiply to directly compute the product
826 of the low-order words of the operands and perform any required
827 adjustments of the operands, we begin by trying two more multiplications
828 and then computing the appropriate sum.
829
830 We have checked above that the required addition is provided.
831 Full-word addition will normally always succeed, especially if
832 it is provided at all, so we don't worry about its failure. The
833 multiplication may well fail, however, so we do handle that. */
834
835 if (!umulp)
836 {
837 /* ??? This could be done with emit_store_flag where available. */
838 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
839 NULL_RTX, 1, methods);
840 if (temp)
841 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
842 NULL_RTX, 0, OPTAB_DIRECT);
843 else
844 {
845 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
846 NULL_RTX, 0, methods);
847 if (!temp)
848 return NULL_RTX;
849 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
850 NULL_RTX, 0, OPTAB_DIRECT);
851 }
852
853 if (!op0_high)
854 return NULL_RTX;
855 }
856
857 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
858 NULL_RTX, 0, OPTAB_DIRECT);
859 if (!adjust)
860 return NULL_RTX;
861
862 /* OP0_HIGH should now be dead. */
863
864 if (!umulp)
865 {
866 /* ??? This could be done with emit_store_flag where available. */
867 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
868 NULL_RTX, 1, methods);
869 if (temp)
870 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
871 NULL_RTX, 0, OPTAB_DIRECT);
872 else
873 {
874 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
875 NULL_RTX, 0, methods);
876 if (!temp)
877 return NULL_RTX;
878 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
879 NULL_RTX, 0, OPTAB_DIRECT);
880 }
881
882 if (!op1_high)
883 return NULL_RTX;
884 }
885
886 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
887 NULL_RTX, 0, OPTAB_DIRECT);
888 if (!temp)
889 return NULL_RTX;
890
891 /* OP1_HIGH should now be dead. */
892
893 adjust = expand_binop (word_mode, add_optab, adjust, temp,
894 adjust, 0, OPTAB_DIRECT);
895
896 if (target && !REG_P (target))
897 target = NULL_RTX;
898
899 if (umulp)
900 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
901 target, 1, OPTAB_DIRECT);
902 else
903 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
904 target, 1, OPTAB_DIRECT);
905
906 if (!product)
907 return NULL_RTX;
908
909 product_high = operand_subword (product, high, 1, mode);
910 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
911 REG_P (product_high) ? product_high : adjust,
912 0, OPTAB_DIRECT);
913 emit_move_insn (product_high, adjust);
914 return product;
915 }
916 \f
917 /* Wrapper around expand_binop which takes an rtx code to specify
918 the operation to perform, not an optab pointer. All other
919 arguments are the same. */
920 rtx
921 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
922 rtx op1, rtx target, int unsignedp,
923 enum optab_methods methods)
924 {
925 optab binop = code_to_optab[(int) code];
926 gcc_assert (binop);
927
928 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
929 }
930
931 /* Generate code to perform an operation specified by BINOPTAB
932 on operands OP0 and OP1, with result having machine-mode MODE.
933
934 UNSIGNEDP is for the case where we have to widen the operands
935 to perform the operation. It says to use zero-extension.
936
937 If TARGET is nonzero, the value
938 is generated there, if it is convenient to do so.
939 In all cases an rtx is returned for the locus of the value;
940 this may or may not be TARGET. */
941
942 rtx
943 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
944 rtx target, int unsignedp, enum optab_methods methods)
945 {
946 enum optab_methods next_methods
947 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
948 ? OPTAB_WIDEN : methods);
949 enum mode_class class;
950 enum machine_mode wider_mode;
951 rtx temp;
952 int commutative_op = 0;
953 int shift_op = (binoptab->code == ASHIFT
954 || binoptab->code == ASHIFTRT
955 || binoptab->code == LSHIFTRT
956 || binoptab->code == ROTATE
957 || binoptab->code == ROTATERT);
958 rtx entry_last = get_last_insn ();
959 rtx last;
960
961 class = GET_MODE_CLASS (mode);
962
963 if (flag_force_mem)
964 {
965 /* Load duplicate non-volatile operands once. */
966 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
967 {
968 op0 = force_not_mem (op0);
969 op1 = op0;
970 }
971 else
972 {
973 op0 = force_not_mem (op0);
974 op1 = force_not_mem (op1);
975 }
976 }
977
978 /* If subtracting an integer constant, convert this into an addition of
979 the negated constant. */
980
981 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
982 {
983 op1 = negate_rtx (mode, op1);
984 binoptab = add_optab;
985 }
986
987 /* When optimizing, force constants that are expensive to build (costing
988 more than a single insn) into a register. */
989 if (CONSTANT_P (op0) && optimize
990 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
991 {
992 if (GET_MODE (op0) != VOIDmode)
993 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
994 op0 = force_reg (mode, op0);
995 }
996
997 if (CONSTANT_P (op1) && optimize
998 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
999 {
1000 if (GET_MODE (op1) != VOIDmode)
1001 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1002 op1 = force_reg (mode, op1);
1003 }
1004
1005 /* Record where to delete back to if we backtrack. */
1006 last = get_last_insn ();
1007
1008 /* If operation is commutative,
1009 try to make the first operand a register.
1010 Even better, try to make it the same as the target.
1011 Also try to make the last operand a constant. */
1012 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1013 || binoptab == smul_widen_optab
1014 || binoptab == umul_widen_optab
1015 || binoptab == smul_highpart_optab
1016 || binoptab == umul_highpart_optab)
1017 {
1018 commutative_op = 1;
1019
1020 if (((target == 0 || REG_P (target))
1021 ? ((REG_P (op1)
1022 && !REG_P (op0))
1023 || target == op1)
1024 : rtx_equal_p (op1, target))
1025 || GET_CODE (op0) == CONST_INT)
1026 {
1027 temp = op1;
1028 op1 = op0;
1029 op0 = temp;
1030 }
1031 }
1032
1033 /* If we can do it with a three-operand insn, do so. */
1034
1035 if (methods != OPTAB_MUST_WIDEN
1036 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1037 {
1038 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1039 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1040 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1041 rtx pat;
1042 rtx xop0 = op0, xop1 = op1;
1043
1044 if (target)
1045 temp = target;
1046 else
1047 temp = gen_reg_rtx (mode);
1048
1049 /* If it is a commutative operator and the modes would match
1050 if we would swap the operands, we can save the conversions. */
1051 if (commutative_op)
1052 {
1053 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1054 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1055 {
1056 rtx tmp;
1057
1058 tmp = op0; op0 = op1; op1 = tmp;
1059 tmp = xop0; xop0 = xop1; xop1 = tmp;
1060 }
1061 }
1062
1063 /* In case the insn wants input operands in modes different from
1064 those of the actual operands, convert the operands. It would
1065 seem that we don't need to convert CONST_INTs, but we do, so
1066 that they're properly zero-extended, sign-extended or truncated
1067 for their mode. */
1068
1069 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1070 xop0 = convert_modes (mode0,
1071 GET_MODE (op0) != VOIDmode
1072 ? GET_MODE (op0)
1073 : mode,
1074 xop0, unsignedp);
1075
1076 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1077 xop1 = convert_modes (mode1,
1078 GET_MODE (op1) != VOIDmode
1079 ? GET_MODE (op1)
1080 : mode,
1081 xop1, unsignedp);
1082
1083 /* Now, if insn's predicates don't allow our operands, put them into
1084 pseudo regs. */
1085
1086 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1087 && mode0 != VOIDmode)
1088 xop0 = copy_to_mode_reg (mode0, xop0);
1089
1090 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1091 && mode1 != VOIDmode)
1092 xop1 = copy_to_mode_reg (mode1, xop1);
1093
1094 if (!insn_data[icode].operand[0].predicate (temp, mode))
1095 temp = gen_reg_rtx (mode);
1096
1097 pat = GEN_FCN (icode) (temp, xop0, xop1);
1098 if (pat)
1099 {
1100 /* If PAT is composed of more than one insn, try to add an appropriate
1101 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1102 operand, call ourselves again, this time without a target. */
1103 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1104 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1105 {
1106 delete_insns_since (last);
1107 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1108 unsignedp, methods);
1109 }
1110
1111 emit_insn (pat);
1112 return temp;
1113 }
1114 else
1115 delete_insns_since (last);
1116 }
1117
1118 /* If this is a multiply, see if we can do a widening operation that
1119 takes operands of this mode and makes a wider mode. */
1120
1121 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1122 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1123 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1124 != CODE_FOR_nothing))
1125 {
1126 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1127 unsignedp ? umul_widen_optab : smul_widen_optab,
1128 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1129
1130 if (temp != 0)
1131 {
1132 if (GET_MODE_CLASS (mode) == MODE_INT)
1133 return gen_lowpart (mode, temp);
1134 else
1135 return convert_to_mode (mode, temp, unsignedp);
1136 }
1137 }
1138
1139 /* Look for a wider mode of the same class for which we think we
1140 can open-code the operation. Check for a widening multiply at the
1141 wider mode as well. */
1142
1143 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1144 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1145 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1146 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1147 {
1148 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1149 || (binoptab == smul_optab
1150 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1151 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1152 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1153 != CODE_FOR_nothing)))
1154 {
1155 rtx xop0 = op0, xop1 = op1;
1156 int no_extend = 0;
1157
1158 /* For certain integer operations, we need not actually extend
1159 the narrow operands, as long as we will truncate
1160 the results to the same narrowness. */
1161
1162 if ((binoptab == ior_optab || binoptab == and_optab
1163 || binoptab == xor_optab
1164 || binoptab == add_optab || binoptab == sub_optab
1165 || binoptab == smul_optab || binoptab == ashl_optab)
1166 && class == MODE_INT)
1167 no_extend = 1;
1168
1169 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1170
1171 /* The second operand of a shift must always be extended. */
1172 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1173 no_extend && binoptab != ashl_optab);
1174
1175 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1176 unsignedp, OPTAB_DIRECT);
1177 if (temp)
1178 {
1179 if (class != MODE_INT)
1180 {
1181 if (target == 0)
1182 target = gen_reg_rtx (mode);
1183 convert_move (target, temp, 0);
1184 return target;
1185 }
1186 else
1187 return gen_lowpart (mode, temp);
1188 }
1189 else
1190 delete_insns_since (last);
1191 }
1192 }
1193
1194 /* These can be done a word at a time. */
1195 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1196 && class == MODE_INT
1197 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1198 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1199 {
1200 int i;
1201 rtx insns;
1202 rtx equiv_value;
1203
1204 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1205 won't be accurate, so use a new target. */
1206 if (target == 0 || target == op0 || target == op1)
1207 target = gen_reg_rtx (mode);
1208
1209 start_sequence ();
1210
1211 /* Do the actual arithmetic. */
1212 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1213 {
1214 rtx target_piece = operand_subword (target, i, 1, mode);
1215 rtx x = expand_binop (word_mode, binoptab,
1216 operand_subword_force (op0, i, mode),
1217 operand_subword_force (op1, i, mode),
1218 target_piece, unsignedp, next_methods);
1219
1220 if (x == 0)
1221 break;
1222
1223 if (target_piece != x)
1224 emit_move_insn (target_piece, x);
1225 }
1226
1227 insns = get_insns ();
1228 end_sequence ();
1229
1230 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1231 {
1232 if (binoptab->code != UNKNOWN)
1233 equiv_value
1234 = gen_rtx_fmt_ee (binoptab->code, mode,
1235 copy_rtx (op0), copy_rtx (op1));
1236 else
1237 equiv_value = 0;
1238
1239 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1240 return target;
1241 }
1242 }
1243
1244 /* Synthesize double word shifts from single word shifts. */
1245 if ((binoptab == lshr_optab || binoptab == ashl_optab
1246 || binoptab == ashr_optab)
1247 && class == MODE_INT
1248 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1249 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1250 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1251 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1252 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1253 {
1254 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1255 enum machine_mode op1_mode;
1256
1257 double_shift_mask = targetm.shift_truncation_mask (mode);
1258 shift_mask = targetm.shift_truncation_mask (word_mode);
1259 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1260
1261 /* Apply the truncation to constant shifts. */
1262 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1263 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1264
1265 if (op1 == CONST0_RTX (op1_mode))
1266 return op0;
1267
1268 /* Make sure that this is a combination that expand_doubleword_shift
1269 can handle. See the comments there for details. */
1270 if (double_shift_mask == 0
1271 || (shift_mask == BITS_PER_WORD - 1
1272 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1273 {
1274 rtx insns, equiv_value;
1275 rtx into_target, outof_target;
1276 rtx into_input, outof_input;
1277 int left_shift, outof_word;
1278
1279 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1280 won't be accurate, so use a new target. */
1281 if (target == 0 || target == op0 || target == op1)
1282 target = gen_reg_rtx (mode);
1283
1284 start_sequence ();
1285
1286 /* OUTOF_* is the word we are shifting bits away from, and
1287 INTO_* is the word that we are shifting bits towards, thus
1288 they differ depending on the direction of the shift and
1289 WORDS_BIG_ENDIAN. */
1290
1291 left_shift = binoptab == ashl_optab;
1292 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1293
1294 outof_target = operand_subword (target, outof_word, 1, mode);
1295 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1296
1297 outof_input = operand_subword_force (op0, outof_word, mode);
1298 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1299
1300 if (expand_doubleword_shift (op1_mode, binoptab,
1301 outof_input, into_input, op1,
1302 outof_target, into_target,
1303 unsignedp, methods, shift_mask))
1304 {
1305 insns = get_insns ();
1306 end_sequence ();
1307
1308 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1309 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1310 return target;
1311 }
1312 end_sequence ();
1313 }
1314 }
1315
1316 /* Synthesize double word rotates from single word shifts. */
1317 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1318 && class == MODE_INT
1319 && GET_CODE (op1) == CONST_INT
1320 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1321 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1322 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1323 {
1324 rtx insns, equiv_value;
1325 rtx into_target, outof_target;
1326 rtx into_input, outof_input;
1327 rtx inter;
1328 int shift_count, left_shift, outof_word;
1329
1330 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1331 won't be accurate, so use a new target. Do this also if target is not
1332 a REG, first because having a register instead may open optimization
1333 opportunities, and second because if target and op0 happen to be MEMs
1334 designating the same location, we would risk clobbering it too early
1335 in the code sequence we generate below. */
1336 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1337 target = gen_reg_rtx (mode);
1338
1339 start_sequence ();
1340
1341 shift_count = INTVAL (op1);
1342
1343 /* OUTOF_* is the word we are shifting bits away from, and
1344 INTO_* is the word that we are shifting bits towards, thus
1345 they differ depending on the direction of the shift and
1346 WORDS_BIG_ENDIAN. */
1347
1348 left_shift = (binoptab == rotl_optab);
1349 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1350
1351 outof_target = operand_subword (target, outof_word, 1, mode);
1352 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1353
1354 outof_input = operand_subword_force (op0, outof_word, mode);
1355 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1356
1357 if (shift_count == BITS_PER_WORD)
1358 {
1359 /* This is just a word swap. */
1360 emit_move_insn (outof_target, into_input);
1361 emit_move_insn (into_target, outof_input);
1362 inter = const0_rtx;
1363 }
1364 else
1365 {
1366 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1367 rtx first_shift_count, second_shift_count;
1368 optab reverse_unsigned_shift, unsigned_shift;
1369
1370 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1371 ? lshr_optab : ashl_optab);
1372
1373 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1374 ? ashl_optab : lshr_optab);
1375
1376 if (shift_count > BITS_PER_WORD)
1377 {
1378 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1379 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1380 }
1381 else
1382 {
1383 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1384 second_shift_count = GEN_INT (shift_count);
1385 }
1386
1387 into_temp1 = expand_binop (word_mode, unsigned_shift,
1388 outof_input, first_shift_count,
1389 NULL_RTX, unsignedp, next_methods);
1390 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1391 into_input, second_shift_count,
1392 NULL_RTX, unsignedp, next_methods);
1393
1394 if (into_temp1 != 0 && into_temp2 != 0)
1395 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1396 into_target, unsignedp, next_methods);
1397 else
1398 inter = 0;
1399
1400 if (inter != 0 && inter != into_target)
1401 emit_move_insn (into_target, inter);
1402
1403 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1404 into_input, first_shift_count,
1405 NULL_RTX, unsignedp, next_methods);
1406 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1407 outof_input, second_shift_count,
1408 NULL_RTX, unsignedp, next_methods);
1409
1410 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1411 inter = expand_binop (word_mode, ior_optab,
1412 outof_temp1, outof_temp2,
1413 outof_target, unsignedp, next_methods);
1414
1415 if (inter != 0 && inter != outof_target)
1416 emit_move_insn (outof_target, inter);
1417 }
1418
1419 insns = get_insns ();
1420 end_sequence ();
1421
1422 if (inter != 0)
1423 {
1424 if (binoptab->code != UNKNOWN)
1425 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1426 else
1427 equiv_value = 0;
1428
1429 /* We can't make this a no conflict block if this is a word swap,
1430 because the word swap case fails if the input and output values
1431 are in the same register. */
1432 if (shift_count != BITS_PER_WORD)
1433 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1434 else
1435 emit_insn (insns);
1436
1437
1438 return target;
1439 }
1440 }
1441
1442 /* These can be done a word at a time by propagating carries. */
1443 if ((binoptab == add_optab || binoptab == sub_optab)
1444 && class == MODE_INT
1445 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1446 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1447 {
1448 unsigned int i;
1449 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1450 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1451 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1452 rtx xop0, xop1, xtarget;
1453
1454 /* We can handle either a 1 or -1 value for the carry. If
1455 STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since it is the
1456 one easiest to get. */
1457 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1458 int normalizep = STORE_FLAG_VALUE;
1459 #else
1460 int normalizep = 1;
1461 #endif
1462
1463 /* Prepare the operands. */
1464 xop0 = force_reg (mode, op0);
1465 xop1 = force_reg (mode, op1);
1466
1467 xtarget = gen_reg_rtx (mode);
1468
1469 if (target == 0 || !REG_P (target))
1470 target = xtarget;
1471
1472 /* Indicate for flow that the entire target reg is being set. */
1473 if (REG_P (target))
1474 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1475
1476 /* Do the actual arithmetic. */
1477 for (i = 0; i < nwords; i++)
1478 {
1479 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1480 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1481 rtx op0_piece = operand_subword_force (xop0, index, mode);
1482 rtx op1_piece = operand_subword_force (xop1, index, mode);
1483 rtx x;
1484
1485 /* Main add/subtract of the input operands. */
1486 x = expand_binop (word_mode, binoptab,
1487 op0_piece, op1_piece,
1488 target_piece, unsignedp, next_methods);
1489 if (x == 0)
1490 break;
1491
1492 if (i + 1 < nwords)
1493 {
1494 /* Store carry from main add/subtract. */
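/* An unsigned word addition produced a carry exactly when the sum is
   smaller than either addend, and a subtraction produced a borrow exactly
   when the difference is larger than the minuend; hence the LT (for add)
   or GT (for sub) comparison of X against OP0_PIECE below.  */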
1495 carry_out = gen_reg_rtx (word_mode);
1496 carry_out = emit_store_flag_force (carry_out,
1497 (binoptab == add_optab
1498 ? LT : GT),
1499 x, op0_piece,
1500 word_mode, 1, normalizep);
1501 }
1502
1503 if (i > 0)
1504 {
1505 rtx newx;
1506
1507 /* Add/subtract previous carry to main result. */
1508 newx = expand_binop (word_mode,
1509 normalizep == 1 ? binoptab : otheroptab,
1510 x, carry_in,
1511 NULL_RTX, 1, next_methods);
1512
1513 if (i + 1 < nwords)
1514 {
1515 /* Get out carry from adding/subtracting carry in. */
1516 rtx carry_tmp = gen_reg_rtx (word_mode);
1517 carry_tmp = emit_store_flag_force (carry_tmp,
1518 (binoptab == add_optab
1519 ? LT : GT),
1520 newx, x,
1521 word_mode, 1, normalizep);
1522
1523 /* Logical-ior the two possible carries together. */
1524 carry_out = expand_binop (word_mode, ior_optab,
1525 carry_out, carry_tmp,
1526 carry_out, 0, next_methods);
1527 if (carry_out == 0)
1528 break;
1529 }
1530 emit_move_insn (target_piece, newx);
1531 }
1532 else
1533 {
1534 if (x != target_piece)
1535 emit_move_insn (target_piece, x);
1536 }
1537
1538 carry_in = carry_out;
1539 }
1540
1541 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1542 {
1543 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1544 || ! rtx_equal_p (target, xtarget))
1545 {
1546 rtx temp = emit_move_insn (target, xtarget);
1547
1548 set_unique_reg_note (temp,
1549 REG_EQUAL,
1550 gen_rtx_fmt_ee (binoptab->code, mode,
1551 copy_rtx (xop0),
1552 copy_rtx (xop1)));
1553 }
1554 else
1555 target = xtarget;
1556
1557 return target;
1558 }
1559
1560 else
1561 delete_insns_since (last);
1562 }
1563
1564 /* Attempt to synthesize double word multiplies using a sequence of word
1565 mode multiplications. We first attempt to generate a sequence using a
1566 more efficient unsigned widening multiply, and if that fails we then
1567 try using a signed widening multiply. */
1568
1569 if (binoptab == smul_optab
1570 && class == MODE_INT
1571 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1572 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1573 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1574 {
1575 rtx product = NULL_RTX;
1576
1577 if (umul_widen_optab->handlers[(int) mode].insn_code
1578 != CODE_FOR_nothing)
1579 {
1580 product = expand_doubleword_mult (mode, op0, op1, target,
1581 true, methods);
1582 if (!product)
1583 delete_insns_since (last);
1584 }
1585
1586 if (product == NULL_RTX
1587 && smul_widen_optab->handlers[(int) mode].insn_code
1588 != CODE_FOR_nothing)
1589 {
1590 product = expand_doubleword_mult (mode, op0, op1, target,
1591 false, methods);
1592 if (!product)
1593 delete_insns_since (last);
1594 }
1595
1596 if (product != NULL_RTX)
1597 {
1598 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1599 {
1600 temp = emit_move_insn (target ? target : product, product);
1601 set_unique_reg_note (temp,
1602 REG_EQUAL,
1603 gen_rtx_fmt_ee (MULT, mode,
1604 copy_rtx (op0),
1605 copy_rtx (op1)));
1606 }
1607 return product;
1608 }
1609 }
1610
1611 /* It can't be open-coded in this mode.
1612 Use a library call if one is available and caller says that's ok. */
1613
1614 if (binoptab->handlers[(int) mode].libfunc
1615 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1616 {
1617 rtx insns;
1618 rtx op1x = op1;
1619 enum machine_mode op1_mode = mode;
1620 rtx value;
1621
1622 start_sequence ();
1623
1624 if (shift_op)
1625 {
1626 op1_mode = word_mode;
1627 /* Specify unsigned here,
1628 since negative shift counts are meaningless. */
1629 op1x = convert_to_mode (word_mode, op1, 1);
1630 }
1631
1632 if (GET_MODE (op0) != VOIDmode
1633 && GET_MODE (op0) != mode)
1634 op0 = convert_to_mode (mode, op0, unsignedp);
1635
1636 /* The libcall is emitted as LCT_CONST (a constant function), so it
1637 can safely be CSE'd or moved. */
1638 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1639 NULL_RTX, LCT_CONST, mode, 2,
1640 op0, mode, op1x, op1_mode);
1641
1642 insns = get_insns ();
1643 end_sequence ();
1644
1645 target = gen_reg_rtx (mode);
1646 emit_libcall_block (insns, target, value,
1647 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1648
1649 return target;
1650 }
1651
1652 delete_insns_since (last);
1653
1654 /* It can't be done in this mode. Can we do it in a wider mode? */
1655
1656 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1657 || methods == OPTAB_MUST_WIDEN))
1658 {
1659 /* Caller says, don't even try. */
1660 delete_insns_since (entry_last);
1661 return 0;
1662 }
1663
1664 /* Compute the value of METHODS to pass to recursive calls.
1665 Don't allow widening to be tried recursively. */
1666
1667 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1668
1669 /* Look for a wider mode of the same class for which it appears we can do
1670 the operation. */
1671
1672 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1673 {
1674 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1675 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1676 {
1677 if ((binoptab->handlers[(int) wider_mode].insn_code
1678 != CODE_FOR_nothing)
1679 || (methods == OPTAB_LIB
1680 && binoptab->handlers[(int) wider_mode].libfunc))
1681 {
1682 rtx xop0 = op0, xop1 = op1;
1683 int no_extend = 0;
1684
1685 /* For certain integer operations, we need not actually extend
1686 the narrow operands, as long as we will truncate
1687 the results to the same narrowness. */
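/* Illustrative sketch (editorial addition, not part of the original source):
   this works because the truncated result only depends on the low bits, and
   arithmetic is modular.  E.g. with 8-bit operands computed in 32 bits,
       (0xF0 + 0x20) mod 2^8                    == 0x10
       ((int32) 0xF0 + (int32) 0x20) mod 2^8    == 0x10
   so the low 8 bits of the wide sum equal the narrow sum no matter how (or
   whether) the operands were extended.  Shift counts are the exception
   handled just below: the second operand of a shift must be a genuine
   extended value.  */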
1688
1689 if ((binoptab == ior_optab || binoptab == and_optab
1690 || binoptab == xor_optab
1691 || binoptab == add_optab || binoptab == sub_optab
1692 || binoptab == smul_optab || binoptab == ashl_optab)
1693 && class == MODE_INT)
1694 no_extend = 1;
1695
1696 xop0 = widen_operand (xop0, wider_mode, mode,
1697 unsignedp, no_extend);
1698
1699 /* The second operand of a shift must always be extended. */
1700 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1701 no_extend && binoptab != ashl_optab);
1702
1703 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1704 unsignedp, methods);
1705 if (temp)
1706 {
1707 if (class != MODE_INT)
1708 {
1709 if (target == 0)
1710 target = gen_reg_rtx (mode);
1711 convert_move (target, temp, 0);
1712 return target;
1713 }
1714 else
1715 return gen_lowpart (mode, temp);
1716 }
1717 else
1718 delete_insns_since (last);
1719 }
1720 }
1721 }
1722
1723 delete_insns_since (entry_last);
1724 return 0;
1725 }
1726 \f
1727 /* Expand a binary operator which has both signed and unsigned forms.
1728 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1729 signed operations.
1730
1731 If we widen unsigned operands, we may use a signed wider operation instead
1732 of an unsigned wider operation, since the result would be the same. */
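/* For illustration only (an editorial sketch, not original text): when
   unsigned operands must be widened, their zero-extended images are
   non-negative in the wider mode, so a signed wide operation gives the same
   answer as the unsigned one.  A hypothetical caller might write

       temp = sign_expand_binop (SImode, udiv_optab, sdiv_optab,
                                 op0, op1, target, 1, OPTAB_LIB_WIDEN);

   and if SImode division is unavailable, the zero-extended DImode operands
   can safely be divided with the signed DImode pattern.  */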
1733
1734 rtx
1735 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1736 rtx op0, rtx op1, rtx target, int unsignedp,
1737 enum optab_methods methods)
1738 {
1739 rtx temp;
1740 optab direct_optab = unsignedp ? uoptab : soptab;
1741 struct optab wide_soptab;
1742
1743 /* Do it without widening, if possible. */
1744 temp = expand_binop (mode, direct_optab, op0, op1, target,
1745 unsignedp, OPTAB_DIRECT);
1746 if (temp || methods == OPTAB_DIRECT)
1747 return temp;
1748
1749 /* Try widening to a signed int. Make a fake signed optab that
1750 hides any signed insn for direct use. */
1751 wide_soptab = *soptab;
1752 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1753 wide_soptab.handlers[(int) mode].libfunc = 0;
1754
1755 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1756 unsignedp, OPTAB_WIDEN);
1757
1758 /* For unsigned operands, try widening to an unsigned int. */
1759 if (temp == 0 && unsignedp)
1760 temp = expand_binop (mode, uoptab, op0, op1, target,
1761 unsignedp, OPTAB_WIDEN);
1762 if (temp || methods == OPTAB_WIDEN)
1763 return temp;
1764
1765 /* Use a library call of the right width if one exists. */
1766 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1767 if (temp || methods == OPTAB_LIB)
1768 return temp;
1769
1770 /* Must widen and use a lib call, use either signed or unsigned. */
1771 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1772 unsignedp, methods);
1773 if (temp != 0)
1774 return temp;
1775 if (unsignedp)
1776 return expand_binop (mode, uoptab, op0, op1, target,
1777 unsignedp, methods);
1778 return 0;
1779 }
1780 \f
1781 /* Generate code to perform an operation specified by UNOPPTAB
1782 on operand OP0, with two results to TARG0 and TARG1.
1783 We assume that the order of the operands for the instruction
1784 is TARG0, TARG1, OP0.
1785
1786 Either TARG0 or TARG1 may be zero, but what that means is that
1787 the result is not actually wanted. We will generate it into
1788 a dummy pseudo-reg and discard it. They may not both be zero.
1789
1790 Returns 1 if this operation can be performed; 0 if not. */
1791
1792 int
1793 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1794 int unsignedp)
1795 {
1796 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1797 enum mode_class class;
1798 enum machine_mode wider_mode;
1799 rtx entry_last = get_last_insn ();
1800 rtx last;
1801
1802 class = GET_MODE_CLASS (mode);
1803
1804 if (flag_force_mem)
1805 op0 = force_not_mem (op0);
1806
1807 if (!targ0)
1808 targ0 = gen_reg_rtx (mode);
1809 if (!targ1)
1810 targ1 = gen_reg_rtx (mode);
1811
1812 /* Record where to go back to if we fail. */
1813 last = get_last_insn ();
1814
1815 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1816 {
1817 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1818 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1819 rtx pat;
1820 rtx xop0 = op0;
1821
1822 if (GET_MODE (xop0) != VOIDmode
1823 && GET_MODE (xop0) != mode0)
1824 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1825
1826 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1827 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1828 xop0 = copy_to_mode_reg (mode0, xop0);
1829
1830 /* We could handle this, but we should always be called with a pseudo
1831 for our targets and all insns should take them as outputs. */
1832 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1833 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1834
1835 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1836 if (pat)
1837 {
1838 emit_insn (pat);
1839 return 1;
1840 }
1841 else
1842 delete_insns_since (last);
1843 }
1844
1845 /* It can't be done in this mode. Can we do it in a wider mode? */
1846
1847 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1848 {
1849 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1850 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1851 {
1852 if (unoptab->handlers[(int) wider_mode].insn_code
1853 != CODE_FOR_nothing)
1854 {
1855 rtx t0 = gen_reg_rtx (wider_mode);
1856 rtx t1 = gen_reg_rtx (wider_mode);
1857 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1858
1859 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1860 {
1861 convert_move (targ0, t0, unsignedp);
1862 convert_move (targ1, t1, unsignedp);
1863 return 1;
1864 }
1865 else
1866 delete_insns_since (last);
1867 }
1868 }
1869 }
1870
1871 delete_insns_since (entry_last);
1872 return 0;
1873 }
1874 \f
1875 /* Generate code to perform an operation specified by BINOPTAB
1876 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1877 We assume that the order of the operands for the instruction
1878 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1879 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1880
1881 Either TARG0 or TARG1 may be zero, but what that means is that
1882 the result is not actually wanted. We will generate it into
1883 a dummy pseudo-reg and discard it. They may not both be zero.
1884
1885 Returns 1 if this operation can be performed; 0 if not. */
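/* Editorial illustration (not part of the original source): the shape
   described above corresponds to a divmod-style double-result pattern,
   roughly

       [(set (match_operand 0 "") (div (match_operand 1 "")
                                       (match_operand 2 "")))
        (set (match_operand 3 "") (mod (match_dup 1) (match_dup 2)))]

   where operand 0 is TARG0, operands 1 and 2 are OP0 and OP1, and
   operand 3 is TARG1.  */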
1886
1887 int
1888 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1889 int unsignedp)
1890 {
1891 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1892 enum mode_class class;
1893 enum machine_mode wider_mode;
1894 rtx entry_last = get_last_insn ();
1895 rtx last;
1896
1897 class = GET_MODE_CLASS (mode);
1898
1899 if (flag_force_mem)
1900 {
1901 op0 = force_not_mem (op0);
1902 op1 = force_not_mem (op1);
1903 }
1904
1905 /* If we are inside an appropriately-short loop and we are optimizing,
1906 force expensive constants into a register. */
1907 if (CONSTANT_P (op0) && optimize
1908 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1909 op0 = force_reg (mode, op0);
1910
1911 if (CONSTANT_P (op1) && optimize
1912 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1913 op1 = force_reg (mode, op1);
1914
1915 if (!targ0)
1916 targ0 = gen_reg_rtx (mode);
1917 if (!targ1)
1918 targ1 = gen_reg_rtx (mode);
1919
1920 /* Record where to go back to if we fail. */
1921 last = get_last_insn ();
1922
1923 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1924 {
1925 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1926 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1927 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1928 rtx pat;
1929 rtx xop0 = op0, xop1 = op1;
1930
1931 /* In case the insn wants input operands in modes different from
1932 those of the actual operands, convert the operands. It would
1933 seem that we don't need to convert CONST_INTs, but we do, so
1934 that they're properly zero-extended, sign-extended or truncated
1935 for their mode. */
1936
1937 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1938 xop0 = convert_modes (mode0,
1939 GET_MODE (op0) != VOIDmode
1940 ? GET_MODE (op0)
1941 : mode,
1942 xop0, unsignedp);
1943
1944 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1945 xop1 = convert_modes (mode1,
1946 GET_MODE (op1) != VOIDmode
1947 ? GET_MODE (op1)
1948 : mode,
1949 xop1, unsignedp);
1950
1951 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1952 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
1953 xop0 = copy_to_mode_reg (mode0, xop0);
1954
1955 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
1956 xop1 = copy_to_mode_reg (mode1, xop1);
1957
1958 /* We could handle this, but we should always be called with a pseudo
1959 for our targets and all insns should take them as outputs. */
1960 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1961 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
1962
1963 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
1964 if (pat)
1965 {
1966 emit_insn (pat);
1967 return 1;
1968 }
1969 else
1970 delete_insns_since (last);
1971 }
1972
1973 /* It can't be done in this mode. Can we do it in a wider mode? */
1974
1975 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1976 {
1977 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1978 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1979 {
1980 if (binoptab->handlers[(int) wider_mode].insn_code
1981 != CODE_FOR_nothing)
1982 {
1983 rtx t0 = gen_reg_rtx (wider_mode);
1984 rtx t1 = gen_reg_rtx (wider_mode);
1985 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1986 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
1987
1988 if (expand_twoval_binop (binoptab, cop0, cop1,
1989 t0, t1, unsignedp))
1990 {
1991 convert_move (targ0, t0, unsignedp);
1992 convert_move (targ1, t1, unsignedp);
1993 return 1;
1994 }
1995 else
1996 delete_insns_since (last);
1997 }
1998 }
1999 }
2000
2001 delete_insns_since (entry_last);
2002 return 0;
2003 }
2004
2005 /* Expand the two-valued library call indicated by BINOPTAB, but
2006 preserve only one of the values. If TARG0 is non-NULL, the first
2007 value is placed into TARG0; otherwise the second value is placed
2008 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2009 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2010 This routine assumes that the value returned by the library call is
2011 as if the return value was of an integral mode twice as wide as the
2012 mode of OP0. Returns 1 if the call was successful. */
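/* Editorial note (not from the original source): concretely, for SImode
   operands the libcall is taken to return its value in the smallest integer
   mode of at least 2 * GET_MODE_BITSIZE (SImode) bits; the subreg at byte
   offset 0 holds the value destined for TARG0 and the subreg at byte offset
   GET_MODE_SIZE (SImode) the one for TARG1, as the simplify_gen_subreg call
   below selects.  */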
2013
2014 bool
2015 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2016 rtx targ0, rtx targ1, enum rtx_code code)
2017 {
2018 enum machine_mode mode;
2019 enum machine_mode libval_mode;
2020 rtx libval;
2021 rtx insns;
2022
2023 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2024 gcc_assert (!targ0 != !targ1);
2025
2026 mode = GET_MODE (op0);
2027 if (!binoptab->handlers[(int) mode].libfunc)
2028 return false;
2029
2030 /* The value returned by the library function will have twice as
2031 many bits as the nominal MODE. */
2032 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2033 MODE_INT);
2034 start_sequence ();
2035 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2036 NULL_RTX, LCT_CONST,
2037 libval_mode, 2,
2038 op0, mode,
2039 op1, mode);
2040 /* Get the part of VAL containing the value that we want. */
2041 libval = simplify_gen_subreg (mode, libval, libval_mode,
2042 targ0 ? 0 : GET_MODE_SIZE (mode));
2043 insns = get_insns ();
2044 end_sequence ();
2045 /* Move the result into the desired location. */
2046 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2047 gen_rtx_fmt_ee (code, mode, op0, op1));
2048
2049 return true;
2050 }
2051
2052 \f
2053 /* Wrapper around expand_unop which takes an rtx code to specify
2054 the operation to perform, not an optab pointer. All other
2055 arguments are the same. */
2056 rtx
2057 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2058 rtx target, int unsignedp)
2059 {
2060 optab unop = code_to_optab[(int) code];
2061 gcc_assert (unop);
2062
2063 return expand_unop (mode, unop, op0, target, unsignedp);
2064 }
2065
2066 /* Try calculating
2067 (clz:narrow x)
2068 as
2069 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
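/* Worked example (editorial addition, not original text): for a QImode
   operand widened to SImode, clz of the zero-extended value counts 24 extra
   leading zero bits, so
       clz:QI (0x10) == clz:SI (0x00000010) - 24 == 27 - 24 == 3.  */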
2070 static rtx
2071 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2072 {
2073 enum mode_class class = GET_MODE_CLASS (mode);
2074 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2075 {
2076 enum machine_mode wider_mode;
2077 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2078 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2079 {
2080 if (clz_optab->handlers[(int) wider_mode].insn_code
2081 != CODE_FOR_nothing)
2082 {
2083 rtx xop0, temp, last;
2084
2085 last = get_last_insn ();
2086
2087 if (target == 0)
2088 target = gen_reg_rtx (mode);
2089 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2090 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2091 if (temp != 0)
2092 temp = expand_binop (wider_mode, sub_optab, temp,
2093 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2094 - GET_MODE_BITSIZE (mode)),
2095 target, true, OPTAB_DIRECT);
2096 if (temp == 0)
2097 delete_insns_since (last);
2098
2099 return temp;
2100 }
2101 }
2102 }
2103 return 0;
2104 }
2105
2106 /* Try calculating (parity x) as (and (popcount x) 1), where
2107 popcount can also be done in a wider mode. */
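/* Editorial example: popcount (0b1011) == 3 and (3 & 1) == 1, which is
   indeed the parity of 0b1011; zero-extension adds no set bits, so a
   popcount done in a wider mode yields the same parity.  */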
2108 static rtx
2109 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2110 {
2111 enum mode_class class = GET_MODE_CLASS (mode);
2112 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2113 {
2114 enum machine_mode wider_mode;
2115 for (wider_mode = mode; wider_mode != VOIDmode;
2116 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2117 {
2118 if (popcount_optab->handlers[(int) wider_mode].insn_code
2119 != CODE_FOR_nothing)
2120 {
2121 rtx xop0, temp, last;
2122
2123 last = get_last_insn ();
2124
2125 if (target == 0)
2126 target = gen_reg_rtx (mode);
2127 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2128 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2129 true);
2130 if (temp != 0)
2131 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2132 target, true, OPTAB_DIRECT);
2133 if (temp == 0)
2134 delete_insns_since (last);
2135
2136 return temp;
2137 }
2138 }
2139 }
2140 return 0;
2141 }
2142
2143 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2144 conditions, VAL may already be a SUBREG against which we cannot generate
2145 a further SUBREG. In this case, we expect forcing the value into a
2146 register will work around the situation. */
2147
2148 static rtx
2149 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2150 enum machine_mode imode)
2151 {
2152 rtx ret;
2153 ret = lowpart_subreg (omode, val, imode);
2154 if (ret == NULL)
2155 {
2156 val = force_reg (imode, val);
2157 ret = lowpart_subreg (omode, val, imode);
2158 gcc_assert (ret != NULL);
2159 }
2160 return ret;
2161 }
2162
2163 /* Expand a floating point absolute value or negation operation via a
2164 logical operation on the sign bit. */
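/* Editorial illustration (not part of the original source): for IEEE single
   precision, where the sign occupies bit 31,
       abs(x)  ==  x & 0x7fffffff    (AND with ~signmask)
       neg(x)  ==  x ^ 0x80000000    (XOR with signmask)
   which is exactly the and_optab/xor_optab choice made below.  */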
2165
2166 static rtx
2167 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2168 rtx op0, rtx target)
2169 {
2170 const struct real_format *fmt;
2171 int bitpos, word, nwords, i;
2172 enum machine_mode imode;
2173 HOST_WIDE_INT hi, lo;
2174 rtx temp, insns;
2175
2176 /* The format has to have a simple sign bit. */
2177 fmt = REAL_MODE_FORMAT (mode);
2178 if (fmt == NULL)
2179 return NULL_RTX;
2180
2181 bitpos = fmt->signbit_rw;
2182 if (bitpos < 0)
2183 return NULL_RTX;
2184
2185 /* Don't create negative zeros if the format doesn't support them. */
2186 if (code == NEG && !fmt->has_signed_zero)
2187 return NULL_RTX;
2188
2189 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2190 {
2191 imode = int_mode_for_mode (mode);
2192 if (imode == BLKmode)
2193 return NULL_RTX;
2194 word = 0;
2195 nwords = 1;
2196 }
2197 else
2198 {
2199 imode = word_mode;
2200
2201 if (FLOAT_WORDS_BIG_ENDIAN)
2202 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2203 else
2204 word = bitpos / BITS_PER_WORD;
2205 bitpos = bitpos % BITS_PER_WORD;
2206 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2207 }
2208
2209 if (bitpos < HOST_BITS_PER_WIDE_INT)
2210 {
2211 hi = 0;
2212 lo = (HOST_WIDE_INT) 1 << bitpos;
2213 }
2214 else
2215 {
2216 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2217 lo = 0;
2218 }
2219 if (code == ABS)
2220 lo = ~lo, hi = ~hi;
2221
2222 if (target == 0 || target == op0)
2223 target = gen_reg_rtx (mode);
2224
2225 if (nwords > 1)
2226 {
2227 start_sequence ();
2228
2229 for (i = 0; i < nwords; ++i)
2230 {
2231 rtx targ_piece = operand_subword (target, i, 1, mode);
2232 rtx op0_piece = operand_subword_force (op0, i, mode);
2233
2234 if (i == word)
2235 {
2236 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2237 op0_piece,
2238 immed_double_const (lo, hi, imode),
2239 targ_piece, 1, OPTAB_LIB_WIDEN);
2240 if (temp != targ_piece)
2241 emit_move_insn (targ_piece, temp);
2242 }
2243 else
2244 emit_move_insn (targ_piece, op0_piece);
2245 }
2246
2247 insns = get_insns ();
2248 end_sequence ();
2249
2250 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2251 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2252 }
2253 else
2254 {
2255 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2256 gen_lowpart (imode, op0),
2257 immed_double_const (lo, hi, imode),
2258 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2259 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2260
2261 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2262 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2263 }
2264
2265 return target;
2266 }
2267
2268 /* Generate code to perform an operation specified by UNOPTAB
2269 on operand OP0, with result having machine-mode MODE.
2270
2271 UNSIGNEDP is for the case where we have to widen the operands
2272 to perform the operation. It says to use zero-extension.
2273
2274 If TARGET is nonzero, the value
2275 is generated there, if it is convenient to do so.
2276 In all cases an rtx is returned for the locus of the value;
2277 this may or may not be TARGET. */
2278
2279 rtx
2280 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2281 int unsignedp)
2282 {
2283 enum mode_class class;
2284 enum machine_mode wider_mode;
2285 rtx temp;
2286 rtx last = get_last_insn ();
2287 rtx pat;
2288
2289 class = GET_MODE_CLASS (mode);
2290
2291 if (flag_force_mem)
2292 op0 = force_not_mem (op0);
2293
2294 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2295 {
2296 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2297 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2298 rtx xop0 = op0;
2299
2300 if (target)
2301 temp = target;
2302 else
2303 temp = gen_reg_rtx (mode);
2304
2305 if (GET_MODE (xop0) != VOIDmode
2306 && GET_MODE (xop0) != mode0)
2307 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2308
2309 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2310
2311 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2312 xop0 = copy_to_mode_reg (mode0, xop0);
2313
2314 if (!insn_data[icode].operand[0].predicate (temp, mode))
2315 temp = gen_reg_rtx (mode);
2316
2317 pat = GEN_FCN (icode) (temp, xop0);
2318 if (pat)
2319 {
2320 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2321 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2322 {
2323 delete_insns_since (last);
2324 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2325 }
2326
2327 emit_insn (pat);
2328
2329 return temp;
2330 }
2331 else
2332 delete_insns_since (last);
2333 }
2334
2335 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2336
2337 /* Widening clz needs special treatment. */
2338 if (unoptab == clz_optab)
2339 {
2340 temp = widen_clz (mode, op0, target);
2341 if (temp)
2342 return temp;
2343 else
2344 goto try_libcall;
2345 }
2346
2347 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2348 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2349 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2350 {
2351 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2352 {
2353 rtx xop0 = op0;
2354
2355 /* For certain operations, we need not actually extend
2356 the narrow operand, as long as we will truncate the
2357 results to the same narrowness. */
2358
2359 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2360 (unoptab == neg_optab
2361 || unoptab == one_cmpl_optab)
2362 && class == MODE_INT);
2363
2364 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2365 unsignedp);
2366
2367 if (temp)
2368 {
2369 if (class != MODE_INT)
2370 {
2371 if (target == 0)
2372 target = gen_reg_rtx (mode);
2373 convert_move (target, temp, 0);
2374 return target;
2375 }
2376 else
2377 return gen_lowpart (mode, temp);
2378 }
2379 else
2380 delete_insns_since (last);
2381 }
2382 }
2383
2384 /* These can be done a word at a time. */
2385 if (unoptab == one_cmpl_optab
2386 && class == MODE_INT
2387 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2388 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2389 {
2390 int i;
2391 rtx insns;
2392
2393 if (target == 0 || target == op0)
2394 target = gen_reg_rtx (mode);
2395
2396 start_sequence ();
2397
2398 /* Do the actual arithmetic. */
2399 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2400 {
2401 rtx target_piece = operand_subword (target, i, 1, mode);
2402 rtx x = expand_unop (word_mode, unoptab,
2403 operand_subword_force (op0, i, mode),
2404 target_piece, unsignedp);
2405
2406 if (target_piece != x)
2407 emit_move_insn (target_piece, x);
2408 }
2409
2410 insns = get_insns ();
2411 end_sequence ();
2412
2413 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2414 gen_rtx_fmt_e (unoptab->code, mode,
2415 copy_rtx (op0)));
2416 return target;
2417 }
2418
2419 if (unoptab->code == NEG)
2420 {
2421 /* Try negating floating point values by flipping the sign bit. */
2422 if (class == MODE_FLOAT)
2423 {
2424 temp = expand_absneg_bit (NEG, mode, op0, target);
2425 if (temp)
2426 return temp;
2427 }
2428
2429 /* If there is no negation pattern, and we have no negative zero,
2430 try subtracting from zero. */
2431 if (!HONOR_SIGNED_ZEROS (mode))
2432 {
2433 temp = expand_binop (mode, (unoptab == negv_optab
2434 ? subv_optab : sub_optab),
2435 CONST0_RTX (mode), op0, target,
2436 unsignedp, OPTAB_DIRECT);
2437 if (temp)
2438 return temp;
2439 }
2440 }
2441
2442 /* Try calculating parity (x) as popcount (x) % 2. */
2443 if (unoptab == parity_optab)
2444 {
2445 temp = expand_parity (mode, op0, target);
2446 if (temp)
2447 return temp;
2448 }
2449
2450 try_libcall:
2451 /* Now try a library call in this mode. */
2452 if (unoptab->handlers[(int) mode].libfunc)
2453 {
2454 rtx insns;
2455 rtx value;
2456 enum machine_mode outmode = mode;
2457
2458 /* All of these functions return small values. Thus we choose to
2459 have them return something that isn't a double-word. */
2460 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2461 || unoptab == popcount_optab || unoptab == parity_optab)
2462 outmode
2463 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2464
2465 start_sequence ();
2466
2467 /* Pass 1 for NO_QUEUE so we don't lose any increments
2468 if the libcall is cse'd or moved. */
2469 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2470 NULL_RTX, LCT_CONST, outmode,
2471 1, op0, mode);
2472 insns = get_insns ();
2473 end_sequence ();
2474
2475 target = gen_reg_rtx (outmode);
2476 emit_libcall_block (insns, target, value,
2477 gen_rtx_fmt_e (unoptab->code, mode, op0));
2478
2479 return target;
2480 }
2481
2482 /* It can't be done in this mode. Can we do it in a wider mode? */
2483
2484 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2485 {
2486 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2487 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2488 {
2489 if ((unoptab->handlers[(int) wider_mode].insn_code
2490 != CODE_FOR_nothing)
2491 || unoptab->handlers[(int) wider_mode].libfunc)
2492 {
2493 rtx xop0 = op0;
2494
2495 /* For certain operations, we need not actually extend
2496 the narrow operand, as long as we will truncate the
2497 results to the same narrowness. */
2498
2499 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2500 (unoptab == neg_optab
2501 || unoptab == one_cmpl_optab)
2502 && class == MODE_INT);
2503
2504 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2505 unsignedp);
2506
2507 /* If we are generating clz using a wider mode, adjust the
2508 result. */
2509 if (unoptab == clz_optab && temp != 0)
2510 temp = expand_binop (wider_mode, sub_optab, temp,
2511 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2512 - GET_MODE_BITSIZE (mode)),
2513 target, true, OPTAB_DIRECT);
2514
2515 if (temp)
2516 {
2517 if (class != MODE_INT)
2518 {
2519 if (target == 0)
2520 target = gen_reg_rtx (mode);
2521 convert_move (target, temp, 0);
2522 return target;
2523 }
2524 else
2525 return gen_lowpart (mode, temp);
2526 }
2527 else
2528 delete_insns_since (last);
2529 }
2530 }
2531 }
2532
2533 /* One final attempt at implementing negation via subtraction,
2534 this time allowing widening of the operand. */
2535 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2536 {
2537 rtx temp;
2538 temp = expand_binop (mode,
2539 unoptab == negv_optab ? subv_optab : sub_optab,
2540 CONST0_RTX (mode), op0,
2541 target, unsignedp, OPTAB_LIB_WIDEN);
2542 if (temp)
2543 return temp;
2544 }
2545
2546 return 0;
2547 }
2548 \f
2549 /* Emit code to compute the absolute value of OP0, with result to
2550 TARGET if convenient. (TARGET may be 0.) The return value says
2551 where the result actually is to be found.
2552
2553 MODE is the mode of the operand; the result is produced in
2554 the same mode.
2555
2556 */
2557
2558 rtx
2559 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2560 int result_unsignedp)
2561 {
2562 rtx temp;
2563
2564 if (! flag_trapv)
2565 result_unsignedp = 1;
2566
2567 /* First try to do it with a special abs instruction. */
2568 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2569 op0, target, 0);
2570 if (temp != 0)
2571 return temp;
2572
2573 /* For floating point modes, try clearing the sign bit. */
2574 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2575 {
2576 temp = expand_absneg_bit (ABS, mode, op0, target);
2577 if (temp)
2578 return temp;
2579 }
2580
2581 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2582 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2583 && !HONOR_SIGNED_ZEROS (mode))
2584 {
2585 rtx last = get_last_insn ();
2586
2587 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2588 if (temp != 0)
2589 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2590 OPTAB_WIDEN);
2591
2592 if (temp != 0)
2593 return temp;
2594
2595 delete_insns_since (last);
2596 }
2597
2598 /* If this machine has expensive jumps, we can do integer absolute
2599 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2600 where W is the width of MODE. */
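  /* Editorial worked example: for a 32-bit x = -5,
         s = x >> 31            == -1   (all ones)
         (s ^ x) - s            == 4 - (-1) == 5
     and for x = 5, s == 0, so the expression is just x.  */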
2601
2602 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2603 {
2604 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2605 size_int (GET_MODE_BITSIZE (mode) - 1),
2606 NULL_RTX, 0);
2607
2608 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2609 OPTAB_LIB_WIDEN);
2610 if (temp != 0)
2611 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2612 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2613
2614 if (temp != 0)
2615 return temp;
2616 }
2617
2618 return NULL_RTX;
2619 }
2620
2621 rtx
2622 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2623 int result_unsignedp, int safe)
2624 {
2625 rtx temp, op1;
2626
2627 if (! flag_trapv)
2628 result_unsignedp = 1;
2629
2630 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2631 if (temp != 0)
2632 return temp;
2633
2634 /* If that does not win, use conditional jump and negate. */
2635
2636 /* It is safe to use the target if it is the same
2637 as the source, provided it is also a pseudo register. */
2638 if (op0 == target && REG_P (op0)
2639 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2640 safe = 1;
2641
2642 op1 = gen_label_rtx ();
2643 if (target == 0 || ! safe
2644 || GET_MODE (target) != mode
2645 || (MEM_P (target) && MEM_VOLATILE_P (target))
2646 || (REG_P (target)
2647 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2648 target = gen_reg_rtx (mode);
2649
2650 emit_move_insn (target, op0);
2651 NO_DEFER_POP;
2652
2653 /* If this mode is an integer too wide to compare properly,
2654 compare word by word. Rely on CSE to optimize constant cases. */
2655 if (GET_MODE_CLASS (mode) == MODE_INT
2656 && ! can_compare_p (GE, mode, ccp_jump))
2657 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2658 NULL_RTX, op1);
2659 else
2660 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2661 NULL_RTX, NULL_RTX, op1);
2662
2663 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2664 target, target, 0);
2665 if (op0 != target)
2666 emit_move_insn (target, op0);
2667 emit_label (op1);
2668 OK_DEFER_POP;
2669 return target;
2670 }
2671
2672 /* A subroutine of expand_copysign, perform the copysign operation using the
2673 abs and neg primitives advertised to exist on the target. The assumption
2674 is that we have a split register file, and leaving op0 in fp registers,
2675 and not playing with subregs so much, will help the register allocator. */
2676
2677 static rtx
2678 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2679 int bitpos, bool op0_is_abs)
2680 {
2681 enum machine_mode imode;
2682 HOST_WIDE_INT hi, lo;
2683 int word;
2684 rtx label;
2685
2686 if (target == op1)
2687 target = NULL_RTX;
2688
2689 if (!op0_is_abs)
2690 {
2691 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2692 if (op0 == NULL)
2693 return NULL_RTX;
2694 target = op0;
2695 }
2696 else
2697 {
2698 if (target == NULL_RTX)
2699 target = copy_to_reg (op0);
2700 else
2701 emit_move_insn (target, op0);
2702 }
2703
2704 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2705 {
2706 imode = int_mode_for_mode (mode);
2707 if (imode == BLKmode)
2708 return NULL_RTX;
2709 op1 = gen_lowpart (imode, op1);
2710 }
2711 else
2712 {
2713 imode = word_mode;
2714 if (FLOAT_WORDS_BIG_ENDIAN)
2715 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2716 else
2717 word = bitpos / BITS_PER_WORD;
2718 bitpos = bitpos % BITS_PER_WORD;
2719 op1 = operand_subword_force (op1, word, mode);
2720 }
2721
2722 if (bitpos < HOST_BITS_PER_WIDE_INT)
2723 {
2724 hi = 0;
2725 lo = (HOST_WIDE_INT) 1 << bitpos;
2726 }
2727 else
2728 {
2729 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2730 lo = 0;
2731 }
2732
2733 op1 = expand_binop (imode, and_optab, op1,
2734 immed_double_const (lo, hi, imode),
2735 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2736
2737 label = gen_label_rtx ();
2738 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2739
2740 if (GET_CODE (op0) == CONST_DOUBLE)
2741 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2742 else
2743 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2744 if (op0 != target)
2745 emit_move_insn (target, op0);
2746
2747 emit_label (label);
2748
2749 return target;
2750 }
2751
2752
2753 /* A subroutine of expand_copysign, perform the entire copysign operation
2754 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2755 is true if op0 is known to have its sign bit clear. */
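/* Editorial sketch of the bit manipulation performed below, for IEEE single
   precision (sign bit 31):
       result = (op0 & 0x7fffffff) | (op1 & 0x80000000)
   The first AND is skipped when OP0_IS_ABS says the sign bit of op0 is
   already known to be clear.  */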
2756
2757 static rtx
2758 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2759 int bitpos, bool op0_is_abs)
2760 {
2761 enum machine_mode imode;
2762 HOST_WIDE_INT hi, lo;
2763 int word, nwords, i;
2764 rtx temp, insns;
2765
2766 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2767 {
2768 imode = int_mode_for_mode (mode);
2769 if (imode == BLKmode)
2770 return NULL_RTX;
2771 word = 0;
2772 nwords = 1;
2773 }
2774 else
2775 {
2776 imode = word_mode;
2777
2778 if (FLOAT_WORDS_BIG_ENDIAN)
2779 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2780 else
2781 word = bitpos / BITS_PER_WORD;
2782 bitpos = bitpos % BITS_PER_WORD;
2783 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2784 }
2785
2786 if (bitpos < HOST_BITS_PER_WIDE_INT)
2787 {
2788 hi = 0;
2789 lo = (HOST_WIDE_INT) 1 << bitpos;
2790 }
2791 else
2792 {
2793 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2794 lo = 0;
2795 }
2796
2797 if (target == 0 || target == op0 || target == op1)
2798 target = gen_reg_rtx (mode);
2799
2800 if (nwords > 1)
2801 {
2802 start_sequence ();
2803
2804 for (i = 0; i < nwords; ++i)
2805 {
2806 rtx targ_piece = operand_subword (target, i, 1, mode);
2807 rtx op0_piece = operand_subword_force (op0, i, mode);
2808
2809 if (i == word)
2810 {
2811 if (!op0_is_abs)
2812 op0_piece = expand_binop (imode, and_optab, op0_piece,
2813 immed_double_const (~lo, ~hi, imode),
2814 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2815
2816 op1 = expand_binop (imode, and_optab,
2817 operand_subword_force (op1, i, mode),
2818 immed_double_const (lo, hi, imode),
2819 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2820
2821 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2822 targ_piece, 1, OPTAB_LIB_WIDEN);
2823 if (temp != targ_piece)
2824 emit_move_insn (targ_piece, temp);
2825 }
2826 else
2827 emit_move_insn (targ_piece, op0_piece);
2828 }
2829
2830 insns = get_insns ();
2831 end_sequence ();
2832
2833 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2834 }
2835 else
2836 {
2837 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2838 immed_double_const (lo, hi, imode),
2839 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2840
2841 op0 = gen_lowpart (imode, op0);
2842 if (!op0_is_abs)
2843 op0 = expand_binop (imode, and_optab, op0,
2844 immed_double_const (~lo, ~hi, imode),
2845 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2846
2847 temp = expand_binop (imode, ior_optab, op0, op1,
2848 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2849 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2850 }
2851
2852 return target;
2853 }
2854
2855 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2856 scalar floating point mode. Return NULL if we do not know how to
2857 expand the operation inline. */
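/* Editorial reminder of the C99 semantics being implemented: copysign
   returns a value with the magnitude of the first argument and the sign of
   the second, e.g. copysign (3.0, -0.5) == -3.0 and
   copysign (-2.0, +0.0) == 2.0.  */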
2858
2859 rtx
2860 expand_copysign (rtx op0, rtx op1, rtx target)
2861 {
2862 enum machine_mode mode = GET_MODE (op0);
2863 const struct real_format *fmt;
2864 bool op0_is_abs;
2865 rtx temp;
2866
2867 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2868 gcc_assert (GET_MODE (op1) == mode);
2869
2870 /* First try to do it with a special instruction. */
2871 temp = expand_binop (mode, copysign_optab, op0, op1,
2872 target, 0, OPTAB_DIRECT);
2873 if (temp)
2874 return temp;
2875
2876 fmt = REAL_MODE_FORMAT (mode);
2877 if (fmt == NULL || !fmt->has_signed_zero)
2878 return NULL_RTX;
2879
2880 op0_is_abs = false;
2881 if (GET_CODE (op0) == CONST_DOUBLE)
2882 {
2883 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2884 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2885 op0_is_abs = true;
2886 }
2887
2888 if (fmt->signbit_ro >= 0
2889 && (GET_CODE (op0) == CONST_DOUBLE
2890 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2891 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2892 {
2893 temp = expand_copysign_absneg (mode, op0, op1, target,
2894 fmt->signbit_ro, op0_is_abs);
2895 if (temp)
2896 return temp;
2897 }
2898
2899 if (fmt->signbit_rw < 0)
2900 return NULL_RTX;
2901 return expand_copysign_bit (mode, op0, op1, target,
2902 fmt->signbit_rw, op0_is_abs);
2903 }
2904 \f
2905 /* Generate an instruction whose insn-code is INSN_CODE,
2906 with two operands: an output TARGET and an input OP0.
2907 TARGET *must* be nonzero, and the output is always stored there.
2908 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2909 the value that is stored into TARGET. */
2910
2911 void
2912 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2913 {
2914 rtx temp;
2915 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2916 rtx pat;
2917
2918 temp = target;
2919
2920 /* Sign and zero extension from memory is often done specially on
2921 RISC machines, so forcing into a register here can pessimize
2922 code. */
2923 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
2924 op0 = force_not_mem (op0);
2925
2926 /* Now, if insn does not accept our operands, put them into pseudos. */
2927
2928 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2929 op0 = copy_to_mode_reg (mode0, op0);
2930
2931 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
2932 || (flag_force_mem && MEM_P (temp)))
2933 temp = gen_reg_rtx (GET_MODE (temp));
2934
2935 pat = GEN_FCN (icode) (temp, op0);
2936
2937 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
2938 add_equal_note (pat, temp, code, op0, NULL_RTX);
2939
2940 emit_insn (pat);
2941
2942 if (temp != target)
2943 emit_move_insn (target, temp);
2944 }
2945 \f
2946 /* Emit code to perform a series of operations on a multi-word quantity, one
2947 word at a time.
2948
2949 Such a block is preceded by a CLOBBER of the output, consists of multiple
2950 insns, each setting one word of the output, and followed by a SET copying
2951 the output to itself.
2952
2953 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2954 note indicating that it doesn't conflict with the (also multi-word)
2955 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2956 notes.
2957
2958 INSNS is a block of code generated to perform the operation, not including
2959 the CLOBBER and final copy. All insns that compute intermediate values
2960 are first emitted, followed by the block as described above.
2961
2962 TARGET, OP0, and OP1 are the output and inputs of the operations,
2963 respectively. OP1 may be zero for a unary operation.
2964
2965 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2966 on the last insn.
2967
2968 If TARGET is not a register, INSNS is simply emitted with no special
2969 processing. Likewise if anything in INSNS is not an INSN or if
2970 there is a libcall block inside INSNS.
2971
2972 The final insn emitted is returned. */
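/* Editorial sketch of the emitted shape for a two-word operation on TARGET
   (this only restates the description above, it adds no new semantics):

       (clobber TARGET)
       (set (subreg TARGET 0) ...)   ;; carries a REG_NO_CONFLICT note
       (set (subreg TARGET 1) ...)   ;; carries a REG_NO_CONFLICT note
       (set TARGET TARGET)           ;; REG_EQUAL = EQUIV

   with REG_LIBCALL on the first insn and REG_RETVAL on the last.  */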
2973
2974 rtx
2975 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
2976 {
2977 rtx prev, next, first, last, insn;
2978
2979 if (!REG_P (target) || reload_in_progress)
2980 return emit_insn (insns);
2981 else
2982 for (insn = insns; insn; insn = NEXT_INSN (insn))
2983 if (!NONJUMP_INSN_P (insn)
2984 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2985 return emit_insn (insns);
2986
2987 /* First emit all insns that do not store into words of the output and remove
2988 these from the list. */
2989 for (insn = insns; insn; insn = next)
2990 {
2991 rtx set = 0, note;
2992 int i;
2993
2994 next = NEXT_INSN (insn);
2995
2996 /* Some ports (cris) create libcall regions of their own. We must
2997 avoid any potential nesting of LIBCALLs. */
2998 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
2999 remove_note (insn, note);
3000 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3001 remove_note (insn, note);
3002
3003 if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
3004 || GET_CODE (PATTERN (insn)) == CLOBBER)
3005 set = PATTERN (insn);
3006 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3007 {
3008 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3009 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
3010 {
3011 set = XVECEXP (PATTERN (insn), 0, i);
3012 break;
3013 }
3014 }
3015
3016 gcc_assert (set);
3017
3018 if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
3019 {
3020 if (PREV_INSN (insn))
3021 NEXT_INSN (PREV_INSN (insn)) = next;
3022 else
3023 insns = next;
3024
3025 if (next)
3026 PREV_INSN (next) = PREV_INSN (insn);
3027
3028 add_insn (insn);
3029 }
3030 }
3031
3032 prev = get_last_insn ();
3033
3034 /* Now write the CLOBBER of the output, followed by the setting of each
3035 of the words, followed by the final copy. */
3036 if (target != op0 && target != op1)
3037 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3038
3039 for (insn = insns; insn; insn = next)
3040 {
3041 next = NEXT_INSN (insn);
3042 add_insn (insn);
3043
3044 if (op1 && REG_P (op1))
3045 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3046 REG_NOTES (insn));
3047
3048 if (op0 && REG_P (op0))
3049 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3050 REG_NOTES (insn));
3051 }
3052
3053 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3054 != CODE_FOR_nothing)
3055 {
3056 last = emit_move_insn (target, target);
3057 if (equiv)
3058 set_unique_reg_note (last, REG_EQUAL, equiv);
3059 }
3060 else
3061 {
3062 last = get_last_insn ();
3063
3064 /* Remove any existing REG_EQUAL note from "last", or else it will
3065 be mistaken for a note referring to the full contents of the
3066 alleged libcall value when found together with the REG_RETVAL
3067 note added below. An existing note can come from an insn
3068 expansion at "last". */
3069 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3070 }
3071
3072 if (prev == 0)
3073 first = get_insns ();
3074 else
3075 first = NEXT_INSN (prev);
3076
3077 /* Encapsulate the block so it gets manipulated as a unit. */
3078 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3079 REG_NOTES (first));
3080 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3081
3082 return last;
3083 }
3084 \f
3085 /* Emit code to make a call to a constant function or a library call.
3086
3087 INSNS is a list containing all insns emitted in the call.
3088 These insns leave the result in RESULT. Our job is to copy RESULT
3089 to TARGET, which is logically equivalent to EQUIV.
3090
3091 We first emit any insns that set a pseudo on the assumption that these are
3092 loading constants into registers; doing so allows them to be safely cse'ed
3093 between blocks. Then we emit all the other insns in the block, followed by
3094 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3095 note with an operand of EQUIV.
3096
3097 Moving assignments to pseudos outside of the block is done to improve
3098 the generated code, but is not required to generate correct code,
3099 hence being unable to move an assignment is not grounds for not making
3100 a libcall block. There are two reasons why it is safe to leave these
3101 insns inside the block: First, we know that these pseudos cannot be
3102 used in generated RTL outside the block since they are created for
3103 temporary purposes within the block. Second, CSE will not record the
3104 values of anything set inside a libcall block, so we know they must
3105 be dead at the end of the block.
3106
3107 Except for the first group of insns (the ones setting pseudos), the
3108 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3109
3110 void
3111 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3112 {
3113 rtx final_dest = target;
3114 rtx prev, next, first, last, insn;
3115
3116 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3117 into a MEM later. Protect the libcall block from this change. */
3118 if (! REG_P (target) || REG_USERVAR_P (target))
3119 target = gen_reg_rtx (GET_MODE (target));
3120
3121 /* If we're using non-call exceptions, a libcall corresponding to an
3122 operation that may trap may also trap. */
3123 if (flag_non_call_exceptions && may_trap_p (equiv))
3124 {
3125 for (insn = insns; insn; insn = NEXT_INSN (insn))
3126 if (CALL_P (insn))
3127 {
3128 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3129
3130 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3131 remove_note (insn, note);
3132 }
3133 }
3134 else
3135 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3136 reg note to indicate that this call cannot throw or execute a nonlocal
3137 goto (unless there is already a REG_EH_REGION note, in which case
3138 we update it). */
3139 for (insn = insns; insn; insn = NEXT_INSN (insn))
3140 if (CALL_P (insn))
3141 {
3142 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3143
3144 if (note != 0)
3145 XEXP (note, 0) = constm1_rtx;
3146 else
3147 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3148 REG_NOTES (insn));
3149 }
3150
3151 /* First emit all insns that set pseudos. Remove them from the list as
3152 we go. Avoid insns that set pseudos which were referenced in previous
3153 insns. These can be generated by move_by_pieces, for example,
3154 to update an address. Similarly, avoid insns that reference things
3155 set in previous insns. */
3156
3157 for (insn = insns; insn; insn = next)
3158 {
3159 rtx set = single_set (insn);
3160 rtx note;
3161
3162 /* Some ports (cris) create libcall regions of their own. We must
3163 avoid any potential nesting of LIBCALLs. */
3164 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3165 remove_note (insn, note);
3166 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3167 remove_note (insn, note);
3168
3169 next = NEXT_INSN (insn);
3170
3171 if (set != 0 && REG_P (SET_DEST (set))
3172 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3173 && (insn == insns
3174 || ((! INSN_P(insns)
3175 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3176 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3177 && ! modified_in_p (SET_SRC (set), insns)
3178 && ! modified_between_p (SET_SRC (set), insns, insn))))
3179 {
3180 if (PREV_INSN (insn))
3181 NEXT_INSN (PREV_INSN (insn)) = next;
3182 else
3183 insns = next;
3184
3185 if (next)
3186 PREV_INSN (next) = PREV_INSN (insn);
3187
3188 add_insn (insn);
3189 }
3190
3191 /* Some ports use a loop to copy large arguments onto the stack.
3192 Don't move anything outside such a loop. */
3193 if (LABEL_P (insn))
3194 break;
3195 }
3196
3197 prev = get_last_insn ();
3198
3199 /* Write the remaining insns followed by the final copy. */
3200
3201 for (insn = insns; insn; insn = next)
3202 {
3203 next = NEXT_INSN (insn);
3204
3205 add_insn (insn);
3206 }
3207
3208 last = emit_move_insn (target, result);
3209 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3210 != CODE_FOR_nothing)
3211 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3212 else
3213 {
3214 /* Remove any existing REG_EQUAL note from "last", or else it will
3215 be mistaken for a note referring to the full contents of the
3216 libcall value when found together with the REG_RETVAL note added
3217 below. An existing note can come from an insn expansion at
3218 "last". */
3219 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3220 }
3221
3222 if (final_dest != target)
3223 emit_move_insn (final_dest, target);
3224
3225 if (prev == 0)
3226 first = get_insns ();
3227 else
3228 first = NEXT_INSN (prev);
3229
3230 /* Encapsulate the block so it gets manipulated as a unit. */
3231 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3232 {
3233 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3234 when the encapsulated region would not be in one basic block,
3235 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3236 */
3237 bool attach_libcall_retval_notes = true;
3238 next = NEXT_INSN (last);
3239 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3240 if (control_flow_insn_p (insn))
3241 {
3242 attach_libcall_retval_notes = false;
3243 break;
3244 }
3245
3246 if (attach_libcall_retval_notes)
3247 {
3248 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3249 REG_NOTES (first));
3250 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3251 REG_NOTES (last));
3252 }
3253 }
3254 }
3255 \f
3256 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3257 PURPOSE describes how this comparison will be used. CODE is the rtx
3258 comparison code we will be using.
3259
3260 ??? Actually, CODE is slightly weaker than that. A target is still
3261 required to implement all of the normal bcc operations, but not
3262 required to implement all (or any) of the unordered bcc operations. */
3263
3264 int
3265 can_compare_p (enum rtx_code code, enum machine_mode mode,
3266 enum can_compare_purpose purpose)
3267 {
3268 do
3269 {
3270 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3271 {
3272 if (purpose == ccp_jump)
3273 return bcc_gen_fctn[(int) code] != NULL;
3274 else if (purpose == ccp_store_flag)
3275 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3276 else
3277 /* There's only one cmov entry point, and it's allowed to fail. */
3278 return 1;
3279 }
3280 if (purpose == ccp_jump
3281 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3282 return 1;
3283 if (purpose == ccp_cmov
3284 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3285 return 1;
3286 if (purpose == ccp_store_flag
3287 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3288 return 1;
3289 mode = GET_MODE_WIDER_MODE (mode);
3290 }
3291 while (mode != VOIDmode);
3292
3293 return 0;
3294 }
3295
3296 /* This function is called when we are going to emit a compare instruction that
3297 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3298
3299 *PMODE is the mode of the inputs (in case they are const_int).
3300 *PUNSIGNEDP nonzero says that the operands are unsigned;
3301 this matters if they need to be widened.
3302
3303 If they have mode BLKmode, then SIZE specifies the size of both operands.
3304
3305 This function performs all the setup necessary so that the caller only has
3306 to emit a single comparison insn. This setup can involve doing a BLKmode
3307 comparison or emitting a library call to perform the comparison if no insn
3308 is available to handle it.
3309 The values which are passed in through pointers can be modified; the caller
3310 should perform the comparison on the modified values. Constant
3311 comparisons must have already been folded. */
3312
3313 static void
3314 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3315 enum machine_mode *pmode, int *punsignedp,
3316 enum can_compare_purpose purpose)
3317 {
3318 enum machine_mode mode = *pmode;
3319 rtx x = *px, y = *py;
3320 int unsignedp = *punsignedp;
3321 enum mode_class class;
3322
3323 class = GET_MODE_CLASS (mode);
3324
3325 if (mode != BLKmode && flag_force_mem)
3326 {
3327 /* Load duplicate non-volatile operands once. */
3328 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3329 {
3330 x = force_not_mem (x);
3331 y = x;
3332 }
3333 else
3334 {
3335 x = force_not_mem (x);
3336 y = force_not_mem (y);
3337 }
3338 }
3339
3340 /* If we are inside an appropriately-short loop and we are optimizing,
3341 force expensive constants into a register. */
3342 if (CONSTANT_P (x) && optimize
3343 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3344 x = force_reg (mode, x);
3345
3346 if (CONSTANT_P (y) && optimize
3347 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3348 y = force_reg (mode, y);
3349
3350 #ifdef HAVE_cc0
3351 /* Make sure we have a canonical comparison. The RTL
3352 documentation states that canonical comparisons are required only
3353 for targets which have cc0. */
3354 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3355 #endif
3356
3357 /* Don't let both operands fail to indicate the mode. */
3358 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3359 x = force_reg (mode, x);
3360
3361 /* Handle all BLKmode compares. */
3362
3363 if (mode == BLKmode)
3364 {
3365 enum machine_mode cmp_mode, result_mode;
3366 enum insn_code cmp_code;
3367 tree length_type;
3368 rtx libfunc;
3369 rtx result;
3370 rtx opalign
3371 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3372
3373 gcc_assert (size);
3374
3375 /* Try to use a memory block compare insn - either cmpstr
3376 or cmpmem will do. */
3377 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3378 cmp_mode != VOIDmode;
3379 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3380 {
3381 cmp_code = cmpmem_optab[cmp_mode];
3382 if (cmp_code == CODE_FOR_nothing)
3383 cmp_code = cmpstr_optab[cmp_mode];
3384 if (cmp_code == CODE_FOR_nothing)
3385 continue;
3386
3387 /* Must make sure the size fits the insn's mode. */
3388 if ((GET_CODE (size) == CONST_INT
3389 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3390 || (GET_MODE_BITSIZE (GET_MODE (size))
3391 > GET_MODE_BITSIZE (cmp_mode)))
3392 continue;
3393
3394 result_mode = insn_data[cmp_code].operand[0].mode;
3395 result = gen_reg_rtx (result_mode);
3396 size = convert_to_mode (cmp_mode, size, 1);
3397 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3398
3399 *px = result;
3400 *py = const0_rtx;
3401 *pmode = result_mode;
3402 return;
3403 }
3404
3405 /* Otherwise call a library function, memcmp. */
3406 libfunc = memcmp_libfunc;
3407 length_type = sizetype;
3408 result_mode = TYPE_MODE (integer_type_node);
3409 cmp_mode = TYPE_MODE (length_type);
3410 size = convert_to_mode (TYPE_MODE (length_type), size,
3411 TYPE_UNSIGNED (length_type));
3412
3413 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3414 result_mode, 3,
3415 XEXP (x, 0), Pmode,
3416 XEXP (y, 0), Pmode,
3417 size, cmp_mode);
3418 *px = result;
3419 *py = const0_rtx;
3420 *pmode = result_mode;
3421 return;
3422 }
3423
3424 /* Don't allow operands to the compare to trap, as that can put the
3425 compare and branch in different basic blocks. */
3426 if (flag_non_call_exceptions)
3427 {
3428 if (may_trap_p (x))
3429 x = force_reg (mode, x);
3430 if (may_trap_p (y))
3431 y = force_reg (mode, y);
3432 }
3433
3434 *px = x;
3435 *py = y;
3436 if (can_compare_p (*pcomparison, mode, purpose))
3437 return;
3438
3439 /* Handle a lib call just for the mode we are using. */
3440
3441 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3442 {
3443 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3444 rtx result;
3445
3446 /* If we want unsigned, and this mode has a distinct unsigned
3447 comparison routine, use that. */
3448 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3449 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3450
3451 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3452 word_mode, 2, x, mode, y, mode);
3453
3454 *px = result;
3455 *pmode = word_mode;
3456 if (TARGET_LIB_INT_CMP_BIASED)
3457 /* Integer comparison returns a result that must be compared
3458 against 1, so that even if we do an unsigned compare
3459 afterward, there is still a value that can represent the
3460 result "less than". */
3461 *py = const1_rtx;
3462 else
3463 {
3464 *py = const0_rtx;
3465 *punsignedp = 1;
3466 }
3467 return;
3468 }
3469
3470 gcc_assert (class == MODE_FLOAT);
3471 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3472 }
3473
3474 /* Before emitting an insn with code ICODE, make sure that X, which is going
3475 to be used for operand OPNUM of the insn, is converted from mode MODE to
3476 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3477 that it is accepted by the operand predicate. Return the new value. */
3478
3479 static rtx
3480 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3481 enum machine_mode wider_mode, int unsignedp)
3482 {
3483 if (mode != wider_mode)
3484 x = convert_modes (wider_mode, mode, x, unsignedp);
3485
3486 if (!insn_data[icode].operand[opnum].predicate
3487 (x, insn_data[icode].operand[opnum].mode))
3488 {
3489 if (no_new_pseudos)
3490 return NULL_RTX;
3491 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3492 }
3493
3494 return x;
3495 }
3496
3497 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3498 we can do the comparison.
3499 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3500 be NULL_RTX which indicates that only a comparison is to be generated. */
3501
3502 static void
3503 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3504 enum rtx_code comparison, int unsignedp, rtx label)
3505 {
3506 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3507 enum mode_class class = GET_MODE_CLASS (mode);
3508 enum machine_mode wider_mode = mode;
3509
3510 /* Try combined insns first. */
3511 do
3512 {
3513 enum insn_code icode;
3514 PUT_MODE (test, wider_mode);
3515
3516 if (label)
3517 {
3518 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3519
3520 if (icode != CODE_FOR_nothing
3521 && insn_data[icode].operand[0].predicate (test, wider_mode))
3522 {
3523 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3524 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3525 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3526 return;
3527 }
3528 }
3529
3530 /* Handle some compares against zero. */
3531 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3532 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3533 {
3534 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3535 emit_insn (GEN_FCN (icode) (x));
3536 if (label)
3537 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3538 return;
3539 }
3540
3541 /* Handle compares for which there is a directly suitable insn. */
3542
3543 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3544 if (icode != CODE_FOR_nothing)
3545 {
3546 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3547 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3548 emit_insn (GEN_FCN (icode) (x, y));
3549 if (label)
3550 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3551 return;
3552 }
3553
3554 if (class != MODE_INT && class != MODE_FLOAT
3555 && class != MODE_COMPLEX_FLOAT)
3556 break;
3557
3558 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3559 }
3560 while (wider_mode != VOIDmode);
3561
3562 gcc_unreachable ();
3563 }
3564
3565 /* Generate code to compare X with Y so that the condition codes are
3566 set and to jump to LABEL if the condition is true. If X is a
3567 constant and Y is not a constant, then the comparison is swapped to
3568 ensure that the comparison RTL has the canonical form.
3569
3570 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3571 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3572 the proper branch condition code.
3573
3574 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3575
3576 MODE is the mode of the inputs (in case they are const_int).
3577
3578 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3579 be passed unchanged to emit_cmp_insn, then potentially converted into an
3580 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
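/* Usage sketch (illustrative only; it mirrors the calls made later in
   this file, e.g. in expand_float): branch to LABEL when FROM is
   negative, fall through otherwise.

     rtx label = gen_label_rtx ();
     emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX,
                              GET_MODE (from), 0, label);
     ... insns for the FROM >= 0 case ...
     emit_label (label);
*/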
3581
3582 void
3583 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3584 enum machine_mode mode, int unsignedp, rtx label)
3585 {
3586 rtx op0 = x, op1 = y;
3587
3588 /* Swap operands and condition to ensure canonical RTL. */
3589 if (swap_commutative_operands_p (x, y))
3590 {
3591 /* If we're not emitting a branch, this means some caller
3592 is out of sync. */
3593 gcc_assert (label);
3594
3595 op0 = y, op1 = x;
3596 comparison = swap_condition (comparison);
3597 }
3598
3599 #ifdef HAVE_cc0
3600 /* If OP0 is still a constant, then both X and Y must be constants.
3601 Force X into a register to create canonical RTL. */
3602 if (CONSTANT_P (op0))
3603 op0 = force_reg (mode, op0);
3604 #endif
3605
3606 if (unsignedp)
3607 comparison = unsigned_condition (comparison);
3608
3609 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3610 ccp_jump);
3611 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3612 }
3613
3614 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3615
3616 void
3617 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3618 enum machine_mode mode, int unsignedp)
3619 {
3620 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3621 }
3622 \f
3623 /* Emit a library call comparison between floating point X and Y.
3624 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3625
3626 static void
3627 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3628 enum machine_mode *pmode, int *punsignedp)
3629 {
3630 enum rtx_code comparison = *pcomparison;
3631 enum rtx_code swapped = swap_condition (comparison);
3632 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3633 rtx x = *px;
3634 rtx y = *py;
3635 enum machine_mode orig_mode = GET_MODE (x);
3636 enum machine_mode mode;
3637 rtx value, target, insns, equiv;
3638 rtx libfunc = 0;
3639 bool reversed_p = false;
3640
3641 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3642 {
3643 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3644 break;
3645
3646 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3647 {
3648 rtx tmp;
3649 tmp = x; x = y; y = tmp;
3650 comparison = swapped;
3651 break;
3652 }
3653
3654 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3655 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3656 {
3657 comparison = reversed;
3658 reversed_p = true;
3659 break;
3660 }
3661 }
3662
3663 gcc_assert (mode != VOIDmode);
3664
3665 if (mode != orig_mode)
3666 {
3667 x = convert_to_mode (mode, x, 0);
3668 y = convert_to_mode (mode, y, 0);
3669 }
3670
3671 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3672 the RTL. This allows the RTL optimizers to delete the libcall if the
3673 condition can be determined at compile-time. */
3674 if (comparison == UNORDERED)
3675 {
3676 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3677 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3678 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3679 temp, const_true_rtx, equiv);
3680 }
3681 else
3682 {
3683 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3684 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3685 {
3686 rtx true_rtx, false_rtx;
3687
3688 switch (comparison)
3689 {
3690 case EQ:
3691 true_rtx = const0_rtx;
3692 false_rtx = const_true_rtx;
3693 break;
3694
3695 case NE:
3696 true_rtx = const_true_rtx;
3697 false_rtx = const0_rtx;
3698 break;
3699
3700 case GT:
3701 true_rtx = const1_rtx;
3702 false_rtx = const0_rtx;
3703 break;
3704
3705 case GE:
3706 true_rtx = const0_rtx;
3707 false_rtx = constm1_rtx;
3708 break;
3709
3710 case LT:
3711 true_rtx = constm1_rtx;
3712 false_rtx = const0_rtx;
3713 break;
3714
3715 case LE:
3716 true_rtx = const0_rtx;
3717 false_rtx = const1_rtx;
3718 break;
3719
3720 default:
3721 gcc_unreachable ();
3722 }
3723 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3724 equiv, true_rtx, false_rtx);
3725 }
3726 }
3727
3728 start_sequence ();
3729 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3730 word_mode, 2, x, mode, y, mode);
3731 insns = get_insns ();
3732 end_sequence ();
3733
3734 target = gen_reg_rtx (word_mode);
3735 emit_libcall_block (insns, target, value, equiv);
3736
3737 if (comparison == UNORDERED
3738 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3739 comparison = reversed_p ? EQ : NE;
3740
3741 *px = target;
3742 *py = const0_rtx;
3743 *pmode = word_mode;
3744 *pcomparison = comparison;
3745 *punsignedp = 0;
3746 }
3747 \f
3748 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3749
3750 void
3751 emit_indirect_jump (rtx loc)
3752 {
3753 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3754 (loc, Pmode))
3755 loc = copy_to_mode_reg (Pmode, loc);
3756
3757 emit_jump_insn (gen_indirect_jump (loc));
3758 emit_barrier ();
3759 }
3760 \f
3761 #ifdef HAVE_conditional_move
3762
3763 /* Emit a conditional move instruction if the machine supports one for that
3764 condition and machine mode.
3765
3766 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3767 the mode to use should they be constants. If it is VOIDmode, they cannot
3768 both be constants.
3769
3770 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3771 should be stored there. MODE is the mode to use should they be constants.
3772 If it is VOIDmode, they cannot both be constants.
3773
3774 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3775 is not supported. */
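/* Usage sketch (illustrative only; OP0, OP1, OP2 and OP3 are assumed to
   be SImode values supplied by the caller): compute
   TARGET = (OP0 > OP1 ? OP2 : OP3), falling back when SImode has no
   conditional move.

     rtx res = emit_conditional_move (target, GT, op0, op1, SImode,
                                      op2, op3, SImode, 0);
     if (res == 0)
       ... emit an explicit compare-and-branch sequence instead ...
*/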
3776
3777 rtx
3778 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3779 enum machine_mode cmode, rtx op2, rtx op3,
3780 enum machine_mode mode, int unsignedp)
3781 {
3782 rtx tem, subtarget, comparison, insn;
3783 enum insn_code icode;
3784 enum rtx_code reversed;
3785
3786 /* If one operand is constant, make it the second one. Only do this
3787 if the other operand is not constant as well. */
3788
3789 if (swap_commutative_operands_p (op0, op1))
3790 {
3791 tem = op0;
3792 op0 = op1;
3793 op1 = tem;
3794 code = swap_condition (code);
3795 }
3796
3797 /* get_condition will prefer to generate LT and GT even if the old
3798 comparison was against zero, so undo that canonicalization here since
3799 comparisons against zero are cheaper. */
3800 if (code == LT && op1 == const1_rtx)
3801 code = LE, op1 = const0_rtx;
3802 else if (code == GT && op1 == constm1_rtx)
3803 code = GE, op1 = const0_rtx;
3804
3805 if (cmode == VOIDmode)
3806 cmode = GET_MODE (op0);
3807
3808 if (swap_commutative_operands_p (op2, op3)
3809 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3810 != UNKNOWN))
3811 {
3812 tem = op2;
3813 op2 = op3;
3814 op3 = tem;
3815 code = reversed;
3816 }
3817
3818 if (mode == VOIDmode)
3819 mode = GET_MODE (op2);
3820
3821 icode = movcc_gen_code[mode];
3822
3823 if (icode == CODE_FOR_nothing)
3824 return 0;
3825
3826 if (flag_force_mem)
3827 {
3828 op2 = force_not_mem (op2);
3829 op3 = force_not_mem (op3);
3830 }
3831
3832 if (!target)
3833 target = gen_reg_rtx (mode);
3834
3835 subtarget = target;
3836
3837 /* If the insn doesn't accept these operands, put them in pseudos. */
3838
3839 if (!insn_data[icode].operand[0].predicate
3840 (subtarget, insn_data[icode].operand[0].mode))
3841 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3842
3843 if (!insn_data[icode].operand[2].predicate
3844 (op2, insn_data[icode].operand[2].mode))
3845 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3846
3847 if (!insn_data[icode].operand[3].predicate
3848 (op3, insn_data[icode].operand[3].mode))
3849 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3850
3851 /* Everything should now be in the suitable form, so emit the compare insn
3852 and then the conditional move. */
3853
3854 comparison
3855 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3856
3857 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3858 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3859 return NULL and let the caller figure out how best to deal with this
3860 situation. */
3861 if (GET_CODE (comparison) != code)
3862 return NULL_RTX;
3863
3864 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3865
3866 /* If that failed, then give up. */
3867 if (insn == 0)
3868 return 0;
3869
3870 emit_insn (insn);
3871
3872 if (subtarget != target)
3873 convert_move (target, subtarget, 0);
3874
3875 return target;
3876 }
3877
3878 /* Return nonzero if a conditional move of mode MODE is supported.
3879
3880 This function is for combine so it can tell whether an insn that looks
3881 like a conditional move is actually supported by the hardware. If we
3882 guess wrong we lose a bit on optimization, but that's it. */
3883 /* ??? sparc64 supports conditionally moving integer values based on fp
3884 comparisons, and vice versa. How do we handle them? */
3885
3886 int
3887 can_conditionally_move_p (enum machine_mode mode)
3888 {
3889 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3890 return 1;
3891
3892 return 0;
3893 }
3894
3895 #endif /* HAVE_conditional_move */
3896
3897 /* Emit a conditional addition instruction if the machine supports one for that
3898 condition and machine mode.
3899
3900 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3901 the mode to use should they be constants. If it is VOIDmode, they cannot
3902 both be constants.
3903
3904 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3905 should be stored there. MODE is the mode to use should they be constants.
3906 If it is VOIDmode, they cannot both be constants.
3907
3908 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3909 is not supported. */
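/* Usage sketch (illustrative only; operands are assumed to be SImode
   values supplied by the caller): compute
   TARGET = (OP0 >= 0 ? OP2 : OP2 + OP3), i.e. a conditional addition,
   when the target provides an addcc pattern.

     rtx res = emit_conditional_add (target, GE, op0, const0_rtx, SImode,
                                     op2, op3, SImode, 0);
     if (res == 0)
       ... fall back to an explicit branch ...
*/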
3910
3911 rtx
3912 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3913 enum machine_mode cmode, rtx op2, rtx op3,
3914 enum machine_mode mode, int unsignedp)
3915 {
3916 rtx tem, subtarget, comparison, insn;
3917 enum insn_code icode;
3918 enum rtx_code reversed;
3919
3920 /* If one operand is constant, make it the second one. Only do this
3921 if the other operand is not constant as well. */
3922
3923 if (swap_commutative_operands_p (op0, op1))
3924 {
3925 tem = op0;
3926 op0 = op1;
3927 op1 = tem;
3928 code = swap_condition (code);
3929 }
3930
3931 /* get_condition will prefer to generate LT and GT even if the old
3932 comparison was against zero, so undo that canonicalization here since
3933 comparisons against zero are cheaper. */
3934 if (code == LT && op1 == const1_rtx)
3935 code = LE, op1 = const0_rtx;
3936 else if (code == GT && op1 == constm1_rtx)
3937 code = GE, op1 = const0_rtx;
3938
3939 if (cmode == VOIDmode)
3940 cmode = GET_MODE (op0);
3941
3942 if (swap_commutative_operands_p (op2, op3)
3943 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3944 != UNKNOWN))
3945 {
3946 tem = op2;
3947 op2 = op3;
3948 op3 = tem;
3949 code = reversed;
3950 }
3951
3952 if (mode == VOIDmode)
3953 mode = GET_MODE (op2);
3954
3955 icode = addcc_optab->handlers[(int) mode].insn_code;
3956
3957 if (icode == CODE_FOR_nothing)
3958 return 0;
3959
3960 if (flag_force_mem)
3961 {
3962 op2 = force_not_mem (op2);
3963 op3 = force_not_mem (op3);
3964 }
3965
3966 if (!target)
3967 target = gen_reg_rtx (mode);
3968
3969 /* If the insn doesn't accept these operands, put them in pseudos. */
3970
3971 if (!insn_data[icode].operand[0].predicate
3972 (target, insn_data[icode].operand[0].mode))
3973 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3974 else
3975 subtarget = target;
3976
3977 if (!insn_data[icode].operand[2].predicate
3978 (op2, insn_data[icode].operand[2].mode))
3979 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3980
3981 if (!insn_data[icode].operand[3].predicate
3982 (op3, insn_data[icode].operand[3].mode))
3983 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3984
3985 /* Everything should now be in the suitable form, so emit the compare insn
3986 and then the conditional move. */
3987
3988 comparison
3989 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3990
3991 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3992 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3993 return NULL and let the caller figure out how best to deal with this
3994 situation. */
3995 if (GET_CODE (comparison) != code)
3996 return NULL_RTX;
3997
3998 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3999
4000 /* If that failed, then give up. */
4001 if (insn == 0)
4002 return 0;
4003
4004 emit_insn (insn);
4005
4006 if (subtarget != target)
4007 convert_move (target, subtarget, 0);
4008
4009 return target;
4010 }
4011 \f
4012 /* These functions attempt to generate an insn body, rather than
4013 emitting the insn, but if the gen function already emits them, we
4014 make no attempt to turn them back into naked patterns. */
4015
4016 /* Generate and return an insn body to add Y to X. */
4017
4018 rtx
4019 gen_add2_insn (rtx x, rtx y)
4020 {
4021 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4022
4023 gcc_assert (insn_data[icode].operand[0].predicate
4024 (x, insn_data[icode].operand[0].mode));
4025 gcc_assert (insn_data[icode].operand[1].predicate
4026 (x, insn_data[icode].operand[1].mode));
4027 gcc_assert (insn_data[icode].operand[2].predicate
4028 (y, insn_data[icode].operand[2].mode));
4029
4030 return GEN_FCN (icode) (x, x, y);
4031 }
4032
4033 /* Generate and return an insn body to add r1 and c,
4034 storing the result in r0. */
4035 rtx
4036 gen_add3_insn (rtx r0, rtx r1, rtx c)
4037 {
4038 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4039
4040 if (icode == CODE_FOR_nothing
4041 || !(insn_data[icode].operand[0].predicate
4042 (r0, insn_data[icode].operand[0].mode))
4043 || !(insn_data[icode].operand[1].predicate
4044 (r1, insn_data[icode].operand[1].mode))
4045 || !(insn_data[icode].operand[2].predicate
4046 (c, insn_data[icode].operand[2].mode)))
4047 return NULL_RTX;
4048
4049 return GEN_FCN (icode) (r0, r1, c);
4050 }
4051
4052 int
4053 have_add2_insn (rtx x, rtx y)
4054 {
4055 int icode;
4056
4057 gcc_assert (GET_MODE (x) != VOIDmode);
4058
4059 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4060
4061 if (icode == CODE_FOR_nothing)
4062 return 0;
4063
4064 if (!(insn_data[icode].operand[0].predicate
4065 (x, insn_data[icode].operand[0].mode))
4066 || !(insn_data[icode].operand[1].predicate
4067 (x, insn_data[icode].operand[1].mode))
4068 || !(insn_data[icode].operand[2].predicate
4069 (y, insn_data[icode].operand[2].mode)))
4070 return 0;
4071
4072 return 1;
4073 }
4074
4075 /* Generate and return an insn body to subtract Y from X. */
4076
4077 rtx
4078 gen_sub2_insn (rtx x, rtx y)
4079 {
4080 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4081
4082 gcc_assert (insn_data[icode].operand[0].predicate
4083 (x, insn_data[icode].operand[0].mode));
4084 gcc_assert (insn_data[icode].operand[1].predicate
4085 (x, insn_data[icode].operand[1].mode));
4086 gcc_assert (insn_data[icode].operand[2].predicate
4087 (y, insn_data[icode].operand[2].mode));
4088
4089 return GEN_FCN (icode) (x, x, y);
4090 }
4091
4092 /* Generate and return an insn body to subtract c from r1,
4093 storing the result in r0. */
4094 rtx
4095 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4096 {
4097 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4098
4099 if (icode == CODE_FOR_nothing
4100 || !(insn_data[icode].operand[0].predicate
4101 (r0, insn_data[icode].operand[0].mode))
4102 || !(insn_data[icode].operand[1].predicate
4103 (r1, insn_data[icode].operand[1].mode))
4104 || !(insn_data[icode].operand[2].predicate
4105 (c, insn_data[icode].operand[2].mode)))
4106 return NULL_RTX;
4107
4108 return GEN_FCN (icode) (r0, r1, c);
4109 }
4110
4111 int
4112 have_sub2_insn (rtx x, rtx y)
4113 {
4114 int icode;
4115
4116 gcc_assert (GET_MODE (x) != VOIDmode);
4117
4118 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4119
4120 if (icode == CODE_FOR_nothing)
4121 return 0;
4122
4123 if (!(insn_data[icode].operand[0].predicate
4124 (x, insn_data[icode].operand[0].mode))
4125 || !(insn_data[icode].operand[1].predicate
4126 (x, insn_data[icode].operand[1].mode))
4127 || !(insn_data[icode].operand[2].predicate
4128 (y, insn_data[icode].operand[2].mode)))
4129 return 0;
4130
4131 return 1;
4132 }
4133
4134 /* Generate the body of an instruction to copy Y into X.
4135 It may be a list of insns, if one insn isn't enough. */
4136
4137 rtx
4138 gen_move_insn (rtx x, rtx y)
4139 {
4140 rtx seq;
4141
4142 start_sequence ();
4143 emit_move_insn_1 (x, y);
4144 seq = get_insns ();
4145 end_sequence ();
4146 return seq;
4147 }
4148 \f
4149 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4150 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4151 no such operation exists, CODE_FOR_nothing will be returned. */
4152
4153 enum insn_code
4154 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4155 int unsignedp)
4156 {
4157 convert_optab tab;
4158 #ifdef HAVE_ptr_extend
4159 if (unsignedp < 0)
4160 return CODE_FOR_ptr_extend;
4161 #endif
4162
4163 tab = unsignedp ? zext_optab : sext_optab;
4164 return tab->handlers[to_mode][from_mode].insn_code;
4165 }
4166
4167 /* Generate the body of an insn to extend Y (with mode MFROM)
4168 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4169
4170 rtx
4171 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4172 enum machine_mode mfrom, int unsignedp)
4173 {
4174 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4175 return GEN_FCN (icode) (x, y);
4176 }
4177 \f
4178 /* can_fix_p and can_float_p say whether the target machine
4179 can directly convert a given fixed point type to
4180 a given floating point type, or vice versa.
4181 The returned value is the CODE_FOR_... value to use,
4182 or CODE_FOR_nothing if these modes cannot be directly converted.
4183
4184 *TRUNCP_PTR is set to 1 if it is necessary to output
4185 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4186
4187 static enum insn_code
4188 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4189 int unsignedp, int *truncp_ptr)
4190 {
4191 convert_optab tab;
4192 enum insn_code icode;
4193
4194 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4195 icode = tab->handlers[fixmode][fltmode].insn_code;
4196 if (icode != CODE_FOR_nothing)
4197 {
4198 *truncp_ptr = 0;
4199 return icode;
4200 }
4201
4202 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4203 for this to work. We need to rework the fix* and ftrunc* patterns
4204 and documentation. */
4205 tab = unsignedp ? ufix_optab : sfix_optab;
4206 icode = tab->handlers[fixmode][fltmode].insn_code;
4207 if (icode != CODE_FOR_nothing
4208 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4209 {
4210 *truncp_ptr = 1;
4211 return icode;
4212 }
4213
4214 *truncp_ptr = 0;
4215 return CODE_FOR_nothing;
4216 }
4217
4218 static enum insn_code
4219 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4220 int unsignedp)
4221 {
4222 convert_optab tab;
4223
4224 tab = unsignedp ? ufloat_optab : sfloat_optab;
4225 return tab->handlers[fltmode][fixmode].insn_code;
4226 }
4227 \f
4228 /* Generate code to convert FROM to floating point
4229 and store in TO. FROM must be fixed point and not VOIDmode.
4230 UNSIGNEDP nonzero means regard FROM as unsigned.
4231 Normally this is done by correcting the final value
4232 if it is negative. */
4233
4234 void
4235 expand_float (rtx to, rtx from, int unsignedp)
4236 {
4237 enum insn_code icode;
4238 rtx target = to;
4239 enum machine_mode fmode, imode;
4240
4241 /* Crash now, because we won't be able to decide which mode to use. */
4242 gcc_assert (GET_MODE (from) != VOIDmode);
4243
4244 /* Look for an insn to do the conversion. Do it in the specified
4245 modes if possible; otherwise convert either input, output or both to
4246 wider mode. If the integer mode is wider than the mode of FROM,
4247 we can do the conversion signed even if the input is unsigned. */
4248
4249 for (fmode = GET_MODE (to); fmode != VOIDmode;
4250 fmode = GET_MODE_WIDER_MODE (fmode))
4251 for (imode = GET_MODE (from); imode != VOIDmode;
4252 imode = GET_MODE_WIDER_MODE (imode))
4253 {
4254 int doing_unsigned = unsignedp;
4255
4256 if (fmode != GET_MODE (to)
4257 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4258 continue;
4259
4260 icode = can_float_p (fmode, imode, unsignedp);
4261 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4262 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4263
4264 if (icode != CODE_FOR_nothing)
4265 {
4266 if (imode != GET_MODE (from))
4267 from = convert_to_mode (imode, from, unsignedp);
4268
4269 if (fmode != GET_MODE (to))
4270 target = gen_reg_rtx (fmode);
4271
4272 emit_unop_insn (icode, target, from,
4273 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4274
4275 if (target != to)
4276 convert_move (to, target, 0);
4277 return;
4278 }
4279 }
4280
4281 /* Unsigned integer, and no way to convert directly.
4282 Convert as signed, then conditionally adjust the result. */
4283 if (unsignedp)
4284 {
4285 rtx label = gen_label_rtx ();
4286 rtx temp;
4287 REAL_VALUE_TYPE offset;
4288
4289 if (flag_force_mem)
4290 from = force_not_mem (from);
4291
4292 /* Look for a usable floating mode FMODE wider than the source and at
4293 least as wide as the target. Using FMODE will avoid rounding woes
4294 with unsigned values greater than the signed maximum value. */
4295
4296 for (fmode = GET_MODE (to); fmode != VOIDmode;
4297 fmode = GET_MODE_WIDER_MODE (fmode))
4298 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4299 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4300 break;
4301
4302 if (fmode == VOIDmode)
4303 {
4304 /* There is no such mode. Pretend the target is wide enough. */
4305 fmode = GET_MODE (to);
4306
4307 /* Avoid double-rounding when TO is narrower than FROM. */
4308 if ((significand_size (fmode) + 1)
4309 < GET_MODE_BITSIZE (GET_MODE (from)))
4310 {
4311 rtx temp1;
4312 rtx neglabel = gen_label_rtx ();
4313
4314 /* Don't use TARGET if it isn't a register, is a hard register,
4315 or is the wrong mode. */
4316 if (!REG_P (target)
4317 || REGNO (target) < FIRST_PSEUDO_REGISTER
4318 || GET_MODE (target) != fmode)
4319 target = gen_reg_rtx (fmode);
4320
4321 imode = GET_MODE (from);
4322 do_pending_stack_adjust ();
4323
4324 /* Test whether the sign bit is set. */
4325 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4326 0, neglabel);
4327
4328 /* The sign bit is not set. Convert as signed. */
4329 expand_float (target, from, 0);
4330 emit_jump_insn (gen_jump (label));
4331 emit_barrier ();
4332
4333 /* The sign bit is set.
4334 Convert to a usable (positive signed) value by shifting right
4335 one bit, while remembering if a nonzero bit was shifted
4336 out; i.e., compute (from & 1) | (from >> 1). */
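/* The same trick written as plain C, for illustration only (it assumes a
   64-bit unsigned source and is not part of the generated code):

     double
     unsigned_to_double (unsigned long long u)
     {
       unsigned long long half = (u >> 1) | (u & 1);  // halve, keep a sticky bit
       double d = (double) (long long) half;          // now fits as a signed value
       return d + d;                                  // undo the halving
     }
*/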
4337
4338 emit_label (neglabel);
4339 temp = expand_binop (imode, and_optab, from, const1_rtx,
4340 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4341 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4342 NULL_RTX, 1);
4343 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4344 OPTAB_LIB_WIDEN);
4345 expand_float (target, temp, 0);
4346
4347 /* Multiply by 2 to undo the shift above. */
4348 temp = expand_binop (fmode, add_optab, target, target,
4349 target, 0, OPTAB_LIB_WIDEN);
4350 if (temp != target)
4351 emit_move_insn (target, temp);
4352
4353 do_pending_stack_adjust ();
4354 emit_label (label);
4355 goto done;
4356 }
4357 }
4358
4359 /* If we are about to do some arithmetic to correct for an
4360 unsigned operand, do it in a pseudo-register. */
4361
4362 if (GET_MODE (to) != fmode
4363 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4364 target = gen_reg_rtx (fmode);
4365
4366 /* Convert as signed integer to floating. */
4367 expand_float (target, from, 0);
4368
4369 /* If FROM is negative (and therefore TO is negative),
4370 correct its value by 2**bitwidth. */
4371
4372 do_pending_stack_adjust ();
4373 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4374 0, label);
4375
4376
4377 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4378 temp = expand_binop (fmode, add_optab, target,
4379 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4380 target, 0, OPTAB_LIB_WIDEN);
4381 if (temp != target)
4382 emit_move_insn (target, temp);
4383
4384 do_pending_stack_adjust ();
4385 emit_label (label);
4386 goto done;
4387 }
4388
4389 /* No hardware instruction available; call a library routine. */
4390 {
4391 rtx libfunc;
4392 rtx insns;
4393 rtx value;
4394 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4395
4396 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4397 from = convert_to_mode (SImode, from, unsignedp);
4398
4399 if (flag_force_mem)
4400 from = force_not_mem (from);
4401
4402 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4403 gcc_assert (libfunc);
4404
4405 start_sequence ();
4406
4407 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4408 GET_MODE (to), 1, from,
4409 GET_MODE (from));
4410 insns = get_insns ();
4411 end_sequence ();
4412
4413 emit_libcall_block (insns, target, value,
4414 gen_rtx_FLOAT (GET_MODE (to), from));
4415 }
4416
4417 done:
4418
4419 /* Copy result to requested destination
4420 if we have been computing in a temp location. */
4421
4422 if (target != to)
4423 {
4424 if (GET_MODE (target) == GET_MODE (to))
4425 emit_move_insn (to, target);
4426 else
4427 convert_move (to, target, 0);
4428 }
4429 }
4430 \f
4431 /* Generate code to convert FROM to fixed point and store in TO. FROM
4432 must be floating point. */
4433
4434 void
4435 expand_fix (rtx to, rtx from, int unsignedp)
4436 {
4437 enum insn_code icode;
4438 rtx target = to;
4439 enum machine_mode fmode, imode;
4440 int must_trunc = 0;
4441
4442 /* We first try to find a pair of modes, one real and one integer, at
4443 least as wide as FROM and TO, respectively, in which we can open-code
4444 this conversion. If the integer mode is wider than the mode of TO,
4445 we can do the conversion either signed or unsigned. */
4446
4447 for (fmode = GET_MODE (from); fmode != VOIDmode;
4448 fmode = GET_MODE_WIDER_MODE (fmode))
4449 for (imode = GET_MODE (to); imode != VOIDmode;
4450 imode = GET_MODE_WIDER_MODE (imode))
4451 {
4452 int doing_unsigned = unsignedp;
4453
4454 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4455 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4456 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4457
4458 if (icode != CODE_FOR_nothing)
4459 {
4460 if (fmode != GET_MODE (from))
4461 from = convert_to_mode (fmode, from, 0);
4462
4463 if (must_trunc)
4464 {
4465 rtx temp = gen_reg_rtx (GET_MODE (from));
4466 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4467 temp, 0);
4468 }
4469
4470 if (imode != GET_MODE (to))
4471 target = gen_reg_rtx (imode);
4472
4473 emit_unop_insn (icode, target, from,
4474 doing_unsigned ? UNSIGNED_FIX : FIX);
4475 if (target != to)
4476 convert_move (to, target, unsignedp);
4477 return;
4478 }
4479 }
4480
4481 /* For an unsigned conversion, there is one more way to do it.
4482 If we have a signed conversion insn available, we generate code that
4483 compares the real value to the largest representable positive number.
4484 If it is smaller, the conversion is done normally. Otherwise, subtract
4485 one plus the highest signed number, convert, and add it back.
4486
4487 We only need to check all real modes, since we know we didn't find
4488 anything with a wider integer mode.
4489
4490 This code used to extend the FP value into a mode wider than the
4491 destination. This is not needed. Consider, for instance, conversion
4492 from SFmode into DImode.
4493
4494 The hot path through the code deals with inputs smaller than 2^63
4495 and does just the conversion, so there are no bits to lose.
4496
4497 In the other path we know the value is positive and in the range
4498 2^63..2^64-1 inclusive (for any other input, overflow happens and the
4499 result is undefined), so the most significant bit set in the mantissa
4500 corresponds to 2^63. The subtraction of 2^63 does not generate any
4501 rounding, as it simply clears out that bit. The rest is trivial. */
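/* The same fallback written as plain C, for illustration only (it assumes
   a double source and a 64-bit unsigned destination; it is not part of
   the generated code):

     unsigned long long
     double_to_unsigned (double d)
     {
       const double limit = 9223372036854775808.0;    // 2^63
       if (d < limit)
         return (unsigned long long) (long long) d;   // plain signed fix
       // Subtract 2^63, convert as signed, then add 2^63 back via XOR.
       return (unsigned long long) (long long) (d - limit)
              ^ 0x8000000000000000ULL;
     }
*/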
4502
4503 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4504 for (fmode = GET_MODE (from); fmode != VOIDmode;
4505 fmode = GET_MODE_WIDER_MODE (fmode))
4506 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4507 &must_trunc))
4508 {
4509 int bitsize;
4510 REAL_VALUE_TYPE offset;
4511 rtx limit, lab1, lab2, insn;
4512
4513 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4514 real_2expN (&offset, bitsize - 1);
4515 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4516 lab1 = gen_label_rtx ();
4517 lab2 = gen_label_rtx ();
4518
4519 if (flag_force_mem)
4520 from = force_not_mem (from);
4521
4522 if (fmode != GET_MODE (from))
4523 from = convert_to_mode (fmode, from, 0);
4524
4525 /* See if we need to do the subtraction. */
4526 do_pending_stack_adjust ();
4527 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4528 0, lab1);
4529
4530 /* If not, do the signed "fix" and branch around fixup code. */
4531 expand_fix (to, from, 0);
4532 emit_jump_insn (gen_jump (lab2));
4533 emit_barrier ();
4534
4535 /* Otherwise, subtract 2**(N-1), convert to signed number,
4536 then add 2**(N-1). Do the addition using XOR since this
4537 will often generate better code. */
4538 emit_label (lab1);
4539 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4540 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4541 expand_fix (to, target, 0);
4542 target = expand_binop (GET_MODE (to), xor_optab, to,
4543 gen_int_mode
4544 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4545 GET_MODE (to)),
4546 to, 1, OPTAB_LIB_WIDEN);
4547
4548 if (target != to)
4549 emit_move_insn (to, target);
4550
4551 emit_label (lab2);
4552
4553 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4554 != CODE_FOR_nothing)
4555 {
4556 /* Make a place for a REG_NOTE and add it. */
4557 insn = emit_move_insn (to, to);
4558 set_unique_reg_note (insn,
4559 REG_EQUAL,
4560 gen_rtx_fmt_e (UNSIGNED_FIX,
4561 GET_MODE (to),
4562 copy_rtx (from)));
4563 }
4564
4565 return;
4566 }
4567
4568 /* We can't do it with an insn, so use a library call. But first ensure
4569 that the mode of TO is at least as wide as SImode, since those are the
4570 only library calls we know about. */
4571
4572 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4573 {
4574 target = gen_reg_rtx (SImode);
4575
4576 expand_fix (target, from, unsignedp);
4577 }
4578 else
4579 {
4580 rtx insns;
4581 rtx value;
4582 rtx libfunc;
4583
4584 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4585 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4586 gcc_assert (libfunc);
4587
4588 if (flag_force_mem)
4589 from = force_not_mem (from);
4590
4591 start_sequence ();
4592
4593 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4594 GET_MODE (to), 1, from,
4595 GET_MODE (from));
4596 insns = get_insns ();
4597 end_sequence ();
4598
4599 emit_libcall_block (insns, target, value,
4600 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4601 GET_MODE (to), from));
4602 }
4603
4604 if (target != to)
4605 {
4606 if (GET_MODE (to) == GET_MODE (target))
4607 emit_move_insn (to, target);
4608 else
4609 convert_move (to, target, 0);
4610 }
4611 }
4612 \f
4613 /* Report whether we have an instruction to perform the operation
4614 specified by CODE on operands of mode MODE. */
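/* For example, have_insn_for (PLUS, SImode) is nonzero when the target
   provides an addsi3 pattern. */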
4615 int
4616 have_insn_for (enum rtx_code code, enum machine_mode mode)
4617 {
4618 return (code_to_optab[(int) code] != 0
4619 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4620 != CODE_FOR_nothing));
4621 }
4622
4623 /* Create a blank optab. */
4624 static optab
4625 new_optab (void)
4626 {
4627 int i;
4628 optab op = ggc_alloc (sizeof (struct optab));
4629 for (i = 0; i < NUM_MACHINE_MODES; i++)
4630 {
4631 op->handlers[i].insn_code = CODE_FOR_nothing;
4632 op->handlers[i].libfunc = 0;
4633 }
4634
4635 return op;
4636 }
4637
4638 static convert_optab
4639 new_convert_optab (void)
4640 {
4641 int i, j;
4642 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4643 for (i = 0; i < NUM_MACHINE_MODES; i++)
4644 for (j = 0; j < NUM_MACHINE_MODES; j++)
4645 {
4646 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4647 op->handlers[i][j].libfunc = 0;
4648 }
4649 return op;
4650 }
4651
4652 /* Same, but fill in its code as CODE, and write it into the
4653 code_to_optab table. */
4654 static inline optab
4655 init_optab (enum rtx_code code)
4656 {
4657 optab op = new_optab ();
4658 op->code = code;
4659 code_to_optab[(int) code] = op;
4660 return op;
4661 }
4662
4663 /* Same, but fill in its code as CODE, and do _not_ write it into
4664 the code_to_optab table. */
4665 static inline optab
4666 init_optabv (enum rtx_code code)
4667 {
4668 optab op = new_optab ();
4669 op->code = code;
4670 return op;
4671 }
4672
4673 /* Conversion optabs never go in the code_to_optab table. */
4674 static inline convert_optab
4675 init_convert_optab (enum rtx_code code)
4676 {
4677 convert_optab op = new_convert_optab ();
4678 op->code = code;
4679 return op;
4680 }
4681
4682 /* Initialize the libfunc fields of an entire group of entries in some
4683 optab. Each entry is set equal to a string consisting of a leading
4684 pair of underscores followed by a generic operation name followed by
4685 a mode name (downshifted to lowercase) followed by a single character
4686 representing the number of operands for the given operation (which is
4687 usually one of the characters '2', '3', or '4').
4688
4689 OPTABLE is the table in which libfunc fields are to be initialized.
4690 FIRST_MODE is the first machine mode index in the given optab to
4691 initialize.
4692 LAST_MODE is the last machine mode index in the given optab to
4693 initialize.
4694 OPNAME is the generic (string) name of the operation.
4695 SUFFIX is the character which specifies the number of operands for
4696 the given generic operation.
4697 */
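/* For example, a call such as init_libfuncs (add_optab, SImode, SImode,
   "add", '3') would register the libgcc name "__addsi3" for SImode
   addition. */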
4698
4699 static void
4700 init_libfuncs (optab optable, int first_mode, int last_mode,
4701 const char *opname, int suffix)
4702 {
4703 int mode;
4704 unsigned opname_len = strlen (opname);
4705
4706 for (mode = first_mode; (int) mode <= (int) last_mode;
4707 mode = (enum machine_mode) ((int) mode + 1))
4708 {
4709 const char *mname = GET_MODE_NAME (mode);
4710 unsigned mname_len = strlen (mname);
4711 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4712 char *p;
4713 const char *q;
4714
4715 p = libfunc_name;
4716 *p++ = '_';
4717 *p++ = '_';
4718 for (q = opname; *q; )
4719 *p++ = *q++;
4720 for (q = mname; *q; q++)
4721 *p++ = TOLOWER (*q);
4722 *p++ = suffix;
4723 *p = '\0';
4724
4725 optable->handlers[(int) mode].libfunc
4726 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4727 }
4728 }
4729
4730 /* Initialize the libfunc fields of an entire group of entries in some
4731 optab which correspond to all integer mode operations. The parameters
4732 have the same meaning as similarly named ones for the `init_libfuncs'
4733 routine. (See above). */
4734
4735 static void
4736 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4737 {
4738 int maxsize = 2*BITS_PER_WORD;
4739 if (maxsize < LONG_LONG_TYPE_SIZE)
4740 maxsize = LONG_LONG_TYPE_SIZE;
4741 init_libfuncs (optable, word_mode,
4742 mode_for_size (maxsize, MODE_INT, 0),
4743 opname, suffix);
4744 }
4745
4746 /* Initialize the libfunc fields of an entire group of entries in some
4747 optab which correspond to all real mode operations. The parameters
4748 have the same meaning as similarly named ones for the `init_libfuncs'
4749 routine. (See above). */
4750
4751 static void
4752 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4753 {
4754 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4755 }
4756
4757 /* Initialize the libfunc fields of an entire group of entries of an
4758 inter-mode-class conversion optab. The string formation rules are
4759 similar to the ones for init_libfuncs, above, but instead of having
4760 a mode name and an operand count these functions have two mode names
4761 and no operand count. */
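/* For example, the call init_interclass_conv_libfuncs (sfloat_optab,
   "float", MODE_INT, MODE_FLOAT) made from init_optabs below registers
   names such as "__floatsisf" and "__floatdidf". */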
4762 static void
4763 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4764 enum mode_class from_class,
4765 enum mode_class to_class)
4766 {
4767 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4768 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4769 size_t opname_len = strlen (opname);
4770 size_t max_mname_len = 0;
4771
4772 enum machine_mode fmode, tmode;
4773 const char *fname, *tname;
4774 const char *q;
4775 char *libfunc_name, *suffix;
4776 char *p;
4777
4778 for (fmode = first_from_mode;
4779 fmode != VOIDmode;
4780 fmode = GET_MODE_WIDER_MODE (fmode))
4781 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4782
4783 for (tmode = first_to_mode;
4784 tmode != VOIDmode;
4785 tmode = GET_MODE_WIDER_MODE (tmode))
4786 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4787
4788 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4789 libfunc_name[0] = '_';
4790 libfunc_name[1] = '_';
4791 memcpy (&libfunc_name[2], opname, opname_len);
4792 suffix = libfunc_name + opname_len + 2;
4793
4794 for (fmode = first_from_mode; fmode != VOIDmode;
4795 fmode = GET_MODE_WIDER_MODE (fmode))
4796 for (tmode = first_to_mode; tmode != VOIDmode;
4797 tmode = GET_MODE_WIDER_MODE (tmode))
4798 {
4799 fname = GET_MODE_NAME (fmode);
4800 tname = GET_MODE_NAME (tmode);
4801
4802 p = suffix;
4803 for (q = fname; *q; p++, q++)
4804 *p = TOLOWER (*q);
4805 for (q = tname; *q; p++, q++)
4806 *p = TOLOWER (*q);
4807
4808 *p = '\0';
4809
4810 tab->handlers[tmode][fmode].libfunc
4811 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4812 p - libfunc_name));
4813 }
4814 }
4815
4816 /* Initialize the libfunc fields of an entire group of entries of an
4817 intra-mode-class conversion optab. The string formation rules are
4818 similar to the ones for init_libfuncs, above. WIDENING says whether
4819 the optab goes from narrow to wide modes or vice versa. These functions
4820 have two mode names _and_ an operand count. */
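/* For example, init_intraclass_conv_libfuncs (sext_optab, "extend",
   MODE_FLOAT, true) registers names such as "__extendsfdf2", while
   init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false)
   registers names such as "__truncdfsf2". */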
4821 static void
4822 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4823 enum mode_class class, bool widening)
4824 {
4825 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4826 size_t opname_len = strlen (opname);
4827 size_t max_mname_len = 0;
4828
4829 enum machine_mode nmode, wmode;
4830 const char *nname, *wname;
4831 const char *q;
4832 char *libfunc_name, *suffix;
4833 char *p;
4834
4835 for (nmode = first_mode; nmode != VOIDmode;
4836 nmode = GET_MODE_WIDER_MODE (nmode))
4837 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4838
4839 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4840 libfunc_name[0] = '_';
4841 libfunc_name[1] = '_';
4842 memcpy (&libfunc_name[2], opname, opname_len);
4843 suffix = libfunc_name + opname_len + 2;
4844
4845 for (nmode = first_mode; nmode != VOIDmode;
4846 nmode = GET_MODE_WIDER_MODE (nmode))
4847 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4848 wmode = GET_MODE_WIDER_MODE (wmode))
4849 {
4850 nname = GET_MODE_NAME (nmode);
4851 wname = GET_MODE_NAME (wmode);
4852
4853 p = suffix;
4854 for (q = widening ? nname : wname; *q; p++, q++)
4855 *p = TOLOWER (*q);
4856 for (q = widening ? wname : nname; *q; p++, q++)
4857 *p = TOLOWER (*q);
4858
4859 *p++ = '2';
4860 *p = '\0';
4861
4862 tab->handlers[widening ? wmode : nmode]
4863 [widening ? nmode : wmode].libfunc
4864 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4865 p - libfunc_name));
4866 }
4867 }
4868
4869
4870 rtx
4871 init_one_libfunc (const char *name)
4872 {
4873 rtx symbol;
4874
4875 /* Create a FUNCTION_DECL that can be passed to
4876 targetm.encode_section_info. */
4877 /* ??? We don't have any type information except that this is
4878 a function. Pretend this is "int foo()". */
4879 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4880 build_function_type (integer_type_node, NULL_TREE));
4881 DECL_ARTIFICIAL (decl) = 1;
4882 DECL_EXTERNAL (decl) = 1;
4883 TREE_PUBLIC (decl) = 1;
4884
4885 symbol = XEXP (DECL_RTL (decl), 0);
4886
4887 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4888 are the flags assigned by targetm.encode_section_info. */
4889 SYMBOL_REF_DECL (symbol) = 0;
4890
4891 return symbol;
4892 }
4893
4894 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4895 MODE to NAME, which should be either 0 or a string constant. */
4896 void
4897 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4898 {
4899 if (name)
4900 optable->handlers[mode].libfunc = init_one_libfunc (name);
4901 else
4902 optable->handlers[mode].libfunc = 0;
4903 }
4904
4905 /* Call this to reset the function entry for one conversion optab
4906 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4907 either 0 or a string constant. */
4908 void
4909 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4910 enum machine_mode fmode, const char *name)
4911 {
4912 if (name)
4913 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4914 else
4915 optable->handlers[tmode][fmode].libfunc = 0;
4916 }
4917
4918 /* Call this once to initialize the contents of the optabs
4919 appropriately for the current target machine. */
4920
4921 void
4922 init_optabs (void)
4923 {
4924 unsigned int i;
4925
4926 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4927
4928 for (i = 0; i < NUM_RTX_CODE; i++)
4929 setcc_gen_code[i] = CODE_FOR_nothing;
4930
4931 #ifdef HAVE_conditional_move
4932 for (i = 0; i < NUM_MACHINE_MODES; i++)
4933 movcc_gen_code[i] = CODE_FOR_nothing;
4934 #endif
4935
4936 for (i = 0; i < NUM_MACHINE_MODES; i++)
4937 {
4938 vcond_gen_code[i] = CODE_FOR_nothing;
4939 vcondu_gen_code[i] = CODE_FOR_nothing;
4940 }
4941
4942 add_optab = init_optab (PLUS);
4943 addv_optab = init_optabv (PLUS);
4944 sub_optab = init_optab (MINUS);
4945 subv_optab = init_optabv (MINUS);
4946 smul_optab = init_optab (MULT);
4947 smulv_optab = init_optabv (MULT);
4948 smul_highpart_optab = init_optab (UNKNOWN);
4949 umul_highpart_optab = init_optab (UNKNOWN);
4950 smul_widen_optab = init_optab (UNKNOWN);
4951 umul_widen_optab = init_optab (UNKNOWN);
4952 sdiv_optab = init_optab (DIV);
4953 sdivv_optab = init_optabv (DIV);
4954 sdivmod_optab = init_optab (UNKNOWN);
4955 udiv_optab = init_optab (UDIV);
4956 udivmod_optab = init_optab (UNKNOWN);
4957 smod_optab = init_optab (MOD);
4958 umod_optab = init_optab (UMOD);
4959 fmod_optab = init_optab (UNKNOWN);
4960 drem_optab = init_optab (UNKNOWN);
4961 ftrunc_optab = init_optab (UNKNOWN);
4962 and_optab = init_optab (AND);
4963 ior_optab = init_optab (IOR);
4964 xor_optab = init_optab (XOR);
4965 ashl_optab = init_optab (ASHIFT);
4966 ashr_optab = init_optab (ASHIFTRT);
4967 lshr_optab = init_optab (LSHIFTRT);
4968 rotl_optab = init_optab (ROTATE);
4969 rotr_optab = init_optab (ROTATERT);
4970 smin_optab = init_optab (SMIN);
4971 smax_optab = init_optab (SMAX);
4972 umin_optab = init_optab (UMIN);
4973 umax_optab = init_optab (UMAX);
4974 pow_optab = init_optab (UNKNOWN);
4975 atan2_optab = init_optab (UNKNOWN);
4976
4977 /* These three have codes assigned exclusively for the sake of
4978 have_insn_for. */
4979 mov_optab = init_optab (SET);
4980 movstrict_optab = init_optab (STRICT_LOW_PART);
4981 cmp_optab = init_optab (COMPARE);
4982
4983 ucmp_optab = init_optab (UNKNOWN);
4984 tst_optab = init_optab (UNKNOWN);
4985
4986 eq_optab = init_optab (EQ);
4987 ne_optab = init_optab (NE);
4988 gt_optab = init_optab (GT);
4989 ge_optab = init_optab (GE);
4990 lt_optab = init_optab (LT);
4991 le_optab = init_optab (LE);
4992 unord_optab = init_optab (UNORDERED);
4993
4994 neg_optab = init_optab (NEG);
4995 negv_optab = init_optabv (NEG);
4996 abs_optab = init_optab (ABS);
4997 absv_optab = init_optabv (ABS);
4998 addcc_optab = init_optab (UNKNOWN);
4999 one_cmpl_optab = init_optab (NOT);
5000 ffs_optab = init_optab (FFS);
5001 clz_optab = init_optab (CLZ);
5002 ctz_optab = init_optab (CTZ);
5003 popcount_optab = init_optab (POPCOUNT);
5004 parity_optab = init_optab (PARITY);
5005 sqrt_optab = init_optab (SQRT);
5006 floor_optab = init_optab (UNKNOWN);
5007 lfloor_optab = init_optab (UNKNOWN);
5008 ceil_optab = init_optab (UNKNOWN);
5009 lceil_optab = init_optab (UNKNOWN);
5010 round_optab = init_optab (UNKNOWN);
5011 btrunc_optab = init_optab (UNKNOWN);
5012 nearbyint_optab = init_optab (UNKNOWN);
5013 rint_optab = init_optab (UNKNOWN);
5014 lrint_optab = init_optab (UNKNOWN);
5015 sincos_optab = init_optab (UNKNOWN);
5016 sin_optab = init_optab (UNKNOWN);
5017 asin_optab = init_optab (UNKNOWN);
5018 cos_optab = init_optab (UNKNOWN);
5019 acos_optab = init_optab (UNKNOWN);
5020 exp_optab = init_optab (UNKNOWN);
5021 exp10_optab = init_optab (UNKNOWN);
5022 exp2_optab = init_optab (UNKNOWN);
5023 expm1_optab = init_optab (UNKNOWN);
5024 ldexp_optab = init_optab (UNKNOWN);
5025 logb_optab = init_optab (UNKNOWN);
5026 ilogb_optab = init_optab (UNKNOWN);
5027 log_optab = init_optab (UNKNOWN);
5028 log10_optab = init_optab (UNKNOWN);
5029 log2_optab = init_optab (UNKNOWN);
5030 log1p_optab = init_optab (UNKNOWN);
5031 tan_optab = init_optab (UNKNOWN);
5032 atan_optab = init_optab (UNKNOWN);
5033 copysign_optab = init_optab (UNKNOWN);
5034
5035 strlen_optab = init_optab (UNKNOWN);
5036 cbranch_optab = init_optab (UNKNOWN);
5037 cmov_optab = init_optab (UNKNOWN);
5038 cstore_optab = init_optab (UNKNOWN);
5039 push_optab = init_optab (UNKNOWN);
5040
5041 vec_extract_optab = init_optab (UNKNOWN);
5042 vec_set_optab = init_optab (UNKNOWN);
5043 vec_init_optab = init_optab (UNKNOWN);
5044 vec_realign_load_optab = init_optab (UNKNOWN);
5045 movmisalign_optab = init_optab (UNKNOWN);
5046
5047 powi_optab = init_optab (UNKNOWN);
5048
5049 /* Conversions. */
5050 sext_optab = init_convert_optab (SIGN_EXTEND);
5051 zext_optab = init_convert_optab (ZERO_EXTEND);
5052 trunc_optab = init_convert_optab (TRUNCATE);
5053 sfix_optab = init_convert_optab (FIX);
5054 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5055 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5056 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5057 sfloat_optab = init_convert_optab (FLOAT);
5058 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5059
5060 for (i = 0; i < NUM_MACHINE_MODES; i++)
5061 {
5062 movmem_optab[i] = CODE_FOR_nothing;
5063 clrmem_optab[i] = CODE_FOR_nothing;
5064 cmpstr_optab[i] = CODE_FOR_nothing;
5065 cmpmem_optab[i] = CODE_FOR_nothing;
5066
5067 sync_add_optab[i] = CODE_FOR_nothing;
5068 sync_sub_optab[i] = CODE_FOR_nothing;
5069 sync_ior_optab[i] = CODE_FOR_nothing;
5070 sync_and_optab[i] = CODE_FOR_nothing;
5071 sync_xor_optab[i] = CODE_FOR_nothing;
5072 sync_nand_optab[i] = CODE_FOR_nothing;
5073 sync_old_add_optab[i] = CODE_FOR_nothing;
5074 sync_old_sub_optab[i] = CODE_FOR_nothing;
5075 sync_old_ior_optab[i] = CODE_FOR_nothing;
5076 sync_old_and_optab[i] = CODE_FOR_nothing;
5077 sync_old_xor_optab[i] = CODE_FOR_nothing;
5078 sync_old_nand_optab[i] = CODE_FOR_nothing;
5079 sync_new_add_optab[i] = CODE_FOR_nothing;
5080 sync_new_sub_optab[i] = CODE_FOR_nothing;
5081 sync_new_ior_optab[i] = CODE_FOR_nothing;
5082 sync_new_and_optab[i] = CODE_FOR_nothing;
5083 sync_new_xor_optab[i] = CODE_FOR_nothing;
5084 sync_new_nand_optab[i] = CODE_FOR_nothing;
5085 sync_compare_and_swap[i] = CODE_FOR_nothing;
5086 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5087 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5088 sync_lock_release[i] = CODE_FOR_nothing;
5089
5090 #ifdef HAVE_SECONDARY_RELOADS
5091 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5092 #endif
5093 }
5094
5095 /* Fill in the optabs with the insns we support. */
5096 init_all_optabs ();
5097
5098 /* Initialize the optabs with the names of the library functions. */
5099 init_integral_libfuncs (add_optab, "add", '3');
5100 init_floating_libfuncs (add_optab, "add", '3');
5101 init_integral_libfuncs (addv_optab, "addv", '3');
5102 init_floating_libfuncs (addv_optab, "add", '3');
5103 init_integral_libfuncs (sub_optab, "sub", '3');
5104 init_floating_libfuncs (sub_optab, "sub", '3');
5105 init_integral_libfuncs (subv_optab, "subv", '3');
5106 init_floating_libfuncs (subv_optab, "sub", '3');
5107 init_integral_libfuncs (smul_optab, "mul", '3');
5108 init_floating_libfuncs (smul_optab, "mul", '3');
5109 init_integral_libfuncs (smulv_optab, "mulv", '3');
5110 init_floating_libfuncs (smulv_optab, "mul", '3');
5111 init_integral_libfuncs (sdiv_optab, "div", '3');
5112 init_floating_libfuncs (sdiv_optab, "div", '3');
5113 init_integral_libfuncs (sdivv_optab, "divv", '3');
5114 init_integral_libfuncs (udiv_optab, "udiv", '3');
5115 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5116 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5117 init_integral_libfuncs (smod_optab, "mod", '3');
5118 init_integral_libfuncs (umod_optab, "umod", '3');
5119 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5120 init_integral_libfuncs (and_optab, "and", '3');
5121 init_integral_libfuncs (ior_optab, "ior", '3');
5122 init_integral_libfuncs (xor_optab, "xor", '3');
5123 init_integral_libfuncs (ashl_optab, "ashl", '3');
5124 init_integral_libfuncs (ashr_optab, "ashr", '3');
5125 init_integral_libfuncs (lshr_optab, "lshr", '3');
5126 init_integral_libfuncs (smin_optab, "min", '3');
5127 init_floating_libfuncs (smin_optab, "min", '3');
5128 init_integral_libfuncs (smax_optab, "max", '3');
5129 init_floating_libfuncs (smax_optab, "max", '3');
5130 init_integral_libfuncs (umin_optab, "umin", '3');
5131 init_integral_libfuncs (umax_optab, "umax", '3');
5132 init_integral_libfuncs (neg_optab, "neg", '2');
5133 init_floating_libfuncs (neg_optab, "neg", '2');
5134 init_integral_libfuncs (negv_optab, "negv", '2');
5135 init_floating_libfuncs (negv_optab, "neg", '2');
5136 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5137 init_integral_libfuncs (ffs_optab, "ffs", '2');
5138 init_integral_libfuncs (clz_optab, "clz", '2');
5139 init_integral_libfuncs (ctz_optab, "ctz", '2');
5140 init_integral_libfuncs (popcount_optab, "popcount", '2');
5141 init_integral_libfuncs (parity_optab, "parity", '2');
5142
5143 /* Comparison libcalls for integers MUST come in pairs,
5144 signed/unsigned. */
5145 init_integral_libfuncs (cmp_optab, "cmp", '2');
5146 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5147 init_floating_libfuncs (cmp_optab, "cmp", '2');
5148
5149 /* EQ etc are floating point only. */
5150 init_floating_libfuncs (eq_optab, "eq", '2');
5151 init_floating_libfuncs (ne_optab, "ne", '2');
5152 init_floating_libfuncs (gt_optab, "gt", '2');
5153 init_floating_libfuncs (ge_optab, "ge", '2');
5154 init_floating_libfuncs (lt_optab, "lt", '2');
5155 init_floating_libfuncs (le_optab, "le", '2');
5156 init_floating_libfuncs (unord_optab, "unord", '2');
5157
5158 init_floating_libfuncs (powi_optab, "powi", '2');
5159
5160 /* Conversions. */
5161 init_interclass_conv_libfuncs (sfloat_optab, "float",
5162 MODE_INT, MODE_FLOAT);
5163 init_interclass_conv_libfuncs (sfix_optab, "fix",
5164 MODE_FLOAT, MODE_INT);
5165 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5166 MODE_FLOAT, MODE_INT);
5167
5168 /* sext_optab is also used for FLOAT_EXTEND. */
5169 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5170 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5171
5172 /* Use cabs for double complex abs, since systems generally have cabs.
5173 Don't define any libcall for float complex, so that cabs will be used. */
5174 if (complex_double_type_node)
5175 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5176 = init_one_libfunc ("cabs");
5177
5178 /* The ffs function operates on `int'. */
5179 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5180 = init_one_libfunc ("ffs");
5181
5182 abort_libfunc = init_one_libfunc ("abort");
5183 memcpy_libfunc = init_one_libfunc ("memcpy");
5184 memmove_libfunc = init_one_libfunc ("memmove");
5185 memcmp_libfunc = init_one_libfunc ("memcmp");
5186 memset_libfunc = init_one_libfunc ("memset");
5187 setbits_libfunc = init_one_libfunc ("__setbits");
5188
5189 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5190 ? "_Unwind_SjLj_Resume"
5191 : "_Unwind_Resume");
5192 #ifndef DONT_USE_BUILTIN_SETJMP
5193 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5194 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5195 #else
5196 setjmp_libfunc = init_one_libfunc ("setjmp");
5197 longjmp_libfunc = init_one_libfunc ("longjmp");
5198 #endif
5199 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5200 unwind_sjlj_unregister_libfunc
5201 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5202
5203 /* For function entry/exit instrumentation. */
5204 profile_function_entry_libfunc
5205 = init_one_libfunc ("__cyg_profile_func_enter");
5206 profile_function_exit_libfunc
5207 = init_one_libfunc ("__cyg_profile_func_exit");
5208
5209 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5210
5211 if (HAVE_conditional_trap)
5212 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5213
5214 /* Allow the target to add more libcalls or rename some, etc. */
5215 targetm.init_libfuncs ();
5216 }
5217
5218 #ifdef DEBUG
5219
5220 /* Print information about the current contents of the optabs on
5221 STDERR. */
5222
5223 static void
5224 debug_optab_libfuncs (void)
5225 {
5226 int i;
5227 int j;
5228 int k;
5229
5230 /* Dump the arithmetic optabs. */
5231 for (i = 0; i != (int) OTI_MAX; i++)
5232 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5233 {
5234 optab o;
5235 struct optab_handlers *h;
5236
5237 o = optab_table[i];
5238 h = &o->handlers[j];
5239 if (h->libfunc)
5240 {
5241 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5242 fprintf (stderr, "%s\t%s:\t%s\n",
5243 GET_RTX_NAME (o->code),
5244 GET_MODE_NAME (j),
5245 XSTR (h->libfunc, 0));
5246 }
5247 }
5248
5249 /* Dump the conversion optabs. */
5250 for (i = 0; i < (int) CTI_MAX; ++i)
5251 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5252 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5253 {
5254 convert_optab o;
5255 struct optab_handlers *h;
5256
5257 o = convert_optab_table[i];
5258 h = &o->handlers[j][k];
5259 if (h->libfunc)
5260 {
5261 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5262 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5263 GET_RTX_NAME (o->code),
5264 GET_MODE_NAME (j),
5265 GET_MODE_NAME (k),
5266 XSTR (h->libfunc, 0));
5267 }
5268 }
5269 }
5270
5271 #endif /* DEBUG */
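
/* For illustration, debug_optab_libfuncs (only built with -DDEBUG) prints
   one line per registered libcall in the form "<rtx name>\t<mode>:\t<name>";
   a target that routes SImode addition through libgcc would show a line
   along the lines of "plus   SI:     __addsi3".  The exact entries are
   target dependent.  */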
5272
5273 \f
5274 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5275 CODE. Return 0 on failure. */
5276
5277 rtx
5278 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5279 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5280 {
5281 enum machine_mode mode = GET_MODE (op1);
5282 enum insn_code icode;
5283 rtx insn;
5284
5285 if (!HAVE_conditional_trap)
5286 return 0;
5287
5288 if (mode == VOIDmode)
5289 return 0;
5290
5291 icode = cmp_optab->handlers[(int) mode].insn_code;
5292 if (icode == CODE_FOR_nothing)
5293 return 0;
5294
5295 start_sequence ();
5296 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5297 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5298 if (!op1 || !op2)
5299 {
5300 end_sequence ();
5301 return 0;
5302 }
5303 emit_insn (GEN_FCN (icode) (op1, op2));
5304
5305 PUT_CODE (trap_rtx, code);
5306 gcc_assert (HAVE_conditional_trap);
5307 insn = gen_conditional_trap (trap_rtx, tcode);
5308 if (insn)
5309 {
5310 emit_insn (insn);
5311 insn = get_insns ();
5312 }
5313 end_sequence ();
5314
5315 return insn;
5316 }
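
/* A minimal usage sketch (hypothetical variable names): a caller expands
   its operands and emits the returned sequence only if the target could
   provide a conditional trap, e.g.

     rtx seq = gen_cond_trap (EQ, op0, op1, GEN_INT (0));
     if (seq)
       emit_insn (seq);
     else
       ;  /+ fall back to an explicit compare, branch and trap +/

   The returned value is the whole insn sequence built above, or 0 when
   no conditional_trap pattern or usable compare exists for the mode.  */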
5317
5318 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5319 or unsigned operation code. */
5320
5321 static enum rtx_code
5322 get_rtx_code (enum tree_code tcode, bool unsignedp)
5323 {
5324 enum rtx_code code;
5325 switch (tcode)
5326 {
5327 case EQ_EXPR:
5328 code = EQ;
5329 break;
5330 case NE_EXPR:
5331 code = NE;
5332 break;
5333 case LT_EXPR:
5334 code = unsignedp ? LTU : LT;
5335 break;
5336 case LE_EXPR:
5337 code = unsignedp ? LEU : LE;
5338 break;
5339 case GT_EXPR:
5340 code = unsignedp ? GTU : GT;
5341 break;
5342 case GE_EXPR:
5343 code = unsignedp ? GEU : GE;
5344 break;
5345
5346 case UNORDERED_EXPR:
5347 code = UNORDERED;
5348 break;
5349 case ORDERED_EXPR:
5350 code = ORDERED;
5351 break;
5352 case UNLT_EXPR:
5353 code = UNLT;
5354 break;
5355 case UNLE_EXPR:
5356 code = UNLE;
5357 break;
5358 case UNGT_EXPR:
5359 code = UNGT;
5360 break;
5361 case UNGE_EXPR:
5362 code = UNGE;
5363 break;
5364 case UNEQ_EXPR:
5365 code = UNEQ;
5366 break;
5367 case LTGT_EXPR:
5368 code = LTGT;
5369 break;
5370
5371 default:
5372 gcc_unreachable ();
5373 }
5374 return code;
5375 }
5376
5377 /* Return a comparison rtx for COND.  Use UNSIGNEDP to select signed or
5378 unsigned operators.  Do not generate a compare instruction. */
5379
5380 static rtx
5381 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5382 {
5383 enum rtx_code rcode;
5384 tree t_op0, t_op1;
5385 rtx rtx_op0, rtx_op1;
5386
5387 /* This is unlikely.  While generating VEC_COND_EXPR, the auto vectorizer
5388 ensures that the condition is a relational operation.  */
5389 gcc_assert (COMPARISON_CLASS_P (cond));
5390
5391 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5392 t_op0 = TREE_OPERAND (cond, 0);
5393 t_op1 = TREE_OPERAND (cond, 1);
5394
5395 /* Expand operands. */
5396 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5397 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5398
5399 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5400 && GET_MODE (rtx_op0) != VOIDmode)
5401 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5402
5403 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5404 && GET_MODE (rtx_op1) != VOIDmode)
5405 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5406
5407 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5408 }
5409
5410 /* Return insn code for VEC_COND_EXPR EXPR. */
5411
5412 static inline enum insn_code
5413 get_vcond_icode (tree expr, enum machine_mode mode)
5414 {
5415 enum insn_code icode = CODE_FOR_nothing;
5416
5417 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5418 icode = vcondu_gen_code[mode];
5419 else
5420 icode = vcond_gen_code[mode];
5421 return icode;
5422 }
5423
5424 /* Return TRUE if appropriate vector insns are available
5425 for the vector cond expr EXPR in mode VMODE.  */
5426
5427 bool
5428 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5429 {
5430 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5431 return false;
5432 return true;
5433 }
5434
5435 /* Generate insns for VEC_COND_EXPR. */
5436
5437 rtx
5438 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5439 {
5440 enum insn_code icode;
5441 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5442 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5443 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5444
5445 icode = get_vcond_icode (vec_cond_expr, mode);
5446 if (icode == CODE_FOR_nothing)
5447 return 0;
5448
5449 if (!target)
5450 target = gen_reg_rtx (mode);
5451
5452 /* Get comparison rtx. First expand both cond expr operands. */
5453 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5454 unsignedp, icode);
5455 cc_op0 = XEXP (comparison, 0);
5456 cc_op1 = XEXP (comparison, 1);
5457 /* Expand both operands and force them into registers, if required.  */
5458 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5459 NULL_RTX, VOIDmode, 1);
5460 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5461 && mode != VOIDmode)
5462 rtx_op1 = force_reg (mode, rtx_op1);
5463
5464 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5465 NULL_RTX, VOIDmode, 1);
5466 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5467 && mode != VOIDmode)
5468 rtx_op2 = force_reg (mode, rtx_op2);
5469
5470 /* Emit instruction! */
5471 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5472 comparison, cc_op0, cc_op1));
5473
5474 return target;
5475 }
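
/* Layout note: the GEN_FCN call above matches a target "vcond<mode>" or
   "vcondu<mode>" pattern whose operands are, in order, the destination
   (0), the two value operands selected on true/false (1 and 2), the
   comparison rtx (3), and the two values being compared (4 and 5); this
   is why vector_compare_rtx checks the predicates of operands 4 and 5
   when forcing its operands into registers.  */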
5476
5477 \f
5478 /* This is an internal subroutine of the other compare_and_swap expanders.
5479 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5480 operation. TARGET is an optional place to store the value result of
5481 the operation. ICODE is the particular instruction to expand. Return
5482 the result of the operation. */
5483
5484 static rtx
5485 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5486 rtx target, enum insn_code icode)
5487 {
5488 enum machine_mode mode = GET_MODE (mem);
5489 rtx insn;
5490
5491 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5492 target = gen_reg_rtx (mode);
5493
5494 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5495 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5496 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5497 old_val = force_reg (mode, old_val);
5498
5499 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5500 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5501 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5502 new_val = force_reg (mode, new_val);
5503
5504 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5505 if (insn == NULL_RTX)
5506 return NULL_RTX;
5507 emit_insn (insn);
5508
5509 return target;
5510 }
5511
5512 /* Expand a compare-and-swap operation and return its value. */
5513
5514 rtx
5515 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5516 {
5517 enum machine_mode mode = GET_MODE (mem);
5518 enum insn_code icode = sync_compare_and_swap[mode];
5519
5520 if (icode == CODE_FOR_nothing)
5521 return NULL_RTX;
5522
5523 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5524 }
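
/* For illustration, a source-level call such as

     old = __sync_val_compare_and_swap (&x, expected, desired);

   is expanded through this routine, with MEM describing x, OLD_VAL the
   expected value and NEW_VAL the replacement.  A NULL_RTX return tells
   the caller that the target has no sync_compare_and_swap pattern for
   this mode, so it must fall back (typically to a libcall).  */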
5525
5526 /* Expand a compare-and-swap operation and store true into the result if
5527 the operation was successful and false otherwise. Return the result.
5528 Unlike other routines, TARGET is not optional. */
5529
5530 rtx
5531 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5532 {
5533 enum machine_mode mode = GET_MODE (mem);
5534 enum insn_code icode;
5535 rtx subtarget, label0, label1;
5536
5537 /* If the target supports a compare-and-swap pattern that simultaneously
5538 sets some flag for success, then use it. Otherwise use the regular
5539 compare-and-swap and follow that immediately with a compare insn. */
5540 icode = sync_compare_and_swap_cc[mode];
5541 switch (icode)
5542 {
5543 default:
5544 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5545 NULL_RTX, icode);
5546 if (subtarget != NULL_RTX)
5547 break;
5548
5549 /* FALLTHRU */
5550 case CODE_FOR_nothing:
5551 icode = sync_compare_and_swap[mode];
5552 if (icode == CODE_FOR_nothing)
5553 return NULL_RTX;
5554
5555 /* If OLD_VAL is a memory reference (possibly the same memory as MEM),
5556 force it into a register so the comparison below is not made against
5556 a value the swap may already have changed. */
5557 if (MEM_P (old_val))
5558 old_val = force_reg (mode, old_val);
5559
5560 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5561 NULL_RTX, icode);
5562 if (subtarget == NULL_RTX)
5563 return NULL_RTX;
5564
5565 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5566 }
5567
5568 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5569 setcc instruction from the beginning. We don't work too hard here,
5570 but it's nice to not be stupid about initial code gen either. */
5571 if (STORE_FLAG_VALUE == 1)
5572 {
5573 icode = setcc_gen_code[EQ];
5574 if (icode != CODE_FOR_nothing)
5575 {
5576 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5577 rtx insn;
5578
5579 subtarget = target;
5580 if (!insn_data[icode].operand[0].predicate (target, cmode))
5581 subtarget = gen_reg_rtx (cmode);
5582
5583 insn = GEN_FCN (icode) (subtarget);
5584 if (insn)
5585 {
5586 emit_insn (insn);
5587 if (GET_MODE (target) != GET_MODE (subtarget))
5588 {
5589 convert_move (target, subtarget, 1);
5590 subtarget = target;
5591 }
5592 return subtarget;
5593 }
5594 }
5595 }
5596
5597 /* Without an appropriate setcc instruction, use a set of branches to
5598 get 1 and 0 stored into target. Presumably if the target has a
5599 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5600
5601 label0 = gen_label_rtx ();
5602 label1 = gen_label_rtx ();
5603
5604 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5605 emit_move_insn (target, const0_rtx);
5606 emit_jump_insn (gen_jump (label1));
5607 emit_label (label0);
5608 emit_move_insn (target, const1_rtx);
5609 emit_label (label1);
5610
5611 return target;
5612 }
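
/* Sketch of the branchy fallback emitted just above when no usable setcc
   pattern exists:

       if (<EQ on the flags set by the compare>) goto label0;
       target = 0;
       goto label1;
     label0:
       target = 1;
     label1:

   As the comment above notes, if-conversion is expected to rewrite this
   into a conditional move or store-flag sequence where profitable.  */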
5613
5614 /* This is a helper function for the other atomic operations. This function
5615 emits a loop that contains SEQ that iterates until a compare-and-swap
5616 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5617 a set of instructions that takes a value from OLD_REG as an input and
5618 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5619 set to the current contents of MEM. After SEQ, a compare-and-swap will
5620 attempt to update MEM with NEW_REG. The function returns true when the
5621 loop was generated successfully. */
5622
5623 static bool
5624 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5625 {
5626 enum machine_mode mode = GET_MODE (mem);
5627 enum insn_code icode;
5628 rtx label, cmp_reg, subtarget;
5629
5630 /* The loop we want to generate looks like
5631
5632 cmp_reg = mem;
5633 label:
5634 old_reg = cmp_reg;
5635 seq;
5636 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5637 if (cmp_reg != old_reg)
5638 goto label;
5639
5640 Note that we only do the plain load from memory once. Subsequent
5641 iterations use the value loaded by the compare-and-swap pattern. */
5642
5643 label = gen_label_rtx ();
5644 cmp_reg = gen_reg_rtx (mode);
5645
5646 emit_move_insn (cmp_reg, mem);
5647 emit_label (label);
5648 emit_move_insn (old_reg, cmp_reg);
5649 if (seq)
5650 emit_insn (seq);
5651
5652 /* If the target supports a compare-and-swap pattern that simultaneously
5653 sets some flag for success, then use it. Otherwise use the regular
5654 compare-and-swap and follow that immediately with a compare insn. */
5655 icode = sync_compare_and_swap_cc[mode];
5656 switch (icode)
5657 {
5658 default:
5659 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5660 cmp_reg, icode);
5661 if (subtarget != NULL_RTX)
5662 {
5663 gcc_assert (subtarget == cmp_reg);
5664 break;
5665 }
5666
5667 /* FALLTHRU */
5668 case CODE_FOR_nothing:
5669 icode = sync_compare_and_swap[mode];
5670 if (icode == CODE_FOR_nothing)
5671 return false;
5672
5673 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5674 cmp_reg, icode);
5675 if (subtarget == NULL_RTX)
5676 return false;
5677 if (subtarget != cmp_reg)
5678 emit_move_insn (cmp_reg, subtarget);
5679
5680 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5681 }
5682
5683 /* ??? Mark this jump predicted not taken? */
5684 emit_jump_insn (bcc_gen_fctn[NE] (label));
5685
5686 return true;
5687 }
5688
5689 /* This function generates the atomic operation MEM CODE= VAL. In this
5690 case, we do not care about any resulting value. Returns NULL if we
5691 cannot generate the operation. */
5692
5693 rtx
5694 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5695 {
5696 enum machine_mode mode = GET_MODE (mem);
5697 enum insn_code icode;
5698 rtx insn;
5699
5700 /* Look to see if the target supports the operation directly. */
5701 switch (code)
5702 {
5703 case PLUS:
5704 icode = sync_add_optab[mode];
5705 break;
5706 case IOR:
5707 icode = sync_ior_optab[mode];
5708 break;
5709 case XOR:
5710 icode = sync_xor_optab[mode];
5711 break;
5712 case AND:
5713 icode = sync_and_optab[mode];
5714 break;
5715 case NOT:
5716 icode = sync_nand_optab[mode];
5717 break;
5718
5719 case MINUS:
5720 icode = sync_sub_optab[mode];
5721 if (icode == CODE_FOR_nothing)
5722 {
5723 icode = sync_add_optab[mode];
5724 if (icode != CODE_FOR_nothing)
5725 {
5726 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5727 code = PLUS;
5728 }
5729 }
5730 break;
5731
5732 default:
5733 gcc_unreachable ();
5734 }
5735
5736 /* Generate the direct operation, if present. */
5737 if (icode != CODE_FOR_nothing)
5738 {
5739 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5740 val = convert_modes (mode, GET_MODE (val), val, 1);
5741 if (!insn_data[icode].operand[1].predicate (val, mode))
5742 val = force_reg (mode, val);
5743
5744 insn = GEN_FCN (icode) (mem, val);
5745 if (insn)
5746 {
5747 emit_insn (insn);
5748 return const0_rtx;
5749 }
5750 }
5751
5752 /* Failing that, generate a compare-and-swap loop in which we perform the
5753 operation with normal arithmetic instructions. */
5754 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5755 {
5756 rtx t0 = gen_reg_rtx (mode), t1;
5757
5758 start_sequence ();
5759
5760 t1 = t0;
5761 if (code == NOT)
5762 {
5763 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5764 code = AND;
5765 }
5766 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5767 true, OPTAB_LIB_WIDEN);
5768
5769 insn = get_insns ();
5770 end_sequence ();
5771
5772 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5773 return const0_rtx;
5774 }
5775
5776 return NULL_RTX;
5777 }
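
/* Note on the compare-and-swap fallback above: for CODE == NOT (the nand
   case) the loop body computes t1 = (~t0) & val, i.e. the complement is
   applied to the old value before the AND; and when only sync_add is
   available, MINUS is rewritten as the addition of the negated VAL.  */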
5778
5779 /* This function generates the atomic operation MEM CODE= VAL. In this
5780 case, we do care about the resulting value: if AFTER is true then
5781 return the value MEM holds after the operation; if AFTER is false
5782 then return the value MEM holds before the operation. TARGET is an
5783 optional place for the result value to be stored. */
5784
5785 rtx
5786 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5787 bool after, rtx target)
5788 {
5789 enum machine_mode mode = GET_MODE (mem);
5790 enum insn_code old_code, new_code, icode;
5791 bool compensate;
5792 rtx insn;
5793
5794 /* Look to see if the target supports the operation directly. */
5795 switch (code)
5796 {
5797 case PLUS:
5798 old_code = sync_old_add_optab[mode];
5799 new_code = sync_new_add_optab[mode];
5800 break;
5801 case IOR:
5802 old_code = sync_old_ior_optab[mode];
5803 new_code = sync_new_ior_optab[mode];
5804 break;
5805 case XOR:
5806 old_code = sync_old_xor_optab[mode];
5807 new_code = sync_new_xor_optab[mode];
5808 break;
5809 case AND:
5810 old_code = sync_old_and_optab[mode];
5811 new_code = sync_new_and_optab[mode];
5812 break;
5813 case NOT:
5814 old_code = sync_old_nand_optab[mode];
5815 new_code = sync_new_nand_optab[mode];
5816 break;
5817
5818 case MINUS:
5819 old_code = sync_old_sub_optab[mode];
5820 new_code = sync_new_sub_optab[mode];
5821 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5822 {
5823 old_code = sync_old_add_optab[mode];
5824 new_code = sync_new_add_optab[mode];
5825 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5826 {
5827 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5828 code = PLUS;
5829 }
5830 }
5831 break;
5832
5833 default:
5834 gcc_unreachable ();
5835 }
5836
5837 /* If the target supports the proper new/old operation, great.  But if
5838 we only support the opposite old/new operation, check to see if we
5839 can compensate.  When the old value is available, we can always
5840 recompute the new value with normal arithmetic.  When only the new
5841 value is available, we can recover the old value only if the
5842 operation is reversible. */
5843 compensate = false;
5844 if (after)
5845 {
5846 icode = new_code;
5847 if (icode == CODE_FOR_nothing)
5848 {
5849 icode = old_code;
5850 if (icode != CODE_FOR_nothing)
5851 compensate = true;
5852 }
5853 }
5854 else
5855 {
5856 icode = old_code;
5857 if (icode == CODE_FOR_nothing
5858 && (code == PLUS || code == MINUS || code == XOR))
5859 {
5860 icode = new_code;
5861 if (icode != CODE_FOR_nothing)
5862 compensate = true;
5863 }
5864 }
5865
5866 /* If we found something supported, great. */
5867 if (icode != CODE_FOR_nothing)
5868 {
5869 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5870 target = gen_reg_rtx (mode);
5871
5872 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5873 val = convert_modes (mode, GET_MODE (val), val, 1);
5874 if (!insn_data[icode].operand[2].predicate (val, mode))
5875 val = force_reg (mode, val);
5876
5877 insn = GEN_FCN (icode) (target, mem, val);
5878 if (insn)
5879 {
5880 emit_insn (insn);
5881
5882 /* If we need to compensate for using an operation with the
5883 wrong return value, do so now. */
5884 if (compensate)
5885 {
5886 if (!after)
5887 {
5888 if (code == PLUS)
5889 code = MINUS;
5890 else if (code == MINUS)
5891 code = PLUS;
5892 }
5893
5894 if (code == NOT)
5895 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5896 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5897 true, OPTAB_LIB_WIDEN);
5898 }
5899
5900 return target;
5901 }
5902 }
5903
5904 /* Failing that, generate a compare-and-swap loop in which we perform the
5905 operation with normal arithmetic instructions. */
5906 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5907 {
5908 rtx t0 = gen_reg_rtx (mode), t1;
5909
5910 if (!target || !register_operand (target, mode))
5911 target = gen_reg_rtx (mode);
5912
5913 start_sequence ();
5914
5915 if (!after)
5916 emit_move_insn (target, t0);
5917 t1 = t0;
5918 if (code == NOT)
5919 {
5920 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5921 code = AND;
5922 }
5923 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5924 true, OPTAB_LIB_WIDEN);
5925 if (after)
5926 emit_move_insn (target, t1);
5927
5928 insn = get_insns ();
5929 end_sequence ();
5930
5931 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5932 return target;
5933 }
5934
5935 return NULL_RTX;
5936 }
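
/* Compensation example: expanding __sync_fetch_and_add (old value wanted)
   on a target that only provides the "new value" pattern emits old + val,
   so the code above recovers the old value by computing target - val with
   ordinary arithmetic; conversely, __sync_add_and_fetch on a target with
   only the "old value" pattern recomputes target + val afterwards.  */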
5937
5938 /* This function expands a test-and-set operation.  Ideally we atomically
5939 store VAL in MEM and return the previous value in MEM.  Some targets
5940 may not support this operation and only support VAL with the constant 1;
5941 in that case the return value will be 0/1, but the exact value
5942 stored in MEM is target defined.  TARGET is an optional place to store
5943 the return value. */
5944
5945 rtx
5946 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
5947 {
5948 enum machine_mode mode = GET_MODE (mem);
5949 enum insn_code icode;
5950 rtx insn;
5951
5952 /* If the target supports the test-and-set directly, great. */
5953 icode = sync_lock_test_and_set[mode];
5954 if (icode != CODE_FOR_nothing)
5955 {
5956 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5957 target = gen_reg_rtx (mode);
5958
5959 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5960 val = convert_modes (mode, GET_MODE (val), val, 1);
5961 if (!insn_data[icode].operand[2].predicate (val, mode))
5962 val = force_reg (mode, val);
5963
5964 insn = GEN_FCN (icode) (target, mem, val);
5965 if (insn)
5966 {
5967 emit_insn (insn);
5968 return target;
5969 }
5970 }
5971
5972 /* Otherwise, use a compare-and-swap loop for the exchange. */
5973 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5974 {
5975 if (!target || !register_operand (target, mode))
5976 target = gen_reg_rtx (mode);
5977 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5978 val = convert_modes (mode, GET_MODE (val), val, 1);
5979 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5980 return target;
5981 }
5982
5983 return NULL_RTX;
5984 }
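
/* For illustration, __sync_lock_test_and_set (&lock, 1) reaches this
   expander; a target whose native pattern can only store the constant 1
   still yields a usable 0/1 result even though the exact value stored in
   MEM is target defined, while a target with only compare-and-swap gets
   the full exchange synthesized by the loop above.  */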
5985
5986 #include "gt-optabs.h"