1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
33 #include "insn-config.h"
37 #include "langhooks.h"
41 static void store_fixed_bit_field (rtx
, unsigned HOST_WIDE_INT
,
42 unsigned HOST_WIDE_INT
,
43 unsigned HOST_WIDE_INT
, rtx
);
44 static void store_split_bit_field (rtx
, unsigned HOST_WIDE_INT
,
45 unsigned HOST_WIDE_INT
, rtx
);
46 static rtx
extract_fixed_bit_field (enum machine_mode
, rtx
,
47 unsigned HOST_WIDE_INT
,
48 unsigned HOST_WIDE_INT
,
49 unsigned HOST_WIDE_INT
, rtx
, int);
50 static rtx
mask_rtx (enum machine_mode
, int, int, int);
51 static rtx
lshift_value (enum machine_mode
, rtx
, int, int);
52 static rtx
extract_split_bit_field (rtx
, unsigned HOST_WIDE_INT
,
53 unsigned HOST_WIDE_INT
, int);
54 static void do_cmp_and_jump (rtx
, rtx
, enum rtx_code
, enum machine_mode
, rtx
);
55 static rtx
expand_smod_pow2 (enum machine_mode
, rtx
, HOST_WIDE_INT
);
56 static rtx
expand_sdiv_pow2 (enum machine_mode
, rtx
, HOST_WIDE_INT
);
58 /* Test whether a value is zero of a power of two. */
59 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
61 /* Nonzero means divides or modulus operations are relatively cheap for
62 powers of two, so don't use branches; emit the operation instead.
63 Usually, this will mean that the MD file will emit non-branch
66 static bool sdiv_pow2_cheap
[2][NUM_MACHINE_MODES
];
67 static bool smod_pow2_cheap
[2][NUM_MACHINE_MODES
];
69 #ifndef SLOW_UNALIGNED_ACCESS
70 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
73 /* For compilers that support multiple targets with different word sizes,
74 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
75 is the H8/300(H) compiler. */
77 #ifndef MAX_BITS_PER_WORD
78 #define MAX_BITS_PER_WORD BITS_PER_WORD
81 /* Reduce conditional compilation elsewhere. */
84 #define CODE_FOR_insv CODE_FOR_nothing
85 #define gen_insv(a,b,c,d) NULL_RTX
89 #define CODE_FOR_extv CODE_FOR_nothing
90 #define gen_extv(a,b,c,d) NULL_RTX
94 #define CODE_FOR_extzv CODE_FOR_nothing
95 #define gen_extzv(a,b,c,d) NULL_RTX
98 /* Cost of various pieces of RTL. Note that some of these are indexed by
99 shift count and some by mode. */
100 static int zero_cost
[2];
101 static int add_cost
[2][NUM_MACHINE_MODES
];
102 static int neg_cost
[2][NUM_MACHINE_MODES
];
103 static int shift_cost
[2][NUM_MACHINE_MODES
][MAX_BITS_PER_WORD
];
104 static int shiftadd_cost
[2][NUM_MACHINE_MODES
][MAX_BITS_PER_WORD
];
105 static int shiftsub0_cost
[2][NUM_MACHINE_MODES
][MAX_BITS_PER_WORD
];
106 static int shiftsub1_cost
[2][NUM_MACHINE_MODES
][MAX_BITS_PER_WORD
];
107 static int mul_cost
[2][NUM_MACHINE_MODES
];
108 static int sdiv_cost
[2][NUM_MACHINE_MODES
];
109 static int udiv_cost
[2][NUM_MACHINE_MODES
];
110 static int mul_widen_cost
[2][NUM_MACHINE_MODES
];
111 static int mul_highpart_cost
[2][NUM_MACHINE_MODES
];
118 struct rtx_def reg
; rtunion reg_fld
[2];
119 struct rtx_def plus
; rtunion plus_fld1
;
121 struct rtx_def mult
; rtunion mult_fld1
;
122 struct rtx_def sdiv
; rtunion sdiv_fld1
;
123 struct rtx_def udiv
; rtunion udiv_fld1
;
125 struct rtx_def sdiv_32
; rtunion sdiv_32_fld1
;
126 struct rtx_def smod_32
; rtunion smod_32_fld1
;
127 struct rtx_def wide_mult
; rtunion wide_mult_fld1
;
128 struct rtx_def wide_lshr
; rtunion wide_lshr_fld1
;
129 struct rtx_def wide_trunc
;
130 struct rtx_def shift
; rtunion shift_fld1
;
131 struct rtx_def shift_mult
; rtunion shift_mult_fld1
;
132 struct rtx_def shift_add
; rtunion shift_add_fld1
;
133 struct rtx_def shift_sub0
; rtunion shift_sub0_fld1
;
134 struct rtx_def shift_sub1
; rtunion shift_sub1_fld1
;
137 rtx pow2
[MAX_BITS_PER_WORD
];
138 rtx cint
[MAX_BITS_PER_WORD
];
140 enum machine_mode mode
, wider_mode
;
144 for (m
= 1; m
< MAX_BITS_PER_WORD
; m
++)
146 pow2
[m
] = GEN_INT ((HOST_WIDE_INT
) 1 << m
);
147 cint
[m
] = GEN_INT (m
);
149 memset (&all
, 0, sizeof all
);
151 PUT_CODE (&all
.reg
, REG
);
152 /* Avoid using hard regs in ways which may be unsupported. */
153 SET_REGNO (&all
.reg
, LAST_VIRTUAL_REGISTER
+ 1);
155 PUT_CODE (&all
.plus
, PLUS
);
156 XEXP (&all
.plus
, 0) = &all
.reg
;
157 XEXP (&all
.plus
, 1) = &all
.reg
;
159 PUT_CODE (&all
.neg
, NEG
);
160 XEXP (&all
.neg
, 0) = &all
.reg
;
162 PUT_CODE (&all
.mult
, MULT
);
163 XEXP (&all
.mult
, 0) = &all
.reg
;
164 XEXP (&all
.mult
, 1) = &all
.reg
;
166 PUT_CODE (&all
.sdiv
, DIV
);
167 XEXP (&all
.sdiv
, 0) = &all
.reg
;
168 XEXP (&all
.sdiv
, 1) = &all
.reg
;
170 PUT_CODE (&all
.udiv
, UDIV
);
171 XEXP (&all
.udiv
, 0) = &all
.reg
;
172 XEXP (&all
.udiv
, 1) = &all
.reg
;
174 PUT_CODE (&all
.sdiv_32
, DIV
);
175 XEXP (&all
.sdiv_32
, 0) = &all
.reg
;
176 XEXP (&all
.sdiv_32
, 1) = 32 < MAX_BITS_PER_WORD
? cint
[32] : GEN_INT (32);
178 PUT_CODE (&all
.smod_32
, MOD
);
179 XEXP (&all
.smod_32
, 0) = &all
.reg
;
180 XEXP (&all
.smod_32
, 1) = XEXP (&all
.sdiv_32
, 1);
182 PUT_CODE (&all
.zext
, ZERO_EXTEND
);
183 XEXP (&all
.zext
, 0) = &all
.reg
;
185 PUT_CODE (&all
.wide_mult
, MULT
);
186 XEXP (&all
.wide_mult
, 0) = &all
.zext
;
187 XEXP (&all
.wide_mult
, 1) = &all
.zext
;
189 PUT_CODE (&all
.wide_lshr
, LSHIFTRT
);
190 XEXP (&all
.wide_lshr
, 0) = &all
.wide_mult
;
192 PUT_CODE (&all
.wide_trunc
, TRUNCATE
);
193 XEXP (&all
.wide_trunc
, 0) = &all
.wide_lshr
;
195 PUT_CODE (&all
.shift
, ASHIFT
);
196 XEXP (&all
.shift
, 0) = &all
.reg
;
198 PUT_CODE (&all
.shift_mult
, MULT
);
199 XEXP (&all
.shift_mult
, 0) = &all
.reg
;
201 PUT_CODE (&all
.shift_add
, PLUS
);
202 XEXP (&all
.shift_add
, 0) = &all
.shift_mult
;
203 XEXP (&all
.shift_add
, 1) = &all
.reg
;
205 PUT_CODE (&all
.shift_sub0
, MINUS
);
206 XEXP (&all
.shift_sub0
, 0) = &all
.shift_mult
;
207 XEXP (&all
.shift_sub0
, 1) = &all
.reg
;
209 PUT_CODE (&all
.shift_sub1
, MINUS
);
210 XEXP (&all
.shift_sub1
, 0) = &all
.reg
;
211 XEXP (&all
.shift_sub1
, 1) = &all
.shift_mult
;
213 for (speed
= 0; speed
< 2; speed
++)
215 crtl
->maybe_hot_insn_p
= speed
;
216 zero_cost
[speed
] = rtx_cost (const0_rtx
, SET
, speed
);
218 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
220 mode
= GET_MODE_WIDER_MODE (mode
))
222 PUT_MODE (&all
.reg
, mode
);
223 PUT_MODE (&all
.plus
, mode
);
224 PUT_MODE (&all
.neg
, mode
);
225 PUT_MODE (&all
.mult
, mode
);
226 PUT_MODE (&all
.sdiv
, mode
);
227 PUT_MODE (&all
.udiv
, mode
);
228 PUT_MODE (&all
.sdiv_32
, mode
);
229 PUT_MODE (&all
.smod_32
, mode
);
230 PUT_MODE (&all
.wide_trunc
, mode
);
231 PUT_MODE (&all
.shift
, mode
);
232 PUT_MODE (&all
.shift_mult
, mode
);
233 PUT_MODE (&all
.shift_add
, mode
);
234 PUT_MODE (&all
.shift_sub0
, mode
);
235 PUT_MODE (&all
.shift_sub1
, mode
);
237 add_cost
[speed
][mode
] = rtx_cost (&all
.plus
, SET
, speed
);
238 neg_cost
[speed
][mode
] = rtx_cost (&all
.neg
, SET
, speed
);
239 mul_cost
[speed
][mode
] = rtx_cost (&all
.mult
, SET
, speed
);
240 sdiv_cost
[speed
][mode
] = rtx_cost (&all
.sdiv
, SET
, speed
);
241 udiv_cost
[speed
][mode
] = rtx_cost (&all
.udiv
, SET
, speed
);
243 sdiv_pow2_cheap
[speed
][mode
] = (rtx_cost (&all
.sdiv_32
, SET
, speed
)
244 <= 2 * add_cost
[speed
][mode
]);
245 smod_pow2_cheap
[speed
][mode
] = (rtx_cost (&all
.smod_32
, SET
, speed
)
246 <= 4 * add_cost
[speed
][mode
]);
248 wider_mode
= GET_MODE_WIDER_MODE (mode
);
249 if (wider_mode
!= VOIDmode
)
251 PUT_MODE (&all
.zext
, wider_mode
);
252 PUT_MODE (&all
.wide_mult
, wider_mode
);
253 PUT_MODE (&all
.wide_lshr
, wider_mode
);
254 XEXP (&all
.wide_lshr
, 1) = GEN_INT (GET_MODE_BITSIZE (mode
));
256 mul_widen_cost
[speed
][wider_mode
]
257 = rtx_cost (&all
.wide_mult
, SET
, speed
);
258 mul_highpart_cost
[speed
][mode
]
259 = rtx_cost (&all
.wide_trunc
, SET
, speed
);
262 shift_cost
[speed
][mode
][0] = 0;
263 shiftadd_cost
[speed
][mode
][0] = shiftsub0_cost
[speed
][mode
][0]
264 = shiftsub1_cost
[speed
][mode
][0] = add_cost
[speed
][mode
];
266 n
= MIN (MAX_BITS_PER_WORD
, GET_MODE_BITSIZE (mode
));
267 for (m
= 1; m
< n
; m
++)
269 XEXP (&all
.shift
, 1) = cint
[m
];
270 XEXP (&all
.shift_mult
, 1) = pow2
[m
];
272 shift_cost
[speed
][mode
][m
] = rtx_cost (&all
.shift
, SET
, speed
);
273 shiftadd_cost
[speed
][mode
][m
] = rtx_cost (&all
.shift_add
, SET
, speed
);
274 shiftsub0_cost
[speed
][mode
][m
] = rtx_cost (&all
.shift_sub0
, SET
, speed
);
275 shiftsub1_cost
[speed
][mode
][m
] = rtx_cost (&all
.shift_sub1
, SET
, speed
);
279 default_rtl_profile ();
282 /* Return an rtx representing minus the value of X.
283 MODE is the intended mode of the result,
284 useful if X is a CONST_INT. */
287 negate_rtx (enum machine_mode mode
, rtx x
)
289 rtx result
= simplify_unary_operation (NEG
, mode
, x
, mode
);
292 result
= expand_unop (mode
, neg_optab
, x
, NULL_RTX
, 0);
297 /* Report on the availability of insv/extv/extzv and the desired mode
298 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
299 is false; else the mode of the specified operand. If OPNO is -1,
300 all the caller cares about is whether the insn is available. */
302 mode_for_extraction (enum extraction_pattern pattern
, int opno
)
304 const struct insn_data
*data
;
311 data
= &insn_data
[CODE_FOR_insv
];
314 return MAX_MACHINE_MODE
;
319 data
= &insn_data
[CODE_FOR_extv
];
322 return MAX_MACHINE_MODE
;
327 data
= &insn_data
[CODE_FOR_extzv
];
330 return MAX_MACHINE_MODE
;
339 /* Everyone who uses this function used to follow it with
340 if (result == VOIDmode) result = word_mode; */
341 if (data
->operand
[opno
].mode
== VOIDmode
)
343 return data
->operand
[opno
].mode
;
346 /* Return true if X, of mode MODE, matches the predicate for operand
347 OPNO of instruction ICODE. Allow volatile memories, regardless of
348 the ambient volatile_ok setting. */
351 check_predicate_volatile_ok (enum insn_code icode
, int opno
,
352 rtx x
, enum machine_mode mode
)
354 bool save_volatile_ok
, result
;
356 save_volatile_ok
= volatile_ok
;
357 result
= insn_data
[(int) icode
].operand
[opno
].predicate (x
, mode
);
358 volatile_ok
= save_volatile_ok
;
362 /* A subroutine of store_bit_field, with the same arguments. Return true
363 if the operation could be implemented.
365 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
366 no other way of implementing the operation. If FALLBACK_P is false,
367 return false instead. */
370 store_bit_field_1 (rtx str_rtx
, unsigned HOST_WIDE_INT bitsize
,
371 unsigned HOST_WIDE_INT bitnum
, enum machine_mode fieldmode
,
372 rtx value
, bool fallback_p
)
375 = (MEM_P (str_rtx
)) ? BITS_PER_UNIT
: BITS_PER_WORD
;
376 unsigned HOST_WIDE_INT offset
, bitpos
;
381 enum machine_mode op_mode
= mode_for_extraction (EP_insv
, 3);
383 while (GET_CODE (op0
) == SUBREG
)
385 /* The following line once was done only if WORDS_BIG_ENDIAN,
386 but I think that is a mistake. WORDS_BIG_ENDIAN is
387 meaningful at a much higher level; when structures are copied
388 between memory and regs, the higher-numbered regs
389 always get higher addresses. */
390 int inner_mode_size
= GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0
)));
391 int outer_mode_size
= GET_MODE_SIZE (GET_MODE (op0
));
395 /* Paradoxical subregs need special handling on big endian machines. */
396 if (SUBREG_BYTE (op0
) == 0 && inner_mode_size
< outer_mode_size
)
398 int difference
= inner_mode_size
- outer_mode_size
;
400 if (WORDS_BIG_ENDIAN
)
401 byte_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
402 if (BYTES_BIG_ENDIAN
)
403 byte_offset
+= difference
% UNITS_PER_WORD
;
406 byte_offset
= SUBREG_BYTE (op0
);
408 bitnum
+= byte_offset
* BITS_PER_UNIT
;
409 op0
= SUBREG_REG (op0
);
412 /* No action is needed if the target is a register and if the field
413 lies completely outside that register. This can occur if the source
414 code contains an out-of-bounds access to a small array. */
415 if (REG_P (op0
) && bitnum
>= GET_MODE_BITSIZE (GET_MODE (op0
)))
418 /* Use vec_set patterns for inserting parts of vectors whenever
420 if (VECTOR_MODE_P (GET_MODE (op0
))
422 && (optab_handler (vec_set_optab
, GET_MODE (op0
))->insn_code
424 && fieldmode
== GET_MODE_INNER (GET_MODE (op0
))
425 && bitsize
== GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))
426 && !(bitnum
% GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))))
428 enum machine_mode outermode
= GET_MODE (op0
);
429 enum machine_mode innermode
= GET_MODE_INNER (outermode
);
430 int icode
= (int) optab_handler (vec_set_optab
, outermode
)->insn_code
;
431 int pos
= bitnum
/ GET_MODE_BITSIZE (innermode
);
432 rtx rtxpos
= GEN_INT (pos
);
436 enum machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
437 enum machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
438 enum machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
442 if (! (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
))
443 src
= copy_to_mode_reg (mode1
, src
);
445 if (! (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
))
446 rtxpos
= copy_to_mode_reg (mode1
, rtxpos
);
448 /* We could handle this, but we should always be called with a pseudo
449 for our targets and all insns should take them as outputs. */
450 gcc_assert ((*insn_data
[icode
].operand
[0].predicate
) (dest
, mode0
)
451 && (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
)
452 && (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
));
453 pat
= GEN_FCN (icode
) (dest
, src
, rtxpos
);
464 /* If the target is a register, overwriting the entire object, or storing
465 a full-word or multi-word field can be done with just a SUBREG.
467 If the target is memory, storing any naturally aligned field can be
468 done with a simple store. For targets that support fast unaligned
469 memory, any naturally sized, unit aligned field can be done directly. */
471 offset
= bitnum
/ unit
;
472 bitpos
= bitnum
% unit
;
473 byte_offset
= (bitnum
% BITS_PER_WORD
) / BITS_PER_UNIT
474 + (offset
* UNITS_PER_WORD
);
477 && bitsize
== GET_MODE_BITSIZE (fieldmode
)
479 ? ((GET_MODE_SIZE (fieldmode
) >= UNITS_PER_WORD
480 || GET_MODE_SIZE (GET_MODE (op0
)) == GET_MODE_SIZE (fieldmode
))
481 && byte_offset
% GET_MODE_SIZE (fieldmode
) == 0)
482 : (! SLOW_UNALIGNED_ACCESS (fieldmode
, MEM_ALIGN (op0
))
483 || (offset
* BITS_PER_UNIT
% bitsize
== 0
484 && MEM_ALIGN (op0
) % GET_MODE_BITSIZE (fieldmode
) == 0))))
487 op0
= adjust_address (op0
, fieldmode
, offset
);
488 else if (GET_MODE (op0
) != fieldmode
)
489 op0
= simplify_gen_subreg (fieldmode
, op0
, GET_MODE (op0
),
491 emit_move_insn (op0
, value
);
495 /* Make sure we are playing with integral modes. Pun with subregs
496 if we aren't. This must come after the entire register case above,
497 since that case is valid for any mode. The following cases are only
498 valid for integral modes. */
500 enum machine_mode imode
= int_mode_for_mode (GET_MODE (op0
));
501 if (imode
!= GET_MODE (op0
))
504 op0
= adjust_address (op0
, imode
, 0);
507 gcc_assert (imode
!= BLKmode
);
508 op0
= gen_lowpart (imode
, op0
);
513 /* We may be accessing data outside the field, which means
514 we can alias adjacent data. */
517 op0
= shallow_copy_rtx (op0
);
518 set_mem_alias_set (op0
, 0);
519 set_mem_expr (op0
, 0);
522 /* If OP0 is a register, BITPOS must count within a word.
523 But as we have it, it counts within whatever size OP0 now has.
524 On a bigendian machine, these are not the same, so convert. */
527 && unit
> GET_MODE_BITSIZE (GET_MODE (op0
)))
528 bitpos
+= unit
- GET_MODE_BITSIZE (GET_MODE (op0
));
530 /* Storing an lsb-aligned field in a register
531 can be done with a movestrict instruction. */
534 && (BYTES_BIG_ENDIAN
? bitpos
+ bitsize
== unit
: bitpos
== 0)
535 && bitsize
== GET_MODE_BITSIZE (fieldmode
)
536 && (optab_handler (movstrict_optab
, fieldmode
)->insn_code
537 != CODE_FOR_nothing
))
539 int icode
= optab_handler (movstrict_optab
, fieldmode
)->insn_code
;
541 rtx start
= get_last_insn ();
544 /* Get appropriate low part of the value being stored. */
545 if (CONST_INT_P (value
) || REG_P (value
))
546 value
= gen_lowpart (fieldmode
, value
);
547 else if (!(GET_CODE (value
) == SYMBOL_REF
548 || GET_CODE (value
) == LABEL_REF
549 || GET_CODE (value
) == CONST
))
550 value
= convert_to_mode (fieldmode
, value
, 0);
552 if (! (*insn_data
[icode
].operand
[1].predicate
) (value
, fieldmode
))
553 value
= copy_to_mode_reg (fieldmode
, value
);
555 if (GET_CODE (op0
) == SUBREG
)
557 /* Else we've got some float mode source being extracted into
558 a different float mode destination -- this combination of
559 subregs results in Severe Tire Damage. */
560 gcc_assert (GET_MODE (SUBREG_REG (op0
)) == fieldmode
561 || GET_MODE_CLASS (fieldmode
) == MODE_INT
562 || GET_MODE_CLASS (fieldmode
) == MODE_PARTIAL_INT
);
563 arg0
= SUBREG_REG (op0
);
566 insn
= (GEN_FCN (icode
)
567 (gen_rtx_SUBREG (fieldmode
, arg0
,
568 (bitnum
% BITS_PER_WORD
) / BITS_PER_UNIT
569 + (offset
* UNITS_PER_WORD
)),
576 delete_insns_since (start
);
579 /* Handle fields bigger than a word. */
581 if (bitsize
> BITS_PER_WORD
)
583 /* Here we transfer the words of the field
584 in the order least significant first.
585 This is because the most significant word is the one which may
587 However, only do that if the value is not BLKmode. */
589 unsigned int backwards
= WORDS_BIG_ENDIAN
&& fieldmode
!= BLKmode
;
590 unsigned int nwords
= (bitsize
+ (BITS_PER_WORD
- 1)) / BITS_PER_WORD
;
594 /* This is the mode we must force value to, so that there will be enough
595 subwords to extract. Note that fieldmode will often (always?) be
596 VOIDmode, because that is what store_field uses to indicate that this
597 is a bit field, but passing VOIDmode to operand_subword_force
599 fieldmode
= GET_MODE (value
);
600 if (fieldmode
== VOIDmode
)
601 fieldmode
= smallest_mode_for_size (nwords
* BITS_PER_WORD
, MODE_INT
);
603 last
= get_last_insn ();
604 for (i
= 0; i
< nwords
; i
++)
606 /* If I is 0, use the low-order word in both field and target;
607 if I is 1, use the next to lowest word; and so on. */
608 unsigned int wordnum
= (backwards
? nwords
- i
- 1 : i
);
609 unsigned int bit_offset
= (backwards
610 ? MAX ((int) bitsize
- ((int) i
+ 1)
613 : (int) i
* BITS_PER_WORD
);
614 rtx value_word
= operand_subword_force (value
, wordnum
, fieldmode
);
616 if (!store_bit_field_1 (op0
, MIN (BITS_PER_WORD
,
617 bitsize
- i
* BITS_PER_WORD
),
618 bitnum
+ bit_offset
, word_mode
,
619 value_word
, fallback_p
))
621 delete_insns_since (last
);
628 /* From here on we can assume that the field to be stored in is
629 a full-word (whatever type that is), since it is shorter than a word. */
631 /* OFFSET is the number of words or bytes (UNIT says which)
632 from STR_RTX to the first word or byte containing part of the field. */
637 || GET_MODE_SIZE (GET_MODE (op0
)) > UNITS_PER_WORD
)
641 /* Since this is a destination (lvalue), we can't copy
642 it to a pseudo. We can remove a SUBREG that does not
643 change the size of the operand. Such a SUBREG may
644 have been added above. */
645 gcc_assert (GET_CODE (op0
) == SUBREG
646 && (GET_MODE_SIZE (GET_MODE (op0
))
647 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0
)))));
648 op0
= SUBREG_REG (op0
);
650 op0
= gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD
, MODE_INT
, 0),
651 op0
, (offset
* UNITS_PER_WORD
));
656 /* If VALUE has a floating-point or complex mode, access it as an
657 integer of the corresponding size. This can occur on a machine
658 with 64 bit registers that uses SFmode for float. It can also
659 occur for unaligned float or complex fields. */
661 if (GET_MODE (value
) != VOIDmode
662 && GET_MODE_CLASS (GET_MODE (value
)) != MODE_INT
663 && GET_MODE_CLASS (GET_MODE (value
)) != MODE_PARTIAL_INT
)
665 value
= gen_reg_rtx (int_mode_for_mode (GET_MODE (value
)));
666 emit_move_insn (gen_lowpart (GET_MODE (orig_value
), value
), orig_value
);
669 /* Now OFFSET is nonzero only if OP0 is memory
670 and is therefore always measured in bytes. */
673 && GET_MODE (value
) != BLKmode
675 && GET_MODE_BITSIZE (op_mode
) >= bitsize
676 && ! ((REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
677 && (bitsize
+ bitpos
> GET_MODE_BITSIZE (op_mode
)))
678 && insn_data
[CODE_FOR_insv
].operand
[1].predicate (GEN_INT (bitsize
),
680 && check_predicate_volatile_ok (CODE_FOR_insv
, 0, op0
, VOIDmode
))
682 int xbitpos
= bitpos
;
685 rtx last
= get_last_insn ();
687 bool copy_back
= false;
689 /* Add OFFSET into OP0's address. */
691 xop0
= adjust_address (xop0
, byte_mode
, offset
);
693 /* If xop0 is a register, we need it in OP_MODE
694 to make it acceptable to the format of insv. */
695 if (GET_CODE (xop0
) == SUBREG
)
696 /* We can't just change the mode, because this might clobber op0,
697 and we will need the original value of op0 if insv fails. */
698 xop0
= gen_rtx_SUBREG (op_mode
, SUBREG_REG (xop0
), SUBREG_BYTE (xop0
));
699 if (REG_P (xop0
) && GET_MODE (xop0
) != op_mode
)
700 xop0
= gen_lowpart_SUBREG (op_mode
, xop0
);
702 /* If the destination is a paradoxical subreg such that we need a
703 truncate to the inner mode, perform the insertion on a temporary and
704 truncate the result to the original destination. Note that we can't
705 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
706 X) 0)) is (reg:N X). */
707 if (GET_CODE (xop0
) == SUBREG
708 && REG_P (SUBREG_REG (xop0
))
709 && (!TRULY_NOOP_TRUNCATION
710 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0
))),
711 GET_MODE_BITSIZE (op_mode
))))
713 rtx tem
= gen_reg_rtx (op_mode
);
714 emit_move_insn (tem
, xop0
);
719 /* On big-endian machines, we count bits from the most significant.
720 If the bit field insn does not, we must invert. */
722 if (BITS_BIG_ENDIAN
!= BYTES_BIG_ENDIAN
)
723 xbitpos
= unit
- bitsize
- xbitpos
;
725 /* We have been counting XBITPOS within UNIT.
726 Count instead within the size of the register. */
727 if (BITS_BIG_ENDIAN
&& !MEM_P (xop0
))
728 xbitpos
+= GET_MODE_BITSIZE (op_mode
) - unit
;
730 unit
= GET_MODE_BITSIZE (op_mode
);
732 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
734 if (GET_MODE (value
) != op_mode
)
736 if (GET_MODE_BITSIZE (GET_MODE (value
)) >= bitsize
)
738 /* Optimization: Don't bother really extending VALUE
739 if it has all the bits we will actually use. However,
740 if we must narrow it, be sure we do it correctly. */
742 if (GET_MODE_SIZE (GET_MODE (value
)) < GET_MODE_SIZE (op_mode
))
746 tmp
= simplify_subreg (op_mode
, value1
, GET_MODE (value
), 0);
748 tmp
= simplify_gen_subreg (op_mode
,
749 force_reg (GET_MODE (value
),
751 GET_MODE (value
), 0);
755 value1
= gen_lowpart (op_mode
, value1
);
757 else if (CONST_INT_P (value
))
758 value1
= gen_int_mode (INTVAL (value
), op_mode
);
760 /* Parse phase is supposed to make VALUE's data type
761 match that of the component reference, which is a type
762 at least as wide as the field; so VALUE should have
763 a mode that corresponds to that type. */
764 gcc_assert (CONSTANT_P (value
));
767 /* If this machine's insv insists on a register,
768 get VALUE1 into a register. */
769 if (! ((*insn_data
[(int) CODE_FOR_insv
].operand
[3].predicate
)
771 value1
= force_reg (op_mode
, value1
);
773 pat
= gen_insv (xop0
, GEN_INT (bitsize
), GEN_INT (xbitpos
), value1
);
779 convert_move (op0
, xop0
, true);
782 delete_insns_since (last
);
785 /* If OP0 is a memory, try copying it to a register and seeing if a
786 cheap register alternative is available. */
787 if (HAVE_insv
&& MEM_P (op0
))
789 enum machine_mode bestmode
;
791 /* Get the mode to use for inserting into this field. If OP0 is
792 BLKmode, get the smallest mode consistent with the alignment. If
793 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
794 mode. Otherwise, use the smallest mode containing the field. */
796 if (GET_MODE (op0
) == BLKmode
797 || (op_mode
!= MAX_MACHINE_MODE
798 && GET_MODE_SIZE (GET_MODE (op0
)) > GET_MODE_SIZE (op_mode
)))
799 bestmode
= get_best_mode (bitsize
, bitnum
, MEM_ALIGN (op0
),
800 (op_mode
== MAX_MACHINE_MODE
801 ? VOIDmode
: op_mode
),
802 MEM_VOLATILE_P (op0
));
804 bestmode
= GET_MODE (op0
);
806 if (bestmode
!= VOIDmode
807 && GET_MODE_SIZE (bestmode
) >= GET_MODE_SIZE (fieldmode
)
808 && !(SLOW_UNALIGNED_ACCESS (bestmode
, MEM_ALIGN (op0
))
809 && GET_MODE_BITSIZE (bestmode
) > MEM_ALIGN (op0
)))
811 rtx last
, tempreg
, xop0
;
812 unsigned HOST_WIDE_INT xoffset
, xbitpos
;
814 last
= get_last_insn ();
816 /* Adjust address to point to the containing unit of
817 that mode. Compute the offset as a multiple of this unit,
818 counting in bytes. */
819 unit
= GET_MODE_BITSIZE (bestmode
);
820 xoffset
= (bitnum
/ unit
) * GET_MODE_SIZE (bestmode
);
821 xbitpos
= bitnum
% unit
;
822 xop0
= adjust_address (op0
, bestmode
, xoffset
);
824 /* Fetch that unit, store the bitfield in it, then store
826 tempreg
= copy_to_reg (xop0
);
827 if (store_bit_field_1 (tempreg
, bitsize
, xbitpos
,
828 fieldmode
, orig_value
, false))
830 emit_move_insn (xop0
, tempreg
);
833 delete_insns_since (last
);
840 store_fixed_bit_field (op0
, offset
, bitsize
, bitpos
, value
);
844 /* Generate code to store value from rtx VALUE
845 into a bit-field within structure STR_RTX
846 containing BITSIZE bits starting at bit BITNUM.
847 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
850 store_bit_field (rtx str_rtx
, unsigned HOST_WIDE_INT bitsize
,
851 unsigned HOST_WIDE_INT bitnum
, enum machine_mode fieldmode
,
854 if (!store_bit_field_1 (str_rtx
, bitsize
, bitnum
, fieldmode
, value
, true))
858 /* Use shifts and boolean operations to store VALUE
859 into a bit field of width BITSIZE
860 in a memory location specified by OP0 except offset by OFFSET bytes.
861 (OFFSET must be 0 if OP0 is a register.)
862 The field starts at position BITPOS within the byte.
863 (If OP0 is a register, it may be a full word or a narrower mode,
864 but BITPOS still counts within a full word,
865 which is significant on bigendian machines.) */
868 store_fixed_bit_field (rtx op0
, unsigned HOST_WIDE_INT offset
,
869 unsigned HOST_WIDE_INT bitsize
,
870 unsigned HOST_WIDE_INT bitpos
, rtx value
)
872 enum machine_mode mode
;
873 unsigned int total_bits
= BITS_PER_WORD
;
878 /* There is a case not handled here:
879 a structure with a known alignment of just a halfword
880 and a field split across two aligned halfwords within the structure.
881 Or likewise a structure with a known alignment of just a byte
882 and a field split across two bytes.
883 Such cases are not supposed to be able to occur. */
885 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
887 gcc_assert (!offset
);
888 /* Special treatment for a bit field split across two registers. */
889 if (bitsize
+ bitpos
> BITS_PER_WORD
)
891 store_split_bit_field (op0
, bitsize
, bitpos
, value
);
897 /* Get the proper mode to use for this field. We want a mode that
898 includes the entire field. If such a mode would be larger than
899 a word, we won't be doing the extraction the normal way.
900 We don't want a mode bigger than the destination. */
902 mode
= GET_MODE (op0
);
903 if (GET_MODE_BITSIZE (mode
) == 0
904 || GET_MODE_BITSIZE (mode
) > GET_MODE_BITSIZE (word_mode
))
907 if (MEM_VOLATILE_P (op0
)
908 && GET_MODE_BITSIZE (GET_MODE (op0
)) > 0
909 && flag_strict_volatile_bitfields
> 0)
910 mode
= GET_MODE (op0
);
912 mode
= get_best_mode (bitsize
, bitpos
+ offset
* BITS_PER_UNIT
,
913 MEM_ALIGN (op0
), mode
, MEM_VOLATILE_P (op0
));
915 if (mode
== VOIDmode
)
917 /* The only way this should occur is if the field spans word
919 store_split_bit_field (op0
, bitsize
, bitpos
+ offset
* BITS_PER_UNIT
,
924 total_bits
= GET_MODE_BITSIZE (mode
);
926 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
927 be in the range 0 to total_bits-1, and put any excess bytes in
929 if (bitpos
>= total_bits
)
931 offset
+= (bitpos
/ total_bits
) * (total_bits
/ BITS_PER_UNIT
);
932 bitpos
-= ((bitpos
/ total_bits
) * (total_bits
/ BITS_PER_UNIT
)
936 /* Get ref to an aligned byte, halfword, or word containing the field.
937 Adjust BITPOS to be position within a word,
938 and OFFSET to be the offset of that word.
939 Then alter OP0 to refer to that word. */
940 bitpos
+= (offset
% (total_bits
/ BITS_PER_UNIT
)) * BITS_PER_UNIT
;
941 offset
-= (offset
% (total_bits
/ BITS_PER_UNIT
));
942 op0
= adjust_address (op0
, mode
, offset
);
945 mode
= GET_MODE (op0
);
947 /* Now MODE is either some integral mode for a MEM as OP0,
948 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
949 The bit field is contained entirely within OP0.
950 BITPOS is the starting bit number within OP0.
951 (OP0's mode may actually be narrower than MODE.) */
953 if (BYTES_BIG_ENDIAN
)
954 /* BITPOS is the distance between our msb
955 and that of the containing datum.
956 Convert it to the distance from the lsb. */
957 bitpos
= total_bits
- bitsize
- bitpos
;
959 /* Now BITPOS is always the distance between our lsb
962 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
963 we must first convert its mode to MODE. */
965 if (CONST_INT_P (value
))
967 HOST_WIDE_INT v
= INTVAL (value
);
969 if (bitsize
< HOST_BITS_PER_WIDE_INT
)
970 v
&= ((HOST_WIDE_INT
) 1 << bitsize
) - 1;
974 else if ((bitsize
< HOST_BITS_PER_WIDE_INT
975 && v
== ((HOST_WIDE_INT
) 1 << bitsize
) - 1)
976 || (bitsize
== HOST_BITS_PER_WIDE_INT
&& v
== -1))
979 value
= lshift_value (mode
, value
, bitpos
, bitsize
);
983 int must_and
= (GET_MODE_BITSIZE (GET_MODE (value
)) != bitsize
984 && bitpos
+ bitsize
!= GET_MODE_BITSIZE (mode
));
986 if (GET_MODE (value
) != mode
)
987 value
= convert_to_mode (mode
, value
, 1);
990 value
= expand_binop (mode
, and_optab
, value
,
991 mask_rtx (mode
, 0, bitsize
, 0),
992 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
994 value
= expand_shift (LSHIFT_EXPR
, mode
, value
,
995 build_int_cst (NULL_TREE
, bitpos
), NULL_RTX
, 1);
998 /* Now clear the chosen bits in OP0,
999 except that if VALUE is -1 we need not bother. */
1000 /* We keep the intermediates in registers to allow CSE to combine
1001 consecutive bitfield assignments. */
1003 temp
= force_reg (mode
, op0
);
1007 temp
= expand_binop (mode
, and_optab
, temp
,
1008 mask_rtx (mode
, bitpos
, bitsize
, 1),
1009 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
1010 temp
= force_reg (mode
, temp
);
1013 /* Now logical-or VALUE into OP0, unless it is zero. */
1017 temp
= expand_binop (mode
, ior_optab
, temp
, value
,
1018 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
1019 temp
= force_reg (mode
, temp
);
1024 op0
= copy_rtx (op0
);
1025 emit_move_insn (op0
, temp
);
1029 /* Store a bit field that is split across multiple accessible memory objects.
1031 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1032 BITSIZE is the field width; BITPOS the position of its first bit
1034 VALUE is the value to store.
1036 This does not yet handle fields wider than BITS_PER_WORD. */
1039 store_split_bit_field (rtx op0
, unsigned HOST_WIDE_INT bitsize
,
1040 unsigned HOST_WIDE_INT bitpos
, rtx value
)
1043 unsigned int bitsdone
= 0;
1045 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1047 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
1048 unit
= BITS_PER_WORD
;
1050 unit
= MIN (MEM_ALIGN (op0
), BITS_PER_WORD
);
1052 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1053 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1054 that VALUE might be a floating-point constant. */
1055 if (CONSTANT_P (value
) && !CONST_INT_P (value
))
1057 rtx word
= gen_lowpart_common (word_mode
, value
);
1059 if (word
&& (value
!= word
))
1062 value
= gen_lowpart_common (word_mode
,
1063 force_reg (GET_MODE (value
) != VOIDmode
1065 : word_mode
, value
));
1068 while (bitsdone
< bitsize
)
1070 unsigned HOST_WIDE_INT thissize
;
1072 unsigned HOST_WIDE_INT thispos
;
1073 unsigned HOST_WIDE_INT offset
;
1075 offset
= (bitpos
+ bitsdone
) / unit
;
1076 thispos
= (bitpos
+ bitsdone
) % unit
;
1078 /* THISSIZE must not overrun a word boundary. Otherwise,
1079 store_fixed_bit_field will call us again, and we will mutually
1081 thissize
= MIN (bitsize
- bitsdone
, BITS_PER_WORD
);
1082 thissize
= MIN (thissize
, unit
- thispos
);
1084 if (BYTES_BIG_ENDIAN
)
1088 /* We must do an endian conversion exactly the same way as it is
1089 done in extract_bit_field, so that the two calls to
1090 extract_fixed_bit_field will have comparable arguments. */
1091 if (!MEM_P (value
) || GET_MODE (value
) == BLKmode
)
1092 total_bits
= BITS_PER_WORD
;
1094 total_bits
= GET_MODE_BITSIZE (GET_MODE (value
));
1096 /* Fetch successively less significant portions. */
1097 if (CONST_INT_P (value
))
1098 part
= GEN_INT (((unsigned HOST_WIDE_INT
) (INTVAL (value
))
1099 >> (bitsize
- bitsdone
- thissize
))
1100 & (((HOST_WIDE_INT
) 1 << thissize
) - 1));
1102 /* The args are chosen so that the last part includes the
1103 lsb. Give extract_bit_field the value it needs (with
1104 endianness compensation) to fetch the piece we want. */
1105 part
= extract_fixed_bit_field (word_mode
, value
, 0, thissize
,
1106 total_bits
- bitsize
+ bitsdone
,
1111 /* Fetch successively more significant portions. */
1112 if (CONST_INT_P (value
))
1113 part
= GEN_INT (((unsigned HOST_WIDE_INT
) (INTVAL (value
))
1115 & (((HOST_WIDE_INT
) 1 << thissize
) - 1));
1117 part
= extract_fixed_bit_field (word_mode
, value
, 0, thissize
,
1118 bitsdone
, NULL_RTX
, 1);
1121 /* If OP0 is a register, then handle OFFSET here.
1123 When handling multiword bitfields, extract_bit_field may pass
1124 down a word_mode SUBREG of a larger REG for a bitfield that actually
1125 crosses a word boundary. Thus, for a SUBREG, we must find
1126 the current word starting from the base register. */
1127 if (GET_CODE (op0
) == SUBREG
)
1129 int word_offset
= (SUBREG_BYTE (op0
) / UNITS_PER_WORD
) + offset
;
1130 word
= operand_subword_force (SUBREG_REG (op0
), word_offset
,
1131 GET_MODE (SUBREG_REG (op0
)));
1134 else if (REG_P (op0
))
1136 word
= operand_subword_force (op0
, offset
, GET_MODE (op0
));
1142 /* OFFSET is in UNITs, and UNIT is in bits.
1143 store_fixed_bit_field wants offset in bytes. */
1144 store_fixed_bit_field (word
, offset
* unit
/ BITS_PER_UNIT
, thissize
,
1146 bitsdone
+= thissize
;
1150 /* A subroutine of extract_bit_field_1 that converts return value X
1151 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1152 to extract_bit_field. */
1155 convert_extracted_bit_field (rtx x
, enum machine_mode mode
,
1156 enum machine_mode tmode
, bool unsignedp
)
1158 if (GET_MODE (x
) == tmode
|| GET_MODE (x
) == mode
)
1161 /* If the x mode is not a scalar integral, first convert to the
1162 integer mode of that size and then access it as a floating-point
1163 value via a SUBREG. */
1164 if (!SCALAR_INT_MODE_P (tmode
))
1166 enum machine_mode smode
;
1168 smode
= mode_for_size (GET_MODE_BITSIZE (tmode
), MODE_INT
, 0);
1169 x
= convert_to_mode (smode
, x
, unsignedp
);
1170 x
= force_reg (smode
, x
);
1171 return gen_lowpart (tmode
, x
);
1174 return convert_to_mode (tmode
, x
, unsignedp
);
1177 /* A subroutine of extract_bit_field, with the same arguments.
1178 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1179 if we can find no other means of implementing the operation.
1180 if FALLBACK_P is false, return NULL instead. */
1183 extract_bit_field_1 (rtx str_rtx
, unsigned HOST_WIDE_INT bitsize
,
1184 unsigned HOST_WIDE_INT bitnum
, int unsignedp
, rtx target
,
1185 enum machine_mode mode
, enum machine_mode tmode
,
1189 = (MEM_P (str_rtx
)) ? BITS_PER_UNIT
: BITS_PER_WORD
;
1190 unsigned HOST_WIDE_INT offset
, bitpos
;
1192 enum machine_mode int_mode
;
1193 enum machine_mode ext_mode
;
1194 enum machine_mode mode1
;
1195 enum insn_code icode
;
1198 if (tmode
== VOIDmode
)
1201 while (GET_CODE (op0
) == SUBREG
)
1203 bitnum
+= SUBREG_BYTE (op0
) * BITS_PER_UNIT
;
1204 op0
= SUBREG_REG (op0
);
1207 /* If we have an out-of-bounds access to a register, just return an
1208 uninitialized register of the required mode. This can occur if the
1209 source code contains an out-of-bounds access to a small array. */
1210 if (REG_P (op0
) && bitnum
>= GET_MODE_BITSIZE (GET_MODE (op0
)))
1211 return gen_reg_rtx (tmode
);
1214 && mode
== GET_MODE (op0
)
1216 && bitsize
== GET_MODE_BITSIZE (GET_MODE (op0
)))
1218 /* We're trying to extract a full register from itself. */
1222 /* See if we can get a better vector mode before extracting. */
1223 if (VECTOR_MODE_P (GET_MODE (op0
))
1225 && GET_MODE_INNER (GET_MODE (op0
)) != tmode
)
1227 enum machine_mode new_mode
;
1228 int nunits
= GET_MODE_NUNITS (GET_MODE (op0
));
1230 if (GET_MODE_CLASS (tmode
) == MODE_FLOAT
)
1231 new_mode
= MIN_MODE_VECTOR_FLOAT
;
1232 else if (GET_MODE_CLASS (tmode
) == MODE_FRACT
)
1233 new_mode
= MIN_MODE_VECTOR_FRACT
;
1234 else if (GET_MODE_CLASS (tmode
) == MODE_UFRACT
)
1235 new_mode
= MIN_MODE_VECTOR_UFRACT
;
1236 else if (GET_MODE_CLASS (tmode
) == MODE_ACCUM
)
1237 new_mode
= MIN_MODE_VECTOR_ACCUM
;
1238 else if (GET_MODE_CLASS (tmode
) == MODE_UACCUM
)
1239 new_mode
= MIN_MODE_VECTOR_UACCUM
;
1241 new_mode
= MIN_MODE_VECTOR_INT
;
1243 for (; new_mode
!= VOIDmode
; new_mode
= GET_MODE_WIDER_MODE (new_mode
))
1244 if (GET_MODE_NUNITS (new_mode
) == nunits
1245 && GET_MODE_SIZE (new_mode
) == GET_MODE_SIZE (GET_MODE (op0
))
1246 && targetm
.vector_mode_supported_p (new_mode
))
1248 if (new_mode
!= VOIDmode
)
1249 op0
= gen_lowpart (new_mode
, op0
);
1252 /* Use vec_extract patterns for extracting parts of vectors whenever
1254 if (VECTOR_MODE_P (GET_MODE (op0
))
1256 && (optab_handler (vec_extract_optab
, GET_MODE (op0
))->insn_code
1257 != CODE_FOR_nothing
)
1258 && ((bitnum
+ bitsize
- 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))
1259 == bitnum
/ GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))))
1261 enum machine_mode outermode
= GET_MODE (op0
);
1262 enum machine_mode innermode
= GET_MODE_INNER (outermode
);
1263 int icode
= (int) optab_handler (vec_extract_optab
, outermode
)->insn_code
;
1264 unsigned HOST_WIDE_INT pos
= bitnum
/ GET_MODE_BITSIZE (innermode
);
1265 rtx rtxpos
= GEN_INT (pos
);
1267 rtx dest
= NULL
, pat
, seq
;
1268 enum machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
1269 enum machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
1270 enum machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
1272 if (innermode
== tmode
|| innermode
== mode
)
1276 dest
= gen_reg_rtx (innermode
);
1280 if (! (*insn_data
[icode
].operand
[0].predicate
) (dest
, mode0
))
1281 dest
= copy_to_mode_reg (mode0
, dest
);
1283 if (! (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
))
1284 src
= copy_to_mode_reg (mode1
, src
);
1286 if (! (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
))
1287 rtxpos
= copy_to_mode_reg (mode1
, rtxpos
);
1289 /* We could handle this, but we should always be called with a pseudo
1290 for our targets and all insns should take them as outputs. */
1291 gcc_assert ((*insn_data
[icode
].operand
[0].predicate
) (dest
, mode0
)
1292 && (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
)
1293 && (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
));
1295 pat
= GEN_FCN (icode
) (dest
, src
, rtxpos
);
1303 return gen_lowpart (tmode
, dest
);
1308 /* Make sure we are playing with integral modes. Pun with subregs
1311 enum machine_mode imode
= int_mode_for_mode (GET_MODE (op0
));
1312 if (imode
!= GET_MODE (op0
))
1315 op0
= adjust_address (op0
, imode
, 0);
1316 else if (imode
!= BLKmode
)
1318 op0
= gen_lowpart (imode
, op0
);
1320 /* If we got a SUBREG, force it into a register since we
1321 aren't going to be able to do another SUBREG on it. */
1322 if (GET_CODE (op0
) == SUBREG
)
1323 op0
= force_reg (imode
, op0
);
1325 else if (REG_P (op0
))
1328 imode
= smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0
)),
1330 reg
= gen_reg_rtx (imode
);
1331 subreg
= gen_lowpart_SUBREG (GET_MODE (op0
), reg
);
1332 emit_move_insn (subreg
, op0
);
1334 bitnum
+= SUBREG_BYTE (subreg
) * BITS_PER_UNIT
;
1338 rtx mem
= assign_stack_temp (GET_MODE (op0
),
1339 GET_MODE_SIZE (GET_MODE (op0
)), 0);
1340 emit_move_insn (mem
, op0
);
1341 op0
= adjust_address (mem
, BLKmode
, 0);
1346 /* We may be accessing data outside the field, which means
1347 we can alias adjacent data. */
1350 op0
= shallow_copy_rtx (op0
);
1351 set_mem_alias_set (op0
, 0);
1352 set_mem_expr (op0
, 0);
1355 /* Extraction of a full-word or multi-word value from a structure
1356 in a register or aligned memory can be done with just a SUBREG.
1357 A subword value in the least significant part of a register
1358 can also be extracted with a SUBREG. For this, we need the
1359 byte offset of the value in op0. */
1361 bitpos
= bitnum
% unit
;
1362 offset
= bitnum
/ unit
;
1363 byte_offset
= bitpos
/ BITS_PER_UNIT
+ offset
* UNITS_PER_WORD
;
1365 /* If OP0 is a register, BITPOS must count within a word.
1366 But as we have it, it counts within whatever size OP0 now has.
1367 On a bigendian machine, these are not the same, so convert. */
1368 if (BYTES_BIG_ENDIAN
1370 && unit
> GET_MODE_BITSIZE (GET_MODE (op0
)))
1371 bitpos
+= unit
- GET_MODE_BITSIZE (GET_MODE (op0
));
1373 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1374 If that's wrong, the solution is to test for it and set TARGET to 0
1377 /* Only scalar integer modes can be converted via subregs. There is an
1378 additional problem for FP modes here in that they can have a precision
1379 which is different from the size. mode_for_size uses precision, but
1380 we want a mode based on the size, so we must avoid calling it for FP
1382 mode1
= (SCALAR_INT_MODE_P (tmode
)
1383 ? mode_for_size (bitsize
, GET_MODE_CLASS (tmode
), 0)
1386 /* If the bitfield is volatile, we need to make sure the access
1387 remains on a type-aligned boundary. */
1388 if (GET_CODE (op0
) == MEM
1389 && MEM_VOLATILE_P (op0
)
1390 && GET_MODE_BITSIZE (GET_MODE (op0
)) > 0
1391 && flag_strict_volatile_bitfields
> 0)
1392 goto no_subreg_mode_swap
;
1394 if (((bitsize
>= BITS_PER_WORD
&& bitsize
== GET_MODE_BITSIZE (mode
)
1395 && bitpos
% BITS_PER_WORD
== 0)
1396 || (mode1
!= BLKmode
1397 /* ??? The big endian test here is wrong. This is correct
1398 if the value is in a register, and if mode_for_size is not
1399 the same mode as op0. This causes us to get unnecessarily
1400 inefficient code from the Thumb port when -mbig-endian. */
1401 && (BYTES_BIG_ENDIAN
1402 ? bitpos
+ bitsize
== BITS_PER_WORD
1405 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1
),
1406 GET_MODE_BITSIZE (GET_MODE (op0
)))
1407 && GET_MODE_SIZE (mode1
) != 0
1408 && byte_offset
% GET_MODE_SIZE (mode1
) == 0)
1410 && (! SLOW_UNALIGNED_ACCESS (mode
, MEM_ALIGN (op0
))
1411 || (offset
* BITS_PER_UNIT
% bitsize
== 0
1412 && MEM_ALIGN (op0
) % bitsize
== 0)))))
1415 op0
= adjust_address (op0
, mode1
, offset
);
1416 else if (mode1
!= GET_MODE (op0
))
1418 rtx sub
= simplify_gen_subreg (mode1
, op0
, GET_MODE (op0
),
1421 goto no_subreg_mode_swap
;
1425 return convert_to_mode (tmode
, op0
, unsignedp
);
1428 no_subreg_mode_swap
:
1430 /* Handle fields bigger than a word. */
1432 if (bitsize
> BITS_PER_WORD
)
1434 /* Here we transfer the words of the field
1435 in the order least significant first.
1436 This is because the most significant word is the one which may
1437 be less than full. */
1439 unsigned int nwords
= (bitsize
+ (BITS_PER_WORD
- 1)) / BITS_PER_WORD
;
1442 if (target
== 0 || !REG_P (target
))
1443 target
= gen_reg_rtx (mode
);
1445 /* Indicate for flow that the entire target reg is being set. */
1446 emit_clobber (target
);
1448 for (i
= 0; i
< nwords
; i
++)
1450 /* If I is 0, use the low-order word in both field and target;
1451 if I is 1, use the next to lowest word; and so on. */
1452 /* Word number in TARGET to use. */
1453 unsigned int wordnum
1455 ? GET_MODE_SIZE (GET_MODE (target
)) / UNITS_PER_WORD
- i
- 1
1457 /* Offset from start of field in OP0. */
1458 unsigned int bit_offset
= (WORDS_BIG_ENDIAN
1459 ? MAX (0, ((int) bitsize
- ((int) i
+ 1)
1460 * (int) BITS_PER_WORD
))
1461 : (int) i
* BITS_PER_WORD
);
1462 rtx target_part
= operand_subword (target
, wordnum
, 1, VOIDmode
);
1464 = extract_bit_field (op0
, MIN (BITS_PER_WORD
,
1465 bitsize
- i
* BITS_PER_WORD
),
1466 bitnum
+ bit_offset
, 1, target_part
, mode
,
1469 gcc_assert (target_part
);
1471 if (result_part
!= target_part
)
1472 emit_move_insn (target_part
, result_part
);
1477 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1478 need to be zero'd out. */
1479 if (GET_MODE_SIZE (GET_MODE (target
)) > nwords
* UNITS_PER_WORD
)
1481 unsigned int i
, total_words
;
1483 total_words
= GET_MODE_SIZE (GET_MODE (target
)) / UNITS_PER_WORD
;
1484 for (i
= nwords
; i
< total_words
; i
++)
1486 (operand_subword (target
,
1487 WORDS_BIG_ENDIAN
? total_words
- i
- 1 : i
,
1494 /* Signed bit field: sign-extend with two arithmetic shifts. */
1495 target
= expand_shift (LSHIFT_EXPR
, mode
, target
,
1496 build_int_cst (NULL_TREE
,
1497 GET_MODE_BITSIZE (mode
) - bitsize
),
1499 return expand_shift (RSHIFT_EXPR
, mode
, target
,
1500 build_int_cst (NULL_TREE
,
1501 GET_MODE_BITSIZE (mode
) - bitsize
),
1505 /* From here on we know the desired field is smaller than a word. */
1507 /* Check if there is a correspondingly-sized integer field, so we can
1508 safely extract it as one size of integer, if necessary; then
1509 truncate or extend to the size that is wanted; then use SUBREGs or
1510 convert_to_mode to get one of the modes we really wanted. */
1512 int_mode
= int_mode_for_mode (tmode
);
1513 if (int_mode
== BLKmode
)
1514 int_mode
= int_mode_for_mode (mode
);
1515 /* Should probably push op0 out to memory and then do a load. */
1516 gcc_assert (int_mode
!= BLKmode
);
1518 /* OFFSET is the number of words or bytes (UNIT says which)
1519 from STR_RTX to the first word or byte containing part of the field. */
1523 || GET_MODE_SIZE (GET_MODE (op0
)) > UNITS_PER_WORD
)
1526 op0
= copy_to_reg (op0
);
1527 op0
= gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD
, MODE_INT
, 0),
1528 op0
, (offset
* UNITS_PER_WORD
));
1533 /* Now OFFSET is nonzero only for memory operands. */
1534 ext_mode
= mode_for_extraction (unsignedp
? EP_extzv
: EP_extv
, 0);
1535 icode
= unsignedp
? CODE_FOR_extzv
: CODE_FOR_extv
;
1536 if (ext_mode
!= MAX_MACHINE_MODE
1538 && GET_MODE_BITSIZE (ext_mode
) >= bitsize
1539 /* If op0 is a register, we need it in EXT_MODE to make it
1540 acceptable to the format of ext(z)v. */
1541 && !(GET_CODE (op0
) == SUBREG
&& GET_MODE (op0
) != ext_mode
)
1542 && !((REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
1543 && (bitsize
+ bitpos
> GET_MODE_BITSIZE (ext_mode
)))
1544 && check_predicate_volatile_ok (icode
, 1, op0
, GET_MODE (op0
)))
1546 unsigned HOST_WIDE_INT xbitpos
= bitpos
, xoffset
= offset
;
1547 rtx bitsize_rtx
, bitpos_rtx
;
1548 rtx last
= get_last_insn ();
1550 rtx xtarget
= target
;
1551 rtx xspec_target
= target
;
1552 rtx xspec_target_subreg
= 0;
1555 /* If op0 is a register, we need it in EXT_MODE to make it
1556 acceptable to the format of ext(z)v. */
1557 if (REG_P (xop0
) && GET_MODE (xop0
) != ext_mode
)
1558 xop0
= gen_lowpart_SUBREG (ext_mode
, xop0
);
1560 /* Get ref to first byte containing part of the field. */
1561 xop0
= adjust_address (xop0
, byte_mode
, xoffset
);
1563 /* On big-endian machines, we count bits from the most significant.
1564 If the bit field insn does not, we must invert. */
1565 if (BITS_BIG_ENDIAN
!= BYTES_BIG_ENDIAN
)
1566 xbitpos
= unit
- bitsize
- xbitpos
;
1568 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1569 if (BITS_BIG_ENDIAN
&& !MEM_P (xop0
))
1570 xbitpos
+= GET_MODE_BITSIZE (ext_mode
) - unit
;
1572 unit
= GET_MODE_BITSIZE (ext_mode
);
1575 xtarget
= xspec_target
= gen_reg_rtx (tmode
);
1577 if (GET_MODE (xtarget
) != ext_mode
)
1579 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1580 between the mode of the extraction (word_mode) and the target
1581 mode. Instead, create a temporary and use convert_move to set
1584 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget
)),
1585 GET_MODE_BITSIZE (ext_mode
)))
1587 xtarget
= gen_lowpart (ext_mode
, xtarget
);
1588 if (GET_MODE_SIZE (ext_mode
)
1589 > GET_MODE_SIZE (GET_MODE (xspec_target
)))
1590 xspec_target_subreg
= xtarget
;
1593 xtarget
= gen_reg_rtx (ext_mode
);
1596 /* If this machine's ext(z)v insists on a register target,
1597 make sure we have one. */
1598 if (!insn_data
[(int) icode
].operand
[0].predicate (xtarget
, ext_mode
))
1599 xtarget
= gen_reg_rtx (ext_mode
);
1601 bitsize_rtx
= GEN_INT (bitsize
);
1602 bitpos_rtx
= GEN_INT (xbitpos
);
1605 ? gen_extzv (xtarget
, xop0
, bitsize_rtx
, bitpos_rtx
)
1606 : gen_extv (xtarget
, xop0
, bitsize_rtx
, bitpos_rtx
));
1610 if (xtarget
== xspec_target
)
1612 if (xtarget
== xspec_target_subreg
)
1613 return xspec_target
;
1614 return convert_extracted_bit_field (xtarget
, mode
, tmode
, unsignedp
);
1616 delete_insns_since (last
);
1619 /* If OP0 is a memory, try copying it to a register and seeing if a
1620 cheap register alternative is available. */
1621 if (ext_mode
!= MAX_MACHINE_MODE
&& MEM_P (op0
))
1623 enum machine_mode bestmode
;
1625 /* Get the mode to use for inserting into this field. If
1626 OP0 is BLKmode, get the smallest mode consistent with the
1627 alignment. If OP0 is a non-BLKmode object that is no
1628 wider than EXT_MODE, use its mode. Otherwise, use the
1629 smallest mode containing the field. */
1631 if (GET_MODE (op0
) == BLKmode
1632 || (ext_mode
!= MAX_MACHINE_MODE
1633 && GET_MODE_SIZE (GET_MODE (op0
)) > GET_MODE_SIZE (ext_mode
)))
1634 bestmode
= get_best_mode (bitsize
, bitnum
, MEM_ALIGN (op0
),
1635 (ext_mode
== MAX_MACHINE_MODE
1636 ? VOIDmode
: ext_mode
),
1637 MEM_VOLATILE_P (op0
));
1639 bestmode
= GET_MODE (op0
);
1641 if (bestmode
!= VOIDmode
1642 && !(SLOW_UNALIGNED_ACCESS (bestmode
, MEM_ALIGN (op0
))
1643 && GET_MODE_BITSIZE (bestmode
) > MEM_ALIGN (op0
)))
1645 unsigned HOST_WIDE_INT xoffset
, xbitpos
;
1647 /* Compute the offset as a multiple of this unit,
1648 counting in bytes. */
1649 unit
= GET_MODE_BITSIZE (bestmode
);
1650 xoffset
= (bitnum
/ unit
) * GET_MODE_SIZE (bestmode
);
1651 xbitpos
= bitnum
% unit
;
1653 /* Make sure the register is big enough for the whole field. */
1654 if (xoffset
* BITS_PER_UNIT
+ unit
1655 >= offset
* BITS_PER_UNIT
+ bitsize
)
1657 rtx last
, result
, xop0
;
1659 last
= get_last_insn ();
1661 /* Fetch it to a register in that size. */
1662 xop0
= adjust_address (op0
, bestmode
, xoffset
);
1663 xop0
= force_reg (bestmode
, xop0
);
1664 result
= extract_bit_field_1 (xop0
, bitsize
, xbitpos
,
1666 mode
, tmode
, false);
1670 delete_insns_since (last
);
1678 target
= extract_fixed_bit_field (int_mode
, op0
, offset
, bitsize
,
1679 bitpos
, target
, unsignedp
);
1680 return convert_extracted_bit_field (target
, mode
, tmode
, unsignedp
);
1683 /* Generate code to extract a byte-field from STR_RTX
1684 containing BITSIZE bits, starting at BITNUM,
1685 and put it in TARGET if possible (if TARGET is nonzero).
1686 Regardless of TARGET, we return the rtx for where the value is placed.
1688 STR_RTX is the structure containing the byte (a REG or MEM).
1689 UNSIGNEDP is nonzero if this is an unsigned bit field.
1690 MODE is the natural mode of the field value once extracted.
1691 TMODE is the mode the caller would like the value to have;
1692 but the value may be returned with type MODE instead.
1694 If a TARGET is specified and we can store in it at no extra cost,
1695 we do so, and return TARGET.
1696 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1697 if they are equally easy. */
1700 extract_bit_field (rtx str_rtx
, unsigned HOST_WIDE_INT bitsize
,
1701 unsigned HOST_WIDE_INT bitnum
, int unsignedp
, rtx target
,
1702 enum machine_mode mode
, enum machine_mode tmode
)
1704 return extract_bit_field_1 (str_rtx
, bitsize
, bitnum
, unsignedp
,
1705 target
, mode
, tmode
, true);
1708 /* Extract a bit field using shifts and boolean operations
1709 Returns an rtx to represent the value.
1710 OP0 addresses a register (word) or memory (byte).
1711 BITPOS says which bit within the word or byte the bit field starts in.
1712 OFFSET says how many bytes farther the bit field starts;
1713 it is 0 if OP0 is a register.
1714 BITSIZE says how many bits long the bit field is.
1715 (If OP0 is a register, it may be narrower than a full word,
1716 but BITPOS still counts within a full word,
1717 which is significant on bigendian machines.)
1719 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1720 If TARGET is nonzero, attempts to store the value there
1721 and return TARGET, but this is not guaranteed.
1722 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      if (MEM_VOLATILE_P (op0)
          && flag_strict_volatile_bitfields > 0)
        {
          if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
            mode = GET_MODE (op0);
          else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
            mode = GET_MODE (target);
          else
            mode = tmode;
        }
      else
        mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                              MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* If we're accessing a volatile MEM, we can't do the next
         alignment step if it results in a multi-word access where we
         otherwise wouldn't have one.  So, check for that case
         here.  */
      if (MEM_P (op0)
          && MEM_VOLATILE_P (op0)
          && flag_strict_volatile_bitfields > 0
          && bitpos + bitsize <= total_bits
          && bitpos + bitsize
             + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT
             > total_bits)
        {
          if (STRICT_ALIGNMENT)
            {
              static bool informed_about_misalignment = false;
              bool warned;

              if (bitsize == total_bits)
                warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
                                     "mis-aligned access used for structure member");
              else
                warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
                                     "mis-aligned access used for structure bitfield");

              if (! informed_about_misalignment && warned)
                {
                  informed_about_misalignment = true;
                  inform (input_location,
                          "When a volatile object spans multiple type-sized locations,"
                          " the compiler must choose between using a single mis-aligned access to"
                          " preserve the volatility, or using multiple aligned accesses to avoid"
                          " runtime faults.  This code may fail at runtime if the hardware does"
                          " not allow this access.");
                }
            }
        }
      else
        {
          /* Get ref to an aligned byte, halfword, or word containing the field.
             Adjust BITPOS to be position within a word,
             and OFFSET to be the offset of that word.
             Then alter OP0 to refer to that word.  */
          bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
          offset -= (offset % (total_bits / BITS_PER_UNIT));
        }

      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */

  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       target, 0);
}
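
/* As a concrete instance of the signed path above: an 8-bit signed
   field whose lsb-relative BITPOS is 4 selects HImode as the
   narrowest integer mode holding 12 bits, is shifted left by
   16 - 12 = 4 so the field's msb becomes the mode's msb, and is then
   arithmetic-shifted right by 16 - 8 = 8, sign-extending the field.  */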
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  double_int mask;

  mask = double_int_mask (bitsize);
  mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  if (complement)
    mask = double_int_not (mask);

  return immed_double_int_const (mask, mode);
}
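
/* For example, mask_rtx (SImode, 8, 4, 0) yields 0x00000F00 (four
   ones shifted past eight zeros), and the same call with COMPLEMENT
   nonzero yields 0xFFFFF0FF, truncated to the 32 bits of SImode.  */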
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  double_int val;

  val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
  val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  return immed_double_int_const (val, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE, bitsize - bitsdone),
                                 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE,
                                                bitsdone - thissize), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                       NULL_RTX, 0);
}
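
/* For example, on a 32-bit little-endian target a 16-bit field at
   BITPOS 24 takes two iterations: the first extracts the 8 bits at
   positions 24..31 of the first word, the second extracts the low
   8 bits of the next word and shifts them left by 8 before the IOR.  */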
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
         fails, it will happily create (subreg (symbol_ref)) or similar
         invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
        return ret;

      if (GET_MODE (src) == VOIDmode
          || !validate_subreg (mode, src_mode, src, byte))
        return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
        return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
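
/* A typical use is reading a floating-point value's bit pattern as an
   integer: for SFmode, int_mode_for_mode gives SImode, and the value
   is moved through lowparts (and, when MODE is wider, zero-extended
   by convert_modes) rather than converted arithmetically.  */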
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
              tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = expand_normal (amount);
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
          && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
              (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && subreg_lowpart_p (op1)
               && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
        op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it is cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
      && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
        {
          temp = force_reg (mode, shifted);
          shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
                                  unsignedp, OPTAB_LIB_WIDEN);
        }
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
        methods = OPTAB_DIRECT;
      else if (attempt == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;

      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code.  */

              rtx subtarget = target == shifted ? 0 : target;
              tree new_amount, other_amount;
              rtx temp1;
              tree type = TREE_TYPE (amount);
              if (GET_MODE (op1) != TYPE_MODE (type)
                  && GET_MODE (op1) != VOIDmode)
                op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
              new_amount = make_tree (type, op1);
              other_amount
                = fold_build2 (MINUS_EXPR, type,
                               build_int_cst (type, GET_MODE_BITSIZE (mode)),
                               new_amount);

              shifted = force_reg (mode, shifted);

              temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                   mode, shifted, new_amount, 0, 1);
              temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                    mode, shifted, other_amount, subtarget, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }

          temp = expand_binop (mode,
                               left ? lrotate_optab : rrotate_optab,
                               shifted, op1, target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? lshift_optab : rshift_uns_optab,
                             shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? lshift_optab : rshift_arith_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
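
/* Two small examples of the strategies above: a left shift by 2 on a
   target where shifts cost more than two additions is emitted by the
   addition loop as t = x + x; t = t + t;  and a rotate-left by 3 in a
   32-bit mode that the target cannot open-code becomes
   (x << 3) | ((unsigned) x >> 29) via the two-shift IOR fallback.  */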
/* This structure holds the "cost" of a multiply sequence.  The
   "cost" field holds the total rtx_cost of every operator in the
   synthetic multiplication sequence, hence cost(a op b) is defined
   as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
   The "latency" field holds the minimum possible latency of the
   synthetic multiply, on a hypothetical infinitely parallel CPU.
   This is the critical path, or the maximum height, of the expression
   tree which is the sum of rtx_costs on the most expensive path from
   any leaf to the root.  Hence latency(a op b) is defined as zero for
   leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise.  */

struct mult_cost {
  short cost;     /* Total rtx_cost of the multiplication sequence.  */
  short latency;  /* The latency of the multiplication sequence.  */
};

/* This macro is used to compare a pointer to a mult_cost against a
   single integer "rtx_cost" value.  This is equivalent to the macro
   CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}.  */
#define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
                             || ((X)->cost == (Y) && (X)->latency < (Y)))

/* This macro is used to compare two pointers to mult_costs against
   each other.  The macro returns true if X is cheaper than Y.
   Currently, the cheaper of two mult_costs is the one with the
   lower "cost".  If "cost"s are tied, the lower latency is cheaper.  */
#define CHEAPER_MULT_COST(X,Y)  ((X)->cost < (Y)->cost \
                                 || ((X)->cost == (Y)->cost \
                                     && (X)->latency < (Y)->latency))
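
/* For example, with an addition whose rtx_cost is 4, the tree
   (a + b) + (c + d) has cost 4 + 4 + 4 = 12, but latency only
   4 + max (4, 4) = 8, because the two inner additions can execute
   in parallel on the hypothetical CPU described above.  */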
/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero             total := 0;
   alg_m                total := multiplicand;
   alg_shift            total := total * coeff
   alg_add_t_m2         total := total + multiplicand * coeff;
   alg_sub_t_m2         total := total - multiplicand * coeff;
   alg_add_factor       total := total * coeff + total;
   alg_sub_factor       total := total * coeff - total;
   alg_add_t2_m         total := total * coeff + multiplicand;
   alg_sub_t2_m         total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  struct mult_cost cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
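
/* For example, multiplication by 45 (binary 101101) can be recorded
   as op = { alg_m, alg_add_t2_m, alg_add_factor }, log = { 0, 2, 3 }:
   total := x; total := total*4 + x (5*x); total := total*8 + total
   (45*x) -- two shift-add steps in place of a multiply insn.  */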
/* The entry for our multiplication cache/hash table.  */
struct alg_hash_entry {
  /* The number we are multiplying by.  */
  unsigned HOST_WIDE_INT t;

  /* The mode in which we are multiplying something by T.  */
  enum machine_mode mode;

  /* The best multiplication algorithm for t.  */
  enum alg_code alg;

  /* The cost of multiplication if ALG_CODE is not alg_impossible.
     Otherwise, the cost within which multiplication by T is
     impossible.  */
  struct mult_cost cost;

  /* Optimized for speed?  */
  bool speed;
};

/* The number of cache/hash entries.  */
#if HOST_BITS_PER_WIDE_INT == 64
#define NUM_ALG_HASH_ENTRIES 1031
#else
#define NUM_ALG_HASH_ENTRIES 307
#endif

/* Each entry of ALG_HASH caches alg_code for some integer.  This is
   actually a hash table.  If we have a collision, the older
   entry is kicked out.  */
static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};
static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
                        const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
                                 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
                              const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
                                                 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
                                       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
            const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost.cost = zero_cost[speed];
          alg_out->cost.latency = zero_cost[speed];
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].speed == speed
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_alg = alg_hash[hash_index].alg;

      if (cache_alg == alg_impossible)
        {
          /* The cache tells us that it's impossible to synthesize
             multiplication by T within alg_hash[hash_index].cost.  */
          if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
            /* COST_LIMIT is at least as restrictive as the one
               recorded in the hash table, in which case we have no
               hope of synthesizing a multiplication.  Just
               return.  */
            return;

          /* If we get here, COST_LIMIT is less restrictive than the
             one recorded in the hash table, so we may be able to
             synthesize a multiplication.  Proceed as if we didn't
             have the cache entry.  */
        }
      else
        {
          if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
            /* The cached algorithm shows that this multiplication
               requires more cost than COST_LIMIT.  Just return.  This
               way, we don't clobber this cache entry with
               alg_impossible but retain useful information.  */
            return;

          cache_hit = true;

          switch (cache_alg)
            {
            case alg_shift:
              goto do_alg_shift;

            case alg_add_t_m2:
            case alg_sub_t_m2:
              goto do_alg_addsub_t_m2;

            case alg_add_factor:
            case alg_sub_factor:
              goto do_alg_addsub_factor;

            case alg_add_t2_m:
              goto do_alg_add_t2_m;

            case alg_sub_t2_m:
              goto do_alg_sub_t2_m;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);  /* m = number of low zero bits */
      if (m < maxm)
        {
          q = t >> m;
          /* The function expand_shift will choose between a shift and
             a sequence of additions, so the observed cost is given as
             MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]).  */
          op_cost = m * add_cost[speed][mode];
          if (shift_cost[speed][mode][m] < op_cost)
            op_cost = shift_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, q, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
            }

          /* See if treating ORIG_T as a signed number yields a better
             sequence.  Try this sequence only for a negative ORIG_T
             as it would be useless for a non-negative ORIG_T.  */
          if ((HOST_WIDE_INT) orig_t < 0)
            {
              /* Shift ORIG_T as follows because a right shift of a
                 negative-valued signed type is implementation
                 defined.  */
              q = ~(~orig_t >> m);
              /* The function expand_shift will choose between a shift
                 and a sequence of additions, so the observed cost is
                 given as MIN (m * add_cost[speed][mode],
                 shift_cost[speed][mode][m]).  */
              op_cost = m * add_cost[speed][mode];
              if (shift_cost[speed][mode][m] < op_cost)
                op_cost = shift_cost[speed][mode][m];
              new_limit.cost = best_cost.cost - op_cost;
              new_limit.latency = best_cost.latency - op_cost;
              synth_mult (alg_in, q, &new_limit, mode);

              alg_in->cost.cost += op_cost;
              alg_in->cost.latency += op_cost;
              if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
                {
                  struct algorithm *x;
                  best_cost = alg_in->cost;
                  x = alg_in, alg_in = best_alg, best_alg = x;
                  best_alg->log[best_alg->ops] = m;
                  best_alg->op[best_alg->ops] = alg_shift;
                }
            }
        }
      if (cache_hit)
        goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtract 1 produces slightly better code and results in algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

          op_cost = add_cost[speed][mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t + 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

          op_cost = add_cost[speed][mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t - 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
            }
        }

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
         quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftsub1_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
                      &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }

      if (cache_hit)
        goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_add_factor))
        {
          /* If the target has a cheap shift-and-add instruction use
             that in preference to a shift insn followed by an add insn.
             Assume that the shift-and-add is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
          if (shiftadd_cost[speed][mode][m] < op_cost)
            {
              op_cost = shiftadd_cost[speed][mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[speed][mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_sub_factor))
        {
          /* If the target has a cheap shift-and-subtract insn use
             that in preference to a shift insn followed by a sub insn.
             Assume that the shift-and-sub is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
          if (shiftsub0_cost[speed][mode][m] < op_cost)
            {
              op_cost = shiftsub0_cost[speed][mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[speed][mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
            }
          break;
        }
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftadd_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
            }
        }
      if (cache_hit)
        goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftsub0_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
            }
        }
      if (cache_hit)
        goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
         this case (that is, <T, MODE, COST_LIMIT>) so that next time
         we are asked to find an algorithm for T within the same or
         lower COST_LIMIT, we can immediately return to the
         caller.  */
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = alg_impossible;
      alg_hash[hash_index].cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
      alg_hash[hash_index].cost.cost = best_cost.cost;
      alg_hash[hash_index].cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
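
/* To see the recursion at work, consider t = 30: the low zero bit
   yields alg_shift with m = 1 and a recursive call on 15; 15 ends in
   ...111, so the (t + 1) case recurses on 16, itself a pure shift of
   alg_m.  The resulting plan is 30*x = ((x << 4) - x) << 1, i.e. two
   shifts and one subtraction.  */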
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these cost less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
                     struct algorithm *alg, enum mult_variant *variant,
                     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[speed][mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
        {
          limit.cost = alg->cost.cost - op_cost;
          limit.latency = alg->cost.latency - op_cost;
        }
      else
        {
          limit.cost = mult_cost - op_cost;
          limit.latency = mult_cost - op_cost;
        }

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
        *alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[speed][mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
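
/* For example, for VAL == -15 the sequence based on -VAL == 15
   (one shift, one subtract: (x << 4) - x) plus a final negation is
   usually cheaper than operating on the bit pattern of -15 directly,
   so *VARIANT becomes negate_variant; the VAL - 1 form with one final
   addition is the add_variant case that proves useful for the
   multipliers generated by division by constants.  */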
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
                   rtx target, const struct algorithm *alg,
                   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
        = (opno == alg->ops - 1 && target != 0 && variant != add_variant
           && !optimize)
          ? target : 0;
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
        {
        case alg_shift:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                NULL_RTX, 0);
          val_so_far <<= log;
          break;

        case alg_add_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += (HOST_WIDE_INT) 1 << log;
          break;

        case alg_sub_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far -= (HOST_WIDE_INT) 1 << log;
          break;

        case alg_add_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) + 1;
          break;

        case alg_sub_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) - 1;
          break;

        case alg_add_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += val_so_far << log;
          break;

        case alg_sub_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                 (add_target
                                  ? add_target : (optimize ? 0 : tem)));
          val_so_far = (val_so_far << log) - val_so_far;
          break;

        default:
          gcc_unreachable ();
        }

      /* Write a REG_EQUAL note on the last insn so that we can cse
         multiplication sequences.  Note that if ACCUM is a SUBREG,
         we've set the inner register and must properly indicate
         that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
        {
          nmode = GET_MODE (SUBREG_REG (accum));
          tem = gen_lowpart (nmode, op0);
        }

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
                           gen_rtx_MULT (nmode, tem,
                                         GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
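
/* Continuing the example for VAL == 45 with the algorithm
   { alg_m, alg_add_t2_m log 2, alg_add_factor log 3 }, the loop emits
   accum = x; accum = (accum << 2) + x; accum = (accum << 3) + accum;
   while val_so_far steps 1 -> 5 -> 45, and the final gcc_assert
   verifies it equals VAL in the significant bits of MODE.  */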
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
             int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
  if (op1 == const0_rtx)
    return const0_rtx;
  if (op1 == const1_rtx)
    return op0;
  if (op1 == constm1_rtx)
    return expand_unop (mode,
                        GET_MODE_CLASS (mode) == MODE_INT
                        && !unsignedp && flag_trapv
                        ? negv_optab : neg_optab,
                        op0, target, 0);

  /* These are the operations that are potentially turned into a sequence
     of shifts and additions.  */
  if (SCALAR_INT_MODE_P (mode)
      && (unsignedp || !flag_trapv))
    {
      HOST_WIDE_INT coeff = 0;
      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
         less than or equal in size to `unsigned int' this doesn't matter.
         If the mode is larger than `unsigned int', then synth_mult works
         only if the constant value exactly fits in an `unsigned int' without
         any truncation.  This means that multiplying by negative values does
         not work; results are off by 2^32 on a 32 bit machine.  */

      if (CONST_INT_P (op1))
        {
          /* Attempt to handle multiplication of DImode values by negative
             coefficients, by performing the multiplication by a positive
             multiplier and then inverting the result.  */
          if (INTVAL (op1) < 0
              && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
            {
              /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
                 result is interpreted as an unsigned coefficient.
                 Exclude cost of op0 from max_cost to match the cost
                 calculation of the synth_mult.  */
              max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
                         - neg_cost[speed][mode];
              if (max_cost > 0
                  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
                                          &variant, max_cost))
                {
                  rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
                                                NULL_RTX, &algorithm,
                                                variant);
                  return expand_unop (mode, neg_optab, temp, target, 0);
                }
            }
          else coeff = INTVAL (op1);
        }
      else if (GET_CODE (op1) == CONST_DOUBLE)
        {
          /* If we are multiplying in DImode, it may still be a win
             to try to work with shifts and adds.  */
          if (CONST_DOUBLE_HIGH (op1) == 0
              && CONST_DOUBLE_LOW (op1) > 0)
            coeff = CONST_DOUBLE_LOW (op1);
          else if (CONST_DOUBLE_LOW (op1) == 0
                   && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
            {
              int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
                          + HOST_BITS_PER_WIDE_INT;
              return expand_shift (LSHIFT_EXPR, mode, op0,
                                   build_int_cst (NULL_TREE, shift),
                                   target, unsignedp);
            }
        }

      /* We used to test optimize here, on the grounds that it's better to
         produce a smaller program when -O is not used.  But this causes
         such a terrible slowdown sometimes that it seems better to always
         use synth_mult.  */
      if (coeff != 0)
        {
          /* Special case powers of two.  */
          if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
            return expand_shift (LSHIFT_EXPR, mode, op0,
                                 build_int_cst (NULL_TREE, floor_log2 (coeff)),
                                 target, unsignedp);

          /* Exclude cost of op0 from max_cost to match the cost
             calculation of the synth_mult.  */
          max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
          if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                                   max_cost))
            return expand_mult_const (mode, op0, coeff, target,
                                      &algorithm, variant);
        }
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
        {
          op0 = force_reg (GET_MODE (op0), op0);
          return expand_binop (mode, add_optab, op0, op0,
                               target, unsignedp, OPTAB_LIB_WIDEN);
        }
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
                      ! unsignedp
                      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
                      ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();

  if (CONST_INT_P (op1)
      && (INTVAL (op1) >= 0
          || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
    {
      HOST_WIDE_INT coeff = INTVAL (op1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_shift (LSHIFT_EXPR, mode, op0,
                               build_int_cst (NULL_TREE, floor_log2 (coeff)),
                               target, unsignedp);
        }

      /* Exclude cost of op0 from max_cost to match the cost
         calculation of the synth_mult.  */
      max_cost = mul_widen_cost[speed][mode];
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                               max_cost))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_mult_const (mode, op0, coeff, target,
                                    &algorithm, variant);
        }
    }
  return expand_binop (mode, this_optab, op0, op1, target,
                       unsignedp, OPTAB_LIB_WIDEN);
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
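
/* E.g. ceil_log2 (5) == floor_log2 (4) + 1 == 3, and
   ceil_log2 (8) == floor_log2 (7) + 1 == 3; for x == 1 this relies
   on floor_log2 (0) being -1, giving ceil_log2 (1) == 0.  */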
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   the multiplier more freely.

   The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
                   rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
              || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
        break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = GEN_INT (mhigh_lo & mask);
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = GEN_INT (mhigh_lo);
      return mhigh_hi;
    }
}
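
/* As a worked example, choose_multiplier (5, 32, 32, ...) computes
   lgup = 3, mlow = 2^35/5 and mhigh = (2^35 + 2^3)/5; the reduction
   loop leaves the familiar magic constant 0xCCCCCCCD with
   *POST_SHIFT_PTR == 2 and a zero return value (the multiplier fits
   in 32 bits), so unsigned x / 5 becomes the high 32 bits of
   x * 0xCCCCCCCD shifted right by 2.  */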
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? ~(unsigned HOST_WIDE_INT) 0
          : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;         /* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
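
/* For example, invert_mod2n (3, 8) starts from y = 3 (already exact
   mod 2^3) and iterates y = y * (2 - 3*y) mod 256: first 235 (exact
   mod 2^6), then 171; indeed 3 * 171 == 513 == 1 (mod 256).  */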
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
                             rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
                     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
                            rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[speed][mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
          + 4 * add_cost[speed][mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                            tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
      && mul_widen_cost[speed][wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
                          unsignedp, OPTAB_WIDEN);
      if (tem)
        return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
         constant multiplier is correctly sign or zero extended.
         Use a sequence to clean-up any instructions emitted by
         the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
                          unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
        {
          emit_insn (insns);
          return extract_high_half (mode, tem);
        }
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
          + 4 * add_cost[speed][mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          tem = extract_high_half (mode, tem);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                              target, unsignedp);
        }
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
                      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
                                       unsignedp, max_cost);

  extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost[speed][mode];
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
                           max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
         cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
                                        alg.cost.cost + extra_cost);
      if (tem)
        return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
        tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
                                     unsignedp, max_cost);
}
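
/* Editorial worked example (not part of the original source): for a
   32-bit unsigned division by the constant 7, choose_multiplier yields
   a 33-bit multiplier, so the mh != 0 fixup path of expand_divmod
   below applies.  A minimal C sketch of the resulting instruction
   sequence, assuming 32-bit ints and a 64-bit unsigned long long:  */

static unsigned int ATTRIBUTE_UNUSED
udiv7_sketch (unsigned int x)
{
  /* High half of x * 0x24924925, the low 32 bits of the multiplier.  */
  unsigned int t1 = (unsigned int) (((unsigned long long) x * 0x24924925u) >> 32);
  unsigned int t2 = x - t1;          /* subtract, halve, re-add: the  */
  unsigned int t4 = t1 + (t2 >> 1);  /* cheap fixup for the 33rd bit  */
  return t4 >> 2;                    /* e.g. x == 100 yields 14       */
}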
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
      if (signmask)
        {
          signmask = force_reg (mode, signmask);
          masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
          shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

          /* Use the rtx_cost of a LSHIFTRT instruction to determine
             which instruction sequence to use.  If logical right shifts
             are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
             use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

          temp = gen_rtx_LSHIFTRT (mode, result, shift);
          if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
              || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
            {
              temp = expand_binop (mode, xor_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, xor_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          else
            {
              signmask = expand_binop (mode, lshr_optab, signmask, shift,
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
              signmask = force_reg (mode, signmask);

              temp = expand_binop (mode, add_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          return temp;
        }
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
                 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
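
/* Editorial sketch (not part of the original source): the branch-free
   sequence built above, specialized to d == 8 and 32-bit two's
   complement ints.  B plays the role of the shifted sign mask.  */

static int ATTRIBUTE_UNUSED
smod8_sketch (int x)
{
  int b = (x < 0) ? 7 : 0;      /* signmask >> (bits - logd)     */
  return ((x + b) & 7) - b;     /* e.g. -9 yields -1, 9 yields 1 */
}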
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx so, if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             build_int_cst (NULL_TREE, ushift),
                             NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
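
/* Editorial sketch (not part of the original source): every variant
   above computes the same bias-then-shift division.  Assuming 32-bit
   two's complement ints and an arithmetic >> on signed operands (a GCC
   guarantee, though implementation-defined in plain C):  */

static int ATTRIBUTE_UNUSED
sdiv_pow2_sketch (int x, int logd)
{
  int d = 1 << logd;
  int bias = (x >> 31) & (d - 1);   /* d - 1 if x < 0, else 0       */
  return (x + bias) >> logd;        /* truncates: -7 / 4 yields -1  */
}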
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
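/* Editorial worked example (not from the original source), checking the
   mod-3 trick above: for unsigned 32-bit x <= 0x1fffffff the remainder
   can be read off the top two bits of the low half of the product, e.g.

       (unsigned int) (100 * 0x55555556U) >> 30

   is 0x55555598 >> 30 == 1, and indeed 100 % 3 == 1.  */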
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = unsignedp ? udiv_cost[speed][compute_mode]
                       : sdiv_cost[speed][compute_mode];
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }
  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh;
                int pre_shift, post_shift;
                int dummy;
                rtx ml;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_cst (NULL_TREE,
                                                            pre_shift),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
                                                          compute_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost[speed][compute_mode][post_shift - 1]
                                 + shift_cost[speed][compute_mode][1]
                                 + 2 * add_cost[speed][compute_mode]);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, 1),
                               NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               build_int_cst (NULL_TREE, post_shift - 1),
                               tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, 1);
                            extra_cost
                              = (shift_cost[speed][compute_mode][pre_shift]
                                 + shift_cost[speed][compute_mode][post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, post_shift),
                               tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (HOST_BITS_PER_WIDE_INT >= size
                         && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
                                      : sdiv_pow2_cheap[speed][compute_mode])
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                                        : sdiv_optab),
                                             compute_mode)->insn_code
                              != CODE_FOR_nothing)
                             || (optab_handler(sdivmod_optab,
                                               compute_mode)
                                 ->insn_code != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap[speed][compute_mode]
                        && ((optab_handler (sdiv_optab, compute_mode)->insn_code
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, compute_mode)->insn_code
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            GEN_INT
                                                              (trunc_int_for_mode
                                                                (abs_d,
                                                                 compute_mode))));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &mlr, &post_shift, &lgup);
                    ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + add_cost[speed][compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + 2 * add_cost[speed][compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
            break;
          }
      fail1:
        delete_insns_since (last);
        break;
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
      /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);
            rtx ml;

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       build_int_cst (NULL_TREE, pre_shift),
                       tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + 2 * add_cost[speed][compute_mode]);
                        t3 = expand_mult_highpart (compute_mode, t2, ml,
                                                   NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t3,
                               build_int_cst (NULL_TREE, post_shift),
                               NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   build_int_cst (NULL_TREE, size - 1),
                   NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;

      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   gets used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_unique_reg_note (insn,
                                 REG_EQUAL,
                                 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                 compute_mode,
                                                 op0, op1));
          }
        break;

      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 build_int_cst (NULL_TREE, size - 1),
                                 NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        gcc_unreachable ();
      }

  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)->insn_code
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)->insn_code
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              quotient = sign_expand_binop (compute_mode,
                                            udiv_optab, sdiv_optab,
                                            op0, op1, target,
                                            unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
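
/* Editorial sketch (not part of the original source): the FLOOR and
   CEIL fixups above are the RTL form of the usual adjustment on top of
   C's truncating division.  Assuming 32-bit ints and y != 0:  */

static int ATTRIBUTE_UNUSED
floor_div_sketch (int x, int y)
{
  int q = x / y;                        /* truncating quotient  */
  int r = x % y;                        /* truncating remainder */

  /* If inexact and the operand signs differ, round down.  */
  if (r != 0 && (x < 0) != (y < 0))
    q--;
  return q;                             /* e.g. -7 / 2 yields -4 */
}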
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree t = NULL_TREE;
        int i;

        /* Build a tree with vector elements.  */
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
          }

        return build_vector (type, t);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  rtx op0, last, comparison, subtarget, pattern;
  enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
  if (!x || !y
      || !insn_data[icode].operand[2].predicate
          (x, insn_data[icode].operand[2].mode)
      || !insn_data[icode].operand[3].predicate
          (y, insn_data[icode].operand[3].mode)
      || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  if (optimize
      || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
    subtarget = gen_reg_rtx (result_mode);
  else
    subtarget = target;

  pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
  if (!pattern)
    return NULL_RTX;
  emit_insn (pattern);

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
                    && 0 == (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (result_mode) -1))));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
           && (STORE_FLAG_VALUE
               & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
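
/* Editorial sketch (not part of the original source): the sign-bit
   normalization above, for a hypothetical target where only the sign
   bit of the raw scc result is meaningful, assuming 32-bit ints.  */

static int ATTRIBUTE_UNUSED
normalize_signbit_sketch (int raw, int normalizep)
{
  /* Shift the sign bit down: logical shift for a 0/1 result,
     arithmetic shift for a 0/-1 result.  */
  if (normalizep == 1)
    return (int) ((unsigned int) raw >> 31);
  return raw >> 31;
}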
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
                              & ((HOST_WIDE_INT) 1
                                 << (GET_MODE_BITSIZE (word_mode) -1))));
          return target;
        }
      return 0;
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == ((unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (mode) - 1))))))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
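
/* Editorial sketch (not part of the original source): the special
   "A < 0 / A >= 0" path above, assuming 32-bit two's complement ints
   and a requested 0/1 result.  */

static int ATTRIBUTE_UNUSED
store_flag_sign_sketch (int a, int want_ge)
{
  if (want_ge)
    a = ~a;                               /* ones complement for GE   */
  return (int) ((unsigned int) a >> 31);  /* sign bit to the low bit  */
}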
5505 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5506 and storing in TARGET. Normally return TARGET.
5507 Return 0 if that cannot be done.
5509 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5510 it is VOIDmode, they cannot both be CONST_INT.
5512 UNSIGNEDP is for the case where we have to widen the operands
5513 to perform the operation. It says to use zero-extension.
5515 NORMALIZEP is 1 if we should convert the result to be either zero
5516 or one. Normalize is -1 if we should convert the result to be
5517 either zero or -1. If NORMALIZEP is zero, the result will be left
5518 "raw" out of the scc insn. */
5521 emit_store_flag (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
5522 enum machine_mode mode
, int unsignedp
, int normalizep
)
5524 enum machine_mode target_mode
= target
? GET_MODE (target
) : VOIDmode
;
5525 enum rtx_code rcode
;
5527 rtx tem
, last
, trueval
;
5529 tem
= emit_store_flag_1 (target
, code
, op0
, op1
, mode
, unsignedp
, normalizep
,
5534 /* If we reached here, we can't do this with a scc insn, however there
5535 are some comparisons that can be done in other ways. Don't do any
5536 of these cases if branches are very cheap. */
5537 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5540 /* See what we need to return. We can only return a 1, -1, or the
5543 if (normalizep
== 0)
5545 if (STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
5546 normalizep
= STORE_FLAG_VALUE
;
5548 else if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
5549 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
5550 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1)))
5556 last
= get_last_insn ();
5558 /* If optimizing, use different pseudo registers for each insn, instead
5559 of reusing the same pseudo. This leads to better CSE, but slows
5560 down the compiler, since there are more pseudos */
5561 subtarget
= (!optimize
5562 && (target_mode
== mode
)) ? target
: NULL_RTX
;
5563 trueval
= GEN_INT (normalizep
? normalizep
: STORE_FLAG_VALUE
);
5565 /* For floating-point comparisons, try the reverse comparison or try
5566 changing the "orderedness" of the comparison. */
5567 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5569 enum rtx_code first_code
;
5572 rcode
= reverse_condition_maybe_unordered (code
);
5573 if (can_compare_p (rcode
, mode
, ccp_store_flag
)
5574 && (code
== ORDERED
|| code
== UNORDERED
5575 || (! HONOR_NANS (mode
) && (code
== LTGT
|| code
== UNEQ
))
5576 || (! HONOR_SNANS (mode
) && (code
== EQ
|| code
== NE
))))
5578 int want_add
= ((STORE_FLAG_VALUE
== 1 && normalizep
== -1)
5579 || (STORE_FLAG_VALUE
== -1 && normalizep
== 1));
5581 /* For the reverse comparison, use either an addition or a XOR. */
5583 && rtx_cost (GEN_INT (normalizep
), PLUS
,
5584 optimize_insn_for_speed_p ()) == 0)
5586 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5587 STORE_FLAG_VALUE
, target_mode
);
5589 return expand_binop (target_mode
, add_optab
, tem
,
5590 GEN_INT (normalizep
),
5591 target
, 0, OPTAB_WIDEN
);
5594 && rtx_cost (trueval
, XOR
,
5595 optimize_insn_for_speed_p ()) == 0)
5597 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5598 normalizep
, target_mode
);
5600 return expand_binop (target_mode
, xor_optab
, tem
, trueval
,
5601 target
, INTVAL (trueval
) >= 0, OPTAB_WIDEN
);
5605 delete_insns_since (last
);
5607 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5608 if (code
== ORDERED
|| code
== UNORDERED
)
5611 and_them
= split_comparison (code
, mode
, &first_code
, &code
);
5613 /* If there are no NaNs, the first comparison should always fall through.
5614 Effectively change the comparison to the other one. */
5615 if (!HONOR_NANS (mode
))
5617 gcc_assert (first_code
== (and_them
? ORDERED
: UNORDERED
));
5618 return emit_store_flag_1 (target
, code
, op0
, op1
, mode
, 0, normalizep
,
5622 #ifdef HAVE_conditional_move
5623 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5624 conditional move. */
5625 tem
= emit_store_flag_1 (subtarget
, first_code
, op0
, op1
, mode
, 0,
5626 normalizep
, target_mode
);
5631 tem
= emit_conditional_move (target
, code
, op0
, op1
, mode
,
5632 tem
, const0_rtx
, GET_MODE (tem
), 0);
5634 tem
= emit_conditional_move (target
, code
, op0
, op1
, mode
,
5635 trueval
, tem
, GET_MODE (tem
), 0);
5638 delete_insns_since (last
);
5645 /* The remaining tricks only apply to integer comparisons. */
5647 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5650 /* If this is an equality comparison of integers, we can try to exclusive-or
5651 (or subtract) the two operands and use a recursive call to try the
5652 comparison with zero. Don't do any of these cases if branches are
5655 if ((code
== EQ
|| code
== NE
) && op1
!= const0_rtx
)
5657 tem
= expand_binop (mode
, xor_optab
, op0
, op1
, subtarget
, 1,
5661 tem
= expand_binop (mode
, sub_optab
, op0
, op1
, subtarget
, 1,
5664 tem
= emit_store_flag (target
, code
, tem
, const0_rtx
,
5665 mode
, unsignedp
, normalizep
);
5669 delete_insns_since (last
);
5672 /* For integer comparisons, try the reverse comparison. However, for
5673 small X and if we'd have anyway to extend, implementing "X != 0"
5674 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5675 rcode
= reverse_condition (code
);
5676 if (can_compare_p (rcode
, mode
, ccp_store_flag
)
5677 && ! (optab_handler (cstore_optab
, mode
)->insn_code
== CODE_FOR_nothing
5679 && GET_MODE_SIZE (mode
) < UNITS_PER_WORD
5680 && op1
== const0_rtx
))
5682 int want_add
= ((STORE_FLAG_VALUE
== 1 && normalizep
== -1)
5683 || (STORE_FLAG_VALUE
== -1 && normalizep
== 1));
      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;
  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */
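  /* A quick sanity check of the identity (illustrative only): for A == 0,
     0 | -1 == -1 (sign bit set); for A == 3, 3 | 2 == 3 (clear); for
     A == -4, -4 | -5 == -1 in two's complement (set).  */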
  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */
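  /* Checking a few values (illustrative only, assuming 32-bit words): for
     A == 5, (5 >> 31) - 5 == -5 (sign bit set); for A == 0, 0 - 0 == 0
     (clear); for A == -3, -1 - (-3) == 2 (clear).  */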
  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */
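      /* For instance (illustrative only): with OP0 == -7 and code == NE,
         ABS yields 7 and negation yields -7, whose sign bit is the desired
         "nonzero" flag; with OP0 == 0, ABS yields 0 and subtracting 1 for
         EQ yields -1, whose sign bit flags equality with zero.  */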
      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */
      if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }
      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }
      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */
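      /* The underlying identity (illustrative only): (-X | X) has its sign
         bit set exactly when X != 0, since either a nonzero X or its
         negation is negative; complementing the result flips the bit so it
         tests X == 0 instead.  */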
      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        subtarget, normalizep == 1);
  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;
  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);
  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
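  /* The fallback sequence emitted below is, in outline (illustrative
     pseudo-code, not literal RTL):

         target = trueval;
         if (op0 <code> op1) goto label;
         target = falseval;
       label:
  */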
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }
  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));
  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
          || code == ORDERED || code == UNORDERED
          || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
          || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);
      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
          || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
        {
          falseval = trueval;
          trueval = const0_rtx;
          code = rcode;
        }
    }
  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */
static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}