/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "insn-config.h"
#include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
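
/* Example: EXACT_POWER_OF_2_OR_ZERO_P (8) is true, since 8 & 7 == 0;
   EXACT_POWER_OF_2_OR_ZERO_P (12) is false, since 12 & 11 == 8.  The
   trick is that x & (x - 1) clears the lowest set bit of x, so the
   result is zero exactly when x has at most one bit set.  */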
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int sdiv_cost[NUM_MACHINE_MODES];
static int udiv_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub;	rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);
  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.sdiv, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.sdiv_32, mode);
      PUT_MODE (&all.smod_32, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);
      sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
      udiv_cost[mode] = rtx_cost (&all.udiv, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
			       <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
			       <= 4 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  PUT_MODE (&all.zext, wider_mode);
	  PUT_MODE (&all.wide_mult, wider_mode);
	  PUT_MODE (&all.wide_lshr, wider_mode);
	  XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

	  mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
	  mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
	}

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
	{
	  XEXP (&all.shift, 1) = cint[m];
	  XEXP (&all.shift_mult, 1) = pow2[m];

	  shift_cost[mode][m] = rtx_cost (&all.shift, SET);
	  shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
	  shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
	}
    }
}
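
/* A concrete reading of the tables filled in above: after init_expmed
   runs, shift_cost[SImode][3], for instance, holds the target's rtx_cost
   for (ashift:SI (reg:SI) (const_int 3)), and sdiv_pow2_cheap[SImode]
   records whether a division by 32 costs no more than two additions.
   Later expanders consult these cached numbers instead of building and
   costing fresh RTL for every candidate sequence.  */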
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
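
/* Example: negate_rtx (SImode, const1_rtx) folds at compile time to
   (const_int -1) via simplify_unary_operation, while negate_rtx on a
   pseudo-register instead emits a NEG through expand_unop and returns
   the result register.  */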
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	data = &insn_data[CODE_FOR_insv];
      else
	return MAX_MACHINE_MODE;
      break;

    case EP_extv:
      if (HAVE_extv)
	data = &insn_data[CODE_FOR_extv];
      else
	return MAX_MACHINE_MODE;
      break;

    case EP_extzv:
      if (HAVE_extzv)
	data = &insn_data[CODE_FOR_extzv];
      else
	return MAX_MACHINE_MODE;
      break;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
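
/* A typical use, as seen in store_bit_field below:

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   The caller then compares GET_MODE_BITSIZE (op_mode) against the field
   width to decide whether the target's insv pattern can handle it.  */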
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */
rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
	{
	  int difference = inner_mode_size - outer_mode_size;

	  if (WORDS_BIG_ENDIAN)
	    byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    byte_offset += difference % UNITS_PER_WORD;
	}
      else
	byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }
  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return value;
  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
	  != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return dest;
	}
    }
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	     || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
	op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				   byte_offset);
      emit_move_insn (op0, value);
      return value;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  op0 = SUBREG_REG (op0);
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }
550 if (bitsize
> BITS_PER_WORD
)
552 /* Here we transfer the words of the field
553 in the order least significant first.
554 This is because the most significant word is the one which may
556 However, only do that if the value is not BLKmode. */
558 unsigned int backwards
= WORDS_BIG_ENDIAN
&& fieldmode
!= BLKmode
;
559 unsigned int nwords
= (bitsize
+ (BITS_PER_WORD
- 1)) / BITS_PER_WORD
;
562 /* This is the mode we must force value to, so that there will be enough
563 subwords to extract. Note that fieldmode will often (always?) be
564 VOIDmode, because that is what store_field uses to indicate that this
565 is a bit field, but passing VOIDmode to operand_subword_force
567 fieldmode
= GET_MODE (value
);
568 if (fieldmode
== VOIDmode
)
569 fieldmode
= smallest_mode_for_size (nwords
* BITS_PER_WORD
, MODE_INT
);
571 for (i
= 0; i
< nwords
; i
++)
573 /* If I is 0, use the low-order word in both field and target;
574 if I is 1, use the next to lowest word; and so on. */
575 unsigned int wordnum
= (backwards
? nwords
- i
- 1 : i
);
576 unsigned int bit_offset
= (backwards
577 ? MAX ((int) bitsize
- ((int) i
+ 1)
580 : (int) i
* BITS_PER_WORD
);
582 store_bit_field (op0
, MIN (BITS_PER_WORD
,
583 bitsize
- i
* BITS_PER_WORD
),
584 bitnum
+ bit_offset
, word_mode
,
585 operand_subword_force (value
, wordnum
, fieldmode
));
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    {
	      /* Since this is a destination (lvalue), we can't copy
		 it to a pseudo.  We can remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may
		 have been added above.  */
	      gcc_assert (GET_CODE (op0) == SUBREG
			  && (GET_MODE_SIZE (GET_MODE (op0))
			      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
	      op0 = SUBREG_REG (op0);
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
							VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;
      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      if (MEM_P (op0)
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment. If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode. Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);
      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (maxmode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), maxmode);
	  else
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    gcc_assert (CONSTANT_P (value));
	}
      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);
      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);
      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);
  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */
  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((REG_P (value) || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }
  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
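
/* The sequence built above is a classic read-modify-write.  For a 4-bit
   field at bit position 8 of an SImode word, mask_rtx yields ~0xf00, so
   the emitted code is roughly

       temp = op0 & ~0xf00;
       temp = temp | (value << 8);
       op0 = temp;

   where the AND is skipped when VALUE is known to be all ones and the
   IOR is skipped when it is zero.  */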
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (!MEM_P (value) || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}
      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;
      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
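
/* Example: storing a 12-bit field starting at bit 28 of a pair of 32-bit
   words takes two iterations of the loop above.  The first stores 4 bits
   (unit - thispos) into word 0; the second stores the remaining 8 bits
   into word 1.  Each piece is handled by an ordinary
   store_fixed_bit_field call.  */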
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);
  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
1127 if (VECTOR_MODE_P (GET_MODE (op0
))
1129 && (vec_extract_optab
->handlers
[GET_MODE (op0
)].insn_code
1130 != CODE_FOR_nothing
)
1131 && ((bitnum
+ bitsize
- 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))
1132 == bitnum
/ GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0
)))))
1134 enum machine_mode outermode
= GET_MODE (op0
);
1135 enum machine_mode innermode
= GET_MODE_INNER (outermode
);
1136 int icode
= (int) vec_extract_optab
->handlers
[outermode
].insn_code
;
1137 unsigned HOST_WIDE_INT pos
= bitnum
/ GET_MODE_BITSIZE (innermode
);
1138 rtx rtxpos
= GEN_INT (pos
);
1140 rtx dest
= NULL
, pat
, seq
;
1141 enum machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
1142 enum machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
1143 enum machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
1145 if (innermode
== tmode
|| innermode
== mode
)
1149 dest
= gen_reg_rtx (innermode
);
1153 if (! (*insn_data
[icode
].operand
[0].predicate
) (dest
, mode0
))
1154 dest
= copy_to_mode_reg (mode0
, dest
);
1156 if (! (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
))
1157 src
= copy_to_mode_reg (mode1
, src
);
1159 if (! (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
))
1160 rtxpos
= copy_to_mode_reg (mode1
, rtxpos
);
1162 /* We could handle this, but we should always be called with a pseudo
1163 for our targets and all insns should take them as outputs. */
1164 gcc_assert ((*insn_data
[icode
].operand
[0].predicate
) (dest
, mode0
)
1165 && (*insn_data
[icode
].operand
[1].predicate
) (src
, mode1
)
1166 && (*insn_data
[icode
].operand
[2].predicate
) (rtxpos
, mode2
));
1168 pat
= GEN_FCN (icode
) (dest
, src
, rtxpos
);
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1  = (SCALAR_INT_MODE_P (tmode)
	    ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	    : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((!MEM_P (op0)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (MEM_P (op0)
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (MEM_P (op0))
	    op0 = adjust_address (op0, mode1, offset);
	  else
	    {
	      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
					     byte_offset);
	      if (sub == NULL)
		goto no_subreg_mode_swap;
	      op0 = sub;
	    }
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      if (target == 0 || !REG_P (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
= 0; i
< nwords
; i
++)
1299 /* If I is 0, use the low-order word in both field and target;
1300 if I is 1, use the next to lowest word; and so on. */
1301 /* Word number in TARGET to use. */
1302 unsigned int wordnum
1304 ? GET_MODE_SIZE (GET_MODE (target
)) / UNITS_PER_WORD
- i
- 1
1306 /* Offset from start of field in OP0. */
1307 unsigned int bit_offset
= (WORDS_BIG_ENDIAN
1308 ? MAX (0, ((int) bitsize
- ((int) i
+ 1)
1309 * (int) BITS_PER_WORD
))
1310 : (int) i
* BITS_PER_WORD
);
1311 rtx target_part
= operand_subword (target
, wordnum
, 1, VOIDmode
);
1313 = extract_bit_field (op0
, MIN (BITS_PER_WORD
,
1314 bitsize
- i
* BITS_PER_WORD
),
1315 bitnum
+ bit_offset
, 1, target_part
, mode
,
1318 gcc_assert (target_part
);
1320 if (result_part
!= target_part
)
1321 emit_move_insn (target_part
, result_part
);
      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}
      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_cst (NULL_TREE,
					    GET_MODE_BITSIZE (mode) - bitsize),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_cst (NULL_TREE,
					  GET_MODE_BITSIZE (mode) - bitsize),
			   NULL_RTX, 0);
    }
1356 /* Check if there is a correspondingly-sized integer field, so we can
1357 safely extract it as one size of integer, if necessary; then
1358 truncate or extend to the size that is wanted; then use SUBREGs or
1359 convert_to_mode to get one of the modes we really wanted. */
1361 int_mode
= int_mode_for_mode (tmode
);
1362 if (int_mode
== BLKmode
)
1363 int_mode
= int_mode_for_mode (mode
);
1364 /* Should probably push op0 out to memory and then do a load. */
1365 gcc_assert (int_mode
!= BLKmode
);
  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && bitsize > 0
	  && GET_MODE_BITSIZE (extzv_mode) >= bitsize
	  && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (MEM_P (xop0))
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Make sure register is big enough for the whole field.  */
		  if (xoffset * BITS_PER_UNIT + unit
		      < offset * BITS_PER_UNIT + bitsize)
		    goto extzv_loses;

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }
	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);
	  if (xtarget == 0)
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (REG_P (xtarget))
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }
	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
	extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && bitsize > 0
	  && GET_MODE_BITSIZE (extv_mode) >= bitsize
	  && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
	  if (MEM_P (xop0))
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Make sure register is big enough for the whole field.  */
		  if (xoffset * BITS_PER_UNIT + unit
		      < offset * BITS_PER_UNIT + bitsize)
		    goto extv_loses;

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }
	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);
	  if (xtarget == 0)
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (REG_P (xtarget))
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }
	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
	extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is not a scalar integral, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
	{
	  enum machine_mode smode
	    = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
	  target = convert_to_mode (smode, target, unsignedp);
	  target = force_reg (smode, target);
	  return gen_lowpart (tmode, target);
	}

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;
  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);
      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_cst (NULL_TREE, bitpos);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
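
  /* For example, a signed 5-bit field at BITPOS 2 of a 32-bit word is
     extracted by shifting left 32 - (5 + 2) = 25 bits, moving the
     field's msb to bit 31, and then arithmetic-shifting right
     32 - 5 = 27 bits, which right-justifies the field while
     replicating its sign bit.  */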
  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }
  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_cst (NULL_TREE,
			 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }
  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_cst (NULL_TREE,
				      GET_MODE_BITSIZE (mode) - bitsize),
		       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      masklow = ~masklow;
      maskhigh = ~maskhigh;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
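
/* Worked example with a 64-bit HOST_WIDE_INT: mask_rtx (SImode, 8, 4, 0)
   computes masklow = ((HOST_WIDE_INT) -1 << 8) masked down to bits
   0..11, i.e. 0xf00 -- four ones starting at bit 8.  With COMPLEMENT
   nonzero the result is instead ~0xf00, truncated to SImode by
   immed_double_const.  */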
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
1864 lshift_value (enum machine_mode mode
, rtx value
, int bitpos
, int bitsize
)
1866 unsigned HOST_WIDE_INT v
= INTVAL (value
);
1867 HOST_WIDE_INT low
, high
;
1869 if (bitsize
< HOST_BITS_PER_WIDE_INT
)
1870 v
&= ~((HOST_WIDE_INT
) -1 << bitsize
);
1872 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
1875 high
= (bitpos
> 0 ? (v
>> (HOST_BITS_PER_WIDE_INT
- bitpos
)) : 0);
1880 high
= v
<< (bitpos
- HOST_BITS_PER_WIDE_INT
);
1883 return immed_double_const (low
, high
, mode
);
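/* Worked example (added commentary, not from the original source):
   lshift_value (SImode, GEN_INT (0x1f), 4, 3) first truncates the
   value to bitsize = 3 bits (0x1f & 7 == 7) and then shifts it left by
   bitpos = 4, yielding the constant 0x70.  */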
/* Extract a bit field from a memory by forcing the alignment of the
   memory.  This is efficient only if the field spans at least 4 boundaries.

   OP0 is the MEM.
   BITSIZE is the field width; BITPOS is the position of the first bit.
   UNSIGNEDP is true if the result should be zero-extended.  */

static rtx
extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
				   unsigned HOST_WIDE_INT bitpos,
				   int unsignedp)
{
  enum machine_mode mode, dmode;
  unsigned int m_bitsize, m_size;
  unsigned int sign_shift_up, sign_shift_dn;
  rtx base, a1, a2, v1, v2, comb, shift, result, start;

  /* Choose a mode that will fit BITSIZE.  */
  mode = smallest_mode_for_size (bitsize, MODE_INT);
  m_size = GET_MODE_SIZE (mode);
  m_bitsize = GET_MODE_BITSIZE (mode);

  /* Choose a mode twice as wide.  Fail if no such mode exists.  */
  dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
  if (dmode == BLKmode)
    return NULL;

  do_pending_stack_adjust ();
  start = get_last_insn ();

  /* At the end, we'll need an additional shift to deal with sign/zero
     extension.  By default this will be a left+right shift of the
     appropriate size.  But we may be able to eliminate one of them.  */
  sign_shift_up = sign_shift_dn = m_bitsize - bitsize;

  if (STRICT_ALIGNMENT)
    {
      base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
      bitpos %= BITS_PER_UNIT;

      /* We load two values to be concatenated.  There's an edge condition
	 that bears notice -- an aligned value at the end of a page can
	 only load one value lest we segfault.  So the two values we load
	 are at "base & -size" and "(base + size - 1) & -size".  If base
	 is unaligned, the addresses will be aligned and sequential; if
	 base is aligned, the addresses will both be equal to base.  */
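      /* Illustrative example (added commentary, not from the original
	 source): with m_size == 4 and base == 0x1003, the two load
	 addresses are a1 = 0x1003 & -4 = 0x1000 and
	 a2 = (0x1003 + 3) & -4 = 0x1004 -- aligned and sequential.
	 With base == 0x1000 both work out to 0x1000, so only one word
	 is touched and we cannot fault past the end of a page.  */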
      a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
				GEN_INT (-(HOST_WIDE_INT)m_size),
				NULL, true, OPTAB_LIB_WIDEN);
      mark_reg_pointer (a1, m_bitsize);
      v1 = gen_rtx_MEM (mode, a1);
      set_mem_align (v1, m_bitsize);
      v1 = force_reg (mode, validize_mem (v1));

      a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
      a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
				GEN_INT (-(HOST_WIDE_INT)m_size),
				NULL, true, OPTAB_LIB_WIDEN);
      v2 = gen_rtx_MEM (mode, a2);
      set_mem_align (v2, m_bitsize);
      v2 = force_reg (mode, validize_mem (v2));

      /* Combine these two values into a double-word value.  */
      if (m_bitsize == BITS_PER_WORD)
	{
	  comb = gen_reg_rtx (dmode);
	  emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
	  emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
	  emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
	}
      else
	{
	  if (BYTES_BIG_ENDIAN)
	    comb = v1, v1 = v2, v2 = comb;
	  v1 = convert_modes (dmode, mode, v1, true);
	  if (v1 == NULL)
	    goto fail;
	  v2 = convert_modes (dmode, mode, v2, true);
	  v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
				    NULL, true, OPTAB_LIB_WIDEN);
	  if (v2 == NULL)
	    goto fail;
	  comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
				      true, OPTAB_LIB_WIDEN);
	  if (comb == NULL)
	    goto fail;
	}

      shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
				   NULL, true, OPTAB_LIB_WIDEN);
      shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);

      if (bitpos)
	{
	  if (sign_shift_up <= bitpos)
	    bitpos -= sign_shift_up, sign_shift_up = 0;
	  shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
				       NULL, true, OPTAB_LIB_WIDEN);
	}
    }
  else
    {
      unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
      bitpos %= BITS_PER_UNIT;

      /* When strict alignment is not required, we can just load directly
	 from memory without masking.  If the remaining BITPOS offset is
	 small enough, we may be able to do all operations in MODE as
	 opposed to DMODE.  */
      if (bitpos + bitsize <= m_bitsize)
	dmode = mode;
      comb = adjust_address (op0, dmode, offset);

      if (sign_shift_up <= bitpos)
	bitpos -= sign_shift_up, sign_shift_up = 0;
      shift = GEN_INT (bitpos);
    }

  /* Shift down the double-word such that the requested value is at bit 0.  */
  if (shift != const0_rtx)
    comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
				comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
  if (comb == NULL)
    goto fail;

  /* If the field exactly matches MODE, then all we need to do is return the
     lowpart.  Otherwise, shift to get the sign bits set properly.  */
  result = force_reg (mode, gen_lowpart (mode, comb));

  if (sign_shift_up)
    result = expand_simple_binop (mode, ASHIFT, result,
				  GEN_INT (sign_shift_up),
				  NULL_RTX, 0, OPTAB_LIB_WIDEN);
  if (sign_shift_dn)
    result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
				  result, GEN_INT (sign_shift_dn),
				  NULL_RTX, 0, OPTAB_LIB_WIDEN);

  return result;

 fail:
  delete_insns_since (start);
  return NULL;
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    {
      unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
      if (0 && bitsize / unit > 2)
	{
	  rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
						       unsignedp);
	  if (tmp)
	    return tmp;
	}
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE, bitsize - bitsdone),
				 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE,
						bitsdone - thissize), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
		       NULL_RTX, 0);
}
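/* Worked example (added commentary, not from the original source):
   extracting a 20-bit field at BITPOS 28 from 32-bit words takes two
   iterations of the loop above: the first extracts the
   unit - thispos = 4 bits left in the first word, the second the
   remaining 16 bits from the next word; the parts are shifted into
   place and IORed together before the final sign/zero extension.  */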
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_normal (amount);

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1))
	op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx temp1;
	      tree type = TREE_TYPE (amount);
	      tree new_amount = make_tree (type, op1);
	      tree other_amount
		= fold_build2 (MINUS_EXPR, type,
			       build_int_cst (type, GET_MODE_BITSIZE (mode)),
			       new_amount);

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				   mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				    mode, shifted, other_amount, subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? rotl_optab : rotr_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? ashl_optab : lshr_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? ashl_optab : ashr_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
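/* Illustrative example (added commentary, not from the original source):
   the IOR-of-two-shifts fallback above computes a 32-bit rotate left
   by 8 as (a << 8) | ((unsigned) a >> 24), i.e. with new_amount == 8
   and other_amount == 32 - 8.  */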
/* This structure holds the "cost" of a multiply sequence.  The
   "cost" field holds the total rtx_cost of every operator in the
   synthetic multiplication sequence, hence cost(a op b) is defined
   as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
   The "latency" field holds the minimum possible latency of the
   synthetic multiply, on a hypothetical infinitely parallel CPU.
   This is the critical path, or the maximum height, of the expression
   tree which is the sum of rtx_costs on the most expensive path from
   any leaf to the root.  Hence latency(a op b) is defined as zero for
   leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise.  */

struct mult_cost {
  short cost;     /* Total rtx_cost of the multiplication sequence.  */
  short latency;  /* The latency of the multiplication sequence.  */
};

/* This macro is used to compare a pointer to a mult_cost against a
   single integer "rtx_cost" value.  This is equivalent to the macro
   CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}.  */
#define MULT_COST_LESS(X,Y) ((X)->cost < (Y)			\
			     || ((X)->cost == (Y) && (X)->latency < (Y)))

/* This macro is used to compare two pointers to mult_costs against
   each other.  The macro returns true if X is cheaper than Y.
   Currently, the cheaper of two mult_costs is the one with the
   lower "cost".  If "cost"s are tied, the lower latency is cheaper.  */
#define CHEAPER_MULT_COST(X,Y)  ((X)->cost < (Y)->cost		\
				 || ((X)->cost == (Y)->cost	\
				     && (X)->latency < (Y)->latency))
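/* Worked example (added commentary, not from the original source):
   synthesizing x*6 as (x << 1) + (x << 2) has cost
   2*shift_cost + add_cost, but its latency is only
   shift_cost + add_cost because the two independent shifts may execute
   in parallel; x*5 as (x << 2) + x has the same latency with one shift
   less total cost, so CHEAPER_MULT_COST prefers it when both are
   candidates.  */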
/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero		total := 0;
   alg_m		total := multiplicand;
   alg_shift		total := total * coeff
   alg_add_t_m2		total := total + multiplicand * coeff;
   alg_sub_t_m2		total := total - multiplicand * coeff;
   alg_add_factor	total := total * coeff + total;
   alg_sub_factor	total := total * coeff - total;
   alg_add_t2_m		total := total * coeff + multiplicand;
   alg_sub_t2_m		total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */
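/* Worked example (added commentary, not from the original source):
   multiplying by 10 can be recorded as the three operations
     alg_m                  total := x
     alg_add_t2_m (log 2)   total := total*4 + x  ==  5x
     alg_shift    (log 1)   total := total*2      == 10x
   giving ops == 3, i.e. x*10 == ((x << 2) + x) << 1.  */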
struct algorithm
{
  struct mult_cost cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst case arises when we have few consecutive
     ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
/* The entry for our multiplication cache/hash table.  */
struct alg_hash_entry {
  /* The number we are multiplying by.  */
  unsigned HOST_WIDE_INT t;

  /* The mode in which we are multiplying something by T.  */
  enum machine_mode mode;

  /* The best multiplication algorithm for t.  */
  enum alg_code alg;

  /* The cost of multiplication if ALG_CODE is not alg_impossible.
     Otherwise, the cost within which multiplication by T is
     impossible.  */
  struct mult_cost cost;
};

/* The number of cache/hash entries.  */
#define NUM_ALG_HASH_ENTRIES 307

/* Each entry of ALG_HASH caches alg_code for some integer.  This is
   actually a hash table.  If we have a collision, the older entry is
   kicked out.  */
static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};
static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
						 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost;
	  alg_out->cost.latency = zero_cost;
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = alloca (sizeof (struct algorithm));
  best_alg = alloca (sizeof (struct algorithm));
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_alg = alg_hash[hash_index].alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within alg_hash[hash_index].cost.  */
	  if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */
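  /* Illustrative example (added commentary, not from the original
     source): t == 20 has two low zero bits, so m == 2 and q == 5; the
     recursion synthesizes the multiplication by 5 and an alg_shift by
     2 finishes the job: x*20 == (x*5) << 2.  */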
  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost[mode], shift_cost[mode][m]).  */
	  op_cost = m * add_cost[mode];
	  if (shift_cost[mode][m] < op_cost)
	    op_cost = shift_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 then subtracting 1 produces slightly better code and lets the
	 algorithm be selected much faster than treating it like the
	 ...0111 case below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost[mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost[mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */
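  /* Illustrative example (added commentary, not from the original
     source): t == 45 is divisible by d == 2**3 + 1 == 9, so the
     recursion synthesizes q == 45/9 == 5 (itself a single
     shift-and-add) and an alg_add_factor step then computes
     total*8 + total == 45x.  */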
 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[mode] + shift_cost[mode][m];
	  if (shiftadd_cost[mode][m] < op_cost)
	    {
	      op_cost = shiftadd_cost[mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[mode] + shift_cost[mode][m];
	  if (shiftsub_cost[mode][m] < op_cost)
	    {
	      op_cost = shiftsub_cost[mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].alg = alg_impossible;
      alg_hash[hash_index].cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
      alg_hash[hash_index].cost.cost = best_cost.cost;
      alg_hash[hash_index].cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these cost less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */
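/* Illustrative example (added commentary, not from the original
   source): on a 32-bit mode, multiplying by 0xfffffffb (i.e. -5) is
   typically cheapest as the shift/add sequence for 5 followed by a
   negation (negate_variant); add_variant instead synthesizes VAL - 1
   and finishes with one addition, which tends to pay off for the
   reciprocal constants produced by division-by-constant.  */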
static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over.
     For speed, but also for correctness when mem is volatile.  */
  if (MEM_P (op0))
    op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize ? target : 0);
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
	{
	case alg_shift:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				NULL_RTX, 0);
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Write a REG_EQUAL note on the last insn so that we can cse
	 multiplication sequences.  Note that if ACCUM is a SUBREG,
	 we've set the inner register and must properly indicate
	 that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
	{
	  nmode = GET_MODE (SUBREG_REG (accum));
	  tem = gen_lowpart (nmode, op0);
	}

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
			   gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  int max_cost;

  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
  if (op1 == const0_rtx)
    return const0_rtx;
  if (op1 == const1_rtx)
    return op0;
  if (op1 == constm1_rtx)
    return expand_unop (mode,
			GET_MODE_CLASS (mode) == MODE_INT
			&& !unsignedp && flag_trapv
			? negv_optab : neg_optab,
			op0, target, 0);

  /* These are the operations that are potentially turned into a sequence
     of shifts and additions.  */
  if (SCALAR_INT_MODE_P (mode)
      && (unsignedp || !flag_trapv))
    {
      HOST_WIDE_INT coeff = 0;
      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */

      if (GET_CODE (op1) == CONST_INT)
	{
	  /* Attempt to handle multiplication of DImode values by negative
	     coefficients, by performing the multiplication by a positive
	     multiplier and then inverting the result.  */
	  if (INTVAL (op1) < 0
	      && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
	    {
	      /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
		 result is interpreted as an unsigned coefficient.
		 Exclude cost of op0 from max_cost to match the cost
		 calculation of the synth_mult.  */
	      max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
			 - neg_cost[mode];
	      if (max_cost > 0
		  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
					  &variant, max_cost))
		{
		  rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
						NULL_RTX, &algorithm,
						variant);
		  return expand_unop (mode, neg_optab, temp, target, 0);
		}
	    }
	  else
	    coeff = INTVAL (op1);
	}
      else if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  /* If we are multiplying in DImode, it may still be a win
	     to try to work with shifts and adds.  */
	  if (CONST_DOUBLE_HIGH (op1) == 0)
	    coeff = CONST_DOUBLE_LOW (op1);
	  else if (CONST_DOUBLE_LOW (op1) == 0
		   && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
	    {
	      int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
			  + HOST_BITS_PER_WIDE_INT;
	      return expand_shift (LSHIFT_EXPR, mode, op0,
				   build_int_cst (NULL_TREE, shift),
				   target, unsignedp);
	    }
	}

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */
      if (coeff != 0)
	{
	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    return expand_shift (LSHIFT_EXPR, mode, op0,
				 build_int_cst (NULL_TREE, floor_log2 (coeff)),
				 target, unsignedp);

	  /* Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    return expand_mult_const (mode, op0, coeff, target,
				      &algorithm, variant);
	}
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
		      ! unsignedp
		      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
		      ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
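/* E.g. (added commentary, not from the original source):
   ceil_log2 (5) == 3 and ceil_log2 (8) == 3, via
   floor_log2 (4) + 1 and floor_log2 (7) + 1 respectively.  */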
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose the
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */
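/* Illustrative example (added commentary, not from the original
   source): for unsigned 32-bit division by 3 a suitable multiplier is
   2863311531 (0xaaaaaaab); since 3 * 0xaaaaaaab == 2^33 + 1, the
   identity x/3 == (x * 0xaaaaaaab) >> 33 holds for all 32-bit x --
   a highpart multiply followed by a final right shift.  */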
static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
	      || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = GEN_INT (mhigh_lo & mask);
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = GEN_INT (mhigh_lo);
      return mhigh_hi;
    }
}
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
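/* Worked example (added commentary, not from the original source):
   invert_mod2n (3, 8) starts with y = 3 (correct mod 2^3) and iterates
   y = y * (2 - 3*y) mod 256: first 3 -> 235 (correct mod 2^6), then
   235 -> 171, and indeed 3 * 171 == 513 == 2*256 + 1 == 1 (mod 256).  */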
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
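/* Illustrative note (added commentary, not from the original source):
   this implements the standard identity relating the two high parts,
   umulh (x, y) == smulh (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)
   (mod 2^N), where x < 0 tests the sign bit of the N-bit operand.  The
   arithmetic right shift by N - 1 builds an all-ones/all-zeros mask,
   the AND selects the correction term, and MINUS is used when
   adjusting in the opposite direction.  */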
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
	  + 4 * add_cost[mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
	  + 4 * add_cost[mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation cannot be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost[mode];
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST >= 2
      && !optimize_size)
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  signmask = force_reg (mode, signmask);
	  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
	      || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
		 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);
  emit_label (label);
  return result;
}
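/* Worked example (added commentary, not from the original source):
   for d == 4 (logd == 2) in a 32-bit mode, the branching path masks
   op0 with 0x80000003.  For op0 == -7 (0xfffffff9) this leaves
   0x80000001, which is negative, so the fixup computes
   (0x80000001 - 1) | 0xfffffffc and adds 1, giving 0xfffffffd == -3,
   the correct truncated remainder.  */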
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2 && BRANCH_COST >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
	 compare_from_rtx, so, if the sequence is discarded, it will
	 be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d - 1),
			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
				     mode, temp, temp2, mode, 0);
      if (temp2)
	{
	  rtx seq = get_insns ();
	  end_sequence ();
	  emit_insn (seq);
	  return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
	}
      end_sequence ();
    }
#endif

  if (BRANCH_COST >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
	temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
			     NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
	temp = expand_shift (RSHIFT_EXPR, mode, temp,
			     build_int_cst (NULL_TREE, ushift),
			     NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
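/* Editor's note -- the branch-free identity used above, sketched in C for
   a 32-bit int with arithmetic right shift (illustrative only, not part
   of the original file).  Truncating division rounds toward zero, so a
   negative dividend must be biased by d - 1 before the shift:

     int sdiv_pow2 (int x, int logd)
     {
       int d = 1 << logd;
       int sign = x >> 31;            /* 0 or -1 */
       int bias = sign & (d - 1);     /* d - 1 only for negative x */
       return (x + bias) >> logd;
     }

   The BRANCH_COST >= 2 path materializes SIGN with emit_store_flag and
   BIAS with either an AND or an unsigned right shift, whichever the cost
   tables say is cheaper.  */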
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
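/* Editor's note -- the remainder-by-3 trick just described, written out in
   C for a 32-bit unsigned int (illustrative only, not part of the
   original file).  A single low 32x32->32 multiply suffices; the top two
   bits of the product are the remainder:

     unsigned mod3 (unsigned x)       /* exact for x <= 0x1fffffff */
     {
       return (x * 0x55555556U) >> 30;
     }

   0x55555556 is (2^32 + 2) / 3, so x * 0x55555556 mod 2^32 equals
   2 * (x / 3) plus r * 0x55555556 for r = x mod 3, and the three possible
   r values land in disjoint ranges of the top two bits while x stays
   below 2^29.  */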
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
	       rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
	ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
		     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
	return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
			  ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
	 since we have to read it as well as write it,
	 and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
	  /* Don't clobber an operand while doing a multi-step calculation.  */
	  || ((rem_flag || op1_is_constant)
	      && (reg_mentioned_p (target, op0)
		  || (MEM_P (op0) && MEM_P (target))))
	  || reg_mentioned_p (target, op1)
	  || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't
     normally fail.  If these assumptions are not correct, we may generate
     less efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? (unsignedp ? lshr_optab : ashr_optab)
	    : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? optab1
	    : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
	|| optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[compute_mode].libfunc
	  || optab2->handlers[compute_mode].libfunc)
	break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif
  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
		     && INTVAL (op1) == last_div_const))
    max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
	 must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
		     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
			  || (! unsignedp
			      && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();
  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
	code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
	code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
	code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
	if (op1_is_constant)
	  {
	    if (unsignedp)
	      {
		unsigned HOST_WIDE_INT mh;
		int pre_shift, post_shift;
		int dummy;
		rtx ml;
		unsigned HOST_WIDE_INT d = (INTVAL (op1)
					    & GET_MODE_MASK (compute_mode));

		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder
			  = expand_binop (compute_mode, and_optab, op0,
					  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
					  remainder, 1, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     build_int_cst (NULL_TREE,
							    pre_shift),
					     tquotient, 1);
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
		      {
			/* Most significant bit of divisor is set; emit an scc
			   insn.  */
			quotient = emit_store_flag (tquotient, GEU, op0, op1,
						    compute_mode, 1, 1);
			if (quotient == 0)
			  goto fail1;
		      }
		    else
		      {
			/* Find a suitable multiplier and right shift count
			   instead of multiplying with D.  */

			mh = choose_multiplier (d, size, size,
						&ml, &post_shift, &dummy);

			/* If the suggested multiplier is more than SIZE bits,
			   we can do better for even divisors, using an
			   initial right shift.  */
			if (mh != 0 && (d & 1) == 0)
			  {
			    pre_shift = floor_log2 (d & -d);
			    mh = choose_multiplier (d >> pre_shift, size,
						    size - pre_shift,
						    &ml, &post_shift, &dummy);
			    gcc_assert (!mh);
			  }
			else
			  pre_shift = 0;

			if (mh != 0)
			  {
			    rtx t1, t2, t3, t4;

			    if (post_shift - 1 >= BITS_PER_WORD)
			      goto fail1;

			    extra_cost
			      = (shift_cost[compute_mode][post_shift - 1]
				 + shift_cost[compute_mode][1]
				 + 2 * add_cost[compute_mode]);
			    t1 = expand_mult_highpart (compute_mode, op0, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t1 == 0)
			      goto fail1;
			    t2 = force_operand (gen_rtx_MINUS (compute_mode,
							       op0, t1),
						NULL_RTX);
			    t3 = expand_shift
			      (RSHIFT_EXPR, compute_mode, t2,
			       build_int_cst (NULL_TREE, 1),
			       NULL_RTX, 1);
			    t4 = force_operand (gen_rtx_PLUS (compute_mode,
							      t1, t3),
						NULL_RTX);
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t4,
			       build_int_cst (NULL_TREE, post_shift - 1),
			       tquotient, 1);
			  }
			else
			  {
			    rtx t1, t2;

			    if (pre_shift >= BITS_PER_WORD
				|| post_shift >= BITS_PER_WORD)
			      goto fail1;

			    t1 = expand_shift
			      (RSHIFT_EXPR, compute_mode, op0,
			       build_int_cst (NULL_TREE, pre_shift),
			       NULL_RTX, 1);
			    extra_cost
			      = (shift_cost[compute_mode][pre_shift]
				 + shift_cost[compute_mode][post_shift]);
			    t2 = expand_mult_highpart (compute_mode, t1, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t2 == 0)
			      goto fail1;
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t2,
			       build_int_cst (NULL_TREE, post_shift),
			       tquotient, 1);
			  }
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  set_unique_reg_note (insn,
				       REG_EQUAL,
				       gen_rtx_UDIV (compute_mode, op0, op1));
	      }
	    else		/* TRUNC_DIV, signed */
	      {
		unsigned HOST_WIDE_INT ml;
		int lgup, post_shift;
		rtx mlr;
		HOST_WIDE_INT d = INTVAL (op1);
		unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

		/* n rem d = n rem -d */
		if (rem_flag && d < 0)
		  {
		    d = abs_d;
		    op1 = gen_int_mode (abs_d, compute_mode);
		  }

		if (d == 1)
		  quotient = op0;
		else if (d == -1)
		  quotient = expand_unop (compute_mode, neg_optab, op0,
					  tquotient, 0);
		else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
		  {
		    /* This case is not handled correctly below.  */
		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
						compute_mode, 1, 1);
		    if (quotient == 0)
		      goto fail1;
		  }
		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
			 && (rem_flag ? smod_pow2_cheap[compute_mode]
				      : sdiv_pow2_cheap[compute_mode])
			 /* We assume that cheap metric is true if the
			    optab has an expander for this mode.  */
			 && (((rem_flag ? smod_optab : sdiv_optab)
			      ->handlers[compute_mode].insn_code
			      != CODE_FOR_nothing)
			     || (sdivmod_optab->handlers[compute_mode]
				 .insn_code != CODE_FOR_nothing)))
		  ;
		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
		  {
		    if (rem_flag)
		      {
			remainder = expand_smod_pow2 (compute_mode, op0, d);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }

		    if (sdiv_pow2_cheap[compute_mode]
			&& ((sdiv_optab->handlers[compute_mode].insn_code
			     != CODE_FOR_nothing)
			    || (sdivmod_optab->handlers[compute_mode].insn_code
				!= CODE_FOR_nothing)))
		      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
						compute_mode, op0,
						gen_int_mode (abs_d,
							      compute_mode),
						NULL_RTX, 0);
		    else
		      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

		    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
		       negate the quotient.  */
		    if (d < 0)
		      {
			insn = get_last_insn ();
			if (insn != last
			    && (set = single_set (insn)) != 0
			    && SET_DEST (set) == quotient
			    && abs_d < ((unsigned HOST_WIDE_INT) 1
					<< (HOST_BITS_PER_WIDE_INT - 1)))
			  set_unique_reg_note (insn,
					       REG_EQUAL,
					       gen_rtx_DIV (compute_mode,
							    op0,
							    GEN_INT
							    (trunc_int_for_mode
							     (abs_d,
							      compute_mode))));

			quotient = expand_unop (compute_mode, neg_optab,
						quotient, quotient, 0);
		      }
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    choose_multiplier (abs_d, size, size - 1,
				       &mlr, &post_shift, &lgup);
		    ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
		    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
		      {
			rtx t1, t2, t3;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			extra_cost = (shift_cost[compute_mode][post_shift]
				      + shift_cost[compute_mode][size - 1]
				      + add_cost[compute_mode]);
			t1 = expand_mult_highpart (compute_mode, op0, mlr,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t1,
			   build_int_cst (NULL_TREE, post_shift),
			   NULL_RTX, 0);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t2),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t2, t3),
					     tquotient);
		      }
		    else
		      {
			rtx t1, t2, t3, t4;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
			mlr = gen_int_mode (ml, compute_mode);
			extra_cost = (shift_cost[compute_mode][post_shift]
				      + shift_cost[compute_mode][size - 1]
				      + 2 * add_cost[compute_mode]);
			t1 = expand_mult_highpart (compute_mode, op0, mlr,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = force_operand (gen_rtx_PLUS (compute_mode,
							  t1, op0),
					    NULL_RTX);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t2,
			   build_int_cst (NULL_TREE, post_shift),
			   NULL_RTX, 0);
			t4 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t4, t3),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t4),
					     tquotient);
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  set_unique_reg_note (insn,
				       REG_EQUAL,
				       gen_rtx_DIV (compute_mode, op0, op1));
	      }
	    break;
	  }
      fail1:
	delete_insns_since (last);
	break;
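/* Editor's note -- a C sketch of the signed multiply-highpart path above
   for the concrete divisor 7 in SImode (illustrative only; the constant
   comes from choose_multiplier, here ceil (2^34 / 7) = 0x92492493, with
   post_shift = 2).  Assumes 32-bit int, 64-bit long long and arithmetic
   right shift:

     int sdiv7 (int x)
     {
       int hi = (int) (((long long) x * (int) 0x92492493) >> 32);
       int t = (hi + x) >> 2;     /* add back, then post-shift */
       return t - (x >> 31);      /* subtract the sign: +1 if x < 0 */
     }

   This mirrors the ml >= 2^(size-1) branch: the multiplier is used as a
   negative signed value, the dividend is added back, and subtracting the
   shifted sign bit rounds the quotient toward zero.  */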
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
      /* We will come here only for signed operations.  */
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    unsigned HOST_WIDE_INT mh;
	    int pre_shift, lgup, post_shift;
	    HOST_WIDE_INT d = INTVAL (op1);
	    rtx ml;

	    if (d > 0)
	      {
		/* We could just as easily deal with negative constants here,
		   but it does not seem worth the trouble for GCC 2.6.  */
		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder = expand_binop (compute_mode, and_optab, op0,
						  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
						  remainder, 0, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift
		      (RSHIFT_EXPR, compute_mode, op0,
		       build_int_cst (NULL_TREE, pre_shift),
		       tquotient, 0);
		  }
		else
		  {
		    rtx t1, t2, t3, t4;

		    mh = choose_multiplier (d, size, size - 1,
					    &ml, &post_shift, &lgup);
		    gcc_assert (!mh);

		    if (post_shift < BITS_PER_WORD
			&& size - 1 < BITS_PER_WORD)
		      {
			t1 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			t2 = expand_binop (compute_mode, xor_optab, op0, t1,
					   NULL_RTX, 0, OPTAB_WIDEN);
			extra_cost = (shift_cost[compute_mode][post_shift]
				      + shift_cost[compute_mode][size - 1]
				      + 2 * add_cost[compute_mode]);
			t3 = expand_mult_highpart (compute_mode, t2, ml,
						   NULL_RTX, 1,
						   max_cost - extra_cost);
			if (t3 != 0)
			  {
			    t4 = expand_shift
			      (RSHIFT_EXPR, compute_mode, t3,
			       build_int_cst (NULL_TREE, post_shift),
			       NULL_RTX, 1);
			    quotient = expand_binop (compute_mode, xor_optab,
						     t4, t1, tquotient, 0,
						     OPTAB_WIDEN);
			  }
		      }
		  }
	      }
	    else
	      {
		rtx nsign, t1, t2, t3, t4;
		t1 = force_operand (gen_rtx_PLUS (compute_mode,
						  op0, constm1_rtx), NULL_RTX);
		t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
				   0, OPTAB_WIDEN);
		nsign = expand_shift
		  (RSHIFT_EXPR, compute_mode, t2,
		   build_int_cst (NULL_TREE, size - 1),
		   NULL_RTX, 0);
		t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
				    NULL_RTX);
		t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
				    NULL_RTX, 0);
		if (t4)
		  {
		    rtx t5;
		    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
				      NULL_RTX, 0);
		    quotient = force_operand (gen_rtx_PLUS (compute_mode,
							    t4, t5),
					      tquotient);
		  }
	      }
	  }

	if (quotient != 0)
	  break;
	delete_insns_since (last);

	/* Try using an instruction that produces both the quotient and
	   remainder, using truncation.  We can easily compensate the quotient
	   or remainder to get floor rounding, once we have the remainder.
	   Notice that we compute also the final remainder value here,
	   and return the result right away.  */
	if (target == 0 || GET_MODE (target) != compute_mode)
	  target = gen_reg_rtx (compute_mode);

	if (rem_flag)
	  {
	    remainder
	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
	    quotient = gen_reg_rtx (compute_mode);
	  }
	else
	  {
	    quotient
	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	  }

	if (expand_twoval_binop (sdivmod_optab, op0, op1,
				 quotient, remainder, 0))
	  {
	    /* This could be computed with a branch-less sequence.
	       Save that for later.  */
	    rtx tem;
	    rtx label = gen_label_rtx ();
	    do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
	    expand_dec (quotient, const1_rtx);
	    expand_inc (remainder, op1);
	    emit_label (label);
	    return gen_lowpart (mode, rem_flag ? remainder : quotient);
	  }

	/* No luck with division elimination or divmod.  Have to do it
	   by conditionally adjusting op0 *and* the result.  */
	{
	  rtx label1, label2, label3, label4, label5;
	  rtx adjusted_op0;
	  rtx tem;

	  quotient = gen_reg_rtx (compute_mode);
	  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	  label1 = gen_label_rtx ();
	  label2 = gen_label_rtx ();
	  label3 = gen_label_rtx ();
	  label4 = gen_label_rtx ();
	  label5 = gen_label_rtx ();
	  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
	  do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_barrier ();
	  emit_label (label1);
	  expand_inc (adjusted_op0, const1_rtx);
	  emit_jump_insn (gen_jump (label4));
	  emit_barrier ();
	  emit_label (label2);
	  do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_barrier ();
	  emit_label (label3);
	  expand_dec (adjusted_op0, const1_rtx);
	  emit_label (label4);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  expand_dec (quotient, const1_rtx);
	  emit_label (label5);
	}
	break;
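/* Editor's note -- what the floor-rounding adjustment above computes, as
   plain C for 32-bit ints (illustrative only, not part of the original
   file).  Truncating division rounds toward zero; flooring must round
   down, so the quotient is decremented (and the remainder compensated)
   when the remainder is nonzero and the operands' signs differ:

     int floor_div (int x, int y)
     {
       int q = x / y, r = x % y;        /* truncating divmod */
       if (r != 0 && (x ^ y) < 0)       /* signs differ */
	 {
	   q -= 1;                      /* expand_dec (quotient, const1_rtx) */
	   r += y;                      /* expand_inc (remainder, op1) */
	 }
       return q;                        /* or r, for FLOOR_MOD_EXPR */
     }
*/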
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
	if (unsignedp)
	  {
	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
	      {
		rtx t1, t2, t3;
		unsigned HOST_WIDE_INT d = INTVAL (op1);
		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				   build_int_cst (NULL_TREE, floor_log2 (d)),
				   tquotient, 1);
		t2 = expand_binop (compute_mode, and_optab, op0,
				   GEN_INT (d - 1),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
		t3 = gen_reg_rtx (compute_mode);
		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
				      compute_mode, 1, 1);
		if (t3 == 0)
		  {
		    rtx lab;
		    lab = gen_label_rtx ();
		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		    expand_inc (t1, const1_rtx);
		    emit_label (lab);
		    quotient = t1;
		  }
		else
		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
							   t1, t3),
					    tquotient);
	      }
	    else
	      {
		/* Try using an instruction that produces both the quotient and
		   remainder, using truncation.  We can easily compensate the
		   quotient or remainder to get ceiling rounding, once we have the
		   remainder.  Notice that we compute also the final remainder
		   value here, and return the result right away.  */
		if (target == 0 || GET_MODE (target) != compute_mode)
		  target = gen_reg_rtx (compute_mode);

		if (rem_flag)
		  {
		    remainder = (REG_P (target)
				 ? target : gen_reg_rtx (compute_mode));
		    quotient = gen_reg_rtx (compute_mode);
		  }
		else
		  {
		    quotient = (REG_P (target)
				? target : gen_reg_rtx (compute_mode));
		    remainder = gen_reg_rtx (compute_mode);
		  }

		if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
					 remainder, 1))
		  {
		    /* This could be computed with a branch-less sequence.
		       Save that for later.  */
		    rtx label = gen_label_rtx ();
		    do_cmp_and_jump (remainder, const0_rtx, EQ,
				     compute_mode, label);
		    expand_inc (quotient, const1_rtx);
		    expand_dec (remainder, op1);
		    emit_label (label);
		    return gen_lowpart (mode, rem_flag ? remainder : quotient);
		  }

		/* No luck with division elimination or divmod.  Have to do it
		   by conditionally adjusting op0 *and* the result.  */
		{
		  rtx label1, label2;
		  rtx adjusted_op0, tem;

		  quotient = gen_reg_rtx (compute_mode);
		  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		  label1 = gen_label_rtx ();
		  label2 = gen_label_rtx ();
		  do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
				   compute_mode, label1);
		  emit_move_insn (quotient, const0_rtx);
		  emit_jump_insn (gen_jump (label2));
		  emit_barrier ();
		  emit_label (label1);
		  expand_dec (adjusted_op0, const1_rtx);
		  tem = expand_binop (compute_mode, udiv_optab, adjusted_op0,
				      op1, quotient, 1, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  expand_inc (quotient, const1_rtx);
		  emit_label (label2);
		}
	      }
	  }
	else /* signed */
	  {
	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
		&& INTVAL (op1) >= 0)
	      {
		/* This is extremely similar to the code for the unsigned case
		   above.  For 2.7 we should merge these variants, but for
		   2.6.1 I don't want to touch the code for unsigned since that
		   get used in C.  The signed case will only be used by other
		   languages (Ada).  */

		rtx t1, t2, t3;
		unsigned HOST_WIDE_INT d = INTVAL (op1);
		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				   build_int_cst (NULL_TREE, floor_log2 (d)),
				   tquotient, 0);
		t2 = expand_binop (compute_mode, and_optab, op0,
				   GEN_INT (d - 1),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
		t3 = gen_reg_rtx (compute_mode);
		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
				      compute_mode, 1, 1);
		if (t3 == 0)
		  {
		    rtx lab;
		    lab = gen_label_rtx ();
		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		    expand_inc (t1, const1_rtx);
		    emit_label (lab);
		    quotient = t1;
		  }
		else
		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
							   t1, t3),
					    tquotient);
	      }
	    else
	      {
		/* Try using an instruction that produces both the quotient and
		   remainder, using truncation.  We can easily compensate the
		   quotient or remainder to get ceiling rounding, once we have the
		   remainder.  Notice that we compute also the final remainder
		   value here, and return the result right away.  */
		if (target == 0 || GET_MODE (target) != compute_mode)
		  target = gen_reg_rtx (compute_mode);

		if (rem_flag)
		  {
		    remainder = (REG_P (target)
				 ? target : gen_reg_rtx (compute_mode));
		    quotient = gen_reg_rtx (compute_mode);
		  }
		else
		  {
		    quotient = (REG_P (target)
				? target : gen_reg_rtx (compute_mode));
		    remainder = gen_reg_rtx (compute_mode);
		  }

		if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
					 remainder, 0))
		  {
		    /* This could be computed with a branch-less sequence.
		       Save that for later.  */
		    rtx tem;
		    rtx label = gen_label_rtx ();
		    do_cmp_and_jump (remainder, const0_rtx, EQ,
				     compute_mode, label);
		    tem = expand_binop (compute_mode, xor_optab, op0, op1,
					NULL_RTX, 0, OPTAB_WIDEN);
		    do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
		    expand_inc (quotient, const1_rtx);
		    expand_dec (remainder, op1);
		    emit_label (label);
		    return gen_lowpart (mode, rem_flag ? remainder : quotient);
		  }

		/* No luck with division elimination or divmod.  Have to do it
		   by conditionally adjusting op0 *and* the result.  */
		{
		  rtx label1, label2, label3, label4, label5;
		  rtx adjusted_op0;
		  rtx tem;

		  quotient = gen_reg_rtx (compute_mode);
		  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		  label1 = gen_label_rtx ();
		  label2 = gen_label_rtx ();
		  label3 = gen_label_rtx ();
		  label4 = gen_label_rtx ();
		  label5 = gen_label_rtx ();
		  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
		  do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
				   compute_mode, label1);
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  emit_jump_insn (gen_jump (label5));
		  emit_barrier ();
		  emit_label (label1);
		  expand_dec (adjusted_op0, const1_rtx);
		  emit_jump_insn (gen_jump (label4));
		  emit_barrier ();
		  emit_label (label2);
		  do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
				   compute_mode, label3);
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  emit_jump_insn (gen_jump (label5));
		  emit_barrier ();
		  emit_label (label3);
		  expand_inc (adjusted_op0, const1_rtx);
		  emit_label (label4);
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  expand_inc (quotient, const1_rtx);
		  emit_label (label5);
		}
	      }
	  }
	break;
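/* Editor's note -- the ceiling adjustment above, as plain C (illustrative
   only, not part of the original file).  For the unsigned power-of-two
   path with d = 1 << k, the quotient is bumped whenever any low bit
   survives the mask:

     unsigned ceil_div_pow2 (unsigned x, int k)
     {
       return (x >> k) + ((x & ((1u << k) - 1)) != 0);
     }

   The general signed adjustment increments the truncating quotient (and
   decrements the remainder by the divisor) when the remainder is nonzero
   and the operands have the same sign:

     int ceil_div (int x, int y)
     {
       int q = x / y, r = x % y;
       if (r != 0 && (x ^ y) >= 0)
	 q += 1;
       return q;
     }
*/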
      case EXACT_DIV_EXPR:
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    HOST_WIDE_INT d = INTVAL (op1);
	    unsigned HOST_WIDE_INT ml;
	    int pre_shift;
	    rtx t1;

	    pre_shift = floor_log2 (d & -d);
	    ml = invert_mod2n (d >> pre_shift, size);
	    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
			       build_int_cst (NULL_TREE, pre_shift),
			       NULL_RTX, unsignedp);
	    quotient = expand_mult (compute_mode, t1,
				    gen_int_mode (ml, compute_mode),
				    NULL_RTX, 1);

	    insn = get_last_insn ();
	    set_unique_reg_note (insn,
				 REG_EQUAL,
				 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						 compute_mode,
						 op0, op1));
	  }
	break;
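/* Editor's note -- why the EXACT_DIV_EXPR case needs no highpart multiply
   (illustrative C, not part of the original file).  When the division is
   known to be exact, the odd part of the divisor is invertible mod 2^n,
   so one low multiply by that inverse recovers the quotient.  For d = 3
   on a 32-bit unsigned int the inverse is 0xAAAAAAAB, since
   3 * 0xAAAAAAAB == 1 (mod 2^32):

     unsigned exact_div3 (unsigned x)  /* x must be a multiple of 3 */
     {
       return x * 0xAAAAAAABU;
     }

   This is the same value invert_mod2n computes; even divisors are first
   reduced by the pre_shift of their trailing zero bits.  */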
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
	if (unsignedp)
	  {
	    rtx tem;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
				     remainder, 1) == 0)
	      {
		rtx tem;
		quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
					 quotient, 1, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 1, OPTAB_LIB_WIDEN);
	      }
	    tem = plus_constant (op1, -1);
	    tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				build_int_cst (NULL_TREE, 1),
				NULL_RTX, 1);
	    do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
	    expand_inc (quotient, const1_rtx);
	    expand_dec (remainder, op1);
	    emit_label (label);
	  }
	else
	  {
	    rtx abs_rem, abs_op1, tem, mask;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				     remainder, 0) == 0)
	      {
		rtx tem;
		quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
					 quotient, 0, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 0, OPTAB_LIB_WIDEN);
	      }
	    abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
	    abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
	    tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
				build_int_cst (NULL_TREE, 1),
				NULL_RTX, 1);
	    do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				 build_int_cst (NULL_TREE, size - 1),
				 NULL_RTX, 0);
	    tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_inc (quotient, tem);
	    tem = expand_binop (compute_mode, xor_optab, mask, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_dec (remainder, tem);
	    emit_label (label);
	  }
	return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
	gcc_unreachable ();
      }
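/* Editor's note -- the unsigned round-to-nearest adjustment above, as
   plain C (illustrative only, not part of the original file).  The
   quotient is bumped when the remainder exceeds (divisor - 1) / 2, so
   halves round away from zero:

     unsigned round_div (unsigned x, unsigned y)
     {
       unsigned q = x / y, r = x % y;
       if (r > (y - 1) / 2)
	 {
	   q += 1;
	   r -= y;   /* wraps modularly, matching the RTL's expand_dec */
	 }
       return q;
     }

   The signed branch compares 2 * abs (remainder) against abs (divisor)
   and uses the XOR-derived sign mask to add or subtract one.  */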
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (rem_flag)
	{
	  /* Try to produce the remainder without producing the quotient.
	     If we seem to have a divmod pattern that does not require widening,
	     don't try widening here.  We should really have a WIDEN argument
	     to expand_twoval_binop, since what we'd really like to do here is
	     1) try a mod insn in compute_mode
	     2) try a divmod insn in compute_mode
	     3) try a div insn in compute_mode and multiply-subtract to get
		remainder
	     4) try the same things with widening allowed.  */
	  remainder
	    = sign_expand_binop (compute_mode, umod_optab, smod_optab,
				 op0, op1, target,
				 unsignedp,
				 ((optab2->handlers[compute_mode].insn_code
				   != CODE_FOR_nothing)
				  ? OPTAB_DIRECT : OPTAB_WIDEN));
	  if (remainder == 0)
	    {
	      /* No luck there.  Can we do remainder and divide at once
		 without a library call?  */
	      remainder = gen_reg_rtx (compute_mode);
	      if (! expand_twoval_binop ((unsignedp
					  ? udivmod_optab
					  : sdivmod_optab),
					 op0, op1,
					 NULL_RTX, remainder, unsignedp))
		remainder = 0;
	    }

	  if (remainder)
	    return gen_lowpart (mode, remainder);
	}

      /* Produce the quotient.  Try a quotient insn, but not a library call.
	 If we have a divmod in this mode, use it in preference to widening
	 the div (for this test we assume it will not fail).  Note that optab2
	 is set to the one of the two optabs that the call below will use.  */
      quotient
	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
			     op0, op1, rem_flag ? NULL_RTX : target,
			     unsignedp,
			     ((optab2->handlers[compute_mode].insn_code
			       != CODE_FOR_nothing)
			      ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
	{
	  /* No luck there.  Try a quotient-and-remainder insn,
	     keeping the quotient alone.  */
	  quotient = gen_reg_rtx (compute_mode);
	  if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
				     op0, op1,
				     quotient, NULL_RTX, unsignedp))
	    {
	      quotient = 0;
	      /* Still no luck.  If we are not computing the remainder,
		 use a library call for the quotient.  */
	      if (! rem_flag)
		quotient = sign_expand_binop (compute_mode,
					      udiv_optab, sdiv_optab,
					      op0, op1, target,
					      unsignedp, OPTAB_LIB_WIDEN);
	    }
	}
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (quotient == 0)
	{
	  /* No divide instruction either.  Use library for remainder.  */
	  remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
					 op0, op1, target,
					 unsignedp, OPTAB_LIB_WIDEN);
	  /* No remainder function.  Try a quotient-and-remainder
	     function, keeping the remainder.  */
	  if (remainder == 0)
	    {
	      remainder = gen_reg_rtx (compute_mode);
	      if (!expand_twoval_binop_libfunc
		  (unsignedp ? udivmod_optab : sdivmod_optab,
		   op0, op1,
		   NULL_RTX, remainder,
		   unsignedp ? UMOD : MOD))
		remainder = NULL_RTX;
	    }
	}
      else
	{
	  /* We divided.  Now finish doing X - Y * (X / Y).  */
	  remainder = expand_mult (compute_mode, quotient, op1,
				   NULL_RTX, unsignedp);
	  remainder = expand_binop (compute_mode, sub_optab, op0,
				    remainder, target, unsignedp,
				    OPTAB_LIB_WIDEN);
	}
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
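/* Editor's note -- the finishing step above in one line of C (illustrative
   only): once a quotient exists, the remainder for any rounding mode is
   recovered with a multiply and a subtract,

     r = x - y * q;   /* X - Y * (X / Y) */

   which is why each case in the switch may leave only QUOTIENT set.  */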
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
	HOST_WIDE_INT hi = 0;

	if (INTVAL (x) < 0
	    && !(TYPE_UNSIGNED (type)
		 && (GET_MODE_BITSIZE (TYPE_MODE (type))
		     < HOST_BITS_PER_WIDE_INT)))
	  hi = -1;

	t = build_int_cst_wide (type, INTVAL (x), hi);

	return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	t = build_int_cst_wide (type,
				CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  t = build_real (type, d);
	}

      return t;

    case CONST_VECTOR:
      {
	int i, units;
	rtx elt;
	tree t = NULL_TREE;

	units = CONST_VECTOR_NUNITS (x);

	/* Build a tree with vector elements.  */
	for (i = units - 1; i >= 0; --i)
	  {
	    elt = CONST_VECTOR_ELT (x, i);
	    t = tree_cons (NULL_TREE, make_tree (type, elt), t);
	  }

	return build_vector (type, t);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = lang_hooks.types.unsigned_type (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = lang_hooks.types.signed_type (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
	t = lang_hooks.types.signed_type (type);
      else
	t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = lang_hooks.types.unsigned_type (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
					  GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    default:
      t = build_decl (VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
	 ptr_mode.  So convert.  */
      if (POINTER_TYPE_P (type))
	x = convert_memory_address (TYPE_MODE (type), x);

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
	 want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;

  if (unsignedp)
    code = unsigned_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
	  && (op1 == const0_rtx || op1 == constm1_rtx))
	{
	  rtx op00, op01, op0both;

	  /* Do a logical OR or AND of the two words and compare the
	     result.  */
	  op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
	  op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
	  op0both = expand_binop (word_mode,
				  op1 == const0_rtx ? ior_optab : and_optab,
				  op00, op01, NULL_RTX, unsignedp,
				  OPTAB_DIRECT);

	  if (op0both != 0)
	    return emit_store_flag (target, code, op0both, op1, word_mode,
				    unsignedp, normalizep);
	}
      else if ((code == LT || code == GE) && op1 == const0_rtx)
	{
	  rtx op0h;

	  /* If testing the sign bit, can just test on high word.  */
	  op0h = simplify_gen_subreg (word_mode, op0, mode,
				      subreg_highpart_offset (word_mode,
							      mode));
	  return emit_store_flag (target, code, op0h, op1, word_mode,
				  unsignedp, normalizep);
	}
    }

  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;
      /* If the result is to be wider than OP0, it is best to convert it
	 first.  If it is to be narrower, it is *incorrect* to convert it
	 first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
	{
	  op0 = convert_modes (target_mode, mode, op0, 0);
	  mode = target_mode;
	}

      if (target_mode != mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
			    size_int (GET_MODE_BITSIZE (mode) - 1),
			    subtarget, normalizep != -1);

      if (mode != target_mode)
	op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
  if (icode != CODE_FOR_nothing)
    {
      insn_operand_predicate_fn pred;

      /* We think we may be able to do this with a scc insn.  Emit the
	 comparison and then the scc insn.  */

      do_pending_stack_adjust ();
      last = get_last_insn ();

      comparison
	= compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
      if (CONSTANT_P (comparison))
	{
	  switch (GET_CODE (comparison))
	    {
	    case CONST_INT:
	      if (comparison == const0_rtx)
		return const0_rtx;
	      break;

#ifdef FLOAT_STORE_FLAG_VALUE
	    case CONST_DOUBLE:
	      if (comparison == CONST0_RTX (GET_MODE (comparison)))
		return const0_rtx;
	      break;
#endif
	    default:
	      gcc_unreachable ();
	    }

	  if (normalizep == 1)
	    return const1_rtx;
	  if (normalizep == -1)
	    return constm1_rtx;
	  return const_true_rtx;
	}

      /* The code of COMPARISON may not match CODE if compare_from_rtx
	 decided to swap its operands and reverse the original code.

	 We know that compare_from_rtx returns either a CONST_INT or
	 a new comparison code, so it is safe to just extract the
	 code from COMPARISON.  */
      code = GET_CODE (comparison);

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_data[(int) icode].operand[0].mode;
      subtarget = target;
      pred = insn_data[(int) icode].operand[0].predicate;
      if (optimize || ! (*pred) (subtarget, compare_mode))
	subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
	{
	  emit_insn (pattern);

	  /* If we are converting to a wider mode, first convert to
	     TARGET_MODE, then normalize.  This produces better combining
	     opportunities on machines that have a SIGN_EXTRACT when we are
	     testing a single bit.  This mostly benefits the 68k.

	     If STORE_FLAG_VALUE does not have the sign bit set when
	     interpreted in COMPARE_MODE, we can do this conversion as
	     unsigned, which is usually more efficient.  */
	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
	    {
	      convert_move (target, subtarget,
			    (GET_MODE_BITSIZE (compare_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			    && 0 == (STORE_FLAG_VALUE
				     & ((HOST_WIDE_INT) 1
					<< (GET_MODE_BITSIZE (compare_mode) - 1))));
	      op0 = target;
	      compare_mode = target_mode;
	    }
	  else
	    op0 = subtarget;

	  /* If we want to keep subexpressions around, don't reuse our
	     last target.  */

	  if (optimize)
	    subtarget = 0;

	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
	     we don't have to do anything.  */
	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
	    ;
	  /* STORE_FLAG_VALUE might be the most negative number, so write
	     the comparison this way to avoid a compiler-time warning.  */
	  else if (- normalizep == STORE_FLAG_VALUE)
	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
	     makes it hard to use a value of just the sign bit due to
	     ANSI integer constant typing rules.  */
	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
		   && (STORE_FLAG_VALUE
		       & ((HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
				subtarget, normalizep == 1);
	  else
	    {
	      gcc_assert (STORE_FLAG_VALUE & 1);

	      op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
	      if (normalizep == -1)
		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
	    }

	  /* If we were converting to a smaller mode, do the
	     conversion now.  */
	  if (target_mode != compare_mode)
	    {
	      convert_move (target, op0, 0);
	      return target;
	    }
	  else
	    return op0;
	}

      delete_insns_since (last);
    }
  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;

  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		   == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
	;
      else
	return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
== EQ
|| code
== NE
)
5452 /* For EQ or NE, one way to do the comparison is to apply an operation
5453 that converts the operand into a positive number if it is nonzero
5454 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5455 for NE we negate. This puts the result in the sign bit. Then we
5456 normalize with a shift, if needed.
5458 Two operations that can do the above actions are ABS and FFS, so try
5459 them. If that doesn't work, and MODE is smaller than a full word,
5460 we can use zero-extension to the wider mode (an unsigned conversion)
5461 as the operation. */
5463 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5464 that is compensated by the subsequent overflow when subtracting
5467 if (abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)
5468 tem
= expand_unop (mode
, abs_optab
, op0
, subtarget
, 1);
5469 else if (ffs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)
5470 tem
= expand_unop (mode
, ffs_optab
, op0
, subtarget
, 1);
5471 else if (GET_MODE_SIZE (mode
) < UNITS_PER_WORD
)
5473 tem
= convert_modes (word_mode
, mode
, op0
, 1);
5480 tem
= expand_binop (mode
, sub_optab
, tem
, const1_rtx
, subtarget
,
5483 tem
= expand_unop (mode
, neg_optab
, tem
, subtarget
, 0);
5486 /* If we couldn't do it that way, for NE we can "or" the two's complement
5487 of the value with itself. For EQ, we take the one's complement of
5488 that "or", which is an extra insn, so we only handle EQ if branches
5491 if (tem
== 0 && (code
== NE
|| BRANCH_COST
> 1))
5493 if (rtx_equal_p (subtarget
, op0
))
5496 tem
= expand_unop (mode
, neg_optab
, op0
, subtarget
, 0);
5497 tem
= expand_binop (mode
, ior_optab
, tem
, op0
, subtarget
, 0,
5500 if (tem
&& code
== EQ
)
5501 tem
= expand_unop (mode
, one_cmpl_optab
, tem
, subtarget
, 0);
5505 if (tem
&& normalizep
)
5506 tem
= expand_shift (RSHIFT_EXPR
, mode
, tem
,
5507 size_int (GET_MODE_BITSIZE (mode
) - 1),
5508 subtarget
, normalizep
== 1);
5512 if (GET_MODE (tem
) != target_mode
)
5514 convert_move (target
, tem
, 0);
5517 else if (!subtarget
)
5519 emit_move_insn (target
, tem
);
5524 delete_insns_since (last
);
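/* Editor's note -- the NE/EQ fallback just above, as C for a 32-bit int
   (illustrative only; -a wraps at INT_MIN under two's complement, which
   is exactly what the trick relies on):

     int ne0 (int a) { return (int) ((unsigned) (-a | a) >> 31); }
     int eq0 (int a) { return (int) ((unsigned) ~(-a | a) >> 31); }

   (-a | a) has the sign bit set exactly when a is nonzero; EQ needs the
   extra one's-complement, hence the BRANCH_COST guard.  */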
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
			   NULL_RTX, label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
		 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
			   NULL_RTX, NULL_RTX, label);
}