1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
5 Free Software Foundation, Inc.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "real.h"
38 #include "recog.h"
39 #include "langhooks.h"
40
41 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT,
43 unsigned HOST_WIDE_INT, rtx);
44 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
45 unsigned HOST_WIDE_INT, rtx);
46 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
47 unsigned HOST_WIDE_INT,
48 unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT, rtx, int);
50 static rtx mask_rtx (enum machine_mode, int, int, int);
51 static rtx lshift_value (enum machine_mode, rtx, int, int);
52 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
53 unsigned HOST_WIDE_INT, int);
54 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
55 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
56 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
57
58 /* Test whether a value is zero or a power of two. */
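/* For example, 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0; for a non-power
   such as 6 (0b0110), 6 & 5 yields 0b0100, which is nonzero.  Zero itself
   also satisfies the test, hence the name.  */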
59 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
60
61 /* Nonzero means divides or modulus operations are relatively cheap for
62 powers of two, so don't use branches; emit the operation instead.
63 Usually, this will mean that the MD file will emit non-branch
64 sequences. */
65
66 static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
67 static bool smod_pow2_cheap[NUM_MACHINE_MODES];
68
69 #ifndef SLOW_UNALIGNED_ACCESS
70 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
71 #endif
72
73 /* For compilers that support multiple targets with different word sizes,
74 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
75 is the H8/300(H) compiler. */
76
77 #ifndef MAX_BITS_PER_WORD
78 #define MAX_BITS_PER_WORD BITS_PER_WORD
79 #endif
80
81 /* Reduce conditional compilation elsewhere. */
82 #ifndef HAVE_insv
83 #define HAVE_insv 0
84 #define CODE_FOR_insv CODE_FOR_nothing
85 #define gen_insv(a,b,c,d) NULL_RTX
86 #endif
87 #ifndef HAVE_extv
88 #define HAVE_extv 0
89 #define CODE_FOR_extv CODE_FOR_nothing
90 #define gen_extv(a,b,c,d) NULL_RTX
91 #endif
92 #ifndef HAVE_extzv
93 #define HAVE_extzv 0
94 #define CODE_FOR_extzv CODE_FOR_nothing
95 #define gen_extzv(a,b,c,d) NULL_RTX
96 #endif
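/* With these fallback definitions, code of the form "if (HAVE_insv) ..."
   compiles on every target; where the named pattern does not exist the
   condition is a constant zero and the generic shift-and-mask code in
   store_fixed_bit_field / extract_fixed_bit_field is used instead.  */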
97
98 /* Cost of various pieces of RTL. Note that some of these are indexed by
99 shift count and some by mode. */
100 static int zero_cost;
101 static int add_cost[NUM_MACHINE_MODES];
102 static int neg_cost[NUM_MACHINE_MODES];
103 static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
104 static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
106 static int mul_cost[NUM_MACHINE_MODES];
107 static int sdiv_cost[NUM_MACHINE_MODES];
108 static int udiv_cost[NUM_MACHINE_MODES];
109 static int mul_widen_cost[NUM_MACHINE_MODES];
110 static int mul_highpart_cost[NUM_MACHINE_MODES];
111
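/* Fill in the cost tables above.  Rather than emitting real insns, we build
   skeleton rtxes for each operation of interest in the local "all" struct
   and ask rtx_cost what the target charges for them in each integer mode.  */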
112 void
113 init_expmed (void)
114 {
115 struct
116 {
117 struct rtx_def reg; rtunion reg_fld[2];
118 struct rtx_def plus; rtunion plus_fld1;
119 struct rtx_def neg;
120 struct rtx_def mult; rtunion mult_fld1;
121 struct rtx_def sdiv; rtunion sdiv_fld1;
122 struct rtx_def udiv; rtunion udiv_fld1;
123 struct rtx_def zext;
124 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
125 struct rtx_def smod_32; rtunion smod_32_fld1;
126 struct rtx_def wide_mult; rtunion wide_mult_fld1;
127 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
128 struct rtx_def wide_trunc;
129 struct rtx_def shift; rtunion shift_fld1;
130 struct rtx_def shift_mult; rtunion shift_mult_fld1;
131 struct rtx_def shift_add; rtunion shift_add_fld1;
132 struct rtx_def shift_sub; rtunion shift_sub_fld1;
133 } all;
134
135 rtx pow2[MAX_BITS_PER_WORD];
136 rtx cint[MAX_BITS_PER_WORD];
137 int m, n;
138 enum machine_mode mode, wider_mode;
139
140 zero_cost = rtx_cost (const0_rtx, 0);
141
142 for (m = 1; m < MAX_BITS_PER_WORD; m++)
143 {
144 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
145 cint[m] = GEN_INT (m);
146 }
147
148 memset (&all, 0, sizeof all);
149
150 PUT_CODE (&all.reg, REG);
151 /* Avoid using hard regs in ways which may be unsupported. */
152 REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;
153
154 PUT_CODE (&all.plus, PLUS);
155 XEXP (&all.plus, 0) = &all.reg;
156 XEXP (&all.plus, 1) = &all.reg;
157
158 PUT_CODE (&all.neg, NEG);
159 XEXP (&all.neg, 0) = &all.reg;
160
161 PUT_CODE (&all.mult, MULT);
162 XEXP (&all.mult, 0) = &all.reg;
163 XEXP (&all.mult, 1) = &all.reg;
164
165 PUT_CODE (&all.sdiv, DIV);
166 XEXP (&all.sdiv, 0) = &all.reg;
167 XEXP (&all.sdiv, 1) = &all.reg;
168
169 PUT_CODE (&all.udiv, UDIV);
170 XEXP (&all.udiv, 0) = &all.reg;
171 XEXP (&all.udiv, 1) = &all.reg;
172
173 PUT_CODE (&all.sdiv_32, DIV);
174 XEXP (&all.sdiv_32, 0) = &all.reg;
175 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
176
177 PUT_CODE (&all.smod_32, MOD);
178 XEXP (&all.smod_32, 0) = &all.reg;
179 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
180
181 PUT_CODE (&all.zext, ZERO_EXTEND);
182 XEXP (&all.zext, 0) = &all.reg;
183
184 PUT_CODE (&all.wide_mult, MULT);
185 XEXP (&all.wide_mult, 0) = &all.zext;
186 XEXP (&all.wide_mult, 1) = &all.zext;
187
188 PUT_CODE (&all.wide_lshr, LSHIFTRT);
189 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
190
191 PUT_CODE (&all.wide_trunc, TRUNCATE);
192 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
193
194 PUT_CODE (&all.shift, ASHIFT);
195 XEXP (&all.shift, 0) = &all.reg;
196
197 PUT_CODE (&all.shift_mult, MULT);
198 XEXP (&all.shift_mult, 0) = &all.reg;
199
200 PUT_CODE (&all.shift_add, PLUS);
201 XEXP (&all.shift_add, 0) = &all.shift_mult;
202 XEXP (&all.shift_add, 1) = &all.reg;
203
204 PUT_CODE (&all.shift_sub, MINUS);
205 XEXP (&all.shift_sub, 0) = &all.shift_mult;
206 XEXP (&all.shift_sub, 1) = &all.reg;
207
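/* Walk the integer modes from the narrowest upwards, stamping the skeleton
   rtxes with each mode and recording the resulting costs.  */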
208 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
209 mode != VOIDmode;
210 mode = GET_MODE_WIDER_MODE (mode))
211 {
212 PUT_MODE (&all.reg, mode);
213 PUT_MODE (&all.plus, mode);
214 PUT_MODE (&all.neg, mode);
215 PUT_MODE (&all.mult, mode);
216 PUT_MODE (&all.sdiv, mode);
217 PUT_MODE (&all.udiv, mode);
218 PUT_MODE (&all.sdiv_32, mode);
219 PUT_MODE (&all.smod_32, mode);
220 PUT_MODE (&all.wide_trunc, mode);
221 PUT_MODE (&all.shift, mode);
222 PUT_MODE (&all.shift_mult, mode);
223 PUT_MODE (&all.shift_add, mode);
224 PUT_MODE (&all.shift_sub, mode);
225
226 add_cost[mode] = rtx_cost (&all.plus, SET);
227 neg_cost[mode] = rtx_cost (&all.neg, SET);
228 mul_cost[mode] = rtx_cost (&all.mult, SET);
229 sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
230 udiv_cost[mode] = rtx_cost (&all.udiv, SET);
231
232 sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
233 <= 2 * add_cost[mode]);
234 smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
235 <= 4 * add_cost[mode]);
236
237 wider_mode = GET_MODE_WIDER_MODE (mode);
238 if (wider_mode != VOIDmode)
239 {
240 PUT_MODE (&all.zext, wider_mode);
241 PUT_MODE (&all.wide_mult, wider_mode);
242 PUT_MODE (&all.wide_lshr, wider_mode);
243 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
244
245 mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
246 mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
247 }
248
249 shift_cost[mode][0] = 0;
250 shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];
251
252 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
253 for (m = 1; m < n; m++)
254 {
255 XEXP (&all.shift, 1) = cint[m];
256 XEXP (&all.shift_mult, 1) = pow2[m];
257
258 shift_cost[mode][m] = rtx_cost (&all.shift, SET);
259 shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
260 shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
261 }
262 }
263 }
264
265 /* Return an rtx representing minus the value of X.
266 MODE is the intended mode of the result,
267 useful if X is a CONST_INT. */
268
269 rtx
270 negate_rtx (enum machine_mode mode, rtx x)
271 {
272 rtx result = simplify_unary_operation (NEG, mode, x, mode);
273
274 if (result == 0)
275 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
276
277 return result;
278 }
279
280 /* Report on the availability of insv/extv/extzv and the desired mode
281 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
282 is false; else the mode of the specified operand. If OPNO is -1,
283 all the caller cares about is whether the insn is available. */
284 enum machine_mode
285 mode_for_extraction (enum extraction_pattern pattern, int opno)
286 {
287 const struct insn_data *data;
288
289 switch (pattern)
290 {
291 case EP_insv:
292 if (HAVE_insv)
293 {
294 data = &insn_data[CODE_FOR_insv];
295 break;
296 }
297 return MAX_MACHINE_MODE;
298
299 case EP_extv:
300 if (HAVE_extv)
301 {
302 data = &insn_data[CODE_FOR_extv];
303 break;
304 }
305 return MAX_MACHINE_MODE;
306
307 case EP_extzv:
308 if (HAVE_extzv)
309 {
310 data = &insn_data[CODE_FOR_extzv];
311 break;
312 }
313 return MAX_MACHINE_MODE;
314
315 default:
316 gcc_unreachable ();
317 }
318
319 if (opno == -1)
320 return VOIDmode;
321
322 /* Everyone who uses this function used to follow it with
323 if (result == VOIDmode) result = word_mode; */
324 if (data->operand[opno].mode == VOIDmode)
325 return word_mode;
326 return data->operand[opno].mode;
327 }
328
329 \f
330 /* Generate code to store value from rtx VALUE
331 into a bit-field within structure STR_RTX
332 containing BITSIZE bits starting at bit BITNUM.
333 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
336
337 /* ??? Note that there are two different ideas here for how
338 to determine the size to count bits within, for a register.
339 One is BITS_PER_WORD, and the other is the size of operand 3
340 of the insv pattern.
341
342 If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
343 otherwise, we use the mode of operand 3. */
344
345 rtx
346 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
347 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
348 rtx value)
349 {
350 unsigned int unit
351 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
352 unsigned HOST_WIDE_INT offset, bitpos;
353 rtx op0 = str_rtx;
354 int byte_offset;
355 rtx orig_value;
356
357 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
358
359 while (GET_CODE (op0) == SUBREG)
360 {
361 /* The following line once was done only if WORDS_BIG_ENDIAN,
362 but I think that is a mistake. WORDS_BIG_ENDIAN is
363 meaningful at a much higher level; when structures are copied
364 between memory and regs, the higher-numbered regs
365 always get higher addresses. */
366 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
367 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
368
369 byte_offset = 0;
370
371 /* Paradoxical subregs need special handling on big endian machines. */
372 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
373 {
374 int difference = inner_mode_size - outer_mode_size;
375
376 if (WORDS_BIG_ENDIAN)
377 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
378 if (BYTES_BIG_ENDIAN)
379 byte_offset += difference % UNITS_PER_WORD;
380 }
381 else
382 byte_offset = SUBREG_BYTE (op0);
383
384 bitnum += byte_offset * BITS_PER_UNIT;
385 op0 = SUBREG_REG (op0);
386 }
387
388 /* No action is needed if the target is a register and if the field
389 lies completely outside that register. This can occur if the source
390 code contains an out-of-bounds access to a small array. */
391 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
392 return value;
393
394 /* Use vec_set patterns for inserting parts of vectors whenever
395 available. */
396 if (VECTOR_MODE_P (GET_MODE (op0))
397 && !MEM_P (op0)
398 && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
399 != CODE_FOR_nothing)
400 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
401 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
402 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
403 {
404 enum machine_mode outermode = GET_MODE (op0);
405 enum machine_mode innermode = GET_MODE_INNER (outermode);
406 int icode = (int) vec_set_optab->handlers[outermode].insn_code;
407 int pos = bitnum / GET_MODE_BITSIZE (innermode);
408 rtx rtxpos = GEN_INT (pos);
409 rtx src = value;
410 rtx dest = op0;
411 rtx pat, seq;
412 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
413 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
414 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
415
416 start_sequence ();
417
418 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
419 src = copy_to_mode_reg (mode1, src);
420
421 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
422 rtxpos = copy_to_mode_reg (mode2, rtxpos);
423
424 /* We could handle this, but we should always be called with a pseudo
425 for our targets and all insns should take them as outputs. */
426 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
427 && (*insn_data[icode].operand[1].predicate) (src, mode1)
428 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
429 pat = GEN_FCN (icode) (dest, src, rtxpos);
430 seq = get_insns ();
431 end_sequence ();
432 if (pat)
433 {
434 emit_insn (seq);
435 emit_insn (pat);
436 return dest;
437 }
438 }
439
440 /* If the target is a register, overwriting the entire object, or storing
441 a full-word or multi-word field can be done with just a SUBREG.
442
443 If the target is memory, storing any naturally aligned field can be
444 done with a simple store. For targets that support fast unaligned
445 memory, any naturally sized, unit aligned field can be done directly. */
446
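/* Split the bit number into a unit offset and a bit position within that
   unit.  For instance, with a MEM operand (unit is BITS_PER_UNIT, normally 8)
   and bitnum == 37, offset becomes 4 and bitpos becomes 5: the field starts
   5 bits into the fifth byte.  */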
447 offset = bitnum / unit;
448 bitpos = bitnum % unit;
449 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
450 + (offset * UNITS_PER_WORD);
451
452 if (bitpos == 0
453 && bitsize == GET_MODE_BITSIZE (fieldmode)
454 && (!MEM_P (op0)
455 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
456 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
457 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
458 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
459 || (offset * BITS_PER_UNIT % bitsize == 0
460 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
461 {
462 if (MEM_P (op0))
463 op0 = adjust_address (op0, fieldmode, offset);
464 else if (GET_MODE (op0) != fieldmode)
465 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
466 byte_offset);
467 emit_move_insn (op0, value);
468 return value;
469 }
470
471 /* Make sure we are playing with integral modes. Pun with subregs
472 if we aren't. This must come after the entire register case above,
473 since that case is valid for any mode. The following cases are only
474 valid for integral modes. */
475 {
476 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
477 if (imode != GET_MODE (op0))
478 {
479 if (MEM_P (op0))
480 op0 = adjust_address (op0, imode, 0);
481 else
482 {
483 gcc_assert (imode != BLKmode);
484 op0 = gen_lowpart (imode, op0);
485 }
486 }
487 }
488
489 /* We may be accessing data outside the field, which means
490 we can alias adjacent data. */
491 if (MEM_P (op0))
492 {
493 op0 = shallow_copy_rtx (op0);
494 set_mem_alias_set (op0, 0);
495 set_mem_expr (op0, 0);
496 }
497
498 /* If OP0 is a register, BITPOS must count within a word.
499 But as we have it, it counts within whatever size OP0 now has.
500 On a bigendian machine, these are not the same, so convert. */
501 if (BYTES_BIG_ENDIAN
502 && !MEM_P (op0)
503 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
504 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
505
506 /* Storing an lsb-aligned field in a register
507 can be done with a movestrict instruction. */
508
509 if (!MEM_P (op0)
510 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
511 && bitsize == GET_MODE_BITSIZE (fieldmode)
512 && (movstrict_optab->handlers[fieldmode].insn_code
513 != CODE_FOR_nothing))
514 {
515 int icode = movstrict_optab->handlers[fieldmode].insn_code;
516
517 /* Get appropriate low part of the value being stored. */
518 if (GET_CODE (value) == CONST_INT || REG_P (value))
519 value = gen_lowpart (fieldmode, value);
520 else if (!(GET_CODE (value) == SYMBOL_REF
521 || GET_CODE (value) == LABEL_REF
522 || GET_CODE (value) == CONST))
523 value = convert_to_mode (fieldmode, value, 0);
524
525 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
526 value = copy_to_mode_reg (fieldmode, value);
527
528 if (GET_CODE (op0) == SUBREG)
529 {
530 /* Else we've got some float mode source being extracted into
531 a different float mode destination -- this combination of
532 subregs results in Severe Tire Damage. */
533 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
534 || GET_MODE_CLASS (fieldmode) == MODE_INT
535 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
536 op0 = SUBREG_REG (op0);
537 }
538
539 emit_insn (GEN_FCN (icode)
540 (gen_rtx_SUBREG (fieldmode, op0,
541 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
542 + (offset * UNITS_PER_WORD)),
543 value));
544
545 return value;
546 }
547
548 /* Handle fields bigger than a word. */
549
550 if (bitsize > BITS_PER_WORD)
551 {
552 /* Here we transfer the words of the field
553 in the order least significant first.
554 This is because the most significant word is the one which may
555 be less than full.
556 However, only do that if the value is not BLKmode. */
557
558 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
559 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
560 unsigned int i;
561
562 /* This is the mode we must force value to, so that there will be enough
563 subwords to extract. Note that fieldmode will often (always?) be
564 VOIDmode, because that is what store_field uses to indicate that this
565 is a bit field, but passing VOIDmode to operand_subword_force
566 is not allowed. */
567 fieldmode = GET_MODE (value);
568 if (fieldmode == VOIDmode)
569 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
570
571 for (i = 0; i < nwords; i++)
572 {
573 /* If I is 0, use the low-order word in both field and target;
574 if I is 1, use the next to lowest word; and so on. */
575 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
576 unsigned int bit_offset = (backwards
577 ? MAX ((int) bitsize - ((int) i + 1)
578 * BITS_PER_WORD,
579 0)
580 : (int) i * BITS_PER_WORD);
581
582 store_bit_field (op0, MIN (BITS_PER_WORD,
583 bitsize - i * BITS_PER_WORD),
584 bitnum + bit_offset, word_mode,
585 operand_subword_force (value, wordnum, fieldmode));
586 }
587 return value;
588 }
589
590 /* From here on we can assume that the field to be stored fits within
591 a single word; fields wider than a word were handled above. */
592
593 /* OFFSET is the number of words or bytes (UNIT says which)
594 from STR_RTX to the first word or byte containing part of the field. */
595
596 if (!MEM_P (op0))
597 {
598 if (offset != 0
599 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
600 {
601 if (!REG_P (op0))
602 {
603 /* Since this is a destination (lvalue), we can't copy
604 it to a pseudo. We can remove a SUBREG that does not
605 change the size of the operand. Such a SUBREG may
606 have been added above. */
607 gcc_assert (GET_CODE (op0) == SUBREG
608 && (GET_MODE_SIZE (GET_MODE (op0))
609 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
610 op0 = SUBREG_REG (op0);
611 }
612 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
613 op0, (offset * UNITS_PER_WORD));
614 }
615 offset = 0;
616 }
617
618 /* If VALUE has a floating-point or complex mode, access it as an
619 integer of the corresponding size. This can occur on a machine
620 with 64 bit registers that uses SFmode for float. It can also
621 occur for unaligned float or complex fields. */
622 orig_value = value;
623 if (GET_MODE (value) != VOIDmode
624 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
625 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
626 {
627 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
628 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
629 }
630
631 /* Now OFFSET is nonzero only if OP0 is memory
632 and is therefore always measured in bytes. */
633
634 if (HAVE_insv
635 && GET_MODE (value) != BLKmode
636 && bitsize > 0
637 && GET_MODE_BITSIZE (op_mode) >= bitsize
638 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
639 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
640 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
641 VOIDmode))
642 {
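/* Try the target's insv pattern.  OP0 and VALUE are first coerced into the
   modes the pattern's operands expect; if the pattern cannot be generated
   after all, the emitted insns are deleted and we fall back to
   store_fixed_bit_field below.  */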
643 int xbitpos = bitpos;
644 rtx value1;
645 rtx xop0 = op0;
646 rtx last = get_last_insn ();
647 rtx pat;
648 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
649 int save_volatile_ok = volatile_ok;
650
651 volatile_ok = 1;
652
653 /* If this machine's insv can only insert into a register, copy OP0
654 into a register and save it back later. */
655 if (MEM_P (op0)
656 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
657 (op0, VOIDmode)))
658 {
659 rtx tempreg;
660 enum machine_mode bestmode;
661
662 /* Get the mode to use for inserting into this field. If OP0 is
663 BLKmode, get the smallest mode consistent with the alignment. If
664 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
665 mode. Otherwise, use the smallest mode containing the field. */
666
667 if (GET_MODE (op0) == BLKmode
668 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
669 bestmode
670 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
671 MEM_VOLATILE_P (op0));
672 else
673 bestmode = GET_MODE (op0);
674
675 if (bestmode == VOIDmode
676 || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
677 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
678 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
679 goto insv_loses;
680
681 /* Adjust address to point to the containing unit of that mode.
682 Compute offset as multiple of this unit, counting in bytes. */
683 unit = GET_MODE_BITSIZE (bestmode);
684 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
685 bitpos = bitnum % unit;
686 op0 = adjust_address (op0, bestmode, offset);
687
688 /* Fetch that unit, store the bitfield in it, then store
689 the unit. */
690 tempreg = copy_to_reg (op0);
691 store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
692 emit_move_insn (op0, tempreg);
693 return value;
694 }
695 volatile_ok = save_volatile_ok;
696
697 /* Add OFFSET into OP0's address. */
698 if (MEM_P (xop0))
699 xop0 = adjust_address (xop0, byte_mode, offset);
700
701 /* If xop0 is a register, we need it in MAXMODE
702 to make it acceptable to the format of insv. */
703 if (GET_CODE (xop0) == SUBREG)
704 /* We can't just change the mode, because this might clobber op0,
705 and we will need the original value of op0 if insv fails. */
706 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
707 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
708 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
709
710 /* On big-endian machines, we count bits from the most significant.
711 If the bit field insn does not, we must invert. */
712
713 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
714 xbitpos = unit - bitsize - xbitpos;
715
716 /* We have been counting XBITPOS within UNIT.
717 Count instead within the size of the register. */
718 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
719 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
720
721 unit = GET_MODE_BITSIZE (maxmode);
722
723 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
724 value1 = value;
725 if (GET_MODE (value) != maxmode)
726 {
727 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
728 {
729 /* Optimization: Don't bother really extending VALUE
730 if it has all the bits we will actually use. However,
731 if we must narrow it, be sure we do it correctly. */
732
733 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
734 {
735 rtx tmp;
736
737 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
738 if (! tmp)
739 tmp = simplify_gen_subreg (maxmode,
740 force_reg (GET_MODE (value),
741 value1),
742 GET_MODE (value), 0);
743 value1 = tmp;
744 }
745 else
746 value1 = gen_lowpart (maxmode, value1);
747 }
748 else if (GET_CODE (value) == CONST_INT)
749 value1 = gen_int_mode (INTVAL (value), maxmode);
750 else
751 /* Parse phase is supposed to make VALUE's data type
752 match that of the component reference, which is a type
753 at least as wide as the field; so VALUE should have
754 a mode that corresponds to that type. */
755 gcc_assert (CONSTANT_P (value));
756 }
757
758 /* If this machine's insv insists on a register,
759 get VALUE1 into a register. */
760 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
761 (value1, maxmode)))
762 value1 = force_reg (maxmode, value1);
763
764 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
765 if (pat)
766 emit_insn (pat);
767 else
768 {
769 delete_insns_since (last);
770 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
771 }
772 }
773 else
774 insv_loses:
775 /* Insv is not available; store using shifts and boolean ops. */
776 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
777 return value;
778 }
779 \f
780 /* Use shifts and boolean operations to store VALUE
781 into a bit field of width BITSIZE
782 in a memory location specified by OP0 except offset by OFFSET bytes.
783 (OFFSET must be 0 if OP0 is a register.)
784 The field starts at position BITPOS within the byte.
785 (If OP0 is a register, it may be a full word or a narrower mode,
786 but BITPOS still counts within a full word,
787 which is significant on bigendian machines.) */
788
789 static void
790 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
791 unsigned HOST_WIDE_INT bitsize,
792 unsigned HOST_WIDE_INT bitpos, rtx value)
793 {
794 enum machine_mode mode;
795 unsigned int total_bits = BITS_PER_WORD;
796 rtx temp;
797 int all_zero = 0;
798 int all_one = 0;
799
800 /* There is a case not handled here:
801 a structure with a known alignment of just a halfword
802 and a field split across two aligned halfwords within the structure.
803 Or likewise a structure with a known alignment of just a byte
804 and a field split across two bytes.
805 Such cases are not supposed to be able to occur. */
806
807 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
808 {
809 gcc_assert (!offset);
810 /* Special treatment for a bit field split across two registers. */
811 if (bitsize + bitpos > BITS_PER_WORD)
812 {
813 store_split_bit_field (op0, bitsize, bitpos, value);
814 return;
815 }
816 }
817 else
818 {
819 /* Get the proper mode to use for this field. We want a mode that
820 includes the entire field. If such a mode would be larger than
821 a word, we won't be doing the extraction the normal way.
822 We don't want a mode bigger than the destination. */
823
824 mode = GET_MODE (op0);
825 if (GET_MODE_BITSIZE (mode) == 0
826 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
827 mode = word_mode;
828 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
829 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
830
831 if (mode == VOIDmode)
832 {
833 /* The only way this should occur is if the field spans word
834 boundaries. */
835 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
836 value);
837 return;
838 }
839
840 total_bits = GET_MODE_BITSIZE (mode);
841
842 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
843 be in the range 0 to total_bits-1, and put any excess bytes in
844 OFFSET. */
845 if (bitpos >= total_bits)
846 {
847 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
848 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
849 * BITS_PER_UNIT);
850 }
851
852 /* Get ref to an aligned byte, halfword, or word containing the field.
853 Adjust BITPOS to be position within a word,
854 and OFFSET to be the offset of that word.
855 Then alter OP0 to refer to that word. */
856 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
857 offset -= (offset % (total_bits / BITS_PER_UNIT));
858 op0 = adjust_address (op0, mode, offset);
859 }
860
861 mode = GET_MODE (op0);
862
863 /* Now MODE is either some integral mode for a MEM as OP0,
864 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
865 The bit field is contained entirely within OP0.
866 BITPOS is the starting bit number within OP0.
867 (OP0's mode may actually be narrower than MODE.) */
868
869 if (BYTES_BIG_ENDIAN)
870 /* BITPOS is the distance between our msb
871 and that of the containing datum.
872 Convert it to the distance from the lsb. */
873 bitpos = total_bits - bitsize - bitpos;
874
875 /* Now BITPOS is always the distance between our lsb
876 and that of OP0. */
877
878 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
879 we must first convert its mode to MODE. */
880
881 if (GET_CODE (value) == CONST_INT)
882 {
883 HOST_WIDE_INT v = INTVAL (value);
884
885 if (bitsize < HOST_BITS_PER_WIDE_INT)
886 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
887
888 if (v == 0)
889 all_zero = 1;
890 else if ((bitsize < HOST_BITS_PER_WIDE_INT
891 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
892 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
893 all_one = 1;
894
895 value = lshift_value (mode, value, bitpos, bitsize);
896 }
897 else
898 {
899 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
900 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
901
902 if (GET_MODE (value) != mode)
903 {
904 if ((REG_P (value) || GET_CODE (value) == SUBREG)
905 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
906 value = gen_lowpart (mode, value);
907 else
908 value = convert_to_mode (mode, value, 1);
909 }
910
911 if (must_and)
912 value = expand_binop (mode, and_optab, value,
913 mask_rtx (mode, 0, bitsize, 0),
914 NULL_RTX, 1, OPTAB_LIB_WIDEN);
915 if (bitpos > 0)
916 value = expand_shift (LSHIFT_EXPR, mode, value,
917 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
918 }
919
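/* What follows implements the usual read-modify-write identity
     op0 = (op0 & ~(mask << bitpos)) | value
   where MASK has BITSIZE low-order one bits and VALUE has already been
   reduced to the field and shifted into position above.  The all_zero and
   all_one flags let us omit the IOR or the AND when a constant VALUE makes
   that step redundant.  */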
920 /* Now clear the chosen bits in OP0,
921 except that if VALUE is -1 we need not bother. */
922 /* We keep the intermediates in registers to allow CSE to combine
923 consecutive bitfield assignments. */
924
925 temp = force_reg (mode, op0);
926
927 if (! all_one)
928 {
929 temp = expand_binop (mode, and_optab, temp,
930 mask_rtx (mode, bitpos, bitsize, 1),
931 NULL_RTX, 1, OPTAB_LIB_WIDEN);
932 temp = force_reg (mode, temp);
933 }
934
935 /* Now logical-or VALUE into OP0, unless it is zero. */
936
937 if (! all_zero)
938 {
939 temp = expand_binop (mode, ior_optab, temp, value,
940 NULL_RTX, 1, OPTAB_LIB_WIDEN);
941 temp = force_reg (mode, temp);
942 }
943
944 if (op0 != temp)
945 emit_move_insn (op0, temp);
946 }
947 \f
948 /* Store a bit field that is split across multiple accessible memory objects.
949
950 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
951 BITSIZE is the field width; BITPOS the position of its first bit
952 (within the word).
953 VALUE is the value to store.
954
955 This does not yet handle fields wider than BITS_PER_WORD. */
956
957 static void
958 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
959 unsigned HOST_WIDE_INT bitpos, rtx value)
960 {
961 unsigned int unit;
962 unsigned int bitsdone = 0;
963
964 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
965 that much at a time. */
966 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
967 unit = BITS_PER_WORD;
968 else
969 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
970
971 /* If VALUE is a constant other than a CONST_INT, get it into a register in
972 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
973 that VALUE might be a floating-point constant. */
974 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
975 {
976 rtx word = gen_lowpart_common (word_mode, value);
977
978 if (word && (value != word))
979 value = word;
980 else
981 value = gen_lowpart_common (word_mode,
982 force_reg (GET_MODE (value) != VOIDmode
983 ? GET_MODE (value)
984 : word_mode, value));
985 }
986
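/* Loop over the field, peeling off at most UNIT bits at a time and never
   crossing a unit boundary; each piece of VALUE is extracted and handed to
   store_fixed_bit_field, which can then store it without splitting again.  */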
987 while (bitsdone < bitsize)
988 {
989 unsigned HOST_WIDE_INT thissize;
990 rtx part, word;
991 unsigned HOST_WIDE_INT thispos;
992 unsigned HOST_WIDE_INT offset;
993
994 offset = (bitpos + bitsdone) / unit;
995 thispos = (bitpos + bitsdone) % unit;
996
997 /* THISSIZE must not overrun a word boundary. Otherwise,
998 store_fixed_bit_field will call us again, and we will mutually
999 recurse forever. */
1000 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1001 thissize = MIN (thissize, unit - thispos);
1002
1003 if (BYTES_BIG_ENDIAN)
1004 {
1005 int total_bits;
1006
1007 /* We must do an endian conversion exactly the same way as it is
1008 done in extract_bit_field, so that the two calls to
1009 extract_fixed_bit_field will have comparable arguments. */
1010 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1011 total_bits = BITS_PER_WORD;
1012 else
1013 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1014
1015 /* Fetch successively less significant portions. */
1016 if (GET_CODE (value) == CONST_INT)
1017 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1018 >> (bitsize - bitsdone - thissize))
1019 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1020 else
1021 /* The args are chosen so that the last part includes the
1022 lsb. Give extract_bit_field the value it needs (with
1023 endianness compensation) to fetch the piece we want. */
1024 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1025 total_bits - bitsize + bitsdone,
1026 NULL_RTX, 1);
1027 }
1028 else
1029 {
1030 /* Fetch successively more significant portions. */
1031 if (GET_CODE (value) == CONST_INT)
1032 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1033 >> bitsdone)
1034 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1035 else
1036 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1037 bitsdone, NULL_RTX, 1);
1038 }
1039
1040 /* If OP0 is a register, then handle OFFSET here.
1041
1042 When handling multiword bitfields, extract_bit_field may pass
1043 down a word_mode SUBREG of a larger REG for a bitfield that actually
1044 crosses a word boundary. Thus, for a SUBREG, we must find
1045 the current word starting from the base register. */
1046 if (GET_CODE (op0) == SUBREG)
1047 {
1048 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1049 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1050 GET_MODE (SUBREG_REG (op0)));
1051 offset = 0;
1052 }
1053 else if (REG_P (op0))
1054 {
1055 word = operand_subword_force (op0, offset, GET_MODE (op0));
1056 offset = 0;
1057 }
1058 else
1059 word = op0;
1060
1061 /* OFFSET is in UNITs, and UNIT is in bits.
1062 store_fixed_bit_field wants offset in bytes. */
1063 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1064 thispos, part);
1065 bitsdone += thissize;
1066 }
1067 }
1068 \f
1069 /* Generate code to extract a bit-field from STR_RTX
1070 containing BITSIZE bits, starting at BITNUM,
1071 and put it in TARGET if possible (if TARGET is nonzero).
1072 Regardless of TARGET, we return the rtx for where the value is placed.
1073
1074 STR_RTX is the structure containing the field (a REG or MEM).
1075 UNSIGNEDP is nonzero if this is an unsigned bit field.
1076 MODE is the natural mode of the field value once extracted.
1077 TMODE is the mode the caller would like the value to have;
1078 but the value may be returned with type MODE instead.
1079
1083 If a TARGET is specified and we can store in it at no extra cost,
1084 we do so, and return TARGET.
1085 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1086 if they are equally easy. */
1087
1088 rtx
1089 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1090 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1091 enum machine_mode mode, enum machine_mode tmode)
1092 {
1093 unsigned int unit
1094 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1095 unsigned HOST_WIDE_INT offset, bitpos;
1096 rtx op0 = str_rtx;
1097 rtx spec_target = target;
1098 rtx spec_target_subreg = 0;
1099 enum machine_mode int_mode;
1100 enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1101 enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1102 enum machine_mode mode1;
1103 int byte_offset;
1104
1105 if (tmode == VOIDmode)
1106 tmode = mode;
1107
1108 while (GET_CODE (op0) == SUBREG)
1109 {
1110 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1111 op0 = SUBREG_REG (op0);
1112 }
1113
1114 /* If we have an out-of-bounds access to a register, just return an
1115 uninitialized register of the required mode. This can occur if the
1116 source code contains an out-of-bounds access to a small array. */
1117 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1118 return gen_reg_rtx (tmode);
1119
1120 if (REG_P (op0)
1121 && mode == GET_MODE (op0)
1122 && bitnum == 0
1123 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1124 {
1125 /* We're trying to extract a full register from itself. */
1126 return op0;
1127 }
1128
1129 /* Use vec_extract patterns for extracting parts of vectors whenever
1130 available. */
1131 if (VECTOR_MODE_P (GET_MODE (op0))
1132 && !MEM_P (op0)
1133 && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
1134 != CODE_FOR_nothing)
1135 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1136 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1137 {
1138 enum machine_mode outermode = GET_MODE (op0);
1139 enum machine_mode innermode = GET_MODE_INNER (outermode);
1140 int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
1141 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1142 rtx rtxpos = GEN_INT (pos);
1143 rtx src = op0;
1144 rtx dest = NULL, pat, seq;
1145 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1146 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1147 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1148
1149 if (innermode == tmode || innermode == mode)
1150 dest = target;
1151
1152 if (!dest)
1153 dest = gen_reg_rtx (innermode);
1154
1155 start_sequence ();
1156
1157 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1158 dest = copy_to_mode_reg (mode0, dest);
1159
1160 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1161 src = copy_to_mode_reg (mode1, src);
1162
1163 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1164 rtxpos = copy_to_mode_reg (mode2, rtxpos);
1165
1166 /* We could handle this, but we should always be called with a pseudo
1167 for our targets and all insns should take them as outputs. */
1168 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1169 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1170 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1171
1172 pat = GEN_FCN (icode) (dest, src, rtxpos);
1173 seq = get_insns ();
1174 end_sequence ();
1175 if (pat)
1176 {
1177 emit_insn (seq);
1178 emit_insn (pat);
1179 return dest;
1180 }
1181 }
1182
1183 /* Make sure we are playing with integral modes. Pun with subregs
1184 if we aren't. */
1185 {
1186 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1187 if (imode != GET_MODE (op0))
1188 {
1189 if (MEM_P (op0))
1190 op0 = adjust_address (op0, imode, 0);
1191 else
1192 {
1193 gcc_assert (imode != BLKmode);
1194 op0 = gen_lowpart (imode, op0);
1195
1196 /* If we got a SUBREG, force it into a register since we
1197 aren't going to be able to do another SUBREG on it. */
1198 if (GET_CODE (op0) == SUBREG)
1199 op0 = force_reg (imode, op0);
1200 }
1201 }
1202 }
1203
1204 /* We may be accessing data outside the field, which means
1205 we can alias adjacent data. */
1206 if (MEM_P (op0))
1207 {
1208 op0 = shallow_copy_rtx (op0);
1209 set_mem_alias_set (op0, 0);
1210 set_mem_expr (op0, 0);
1211 }
1212
1213 /* Extraction of a full-word or multi-word value from a structure
1214 in a register or aligned memory can be done with just a SUBREG.
1215 A subword value in the least significant part of a register
1216 can also be extracted with a SUBREG. For this, we need the
1217 byte offset of the value in op0. */
1218
1219 bitpos = bitnum % unit;
1220 offset = bitnum / unit;
1221 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1222
1223 /* If OP0 is a register, BITPOS must count within a word.
1224 But as we have it, it counts within whatever size OP0 now has.
1225 On a bigendian machine, these are not the same, so convert. */
1226 if (BYTES_BIG_ENDIAN
1227 && !MEM_P (op0)
1228 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1229 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1230
1231 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1232 If that's wrong, the solution is to test for it and set TARGET to 0
1233 if needed. */
1234
1235 /* Only scalar integer modes can be converted via subregs. There is an
1236 additional problem for FP modes here in that they can have a precision
1237 which is different from the size. mode_for_size uses precision, but
1238 we want a mode based on the size, so we must avoid calling it for FP
1239 modes. */
1240 mode1 = (SCALAR_INT_MODE_P (tmode)
1241 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1242 : mode);
1243
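/* The test below recognizes fields that can be fetched without any bit
   twiddling: either a word-aligned full-word or multi-word value, or a
   field in a usable scalar mode MODE1 positioned so that a SUBREG of the
   register, or a suitably aligned memory reference, picks it up directly.  */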
1244 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1245 && bitpos % BITS_PER_WORD == 0)
1246 || (mode1 != BLKmode
1247 /* ??? The big endian test here is wrong. This is correct
1248 if the value is in a register, and if mode_for_size is not
1249 the same mode as op0. This causes us to get unnecessarily
1250 inefficient code from the Thumb port when -mbig-endian. */
1251 && (BYTES_BIG_ENDIAN
1252 ? bitpos + bitsize == BITS_PER_WORD
1253 : bitpos == 0)))
1254 && ((!MEM_P (op0)
1255 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1256 GET_MODE_BITSIZE (GET_MODE (op0)))
1257 && GET_MODE_SIZE (mode1) != 0
1258 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1259 || (MEM_P (op0)
1260 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1261 || (offset * BITS_PER_UNIT % bitsize == 0
1262 && MEM_ALIGN (op0) % bitsize == 0)))))
1263 {
1264 if (mode1 != GET_MODE (op0))
1265 {
1266 if (MEM_P (op0))
1267 op0 = adjust_address (op0, mode1, offset);
1268 else
1269 {
1270 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1271 byte_offset);
1272 if (sub == NULL)
1273 goto no_subreg_mode_swap;
1274 op0 = sub;
1275 }
1276 }
1277 if (mode1 != mode)
1278 return convert_to_mode (tmode, op0, unsignedp);
1279 return op0;
1280 }
1281 no_subreg_mode_swap:
1282
1283 /* Handle fields bigger than a word. */
1284
1285 if (bitsize > BITS_PER_WORD)
1286 {
1287 /* Here we transfer the words of the field
1288 in the order least significant first.
1289 This is because the most significant word is the one which may
1290 be less than full. */
1291
1292 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1293 unsigned int i;
1294
1295 if (target == 0 || !REG_P (target))
1296 target = gen_reg_rtx (mode);
1297
1298 /* Indicate for flow that the entire target reg is being set. */
1299 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
1300
1301 for (i = 0; i < nwords; i++)
1302 {
1303 /* If I is 0, use the low-order word in both field and target;
1304 if I is 1, use the next to lowest word; and so on. */
1305 /* Word number in TARGET to use. */
1306 unsigned int wordnum
1307 = (WORDS_BIG_ENDIAN
1308 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1309 : i);
1310 /* Offset from start of field in OP0. */
1311 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1312 ? MAX (0, ((int) bitsize - ((int) i + 1)
1313 * (int) BITS_PER_WORD))
1314 : (int) i * BITS_PER_WORD);
1315 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1316 rtx result_part
1317 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1318 bitsize - i * BITS_PER_WORD),
1319 bitnum + bit_offset, 1, target_part, mode,
1320 word_mode);
1321
1322 gcc_assert (target_part);
1323
1324 if (result_part != target_part)
1325 emit_move_insn (target_part, result_part);
1326 }
1327
1328 if (unsignedp)
1329 {
1330 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1331 need to be zero'd out. */
1332 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1333 {
1334 unsigned int i, total_words;
1335
1336 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1337 for (i = nwords; i < total_words; i++)
1338 emit_move_insn
1339 (operand_subword (target,
1340 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1341 1, VOIDmode),
1342 const0_rtx);
1343 }
1344 return target;
1345 }
1346
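/* For example, a 48-bit signed field extracted into a 64-bit mode is
   shifted left by 16 and then arithmetically right by 16, duplicating the
   sign bit through the upper 16 bits.  */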
1347 /* Signed bit field: sign-extend with two arithmetic shifts. */
1348 target = expand_shift (LSHIFT_EXPR, mode, target,
1349 build_int_cst (NULL_TREE,
1350 GET_MODE_BITSIZE (mode) - bitsize),
1351 NULL_RTX, 0);
1352 return expand_shift (RSHIFT_EXPR, mode, target,
1353 build_int_cst (NULL_TREE,
1354 GET_MODE_BITSIZE (mode) - bitsize),
1355 NULL_RTX, 0);
1356 }
1357
1358 /* From here on we know the desired field is smaller than a word. */
1359
1360 /* Check if there is a correspondingly-sized integer field, so we can
1361 safely extract it as one size of integer, if necessary; then
1362 truncate or extend to the size that is wanted; then use SUBREGs or
1363 convert_to_mode to get one of the modes we really wanted. */
1364
1365 int_mode = int_mode_for_mode (tmode);
1366 if (int_mode == BLKmode)
1367 int_mode = int_mode_for_mode (mode);
1368 /* Should probably push op0 out to memory and then do a load. */
1369 gcc_assert (int_mode != BLKmode);
1370
1371 /* OFFSET is the number of words or bytes (UNIT says which)
1372 from STR_RTX to the first word or byte containing part of the field. */
1373 if (!MEM_P (op0))
1374 {
1375 if (offset != 0
1376 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1377 {
1378 if (!REG_P (op0))
1379 op0 = copy_to_reg (op0);
1380 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1381 op0, (offset * UNITS_PER_WORD));
1382 }
1383 offset = 0;
1384 }
1385
1386 /* Now OFFSET is nonzero only for memory operands. */
1387
1388 if (unsignedp)
1389 {
1390 if (HAVE_extzv
1391 && bitsize > 0
1392 && GET_MODE_BITSIZE (extzv_mode) >= bitsize
1393 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1394 && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
1395 {
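/* Try the target's extzv pattern.  OP0 is coerced into an operand the
   pattern accepts; if that fails, or the pattern cannot be generated, we
   use the generic extract_fixed_bit_field path instead.  */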
1396 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1397 rtx bitsize_rtx, bitpos_rtx;
1398 rtx last = get_last_insn ();
1399 rtx xop0 = op0;
1400 rtx xtarget = target;
1401 rtx xspec_target = spec_target;
1402 rtx xspec_target_subreg = spec_target_subreg;
1403 rtx pat;
1404 enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
1405
1406 if (MEM_P (xop0))
1407 {
1408 int save_volatile_ok = volatile_ok;
1409 volatile_ok = 1;
1410
1411 /* Is the memory operand acceptable? */
1412 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
1413 (xop0, GET_MODE (xop0))))
1414 {
1415 /* No, load into a reg and extract from there. */
1416 enum machine_mode bestmode;
1417
1418 /* Get the mode to use for inserting into this field. If
1419 OP0 is BLKmode, get the smallest mode consistent with the
1420 alignment. If OP0 is a non-BLKmode object that is no
1421 wider than MAXMODE, use its mode. Otherwise, use the
1422 smallest mode containing the field. */
1423
1424 if (GET_MODE (xop0) == BLKmode
1425 || (GET_MODE_SIZE (GET_MODE (op0))
1426 > GET_MODE_SIZE (maxmode)))
1427 bestmode = get_best_mode (bitsize, bitnum,
1428 MEM_ALIGN (xop0), maxmode,
1429 MEM_VOLATILE_P (xop0));
1430 else
1431 bestmode = GET_MODE (xop0);
1432
1433 if (bestmode == VOIDmode
1434 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1435 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1436 goto extzv_loses;
1437
1438 /* Compute offset as multiple of this unit,
1439 counting in bytes. */
1440 unit = GET_MODE_BITSIZE (bestmode);
1441 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1442 xbitpos = bitnum % unit;
1443 xop0 = adjust_address (xop0, bestmode, xoffset);
1444
1445 /* Make sure register is big enough for the whole field. */
1446 if (xoffset * BITS_PER_UNIT + unit
1447 < offset * BITS_PER_UNIT + bitsize)
1448 goto extzv_loses;
1449
1450 /* Fetch it to a register in that size. */
1451 xop0 = force_reg (bestmode, xop0);
1452
1453 /* XBITPOS counts within UNIT, which is what is expected. */
1454 }
1455 else
1456 /* Get ref to first byte containing part of the field. */
1457 xop0 = adjust_address (xop0, byte_mode, xoffset);
1458
1459 volatile_ok = save_volatile_ok;
1460 }
1461
1462 /* If op0 is a register, we need it in MAXMODE (which is usually
1463 SImode) to make it acceptable to the format of extzv. */
1464 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1465 goto extzv_loses;
1466 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1467 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1468
1469 /* On big-endian machines, we count bits from the most significant.
1470 If the bit field insn does not, we must invert. */
1471 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1472 xbitpos = unit - bitsize - xbitpos;
1473
1474 /* Now convert from counting within UNIT to counting in MAXMODE. */
1475 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1476 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1477
1478 unit = GET_MODE_BITSIZE (maxmode);
1479
1480 if (xtarget == 0)
1481 xtarget = xspec_target = gen_reg_rtx (tmode);
1482
1483 if (GET_MODE (xtarget) != maxmode)
1484 {
1485 if (REG_P (xtarget))
1486 {
1487 int wider = (GET_MODE_SIZE (maxmode)
1488 > GET_MODE_SIZE (GET_MODE (xtarget)));
1489 xtarget = gen_lowpart (maxmode, xtarget);
1490 if (wider)
1491 xspec_target_subreg = xtarget;
1492 }
1493 else
1494 xtarget = gen_reg_rtx (maxmode);
1495 }
1496
1497 /* If this machine's extzv insists on a register target,
1498 make sure we have one. */
1499 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
1500 (xtarget, maxmode)))
1501 xtarget = gen_reg_rtx (maxmode);
1502
1503 bitsize_rtx = GEN_INT (bitsize);
1504 bitpos_rtx = GEN_INT (xbitpos);
1505
1506 pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1507 if (pat)
1508 {
1509 emit_insn (pat);
1510 target = xtarget;
1511 spec_target = xspec_target;
1512 spec_target_subreg = xspec_target_subreg;
1513 }
1514 else
1515 {
1516 delete_insns_since (last);
1517 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1518 bitpos, target, 1);
1519 }
1520 }
1521 else
1522 extzv_loses:
1523 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1524 bitpos, target, 1);
1525 }
1526 else
1527 {
1528 if (HAVE_extv
1529 && bitsize > 0
1530 && GET_MODE_BITSIZE (extv_mode) >= bitsize
1531 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1532 && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
1533 {
1534 int xbitpos = bitpos, xoffset = offset;
1535 rtx bitsize_rtx, bitpos_rtx;
1536 rtx last = get_last_insn ();
1537 rtx xop0 = op0, xtarget = target;
1538 rtx xspec_target = spec_target;
1539 rtx xspec_target_subreg = spec_target_subreg;
1540 rtx pat;
1541 enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
1542
1543 if (MEM_P (xop0))
1544 {
1545 /* Is the memory operand acceptable? */
1546 if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
1547 (xop0, GET_MODE (xop0))))
1548 {
1549 /* No, load into a reg and extract from there. */
1550 enum machine_mode bestmode;
1551
1552 /* Get the mode to use for inserting into this field. If
1553 OP0 is BLKmode, get the smallest mode consistent with the
1554 alignment. If OP0 is a non-BLKmode object that is no
1555 wider than MAXMODE, use its mode. Otherwise, use the
1556 smallest mode containing the field. */
1557
1558 if (GET_MODE (xop0) == BLKmode
1559 || (GET_MODE_SIZE (GET_MODE (op0))
1560 > GET_MODE_SIZE (maxmode)))
1561 bestmode = get_best_mode (bitsize, bitnum,
1562 MEM_ALIGN (xop0), maxmode,
1563 MEM_VOLATILE_P (xop0));
1564 else
1565 bestmode = GET_MODE (xop0);
1566
1567 if (bestmode == VOIDmode
1568 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1569 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1570 goto extv_loses;
1571
1572 /* Compute offset as multiple of this unit,
1573 counting in bytes. */
1574 unit = GET_MODE_BITSIZE (bestmode);
1575 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1576 xbitpos = bitnum % unit;
1577 xop0 = adjust_address (xop0, bestmode, xoffset);
1578
1579 /* Make sure register is big enough for the whole field. */
1580 if (xoffset * BITS_PER_UNIT + unit
1581 < offset * BITS_PER_UNIT + bitsize)
1582 goto extv_loses;
1583
1584 /* Fetch it to a register in that size. */
1585 xop0 = force_reg (bestmode, xop0);
1586
1587 /* XBITPOS counts within UNIT, which is what is expected. */
1588 }
1589 else
1590 /* Get ref to first byte containing part of the field. */
1591 xop0 = adjust_address (xop0, byte_mode, xoffset);
1592 }
1593
1594 /* If op0 is a register, we need it in MAXMODE (which is usually
1595 SImode) to make it acceptable to the format of extv. */
1596 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1597 goto extv_loses;
1598 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1599 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1600
1601 /* On big-endian machines, we count bits from the most significant.
1602 If the bit field insn does not, we must invert. */
1603 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1604 xbitpos = unit - bitsize - xbitpos;
1605
1606 /* XBITPOS counts within a size of UNIT.
1607 Adjust to count within a size of MAXMODE. */
1608 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1609 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1610
1611 unit = GET_MODE_BITSIZE (maxmode);
1612
1613 if (xtarget == 0)
1614 xtarget = xspec_target = gen_reg_rtx (tmode);
1615
1616 if (GET_MODE (xtarget) != maxmode)
1617 {
1618 if (REG_P (xtarget))
1619 {
1620 int wider = (GET_MODE_SIZE (maxmode)
1621 > GET_MODE_SIZE (GET_MODE (xtarget)));
1622 xtarget = gen_lowpart (maxmode, xtarget);
1623 if (wider)
1624 xspec_target_subreg = xtarget;
1625 }
1626 else
1627 xtarget = gen_reg_rtx (maxmode);
1628 }
1629
1630 /* If this machine's extv insists on a register target,
1631 make sure we have one. */
1632 if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
1633 (xtarget, maxmode)))
1634 xtarget = gen_reg_rtx (maxmode);
1635
1636 bitsize_rtx = GEN_INT (bitsize);
1637 bitpos_rtx = GEN_INT (xbitpos);
1638
1639 pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1640 if (pat)
1641 {
1642 emit_insn (pat);
1643 target = xtarget;
1644 spec_target = xspec_target;
1645 spec_target_subreg = xspec_target_subreg;
1646 }
1647 else
1648 {
1649 delete_insns_since (last);
1650 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1651 bitpos, target, 0);
1652 }
1653 }
1654 else
1655 extv_loses:
1656 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1657 bitpos, target, 0);
1658 }
1659 if (target == spec_target)
1660 return target;
1661 if (target == spec_target_subreg)
1662 return spec_target;
1663 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1664 {
1665 /* If the target mode is not a scalar integral, first convert to the
1666 integer mode of that size and then access it as a floating-point
1667 value via a SUBREG. */
1668 if (!SCALAR_INT_MODE_P (tmode))
1669 {
1670 enum machine_mode smode
1671 = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1672 target = convert_to_mode (smode, target, unsignedp);
1673 target = force_reg (smode, target);
1674 return gen_lowpart (tmode, target);
1675 }
1676
1677 return convert_to_mode (tmode, target, unsignedp);
1678 }
1679 return target;
1680 }
1681 \f
1682 /* Extract a bit field using shifts and boolean operations.
1683 Returns an rtx to represent the value.
1684 OP0 addresses a register (word) or memory (byte).
1685 BITPOS says which bit within the word or byte the bit field starts in.
1686 OFFSET says how many bytes farther the bit field starts;
1687 it is 0 if OP0 is a register.
1688 BITSIZE says how many bits long the bit field is.
1689 (If OP0 is a register, it may be narrower than a full word,
1690 but BITPOS still counts within a full word,
1691 which is significant on bigendian machines.)
1692
1693 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1694 If TARGET is nonzero, attempts to store the value there
1695 and return TARGET, but this is not guaranteed.
1696 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
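
/* In the common unsigned little-endian case this boils down to the
   familiar shift-and-mask idiom; for instance, an 8-bit field at bit
   position 5 of a 32-bit word W is simply (W >> 5) & 0xff.  The code
   below generalizes this to arbitrary modes, offsets and signedness.  */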
1697
1698 static rtx
1699 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1700 unsigned HOST_WIDE_INT offset,
1701 unsigned HOST_WIDE_INT bitsize,
1702 unsigned HOST_WIDE_INT bitpos, rtx target,
1703 int unsignedp)
1704 {
1705 unsigned int total_bits = BITS_PER_WORD;
1706 enum machine_mode mode;
1707
1708 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1709 {
1710 /* Special treatment for a bit field split across two registers. */
1711 if (bitsize + bitpos > BITS_PER_WORD)
1712 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1713 }
1714 else
1715 {
1716 /* Get the proper mode to use for this field. We want a mode that
1717 includes the entire field. If such a mode would be larger than
1718 a word, we won't be doing the extraction the normal way. */
1719
1720 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1721 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1722
1723 if (mode == VOIDmode)
1724 /* The only way this should occur is if the field spans word
1725 boundaries. */
1726 return extract_split_bit_field (op0, bitsize,
1727 bitpos + offset * BITS_PER_UNIT,
1728 unsignedp);
1729
1730 total_bits = GET_MODE_BITSIZE (mode);
1731
1732 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1733 be in the range 0 to total_bits-1, and put any excess bytes in
1734 OFFSET. */
1735 if (bitpos >= total_bits)
1736 {
1737 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1738 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1739 * BITS_PER_UNIT);
1740 }
1741
1742 /* Get ref to an aligned byte, halfword, or word containing the field.
1743 Adjust BITPOS to be position within a word,
1744 and OFFSET to be the offset of that word.
1745 Then alter OP0 to refer to that word. */
1746 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1747 offset -= (offset % (total_bits / BITS_PER_UNIT));
1748 op0 = adjust_address (op0, mode, offset);
1749 }
1750
1751 mode = GET_MODE (op0);
1752
1753 if (BYTES_BIG_ENDIAN)
1754 /* BITPOS is the distance between our msb and that of OP0.
1755 Convert it to the distance from the lsb. */
1756 bitpos = total_bits - bitsize - bitpos;
1757
1758 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1759 We have reduced the big-endian case to the little-endian case. */
1760
1761 if (unsignedp)
1762 {
1763 if (bitpos)
1764 {
1765 /* If the field does not already start at the lsb,
1766 shift it so it does. */
1767 tree amount = build_int_cst (NULL_TREE, bitpos);
1768 /* Maybe propagate the target for the shift. */
1769 /* But not if we will return it--could confuse integrate.c. */
1770 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1771 if (tmode != mode) subtarget = 0;
1772 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1773 }
1774 /* Convert the value to the desired mode. */
1775 if (mode != tmode)
1776 op0 = convert_to_mode (tmode, op0, 1);
1777
1778 /* Unless the msb of the field used to be the msb when we shifted,
1779 mask out the upper bits. */
1780
1781 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1782 return expand_binop (GET_MODE (op0), and_optab, op0,
1783 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1784 target, 1, OPTAB_LIB_WIDEN);
1785 return op0;
1786 }
1787
1788 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1789 then arithmetic-shift its lsb to the lsb of the word. */
1790 op0 = force_reg (mode, op0);
1791 if (mode != tmode)
1792 target = 0;
1793
1794 /* Find the narrowest integer mode that contains the field. */
1795
1796 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1797 mode = GET_MODE_WIDER_MODE (mode))
1798 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1799 {
1800 op0 = convert_to_mode (mode, op0, 0);
1801 break;
1802 }
1803
1804 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1805 {
1806 tree amount
1807 = build_int_cst (NULL_TREE,
1808 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1809 /* Maybe propagate the target for the shift. */
1810 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1811 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1812 }
1813
1814 return expand_shift (RSHIFT_EXPR, mode, op0,
1815 build_int_cst (NULL_TREE,
1816 GET_MODE_BITSIZE (mode) - bitsize),
1817 target, 0);
1818 }
1819 \f
1820 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1821 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1822 complement of that if COMPLEMENT. The mask is truncated if
1823 necessary to the width of mode MODE. The mask is zero-extended if
1824 BITSIZE+BITPOS is too small for MODE. */
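
/* For example, with MODE == SImode, BITPOS == 4 and BITSIZE == 8 the
   mask is eight ones followed by four zeros, i.e. 0x00000ff0; with
   COMPLEMENT nonzero it is 0xfffff00f instead.  */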
1825
1826 static rtx
1827 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1828 {
1829 HOST_WIDE_INT masklow, maskhigh;
1830
1831 if (bitsize == 0)
1832 masklow = 0;
1833 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1834 masklow = (HOST_WIDE_INT) -1 << bitpos;
1835 else
1836 masklow = 0;
1837
1838 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1839 masklow &= ((unsigned HOST_WIDE_INT) -1
1840 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1841
1842 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1843 maskhigh = -1;
1844 else
1845 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1846
1847 if (bitsize == 0)
1848 maskhigh = 0;
1849 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1850 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1851 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1852 else
1853 maskhigh = 0;
1854
1855 if (complement)
1856 {
1857 maskhigh = ~maskhigh;
1858 masklow = ~masklow;
1859 }
1860
1861 return immed_double_const (masklow, maskhigh, mode);
1862 }
1863
1864 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1865 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
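
/* For example, with VALUE == 0x1f3, BITSIZE == 8 and BITPOS == 4 the
   value is first truncated to 0xf3 and then shifted, giving 0xf30.  */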
1866
1867 static rtx
1868 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1869 {
1870 unsigned HOST_WIDE_INT v = INTVAL (value);
1871 HOST_WIDE_INT low, high;
1872
1873 if (bitsize < HOST_BITS_PER_WIDE_INT)
1874 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1875
1876 if (bitpos < HOST_BITS_PER_WIDE_INT)
1877 {
1878 low = v << bitpos;
1879 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1880 }
1881 else
1882 {
1883 low = 0;
1884 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1885 }
1886
1887 return immed_double_const (low, high, mode);
1888 }
1889 \f
1890 /* Extract a bit field from a memory by forcing the alignment of the
1891 memory. This is efficient only if the field spans at least 4 boundaries.
1892
1893 OP0 is the MEM.
1894 BITSIZE is the field width; BITPOS is the position of the first bit.
1895 UNSIGNEDP is true if the result should be zero-extended. */
1896
1897 static rtx
1898 extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1899 unsigned HOST_WIDE_INT bitpos,
1900 int unsignedp)
1901 {
1902 enum machine_mode mode, dmode;
1903 unsigned int m_bitsize, m_size;
1904 unsigned int sign_shift_up, sign_shift_dn;
1905 rtx base, a1, a2, v1, v2, comb, shift, result, start;
1906
1907 /* Choose a mode that will fit BITSIZE. */
1908 mode = smallest_mode_for_size (bitsize, MODE_INT);
1909 m_size = GET_MODE_SIZE (mode);
1910 m_bitsize = GET_MODE_BITSIZE (mode);
1911
1912 /* Choose a mode twice as wide. Fail if no such mode exists. */
1913 dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
1914 if (dmode == BLKmode)
1915 return NULL;
1916
1917 do_pending_stack_adjust ();
1918 start = get_last_insn ();
1919
1920 /* At the end, we'll need an additional shift to deal with sign/zero
1921 extension. By default this will be a left+right shift of the
1922 appropriate size. But we may be able to eliminate one of them. */
1923 sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1924
1925 if (STRICT_ALIGNMENT)
1926 {
1927 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1928 bitpos %= BITS_PER_UNIT;
1929
1930 /* We load two values to be concatenated. There's an edge condition
1931 that bears notice -- an aligned value at the end of a page can
1932 only load one value lest we segfault. So the two values we load
1933 are at "base & -size" and "(base + size - 1) & -size". If base
1934 is unaligned, the addresses will be aligned and sequential; if
1935 base is aligned, the addresses will both be equal to base. */
1936
1937 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1938 GEN_INT (-(HOST_WIDE_INT)m_size),
1939 NULL, true, OPTAB_LIB_WIDEN);
1940 mark_reg_pointer (a1, m_bitsize);
1941 v1 = gen_rtx_MEM (mode, a1);
1942 set_mem_align (v1, m_bitsize);
1943 v1 = force_reg (mode, validize_mem (v1));
1944
1945 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1946 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1947 GEN_INT (-(HOST_WIDE_INT)m_size),
1948 NULL, true, OPTAB_LIB_WIDEN);
1949 v2 = gen_rtx_MEM (mode, a2);
1950 set_mem_align (v2, m_bitsize);
1951 v2 = force_reg (mode, validize_mem (v2));
1952
1953 /* Combine these two values into a double-word value. */
1954 if (m_bitsize == BITS_PER_WORD)
1955 {
1956 comb = gen_reg_rtx (dmode);
1957 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1958 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1959 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1960 }
1961 else
1962 {
1963 if (BYTES_BIG_ENDIAN)
1964 comb = v1, v1 = v2, v2 = comb;
1965 v1 = convert_modes (dmode, mode, v1, true);
1966 if (v1 == NULL)
1967 goto fail;
1968 v2 = convert_modes (dmode, mode, v2, true);
1969 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1970 NULL, true, OPTAB_LIB_WIDEN);
1971 if (v2 == NULL)
1972 goto fail;
1973 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1974 true, OPTAB_LIB_WIDEN);
1975 if (comb == NULL)
1976 goto fail;
1977 }
1978
1979 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1980 NULL, true, OPTAB_LIB_WIDEN);
1981 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1982
1983 if (bitpos != 0)
1984 {
1985 if (sign_shift_up <= bitpos)
1986 bitpos -= sign_shift_up, sign_shift_up = 0;
1987 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1988 NULL, true, OPTAB_LIB_WIDEN);
1989 }
1990 }
1991 else
1992 {
1993 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1994 bitpos %= BITS_PER_UNIT;
1995
1996 /* When strict alignment is not required, we can just load directly
1997 from memory without masking. If the remaining BITPOS offset is
1998 small enough, we may be able to do all operations in MODE as
1999 opposed to DMODE. */
2000 if (bitpos + bitsize <= m_bitsize)
2001 dmode = mode;
2002 comb = adjust_address (op0, dmode, offset);
2003
2004 if (sign_shift_up <= bitpos)
2005 bitpos -= sign_shift_up, sign_shift_up = 0;
2006 shift = GEN_INT (bitpos);
2007 }
2008
2009 /* Shift down the double-word such that the requested value is at bit 0. */
2010 if (shift != const0_rtx)
2011 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
2012 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
2013 if (comb == NULL)
2014 goto fail;
2015
2016 /* If the field exactly matches MODE, then all we need to do is return the
2017 lowpart. Otherwise, shift to get the sign bits set properly. */
2018 result = force_reg (mode, gen_lowpart (mode, comb));
2019
2020 if (sign_shift_up)
2021 result = expand_simple_binop (mode, ASHIFT, result,
2022 GEN_INT (sign_shift_up),
2023 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2024 if (sign_shift_dn)
2025 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
2026 result, GEN_INT (sign_shift_dn),
2027 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2028
2029 return result;
2030
2031 fail:
2032 delete_insns_since (start);
2033 return NULL;
2034 }
2035
2036 /* Extract a bit field that is split across two words
2037 and return an RTX for the result.
2038
2039 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2040 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2041 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
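
/* For example (little-endian, 32-bit words), a 20-bit field starting at
   bit 20 is assembled from two pieces: 12 bits taken at position 20 of
   word 0, plus 8 bits taken at position 0 of word 1 and shifted left by
   12 before the two pieces are ORed together.  */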
2042
2043 static rtx
2044 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2045 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2046 {
2047 unsigned int unit;
2048 unsigned int bitsdone = 0;
2049 rtx result = NULL_RTX;
2050 int first = 1;
2051
2052 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2053 much at a time. */
2054 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2055 unit = BITS_PER_WORD;
2056 else
2057 {
2058 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2059 if (0 && bitsize / unit > 2)
2060 {
2061 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2062 unsignedp);
2063 if (tmp)
2064 return tmp;
2065 }
2066 }
2067
2068 while (bitsdone < bitsize)
2069 {
2070 unsigned HOST_WIDE_INT thissize;
2071 rtx part, word;
2072 unsigned HOST_WIDE_INT thispos;
2073 unsigned HOST_WIDE_INT offset;
2074
2075 offset = (bitpos + bitsdone) / unit;
2076 thispos = (bitpos + bitsdone) % unit;
2077
2078 /* THISSIZE must not overrun a word boundary. Otherwise,
2079 extract_fixed_bit_field will call us again, and we will mutually
2080 recurse forever. */
2081 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2082 thissize = MIN (thissize, unit - thispos);
2083
2084 /* If OP0 is a register, then handle OFFSET here.
2085
2086 When handling multiword bitfields, extract_bit_field may pass
2087 down a word_mode SUBREG of a larger REG for a bitfield that actually
2088 crosses a word boundary. Thus, for a SUBREG, we must find
2089 the current word starting from the base register. */
2090 if (GET_CODE (op0) == SUBREG)
2091 {
2092 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2093 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2094 GET_MODE (SUBREG_REG (op0)));
2095 offset = 0;
2096 }
2097 else if (REG_P (op0))
2098 {
2099 word = operand_subword_force (op0, offset, GET_MODE (op0));
2100 offset = 0;
2101 }
2102 else
2103 word = op0;
2104
2105 /* Extract the parts in bit-counting order,
2106 whose meaning is determined by BYTES_PER_UNIT.
2107 OFFSET is in UNITs, and UNIT is in bits.
2108 extract_fixed_bit_field wants offset in bytes. */
2109 part = extract_fixed_bit_field (word_mode, word,
2110 offset * unit / BITS_PER_UNIT,
2111 thissize, thispos, 0, 1);
2112 bitsdone += thissize;
2113
2114 /* Shift this part into place for the result. */
2115 if (BYTES_BIG_ENDIAN)
2116 {
2117 if (bitsize != bitsdone)
2118 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2119 build_int_cst (NULL_TREE, bitsize - bitsdone),
2120 0, 1);
2121 }
2122 else
2123 {
2124 if (bitsdone != thissize)
2125 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2126 build_int_cst (NULL_TREE,
2127 bitsdone - thissize), 0, 1);
2128 }
2129
2130 if (first)
2131 result = part;
2132 else
2133 /* Combine the parts with bitwise or. This works
2134 because we extracted each part as an unsigned bit field. */
2135 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2136 OPTAB_LIB_WIDEN);
2137
2138 first = 0;
2139 }
2140
2141 /* Unsigned bit field: we are done. */
2142 if (unsignedp)
2143 return result;
2144 /* Signed bit field: sign-extend with two arithmetic shifts. */
2145 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2146 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2147 NULL_RTX, 0);
2148 return expand_shift (RSHIFT_EXPR, word_mode, result,
2149 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2150 NULL_RTX, 0);
2151 }
2152 \f
2153 /* Add INC into TARGET. */
2154
2155 void
2156 expand_inc (rtx target, rtx inc)
2157 {
2158 rtx value = expand_binop (GET_MODE (target), add_optab,
2159 target, inc,
2160 target, 0, OPTAB_LIB_WIDEN);
2161 if (value != target)
2162 emit_move_insn (target, value);
2163 }
2164
2165 /* Subtract DEC from TARGET. */
2166
2167 void
2168 expand_dec (rtx target, rtx dec)
2169 {
2170 rtx value = expand_binop (GET_MODE (target), sub_optab,
2171 target, dec,
2172 target, 0, OPTAB_LIB_WIDEN);
2173 if (value != target)
2174 emit_move_insn (target, value);
2175 }
2176 \f
2177 /* Output a shift instruction for expression code CODE,
2178 with SHIFTED being the rtx for the value to shift,
2179 and AMOUNT the tree for the amount to shift by.
2180 Store the result in the rtx TARGET, if that is convenient.
2181 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2182 Return the rtx for where the value is. */
2183
2184 rtx
2185 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2186 tree amount, rtx target, int unsignedp)
2187 {
2188 rtx op1, temp = 0;
2189 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2190 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2191 int try;
2192
2193 /* Previously we detected shift counts computed by NEGATE_EXPR
2194 and shifted in the other direction, but that does not work
2195 on all machines. */
2196
2197 op1 = expand_normal (amount);
2198
2199 if (SHIFT_COUNT_TRUNCATED)
2200 {
2201 if (GET_CODE (op1) == CONST_INT
2202 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2203 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2204 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2205 % GET_MODE_BITSIZE (mode));
2206 else if (GET_CODE (op1) == SUBREG
2207 && subreg_lowpart_p (op1))
2208 op1 = SUBREG_REG (op1);
2209 }
2210
2211 if (op1 == const0_rtx)
2212 return shifted;
2213
2214 /* Check whether it's cheaper to implement a left shift by a constant
2215 bit count by a sequence of additions. */
2216 if (code == LSHIFT_EXPR
2217 && GET_CODE (op1) == CONST_INT
2218 && INTVAL (op1) > 0
2219 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2220 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2221 {
2222 int i;
2223 for (i = 0; i < INTVAL (op1); i++)
2224 {
2225 temp = force_reg (mode, shifted);
2226 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2227 unsignedp, OPTAB_LIB_WIDEN);
2228 }
2229 return shifted;
2230 }
2231
2232 for (try = 0; temp == 0 && try < 3; try++)
2233 {
2234 enum optab_methods methods;
2235
2236 if (try == 0)
2237 methods = OPTAB_DIRECT;
2238 else if (try == 1)
2239 methods = OPTAB_WIDEN;
2240 else
2241 methods = OPTAB_LIB_WIDEN;
2242
2243 if (rotate)
2244 {
2245 /* Widening does not work for rotation. */
2246 if (methods == OPTAB_WIDEN)
2247 continue;
2248 else if (methods == OPTAB_LIB_WIDEN)
2249 {
2250 /* If we have been unable to open-code this by a rotation,
2251 do it as the IOR of two shifts. I.e., to rotate A
2252 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2253 where C is the bitsize of A.
2254
2255 It is theoretically possible that the target machine might
2256 not be able to perform either shift and hence we would
2257 be making two libcalls rather than just the one for the
2258 shift (similarly if IOR could not be done). We will allow
2259 this extremely unlikely lossage to avoid complicating the
2260 code below. */
2261
2262 rtx subtarget = target == shifted ? 0 : target;
2263 tree new_amount, other_amount;
2264 rtx temp1;
2265 tree type = TREE_TYPE (amount);
2266 if (GET_MODE (op1) != TYPE_MODE (type)
2267 && GET_MODE (op1) != VOIDmode)
2268 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2269 new_amount = make_tree (type, op1);
2270 other_amount
2271 = fold_build2 (MINUS_EXPR, type,
2272 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2273 new_amount);
2274
2275 shifted = force_reg (mode, shifted);
2276
2277 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2278 mode, shifted, new_amount, 0, 1);
2279 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2280 mode, shifted, other_amount, subtarget, 1);
2281 return expand_binop (mode, ior_optab, temp, temp1, target,
2282 unsignedp, methods);
2283 }
2284
2285 temp = expand_binop (mode,
2286 left ? rotl_optab : rotr_optab,
2287 shifted, op1, target, unsignedp, methods);
2288 }
2289 else if (unsignedp)
2290 temp = expand_binop (mode,
2291 left ? ashl_optab : lshr_optab,
2292 shifted, op1, target, unsignedp, methods);
2293
2294 /* Do arithmetic shifts.
2295 Also, if we are going to widen the operand, we can just as well
2296 use an arithmetic right-shift instead of a logical one. */
2297 if (temp == 0 && ! rotate
2298 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2299 {
2300 enum optab_methods methods1 = methods;
2301
2302 /* If trying to widen a log shift to an arithmetic shift,
2303 don't accept an arithmetic shift of the same size. */
2304 if (unsignedp)
2305 methods1 = OPTAB_MUST_WIDEN;
2306
2307 /* Arithmetic shift */
2308
2309 temp = expand_binop (mode,
2310 left ? ashl_optab : ashr_optab,
2311 shifted, op1, target, unsignedp, methods1);
2312 }
2313
2314 /* We used to try extzv here for logical right shifts, but that was
2315 only useful for one machine, the VAX, and caused poor code
2316 generation there for lshrdi3, so the code was deleted and a
2317 define_expand for lshrsi3 was added to vax.md. */
2318 }
2319
2320 gcc_assert (temp);
2321 return temp;
2322 }
2323 \f
2324 enum alg_code {
2325 alg_unknown,
2326 alg_zero,
2327 alg_m, alg_shift,
2328 alg_add_t_m2,
2329 alg_sub_t_m2,
2330 alg_add_factor,
2331 alg_sub_factor,
2332 alg_add_t2_m,
2333 alg_sub_t2_m,
2334 alg_impossible
2335 };
2336
2337 /* This structure holds the "cost" of a multiply sequence. The
2338 "cost" field holds the total rtx_cost of every operator in the
2339 synthetic multiplication sequence, hence cost(a op b) is defined
2340 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2341 The "latency" field holds the minimum possible latency of the
2342 synthetic multiply, on a hypothetical infinitely parallel CPU.
2343 This is the critical path, or the maximum height, of the expression
2344 tree which is the sum of rtx_costs on the most expensive path from
2345 any leaf to the root. Hence latency(a op b) is defined as zero for
2346 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
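
/* For example, for (a << 2) + (b << 3) with unit shift and add costs,
   cost is 3 (two shifts plus one add) while latency is only 2, since
   the two shifts can issue in parallel and the add need only wait for
   the slower of them.  */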
2347
2348 struct mult_cost {
2349 short cost; /* Total rtx_cost of the multiplication sequence. */
2350 short latency; /* The latency of the multiplication sequence. */
2351 };
2352
2353 /* This macro is used to compare a pointer to a mult_cost against a
2354 single integer "rtx_cost" value. This is equivalent to the macro
2355 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2356 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2357 || ((X)->cost == (Y) && (X)->latency < (Y)))
2358
2359 /* This macro is used to compare two pointers to mult_costs against
2360 each other. The macro returns true if X is cheaper than Y.
2361 Currently, the cheaper of two mult_costs is the one with the
2362 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2363 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2364 || ((X)->cost == (Y)->cost \
2365 && (X)->latency < (Y)->latency))
2366
2367 /* This structure records a sequence of operations.
2368 `ops' is the number of operations recorded.
2369 `cost' is their total cost.
2370 The operations are stored in `op' and the corresponding
2371 logarithms of the integer coefficients in `log'.
2372
2373 These are the operations:
2374 alg_zero total := 0;
2375 alg_m total := multiplicand;
2376 alg_shift total := total * coeff
2377 alg_add_t_m2 total := total + multiplicand * coeff;
2378 alg_sub_t_m2 total := total - multiplicand * coeff;
2379 alg_add_factor total := total * coeff + total;
2380 alg_sub_factor total := total * coeff - total;
2381 alg_add_t2_m total := total * coeff + multiplicand;
2382 alg_sub_t2_m total := total * coeff - multiplicand;
2383
2384 The first operand must be either alg_zero or alg_m. */
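
/* For example, a multiplication by 10 can be recorded as the sequence
   alg_m (total := multiplicand), alg_add_t2_m with log 2
   (total := total*4 + multiplicand, i.e. 5*multiplicand), and finally
   alg_shift with log 1 (total := total*2), giving 10*multiplicand.  */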
2385
2386 struct algorithm
2387 {
2388 struct mult_cost cost;
2389 short ops;
2390 /* The sizes of the OP and LOG fields are not directly related to the
2391 word size, but the worst-case algorithm arises when we have few
2392 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2393 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2394 in total wordsize operations. */
2395 enum alg_code op[MAX_BITS_PER_WORD];
2396 char log[MAX_BITS_PER_WORD];
2397 };
2398
2399 /* The entry for our multiplication cache/hash table. */
2400 struct alg_hash_entry {
2401 /* The number we are multiplying by. */
2402 unsigned HOST_WIDE_INT t;
2403
2404 /* The mode in which we are multiplying something by T. */
2405 enum machine_mode mode;
2406
2407 /* The best multiplication algorithm for t. */
2408 enum alg_code alg;
2409
2410 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2411 Otherwise, the cost within which multiplication by T is
2412 impossible. */
2413 struct mult_cost cost;
2414 };
2415
2416 /* The number of cache/hash entries. */
2417 #if HOST_BITS_PER_WIDE_INT == 64
2418 #define NUM_ALG_HASH_ENTRIES 1031
2419 #else
2420 #define NUM_ALG_HASH_ENTRIES 307
2421 #endif
2422
2423 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2424 actually a hash table. If we have a collision, the older
2425 entry is kicked out. */
2426 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2427
2428 /* Indicates the type of fixup needed after a constant multiplication.
2429 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2430 the result should be negated, and ADD_VARIANT means that the
2431 multiplicand should be added to the result. */
2432 enum mult_variant {basic_variant, negate_variant, add_variant};
2433
2434 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2435 const struct mult_cost *, enum machine_mode mode);
2436 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2437 struct algorithm *, enum mult_variant *, int);
2438 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2439 const struct algorithm *, enum mult_variant);
2440 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2441 int, rtx *, int *, int *);
2442 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2443 static rtx extract_high_half (enum machine_mode, rtx);
2444 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2445 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2446 int, int);
2447 /* Compute and return the best algorithm for multiplying by T.
2448 The algorithm must cost less than COST_LIMIT.
2449 If retval.cost >= COST_LIMIT, no algorithm was found and all
2450 other fields of the returned struct are undefined.
2451 MODE is the machine mode of the multiplication. */
2452
2453 static void
2454 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2455 const struct mult_cost *cost_limit, enum machine_mode mode)
2456 {
2457 int m;
2458 struct algorithm *alg_in, *best_alg;
2459 struct mult_cost best_cost;
2460 struct mult_cost new_limit;
2461 int op_cost, op_latency;
2462 unsigned HOST_WIDE_INT q;
2463 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2464 int hash_index;
2465 bool cache_hit = false;
2466 enum alg_code cache_alg = alg_zero;
2467
2468 /* Indicate that no algorithm is yet found. If no algorithm
2469 is found, this value will be returned and indicate failure. */
2470 alg_out->cost.cost = cost_limit->cost + 1;
2471 alg_out->cost.latency = cost_limit->latency + 1;
2472
2473 if (cost_limit->cost < 0
2474 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2475 return;
2476
2477 /* Restrict the bits of "t" to the multiplication's mode. */
2478 t &= GET_MODE_MASK (mode);
2479
2480 /* t == 1 can be done in zero cost. */
2481 if (t == 1)
2482 {
2483 alg_out->ops = 1;
2484 alg_out->cost.cost = 0;
2485 alg_out->cost.latency = 0;
2486 alg_out->op[0] = alg_m;
2487 return;
2488 }
2489
2490 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2491 fail now. */
2492 if (t == 0)
2493 {
2494 if (MULT_COST_LESS (cost_limit, zero_cost))
2495 return;
2496 else
2497 {
2498 alg_out->ops = 1;
2499 alg_out->cost.cost = zero_cost;
2500 alg_out->cost.latency = zero_cost;
2501 alg_out->op[0] = alg_zero;
2502 return;
2503 }
2504 }
2505
2506 /* We'll be needing a couple extra algorithm structures now. */
2507
2508 alg_in = alloca (sizeof (struct algorithm));
2509 best_alg = alloca (sizeof (struct algorithm));
2510 best_cost = *cost_limit;
2511
2512 /* Compute the hash index. */
2513 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2514
2515 /* See if we already know what to do for T. */
2516 if (alg_hash[hash_index].t == t
2517 && alg_hash[hash_index].mode == mode
2518 && alg_hash[hash_index].alg != alg_unknown)
2519 {
2520 cache_alg = alg_hash[hash_index].alg;
2521
2522 if (cache_alg == alg_impossible)
2523 {
2524 /* The cache tells us that it's impossible to synthesize
2525 multiplication by T within alg_hash[hash_index].cost. */
2526 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2527 /* COST_LIMIT is at least as restrictive as the one
2528 recorded in the hash table, in which case we have no
2529 hope of synthesizing a multiplication. Just
2530 return. */
2531 return;
2532
2533 /* If we get here, COST_LIMIT is less restrictive than the
2534 one recorded in the hash table, so we may be able to
2535 synthesize a multiplication. Proceed as if we didn't
2536 have the cache entry. */
2537 }
2538 else
2539 {
2540 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2541 /* The cached algorithm shows that this multiplication
2542 requires more cost than COST_LIMIT. Just return. This
2543 way, we don't clobber this cache entry with
2544 alg_impossible but retain useful information. */
2545 return;
2546
2547 cache_hit = true;
2548
2549 switch (cache_alg)
2550 {
2551 case alg_shift:
2552 goto do_alg_shift;
2553
2554 case alg_add_t_m2:
2555 case alg_sub_t_m2:
2556 goto do_alg_addsub_t_m2;
2557
2558 case alg_add_factor:
2559 case alg_sub_factor:
2560 goto do_alg_addsub_factor;
2561
2562 case alg_add_t2_m:
2563 goto do_alg_add_t2_m;
2564
2565 case alg_sub_t2_m:
2566 goto do_alg_sub_t2_m;
2567
2568 default:
2569 gcc_unreachable ();
2570 }
2571 }
2572 }
2573
2574 /* If we have a group of zero bits at the low-order part of T, try
2575 multiplying by the remaining bits and then doing a shift. */
2576
2577 if ((t & 1) == 0)
2578 {
2579 do_alg_shift:
2580 m = floor_log2 (t & -t); /* m = number of low zero bits */
2581 if (m < maxm)
2582 {
2583 q = t >> m;
2584 /* The function expand_shift will choose between a shift and
2585 a sequence of additions, so the observed cost is given as
2586 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2587 op_cost = m * add_cost[mode];
2588 if (shift_cost[mode][m] < op_cost)
2589 op_cost = shift_cost[mode][m];
2590 new_limit.cost = best_cost.cost - op_cost;
2591 new_limit.latency = best_cost.latency - op_cost;
2592 synth_mult (alg_in, q, &new_limit, mode);
2593
2594 alg_in->cost.cost += op_cost;
2595 alg_in->cost.latency += op_cost;
2596 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2597 {
2598 struct algorithm *x;
2599 best_cost = alg_in->cost;
2600 x = alg_in, alg_in = best_alg, best_alg = x;
2601 best_alg->log[best_alg->ops] = m;
2602 best_alg->op[best_alg->ops] = alg_shift;
2603 }
2604 }
2605 if (cache_hit)
2606 goto done;
2607 }
2608
2609 /* If we have an odd number, add or subtract one. */
2610 if ((t & 1) != 0)
2611 {
2612 unsigned HOST_WIDE_INT w;
2613
2614 do_alg_addsub_t_m2:
2615 for (w = 1; (w & t) != 0; w <<= 1)
2616 ;
2617 /* If T was -1, then W will be zero after the loop. This is another
2618 case where T ends with ...111. Handling it by multiplying by (T + 1) and
2619 subtracting 1 produces slightly better code and makes algorithm
2620 selection much faster than treating it like the ...0111 case
2621 below. */
2622 if (w == 0
2623 || (w > 2
2624 /* Reject the case where t is 3.
2625 Thus we prefer addition in that case. */
2626 && t != 3))
2627 {
2628 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2629
2630 op_cost = add_cost[mode];
2631 new_limit.cost = best_cost.cost - op_cost;
2632 new_limit.latency = best_cost.latency - op_cost;
2633 synth_mult (alg_in, t + 1, &new_limit, mode);
2634
2635 alg_in->cost.cost += op_cost;
2636 alg_in->cost.latency += op_cost;
2637 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2638 {
2639 struct algorithm *x;
2640 best_cost = alg_in->cost;
2641 x = alg_in, alg_in = best_alg, best_alg = x;
2642 best_alg->log[best_alg->ops] = 0;
2643 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2644 }
2645 }
2646 else
2647 {
2648 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2649
2650 op_cost = add_cost[mode];
2651 new_limit.cost = best_cost.cost - op_cost;
2652 new_limit.latency = best_cost.latency - op_cost;
2653 synth_mult (alg_in, t - 1, &new_limit, mode);
2654
2655 alg_in->cost.cost += op_cost;
2656 alg_in->cost.latency += op_cost;
2657 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2658 {
2659 struct algorithm *x;
2660 best_cost = alg_in->cost;
2661 x = alg_in, alg_in = best_alg, best_alg = x;
2662 best_alg->log[best_alg->ops] = 0;
2663 best_alg->op[best_alg->ops] = alg_add_t_m2;
2664 }
2665 }
2666 if (cache_hit)
2667 goto done;
2668 }
2669
2670 /* Look for factors of t of the form
2671 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2672 If we find such a factor, we can multiply by t using an algorithm that
2673 multiplies by q, shifts the result by m and adds/subtracts it to/from itself.
2674
2675 We search for large factors first and loop down, even if large factors
2676 are less probable than small; if we find a large factor we will find a
2677 good sequence quickly, and therefore be able to prune (by decreasing
2678 COST_LIMIT) the search. */
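
/* For example, t = 45 factors as 3 * (2**4 - 1): the m = 4 iteration
   below finds d = 15, records alg_sub_factor (total := total*16 - total)
   and recurses on t / d = 3, which is itself a single shift-and-add.  */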
2679
2680 do_alg_addsub_factor:
2681 for (m = floor_log2 (t - 1); m >= 2; m--)
2682 {
2683 unsigned HOST_WIDE_INT d;
2684
2685 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2686 if (t % d == 0 && t > d && m < maxm
2687 && (!cache_hit || cache_alg == alg_add_factor))
2688 {
2689 /* If the target has a cheap shift-and-add instruction use
2690 that in preference to a shift insn followed by an add insn.
2691 Assume that the shift-and-add is "atomic" with a latency
2692 equal to its cost, otherwise assume that on superscalar
2693 hardware the shift may be executed concurrently with the
2694 earlier steps in the algorithm. */
2695 op_cost = add_cost[mode] + shift_cost[mode][m];
2696 if (shiftadd_cost[mode][m] < op_cost)
2697 {
2698 op_cost = shiftadd_cost[mode][m];
2699 op_latency = op_cost;
2700 }
2701 else
2702 op_latency = add_cost[mode];
2703
2704 new_limit.cost = best_cost.cost - op_cost;
2705 new_limit.latency = best_cost.latency - op_latency;
2706 synth_mult (alg_in, t / d, &new_limit, mode);
2707
2708 alg_in->cost.cost += op_cost;
2709 alg_in->cost.latency += op_latency;
2710 if (alg_in->cost.latency < op_cost)
2711 alg_in->cost.latency = op_cost;
2712 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2713 {
2714 struct algorithm *x;
2715 best_cost = alg_in->cost;
2716 x = alg_in, alg_in = best_alg, best_alg = x;
2717 best_alg->log[best_alg->ops] = m;
2718 best_alg->op[best_alg->ops] = alg_add_factor;
2719 }
2720 /* Other factors will have been taken care of in the recursion. */
2721 break;
2722 }
2723
2724 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2725 if (t % d == 0 && t > d && m < maxm
2726 && (!cache_hit || cache_alg == alg_sub_factor))
2727 {
2728 /* If the target has a cheap shift-and-subtract insn use
2729 that in preference to a shift insn followed by a sub insn.
2730 Assume that the shift-and-sub is "atomic" with a latency
2731 equal to its cost, otherwise assume that on superscalar
2732 hardware the shift may be executed concurrently with the
2733 earlier steps in the algorithm. */
2734 op_cost = add_cost[mode] + shift_cost[mode][m];
2735 if (shiftsub_cost[mode][m] < op_cost)
2736 {
2737 op_cost = shiftsub_cost[mode][m];
2738 op_latency = op_cost;
2739 }
2740 else
2741 op_latency = add_cost[mode];
2742
2743 new_limit.cost = best_cost.cost - op_cost;
2744 new_limit.latency = best_cost.latency - op_latency;
2745 synth_mult (alg_in, t / d, &new_limit, mode);
2746
2747 alg_in->cost.cost += op_cost;
2748 alg_in->cost.latency += op_latency;
2749 if (alg_in->cost.latency < op_cost)
2750 alg_in->cost.latency = op_cost;
2751 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2752 {
2753 struct algorithm *x;
2754 best_cost = alg_in->cost;
2755 x = alg_in, alg_in = best_alg, best_alg = x;
2756 best_alg->log[best_alg->ops] = m;
2757 best_alg->op[best_alg->ops] = alg_sub_factor;
2758 }
2759 break;
2760 }
2761 }
2762 if (cache_hit)
2763 goto done;
2764
2765 /* Try shift-and-add (load effective address) instructions,
2766 i.e. do a*3, a*5, a*9. */
2767 if ((t & 1) != 0)
2768 {
2769 do_alg_add_t2_m:
2770 q = t - 1;
2771 q = q & -q;
2772 m = exact_log2 (q);
2773 if (m >= 0 && m < maxm)
2774 {
2775 op_cost = shiftadd_cost[mode][m];
2776 new_limit.cost = best_cost.cost - op_cost;
2777 new_limit.latency = best_cost.latency - op_cost;
2778 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2779
2780 alg_in->cost.cost += op_cost;
2781 alg_in->cost.latency += op_cost;
2782 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2783 {
2784 struct algorithm *x;
2785 best_cost = alg_in->cost;
2786 x = alg_in, alg_in = best_alg, best_alg = x;
2787 best_alg->log[best_alg->ops] = m;
2788 best_alg->op[best_alg->ops] = alg_add_t2_m;
2789 }
2790 }
2791 if (cache_hit)
2792 goto done;
2793
2794 do_alg_sub_t2_m:
2795 q = t + 1;
2796 q = q & -q;
2797 m = exact_log2 (q);
2798 if (m >= 0 && m < maxm)
2799 {
2800 op_cost = shiftsub_cost[mode][m];
2801 new_limit.cost = best_cost.cost - op_cost;
2802 new_limit.latency = best_cost.latency - op_cost;
2803 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2804
2805 alg_in->cost.cost += op_cost;
2806 alg_in->cost.latency += op_cost;
2807 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2808 {
2809 struct algorithm *x;
2810 best_cost = alg_in->cost;
2811 x = alg_in, alg_in = best_alg, best_alg = x;
2812 best_alg->log[best_alg->ops] = m;
2813 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2814 }
2815 }
2816 if (cache_hit)
2817 goto done;
2818 }
2819
2820 done:
2821 /* If best_cost has not decreased, we have not found any algorithm. */
2822 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2823 {
2824 /* We failed to find an algorithm. Record alg_impossible for
2825 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2826 we are asked to find an algorithm for T within the same or
2827 lower COST_LIMIT, we can immediately return to the
2828 caller. */
2829 alg_hash[hash_index].t = t;
2830 alg_hash[hash_index].mode = mode;
2831 alg_hash[hash_index].alg = alg_impossible;
2832 alg_hash[hash_index].cost = *cost_limit;
2833 return;
2834 }
2835
2836 /* Cache the result. */
2837 if (!cache_hit)
2838 {
2839 alg_hash[hash_index].t = t;
2840 alg_hash[hash_index].mode = mode;
2841 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2842 alg_hash[hash_index].cost.cost = best_cost.cost;
2843 alg_hash[hash_index].cost.latency = best_cost.latency;
2844 }
2845
2846 /* If we are getting too long a sequence for `struct algorithm'
2847 to record, make this search fail. */
2848 if (best_alg->ops == MAX_BITS_PER_WORD)
2849 return;
2850
2851 /* Copy the algorithm from temporary space to the space at alg_out.
2852 We avoid using structure assignment because the majority of
2853 best_alg is normally undefined, and this is a critical function. */
2854 alg_out->ops = best_alg->ops + 1;
2855 alg_out->cost = best_cost;
2856 memcpy (alg_out->op, best_alg->op,
2857 alg_out->ops * sizeof *alg_out->op);
2858 memcpy (alg_out->log, best_alg->log,
2859 alg_out->ops * sizeof *alg_out->log);
2860 }
2861 \f
2862 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2863 Try three variations:
2864
2865 - a shift/add sequence based on VAL itself
2866 - a shift/add sequence based on -VAL, followed by a negation
2867 - a shift/add sequence based on VAL - 1, followed by an addition.
2868
2869 Return true if the cheapest of these cost less than MULT_COST,
2870 describing the algorithm in *ALG and final fixup in *VARIANT. */
2871
2872 static bool
2873 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2874 struct algorithm *alg, enum mult_variant *variant,
2875 int mult_cost)
2876 {
2877 struct algorithm alg2;
2878 struct mult_cost limit;
2879 int op_cost;
2880
2881 /* Fail quickly for impossible bounds. */
2882 if (mult_cost < 0)
2883 return false;
2884
2885 /* Ensure that mult_cost provides a reasonable upper bound.
2886 Any constant multiplication can be performed with less
2887 than 2 * bits additions. */
2888 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
2889 if (mult_cost > op_cost)
2890 mult_cost = op_cost;
2891
2892 *variant = basic_variant;
2893 limit.cost = mult_cost;
2894 limit.latency = mult_cost;
2895 synth_mult (alg, val, &limit, mode);
2896
2897 /* This works only if the inverted value actually fits in an
2898 `unsigned int'. */
2899 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2900 {
2901 op_cost = neg_cost[mode];
2902 if (MULT_COST_LESS (&alg->cost, mult_cost))
2903 {
2904 limit.cost = alg->cost.cost - op_cost;
2905 limit.latency = alg->cost.latency - op_cost;
2906 }
2907 else
2908 {
2909 limit.cost = mult_cost - op_cost;
2910 limit.latency = mult_cost - op_cost;
2911 }
2912
2913 synth_mult (&alg2, -val, &limit, mode);
2914 alg2.cost.cost += op_cost;
2915 alg2.cost.latency += op_cost;
2916 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2917 *alg = alg2, *variant = negate_variant;
2918 }
2919
2920 /* This proves very useful for division-by-constant. */
2921 op_cost = add_cost[mode];
2922 if (MULT_COST_LESS (&alg->cost, mult_cost))
2923 {
2924 limit.cost = alg->cost.cost - op_cost;
2925 limit.latency = alg->cost.latency - op_cost;
2926 }
2927 else
2928 {
2929 limit.cost = mult_cost - op_cost;
2930 limit.latency = mult_cost - op_cost;
2931 }
2932
2933 synth_mult (&alg2, val - 1, &limit, mode);
2934 alg2.cost.cost += op_cost;
2935 alg2.cost.latency += op_cost;
2936 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2937 *alg = alg2, *variant = add_variant;
2938
2939 return MULT_COST_LESS (&alg->cost, mult_cost);
2940 }
2941
2942 /* A subroutine of expand_mult, used for constant multiplications.
2943 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2944 convenient. Use the shift/add sequence described by ALG and apply
2945 the final fixup specified by VARIANT. */
2946
2947 static rtx
2948 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2949 rtx target, const struct algorithm *alg,
2950 enum mult_variant variant)
2951 {
2952 HOST_WIDE_INT val_so_far;
2953 rtx insn, accum, tem;
2954 int opno;
2955 enum machine_mode nmode;
2956
2957 /* Avoid referencing memory over and over.
2958 For speed, but also for correctness when mem is volatile. */
2959 if (MEM_P (op0))
2960 op0 = force_reg (mode, op0);
2961
2962 /* ACCUM starts out either as OP0 or as a zero, depending on
2963 the first operation. */
2964
2965 if (alg->op[0] == alg_zero)
2966 {
2967 accum = copy_to_mode_reg (mode, const0_rtx);
2968 val_so_far = 0;
2969 }
2970 else if (alg->op[0] == alg_m)
2971 {
2972 accum = copy_to_mode_reg (mode, op0);
2973 val_so_far = 1;
2974 }
2975 else
2976 gcc_unreachable ();
2977
2978 for (opno = 1; opno < alg->ops; opno++)
2979 {
2980 int log = alg->log[opno];
2981 rtx shift_subtarget = optimize ? 0 : accum;
2982 rtx add_target
2983 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2984 && !optimize)
2985 ? target : 0;
2986 rtx accum_target = optimize ? 0 : accum;
2987
2988 switch (alg->op[opno])
2989 {
2990 case alg_shift:
2991 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2992 build_int_cst (NULL_TREE, log),
2993 NULL_RTX, 0);
2994 val_so_far <<= log;
2995 break;
2996
2997 case alg_add_t_m2:
2998 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2999 build_int_cst (NULL_TREE, log),
3000 NULL_RTX, 0);
3001 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3002 add_target ? add_target : accum_target);
3003 val_so_far += (HOST_WIDE_INT) 1 << log;
3004 break;
3005
3006 case alg_sub_t_m2:
3007 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3008 build_int_cst (NULL_TREE, log),
3009 NULL_RTX, 0);
3010 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3011 add_target ? add_target : accum_target);
3012 val_so_far -= (HOST_WIDE_INT) 1 << log;
3013 break;
3014
3015 case alg_add_t2_m:
3016 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3017 build_int_cst (NULL_TREE, log),
3018 shift_subtarget,
3019 0);
3020 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3021 add_target ? add_target : accum_target);
3022 val_so_far = (val_so_far << log) + 1;
3023 break;
3024
3025 case alg_sub_t2_m:
3026 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3027 build_int_cst (NULL_TREE, log),
3028 shift_subtarget, 0);
3029 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3030 add_target ? add_target : accum_target);
3031 val_so_far = (val_so_far << log) - 1;
3032 break;
3033
3034 case alg_add_factor:
3035 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3036 build_int_cst (NULL_TREE, log),
3037 NULL_RTX, 0);
3038 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3039 add_target ? add_target : accum_target);
3040 val_so_far += val_so_far << log;
3041 break;
3042
3043 case alg_sub_factor:
3044 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3045 build_int_cst (NULL_TREE, log),
3046 NULL_RTX, 0);
3047 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3048 (add_target
3049 ? add_target : (optimize ? 0 : tem)));
3050 val_so_far = (val_so_far << log) - val_so_far;
3051 break;
3052
3053 default:
3054 gcc_unreachable ();
3055 }
3056
3057 /* Write a REG_EQUAL note on the last insn so that we can cse
3058 multiplication sequences. Note that if ACCUM is a SUBREG,
3059 we've set the inner register and must properly indicate
3060 that. */
3061
3062 tem = op0, nmode = mode;
3063 if (GET_CODE (accum) == SUBREG)
3064 {
3065 nmode = GET_MODE (SUBREG_REG (accum));
3066 tem = gen_lowpart (nmode, op0);
3067 }
3068
3069 insn = get_last_insn ();
3070 set_unique_reg_note (insn, REG_EQUAL,
3071 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
3072 }
3073
3074 if (variant == negate_variant)
3075 {
3076 val_so_far = -val_so_far;
3077 accum = expand_unop (mode, neg_optab, accum, target, 0);
3078 }
3079 else if (variant == add_variant)
3080 {
3081 val_so_far = val_so_far + 1;
3082 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3083 }
3084
3085 /* Compare only the bits of val and val_so_far that are significant
3086 in the result mode, to avoid sign-/zero-extension confusion. */
3087 val &= GET_MODE_MASK (mode);
3088 val_so_far &= GET_MODE_MASK (mode);
3089 gcc_assert (val == val_so_far);
3090
3091 return accum;
3092 }
3093
3094 /* Perform a multiplication and return an rtx for the result.
3095 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3096 TARGET is a suggestion for where to store the result (an rtx).
3097
3098 We check specially for a constant integer as OP1.
3099 If you want this check for OP0 as well, then before calling
3100 you should swap the two operands if OP0 would be constant. */
3101
3102 rtx
3103 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3104 int unsignedp)
3105 {
3106 enum mult_variant variant;
3107 struct algorithm algorithm;
3108 int max_cost;
3109
3110 /* Handling const0_rtx here allows us to use zero as a rogue value for
3111 coeff below. */
3112 if (op1 == const0_rtx)
3113 return const0_rtx;
3114 if (op1 == const1_rtx)
3115 return op0;
3116 if (op1 == constm1_rtx)
3117 return expand_unop (mode,
3118 GET_MODE_CLASS (mode) == MODE_INT
3119 && !unsignedp && flag_trapv
3120 ? negv_optab : neg_optab,
3121 op0, target, 0);
3122
3123 /* These are the operations that are potentially turned into a sequence
3124 of shifts and additions. */
3125 if (SCALAR_INT_MODE_P (mode)
3126 && (unsignedp || !flag_trapv))
3127 {
3128 HOST_WIDE_INT coeff = 0;
3129 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3130
3131 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3132 less than or equal in size to `unsigned int' this doesn't matter.
3133 If the mode is larger than `unsigned int', then synth_mult works
3134 only if the constant value exactly fits in an `unsigned int' without
3135 any truncation. This means that multiplying by negative values does
3136 not work; results are off by 2^32 on a 32 bit machine. */
3137
3138 if (GET_CODE (op1) == CONST_INT)
3139 {
3140 /* Attempt to handle multiplication of DImode values by negative
3141 coefficients, by performing the multiplication by a positive
3142 multiplier and then inverting the result. */
3143 if (INTVAL (op1) < 0
3144 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3145 {
3146 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3147 result is interpreted as an unsigned coefficient.
3148 Exclude cost of op0 from max_cost to match the cost
3149 calculation of the synth_mult. */
3150 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3151 - neg_cost[mode];
3152 if (max_cost > 0
3153 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3154 &variant, max_cost))
3155 {
3156 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3157 NULL_RTX, &algorithm,
3158 variant);
3159 return expand_unop (mode, neg_optab, temp, target, 0);
3160 }
3161 }
3162 else coeff = INTVAL (op1);
3163 }
3164 else if (GET_CODE (op1) == CONST_DOUBLE)
3165 {
3166 /* If we are multiplying in DImode, it may still be a win
3167 to try to work with shifts and adds. */
3168 if (CONST_DOUBLE_HIGH (op1) == 0)
3169 coeff = CONST_DOUBLE_LOW (op1);
3170 else if (CONST_DOUBLE_LOW (op1) == 0
3171 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3172 {
3173 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3174 + HOST_BITS_PER_WIDE_INT;
3175 return expand_shift (LSHIFT_EXPR, mode, op0,
3176 build_int_cst (NULL_TREE, shift),
3177 target, unsignedp);
3178 }
3179 }
3180
3181 /* We used to test optimize here, on the grounds that it's better to
3182 produce a smaller program when -O is not used. But this causes
3183 such a terrible slowdown sometimes that it seems better to always
3184 use synth_mult. */
3185 if (coeff != 0)
3186 {
3187 /* Special case powers of two. */
3188 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3189 return expand_shift (LSHIFT_EXPR, mode, op0,
3190 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3191 target, unsignedp);
3192
3193 /* Exclude cost of op0 from max_cost to match the cost
3194 calculation of the synth_mult. */
3195 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3196 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3197 max_cost))
3198 return expand_mult_const (mode, op0, coeff, target,
3199 &algorithm, variant);
3200 }
3201 }
3202
3203 if (GET_CODE (op0) == CONST_DOUBLE)
3204 {
3205 rtx temp = op0;
3206 op0 = op1;
3207 op1 = temp;
3208 }
3209
3210 /* Expand x*2.0 as x+x. */
3211 if (GET_CODE (op1) == CONST_DOUBLE
3212 && SCALAR_FLOAT_MODE_P (mode))
3213 {
3214 REAL_VALUE_TYPE d;
3215 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3216
3217 if (REAL_VALUES_EQUAL (d, dconst2))
3218 {
3219 op0 = force_reg (GET_MODE (op0), op0);
3220 return expand_binop (mode, add_optab, op0, op0,
3221 target, unsignedp, OPTAB_LIB_WIDEN);
3222 }
3223 }
3224
3225 /* This used to use umul_optab if unsigned, but for non-widening multiply
3226 there is no difference between signed and unsigned. */
3227 op0 = expand_binop (mode,
3228 ! unsignedp
3229 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3230 ? smulv_optab : smul_optab,
3231 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3232 gcc_assert (op0);
3233 return op0;
3234 }
3235 \f
3236 /* Return the smallest n such that 2**n >= X. */
3237
3238 int
3239 ceil_log2 (unsigned HOST_WIDE_INT x)
3240 {
3241 return floor_log2 (x - 1) + 1;
3242 }
3243
3244 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3245 replace division by D, and put the least significant N bits of the result
3246 in *MULTIPLIER_PTR and return the most significant bit.
3247
3248 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3249 needed precision is in PRECISION (should be <= N).
3250
3251 PRECISION should be as small as possible so this function can choose
3252 multiplier more freely.
3253
3254 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3255 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3256
3257 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3258 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
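
/* For example (assuming a 64-bit HOST_WIDE_INT), for D == 7 with
   N == PRECISION == 32 this chooses the well-known reciprocal for
   unsigned division by 7: *MULTIPLIER_PTR == 0x24924925,
   *POST_SHIFT_PTR == 3, *LGUP_PTR == 3, and the returned most
   significant bit is 1, so the full multiplier is 2^32 + 0x24924925.  */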
3259
3260 static
3261 unsigned HOST_WIDE_INT
3262 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3263 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3264 {
3265 HOST_WIDE_INT mhigh_hi, mlow_hi;
3266 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3267 int lgup, post_shift;
3268 int pow, pow2;
3269 unsigned HOST_WIDE_INT nl, dummy1;
3270 HOST_WIDE_INT nh, dummy2;
3271
3272 /* lgup = ceil(log2(divisor)); */
3273 lgup = ceil_log2 (d);
3274
3275 gcc_assert (lgup <= n);
3276
3277 pow = n + lgup;
3278 pow2 = n + lgup - precision;
3279
3280 /* We could handle this with some effort, but this case is much
3281 better handled directly with a scc insn, so rely on caller using
3282 that. */
3283 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3284
3285 /* mlow = 2^(N + lgup)/d */
3286 if (pow >= HOST_BITS_PER_WIDE_INT)
3287 {
3288 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3289 nl = 0;
3290 }
3291 else
3292 {
3293 nh = 0;
3294 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3295 }
3296 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3297 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3298
3299 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3300 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3301 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3302 else
3303 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3304 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3305 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3306
3307 gcc_assert (!mhigh_hi || nh - d < d);
3308 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3309 /* Assert that mlow < mhigh. */
3310 gcc_assert (mlow_hi < mhigh_hi
3311 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3312
3313 /* If precision == N, then mlow, mhigh exceed 2^N
3314 (but they do not exceed 2^(N+1)). */
3315
3316 /* Reduce to lowest terms. */
3317 for (post_shift = lgup; post_shift > 0; post_shift--)
3318 {
3319 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3320 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3321 if (ml_lo >= mh_lo)
3322 break;
3323
3324 mlow_hi = 0;
3325 mlow_lo = ml_lo;
3326 mhigh_hi = 0;
3327 mhigh_lo = mh_lo;
3328 }
3329
3330 *post_shift_ptr = post_shift;
3331 *lgup_ptr = lgup;
3332 if (n < HOST_BITS_PER_WIDE_INT)
3333 {
3334 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3335 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3336 return mhigh_lo >= mask;
3337 }
3338 else
3339 {
3340 *multiplier_ptr = GEN_INT (mhigh_lo);
3341 return mhigh_hi;
3342 }
3343 }
3344
3345 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3346 congruent to 1 (mod 2**N). */
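
/* For example, invert_mod2n (7, 8) == 183: starting from y = 7, the
   first Newton step gives y = 7 * (2 - 7*7) & 0xff == 183, which later
   steps leave unchanged, and indeed 7 * 183 == 1281 == 5*256 + 1.  */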
3347
3348 static unsigned HOST_WIDE_INT
3349 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3350 {
3351 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3352
3353 /* The algorithm notes that the choice y = x satisfies
3354 x*y == 1 mod 2^3, since x is assumed odd.
3355 Each iteration doubles the number of bits of significance in y. */
3356
3357 unsigned HOST_WIDE_INT mask;
3358 unsigned HOST_WIDE_INT y = x;
3359 int nbit = 3;
3360
3361 mask = (n == HOST_BITS_PER_WIDE_INT
3362 ? ~(unsigned HOST_WIDE_INT) 0
3363 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3364
3365 while (nbit < n)
3366 {
3367 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3368 nbit *= 2;
3369 }
3370 return y;
3371 }
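/* Illustrative sketch (not part of GCC itself): the same iteration written
   as a self-contained function for a 32-bit word, assuming X is odd.

     #include <stdint.h>

     static uint32_t
     invert_mod2_32 (uint32_t x)
     {
       uint32_t y = x;
       int nbit;
       for (nbit = 3; nbit < 32; nbit *= 2)
         y = y * (2 - x * y);
       return y;
     }

   For example, invert_mod2_32 (7) is 0xB6DB6DB7, and
   7 * 0xB6DB6DB7 == 0x500000001, which is 1 modulo 2^32.  */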
3372
3373 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3374 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3375 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3376 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3377 become signed.
3378
3379 The result is put in TARGET if that is convenient.
3380
3381 MODE is the mode of operation. */
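/* Illustrative note: what this implements is the identity (modulo 2^N, for
   N-bit operands)

     uhigh (a, b) == shigh (a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0)

   where uhigh and shigh are the unsigned and signed high halves of the full
   product; the conditional terms are formed below as (a >> (N - 1)) & b
   using an arithmetic shift.  E.g. with N = 4, a = -2 (bit pattern 14) and
   b = -3 (13): the unsigned product 14 * 13 = 182 has high nibble 11, the
   signed product 6 has high nibble 0, and 0 + 13 + 14 == 11 modulo 16.  */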
3382
3383 rtx
3384 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3385 rtx op1, rtx target, int unsignedp)
3386 {
3387 rtx tem;
3388 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3389
3390 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3391 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3392 NULL_RTX, 0);
3393 tem = expand_and (mode, tem, op1, NULL_RTX);
3394 adj_operand
3395 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3396 adj_operand);
3397
3398 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3399 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3400 NULL_RTX, 0);
3401 tem = expand_and (mode, tem, op0, NULL_RTX);
3402 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3403 target);
3404
3405 return target;
3406 }
3407
3408 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3409
3410 static rtx
3411 extract_high_half (enum machine_mode mode, rtx op)
3412 {
3413 enum machine_mode wider_mode;
3414
3415 if (mode == word_mode)
3416 return gen_highpart (mode, op);
3417
3418 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3419
3420 wider_mode = GET_MODE_WIDER_MODE (mode);
3421 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3422 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3423 return convert_modes (mode, wider_mode, op, 0);
3424 }
3425
3426 /* Like expand_mult_highpart, but only consider using a multiplication
3427 optab. OP1 is an rtx for the constant operand. */
3428
3429 static rtx
3430 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3431 rtx target, int unsignedp, int max_cost)
3432 {
3433 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3434 enum machine_mode wider_mode;
3435 optab moptab;
3436 rtx tem;
3437 int size;
3438
3439 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3440
3441 wider_mode = GET_MODE_WIDER_MODE (mode);
3442 size = GET_MODE_BITSIZE (mode);
3443
3444 /* Firstly, try using a multiplication insn that only generates the needed
3445 high part of the product, and in the sign flavor of unsignedp. */
3446 if (mul_highpart_cost[mode] < max_cost)
3447 {
3448 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3449 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3450 unsignedp, OPTAB_DIRECT);
3451 if (tem)
3452 return tem;
3453 }
3454
3455 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3456 Need to adjust the result after the multiplication. */
3457 if (size - 1 < BITS_PER_WORD
3458 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3459 + 4 * add_cost[mode] < max_cost))
3460 {
3461 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3462 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3463 unsignedp, OPTAB_DIRECT);
3464 if (tem)
3465 /* We used the wrong signedness. Adjust the result. */
3466 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3467 tem, unsignedp);
3468 }
3469
3470 /* Try widening multiplication. */
3471 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3472 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3473 && mul_widen_cost[wider_mode] < max_cost)
3474 {
3475 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3476 unsignedp, OPTAB_WIDEN);
3477 if (tem)
3478 return extract_high_half (mode, tem);
3479 }
3480
3481 /* Try widening the mode and performing a non-widening multiplication. */
3482 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3483 && size - 1 < BITS_PER_WORD
3484 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3485 {
3486 rtx insns, wop0, wop1;
3487
3488 /* We need to widen the operands, for example to ensure the
3489 constant multiplier is correctly sign or zero extended.
3490 Use a sequence to clean up any instructions emitted by
3491 the conversions if things don't work out. */
3492 start_sequence ();
3493 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3494 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3495 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3496 unsignedp, OPTAB_WIDEN);
3497 insns = get_insns ();
3498 end_sequence ();
3499
3500 if (tem)
3501 {
3502 emit_insn (insns);
3503 return extract_high_half (mode, tem);
3504 }
3505 }
3506
3507 /* Try widening multiplication of opposite signedness, and adjust. */
3508 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3509 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3510 && size - 1 < BITS_PER_WORD
3511 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3512 + 4 * add_cost[mode] < max_cost))
3513 {
3514 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3515 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3516 if (tem != 0)
3517 {
3518 tem = extract_high_half (mode, tem);
3519 /* We used the wrong signedness. Adjust the result. */
3520 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3521 target, unsignedp);
3522 }
3523 }
3524
3525 return 0;
3526 }
3527
3528 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3529 putting the high half of the result in TARGET if that is convenient,
3530 and return where the result is. If the operation cannot be performed,
3531 0 is returned.
3532
3533 MODE is the mode of operation and result.
3534
3535 UNSIGNEDP nonzero means unsigned multiply.
3536
3537 MAX_COST is the total allowed cost for the expanded RTL. */
3538
3539 static rtx
3540 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3541 rtx target, int unsignedp, int max_cost)
3542 {
3543 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3544 unsigned HOST_WIDE_INT cnst1;
3545 int extra_cost;
3546 bool sign_adjust = false;
3547 enum mult_variant variant;
3548 struct algorithm alg;
3549 rtx tem;
3550
3551 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3552 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3553 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3554
3555 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3556
3557 /* We can't optimize modes wider than BITS_PER_WORD.
3558 ??? We might be able to perform double-word arithmetic if
3559 mode == word_mode; however, all the cost calculations in
3560 synth_mult etc. assume single-word operations. */
3561 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3562 return expand_mult_highpart_optab (mode, op0, op1, target,
3563 unsignedp, max_cost);
3564
3565 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3566
3567 /* Check whether we try to multiply by a negative constant. */
3568 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3569 {
3570 sign_adjust = true;
3571 extra_cost += add_cost[mode];
3572 }
3573
3574 /* See whether shift/add multiplication is cheap enough. */
3575 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3576 max_cost - extra_cost))
3577 {
3578 /* See whether the specialized multiplication optabs are
3579 cheaper than the shift/add version. */
3580 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3581 alg.cost.cost + extra_cost);
3582 if (tem)
3583 return tem;
3584
3585 tem = convert_to_mode (wider_mode, op0, unsignedp);
3586 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3587 tem = extract_high_half (mode, tem);
3588
3589 /* Adjust result for signedness. */
3590 if (sign_adjust)
3591 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3592
3593 return tem;
3594 }
3595 return expand_mult_highpart_optab (mode, op0, op1, target,
3596 unsignedp, max_cost);
3597 }
3598
3599
3600 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3601
3602 static rtx
3603 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3604 {
3605 unsigned HOST_WIDE_INT masklow, maskhigh;
3606 rtx result, temp, shift, label;
3607 int logd;
3608
3609 logd = floor_log2 (d);
3610 result = gen_reg_rtx (mode);
3611
3612 /* Avoid conditional branches when they're expensive. */
3613 if (BRANCH_COST >= 2
3614 && !optimize_size)
3615 {
3616 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3617 mode, 0, -1);
3618 if (signmask)
3619 {
3620 signmask = force_reg (mode, signmask);
3621 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3622 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3623
3624 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3625 which instruction sequence to use. If logical right shifts
3626 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3627 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
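/* Either way the result is the C-style truncating remainder.  For example,
   with D = 8 and OP0 = -13 the sign mask is all ones, and both sequences
   evaluate to -5, which is -13 % 8.  */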
3628
3629 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3630 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3631 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3632 {
3633 temp = expand_binop (mode, xor_optab, op0, signmask,
3634 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3635 temp = expand_binop (mode, sub_optab, temp, signmask,
3636 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3637 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3638 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3639 temp = expand_binop (mode, xor_optab, temp, signmask,
3640 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3641 temp = expand_binop (mode, sub_optab, temp, signmask,
3642 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3643 }
3644 else
3645 {
3646 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3647 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3648 signmask = force_reg (mode, signmask);
3649
3650 temp = expand_binop (mode, add_optab, op0, signmask,
3651 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3652 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3653 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3654 temp = expand_binop (mode, sub_optab, temp, signmask,
3655 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3656 }
3657 return temp;
3658 }
3659 }
3660
3661 /* Mask contains the mode's signbit and the significant bits of the
3662 modulus. By including the signbit in the operation, many targets
3663 can avoid an explicit compare operation in the following comparison
3664 against zero. */
3665
3666 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3667 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3668 {
3669 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3670 maskhigh = -1;
3671 }
3672 else
3673 maskhigh = (HOST_WIDE_INT) -1
3674 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3675
3676 temp = expand_binop (mode, and_optab, op0,
3677 immed_double_const (masklow, maskhigh, mode),
3678 result, 1, OPTAB_LIB_WIDEN);
3679 if (temp != result)
3680 emit_move_insn (result, temp);
3681
3682 label = gen_label_rtx ();
3683 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3684
3685 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3686 0, OPTAB_LIB_WIDEN);
3687 masklow = (HOST_WIDE_INT) -1 << logd;
3688 maskhigh = -1;
3689 temp = expand_binop (mode, ior_optab, temp,
3690 immed_double_const (masklow, maskhigh, mode),
3691 result, 1, OPTAB_LIB_WIDEN);
3692 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3693 0, OPTAB_LIB_WIDEN);
3694 if (temp != result)
3695 emit_move_insn (result, temp);
3696 emit_label (label);
3697 return result;
3698 }
3699
3700 /* Expand signed division of OP0 by a power of two D in mode MODE.
3701 This routine is only called for positive values of D. */
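/* For example, for D = 4 every variant below adds D - 1 = 3 to OP0 only
   when OP0 is negative and then does an arithmetic right shift by 2; i.e.
   the quotient is (OP0 + ((OP0 >> (BITS - 1)) & 3)) >> 2, which rounds
   toward zero as TRUNC_DIV_EXPR requires.  */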
3702
3703 static rtx
3704 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3705 {
3706 rtx temp, label;
3707 tree shift;
3708 int logd;
3709
3710 logd = floor_log2 (d);
3711 shift = build_int_cst (NULL_TREE, logd);
3712
3713 if (d == 2 && BRANCH_COST >= 1)
3714 {
3715 temp = gen_reg_rtx (mode);
3716 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3717 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3718 0, OPTAB_LIB_WIDEN);
3719 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3720 }
3721
3722 #ifdef HAVE_conditional_move
3723 if (BRANCH_COST >= 2)
3724 {
3725 rtx temp2;
3726
3727 /* ??? emit_conditional_move forces a stack adjustment via
3728 compare_from_rtx, so if the sequence is discarded, it will
3729 be lost. Do it now instead. */
3730 do_pending_stack_adjust ();
3731
3732 start_sequence ();
3733 temp2 = copy_to_mode_reg (mode, op0);
3734 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3735 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3736 temp = force_reg (mode, temp);
3737
3738 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3739 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3740 mode, temp, temp2, mode, 0);
3741 if (temp2)
3742 {
3743 rtx seq = get_insns ();
3744 end_sequence ();
3745 emit_insn (seq);
3746 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3747 }
3748 end_sequence ();
3749 }
3750 #endif
3751
3752 if (BRANCH_COST >= 2)
3753 {
3754 int ushift = GET_MODE_BITSIZE (mode) - logd;
3755
3756 temp = gen_reg_rtx (mode);
3757 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3758 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3759 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3760 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3761 else
3762 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3763 build_int_cst (NULL_TREE, ushift),
3764 NULL_RTX, 1);
3765 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3766 0, OPTAB_LIB_WIDEN);
3767 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3768 }
3769
3770 label = gen_label_rtx ();
3771 temp = copy_to_mode_reg (mode, op0);
3772 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3773 expand_inc (temp, GEN_INT (d - 1));
3774 emit_label (label);
3775 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3776 }
3777 \f
3778 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3779 if that is convenient, and returning where the result is.
3780 You may request either the quotient or the remainder as the result;
3781 specify REM_FLAG nonzero to get the remainder.
3782
3783 CODE is the expression code for which kind of division this is;
3784 it controls how rounding is done. MODE is the machine mode to use.
3785 UNSIGNEDP nonzero means do unsigned division. */
3786
3787 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3788 and then correct it by or'ing in missing high bits
3789 if result of ANDI is nonzero.
3790 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3791 This could optimize to a bfexts instruction.
3792 But C doesn't use these operations, so their optimizations are
3793 left for later. */
3794 /* ??? For modulo, we don't actually need the highpart of the first product,
3795 the low part will do nicely. And for small divisors, the second multiply
3796 can also be a low-part only multiply or even be completely left out.
3797 E.g. to calculate the remainder of a division by 3 with a 32 bit
3798 multiply, multiply with 0x55555556 and extract the upper two bits;
3799 the result is exact for inputs up to 0x1fffffff.
3800 The input range can be reduced by using cross-sum rules.
3801 For odd divisors >= 3, the following table gives right shift counts
3802 so that if a number is shifted by an integer multiple of the given
3803 amount, the remainder stays the same:
3804 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3805 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3806 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3807 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3808 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3809
3810 Cross-sum rules for even numbers can be derived by leaving as many bits
3811 to the right alone as the divisor has zeros to the right.
3812 E.g. if x is an unsigned 32 bit number:
3813 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3814 */
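/* Illustrative sketch (not part of the compiler): the remainder-by-3 trick
   mentioned above, written out for a 32-bit unsigned operand.

     #include <stdint.h>

     static uint32_t
     rem3 (uint32_t x)
     {
       uint32_t frac = x * UINT32_C (0x55555556);
       return frac >> 30;
     }

   0x55555556 is roughly 2^32 / 3, so the low 32 bits of the product carry
   the fractional part of x / 3 and their top two bits are x mod 3; the
   result is exact for x up to 0x1fffffff, matching the note above.  */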
3815
3816 rtx
3817 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3818 rtx op0, rtx op1, rtx target, int unsignedp)
3819 {
3820 enum machine_mode compute_mode;
3821 rtx tquotient;
3822 rtx quotient = 0, remainder = 0;
3823 rtx last;
3824 int size;
3825 rtx insn, set;
3826 optab optab1, optab2;
3827 int op1_is_constant, op1_is_pow2 = 0;
3828 int max_cost, extra_cost;
3829 static HOST_WIDE_INT last_div_const = 0;
3830 static HOST_WIDE_INT ext_op1;
3831
3832 op1_is_constant = GET_CODE (op1) == CONST_INT;
3833 if (op1_is_constant)
3834 {
3835 ext_op1 = INTVAL (op1);
3836 if (unsignedp)
3837 ext_op1 &= GET_MODE_MASK (mode);
3838 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3839 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3840 }
3841
3842 /*
3843 This is the structure of expand_divmod:
3844
3845 First comes code to fix up the operands so we can perform the operations
3846 correctly and efficiently.
3847
3848 Second comes a switch statement with code specific for each rounding mode.
3849 For some special operands this code emits all RTL for the desired
3850 operation, for other cases, it generates only a quotient and stores it in
3851 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3852 to indicate that it has not done anything.
3853
3854 Last comes code that finishes the operation. If QUOTIENT is set and
3855 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3856 QUOTIENT is not set, it is computed using trunc rounding.
3857
3858 We try to generate special code for division and remainder when OP1 is a
3859 constant. If |OP1| = 2**n we can use shifts and some other fast
3860 operations. For other values of OP1, we compute a carefully selected
3861 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3862 by m.
3863
3864 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3865 half of the product. Different strategies for generating the product are
3866 implemented in expand_mult_highpart.
3867
3868 If what we actually want is the remainder, we generate that by another
3869 by-constant multiplication and a subtraction. */
3870
3871 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3872 code below will malfunction if we are, so check here and handle
3873 the special case if so. */
3874 if (op1 == const1_rtx)
3875 return rem_flag ? const0_rtx : op0;
3876
3877 /* When dividing by -1, we could get an overflow.
3878 negv_optab can handle overflows. */
3879 if (! unsignedp && op1 == constm1_rtx)
3880 {
3881 if (rem_flag)
3882 return const0_rtx;
3883 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3884 ? negv_optab : neg_optab, op0, target, 0);
3885 }
3886
3887 if (target
3888 /* Don't use the function value register as a target
3889 since we have to read it as well as write it,
3890 and function-inlining gets confused by this. */
3891 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3892 /* Don't clobber an operand while doing a multi-step calculation. */
3893 || ((rem_flag || op1_is_constant)
3894 && (reg_mentioned_p (target, op0)
3895 || (MEM_P (op0) && MEM_P (target))))
3896 || reg_mentioned_p (target, op1)
3897 || (MEM_P (op1) && MEM_P (target))))
3898 target = 0;
3899
3900 /* Get the mode in which to perform this computation. Normally it will
3901 be MODE, but sometimes we can't do the desired operation in MODE.
3902 If so, pick a wider mode in which we can do the operation. Convert
3903 to that mode at the start to avoid repeated conversions.
3904
3905 First see what operations we need. These depend on the expression
3906 we are evaluating. (We assume that divxx3 insns exist under the
3907 same conditions as modxx3 insns, and that these insns don't normally
3908 fail. If these assumptions are not correct, we may generate less
3909 efficient code in some cases.)
3910
3911 Then see if we find a mode in which we can open-code that operation
3912 (either a division, modulus, or shift). Finally, check for the smallest
3913 mode for which we can do the operation with a library call. */
3914
3915 /* We might want to refine this now that we have division-by-constant
3916 optimization. Since expand_mult_highpart tries so many variants, it is
3917 not straightforward to generalize this. Maybe we should make an array
3918 of possible modes in init_expmed? Save this for GCC 2.7. */
3919
3920 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3921 ? (unsignedp ? lshr_optab : ashr_optab)
3922 : (unsignedp ? udiv_optab : sdiv_optab));
3923 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3924 ? optab1
3925 : (unsignedp ? udivmod_optab : sdivmod_optab));
3926
3927 for (compute_mode = mode; compute_mode != VOIDmode;
3928 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3929 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3930 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3931 break;
3932
3933 if (compute_mode == VOIDmode)
3934 for (compute_mode = mode; compute_mode != VOIDmode;
3935 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3936 if (optab1->handlers[compute_mode].libfunc
3937 || optab2->handlers[compute_mode].libfunc)
3938 break;
3939
3940 /* If we still couldn't find a mode, use MODE, but expand_binop will
3941 probably die. */
3942 if (compute_mode == VOIDmode)
3943 compute_mode = mode;
3944
3945 if (target && GET_MODE (target) == compute_mode)
3946 tquotient = target;
3947 else
3948 tquotient = gen_reg_rtx (compute_mode);
3949
3950 size = GET_MODE_BITSIZE (compute_mode);
3951 #if 0
3952 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3953 (mode), and thereby get better code when OP1 is a constant. Do that
3954 later. It will require going over all usages of SIZE below. */
3955 size = GET_MODE_BITSIZE (mode);
3956 #endif
3957
3958 /* Only deduct something for a REM if the last divide done was
3959 for a different constant. Then set the constant of the last
3960 divide. */
3961 max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
3962 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3963 && INTVAL (op1) == last_div_const))
3964 max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];
3965
3966 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3967
3968 /* Now convert to the best mode to use. */
3969 if (compute_mode != mode)
3970 {
3971 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3972 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3973
3974 /* convert_modes may have placed op1 into a register, so we
3975 must recompute the following. */
3976 op1_is_constant = GET_CODE (op1) == CONST_INT;
3977 op1_is_pow2 = (op1_is_constant
3978 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3979 || (! unsignedp
3980 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3981 }
3982
3983 /* If one of the operands is a volatile MEM, copy it into a register. */
3984
3985 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3986 op0 = force_reg (compute_mode, op0);
3987 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3988 op1 = force_reg (compute_mode, op1);
3989
3990 /* If we need the remainder or if OP1 is constant, we need to
3991 put OP0 in a register in case it has any queued subexpressions. */
3992 if (rem_flag || op1_is_constant)
3993 op0 = force_reg (compute_mode, op0);
3994
3995 last = get_last_insn ();
3996
3997 /* Promote floor rounding to trunc rounding for unsigned operations. */
3998 if (unsignedp)
3999 {
4000 if (code == FLOOR_DIV_EXPR)
4001 code = TRUNC_DIV_EXPR;
4002 if (code == FLOOR_MOD_EXPR)
4003 code = TRUNC_MOD_EXPR;
4004 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4005 code = TRUNC_DIV_EXPR;
4006 }
4007
4008 if (op1 != const0_rtx)
4009 switch (code)
4010 {
4011 case TRUNC_MOD_EXPR:
4012 case TRUNC_DIV_EXPR:
4013 if (op1_is_constant)
4014 {
4015 if (unsignedp)
4016 {
4017 unsigned HOST_WIDE_INT mh;
4018 int pre_shift, post_shift;
4019 int dummy;
4020 rtx ml;
4021 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4022 & GET_MODE_MASK (compute_mode));
4023
4024 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4025 {
4026 pre_shift = floor_log2 (d);
4027 if (rem_flag)
4028 {
4029 remainder
4030 = expand_binop (compute_mode, and_optab, op0,
4031 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4032 remainder, 1,
4033 OPTAB_LIB_WIDEN);
4034 if (remainder)
4035 return gen_lowpart (mode, remainder);
4036 }
4037 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4038 build_int_cst (NULL_TREE,
4039 pre_shift),
4040 tquotient, 1);
4041 }
4042 else if (size <= HOST_BITS_PER_WIDE_INT)
4043 {
4044 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4045 {
4046 /* Most significant bit of divisor is set; emit an scc
4047 insn. */
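/* (When the divisor's top bit is set, the unsigned quotient can only be
   0 or 1, and it is 1 exactly when OP0 >= the divisor; hence the GEU
   store flag.)  */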
4048 quotient = emit_store_flag (tquotient, GEU, op0, op1,
4049 compute_mode, 1, 1);
4050 if (quotient == 0)
4051 goto fail1;
4052 }
4053 else
4054 {
4055 /* Find a suitable multiplier and right shift count
4056 instead of multiplying with D. */
4057
4058 mh = choose_multiplier (d, size, size,
4059 &ml, &post_shift, &dummy);
4060
4061 /* If the suggested multiplier is more than SIZE bits,
4062 we can do better for even divisors, using an
4063 initial right shift. */
4064 if (mh != 0 && (d & 1) == 0)
4065 {
4066 pre_shift = floor_log2 (d & -d);
4067 mh = choose_multiplier (d >> pre_shift, size,
4068 size - pre_shift,
4069 &ml, &post_shift, &dummy);
4070 gcc_assert (!mh);
4071 }
4072 else
4073 pre_shift = 0;
4074
4075 if (mh != 0)
4076 {
4077 rtx t1, t2, t3, t4;
4078
4079 if (post_shift - 1 >= BITS_PER_WORD)
4080 goto fail1;
4081
4082 extra_cost
4083 = (shift_cost[compute_mode][post_shift - 1]
4084 + shift_cost[compute_mode][1]
4085 + 2 * add_cost[compute_mode]);
4086 t1 = expand_mult_highpart (compute_mode, op0, ml,
4087 NULL_RTX, 1,
4088 max_cost - extra_cost);
4089 if (t1 == 0)
4090 goto fail1;
4091 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4092 op0, t1),
4093 NULL_RTX);
4094 t3 = expand_shift
4095 (RSHIFT_EXPR, compute_mode, t2,
4096 build_int_cst (NULL_TREE, 1),
4097 NULL_RTX,1);
4098 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4099 t1, t3),
4100 NULL_RTX);
4101 quotient = expand_shift
4102 (RSHIFT_EXPR, compute_mode, t4,
4103 build_int_cst (NULL_TREE, post_shift - 1),
4104 tquotient, 1);
4105 }
4106 else
4107 {
4108 rtx t1, t2;
4109
4110 if (pre_shift >= BITS_PER_WORD
4111 || post_shift >= BITS_PER_WORD)
4112 goto fail1;
4113
4114 t1 = expand_shift
4115 (RSHIFT_EXPR, compute_mode, op0,
4116 build_int_cst (NULL_TREE, pre_shift),
4117 NULL_RTX, 1);
4118 extra_cost
4119 = (shift_cost[compute_mode][pre_shift]
4120 + shift_cost[compute_mode][post_shift]);
4121 t2 = expand_mult_highpart (compute_mode, t1, ml,
4122 NULL_RTX, 1,
4123 max_cost - extra_cost);
4124 if (t2 == 0)
4125 goto fail1;
4126 quotient = expand_shift
4127 (RSHIFT_EXPR, compute_mode, t2,
4128 build_int_cst (NULL_TREE, post_shift),
4129 tquotient, 1);
4130 }
4131 }
4132 }
4133 else /* Too wide mode to use tricky code */
4134 break;
4135
4136 insn = get_last_insn ();
4137 if (insn != last
4138 && (set = single_set (insn)) != 0
4139 && SET_DEST (set) == quotient)
4140 set_unique_reg_note (insn,
4141 REG_EQUAL,
4142 gen_rtx_UDIV (compute_mode, op0, op1));
4143 }
4144 else /* TRUNC_DIV, signed */
4145 {
4146 unsigned HOST_WIDE_INT ml;
4147 int lgup, post_shift;
4148 rtx mlr;
4149 HOST_WIDE_INT d = INTVAL (op1);
4150 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
4151
4152 /* n rem d = n rem -d */
4153 if (rem_flag && d < 0)
4154 {
4155 d = abs_d;
4156 op1 = gen_int_mode (abs_d, compute_mode);
4157 }
4158
4159 if (d == 1)
4160 quotient = op0;
4161 else if (d == -1)
4162 quotient = expand_unop (compute_mode, neg_optab, op0,
4163 tquotient, 0);
4164 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4165 {
4166 /* This case is not handled correctly below. */
4167 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4168 compute_mode, 1, 1);
4169 if (quotient == 0)
4170 goto fail1;
4171 }
4172 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4173 && (rem_flag ? smod_pow2_cheap[compute_mode]
4174 : sdiv_pow2_cheap[compute_mode])
4175 /* We assume that the cheap metric is true if the
4176 optab has an expander for this mode. */
4177 && (((rem_flag ? smod_optab : sdiv_optab)
4178 ->handlers[compute_mode].insn_code
4179 != CODE_FOR_nothing)
4180 || (sdivmod_optab->handlers[compute_mode]
4181 .insn_code != CODE_FOR_nothing)))
4182 ;
4183 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4184 {
4185 if (rem_flag)
4186 {
4187 remainder = expand_smod_pow2 (compute_mode, op0, d);
4188 if (remainder)
4189 return gen_lowpart (mode, remainder);
4190 }
4191
4192 if (sdiv_pow2_cheap[compute_mode]
4193 && ((sdiv_optab->handlers[compute_mode].insn_code
4194 != CODE_FOR_nothing)
4195 || (sdivmod_optab->handlers[compute_mode].insn_code
4196 != CODE_FOR_nothing)))
4197 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4198 compute_mode, op0,
4199 gen_int_mode (abs_d,
4200 compute_mode),
4201 NULL_RTX, 0);
4202 else
4203 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4204
4205 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4206 negate the quotient. */
4207 if (d < 0)
4208 {
4209 insn = get_last_insn ();
4210 if (insn != last
4211 && (set = single_set (insn)) != 0
4212 && SET_DEST (set) == quotient
4213 && abs_d < ((unsigned HOST_WIDE_INT) 1
4214 << (HOST_BITS_PER_WIDE_INT - 1)))
4215 set_unique_reg_note (insn,
4216 REG_EQUAL,
4217 gen_rtx_DIV (compute_mode,
4218 op0,
4219 GEN_INT
4220 (trunc_int_for_mode
4221 (abs_d,
4222 compute_mode))));
4223
4224 quotient = expand_unop (compute_mode, neg_optab,
4225 quotient, quotient, 0);
4226 }
4227 }
4228 else if (size <= HOST_BITS_PER_WIDE_INT)
4229 {
4230 choose_multiplier (abs_d, size, size - 1,
4231 &mlr, &post_shift, &lgup);
4232 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4233 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4234 {
4235 rtx t1, t2, t3;
4236
4237 if (post_shift >= BITS_PER_WORD
4238 || size - 1 >= BITS_PER_WORD)
4239 goto fail1;
4240
4241 extra_cost = (shift_cost[compute_mode][post_shift]
4242 + shift_cost[compute_mode][size - 1]
4243 + add_cost[compute_mode]);
4244 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4245 NULL_RTX, 0,
4246 max_cost - extra_cost);
4247 if (t1 == 0)
4248 goto fail1;
4249 t2 = expand_shift
4250 (RSHIFT_EXPR, compute_mode, t1,
4251 build_int_cst (NULL_TREE, post_shift),
4252 NULL_RTX, 0);
4253 t3 = expand_shift
4254 (RSHIFT_EXPR, compute_mode, op0,
4255 build_int_cst (NULL_TREE, size - 1),
4256 NULL_RTX, 0);
4257 if (d < 0)
4258 quotient
4259 = force_operand (gen_rtx_MINUS (compute_mode,
4260 t3, t2),
4261 tquotient);
4262 else
4263 quotient
4264 = force_operand (gen_rtx_MINUS (compute_mode,
4265 t2, t3),
4266 tquotient);
4267 }
4268 else
4269 {
4270 rtx t1, t2, t3, t4;
4271
4272 if (post_shift >= BITS_PER_WORD
4273 || size - 1 >= BITS_PER_WORD)
4274 goto fail1;
4275
4276 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4277 mlr = gen_int_mode (ml, compute_mode);
4278 extra_cost = (shift_cost[compute_mode][post_shift]
4279 + shift_cost[compute_mode][size - 1]
4280 + 2 * add_cost[compute_mode]);
4281 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4282 NULL_RTX, 0,
4283 max_cost - extra_cost);
4284 if (t1 == 0)
4285 goto fail1;
4286 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4287 t1, op0),
4288 NULL_RTX);
4289 t3 = expand_shift
4290 (RSHIFT_EXPR, compute_mode, t2,
4291 build_int_cst (NULL_TREE, post_shift),
4292 NULL_RTX, 0);
4293 t4 = expand_shift
4294 (RSHIFT_EXPR, compute_mode, op0,
4295 build_int_cst (NULL_TREE, size - 1),
4296 NULL_RTX, 0);
4297 if (d < 0)
4298 quotient
4299 = force_operand (gen_rtx_MINUS (compute_mode,
4300 t4, t3),
4301 tquotient);
4302 else
4303 quotient
4304 = force_operand (gen_rtx_MINUS (compute_mode,
4305 t3, t4),
4306 tquotient);
4307 }
4308 }
4309 else /* Too wide mode to use tricky code */
4310 break;
4311
4312 insn = get_last_insn ();
4313 if (insn != last
4314 && (set = single_set (insn)) != 0
4315 && SET_DEST (set) == quotient)
4316 set_unique_reg_note (insn,
4317 REG_EQUAL,
4318 gen_rtx_DIV (compute_mode, op0, op1));
4319 }
4320 break;
4321 }
4322 fail1:
4323 delete_insns_since (last);
4324 break;
4325
4326 case FLOOR_DIV_EXPR:
4327 case FLOOR_MOD_EXPR:
4328 /* We will come here only for signed operations. */
4329 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4330 {
4331 unsigned HOST_WIDE_INT mh;
4332 int pre_shift, lgup, post_shift;
4333 HOST_WIDE_INT d = INTVAL (op1);
4334 rtx ml;
4335
4336 if (d > 0)
4337 {
4338 /* We could just as easily deal with negative constants here,
4339 but it does not seem worth the trouble for GCC 2.6. */
4340 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4341 {
4342 pre_shift = floor_log2 (d);
4343 if (rem_flag)
4344 {
4345 remainder = expand_binop (compute_mode, and_optab, op0,
4346 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4347 remainder, 0, OPTAB_LIB_WIDEN);
4348 if (remainder)
4349 return gen_lowpart (mode, remainder);
4350 }
4351 quotient = expand_shift
4352 (RSHIFT_EXPR, compute_mode, op0,
4353 build_int_cst (NULL_TREE, pre_shift),
4354 tquotient, 0);
4355 }
4356 else
4357 {
4358 rtx t1, t2, t3, t4;
4359
4360 mh = choose_multiplier (d, size, size - 1,
4361 &ml, &post_shift, &lgup);
4362 gcc_assert (!mh);
4363
4364 if (post_shift < BITS_PER_WORD
4365 && size - 1 < BITS_PER_WORD)
4366 {
4367 t1 = expand_shift
4368 (RSHIFT_EXPR, compute_mode, op0,
4369 build_int_cst (NULL_TREE, size - 1),
4370 NULL_RTX, 0);
4371 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4372 NULL_RTX, 0, OPTAB_WIDEN);
4373 extra_cost = (shift_cost[compute_mode][post_shift]
4374 + shift_cost[compute_mode][size - 1]
4375 + 2 * add_cost[compute_mode]);
4376 t3 = expand_mult_highpart (compute_mode, t2, ml,
4377 NULL_RTX, 1,
4378 max_cost - extra_cost);
4379 if (t3 != 0)
4380 {
4381 t4 = expand_shift
4382 (RSHIFT_EXPR, compute_mode, t3,
4383 build_int_cst (NULL_TREE, post_shift),
4384 NULL_RTX, 1);
4385 quotient = expand_binop (compute_mode, xor_optab,
4386 t4, t1, tquotient, 0,
4387 OPTAB_WIDEN);
4388 }
4389 }
4390 }
4391 }
4392 else
4393 {
4394 rtx nsign, t1, t2, t3, t4;
4395 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4396 op0, constm1_rtx), NULL_RTX);
4397 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4398 0, OPTAB_WIDEN);
4399 nsign = expand_shift
4400 (RSHIFT_EXPR, compute_mode, t2,
4401 build_int_cst (NULL_TREE, size - 1),
4402 NULL_RTX, 0);
4403 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4404 NULL_RTX);
4405 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4406 NULL_RTX, 0);
4407 if (t4)
4408 {
4409 rtx t5;
4410 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4411 NULL_RTX, 0);
4412 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4413 t4, t5),
4414 tquotient);
4415 }
4416 }
4417 }
4418
4419 if (quotient != 0)
4420 break;
4421 delete_insns_since (last);
4422
4423 /* Try using an instruction that produces both the quotient and
4424 remainder, using truncation. We can easily compensate the quotient
4425 or remainder to get floor rounding, once we have the remainder.
4426 Notice that we also compute the final remainder value here,
4427 and return the result right away. */
4428 if (target == 0 || GET_MODE (target) != compute_mode)
4429 target = gen_reg_rtx (compute_mode);
4430
4431 if (rem_flag)
4432 {
4433 remainder
4434 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4435 quotient = gen_reg_rtx (compute_mode);
4436 }
4437 else
4438 {
4439 quotient
4440 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4441 remainder = gen_reg_rtx (compute_mode);
4442 }
4443
4444 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4445 quotient, remainder, 0))
4446 {
4447 /* This could be computed with a branch-less sequence.
4448 Save that for later. */
4449 rtx tem;
4450 rtx label = gen_label_rtx ();
4451 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4452 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4453 NULL_RTX, 0, OPTAB_WIDEN);
4454 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4455 expand_dec (quotient, const1_rtx);
4456 expand_inc (remainder, op1);
4457 emit_label (label);
4458 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4459 }
4460
4461 /* No luck with division elimination or divmod. Have to do it
4462 by conditionally adjusting op0 *and* the result. */
4463 {
4464 rtx label1, label2, label3, label4, label5;
4465 rtx adjusted_op0;
4466 rtx tem;
4467
4468 quotient = gen_reg_rtx (compute_mode);
4469 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4470 label1 = gen_label_rtx ();
4471 label2 = gen_label_rtx ();
4472 label3 = gen_label_rtx ();
4473 label4 = gen_label_rtx ();
4474 label5 = gen_label_rtx ();
4475 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4476 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4477 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4478 quotient, 0, OPTAB_LIB_WIDEN);
4479 if (tem != quotient)
4480 emit_move_insn (quotient, tem);
4481 emit_jump_insn (gen_jump (label5));
4482 emit_barrier ();
4483 emit_label (label1);
4484 expand_inc (adjusted_op0, const1_rtx);
4485 emit_jump_insn (gen_jump (label4));
4486 emit_barrier ();
4487 emit_label (label2);
4488 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4489 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4490 quotient, 0, OPTAB_LIB_WIDEN);
4491 if (tem != quotient)
4492 emit_move_insn (quotient, tem);
4493 emit_jump_insn (gen_jump (label5));
4494 emit_barrier ();
4495 emit_label (label3);
4496 expand_dec (adjusted_op0, const1_rtx);
4497 emit_label (label4);
4498 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4499 quotient, 0, OPTAB_LIB_WIDEN);
4500 if (tem != quotient)
4501 emit_move_insn (quotient, tem);
4502 expand_dec (quotient, const1_rtx);
4503 emit_label (label5);
4504 }
4505 break;
4506
4507 case CEIL_DIV_EXPR:
4508 case CEIL_MOD_EXPR:
4509 if (unsignedp)
4510 {
4511 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4512 {
4513 rtx t1, t2, t3;
4514 unsigned HOST_WIDE_INT d = INTVAL (op1);
4515 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4516 build_int_cst (NULL_TREE, floor_log2 (d)),
4517 tquotient, 1);
4518 t2 = expand_binop (compute_mode, and_optab, op0,
4519 GEN_INT (d - 1),
4520 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4521 t3 = gen_reg_rtx (compute_mode);
4522 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4523 compute_mode, 1, 1);
4524 if (t3 == 0)
4525 {
4526 rtx lab;
4527 lab = gen_label_rtx ();
4528 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4529 expand_inc (t1, const1_rtx);
4530 emit_label (lab);
4531 quotient = t1;
4532 }
4533 else
4534 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4535 t1, t3),
4536 tquotient);
4537 break;
4538 }
4539
4540 /* Try using an instruction that produces both the quotient and
4541 remainder, using truncation. We can easily compensate the
4542 quotient or remainder to get ceiling rounding, once we have the
4543 remainder. Notice that we also compute the final remainder
4544 value here, and return the result right away. */
4545 if (target == 0 || GET_MODE (target) != compute_mode)
4546 target = gen_reg_rtx (compute_mode);
4547
4548 if (rem_flag)
4549 {
4550 remainder = (REG_P (target)
4551 ? target : gen_reg_rtx (compute_mode));
4552 quotient = gen_reg_rtx (compute_mode);
4553 }
4554 else
4555 {
4556 quotient = (REG_P (target)
4557 ? target : gen_reg_rtx (compute_mode));
4558 remainder = gen_reg_rtx (compute_mode);
4559 }
4560
4561 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4562 remainder, 1))
4563 {
4564 /* This could be computed with a branch-less sequence.
4565 Save that for later. */
4566 rtx label = gen_label_rtx ();
4567 do_cmp_and_jump (remainder, const0_rtx, EQ,
4568 compute_mode, label);
4569 expand_inc (quotient, const1_rtx);
4570 expand_dec (remainder, op1);
4571 emit_label (label);
4572 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4573 }
4574
4575 /* No luck with division elimination or divmod. Have to do it
4576 by conditionally adjusting op0 *and* the result. */
4577 {
4578 rtx label1, label2;
4579 rtx adjusted_op0, tem;
4580
4581 quotient = gen_reg_rtx (compute_mode);
4582 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4583 label1 = gen_label_rtx ();
4584 label2 = gen_label_rtx ();
4585 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4586 compute_mode, label1);
4587 emit_move_insn (quotient, const0_rtx);
4588 emit_jump_insn (gen_jump (label2));
4589 emit_barrier ();
4590 emit_label (label1);
4591 expand_dec (adjusted_op0, const1_rtx);
4592 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4593 quotient, 1, OPTAB_LIB_WIDEN);
4594 if (tem != quotient)
4595 emit_move_insn (quotient, tem);
4596 expand_inc (quotient, const1_rtx);
4597 emit_label (label2);
4598 }
4599 }
4600 else /* signed */
4601 {
4602 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4603 && INTVAL (op1) >= 0)
4604 {
4605 /* This is extremely similar to the code for the unsigned case
4606 above. For 2.7 we should merge these variants, but for
4607 2.6.1 I don't want to touch the code for unsigned since that
4608 gets used in C. The signed case will only be used by other
4609 languages (Ada). */
4610
4611 rtx t1, t2, t3;
4612 unsigned HOST_WIDE_INT d = INTVAL (op1);
4613 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4614 build_int_cst (NULL_TREE, floor_log2 (d)),
4615 tquotient, 0);
4616 t2 = expand_binop (compute_mode, and_optab, op0,
4617 GEN_INT (d - 1),
4618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4619 t3 = gen_reg_rtx (compute_mode);
4620 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4621 compute_mode, 1, 1);
4622 if (t3 == 0)
4623 {
4624 rtx lab;
4625 lab = gen_label_rtx ();
4626 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4627 expand_inc (t1, const1_rtx);
4628 emit_label (lab);
4629 quotient = t1;
4630 }
4631 else
4632 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4633 t1, t3),
4634 tquotient);
4635 break;
4636 }
4637
4638 /* Try using an instruction that produces both the quotient and
4639 remainder, using truncation. We can easily compensate the
4640 quotient or remainder to get ceiling rounding, once we have the
4641 remainder. Notice that we also compute the final remainder
4642 value here, and return the result right away. */
4643 if (target == 0 || GET_MODE (target) != compute_mode)
4644 target = gen_reg_rtx (compute_mode);
4645 if (rem_flag)
4646 {
4647 remainder = (REG_P (target)
4648 ? target : gen_reg_rtx (compute_mode));
4649 quotient = gen_reg_rtx (compute_mode);
4650 }
4651 else
4652 {
4653 quotient = (REG_P (target)
4654 ? target : gen_reg_rtx (compute_mode));
4655 remainder = gen_reg_rtx (compute_mode);
4656 }
4657
4658 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4659 remainder, 0))
4660 {
4661 /* This could be computed with a branch-less sequence.
4662 Save that for later. */
4663 rtx tem;
4664 rtx label = gen_label_rtx ();
4665 do_cmp_and_jump (remainder, const0_rtx, EQ,
4666 compute_mode, label);
4667 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4668 NULL_RTX, 0, OPTAB_WIDEN);
4669 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4670 expand_inc (quotient, const1_rtx);
4671 expand_dec (remainder, op1);
4672 emit_label (label);
4673 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4674 }
4675
4676 /* No luck with division elimination or divmod. Have to do it
4677 by conditionally adjusting op0 *and* the result. */
4678 {
4679 rtx label1, label2, label3, label4, label5;
4680 rtx adjusted_op0;
4681 rtx tem;
4682
4683 quotient = gen_reg_rtx (compute_mode);
4684 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4685 label1 = gen_label_rtx ();
4686 label2 = gen_label_rtx ();
4687 label3 = gen_label_rtx ();
4688 label4 = gen_label_rtx ();
4689 label5 = gen_label_rtx ();
4690 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4691 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4692 compute_mode, label1);
4693 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4694 quotient, 0, OPTAB_LIB_WIDEN);
4695 if (tem != quotient)
4696 emit_move_insn (quotient, tem);
4697 emit_jump_insn (gen_jump (label5));
4698 emit_barrier ();
4699 emit_label (label1);
4700 expand_dec (adjusted_op0, const1_rtx);
4701 emit_jump_insn (gen_jump (label4));
4702 emit_barrier ();
4703 emit_label (label2);
4704 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4705 compute_mode, label3);
4706 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4707 quotient, 0, OPTAB_LIB_WIDEN);
4708 if (tem != quotient)
4709 emit_move_insn (quotient, tem);
4710 emit_jump_insn (gen_jump (label5));
4711 emit_barrier ();
4712 emit_label (label3);
4713 expand_inc (adjusted_op0, const1_rtx);
4714 emit_label (label4);
4715 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4716 quotient, 0, OPTAB_LIB_WIDEN);
4717 if (tem != quotient)
4718 emit_move_insn (quotient, tem);
4719 expand_inc (quotient, const1_rtx);
4720 emit_label (label5);
4721 }
4722 }
4723 break;
4724
4725 case EXACT_DIV_EXPR:
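/* Because the division is known to be exact, dividing by an odd divisor
   reduces to multiplying by its inverse modulo 2^SIZE; an even divisor is
   first reduced by shifting out its trailing zero bits.  Illustrative
   example: with SIZE = 32 and OP1 = 3 the inverse is 0xAAAAAAAB, and
   21 * 0xAAAAAAAB == 7 modulo 2^32.  */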
4726 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4727 {
4728 HOST_WIDE_INT d = INTVAL (op1);
4729 unsigned HOST_WIDE_INT ml;
4730 int pre_shift;
4731 rtx t1;
4732
4733 pre_shift = floor_log2 (d & -d);
4734 ml = invert_mod2n (d >> pre_shift, size);
4735 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4736 build_int_cst (NULL_TREE, pre_shift),
4737 NULL_RTX, unsignedp);
4738 quotient = expand_mult (compute_mode, t1,
4739 gen_int_mode (ml, compute_mode),
4740 NULL_RTX, 1);
4741
4742 insn = get_last_insn ();
4743 set_unique_reg_note (insn,
4744 REG_EQUAL,
4745 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4746 compute_mode,
4747 op0, op1));
4748 }
4749 break;
4750
4751 case ROUND_DIV_EXPR:
4752 case ROUND_MOD_EXPR:
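/* Round to nearest: take the truncating quotient and remainder, then bump
   the quotient towards the next value (and adjust the remainder by OP1)
   when the remainder is more than half of OP1 in magnitude.  E.g. in the
   unsigned case 8 / 3 truncates to 2 remainder 2; since 2 > (3 - 1) / 2,
   the rounded result is quotient 3, remainder -1.  */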
4753 if (unsignedp)
4754 {
4755 rtx tem;
4756 rtx label;
4757 label = gen_label_rtx ();
4758 quotient = gen_reg_rtx (compute_mode);
4759 remainder = gen_reg_rtx (compute_mode);
4760 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4761 {
4762 rtx tem;
4763 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4764 quotient, 1, OPTAB_LIB_WIDEN);
4765 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4766 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4767 remainder, 1, OPTAB_LIB_WIDEN);
4768 }
4769 tem = plus_constant (op1, -1);
4770 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4771 build_int_cst (NULL_TREE, 1),
4772 NULL_RTX, 1);
4773 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4774 expand_inc (quotient, const1_rtx);
4775 expand_dec (remainder, op1);
4776 emit_label (label);
4777 }
4778 else
4779 {
4780 rtx abs_rem, abs_op1, tem, mask;
4781 rtx label;
4782 label = gen_label_rtx ();
4783 quotient = gen_reg_rtx (compute_mode);
4784 remainder = gen_reg_rtx (compute_mode);
4785 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4786 {
4787 rtx tem;
4788 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4789 quotient, 0, OPTAB_LIB_WIDEN);
4790 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4791 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4792 remainder, 0, OPTAB_LIB_WIDEN);
4793 }
4794 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4795 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4796 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4797 build_int_cst (NULL_TREE, 1),
4798 NULL_RTX, 1);
4799 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4800 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4801 NULL_RTX, 0, OPTAB_WIDEN);
4802 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4803 build_int_cst (NULL_TREE, size - 1),
4804 NULL_RTX, 0);
4805 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4806 NULL_RTX, 0, OPTAB_WIDEN);
4807 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4808 NULL_RTX, 0, OPTAB_WIDEN);
4809 expand_inc (quotient, tem);
4810 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4811 NULL_RTX, 0, OPTAB_WIDEN);
4812 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4813 NULL_RTX, 0, OPTAB_WIDEN);
4814 expand_dec (remainder, tem);
4815 emit_label (label);
4816 }
4817 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4818
4819 default:
4820 gcc_unreachable ();
4821 }
4822
4823 if (quotient == 0)
4824 {
4825 if (target && GET_MODE (target) != compute_mode)
4826 target = 0;
4827
4828 if (rem_flag)
4829 {
4830 /* Try to produce the remainder without producing the quotient.
4831 If we seem to have a divmod pattern that does not require widening,
4832 don't try widening here. We should really have a WIDEN argument
4833 to expand_twoval_binop, since what we'd really like to do here is
4834 1) try a mod insn in compute_mode
4835 2) try a divmod insn in compute_mode
4836 3) try a div insn in compute_mode and multiply-subtract to get
4837 remainder
4838 4) try the same things with widening allowed. */
4839 remainder
4840 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4841 op0, op1, target,
4842 unsignedp,
4843 ((optab2->handlers[compute_mode].insn_code
4844 != CODE_FOR_nothing)
4845 ? OPTAB_DIRECT : OPTAB_WIDEN));
4846 if (remainder == 0)
4847 {
4848 /* No luck there. Can we do remainder and divide at once
4849 without a library call? */
4850 remainder = gen_reg_rtx (compute_mode);
4851 if (! expand_twoval_binop ((unsignedp
4852 ? udivmod_optab
4853 : sdivmod_optab),
4854 op0, op1,
4855 NULL_RTX, remainder, unsignedp))
4856 remainder = 0;
4857 }
4858
4859 if (remainder)
4860 return gen_lowpart (mode, remainder);
4861 }
4862
4863 /* Produce the quotient. Try a quotient insn, but not a library call.
4864 If we have a divmod in this mode, use it in preference to widening
4865 the div (for this test we assume it will not fail). Note that optab2
4866 is set to whichever of the two optabs the call below will use. */
4867 quotient
4868 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4869 op0, op1, rem_flag ? NULL_RTX : target,
4870 unsignedp,
4871 ((optab2->handlers[compute_mode].insn_code
4872 != CODE_FOR_nothing)
4873 ? OPTAB_DIRECT : OPTAB_WIDEN));
4874
4875 if (quotient == 0)
4876 {
4877 /* No luck there. Try a quotient-and-remainder insn,
4878 keeping the quotient alone. */
4879 quotient = gen_reg_rtx (compute_mode);
4880 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4881 op0, op1,
4882 quotient, NULL_RTX, unsignedp))
4883 {
4884 quotient = 0;
4885 if (! rem_flag)
4886 /* Still no luck. If we are not computing the remainder,
4887 use a library call for the quotient. */
4888 quotient = sign_expand_binop (compute_mode,
4889 udiv_optab, sdiv_optab,
4890 op0, op1, target,
4891 unsignedp, OPTAB_LIB_WIDEN);
4892 }
4893 }
4894 }
4895
4896 if (rem_flag)
4897 {
4898 if (target && GET_MODE (target) != compute_mode)
4899 target = 0;
4900
4901 if (quotient == 0)
4902 {
4903 /* No divide instruction either. Use library for remainder. */
4904 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4905 op0, op1, target,
4906 unsignedp, OPTAB_LIB_WIDEN);
4907 /* No remainder function. Try a quotient-and-remainder
4908 function, keeping the remainder. */
4909 if (!remainder)
4910 {
4911 remainder = gen_reg_rtx (compute_mode);
4912 if (!expand_twoval_binop_libfunc
4913 (unsignedp ? udivmod_optab : sdivmod_optab,
4914 op0, op1,
4915 NULL_RTX, remainder,
4916 unsignedp ? UMOD : MOD))
4917 remainder = NULL_RTX;
4918 }
4919 }
4920 else
4921 {
4922 /* We divided. Now finish doing X - Y * (X / Y). */
4923 remainder = expand_mult (compute_mode, quotient, op1,
4924 NULL_RTX, unsignedp);
4925 remainder = expand_binop (compute_mode, sub_optab, op0,
4926 remainder, target, unsignedp,
4927 OPTAB_LIB_WIDEN);
4928 }
4929 }
4930
4931 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4932 }
4933 \f
4934 /* Return a tree node with data type TYPE, describing the value of X.
4935 Usually this is a VAR_DECL, if there is no obvious better choice.
4936 X may be an expression; however, we only support those expressions
4937 generated by loop.c. */
4938
4939 tree
4940 make_tree (tree type, rtx x)
4941 {
4942 tree t;
4943
4944 switch (GET_CODE (x))
4945 {
4946 case CONST_INT:
4947 {
4948 HOST_WIDE_INT hi = 0;
4949
4950 if (INTVAL (x) < 0
4951 && !(TYPE_UNSIGNED (type)
4952 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4953 < HOST_BITS_PER_WIDE_INT)))
4954 hi = -1;
4955
4956 t = build_int_cst_wide (type, INTVAL (x), hi);
4957
4958 return t;
4959 }
4960
4961 case CONST_DOUBLE:
4962 if (GET_MODE (x) == VOIDmode)
4963 t = build_int_cst_wide (type,
4964 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4965 else
4966 {
4967 REAL_VALUE_TYPE d;
4968
4969 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4970 t = build_real (type, d);
4971 }
4972
4973 return t;
4974
4975 case CONST_VECTOR:
4976 {
4977 int i, units;
4978 rtx elt;
4979 tree t = NULL_TREE;
4980
4981 units = CONST_VECTOR_NUNITS (x);
4982
4983 /* Build a tree with vector elements. */
4984 for (i = units - 1; i >= 0; --i)
4985 {
4986 elt = CONST_VECTOR_ELT (x, i);
4987 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4988 }
4989
4990 return build_vector (type, t);
4991 }
4992
4993 case PLUS:
4994 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4995 make_tree (type, XEXP (x, 1)));
4996
4997 case MINUS:
4998 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4999 make_tree (type, XEXP (x, 1)));
5000
5001 case NEG:
5002 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5003
5004 case MULT:
5005 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5006 make_tree (type, XEXP (x, 1)));
5007
5008 case ASHIFT:
5009 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5010 make_tree (type, XEXP (x, 1)));
5011
5012 case LSHIFTRT:
5013 t = lang_hooks.types.unsigned_type (type);
5014 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5015 make_tree (t, XEXP (x, 0)),
5016 make_tree (type, XEXP (x, 1))));
5017
5018 case ASHIFTRT:
5019 t = lang_hooks.types.signed_type (type);
5020 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5021 make_tree (t, XEXP (x, 0)),
5022 make_tree (type, XEXP (x, 1))));
5023
5024 case DIV:
5025 if (TREE_CODE (type) != REAL_TYPE)
5026 t = lang_hooks.types.signed_type (type);
5027 else
5028 t = type;
5029
5030 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5031 make_tree (t, XEXP (x, 0)),
5032 make_tree (t, XEXP (x, 1))));
5033 case UDIV:
5034 t = lang_hooks.types.unsigned_type (type);
5035 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5036 make_tree (t, XEXP (x, 0)),
5037 make_tree (t, XEXP (x, 1))));
5038
5039 case SIGN_EXTEND:
5040 case ZERO_EXTEND:
5041 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5042 GET_CODE (x) == ZERO_EXTEND);
5043 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5044
5045 default:
5046 t = build_decl (VAR_DECL, NULL_TREE, type);
5047
5048 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5049 ptr_mode. So convert. */
5050 if (POINTER_TYPE_P (type))
5051 x = convert_memory_address (TYPE_MODE (type), x);
5052
5053 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5054 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5055 t->decl_with_rtl.rtl = x;
5056
5057 return t;
5058 }
5059 }
5060 \f
5061 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5062 and returning TARGET.
5063
5064 If TARGET is 0, a pseudo-register or constant is returned. */
5065
5066 rtx
5067 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5068 {
5069 rtx tem = 0;
5070
5071 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5072 tem = simplify_binary_operation (AND, mode, op0, op1);
5073 if (tem == 0)
5074 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5075
5076 if (target == 0)
5077 target = tem;
5078 else if (tem != target)
5079 emit_move_insn (target, tem);
5080 return target;
5081 }
5082 \f
5083 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5084 and storing in TARGET. Normally return TARGET.
5085 Return 0 if that cannot be done.
5086
5087 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5088 it is VOIDmode, they cannot both be CONST_INT.
5089
5090 UNSIGNEDP is for the case where we have to widen the operands
5091 to perform the operation. It says to use zero-extension.
5092
5093 NORMALIZEP is 1 if we should convert the result to be either zero
5094 or one.  NORMALIZEP is -1 if we should convert the result to be
5095 either zero or -1. If NORMALIZEP is zero, the result will be left
5096 "raw" out of the scc insn. */
5097
5098 rtx
5099 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5100 enum machine_mode mode, int unsignedp, int normalizep)
5101 {
5102 rtx subtarget;
5103 enum insn_code icode;
5104 enum machine_mode compare_mode;
5105 enum machine_mode target_mode = GET_MODE (target);
5106 rtx tem;
5107 rtx last = get_last_insn ();
5108 rtx pattern, comparison;
5109
5110 if (unsignedp)
5111 code = unsigned_condition (code);
5112
5113 /* If one operand is constant, make it the second one. Only do this
5114 if the other operand is not constant as well. */
5115
5116 if (swap_commutative_operands_p (op0, op1))
5117 {
5118 tem = op0;
5119 op0 = op1;
5120 op1 = tem;
5121 code = swap_condition (code);
5122 }
5123
5124 if (mode == VOIDmode)
5125 mode = GET_MODE (op0);
5126
5127 /* For some comparisons with 1 and -1, we can convert this to
5128 comparisons with zero. This will often produce more opportunities for
5129 store-flag insns. */
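/* For instance, the rewrites performed by the switch below include:
   the signed test "x < 1" becomes "x <= 0", "x > -1" becomes "x >= 0",
   and the unsigned test "x >= 1" (GEU against const1_rtx) becomes
   "x != 0", since 0 is the only unsigned value below 1.  */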
5130
5131 switch (code)
5132 {
5133 case LT:
5134 if (op1 == const1_rtx)
5135 op1 = const0_rtx, code = LE;
5136 break;
5137 case LE:
5138 if (op1 == constm1_rtx)
5139 op1 = const0_rtx, code = LT;
5140 break;
5141 case GE:
5142 if (op1 == const1_rtx)
5143 op1 = const0_rtx, code = GT;
5144 break;
5145 case GT:
5146 if (op1 == constm1_rtx)
5147 op1 = const0_rtx, code = GE;
5148 break;
5149 case GEU:
5150 if (op1 == const1_rtx)
5151 op1 = const0_rtx, code = NE;
5152 break;
5153 case LTU:
5154 if (op1 == const1_rtx)
5155 op1 = const0_rtx, code = EQ;
5156 break;
5157 default:
5158 break;
5159 }
5160
5161 /* If we are comparing a double-word integer with zero or -1, we can
5162 convert the comparison into one involving a single word. */
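/* Sketch of the reduction, assuming 32-bit words and a 64-bit operand X
   with low word LO and high word HI (illustration only):

     X == 0    becomes   (LO | HI) == 0
     X == -1   becomes   (LO & HI) == -1
     X <  0    becomes   HI < 0            (the sign bit lives in HI)

   The OR/AND is built with expand_binop below and the reduced test is
   handed back to emit_store_flag recursively in word_mode.  */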
5163 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5164 && GET_MODE_CLASS (mode) == MODE_INT
5165 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5166 {
5167 if ((code == EQ || code == NE)
5168 && (op1 == const0_rtx || op1 == constm1_rtx))
5169 {
5170 rtx op00, op01, op0both;
5171
5172 /* Do a logical OR or AND of the two words and compare the result. */
5173 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5174 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5175 op0both = expand_binop (word_mode,
5176 op1 == const0_rtx ? ior_optab : and_optab,
5177 op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
5178
5179 if (op0both != 0)
5180 return emit_store_flag (target, code, op0both, op1, word_mode,
5181 unsignedp, normalizep);
5182 }
5183 else if ((code == LT || code == GE) && op1 == const0_rtx)
5184 {
5185 rtx op0h;
5186
5187 /* If testing the sign bit, we can just test the high word.  */
5188 op0h = simplify_gen_subreg (word_mode, op0, mode,
5189 subreg_highpart_offset (word_mode, mode));
5190 return emit_store_flag (target, code, op0h, op1, word_mode,
5191 unsignedp, normalizep);
5192 }
5193 }
5194
5195 /* From now on, we won't change CODE, so set ICODE now. */
5196 icode = setcc_gen_code[(int) code];
5197
5198 /* If this is A < 0 or A >= 0, we can do this by taking the one's
5199 complement of A (for GE) and shifting the sign bit to the low bit. */
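/* Worked example, assuming a 32-bit MODE and STORE_FLAG_VALUE == 1
   (illustration only):

     A <  0   becomes   (unsigned) A >> 31     1 iff the sign bit is set
     A >= 0   becomes   (unsigned) ~A >> 31    complementing flips the
                                               sign bit first

   For a 0/-1 result the shift below is arithmetic instead, smearing the
   sign bit across the whole word.  */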
5200 if (op1 == const0_rtx && (code == LT || code == GE)
5201 && GET_MODE_CLASS (mode) == MODE_INT
5202 && (normalizep || STORE_FLAG_VALUE == 1
5203 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5204 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5205 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
5206 {
5207 subtarget = target;
5208
5209 /* If the result is to be wider than OP0, it is best to convert it
5210 first. If it is to be narrower, it is *incorrect* to convert it
5211 first. */
5212 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5213 {
5214 op0 = convert_modes (target_mode, mode, op0, 0);
5215 mode = target_mode;
5216 }
5217
5218 if (target_mode != mode)
5219 subtarget = 0;
5220
5221 if (code == GE)
5222 op0 = expand_unop (mode, one_cmpl_optab, op0,
5223 ((STORE_FLAG_VALUE == 1 || normalizep)
5224 ? 0 : subtarget), 0);
5225
5226 if (STORE_FLAG_VALUE == 1 || normalizep)
5227 /* If we are supposed to produce a 0/1 value, we want to do
5228 a logical shift from the sign bit to the low-order bit; for
5229 a -1/0 value, we do an arithmetic shift. */
5230 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5231 size_int (GET_MODE_BITSIZE (mode) - 1),
5232 subtarget, normalizep != -1);
5233
5234 if (mode != target_mode)
5235 op0 = convert_modes (target_mode, mode, op0, 0);
5236
5237 return op0;
5238 }
5239
5240 if (icode != CODE_FOR_nothing)
5241 {
5242 insn_operand_predicate_fn pred;
5243
5244 /* We think we may be able to do this with a scc insn. Emit the
5245 comparison and then the scc insn. */
5246
5247 do_pending_stack_adjust ();
5248 last = get_last_insn ();
5249
5250 comparison
5251 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5252 if (CONSTANT_P (comparison))
5253 {
5254 switch (GET_CODE (comparison))
5255 {
5256 case CONST_INT:
5257 if (comparison == const0_rtx)
5258 return const0_rtx;
5259 break;
5260
5261 #ifdef FLOAT_STORE_FLAG_VALUE
5262 case CONST_DOUBLE:
5263 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5264 return const0_rtx;
5265 break;
5266 #endif
5267 default:
5268 gcc_unreachable ();
5269 }
5270
5271 if (normalizep == 1)
5272 return const1_rtx;
5273 if (normalizep == -1)
5274 return constm1_rtx;
5275 return const_true_rtx;
5276 }
5277
5278 /* The code of COMPARISON may not match CODE if compare_from_rtx
5279 decided to swap its operands and reverse the original code.
5280
5281 We know that compare_from_rtx returns either a CONST_INT or
5282 a new comparison code, so it is safe to just extract the
5283 code from COMPARISON. */
5284 code = GET_CODE (comparison);
5285
5286 /* Get a reference to the target in the proper mode for this insn. */
5287 compare_mode = insn_data[(int) icode].operand[0].mode;
5288 subtarget = target;
5289 pred = insn_data[(int) icode].operand[0].predicate;
5290 if (optimize || ! (*pred) (subtarget, compare_mode))
5291 subtarget = gen_reg_rtx (compare_mode);
5292
5293 pattern = GEN_FCN (icode) (subtarget);
5294 if (pattern)
5295 {
5296 emit_insn (pattern);
5297
5298 /* If we are converting to a wider mode, first convert to
5299 TARGET_MODE, then normalize. This produces better combining
5300 opportunities on machines that have a SIGN_EXTRACT when we are
5301 testing a single bit. This mostly benefits the 68k.
5302
5303 If STORE_FLAG_VALUE does not have the sign bit set when
5304 interpreted in COMPARE_MODE, we can do this conversion as
5305 unsigned, which is usually more efficient. */
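/* For example (illustration only), if COMPARE_MODE is QImode and
   STORE_FLAG_VALUE is 1, bit 7 of the result is clear, so zero
   extension to TARGET_MODE preserves the value and convert_move is
   called with a nonzero UNSIGNEDP argument below.  */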
5306 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5307 {
5308 convert_move (target, subtarget,
5309 (GET_MODE_BITSIZE (compare_mode)
5310 <= HOST_BITS_PER_WIDE_INT)
5311 && 0 == (STORE_FLAG_VALUE
5312 & ((HOST_WIDE_INT) 1
5313 << (GET_MODE_BITSIZE (compare_mode) -1))));
5314 op0 = target;
5315 compare_mode = target_mode;
5316 }
5317 else
5318 op0 = subtarget;
5319
5320 /* If we want to keep subexpressions around, don't reuse our
5321 last target. */
5322
5323 if (optimize)
5324 subtarget = 0;
5325
5326 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
5327 we don't have to do anything. */
5328 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5329 ;
5330 /* STORE_FLAG_VALUE might be the most negative number, so write
5331 the comparison this way to avoid a compile-time warning.  */
5332 else if (- normalizep == STORE_FLAG_VALUE)
5333 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
5334
5335 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
5336 makes it hard to use a value of just the sign bit due to
5337 ANSI integer constant typing rules. */
5338 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
5339 && (STORE_FLAG_VALUE
5340 & ((HOST_WIDE_INT) 1
5341 << (GET_MODE_BITSIZE (compare_mode) - 1))))
5342 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
5343 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
5344 subtarget, normalizep == 1);
5345 else
5346 {
5347 gcc_assert (STORE_FLAG_VALUE & 1);
5348
5349 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
5350 if (normalizep == -1)
5351 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
5352 }
5353
5354 /* If we were converting to a smaller mode, do the
5355 conversion now. */
5356 if (target_mode != compare_mode)
5357 {
5358 convert_move (target, op0, 0);
5359 return target;
5360 }
5361 else
5362 return op0;
5363 }
5364 }
5365
5366 delete_insns_since (last);
5367
5368 /* If optimizing, use different pseudo registers for each insn, instead
5369 of reusing the same pseudo. This leads to better CSE, but slows
5370 down the compiler, since there are more pseudos.  */
5371 subtarget = (!optimize
5372 && (target_mode == mode)) ? target : NULL_RTX;
5373
5374 /* If we reached here, we can't do this with a scc insn. However, there
5375 are some comparisons that can be done directly. For example, if
5376 this is an equality comparison of integers, we can try to exclusive-or
5377 (or subtract) the two operands and use a recursive call to try the
5378 comparison with zero. Don't do any of these cases if branches are
5379 very cheap. */
5380
5381 if (BRANCH_COST > 0
5382 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5383 && op1 != const0_rtx)
5384 {
5385 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5386 OPTAB_WIDEN);
5387
5388 if (tem == 0)
5389 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5390 OPTAB_WIDEN);
5391 if (tem != 0)
5392 tem = emit_store_flag (target, code, tem, const0_rtx,
5393 mode, unsignedp, normalizep);
5394 if (tem == 0)
5395 delete_insns_since (last);
5396 return tem;
5397 }
5398
5399 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5400 the constant zero. Reject all other comparisons at this point. Only
5401 do LE and GT if branches are expensive since they are expensive on
5402 2-operand machines. */
5403
5404 if (BRANCH_COST == 0
5405 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5406 || (code != EQ && code != NE
5407 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5408 return 0;
5409
5410 /* See what we need to return. We can only return a 1, -1, or the
5411 sign bit. */
5412
5413 if (normalizep == 0)
5414 {
5415 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5416 normalizep = STORE_FLAG_VALUE;
5417
5418 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5419 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5420 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5421 ;
5422 else
5423 return 0;
5424 }
5425
5426 /* Try to put the result of the comparison in the sign bit. Assume we can't
5427 do the necessary operation below. */
5428
5429 tem = 0;
5430
5431 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5432 the sign bit set. */
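/* Worked example, using 8-bit values for brevity (illustration only):

     A =  0:   0 | -1  = 0xff   sign bit set,   A <= 0 holds
     A =  3:   3 |  2  = 0x03   sign bit clear, A <= 0 fails
     A = -5:  -5 | -6  = 0xfb   sign bit set,   A <= 0 holds

   A negative A supplies the sign bit itself, A == 0 borrows it from
   A - 1 == -1, and a positive A keeps both operands nonnegative.  */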
5433
5434 if (code == LE)
5435 {
5436 /* This is destructive, so SUBTARGET can't be OP0. */
5437 if (rtx_equal_p (subtarget, op0))
5438 subtarget = 0;
5439
5440 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5441 OPTAB_WIDEN);
5442 if (tem)
5443 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5444 OPTAB_WIDEN);
5445 }
5446
5447 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5448 number of bits in the mode of OP0, minus one. */
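/* Worked example, using 8-bit values so BITS is 7 (illustration only):

     A =  3:   (3 >> 7)  - 3    =  0 - 3  = -3   sign bit set,   A > 0 holds
     A =  0:   (0 >> 7)  - 0    =  0             sign bit clear
     A = -2:   (-2 >> 7) - (-2) = -1 + 2  =  1   sign bit clear, A > 0 fails

   The arithmetic right shift yields 0 for nonnegative A and -1 for
   negative A, so the difference is negative exactly when A is positive.  */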
5449
5450 if (code == GT)
5451 {
5452 if (rtx_equal_p (subtarget, op0))
5453 subtarget = 0;
5454
5455 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5456 size_int (GET_MODE_BITSIZE (mode) - 1),
5457 subtarget, 0);
5458 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5459 OPTAB_WIDEN);
5460 }
5461
5462 if (code == EQ || code == NE)
5463 {
5464 /* For EQ or NE, one way to do the comparison is to apply an operation
5465 that converts the operand into a positive number if it is nonzero
5466 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5467 for NE we negate. This puts the result in the sign bit. Then we
5468 normalize with a shift, if needed.
5469
5470 Two operations that can do the above actions are ABS and FFS, so try
5471 them. If that doesn't work, and MODE is smaller than a full word,
5472 we can use zero-extension to the wider mode (an unsigned conversion)
5473 as the operation. */
5474
5475 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5476 that is compensated by the subsequent overflow when subtracting
5477 one / negating. */
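/* Worked example (illustration only): with abs (x) positive for any
   nonzero x (or INT_MIN, see the note above),

     EQ:  abs (x) - 1   has the sign bit set exactly when x == 0,
                        since 0 - 1 == -1;
     NE:  - abs (x)     has the sign bit set exactly when x != 0.

   The shift further below then turns that sign bit into a 0/1 or 0/-1
   value when NORMALIZEP asks for it.  */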
5478
5479 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5480 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5481 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5482 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5483 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5484 {
5485 tem = convert_modes (word_mode, mode, op0, 1);
5486 mode = word_mode;
5487 }
5488
5489 if (tem != 0)
5490 {
5491 if (code == EQ)
5492 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5493 0, OPTAB_WIDEN);
5494 else
5495 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5496 }
5497
5498 /* If we couldn't do it that way, for NE we can "or" the two's complement
5499 of the value with itself. For EQ, we take the one's complement of
5500 that "or", which is an extra insn, so we only handle EQ if branches
5501 are expensive. */
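/* Worked example, using 8-bit values (illustration only):

     x =  0:    0 |  0  = 0x00   sign bit clear
     x =  4:   -4 |  4  = 0xfc   sign bit set
     x = -3:    3 | -3  = 0xff   sign bit set

   so the sign bit of (-x | x) is set exactly when x != 0; the one's
   complement mentioned above turns that into the EQ form.  */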
5502
5503 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5504 {
5505 if (rtx_equal_p (subtarget, op0))
5506 subtarget = 0;
5507
5508 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5509 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5510 OPTAB_WIDEN);
5511
5512 if (tem && code == EQ)
5513 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5514 }
5515 }
5516
5517 if (tem && normalizep)
5518 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5519 size_int (GET_MODE_BITSIZE (mode) - 1),
5520 subtarget, normalizep == 1);
5521
5522 if (tem)
5523 {
5524 if (GET_MODE (tem) != target_mode)
5525 {
5526 convert_move (target, tem, 0);
5527 tem = target;
5528 }
5529 else if (!subtarget)
5530 {
5531 emit_move_insn (target, tem);
5532 tem = target;
5533 }
5534 }
5535 else
5536 delete_insns_since (last);
5537
5538 return tem;
5539 }
5540
5541 /* Like emit_store_flag, but always succeeds. */
5542
5543 rtx
5544 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5545 enum machine_mode mode, int unsignedp, int normalizep)
5546 {
5547 rtx tem, label;
5548
5549 /* First see if emit_store_flag can do the job. */
5550 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5551 if (tem != 0)
5552 return tem;
5553
5554 if (normalizep == 0)
5555 normalizep = 1;
5556
5557 /* If this failed, we have to do this with set/compare/jump/set code. */
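/* The emitted sequence corresponds to this source-level sketch
   (illustration only):

     target = 1;
     if (op0 CODE op1)
       goto label;
     target = 0;
   label:

   i.e. TARGET is preset to 1 and only overwritten with 0 on the
   fall-through path where the comparison is false.  */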
5558
5559 if (!REG_P (target)
5560 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5561 target = gen_reg_rtx (GET_MODE (target));
5562
5563 emit_move_insn (target, const1_rtx);
5564 label = gen_label_rtx ();
5565 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5566 NULL_RTX, label);
5567
5568 emit_move_insn (target, const0_rtx);
5569 emit_label (label);
5570
5571 return target;
5572 }
5573 \f
5574 /* Perform possibly multi-word comparison and conditional jump to LABEL
5575 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
5576 now a thin wrapper around do_compare_rtx_and_jump. */
5577
5578 static void
5579 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5580 rtx label)
5581 {
5582 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5583 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5584 NULL_RTX, NULL_RTX, label);
5585 }