/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
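/* For instance, x = 8 is 1000 in binary and x - 1 is 0111, so the AND
   yields 0; for a non-power such as x = 6 (0110), x - 1 is 0101 and the
   AND leaves 0100.  Note that x = 0 also passes the test, hence the
   name.  */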

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def div;		rtunion div_fld1;
    struct rtx_def mod;		rtunion mod_fld1;
    struct rtx_def zext;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub;	rtunion shift_sub_fld1;
  } all;
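  /* ALL is a stack-allocated scratch block of RTL skeletons; each pattern
     below (e.g. (plus (reg) (reg))) is wired up once and then re-priced
     with rtx_cost for every integer mode, avoiding a fresh RTL allocation
     per query.  */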

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.div, DIV);
  XEXP (&all.div, 0) = &all.reg;
  XEXP (&all.div, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.mod, MOD);
  XEXP (&all.mod, 0) = &all.reg;
  XEXP (&all.mod, 1) = XEXP (&all.div, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.div, mode);
      PUT_MODE (&all.mod, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      div_cost[mode] = rtx_cost (&all.udiv, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.div, SET) <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.mod, SET) <= 4 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  PUT_MODE (&all.zext, wider_mode);
	  PUT_MODE (&all.wide_mult, wider_mode);
	  PUT_MODE (&all.wide_lshr, wider_mode);
	  XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

	  mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
	  mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
	}

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
	{
	  XEXP (&all.shift, 1) = cint[m];
	  XEXP (&all.shift_mult, 1) = pow2[m];

	  shift_cost[mode][m] = rtx_cost (&all.shift, SET);
	  shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
	  shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
	}
    }
}
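/* These tables are read back by the expanders in this file; for example,
   the multiply synthesis code weighs shift/add sequences priced from
   shift_cost, shiftadd_cost and add_cost against mul_cost before picking
   a strategy.  */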

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

\f
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, we use BITS_PER_WORD;
   otherwise, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return value;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
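  /* The conditions below require the store to replace exactly one vector
     element: FIELDMODE must be the element mode, BITSIZE the element
     width, and BITNUM a multiple of that width.  */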
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
	  != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return dest;
	}
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);
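  /* For a MEM target, UNIT is BITS_PER_UNIT; e.g. bitnum = 21 splits into
     offset = 2 bytes and bitpos = 5 bits within that byte.  For a REG,
     UNIT is BITS_PER_WORD and the split is per word instead.  */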

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (MEM_P (op0))
	    op0 = adjust_address (op0, fieldmode, offset);
	  else
	    op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				       byte_offset);
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  op0 = SUBREG_REG (op0);
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force
	 is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum, fieldmode));
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in fits
     within a single word; the multi-word case was handled above.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    {
	      /* Since this is a destination (lvalue), we can't copy
		 it to a pseudo.  We can remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may
		 have been added above.  */
	      gcc_assert (GET_CODE (op0) == SUBREG
			  && (GET_MODE_SIZE (GET_MODE (op0))
			      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
	      op0 = SUBREG_REG (op0);
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      if (MEM_P (op0)
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (maxmode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), maxmode);
	  else
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    gcc_assert (CONSTANT_P (value));
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
\f
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)  */
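/* In effect this computes
     OP0 = (OP0 & ~(MASK << BITPOS)) | ((VALUE & MASK) << BITPOS)
   where MASK has BITSIZE low-order one bits; the AND is skipped when
   VALUE is known to be all ones and the IOR when it is zero.  */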

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;
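  /* E.g. with TOTAL_BITS = 32, an 8-bit field whose msb sits 4 bits below
     the datum's msb has big-endian BITPOS 4; it becomes 32 - 8 - 4 = 20,
     the distance of the field's lsb from the datum's lsb.  */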

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
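  /* For example, storing the constant 5 into a 3-bit field at BITPOS 4
     masks V to 5 (101 in binary, neither all zeros nor all ones) and
     shifts it to 0x50, ready to be IORed into the containing word.  */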
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((REG_P (value) || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = op0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
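      /* E.g. with UNIT = 8, BITPOS = 6 and BITSIZE = 5, the first pass
	 stores the 2 bits left in the first byte and the second pass the
	 remaining 3 bits of the next byte.  */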

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (!MEM_P (value) || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
\f
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
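  /* The last conjunct below checks that the field lies within a single
     vector element: its first and last bits index the same element.  */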
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
	  != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
	dest = target;

      if (!dest)
	dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
	dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return dest;
	}
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
	   ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	   : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((!MEM_P (op0)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (MEM_P (op0)
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (MEM_P (op0))
	    op0 = adjust_address (op0, mode1, offset);
	  else
	    {
	      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
					     byte_offset);
	      if (sub == NULL)
		goto no_subreg_mode_swap;
	      op0 = sub;
	    }
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode);

	  gcc_assert (target_part);

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
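      /* E.g. extracting a signed 5-bit field into a 32-bit mode: shift
	 left by 27 so the field's sign bit becomes bit 31, then shift
	 right arithmetically by 27 to smear it back down.  */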
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_cst (NULL_TREE,
					    GET_MODE_BITSIZE (mode) - bitsize),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_cst (NULL_TREE,
					  GET_MODE_BITSIZE (mode) - bitsize),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (MEM_P (xop0))
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0)
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (REG_P (xtarget))
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
      extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (MEM_P (xop0))
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0)
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (REG_P (xtarget))
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
      extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is not a scalar integral, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
	{
	  enum machine_mode smode
	    = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
	  target = convert_to_mode (smode, target, unsignedp);
	  target = force_reg (smode, target);
	  return gen_lowpart (tmode, target);
	}

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_cst (NULL_TREE, bitpos);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
1743
1744 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1745 then arithmetic-shift its lsb to the lsb of the word. */
1746 op0 = force_reg (mode, op0);
1747 if (mode != tmode)
1748 target = 0;
1749
1750 /* Find the narrowest integer mode that contains the field. */
1751
1752 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1753 mode = GET_MODE_WIDER_MODE (mode))
1754 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1755 {
1756 op0 = convert_to_mode (mode, op0, 0);
1757 break;
1758 }
1759
1760 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1761 {
1762 tree amount
1763 = build_int_cst (NULL_TREE,
1764 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1765 /* Maybe propagate the target for the shift. */
1766 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1767 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1768 }
1769
1770 return expand_shift (RSHIFT_EXPR, mode, op0,
1771 build_int_cst (NULL_TREE,
1772 GET_MODE_BITSIZE (mode) - bitsize),
1773 target, 0);
1774 }
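/* For illustration: for a field contained in a single 32-bit word, the
   two paths above reduce to the following arithmetic. A minimal C sketch
   (not GCC code), assuming 0 < bitsize, bitpos + bitsize <= 32,
   little-endian bit numbering, and the usual arithmetic right shift on
   signed int:

     static unsigned int
     extract_unsigned_field (unsigned int word, int bitpos, int bitsize)
     {
       unsigned int mask = bitsize < 32 ? (1u << bitsize) - 1 : ~0u;
       return (word >> bitpos) & mask;
     }

     static int
     extract_signed_field (unsigned int word, int bitpos, int bitsize)
     {
       return ((int) (word << (32 - bitpos - bitsize))) >> (32 - bitsize);
     }

   The unsigned case shifts the field down to the lsb and masks off the
   upper bits; the signed case first moves the field's msb to the word's
   msb, then shifts back down so the sign bit is replicated. For example,
   extract_unsigned_field (0xabcd1234, 8, 8) == 0x12.  */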
1775 \f
1776 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1777 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1778 complement of that if COMPLEMENT. The mask is truncated if
1779 necessary to the width of mode MODE. The mask is zero-extended if
1780 BITSIZE+BITPOS is too small for MODE. */
1781
1782 static rtx
1783 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1784 {
1785 HOST_WIDE_INT masklow, maskhigh;
1786
1787 if (bitsize == 0)
1788 masklow = 0;
1789 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1790 masklow = (HOST_WIDE_INT) -1 << bitpos;
1791 else
1792 masklow = 0;
1793
1794 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1795 masklow &= ((unsigned HOST_WIDE_INT) -1
1796 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1797
1798 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1799 maskhigh = -1;
1800 else
1801 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1802
1803 if (bitsize == 0)
1804 maskhigh = 0;
1805 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1806 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1807 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1808 else
1809 maskhigh = 0;
1810
1811 if (complement)
1812 {
1813 maskhigh = ~maskhigh;
1814 masklow = ~masklow;
1815 }
1816
1817 return immed_double_const (masklow, maskhigh, mode);
1818 }
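/* A worked example (assuming a 64-bit HOST_WIDE_INT):
   mask_rtx (SImode, 3, 5, 0) computes

     masklow  = (HOST_WIDE_INT) -1 << 3;                      ...fffffff8
     masklow &= (unsigned HOST_WIDE_INT) -1 >> (64 - 3 - 5);        0xf8

   while maskhigh ends up 0 because bitpos + bitsize <= 64. The result is
   five ones followed by three zeros; with COMPLEMENT nonzero the same
   call yields the SImode truncation of ~0xf8, i.e. 0xffffff07.  */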
1819
1820 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1821 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1822
1823 static rtx
1824 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1825 {
1826 unsigned HOST_WIDE_INT v = INTVAL (value);
1827 HOST_WIDE_INT low, high;
1828
1829 if (bitsize < HOST_BITS_PER_WIDE_INT)
1830 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1831
1832 if (bitpos < HOST_BITS_PER_WIDE_INT)
1833 {
1834 low = v << bitpos;
1835 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1836 }
1837 else
1838 {
1839 low = 0;
1840 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1841 }
1842
1843 return immed_double_const (low, high, mode);
1844 }
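/* A worked example: lshift_value (DImode, GEN_INT (0x1ff), 8, 8) first
   truncates 0x1ff to its low 8 bits, 0xff, then shifts left by 8,
   producing the DImode constant 0xff00 (the high HOST_WIDE_INT is 0 on a
   64-bit host).  */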
1845 \f
1846 /* Extract a bit field from a memory by forcing the alignment of the
1847 memory. This is efficient only if the field spans at least 4 boundaries.
1848
1849 OP0 is the MEM.
1850 BITSIZE is the field width; BITPOS is the position of the first bit.
1851 UNSIGNEDP is true if the result should be zero-extended. */
1852
1853 static rtx
1854 extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1855 unsigned HOST_WIDE_INT bitpos,
1856 int unsignedp)
1857 {
1858 enum machine_mode mode, dmode;
1859 unsigned int m_bitsize, m_size;
1860 unsigned int sign_shift_up, sign_shift_dn;
1861 rtx base, a1, a2, v1, v2, comb, shift, result, start;
1862
1863 /* Choose a mode that will fit BITSIZE. */
1864 mode = smallest_mode_for_size (bitsize, MODE_INT);
1865 m_size = GET_MODE_SIZE (mode);
1866 m_bitsize = GET_MODE_BITSIZE (mode);
1867
1868 /* Choose a mode twice as wide. Fail if no such mode exists. */
1869 dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
1870 if (dmode == BLKmode)
1871 return NULL;
1872
1873 do_pending_stack_adjust ();
1874 start = get_last_insn ();
1875
1876 /* At the end, we'll need an additional shift to deal with sign/zero
1877 extension. By default this will be a left+right shift of the
1878 appropriate size. But we may be able to eliminate one of them. */
1879 sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1880
1881 if (STRICT_ALIGNMENT)
1882 {
1883 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1884 bitpos %= BITS_PER_UNIT;
1885
1886 /* We load two values to be concatenated. There's an edge condition
1887 that bears notice -- an aligned value at the end of a page can
1888 only load one value lest we segfault. So the two values we load
1889 are at "base & -size" and "(base + size - 1) & -size". If base
1890 is unaligned, the addresses will be aligned and sequential; if
1891 base is aligned, the addresses will both be equal to base. */
1892
1893 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1894 GEN_INT (-(HOST_WIDE_INT)m_size),
1895 NULL, true, OPTAB_LIB_WIDEN);
1896 mark_reg_pointer (a1, m_bitsize);
1897 v1 = gen_rtx_MEM (mode, a1);
1898 set_mem_align (v1, m_bitsize);
1899 v1 = force_reg (mode, validize_mem (v1));
1900
1901 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1902 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1903 GEN_INT (-(HOST_WIDE_INT)m_size),
1904 NULL, true, OPTAB_LIB_WIDEN);
1905 v2 = gen_rtx_MEM (mode, a2);
1906 set_mem_align (v2, m_bitsize);
1907 v2 = force_reg (mode, validize_mem (v2));
1908
1909 /* Combine these two values into a double-word value. */
1910 if (m_bitsize == BITS_PER_WORD)
1911 {
1912 comb = gen_reg_rtx (dmode);
1913 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1914 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1915 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1916 }
1917 else
1918 {
1919 if (BYTES_BIG_ENDIAN)
1920 comb = v1, v1 = v2, v2 = comb;
1921 v1 = convert_modes (dmode, mode, v1, true);
1922 if (v1 == NULL)
1923 goto fail;
1924 v2 = convert_modes (dmode, mode, v2, true);
1925 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1926 NULL, true, OPTAB_LIB_WIDEN);
1927 if (v2 == NULL)
1928 goto fail;
1929 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1930 true, OPTAB_LIB_WIDEN);
1931 if (comb == NULL)
1932 goto fail;
1933 }
1934
1935 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1936 NULL, true, OPTAB_LIB_WIDEN);
1937 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1938
1939 if (bitpos != 0)
1940 {
1941 if (sign_shift_up <= bitpos)
1942 bitpos -= sign_shift_up, sign_shift_up = 0;
1943 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1944 NULL, true, OPTAB_LIB_WIDEN);
1945 }
1946 }
1947 else
1948 {
1949 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1950 bitpos %= BITS_PER_UNIT;
1951
1952 /* When strict alignment is not required, we can just load directly
1953 from memory without masking. If the remaining BITPOS offset is
1954 small enough, we may be able to do all operations in MODE as
1955 opposed to DMODE. */
1956 if (bitpos + bitsize <= m_bitsize)
1957 dmode = mode;
1958 comb = adjust_address (op0, dmode, offset);
1959
1960 if (sign_shift_up <= bitpos)
1961 bitpos -= sign_shift_up, sign_shift_up = 0;
1962 shift = GEN_INT (bitpos);
1963 }
1964
1965 /* Shift down the double-word such that the requested value is at bit 0. */
1966 if (shift != const0_rtx)
1967 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
1968 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
1969 if (comb == NULL)
1970 goto fail;
1971
1972 /* If the field exactly matches MODE, then all we need to do is return the
1973 lowpart. Otherwise, shift to get the sign bits set properly. */
1974 result = force_reg (mode, gen_lowpart (mode, comb));
1975
1976 if (sign_shift_up)
1977 result = expand_simple_binop (mode, ASHIFT, result,
1978 GEN_INT (sign_shift_up),
1979 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1980 if (sign_shift_dn)
1981 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
1982 result, GEN_INT (sign_shift_dn),
1983 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1984
1985 return result;
1986
1987 fail:
1988 delete_insns_since (start);
1989 return NULL;
1990 }
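/* A worked example of the STRICT_ALIGNMENT path above, with mode =
   SImode (m_size = 4): if the field starts at byte address 0x1002, the
   two loads come from

     a1 = 0x1002 & -4       = 0x1000
     a2 = (0x1002 + 3) & -4 = 0x1004

   aligned and sequential, covering bytes 0x1000..0x1007. Had the base
   been aligned at 0x1000, both addresses would equal 0x1000 and only one
   word is touched, which is what keeps an aligned value at the end of a
   page from faulting.  */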
1991
1992 /* Extract a bit field that is split across two words
1993 and return an RTX for the result.
1994
1995 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1996 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1997 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1998
1999 static rtx
2000 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2001 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2002 {
2003 unsigned int unit;
2004 unsigned int bitsdone = 0;
2005 rtx result = NULL_RTX;
2006 int first = 1;
2007
2008 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2009 much at a time. */
2010 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2011 unit = BITS_PER_WORD;
2012 else
2013 {
2014 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
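 /* The leading "0 &&" in the condition below disables this call;
 extract_force_align_mem_bit_field is kept in the source but is
 never reached from here. */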
2015 if (0 && bitsize / unit > 2)
2016 {
2017 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2018 unsignedp);
2019 if (tmp)
2020 return tmp;
2021 }
2022 }
2023
2024 while (bitsdone < bitsize)
2025 {
2026 unsigned HOST_WIDE_INT thissize;
2027 rtx part, word;
2028 unsigned HOST_WIDE_INT thispos;
2029 unsigned HOST_WIDE_INT offset;
2030
2031 offset = (bitpos + bitsdone) / unit;
2032 thispos = (bitpos + bitsdone) % unit;
2033
2034 /* THISSIZE must not overrun a word boundary. Otherwise,
2035 extract_fixed_bit_field will call us again, and we will mutually
2036 recurse forever. */
2037 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2038 thissize = MIN (thissize, unit - thispos);
2039
2040 /* If OP0 is a register, then handle OFFSET here.
2041
2042 When handling multiword bitfields, extract_bit_field may pass
2043 down a word_mode SUBREG of a larger REG for a bitfield that actually
2044 crosses a word boundary. Thus, for a SUBREG, we must find
2045 the current word starting from the base register. */
2046 if (GET_CODE (op0) == SUBREG)
2047 {
2048 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2049 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2050 GET_MODE (SUBREG_REG (op0)));
2051 offset = 0;
2052 }
2053 else if (REG_P (op0))
2054 {
2055 word = operand_subword_force (op0, offset, GET_MODE (op0));
2056 offset = 0;
2057 }
2058 else
2059 word = op0;
2060
2061 /* Extract the parts in bit-counting order,
2062 whose meaning is determined by BYTES_BIG_ENDIAN.
2063 OFFSET is in UNITs, and UNIT is in bits.
2064 extract_fixed_bit_field wants offset in bytes. */
2065 part = extract_fixed_bit_field (word_mode, word,
2066 offset * unit / BITS_PER_UNIT,
2067 thissize, thispos, 0, 1);
2068 bitsdone += thissize;
2069
2070 /* Shift this part into place for the result. */
2071 if (BYTES_BIG_ENDIAN)
2072 {
2073 if (bitsize != bitsdone)
2074 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2075 build_int_cst (NULL_TREE, bitsize - bitsdone),
2076 0, 1);
2077 }
2078 else
2079 {
2080 if (bitsdone != thissize)
2081 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2082 build_int_cst (NULL_TREE,
2083 bitsdone - thissize), 0, 1);
2084 }
2085
2086 if (first)
2087 result = part;
2088 else
2089 /* Combine the parts with bitwise or. This works
2090 because we extracted each part as an unsigned bit field. */
2091 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2092 OPTAB_LIB_WIDEN);
2093
2094 first = 0;
2095 }
2096
2097 /* Unsigned bit field: we are done. */
2098 if (unsignedp)
2099 return result;
2100 /* Signed bit field: sign-extend with two arithmetic shifts. */
2101 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2102 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2103 NULL_RTX, 0);
2104 return expand_shift (RSHIFT_EXPR, word_mode, result,
2105 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2106 NULL_RTX, 0);
2107 }
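/* For illustration: extracting a 16-bit field that starts at bit
   position 24 of a 32-bit word pair takes two passes of the loop above,
   8 bits from each word. On a little-endian target the combination
   amounts to (a sketch, not GCC code):

     unsigned int part0 = (w0 >> 24) & 0xff;
     unsigned int part1 = w1 & 0xff;
     unsigned int field = part0 | (part1 << 8);

   where the second part is shifted into place by BITSDONE - THISSIZE = 8
   before the IOR.  */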
2108 \f
2109 /* Add INC into TARGET. */
2110
2111 void
2112 expand_inc (rtx target, rtx inc)
2113 {
2114 rtx value = expand_binop (GET_MODE (target), add_optab,
2115 target, inc,
2116 target, 0, OPTAB_LIB_WIDEN);
2117 if (value != target)
2118 emit_move_insn (target, value);
2119 }
2120
2121 /* Subtract DEC from TARGET. */
2122
2123 void
2124 expand_dec (rtx target, rtx dec)
2125 {
2126 rtx value = expand_binop (GET_MODE (target), sub_optab,
2127 target, dec,
2128 target, 0, OPTAB_LIB_WIDEN);
2129 if (value != target)
2130 emit_move_insn (target, value);
2131 }
2132 \f
2133 /* Output a shift instruction for expression code CODE,
2134 with SHIFTED being the rtx for the value to shift,
2135 and AMOUNT the tree for the amount to shift by.
2136 Store the result in the rtx TARGET, if that is convenient.
2137 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2138 Return the rtx for where the value is. */
2139
2140 rtx
2141 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2142 tree amount, rtx target, int unsignedp)
2143 {
2144 rtx op1, temp = 0;
2145 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2146 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2147 int try;
2148
2149 /* We used to detect shift counts computed by NEGATE_EXPR
2150 and shift in the other direction, but that does not work
2151 on all machines. */
2152
2153 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
2154
2155 if (SHIFT_COUNT_TRUNCATED)
2156 {
2157 if (GET_CODE (op1) == CONST_INT
2158 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2159 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2160 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2161 % GET_MODE_BITSIZE (mode));
2162 else if (GET_CODE (op1) == SUBREG
2163 && subreg_lowpart_p (op1))
2164 op1 = SUBREG_REG (op1);
2165 }
2166
2167 if (op1 == const0_rtx)
2168 return shifted;
2169
2170 /* Check whether it's cheaper to implement a left shift by a constant
2171 bit count by a sequence of additions. */
2172 if (code == LSHIFT_EXPR
2173 && GET_CODE (op1) == CONST_INT
2174 && INTVAL (op1) > 0
2175 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2176 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2177 {
2178 int i;
2179 for (i = 0; i < INTVAL (op1); i++)
2180 {
2181 temp = force_reg (mode, shifted);
2182 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2183 unsignedp, OPTAB_LIB_WIDEN);
2184 }
2185 return shifted;
2186 }
2187
2188 for (try = 0; temp == 0 && try < 3; try++)
2189 {
2190 enum optab_methods methods;
2191
2192 if (try == 0)
2193 methods = OPTAB_DIRECT;
2194 else if (try == 1)
2195 methods = OPTAB_WIDEN;
2196 else
2197 methods = OPTAB_LIB_WIDEN;
2198
2199 if (rotate)
2200 {
2201 /* Widening does not work for rotation. */
2202 if (methods == OPTAB_WIDEN)
2203 continue;
2204 else if (methods == OPTAB_LIB_WIDEN)
2205 {
2206 /* If we have been unable to open-code this by a rotation,
2207 do it as the IOR of two shifts. I.e., to rotate A
2208 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2209 where C is the bitsize of A.
2210
2211 It is theoretically possible that the target machine might
2212 not be able to perform either shift and hence we would
2213 be making two libcalls rather than just the one for the
2214 shift (similarly if IOR could not be done). We will allow
2215 this extremely unlikely lossage to avoid complicating the
2216 code below. */
2217
2218 rtx subtarget = target == shifted ? 0 : target;
2219 rtx temp1;
2220 tree type = TREE_TYPE (amount);
2221 tree new_amount = make_tree (type, op1);
2222 tree other_amount
2223 = fold_build2 (MINUS_EXPR, type,
2224 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2225 amount);
2226
2227 shifted = force_reg (mode, shifted);
2228
2229 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2230 mode, shifted, new_amount, 0, 1);
2231 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2232 mode, shifted, other_amount, subtarget, 1);
2233 return expand_binop (mode, ior_optab, temp, temp1, target,
2234 unsignedp, methods);
2235 }
2236
2237 temp = expand_binop (mode,
2238 left ? rotl_optab : rotr_optab,
2239 shifted, op1, target, unsignedp, methods);
2240
2241 /* If we don't have the rotate, but we are rotating by a constant
2242 that is in range, try a rotate in the opposite direction. */
2243
2244 if (temp == 0 && GET_CODE (op1) == CONST_INT
2245 && INTVAL (op1) > 0
2246 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2247 temp = expand_binop (mode,
2248 left ? rotr_optab : rotl_optab,
2249 shifted,
2250 GEN_INT (GET_MODE_BITSIZE (mode)
2251 - INTVAL (op1)),
2252 target, unsignedp, methods);
2253 }
2254 else if (unsignedp)
2255 temp = expand_binop (mode,
2256 left ? ashl_optab : lshr_optab,
2257 shifted, op1, target, unsignedp, methods);
2258
2259 /* Do arithmetic shifts.
2260 Also, if we are going to widen the operand, we can just as well
2261 use an arithmetic right-shift instead of a logical one. */
2262 if (temp == 0 && ! rotate
2263 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2264 {
2265 enum optab_methods methods1 = methods;
2266
2267 /* If trying to widen a log shift to an arithmetic shift,
2268 don't accept an arithmetic shift of the same size. */
2269 if (unsignedp)
2270 methods1 = OPTAB_MUST_WIDEN;
2271
2272 /* Arithmetic shift */
2273
2274 temp = expand_binop (mode,
2275 left ? ashl_optab : ashr_optab,
2276 shifted, op1, target, unsignedp, methods1);
2277 }
2278
2279 /* We used to try extzv here for logical right shifts, but that was
2280 only useful for one machine, the VAX, and caused poor code
2281 generation there for lshrdi3, so the code was deleted and a
2282 define_expand for lshrsi3 was added to vax.md. */
2283 }
2284
2285 gcc_assert (temp);
2286 return temp;
2287 }
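/* For illustration: the IOR-of-two-shifts fallback above computes a
   rotate on targets without a rotate insn. A minimal C sketch for a
   32-bit left rotate, assuming 1 <= n <= 31 so that neither shift count
   is out of range:

     static unsigned int
     rotl32 (unsigned int a, int n)
     {
       return (a << n) | (a >> (32 - n));
     }

   The opposite-direction transformation further up rewrites, e.g., a
   rotate left by 3 as a rotate right by 29 when only the latter is
   available.  */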
2288 \f
2289 enum alg_code {
2290 alg_unknown,
2291 alg_zero,
2292 alg_m, alg_shift,
2293 alg_add_t_m2,
2294 alg_sub_t_m2,
2295 alg_add_factor,
2296 alg_sub_factor,
2297 alg_add_t2_m,
2298 alg_sub_t2_m,
2299 alg_impossible
2300 };
2301
2302 /* This structure holds the "cost" of a multiply sequence. The
2303 "cost" field holds the total rtx_cost of every operator in the
2304 synthetic multiplication sequence, hence cost(a op b) is defined
2305 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2306 The "latency" field holds the minimum possible latency of the
2307 synthetic multiply, on a hypothetical infinitely parallel CPU.
2308 This is the critical path, or the maximum height, of the expression
2309 tree which is the sum of rtx_costs on the most expensive path from
2310 any leaf to the root. Hence latency(a op b) is defined as zero for
2311 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
2312
2313 struct mult_cost {
2314 short cost; /* Total rtx_cost of the multiplication sequence. */
2315 short latency; /* The latency of the multiplication sequence. */
2316 };
2317
2318 /* This macro is used to compare a pointer to a mult_cost against a
2319 single integer "rtx_cost" value. This is equivalent to the macro
2320 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2321 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2322 || ((X)->cost == (Y) && (X)->latency < (Y)))
2323
2324 /* This macro is used to compare two pointers to mult_costs against
2325 each other. The macro returns true if X is cheaper than Y.
2326 Currently, the cheaper of two mult_costs is the one with the
2327 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2328 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2329 || ((X)->cost == (Y)->cost \
2330 && (X)->latency < (Y)->latency))
2331
2332 /* This structure records a sequence of operations.
2333 `ops' is the number of operations recorded.
2334 `cost' is their total cost.
2335 The operations are stored in `op' and the corresponding
2336 logarithms of the integer coefficients in `log'.
2337
2338 These are the operations:
2339 alg_zero total := 0;
2340 alg_m total := multiplicand;
2341 alg_shift total := total * coeff
2342 alg_add_t_m2 total := total + multiplicand * coeff;
2343 alg_sub_t_m2 total := total - multiplicand * coeff;
2344 alg_add_factor total := total * coeff + total;
2345 alg_sub_factor total := total * coeff - total;
2346 alg_add_t2_m total := total * coeff + multiplicand;
2347 alg_sub_t2_m total := total * coeff - multiplicand;
2348
2349 The first operand must be either alg_zero or alg_m. */
2350
2351 struct algorithm
2352 {
2353 struct mult_cost cost;
2354 short ops;
2355 /* The sizes of the OP and LOG fields are not directly related to the
2356 word size, but the worst-case algorithms arise when we have few
2357 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2358 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2359 in total wordsize operations. */
2360 enum alg_code op[MAX_BITS_PER_WORD];
2361 char log[MAX_BITS_PER_WORD];
2362 };
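/* For illustration, one possible recording of a multiplication by 10
   (the exact choice depends on the target's add and shift costs):

     op[0] = alg_m                       total = multiplicand
     op[1] = alg_add_t2_m, log[1] = 2    total = total*4 + multiplicand
     op[2] = alg_shift,    log[2] = 1    total = total*2

   which evaluates ((x*4 + x) * 2) = x*10 with two shifts and one
   add.  */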
2363
2364 /* The entry for our multiplication cache/hash table. */
2365 struct alg_hash_entry {
2366 /* The number we are multiplying by. */
2367 unsigned int t;
2368
2369 /* The mode in which we are multiplying something by T. */
2370 enum machine_mode mode;
2371
2372 /* The best multiplication algorithm for t. */
2373 enum alg_code alg;
2374
2375 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2376 Otherwise, the cost within which multiplication by T is
2377 impossible. */
2378 struct mult_cost cost;
2379 };
2380
2381 /* The number of cache/hash entries. */
2382 #define NUM_ALG_HASH_ENTRIES 307
2383
2384 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2385 actually a hash table. If we have a collision, the older
2386 entry is kicked out. */
2387 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2388
2389 /* Indicates the type of fixup needed after a constant multiplication.
2390 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2391 the result should be negated, and ADD_VARIANT means that the
2392 multiplicand should be added to the result. */
2393 enum mult_variant {basic_variant, negate_variant, add_variant};
2394
2395 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2396 const struct mult_cost *, enum machine_mode mode);
2397 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2398 struct algorithm *, enum mult_variant *, int);
2399 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2400 const struct algorithm *, enum mult_variant);
2401 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2402 int, rtx *, int *, int *);
2403 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2404 static rtx extract_high_half (enum machine_mode, rtx);
2405 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2406 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2407 int, int);
2408 /* Compute and return the best algorithm for multiplying by T.
2409 The algorithm must cost less than COST_LIMIT.
2410 If retval.cost >= COST_LIMIT, no algorithm was found and all
2411 other fields of the returned struct are undefined.
2412 MODE is the machine mode of the multiplication. */
2413
2414 static void
2415 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2416 const struct mult_cost *cost_limit, enum machine_mode mode)
2417 {
2418 int m;
2419 struct algorithm *alg_in, *best_alg;
2420 struct mult_cost best_cost;
2421 struct mult_cost new_limit;
2422 int op_cost, op_latency;
2423 unsigned HOST_WIDE_INT q;
2424 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2425 int hash_index;
2426 bool cache_hit = false;
2427 enum alg_code cache_alg = alg_zero;
2428
2429 /* Indicate that no algorithm is yet found. If no algorithm
2430 is found, this value will be returned and indicate failure. */
2431 alg_out->cost.cost = cost_limit->cost + 1;
2432 alg_out->cost.latency = cost_limit->latency + 1;
2433
2434 if (cost_limit->cost < 0
2435 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2436 return;
2437
2438 /* Restrict the bits of "t" to the multiplication's mode. */
2439 t &= GET_MODE_MASK (mode);
2440
2441 /* t == 1 can be done at zero cost. */
2442 if (t == 1)
2443 {
2444 alg_out->ops = 1;
2445 alg_out->cost.cost = 0;
2446 alg_out->cost.latency = 0;
2447 alg_out->op[0] = alg_m;
2448 return;
2449 }
2450
2451 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2452 fail now. */
2453 if (t == 0)
2454 {
2455 if (MULT_COST_LESS (cost_limit, zero_cost))
2456 return;
2457 else
2458 {
2459 alg_out->ops = 1;
2460 alg_out->cost.cost = zero_cost;
2461 alg_out->cost.latency = zero_cost;
2462 alg_out->op[0] = alg_zero;
2463 return;
2464 }
2465 }
2466
2467 /* We'll be needing a couple extra algorithm structures now. */
2468
2469 alg_in = alloca (sizeof (struct algorithm));
2470 best_alg = alloca (sizeof (struct algorithm));
2471 best_cost = *cost_limit;
2472
2473 /* Compute the hash index. */
2474 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2475
2476 /* See if we already know what to do for T. */
2477 if (alg_hash[hash_index].t == t
2478 && alg_hash[hash_index].mode == mode
2479 && alg_hash[hash_index].alg != alg_unknown)
2480 {
2481 cache_alg = alg_hash[hash_index].alg;
2482
2483 if (cache_alg == alg_impossible)
2484 {
2485 /* The cache tells us that it's impossible to synthesize
2486 multiplication by T within alg_hash[hash_index].cost. */
2487 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2488 /* COST_LIMIT is at least as restrictive as the one
2489 recorded in the hash table, in which case we have no
2490 hope of synthesizing a multiplication. Just
2491 return. */
2492 return;
2493
2494 /* If we get here, COST_LIMIT is less restrictive than the
2495 one recorded in the hash table, so we may be able to
2496 synthesize a multiplication. Proceed as if we didn't
2497 have the cache entry. */
2498 }
2499 else
2500 {
2501 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2502 /* The cached algorithm shows that this multiplication
2503 requires more cost than COST_LIMIT. Just return. This
2504 way, we don't clobber this cache entry with
2505 alg_impossible but retain useful information. */
2506 return;
2507
2508 cache_hit = true;
2509
2510 switch (cache_alg)
2511 {
2512 case alg_shift:
2513 goto do_alg_shift;
2514
2515 case alg_add_t_m2:
2516 case alg_sub_t_m2:
2517 goto do_alg_addsub_t_m2;
2518
2519 case alg_add_factor:
2520 case alg_sub_factor:
2521 goto do_alg_addsub_factor;
2522
2523 case alg_add_t2_m:
2524 goto do_alg_add_t2_m;
2525
2526 case alg_sub_t2_m:
2527 goto do_alg_sub_t2_m;
2528
2529 default:
2530 gcc_unreachable ();
2531 }
2532 }
2533 }
2534
2535 /* If we have a group of zero bits at the low-order part of T, try
2536 multiplying by the remaining bits and then doing a shift. */
2537
2538 if ((t & 1) == 0)
2539 {
2540 do_alg_shift:
2541 m = floor_log2 (t & -t); /* m = number of low zero bits */
2542 if (m < maxm)
2543 {
2544 q = t >> m;
2545 /* The function expand_shift will choose between a shift and
2546 a sequence of additions, so the observed cost is given as
2547 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2548 op_cost = m * add_cost[mode];
2549 if (shift_cost[mode][m] < op_cost)
2550 op_cost = shift_cost[mode][m];
2551 new_limit.cost = best_cost.cost - op_cost;
2552 new_limit.latency = best_cost.latency - op_cost;
2553 synth_mult (alg_in, q, &new_limit, mode);
2554
2555 alg_in->cost.cost += op_cost;
2556 alg_in->cost.latency += op_cost;
2557 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2558 {
2559 struct algorithm *x;
2560 best_cost = alg_in->cost;
2561 x = alg_in, alg_in = best_alg, best_alg = x;
2562 best_alg->log[best_alg->ops] = m;
2563 best_alg->op[best_alg->ops] = alg_shift;
2564 }
2565 }
2566 if (cache_hit)
2567 goto done;
2568 }
2569
2570 /* If we have an odd number, add or subtract one. */
2571 if ((t & 1) != 0)
2572 {
2573 unsigned HOST_WIDE_INT w;
2574
2575 do_alg_addsub_t_m2:
2576 for (w = 1; (w & t) != 0; w <<= 1)
2577 ;
2578 /* If T was -1, then W will be zero after the loop. This is another
2579 case where T ends with ...111. Handling this by multiplying by (T + 1)
2580 and subtracting 1 produces slightly better code and makes algorithm
2581 selection much faster than treating it like the ...0111 case
2582 below. */
2583 if (w == 0
2584 || (w > 2
2585 /* Reject the case where t is 3.
2586 Thus we prefer addition in that case. */
2587 && t != 3))
2588 {
2589 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2590
2591 op_cost = add_cost[mode];
2592 new_limit.cost = best_cost.cost - op_cost;
2593 new_limit.latency = best_cost.latency - op_cost;
2594 synth_mult (alg_in, t + 1, &new_limit, mode);
2595
2596 alg_in->cost.cost += op_cost;
2597 alg_in->cost.latency += op_cost;
2598 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2599 {
2600 struct algorithm *x;
2601 best_cost = alg_in->cost;
2602 x = alg_in, alg_in = best_alg, best_alg = x;
2603 best_alg->log[best_alg->ops] = 0;
2604 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2605 }
2606 }
2607 else
2608 {
2609 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2610
2611 op_cost = add_cost[mode];
2612 new_limit.cost = best_cost.cost - op_cost;
2613 new_limit.latency = best_cost.latency - op_cost;
2614 synth_mult (alg_in, t - 1, &new_limit, mode);
2615
2616 alg_in->cost.cost += op_cost;
2617 alg_in->cost.latency += op_cost;
2618 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2619 {
2620 struct algorithm *x;
2621 best_cost = alg_in->cost;
2622 x = alg_in, alg_in = best_alg, best_alg = x;
2623 best_alg->log[best_alg->ops] = 0;
2624 best_alg->op[best_alg->ops] = alg_add_t_m2;
2625 }
2626 }
2627 if (cache_hit)
2628 goto done;
2629 }
2630
2631 /* Look for factors of t of the form
2632 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2633 If we find such a factor, we can multiply by t using an algorithm that
2634 multiplies by q, shift the result by m and add/subtract it to itself.
2635
2636 We search for large factors first and loop down, even if large factors
2637 are less probable than small; if we find a large factor we will find a
2638 good sequence quickly, and therefore be able to prune (by decreasing
2639 COST_LIMIT) the search. */
2640
2641 do_alg_addsub_factor:
2642 for (m = floor_log2 (t - 1); m >= 2; m--)
2643 {
2644 unsigned HOST_WIDE_INT d;
2645
2646 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2647 if (t % d == 0 && t > d && m < maxm
2648 && (!cache_hit || cache_alg == alg_add_factor))
2649 {
2650 /* If the target has a cheap shift-and-add instruction use
2651 that in preference to a shift insn followed by an add insn.
2652 Assume that the shift-and-add is "atomic" with a latency
2653 equal to its cost, otherwise assume that on superscalar
2654 hardware the shift may be executed concurrently with the
2655 earlier steps in the algorithm. */
2656 op_cost = add_cost[mode] + shift_cost[mode][m];
2657 if (shiftadd_cost[mode][m] < op_cost)
2658 {
2659 op_cost = shiftadd_cost[mode][m];
2660 op_latency = op_cost;
2661 }
2662 else
2663 op_latency = add_cost[mode];
2664
2665 new_limit.cost = best_cost.cost - op_cost;
2666 new_limit.latency = best_cost.latency - op_latency;
2667 synth_mult (alg_in, t / d, &new_limit, mode);
2668
2669 alg_in->cost.cost += op_cost;
2670 alg_in->cost.latency += op_latency;
2671 if (alg_in->cost.latency < op_cost)
2672 alg_in->cost.latency = op_cost;
2673 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2674 {
2675 struct algorithm *x;
2676 best_cost = alg_in->cost;
2677 x = alg_in, alg_in = best_alg, best_alg = x;
2678 best_alg->log[best_alg->ops] = m;
2679 best_alg->op[best_alg->ops] = alg_add_factor;
2680 }
2681 /* Other factors will have been taken care of in the recursion. */
2682 break;
2683 }
2684
2685 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2686 if (t % d == 0 && t > d && m < maxm
2687 && (!cache_hit || cache_alg == alg_sub_factor))
2688 {
2689 /* If the target has a cheap shift-and-subtract insn use
2690 that in preference to a shift insn followed by a sub insn.
2691 Assume that the shift-and-sub is "atomic" with a latency
2692 equal to its cost, otherwise assume that on superscalar
2693 hardware the shift may be executed concurrently with the
2694 earlier steps in the algorithm. */
2695 op_cost = add_cost[mode] + shift_cost[mode][m];
2696 if (shiftsub_cost[mode][m] < op_cost)
2697 {
2698 op_cost = shiftsub_cost[mode][m];
2699 op_latency = op_cost;
2700 }
2701 else
2702 op_latency = add_cost[mode];
2703
2704 new_limit.cost = best_cost.cost - op_cost;
2705 new_limit.latency = best_cost.latency - op_latency;
2706 synth_mult (alg_in, t / d, &new_limit, mode);
2707
2708 alg_in->cost.cost += op_cost;
2709 alg_in->cost.latency += op_latency;
2710 if (alg_in->cost.latency < op_cost)
2711 alg_in->cost.latency = op_cost;
2712 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2713 {
2714 struct algorithm *x;
2715 best_cost = alg_in->cost;
2716 x = alg_in, alg_in = best_alg, best_alg = x;
2717 best_alg->log[best_alg->ops] = m;
2718 best_alg->op[best_alg->ops] = alg_sub_factor;
2719 }
2720 break;
2721 }
2722 }
2723 if (cache_hit)
2724 goto done;
2725
2726 /* Try shift-and-add (load effective address) instructions,
2727 i.e. do a*3, a*5, a*9. */
2728 if ((t & 1) != 0)
2729 {
2730 do_alg_add_t2_m:
2731 q = t - 1;
2732 q = q & -q;
2733 m = exact_log2 (q);
2734 if (m >= 0 && m < maxm)
2735 {
2736 op_cost = shiftadd_cost[mode][m];
2737 new_limit.cost = best_cost.cost - op_cost;
2738 new_limit.latency = best_cost.latency - op_cost;
2739 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2740
2741 alg_in->cost.cost += op_cost;
2742 alg_in->cost.latency += op_cost;
2743 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2744 {
2745 struct algorithm *x;
2746 best_cost = alg_in->cost;
2747 x = alg_in, alg_in = best_alg, best_alg = x;
2748 best_alg->log[best_alg->ops] = m;
2749 best_alg->op[best_alg->ops] = alg_add_t2_m;
2750 }
2751 }
2752 if (cache_hit)
2753 goto done;
2754
2755 do_alg_sub_t2_m:
2756 q = t + 1;
2757 q = q & -q;
2758 m = exact_log2 (q);
2759 if (m >= 0 && m < maxm)
2760 {
2761 op_cost = shiftsub_cost[mode][m];
2762 new_limit.cost = best_cost.cost - op_cost;
2763 new_limit.latency = best_cost.latency - op_cost;
2764 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2765
2766 alg_in->cost.cost += op_cost;
2767 alg_in->cost.latency += op_cost;
2768 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2769 {
2770 struct algorithm *x;
2771 best_cost = alg_in->cost;
2772 x = alg_in, alg_in = best_alg, best_alg = x;
2773 best_alg->log[best_alg->ops] = m;
2774 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2775 }
2776 }
2777 if (cache_hit)
2778 goto done;
2779 }
2780
2781 done:
2782 /* If best_cost has not decreased, we have not found any algorithm. */
2783 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2784 {
2785 /* We failed to find an algorithm. Record alg_impossible for
2786 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2787 we are asked to find an algorithm for T within the same or
2788 lower COST_LIMIT, we can immediately return to the
2789 caller. */
2790 alg_hash[hash_index].t = t;
2791 alg_hash[hash_index].mode = mode;
2792 alg_hash[hash_index].alg = alg_impossible;
2793 alg_hash[hash_index].cost = *cost_limit;
2794 return;
2795 }
2796
2797 /* Cache the result. */
2798 if (!cache_hit)
2799 {
2800 alg_hash[hash_index].t = t;
2801 alg_hash[hash_index].mode = mode;
2802 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2803 alg_hash[hash_index].cost.cost = best_cost.cost;
2804 alg_hash[hash_index].cost.latency = best_cost.latency;
2805 }
2806
2807 /* If we are getting too long a sequence for `struct algorithm'
2808 to record, make this search fail. */
2809 if (best_alg->ops == MAX_BITS_PER_WORD)
2810 return;
2811
2812 /* Copy the algorithm from temporary space to the space at alg_out.
2813 We avoid using structure assignment because the majority of
2814 best_alg is normally undefined, and this is a critical function. */
2815 alg_out->ops = best_alg->ops + 1;
2816 alg_out->cost = best_cost;
2817 memcpy (alg_out->op, best_alg->op,
2818 alg_out->ops * sizeof *alg_out->op);
2819 memcpy (alg_out->log, best_alg->log,
2820 alg_out->ops * sizeof *alg_out->log);
2821 }
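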
2822 \f
2823 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2824 Try three variations:
2825
2826 - a shift/add sequence based on VAL itself
2827 - a shift/add sequence based on -VAL, followed by a negation
2828 - a shift/add sequence based on VAL - 1, followed by an addition.
2829
2830 Return true if the cheapest of these cost less than MULT_COST,
2831 describing the algorithm in *ALG and final fixup in *VARIANT. */
2832
2833 static bool
2834 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2835 struct algorithm *alg, enum mult_variant *variant,
2836 int mult_cost)
2837 {
2838 struct algorithm alg2;
2839 struct mult_cost limit;
2840 int op_cost;
2841
2842 *variant = basic_variant;
2843 limit.cost = mult_cost;
2844 limit.latency = mult_cost;
2845 synth_mult (alg, val, &limit, mode);
2846
2847 /* This works only if the inverted value actually fits in an
2848 `unsigned int'. */
2849 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2850 {
2851 op_cost = neg_cost[mode];
2852 if (MULT_COST_LESS (&alg->cost, mult_cost))
2853 {
2854 limit.cost = alg->cost.cost - op_cost;
2855 limit.latency = alg->cost.latency - op_cost;
2856 }
2857 else
2858 {
2859 limit.cost = mult_cost - op_cost;
2860 limit.latency = mult_cost - op_cost;
2861 }
2862
2863 synth_mult (&alg2, -val, &limit, mode);
2864 alg2.cost.cost += op_cost;
2865 alg2.cost.latency += op_cost;
2866 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2867 *alg = alg2, *variant = negate_variant;
2868 }
2869
2870 /* This proves very useful for division-by-constant. */
2871 op_cost = add_cost[mode];
2872 if (MULT_COST_LESS (&alg->cost, mult_cost))
2873 {
2874 limit.cost = alg->cost.cost - op_cost;
2875 limit.latency = alg->cost.latency - op_cost;
2876 }
2877 else
2878 {
2879 limit.cost = mult_cost - op_cost;
2880 limit.latency = mult_cost - op_cost;
2881 }
2882
2883 synth_mult (&alg2, val - 1, &limit, mode);
2884 alg2.cost.cost += op_cost;
2885 alg2.cost.latency += op_cost;
2886 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2887 *alg = alg2, *variant = add_variant;
2888
2889 return MULT_COST_LESS (&alg->cost, mult_cost);
2890 }
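/* For illustration: for VAL = -5 in SImode the basic variant would have
   to synthesize the bit pattern 0xfffffffb directly, whereas the -VAL
   variant computes x*5 as (x << 2) + x and negates the result. When
   neg_cost plus the cost of the shorter sequence wins, the function
   returns with *VARIANT = negate_variant. (A sketch of the intent; the
   outcome depends on the target's neg, add and shift costs.)  */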
2891
2892 /* A subroutine of expand_mult, used for constant multiplications.
2893 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2894 convenient. Use the shift/add sequence described by ALG and apply
2895 the final fixup specified by VARIANT. */
2896
2897 static rtx
2898 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2899 rtx target, const struct algorithm *alg,
2900 enum mult_variant variant)
2901 {
2902 HOST_WIDE_INT val_so_far;
2903 rtx insn, accum, tem;
2904 int opno;
2905 enum machine_mode nmode;
2906
2907 /* Avoid referencing memory over and over.
2908 For speed, but also for correctness when mem is volatile. */
2909 if (MEM_P (op0))
2910 op0 = force_reg (mode, op0);
2911
2912 /* ACCUM starts out either as OP0 or as a zero, depending on
2913 the first operation. */
2914
2915 if (alg->op[0] == alg_zero)
2916 {
2917 accum = copy_to_mode_reg (mode, const0_rtx);
2918 val_so_far = 0;
2919 }
2920 else if (alg->op[0] == alg_m)
2921 {
2922 accum = copy_to_mode_reg (mode, op0);
2923 val_so_far = 1;
2924 }
2925 else
2926 gcc_unreachable ();
2927
2928 for (opno = 1; opno < alg->ops; opno++)
2929 {
2930 int log = alg->log[opno];
2931 rtx shift_subtarget = optimize ? 0 : accum;
2932 rtx add_target
2933 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2934 && !optimize)
2935 ? target : 0;
2936 rtx accum_target = optimize ? 0 : accum;
2937
2938 switch (alg->op[opno])
2939 {
2940 case alg_shift:
2941 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2942 build_int_cst (NULL_TREE, log),
2943 NULL_RTX, 0);
2944 val_so_far <<= log;
2945 break;
2946
2947 case alg_add_t_m2:
2948 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2949 build_int_cst (NULL_TREE, log),
2950 NULL_RTX, 0);
2951 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2952 add_target ? add_target : accum_target);
2953 val_so_far += (HOST_WIDE_INT) 1 << log;
2954 break;
2955
2956 case alg_sub_t_m2:
2957 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2958 build_int_cst (NULL_TREE, log),
2959 NULL_RTX, 0);
2960 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2961 add_target ? add_target : accum_target);
2962 val_so_far -= (HOST_WIDE_INT) 1 << log;
2963 break;
2964
2965 case alg_add_t2_m:
2966 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2967 build_int_cst (NULL_TREE, log),
2968 shift_subtarget,
2969 0);
2970 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2971 add_target ? add_target : accum_target);
2972 val_so_far = (val_so_far << log) + 1;
2973 break;
2974
2975 case alg_sub_t2_m:
2976 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2977 build_int_cst (NULL_TREE, log),
2978 shift_subtarget, 0);
2979 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2980 add_target ? add_target : accum_target);
2981 val_so_far = (val_so_far << log) - 1;
2982 break;
2983
2984 case alg_add_factor:
2985 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2986 build_int_cst (NULL_TREE, log),
2987 NULL_RTX, 0);
2988 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2989 add_target ? add_target : accum_target);
2990 val_so_far += val_so_far << log;
2991 break;
2992
2993 case alg_sub_factor:
2994 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2995 build_int_cst (NULL_TREE, log),
2996 NULL_RTX, 0);
2997 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2998 (add_target
2999 ? add_target : (optimize ? 0 : tem)));
3000 val_so_far = (val_so_far << log) - val_so_far;
3001 break;
3002
3003 default:
3004 gcc_unreachable ();
3005 }
3006
3007 /* Write a REG_EQUAL note on the last insn so that we can cse
3008 multiplication sequences. Note that if ACCUM is a SUBREG,
3009 we've set the inner register and must properly indicate
3010 that. */
3011
3012 tem = op0, nmode = mode;
3013 if (GET_CODE (accum) == SUBREG)
3014 {
3015 nmode = GET_MODE (SUBREG_REG (accum));
3016 tem = gen_lowpart (nmode, op0);
3017 }
3018
3019 insn = get_last_insn ();
3020 set_unique_reg_note (insn, REG_EQUAL,
3021 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
3022 }
3023
3024 if (variant == negate_variant)
3025 {
3026 val_so_far = -val_so_far;
3027 accum = expand_unop (mode, neg_optab, accum, target, 0);
3028 }
3029 else if (variant == add_variant)
3030 {
3031 val_so_far = val_so_far + 1;
3032 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3033 }
3034
3035 /* Compare only the bits of val and val_so_far that are significant
3036 in the result mode, to avoid sign-/zero-extension confusion. */
3037 val &= GET_MODE_MASK (mode);
3038 val_so_far &= GET_MODE_MASK (mode);
3039 gcc_assert (val == val_so_far);
3040
3041 return accum;
3042 }
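/* For illustration: expanding x*10 from the sequence { alg_m;
   alg_add_t2_m, log 2; alg_shift, log 1 } performs roughly

     accum = x;                   val_so_far = 1
     accum = (accum << 2) + x;    val_so_far = 5
     accum = accum << 1;          val_so_far = 10

   and the final assertion checks that VAL_SO_FAR, recomputed purely from
   the recorded ops and logs, matches VAL in the result mode: a cheap
   consistency check on the algorithm chosen by synth_mult.  */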
3043
3044 /* Perform a multiplication and return an rtx for the result.
3045 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3046 TARGET is a suggestion for where to store the result (an rtx).
3047
3048 We check specially for a constant integer as OP1.
3049 If you want this check for OP0 as well, then before calling
3050 you should swap the two operands if OP0 would be constant. */
3051
3052 rtx
3053 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3054 int unsignedp)
3055 {
3056 enum mult_variant variant;
3057 struct algorithm algorithm;
3058 int max_cost;
3059
3060 /* Handling const0_rtx here allows us to use zero as a rogue value for
3061 coeff below. */
3062 if (op1 == const0_rtx)
3063 return const0_rtx;
3064 if (op1 == const1_rtx)
3065 return op0;
3066 if (op1 == constm1_rtx)
3067 return expand_unop (mode,
3068 GET_MODE_CLASS (mode) == MODE_INT
3069 && !unsignedp && flag_trapv
3070 ? negv_optab : neg_optab,
3071 op0, target, 0);
3072
3073 /* These are the operations that are potentially turned into a sequence
3074 of shifts and additions. */
3075 if (SCALAR_INT_MODE_P (mode)
3076 && (unsignedp || !flag_trapv))
3077 {
3078 HOST_WIDE_INT coeff = 0;
3079 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3080
3081 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3082 less than or equal in size to `unsigned int' this doesn't matter.
3083 If the mode is larger than `unsigned int', then synth_mult works
3084 only if the constant value exactly fits in an `unsigned int' without
3085 any truncation. This means that multiplying by negative values does
3086 not work; results are off by 2^32 on a 32-bit machine. */
3087
3088 if (GET_CODE (op1) == CONST_INT)
3089 {
3090 /* Attempt to handle multiplication of DImode values by negative
3091 coefficients, by performing the multiplication by a positive
3092 multiplier and then inverting the result. */
3093 if (INTVAL (op1) < 0
3094 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3095 {
3096 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3097 result is interpreted as an unsigned coefficient.
3098 Exclude cost of op0 from max_cost to match the cost
3099 calculation of the synth_mult. */
3100 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3101 - neg_cost[mode];
3102 if (max_cost > 0
3103 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3104 &variant, max_cost))
3105 {
3106 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3107 NULL_RTX, &algorithm,
3108 variant);
3109 return expand_unop (mode, neg_optab, temp, target, 0);
3110 }
3111 }
3112 else coeff = INTVAL (op1);
3113 }
3114 else if (GET_CODE (op1) == CONST_DOUBLE)
3115 {
3116 /* If we are multiplying in DImode, it may still be a win
3117 to try to work with shifts and adds. */
3118 if (CONST_DOUBLE_HIGH (op1) == 0)
3119 coeff = CONST_DOUBLE_LOW (op1);
3120 else if (CONST_DOUBLE_LOW (op1) == 0
3121 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3122 {
3123 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3124 + HOST_BITS_PER_WIDE_INT;
3125 return expand_shift (LSHIFT_EXPR, mode, op0,
3126 build_int_cst (NULL_TREE, shift),
3127 target, unsignedp);
3128 }
3129 }
3130
3131 /* We used to test optimize here, on the grounds that it's better to
3132 produce a smaller program when -O is not used. But this causes
3133 such a terrible slowdown sometimes that it seems better to always
3134 use synth_mult. */
3135 if (coeff != 0)
3136 {
3137 /* Special case powers of two. */
3138 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3139 return expand_shift (LSHIFT_EXPR, mode, op0,
3140 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3141 target, unsignedp);
3142
3143 /* Exclude cost of op0 from max_cost to match the cost
3144 calculation of the synth_mult. */
3145 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3146 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3147 max_cost))
3148 return expand_mult_const (mode, op0, coeff, target,
3149 &algorithm, variant);
3150 }
3151 }
3152
3153 if (GET_CODE (op0) == CONST_DOUBLE)
3154 {
3155 rtx temp = op0;
3156 op0 = op1;
3157 op1 = temp;
3158 }
3159
3160 /* Expand x*2.0 as x+x. */
3161 if (GET_CODE (op1) == CONST_DOUBLE
3162 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3163 {
3164 REAL_VALUE_TYPE d;
3165 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3166
3167 if (REAL_VALUES_EQUAL (d, dconst2))
3168 {
3169 op0 = force_reg (GET_MODE (op0), op0);
3170 return expand_binop (mode, add_optab, op0, op0,
3171 target, unsignedp, OPTAB_LIB_WIDEN);
3172 }
3173 }
3174
3175 /* This used to use umul_optab if unsigned, but for non-widening multiply
3176 there is no difference between signed and unsigned. */
3177 op0 = expand_binop (mode,
3178 ! unsignedp
3179 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3180 ? smulv_optab : smul_optab,
3181 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3182 gcc_assert (op0);
3183 return op0;
3184 }
3185 \f
3186 /* Return the smallest n such that 2**n >= X. */
3187
3188 int
3189 ceil_log2 (unsigned HOST_WIDE_INT x)
3190 {
3191 return floor_log2 (x - 1) + 1;
3192 }
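/* For example, ceil_log2 (1) == 0, ceil_log2 (7) == 3 and
   ceil_log2 (8) == 3; the x == 1 case relies on floor_log2 (0)
   returning -1.  */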
3193
3194 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3195 replace division by D, and put the least significant N bits of the result
3196 in *MULTIPLIER_PTR and return the most significant bit.
3197
3198 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3199 needed precision is in PRECISION (should be <= N).
3200
3201 PRECISION should be as small as possible so this function can choose
3202 the multiplier more freely.
3203
3204 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3205 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3206
3207 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3208 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3209
3210 static
3211 unsigned HOST_WIDE_INT
3212 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3213 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3214 {
3215 HOST_WIDE_INT mhigh_hi, mlow_hi;
3216 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3217 int lgup, post_shift;
3218 int pow, pow2;
3219 unsigned HOST_WIDE_INT nl, dummy1;
3220 HOST_WIDE_INT nh, dummy2;
3221
3222 /* lgup = ceil(log2(divisor)); */
3223 lgup = ceil_log2 (d);
3224
3225 gcc_assert (lgup <= n);
3226
3227 pow = n + lgup;
3228 pow2 = n + lgup - precision;
3229
3230 /* We could handle this with some effort, but this case is much
3231 better handled directly with a scc insn, so rely on the caller using
3232 that. */
3233 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3234
3235 /* mlow = 2^(N + lgup)/d */
3236 if (pow >= HOST_BITS_PER_WIDE_INT)
3237 {
3238 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3239 nl = 0;
3240 }
3241 else
3242 {
3243 nh = 0;
3244 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3245 }
3246 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3247 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3248
3249 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3250 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3251 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3252 else
3253 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3254 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3255 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3256
3257 gcc_assert (!mhigh_hi || nh - d < d);
3258 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3259 /* Assert that mlow < mhigh. */
3260 gcc_assert (mlow_hi < mhigh_hi
3261 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3262
3263 /* If precision == N, then mlow, mhigh exceed 2^N
3264 (but they do not exceed 2^(N+1)). */
3265
3266 /* Reduce to lowest terms. */
3267 for (post_shift = lgup; post_shift > 0; post_shift--)
3268 {
3269 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3270 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3271 if (ml_lo >= mh_lo)
3272 break;
3273
3274 mlow_hi = 0;
3275 mlow_lo = ml_lo;
3276 mhigh_hi = 0;
3277 mhigh_lo = mh_lo;
3278 }
3279
3280 *post_shift_ptr = post_shift;
3281 *lgup_ptr = lgup;
3282 if (n < HOST_BITS_PER_WIDE_INT)
3283 {
3284 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3285 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3286 return mhigh_lo >= mask;
3287 }
3288 else
3289 {
3290 *multiplier_ptr = GEN_INT (mhigh_lo);
3291 return mhigh_hi;
3292 }
3293 }
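/* A worked example of the construction above: for d = 7 with
   n = precision = 32 we get lgup = 3 and

     mhigh = floor ((2^35 + 2^3) / 7) = 0x124924925

   which cannot be reduced, so *POST_SHIFT_PTR = 3, *MULTIPLIER_PTR =
   GEN_INT (0x24924925), and the function returns 1: the multiplier
   needs N + 1 bits. The caller then handles the extra bit with the
   well-known sequence (a sketch, with MULHU naming the upper 32 bits of
   the 64-bit unsigned product; the exact insns depend on the target):

     t = MULHU (x, 0x24924925);
     q = (t + ((x - t) >> 1)) >> 2;

   which computes x / 7 for every 32-bit unsigned x.  */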
3294
3295 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3296 congruent to 1 (mod 2**N). */
3297
3298 static unsigned HOST_WIDE_INT
3299 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3300 {
3301 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3302
3303 /* The algorithm notes that the choice y = x satisfies
3304 x*y == 1 mod 2^3, since x is assumed odd.
3305 Each iteration doubles the number of bits of significance in y. */
3306
3307 unsigned HOST_WIDE_INT mask;
3308 unsigned HOST_WIDE_INT y = x;
3309 int nbit = 3;
3310
3311 mask = (n == HOST_BITS_PER_WIDE_INT
3312 ? ~(unsigned HOST_WIDE_INT) 0
3313 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3314
3315 while (nbit < n)
3316 {
3317 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3318 nbit *= 2;
3319 }
3320 return y;
3321 }
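/* A worked example: invert_mod2n (7, 32) == 0xb6db6db7, and indeed
   7 * 0xb6db6db7 == 5 * 2^32 + 1, i.e. congruent to 1 (mod 2^32).
   Starting from the 3 correct bits of y = x, each Newton step
   y = y * (2 - x*y) doubles the number of correct low-order bits
   (3 -> 6 -> 12 -> 24 -> 48), so four iterations suffice for
   n = 32.  */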
3322
3323 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
3324 signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3325 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3326 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3327 become signed.
3328
3329 The result is put in TARGET if that is convenient.
3330
3331 MODE is the mode of operation. */
3332
3333 rtx
3334 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3335 rtx op1, rtx target, int unsignedp)
3336 {
3337 rtx tem;
3338 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3339
3340 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3341 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3342 NULL_RTX, 0);
3343 tem = expand_and (mode, tem, op1, NULL_RTX);
3344 adj_operand
3345 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3346 adj_operand);
3347
3348 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3349 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3350 NULL_RTX, 0);
3351 tem = expand_and (mode, tem, op0, NULL_RTX);
3352 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3353 target);
3354
3355 return target;
3356 }
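/* For illustration: the code above implements the identity (all values
   taken mod 2^N, with MULHS/MULHU naming the signed/unsigned high parts
   of the product)

     MULHU (x, y) = MULHS (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)

   applied left-to-right when UNSIGNEDP is nonzero (ADJ_CODE = PLUS) and
   right-to-left otherwise. Each arithmetic right shift by N - 1 turns a
   sign bit into an all-ones mask, and the AND selects OP1 or OP0
   accordingly.  */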
3357
3358 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3359
3360 static rtx
3361 extract_high_half (enum machine_mode mode, rtx op)
3362 {
3363 enum machine_mode wider_mode;
3364
3365 if (mode == word_mode)
3366 return gen_highpart (mode, op);
3367
3368 wider_mode = GET_MODE_WIDER_MODE (mode);
3369 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3370 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3371 return convert_modes (mode, wider_mode, op, 0);
3372 }
3373
3374 /* Like expand_mult_highpart, but only consider using a multiplication
3375 optab. OP1 is an rtx for the constant operand. */
3376
3377 static rtx
3378 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3379 rtx target, int unsignedp, int max_cost)
3380 {
3381 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3382 enum machine_mode wider_mode;
3383 optab moptab;
3384 rtx tem;
3385 int size;
3386
3387 wider_mode = GET_MODE_WIDER_MODE (mode);
3388 size = GET_MODE_BITSIZE (mode);
3389
3390 /* First, try using a multiplication insn that generates only the needed
3391 high part of the product, in the sign flavor of unsignedp. */
3392 if (mul_highpart_cost[mode] < max_cost)
3393 {
3394 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3395 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3396 unsignedp, OPTAB_DIRECT);
3397 if (tem)
3398 return tem;
3399 }
3400
3401 /* Second, the same as above, but using the sign flavor opposite of unsignedp.
3402 Need to adjust the result after the multiplication. */
3403 if (size - 1 < BITS_PER_WORD
3404 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3405 + 4 * add_cost[mode] < max_cost))
3406 {
3407 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3408 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3409 unsignedp, OPTAB_DIRECT);
3410 if (tem)
3411 /* We used the wrong signedness. Adjust the result. */
3412 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3413 tem, unsignedp);
3414 }
3415
3416 /* Try widening multiplication. */
3417 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3418 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3419 && mul_widen_cost[wider_mode] < max_cost)
3420 {
3421 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3422 unsignedp, OPTAB_WIDEN);
3423 if (tem)
3424 return extract_high_half (mode, tem);
3425 }
3426
3427 /* Try widening the mode and performing a non-widening multiplication. */
3428 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3429 && size - 1 < BITS_PER_WORD
3430 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3431 {
3432 rtx insns, wop0, wop1;
3433
3434 /* We need to widen the operands, for example to ensure the
3435 constant multiplier is correctly sign or zero extended.
3436 Use a sequence to clean up any instructions emitted by
3437 the conversions if things don't work out. */
3438 start_sequence ();
3439 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3440 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3441 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3442 unsignedp, OPTAB_WIDEN);
3443 insns = get_insns ();
3444 end_sequence ();
3445
3446 if (tem)
3447 {
3448 emit_insn (insns);
3449 return extract_high_half (mode, tem);
3450 }
3451 }
3452
3453 /* Try widening multiplication of opposite signedness, and adjust. */
3454 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3455 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3456 && size - 1 < BITS_PER_WORD
3457 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3458 + 4 * add_cost[mode] < max_cost))
3459 {
3460 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3461 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3462 if (tem != 0)
3463 {
3464 tem = extract_high_half (mode, tem);
3465 /* We used the wrong signedness. Adjust the result. */
3466 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3467 target, unsignedp);
3468 }
3469 }
3470
3471 return 0;
3472 }
3473
3474 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3475 putting the high half of the result in TARGET if that is convenient,
3476 and return where the result is. If the operation can not be performed,
3477 0 is returned.
3478
3479 MODE is the mode of operation and result.
3480
3481 UNSIGNEDP nonzero means unsigned multiply.
3482
3483 MAX_COST is the total allowed cost for the expanded RTL. */
3484
3485 static rtx
3486 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3487 rtx target, int unsignedp, int max_cost)
3488 {
3489 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3490 unsigned HOST_WIDE_INT cnst1;
3491 int extra_cost;
3492 bool sign_adjust = false;
3493 enum mult_variant variant;
3494 struct algorithm alg;
3495 rtx tem;
3496
3497 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3498 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3499
3500 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3501
3502 /* We can't optimize modes wider than BITS_PER_WORD.
3503 ??? We might be able to perform double-word arithmetic if
3504 mode == word_mode; however, all the cost calculations in
3505 synth_mult etc. assume single-word operations. */
3506 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3507 return expand_mult_highpart_optab (mode, op0, op1, target,
3508 unsignedp, max_cost);
3509
3510 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3511
3512 /* Check whether we try to multiply by a negative constant. */
3513 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3514 {
3515 sign_adjust = true;
3516 extra_cost += add_cost[mode];
3517 }
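/* The multiplication below uses CNST1, the constant reduced mod
   2^GET_MODE_BITSIZE (mode); a negative signed constant equals
   CNST1 - 2^bits, so the true high part is recovered afterwards by
   subtracting OP0 once.  */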
3518
3519 /* See whether shift/add multiplication is cheap enough. */
3520 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3521 max_cost - extra_cost))
3522 {
3523 /* See whether the specialized multiplication optabs are
3524 cheaper than the shift/add version. */
3525 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3526 alg.cost.cost + extra_cost);
3527 if (tem)
3528 return tem;
3529
3530 tem = convert_to_mode (wider_mode, op0, unsignedp);
3531 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3532 tem = extract_high_half (mode, tem);
3533
3534 /* Adjust result for signedness: a negative constant was multiplied
as CNST1 = constant + 2^bits, which added OP0 to the high half. */
3535 if (sign_adjust)
3536 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3537
3538 return tem;
3539 }
3540 return expand_mult_highpart_optab (mode, op0, op1, target,
3541 unsignedp, max_cost);
3542 }
3543
3544
3545 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3546
3547 static rtx
3548 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3549 {
3550 unsigned HOST_WIDE_INT masklow, maskhigh;
3551 rtx result, temp, shift, label;
3552 int logd;
3553
3554 logd = floor_log2 (d);
3555 result = gen_reg_rtx (mode);
3556
3557 /* Avoid conditional branches when they're expensive. */
3558 if (BRANCH_COST >= 2
3559 && !optimize_size)
3560 {
3561 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3562 mode, 0, -1);
3563 if (signmask)
3564 {
3565 signmask = force_reg (mode, signmask);
3566 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3567 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3568
3569 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3570 which instruction sequence to use. If logical right shifts
3571 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3572 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
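
/* E.g. OP0 = -5 and D = 4 (logd = 2) in a 32-bit mode: signmask = -1,
   so the XOR variant computes ((-5 ^ -1) - -1) & 3 = 1 and then
   (1 ^ -1) - -1 = -1, while the shift variant computes
   signmask >> 30 = 3 and then ((-5 + 3) & 3) - 3 = -1; both give
   -5 rem 4 = -1.  */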
3573
3574 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3575 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3576 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3577 {
3578 temp = expand_binop (mode, xor_optab, op0, signmask,
3579 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3580 temp = expand_binop (mode, sub_optab, temp, signmask,
3581 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3582 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3583 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3584 temp = expand_binop (mode, xor_optab, temp, signmask,
3585 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3586 temp = expand_binop (mode, sub_optab, temp, signmask,
3587 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3588 }
3589 else
3590 {
3591 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3592 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3593 signmask = force_reg (mode, signmask);
3594
3595 temp = expand_binop (mode, add_optab, op0, signmask,
3596 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3597 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3598 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3599 temp = expand_binop (mode, sub_optab, temp, signmask,
3600 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3601 }
3602 return temp;
3603 }
3604 }
3605
3606 /* Mask contains the mode's signbit and the significant bits of the
3607 modulus. By including the signbit in the operation, many targets
3608 can avoid an explicit compare operation in the following comparison
3609 against zero. */
3610
3611 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3612 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3613 {
3614 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3615 maskhigh = -1;
3616 }
3617 else
3618 maskhigh = (HOST_WIDE_INT) -1
3619 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3620
3621 temp = expand_binop (mode, and_optab, op0,
3622 immed_double_const (masklow, maskhigh, mode),
3623 result, 1, OPTAB_LIB_WIDEN);
3624 if (temp != result)
3625 emit_move_insn (result, temp);
3626
3627 label = gen_label_rtx ();
3628 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3629
3630 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3631 0, OPTAB_LIB_WIDEN);
3632 masklow = (HOST_WIDE_INT) -1 << logd;
3633 maskhigh = -1;
3634 temp = expand_binop (mode, ior_optab, temp,
3635 immed_double_const (masklow, maskhigh, mode),
3636 result, 1, OPTAB_LIB_WIDEN);
3637 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3638 0, OPTAB_LIB_WIDEN);
3639 if (temp != result)
3640 emit_move_insn (result, temp);
3641 emit_label (label);
3642 return result;
3643 }
3644
3645 /* Expand signed division of OP0 by a power of two D in mode MODE.
3646 This routine is only called for positive values of D. */
3647
3648 static rtx
3649 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3650 {
3651 rtx temp, label;
3652 tree shift;
3653 int logd;
3654
3655 logd = floor_log2 (d);
3656 shift = build_int_cst (NULL_TREE, logd);
3657
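/* For D = 2, the signed quotient is (OP0 + (OP0 < 0)) >> 1;
   e.g. OP0 = -3 gives (-3 + 1) >> 1 = -1 = trunc (-3 / 2).  */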
3658 if (d == 2 && BRANCH_COST >= 1)
3659 {
3660 temp = gen_reg_rtx (mode);
3661 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3662 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3663 0, OPTAB_LIB_WIDEN);
3664 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3665 }
3666
3667 #ifdef HAVE_conditional_move
3668 if (BRANCH_COST >= 2)
3669 {
3670 rtx temp2;
3671
3672 /* ??? emit_conditional_move forces a stack adjustment via
3673 compare_from_rtx, so if the sequence is discarded, it will
3674 be lost. Do it now instead. */
3675 do_pending_stack_adjust ();
3676
3677 start_sequence ();
3678 temp2 = copy_to_mode_reg (mode, op0);
3679 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3680 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3681 temp = force_reg (mode, temp);
3682
3683 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3684 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3685 mode, temp, temp2, mode, 0);
3686 if (temp2)
3687 {
3688 rtx seq = get_insns ();
3689 end_sequence ();
3690 emit_insn (seq);
3691 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3692 }
3693 end_sequence ();
3694 }
3695 #endif
3696
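/* In general, trunc (OP0 / 2^logd) is (OP0 + (OP0 < 0 ? D - 1 : 0)) >> logd.
   The store-flag below yields a 0 or -1 mask; ANDing it with D - 1, or
   shifting it logically right by GET_MODE_BITSIZE (mode) - logd, produces
   the D - 1 bias exactly when OP0 is negative.  */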
3697 if (BRANCH_COST >= 2)
3698 {
3699 int ushift = GET_MODE_BITSIZE (mode) - logd;
3700
3701 temp = gen_reg_rtx (mode);
3702 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3703 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3704 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3705 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3706 else
3707 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3708 build_int_cst (NULL_TREE, ushift),
3709 NULL_RTX, 1);
3710 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3711 0, OPTAB_LIB_WIDEN);
3712 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3713 }
3714
3715 label = gen_label_rtx ();
3716 temp = copy_to_mode_reg (mode, op0);
3717 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3718 expand_inc (temp, GEN_INT (d - 1));
3719 emit_label (label);
3720 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3721 }
3722 \f
3723 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3724 if that is convenient, and returning where the result is.
3725 You may request either the quotient or the remainder as the result;
3726 specify REM_FLAG nonzero to get the remainder.
3727
3728 CODE is the expression code for which kind of division this is;
3729 it controls how rounding is done. MODE is the machine mode to use.
3730 UNSIGNEDP nonzero means do unsigned division. */
3731
3732 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3733 and then correct it by or'ing in missing high bits
3734 if result of ANDI is nonzero.
3735 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3736 This could optimize to a bfexts instruction.
3737 But C doesn't use these operations, so their optimizations are
3738 left for later. */
3739 /* ??? For modulo, we don't actually need the highpart of the first product,
3740 the low part will do nicely. And for small divisors, the second multiply
3741 can also be a low-part only multiply or even be completely left out.
3742 E.g. to calculate the remainder of a division by 3 with a 32 bit
3743 multiply, multiply with 0x55555556 and extract the upper two bits;
3744 the result is exact for inputs up to 0x1fffffff.
3745 The input range can be reduced by using cross-sum rules.
3746 For odd divisors >= 3, the following table gives right shift counts
3747 so that if a number is shifted by an integer multiple of the given
3748 amount, the remainder stays the same:
3749 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3750 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3751 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3752 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3753 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3754
3755 Cross-sum rules for even numbers can be derived by leaving as many bits
3756 to the right alone as the divisor has zeros to the right.
3757 E.g. if x is an unsigned 32 bit number:
3758 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3759 */
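
/* Continuing the division-by-3 example: for x = 7,
   7 * 0x55555556 mod 2^32 = 0x5555555a, whose top two bits are 01,
   giving 7 mod 3 = 1; a remainder of 0, 1 or 2 shows up as a low
   half near 0x00000000, 0x55555555 or 0xaaaaaaaa respectively.  */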
3760
3761 rtx
3762 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3763 rtx op0, rtx op1, rtx target, int unsignedp)
3764 {
3765 enum machine_mode compute_mode;
3766 rtx tquotient;
3767 rtx quotient = 0, remainder = 0;
3768 rtx last;
3769 int size;
3770 rtx insn, set;
3771 optab optab1, optab2;
3772 int op1_is_constant, op1_is_pow2 = 0;
3773 int max_cost, extra_cost;
3774 static HOST_WIDE_INT last_div_const = 0;
3775 static HOST_WIDE_INT ext_op1;
3776
3777 op1_is_constant = GET_CODE (op1) == CONST_INT;
3778 if (op1_is_constant)
3779 {
3780 ext_op1 = INTVAL (op1);
3781 if (unsignedp)
3782 ext_op1 &= GET_MODE_MASK (mode);
3783 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3784 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3785 }
3786
3787 /*
3788 This is the structure of expand_divmod:
3789
3790 First comes code to fix up the operands so we can perform the operations
3791 correctly and efficiently.
3792
3793 Second comes a switch statement with code specific for each rounding mode.
3794 For some special operands this code emits all RTL for the desired
3795 operation, for other cases, it generates only a quotient and stores it in
3796 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3797 to indicate that it has not done anything.
3798
3799 Last comes code that finishes the operation. If QUOTIENT is set and
3800 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3801 QUOTIENT is not set, it is computed using trunc rounding.
3802
3803 We try to generate special code for division and remainder when OP1 is a
3804 constant. If |OP1| = 2**n we can use shifts and some other fast
3805 operations. For other values of OP1, we compute a carefully selected
3806 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3807 by m.
3808
3809 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3810 half of the product. Different strategies for generating the product are
3811 implemented in expand_mult_highpart.
3812
3813 If what we actually want is the remainder, we generate that by another
3814 by-constant multiplication and a subtraction. */
3815
3816 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3817 code below will malfunction if we are, so check here and handle
3818 the special case if so. */
3819 if (op1 == const1_rtx)
3820 return rem_flag ? const0_rtx : op0;
3821
3822 /* When dividing by -1, we could get an overflow.
3823 negv_optab can handle overflows. */
3824 if (! unsignedp && op1 == constm1_rtx)
3825 {
3826 if (rem_flag)
3827 return const0_rtx;
3828 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3829 ? negv_optab : neg_optab, op0, target, 0);
3830 }
3831
3832 if (target
3833 /* Don't use the function value register as a target
3834 since we have to read it as well as write it,
3835 and function-inlining gets confused by this. */
3836 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3837 /* Don't clobber an operand while doing a multi-step calculation. */
3838 || ((rem_flag || op1_is_constant)
3839 && (reg_mentioned_p (target, op0)
3840 || (MEM_P (op0) && MEM_P (target))))
3841 || reg_mentioned_p (target, op1)
3842 || (MEM_P (op1) && MEM_P (target))))
3843 target = 0;
3844
3845 /* Get the mode in which to perform this computation. Normally it will
3846 be MODE, but sometimes we can't do the desired operation in MODE.
3847 If so, pick a wider mode in which we can do the operation. Convert
3848 to that mode at the start to avoid repeated conversions.
3849
3850 First see what operations we need. These depend on the expression
3851 we are evaluating. (We assume that divxx3 insns exist under the
3852 same conditions as modxx3 insns do, and that these insns don't normally
3853 fail. If these assumptions are not correct, we may generate less
3854 efficient code in some cases.)
3855
3856 Then see if we find a mode in which we can open-code that operation
3857 (either a division, modulus, or shift). Finally, check for the smallest
3858 mode for which we can do the operation with a library call. */
3859
3860 /* We might want to refine this now that we have division-by-constant
3861 optimization. Since expand_mult_highpart tries so many variants, it is
3862 not straightforward to generalize this. Maybe we should make an array
3863 of possible modes in init_expmed? Save this for GCC 2.7. */
3864
3865 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3866 ? (unsignedp ? lshr_optab : ashr_optab)
3867 : (unsignedp ? udiv_optab : sdiv_optab));
3868 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3869 ? optab1
3870 : (unsignedp ? udivmod_optab : sdivmod_optab));
3871
3872 for (compute_mode = mode; compute_mode != VOIDmode;
3873 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3874 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3875 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3876 break;
3877
3878 if (compute_mode == VOIDmode)
3879 for (compute_mode = mode; compute_mode != VOIDmode;
3880 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3881 if (optab1->handlers[compute_mode].libfunc
3882 || optab2->handlers[compute_mode].libfunc)
3883 break;
3884
3885 /* If we still couldn't find a mode, use MODE, but expand_binop will
3886 probably die. */
3887 if (compute_mode == VOIDmode)
3888 compute_mode = mode;
3889
3890 if (target && GET_MODE (target) == compute_mode)
3891 tquotient = target;
3892 else
3893 tquotient = gen_reg_rtx (compute_mode);
3894
3895 size = GET_MODE_BITSIZE (compute_mode);
3896 #if 0
3897 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3898 (mode), and thereby get better code when OP1 is a constant. Do that
3899 later. It will require going over all usages of SIZE below. */
3900 size = GET_MODE_BITSIZE (mode);
3901 #endif
3902
3903 /* Only deduct something for a REM if the last divide done was
3904 for a different constant. Then set the constant of the last
3905 divide. */
3906 max_cost = div_cost[compute_mode]
3907 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3908 && INTVAL (op1) == last_div_const)
3909 ? mul_cost[compute_mode] + add_cost[compute_mode]
3910 : 0);
3911
3912 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3913
3914 /* Now convert to the best mode to use. */
3915 if (compute_mode != mode)
3916 {
3917 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3918 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3919
3920 /* convert_modes may have placed op1 into a register, so we
3921 must recompute the following. */
3922 op1_is_constant = GET_CODE (op1) == CONST_INT;
3923 op1_is_pow2 = (op1_is_constant
3924 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3925 || (! unsignedp
3926 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3927 }
3928
3929 /* If one of the operands is a volatile MEM, copy it into a register. */
3930
3931 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3932 op0 = force_reg (compute_mode, op0);
3933 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3934 op1 = force_reg (compute_mode, op1);
3935
3936 /* If we need the remainder or if OP1 is constant, we need to
3937 put OP0 in a register in case it has any queued subexpressions. */
3938 if (rem_flag || op1_is_constant)
3939 op0 = force_reg (compute_mode, op0);
3940
3941 last = get_last_insn ();
3942
3943 /* Promote floor rounding to trunc rounding for unsigned operations. */
3944 if (unsignedp)
3945 {
3946 if (code == FLOOR_DIV_EXPR)
3947 code = TRUNC_DIV_EXPR;
3948 if (code == FLOOR_MOD_EXPR)
3949 code = TRUNC_MOD_EXPR;
3950 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3951 code = TRUNC_DIV_EXPR;
3952 }
3953
3954 if (op1 != const0_rtx)
3955 switch (code)
3956 {
3957 case TRUNC_MOD_EXPR:
3958 case TRUNC_DIV_EXPR:
3959 if (op1_is_constant)
3960 {
3961 if (unsignedp)
3962 {
3963 unsigned HOST_WIDE_INT mh;
3964 int pre_shift, post_shift;
3965 int dummy;
3966 rtx ml;
3967 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3968 & GET_MODE_MASK (compute_mode));
3969
3970 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3971 {
3972 pre_shift = floor_log2 (d);
3973 if (rem_flag)
3974 {
3975 remainder
3976 = expand_binop (compute_mode, and_optab, op0,
3977 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3978 remainder, 1,
3979 OPTAB_LIB_WIDEN);
3980 if (remainder)
3981 return gen_lowpart (mode, remainder);
3982 }
3983 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3984 build_int_cst (NULL_TREE,
3985 pre_shift),
3986 tquotient, 1);
3987 }
3988 else if (size <= HOST_BITS_PER_WIDE_INT)
3989 {
3990 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3991 {
3992 /* Most significant bit of divisor is set; emit an scc
3993 insn. */
3994 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3995 compute_mode, 1, 1);
3996 if (quotient == 0)
3997 goto fail1;
3998 }
3999 else
4000 {
4001 /* Find a suitable multiplier and right shift count
4002 instead of multiplying with D. */
4003
4004 mh = choose_multiplier (d, size, size,
4005 &ml, &post_shift, &dummy);
4006
4007 /* If the suggested multiplier is more than SIZE bits,
4008 we can do better for even divisors, using an
4009 initial right shift. */
4010 if (mh != 0 && (d & 1) == 0)
4011 {
4012 pre_shift = floor_log2 (d & -d);
4013 mh = choose_multiplier (d >> pre_shift, size,
4014 size - pre_shift,
4015 &ml, &post_shift, &dummy);
4016 gcc_assert (!mh);
4017 }
4018 else
4019 pre_shift = 0;
4020
4021 if (mh != 0)
4022 {
4023 rtx t1, t2, t3, t4;
4024
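/* The full multiplier here is 2^size + ML, so the quotient is
   (T1 + OP0) >> post_shift with T1 the high half of OP0 * ML;
   since T1 + OP0 can carry out of the mode, it is formed as
   T1 + ((OP0 - T1) >> 1) followed by the remaining
   post_shift - 1 bits of shift.  */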
4025 if (post_shift - 1 >= BITS_PER_WORD)
4026 goto fail1;
4027
4028 extra_cost
4029 = (shift_cost[compute_mode][post_shift - 1]
4030 + shift_cost[compute_mode][1]
4031 + 2 * add_cost[compute_mode]);
4032 t1 = expand_mult_highpart (compute_mode, op0, ml,
4033 NULL_RTX, 1,
4034 max_cost - extra_cost);
4035 if (t1 == 0)
4036 goto fail1;
4037 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4038 op0, t1),
4039 NULL_RTX);
4040 t3 = expand_shift
4041 (RSHIFT_EXPR, compute_mode, t2,
4042 build_int_cst (NULL_TREE, 1),
4043 NULL_RTX, 1);
4044 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4045 t1, t3),
4046 NULL_RTX);
4047 quotient = expand_shift
4048 (RSHIFT_EXPR, compute_mode, t4,
4049 build_int_cst (NULL_TREE, post_shift - 1),
4050 tquotient, 1);
4051 }
4052 else
4053 {
4054 rtx t1, t2;
4055
4056 if (pre_shift >= BITS_PER_WORD
4057 || post_shift >= BITS_PER_WORD)
4058 goto fail1;
4059
4060 t1 = expand_shift
4061 (RSHIFT_EXPR, compute_mode, op0,
4062 build_int_cst (NULL_TREE, pre_shift),
4063 NULL_RTX, 1);
4064 extra_cost
4065 = (shift_cost[compute_mode][pre_shift]
4066 + shift_cost[compute_mode][post_shift]);
4067 t2 = expand_mult_highpart (compute_mode, t1, ml,
4068 NULL_RTX, 1,
4069 max_cost - extra_cost);
4070 if (t2 == 0)
4071 goto fail1;
4072 quotient = expand_shift
4073 (RSHIFT_EXPR, compute_mode, t2,
4074 build_int_cst (NULL_TREE, post_shift),
4075 tquotient, 1);
4076 }
4077 }
4078 }
4079 else /* Too wide mode to use tricky code */
4080 break;
4081
4082 insn = get_last_insn ();
4083 if (insn != last
4084 && (set = single_set (insn)) != 0
4085 && SET_DEST (set) == quotient)
4086 set_unique_reg_note (insn,
4087 REG_EQUAL,
4088 gen_rtx_UDIV (compute_mode, op0, op1));
4089 }
4090 else /* TRUNC_DIV, signed */
4091 {
4092 unsigned HOST_WIDE_INT ml;
4093 int lgup, post_shift;
4094 rtx mlr;
4095 HOST_WIDE_INT d = INTVAL (op1);
4096 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
4097
4098 /* n rem d = n rem -d */
4099 if (rem_flag && d < 0)
4100 {
4101 d = abs_d;
4102 op1 = gen_int_mode (abs_d, compute_mode);
4103 }
4104
4105 if (d == 1)
4106 quotient = op0;
4107 else if (d == -1)
4108 quotient = expand_unop (compute_mode, neg_optab, op0,
4109 tquotient, 0);
4110 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4111 {
4112 /* This case is not handled correctly below. */
4113 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4114 compute_mode, 1, 1);
4115 if (quotient == 0)
4116 goto fail1;
4117 }
4118 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4119 && (rem_flag ? smod_pow2_cheap[compute_mode]
4120 : sdiv_pow2_cheap[compute_mode])
4121 /* We assume that the cheap metric is true if the
4122 optab has an expander for this mode. */
4123 && (((rem_flag ? smod_optab : sdiv_optab)
4124 ->handlers[compute_mode].insn_code
4125 != CODE_FOR_nothing)
4126 || (sdivmod_optab->handlers[compute_mode]
4127 .insn_code != CODE_FOR_nothing)))
4128 ;
4129 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4130 {
4131 if (rem_flag)
4132 {
4133 remainder = expand_smod_pow2 (compute_mode, op0, d);
4134 if (remainder)
4135 return gen_lowpart (mode, remainder);
4136 }
4137
4138 if (sdiv_pow2_cheap[compute_mode]
4139 && ((sdiv_optab->handlers[compute_mode].insn_code
4140 != CODE_FOR_nothing)
4141 || (sdivmod_optab->handlers[compute_mode].insn_code
4142 != CODE_FOR_nothing)))
4143 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4144 compute_mode, op0,
4145 gen_int_mode (abs_d,
4146 compute_mode),
4147 NULL_RTX, 0);
4148 else
4149 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4150
4151 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4152 negate the quotient. */
4153 if (d < 0)
4154 {
4155 insn = get_last_insn ();
4156 if (insn != last
4157 && (set = single_set (insn)) != 0
4158 && SET_DEST (set) == quotient
4159 && abs_d < ((unsigned HOST_WIDE_INT) 1
4160 << (HOST_BITS_PER_WIDE_INT - 1)))
4161 set_unique_reg_note (insn,
4162 REG_EQUAL,
4163 gen_rtx_DIV (compute_mode,
4164 op0,
4165 GEN_INT
4166 (trunc_int_for_mode
4167 (abs_d,
4168 compute_mode))));
4169
4170 quotient = expand_unop (compute_mode, neg_optab,
4171 quotient, quotient, 0);
4172 }
4173 }
4174 else if (size <= HOST_BITS_PER_WIDE_INT)
4175 {
4176 choose_multiplier (abs_d, size, size - 1,
4177 &mlr, &post_shift, &lgup);
4178 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4179 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4180 {
4181 rtx t1, t2, t3;
4182
4183 if (post_shift >= BITS_PER_WORD
4184 || size - 1 >= BITS_PER_WORD)
4185 goto fail1;
4186
4187 extra_cost = (shift_cost[compute_mode][post_shift]
4188 + shift_cost[compute_mode][size - 1]
4189 + add_cost[compute_mode]);
4190 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4191 NULL_RTX, 0,
4192 max_cost - extra_cost);
4193 if (t1 == 0)
4194 goto fail1;
4195 t2 = expand_shift
4196 (RSHIFT_EXPR, compute_mode, t1,
4197 build_int_cst (NULL_TREE, post_shift),
4198 NULL_RTX, 0);
4199 t3 = expand_shift
4200 (RSHIFT_EXPR, compute_mode, op0,
4201 build_int_cst (NULL_TREE, size - 1),
4202 NULL_RTX, 0);
4203 if (d < 0)
4204 quotient
4205 = force_operand (gen_rtx_MINUS (compute_mode,
4206 t3, t2),
4207 tquotient);
4208 else
4209 quotient
4210 = force_operand (gen_rtx_MINUS (compute_mode,
4211 t2, t3),
4212 tquotient);
4213 }
4214 else
4215 {
4216 rtx t1, t2, t3, t4;
4217
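/* The multiplier does not fit in size - 1 bits, so it is applied
   as the negative value ML - 2^size; adding OP0 to the high half
   T1 puts back the missing 2^size * OP0 term.  */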
4218 if (post_shift >= BITS_PER_WORD
4219 || size - 1 >= BITS_PER_WORD)
4220 goto fail1;
4221
4222 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4223 mlr = gen_int_mode (ml, compute_mode);
4224 extra_cost = (shift_cost[compute_mode][post_shift]
4225 + shift_cost[compute_mode][size - 1]
4226 + 2 * add_cost[compute_mode]);
4227 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4228 NULL_RTX, 0,
4229 max_cost - extra_cost);
4230 if (t1 == 0)
4231 goto fail1;
4232 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4233 t1, op0),
4234 NULL_RTX);
4235 t3 = expand_shift
4236 (RSHIFT_EXPR, compute_mode, t2,
4237 build_int_cst (NULL_TREE, post_shift),
4238 NULL_RTX, 0);
4239 t4 = expand_shift
4240 (RSHIFT_EXPR, compute_mode, op0,
4241 build_int_cst (NULL_TREE, size - 1),
4242 NULL_RTX, 0);
4243 if (d < 0)
4244 quotient
4245 = force_operand (gen_rtx_MINUS (compute_mode,
4246 t4, t3),
4247 tquotient);
4248 else
4249 quotient
4250 = force_operand (gen_rtx_MINUS (compute_mode,
4251 t3, t4),
4252 tquotient);
4253 }
4254 }
4255 else /* Too wide mode to use tricky code */
4256 break;
4257
4258 insn = get_last_insn ();
4259 if (insn != last
4260 && (set = single_set (insn)) != 0
4261 && SET_DEST (set) == quotient)
4262 set_unique_reg_note (insn,
4263 REG_EQUAL,
4264 gen_rtx_DIV (compute_mode, op0, op1));
4265 }
4266 break;
4267 }
4268 fail1:
4269 delete_insns_since (last);
4270 break;
4271
4272 case FLOOR_DIV_EXPR:
4273 case FLOOR_MOD_EXPR:
4274 /* We will come here only for signed operations. */
4275 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4276 {
4277 unsigned HOST_WIDE_INT mh;
4278 int pre_shift, lgup, post_shift;
4279 HOST_WIDE_INT d = INTVAL (op1);
4280 rtx ml;
4281
4282 if (d > 0)
4283 {
4284 /* We could just as easily deal with negative constants here,
4285 but it does not seem worth the trouble for GCC 2.6. */
4286 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4287 {
4288 pre_shift = floor_log2 (d);
4289 if (rem_flag)
4290 {
4291 remainder = expand_binop (compute_mode, and_optab, op0,
4292 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4293 remainder, 0, OPTAB_LIB_WIDEN);
4294 if (remainder)
4295 return gen_lowpart (mode, remainder);
4296 }
4297 quotient = expand_shift
4298 (RSHIFT_EXPR, compute_mode, op0,
4299 build_int_cst (NULL_TREE, pre_shift),
4300 tquotient, 0);
4301 }
4302 else
4303 {
4304 rtx t1, t2, t3, t4;
4305
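/* Floor division by positive D uses the identity
   floor (OP0 / D) == ~floor (~OP0 / D) for OP0 < 0: XORing OP0
   with its sign mask T1 conditionally complements it, the
   multiply-high sequence divides the nonnegative result, and a
   second XOR with T1 complements the quotient back.  */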
4306 mh = choose_multiplier (d, size, size - 1,
4307 &ml, &post_shift, &lgup);
4308 gcc_assert (!mh);
4309
4310 if (post_shift < BITS_PER_WORD
4311 && size - 1 < BITS_PER_WORD)
4312 {
4313 t1 = expand_shift
4314 (RSHIFT_EXPR, compute_mode, op0,
4315 build_int_cst (NULL_TREE, size - 1),
4316 NULL_RTX, 0);
4317 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4318 NULL_RTX, 0, OPTAB_WIDEN);
4319 extra_cost = (shift_cost[compute_mode][post_shift]
4320 + shift_cost[compute_mode][size - 1]
4321 + 2 * add_cost[compute_mode]);
4322 t3 = expand_mult_highpart (compute_mode, t2, ml,
4323 NULL_RTX, 1,
4324 max_cost - extra_cost);
4325 if (t3 != 0)
4326 {
4327 t4 = expand_shift
4328 (RSHIFT_EXPR, compute_mode, t3,
4329 build_int_cst (NULL_TREE, post_shift),
4330 NULL_RTX, 1);
4331 quotient = expand_binop (compute_mode, xor_optab,
4332 t4, t1, tquotient, 0,
4333 OPTAB_WIDEN);
4334 }
4335 }
4336 }
4337 }
4338 else
4339 {
4340 rtx nsign, t1, t2, t3, t4;
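/* Here D < 0 and OP0 may have either sign.  NSIGN below is -1
   iff OP0 <= 0, so T3 is OP0 - (OP0 > 0) and the floor quotient
   is trunc (T3 / OP1) - (OP0 > 0), the correction being added
   as ~NSIGN.  */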
4341 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4342 op0, constm1_rtx), NULL_RTX);
4343 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4344 0, OPTAB_WIDEN);
4345 nsign = expand_shift
4346 (RSHIFT_EXPR, compute_mode, t2,
4347 build_int_cst (NULL_TREE, size - 1),
4348 NULL_RTX, 0);
4349 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4350 NULL_RTX);
4351 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4352 NULL_RTX, 0);
4353 if (t4)
4354 {
4355 rtx t5;
4356 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4357 NULL_RTX, 0);
4358 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4359 t4, t5),
4360 tquotient);
4361 }
4362 }
4363 }
4364
4365 if (quotient != 0)
4366 break;
4367 delete_insns_since (last);
4368
4369 /* Try using an instruction that produces both the quotient and
4370 remainder, using truncation. We can easily compensate the quotient
4371 or remainder to get floor rounding, once we have the remainder.
4372 Notice that we also compute the final remainder value here,
4373 and return the result right away. */
4374 if (target == 0 || GET_MODE (target) != compute_mode)
4375 target = gen_reg_rtx (compute_mode);
4376
4377 if (rem_flag)
4378 {
4379 remainder
4380 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4381 quotient = gen_reg_rtx (compute_mode);
4382 }
4383 else
4384 {
4385 quotient
4386 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4387 remainder = gen_reg_rtx (compute_mode);
4388 }
4389
4390 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4391 quotient, remainder, 0))
4392 {
4393 /* This could be computed with a branch-less sequence.
4394 Save that for later. */
4395 rtx tem;
4396 rtx label = gen_label_rtx ();
4397 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4398 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4399 NULL_RTX, 0, OPTAB_WIDEN);
4400 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4401 expand_dec (quotient, const1_rtx);
4402 expand_inc (remainder, op1);
4403 emit_label (label);
4404 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4405 }
4406
4407 /* No luck with division elimination or divmod. Have to do it
4408 by conditionally adjusting op0 *and* the result. */
4409 {
4410 rtx label1, label2, label3, label4, label5;
4411 rtx adjusted_op0;
4412 rtx tem;
4413
4414 quotient = gen_reg_rtx (compute_mode);
4415 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4416 label1 = gen_label_rtx ();
4417 label2 = gen_label_rtx ();
4418 label3 = gen_label_rtx ();
4419 label4 = gen_label_rtx ();
4420 label5 = gen_label_rtx ();
4421 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4422 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4423 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4424 quotient, 0, OPTAB_LIB_WIDEN);
4425 if (tem != quotient)
4426 emit_move_insn (quotient, tem);
4427 emit_jump_insn (gen_jump (label5));
4428 emit_barrier ();
4429 emit_label (label1);
4430 expand_inc (adjusted_op0, const1_rtx);
4431 emit_jump_insn (gen_jump (label4));
4432 emit_barrier ();
4433 emit_label (label2);
4434 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4435 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4436 quotient, 0, OPTAB_LIB_WIDEN);
4437 if (tem != quotient)
4438 emit_move_insn (quotient, tem);
4439 emit_jump_insn (gen_jump (label5));
4440 emit_barrier ();
4441 emit_label (label3);
4442 expand_dec (adjusted_op0, const1_rtx);
4443 emit_label (label4);
4444 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4445 quotient, 0, OPTAB_LIB_WIDEN);
4446 if (tem != quotient)
4447 emit_move_insn (quotient, tem);
4448 expand_dec (quotient, const1_rtx);
4449 emit_label (label5);
4450 }
4451 break;
4452
4453 case CEIL_DIV_EXPR:
4454 case CEIL_MOD_EXPR:
4455 if (unsignedp)
4456 {
4457 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4458 {
4459 rtx t1, t2, t3;
4460 unsigned HOST_WIDE_INT d = INTVAL (op1);
4461 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4462 build_int_cst (NULL_TREE, floor_log2 (d)),
4463 tquotient, 1);
4464 t2 = expand_binop (compute_mode, and_optab, op0,
4465 GEN_INT (d - 1),
4466 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4467 t3 = gen_reg_rtx (compute_mode);
4468 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4469 compute_mode, 1, 1);
4470 if (t3 == 0)
4471 {
4472 rtx lab;
4473 lab = gen_label_rtx ();
4474 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4475 expand_inc (t1, const1_rtx);
4476 emit_label (lab);
4477 quotient = t1;
4478 }
4479 else
4480 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4481 t1, t3),
4482 tquotient);
4483 break;
4484 }
4485
4486 /* Try using an instruction that produces both the quotient and
4487 remainder, using truncation. We can easily compensate the
4488 quotient or remainder to get ceiling rounding, once we have the
4489 remainder. Notice that we also compute the final remainder
4490 value here, and return the result right away. */
4491 if (target == 0 || GET_MODE (target) != compute_mode)
4492 target = gen_reg_rtx (compute_mode);
4493
4494 if (rem_flag)
4495 {
4496 remainder = (REG_P (target)
4497 ? target : gen_reg_rtx (compute_mode));
4498 quotient = gen_reg_rtx (compute_mode);
4499 }
4500 else
4501 {
4502 quotient = (REG_P (target)
4503 ? target : gen_reg_rtx (compute_mode));
4504 remainder = gen_reg_rtx (compute_mode);
4505 }
4506
4507 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4508 remainder, 1))
4509 {
4510 /* This could be computed with a branch-less sequence.
4511 Save that for later. */
4512 rtx label = gen_label_rtx ();
4513 do_cmp_and_jump (remainder, const0_rtx, EQ,
4514 compute_mode, label);
4515 expand_inc (quotient, const1_rtx);
4516 expand_dec (remainder, op1);
4517 emit_label (label);
4518 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4519 }
4520
4521 /* No luck with division elimination or divmod. Have to do it
4522 by conditionally adjusting op0 *and* the result. */
4523 {
4524 rtx label1, label2;
4525 rtx adjusted_op0, tem;
4526
4527 quotient = gen_reg_rtx (compute_mode);
4528 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4529 label1 = gen_label_rtx ();
4530 label2 = gen_label_rtx ();
4531 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4532 compute_mode, label1);
4533 emit_move_insn (quotient, const0_rtx);
4534 emit_jump_insn (gen_jump (label2));
4535 emit_barrier ();
4536 emit_label (label1);
4537 expand_dec (adjusted_op0, const1_rtx);
4538 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4539 quotient, 1, OPTAB_LIB_WIDEN);
4540 if (tem != quotient)
4541 emit_move_insn (quotient, tem);
4542 expand_inc (quotient, const1_rtx);
4543 emit_label (label2);
4544 }
4545 }
4546 else /* signed */
4547 {
4548 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4549 && INTVAL (op1) >= 0)
4550 {
4551 /* This is extremely similar to the code for the unsigned case
4552 above. For 2.7 we should merge these variants, but for
4553 2.6.1 I don't want to touch the code for unsigned since that
4554 gets used in C. The signed case will only be used by other
4555 languages (Ada). */
4556
4557 rtx t1, t2, t3;
4558 unsigned HOST_WIDE_INT d = INTVAL (op1);
4559 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4560 build_int_cst (NULL_TREE, floor_log2 (d)),
4561 tquotient, 0);
4562 t2 = expand_binop (compute_mode, and_optab, op0,
4563 GEN_INT (d - 1),
4564 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4565 t3 = gen_reg_rtx (compute_mode);
4566 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4567 compute_mode, 1, 1);
4568 if (t3 == 0)
4569 {
4570 rtx lab;
4571 lab = gen_label_rtx ();
4572 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4573 expand_inc (t1, const1_rtx);
4574 emit_label (lab);
4575 quotient = t1;
4576 }
4577 else
4578 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4579 t1, t3),
4580 tquotient);
4581 break;
4582 }
4583
4584 /* Try using an instruction that produces both the quotient and
4585 remainder, using truncation. We can easily compensate the
4586 quotient or remainder to get ceiling rounding, once we have the
4587 remainder. Notice that we also compute the final remainder
4588 value here, and return the result right away. */
4589 if (target == 0 || GET_MODE (target) != compute_mode)
4590 target = gen_reg_rtx (compute_mode);
4591 if (rem_flag)
4592 {
4593 remainder = (REG_P (target)
4594 ? target : gen_reg_rtx (compute_mode));
4595 quotient = gen_reg_rtx (compute_mode);
4596 }
4597 else
4598 {
4599 quotient = (REG_P (target)
4600 ? target : gen_reg_rtx (compute_mode));
4601 remainder = gen_reg_rtx (compute_mode);
4602 }
4603
4604 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4605 remainder, 0))
4606 {
4607 /* This could be computed with a branch-less sequence.
4608 Save that for later. */
4609 rtx tem;
4610 rtx label = gen_label_rtx ();
4611 do_cmp_and_jump (remainder, const0_rtx, EQ,
4612 compute_mode, label);
4613 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4614 NULL_RTX, 0, OPTAB_WIDEN);
4615 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4616 expand_inc (quotient, const1_rtx);
4617 expand_dec (remainder, op1);
4618 emit_label (label);
4619 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4620 }
4621
4622 /* No luck with division elimination or divmod. Have to do it
4623 by conditionally adjusting op0 *and* the result. */
4624 {
4625 rtx label1, label2, label3, label4, label5;
4626 rtx adjusted_op0;
4627 rtx tem;
4628
4629 quotient = gen_reg_rtx (compute_mode);
4630 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4631 label1 = gen_label_rtx ();
4632 label2 = gen_label_rtx ();
4633 label3 = gen_label_rtx ();
4634 label4 = gen_label_rtx ();
4635 label5 = gen_label_rtx ();
4636 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4637 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4638 compute_mode, label1);
4639 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4640 quotient, 0, OPTAB_LIB_WIDEN);
4641 if (tem != quotient)
4642 emit_move_insn (quotient, tem);
4643 emit_jump_insn (gen_jump (label5));
4644 emit_barrier ();
4645 emit_label (label1);
4646 expand_dec (adjusted_op0, const1_rtx);
4647 emit_jump_insn (gen_jump (label4));
4648 emit_barrier ();
4649 emit_label (label2);
4650 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4651 compute_mode, label3);
4652 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4653 quotient, 0, OPTAB_LIB_WIDEN);
4654 if (tem != quotient)
4655 emit_move_insn (quotient, tem);
4656 emit_jump_insn (gen_jump (label5));
4657 emit_barrier ();
4658 emit_label (label3);
4659 expand_inc (adjusted_op0, const1_rtx);
4660 emit_label (label4);
4661 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4662 quotient, 0, OPTAB_LIB_WIDEN);
4663 if (tem != quotient)
4664 emit_move_insn (quotient, tem);
4665 expand_inc (quotient, const1_rtx);
4666 emit_label (label5);
4667 }
4668 }
4669 break;
4670
4671 case EXACT_DIV_EXPR:
4672 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4673 {
4674 HOST_WIDE_INT d = INTVAL (op1);
4675 unsigned HOST_WIDE_INT ml;
4676 int pre_shift;
4677 rtx t1;
4678
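/* E.g. an exact 32-bit division by D = 6: pre_shift = 1 and
   ml = invert_mod2n (3, 32) = 0xaaaaaaab, so the quotient is
   (OP0 >> 1) * 0xaaaaaaab (mod 2^32); for OP0 = 42 that is
   21 * 0xaaaaaaab mod 2^32 = 7.  */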
4679 pre_shift = floor_log2 (d & -d);
4680 ml = invert_mod2n (d >> pre_shift, size);
4681 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4682 build_int_cst (NULL_TREE, pre_shift),
4683 NULL_RTX, unsignedp);
4684 quotient = expand_mult (compute_mode, t1,
4685 gen_int_mode (ml, compute_mode),
4686 NULL_RTX, 1);
4687
4688 insn = get_last_insn ();
4689 set_unique_reg_note (insn,
4690 REG_EQUAL,
4691 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4692 compute_mode,
4693 op0, op1));
4694 }
4695 break;
4696
4697 case ROUND_DIV_EXPR:
4698 case ROUND_MOD_EXPR:
4699 if (unsignedp)
4700 {
4701 rtx tem;
4702 rtx label;
4703 label = gen_label_rtx ();
4704 quotient = gen_reg_rtx (compute_mode);
4705 remainder = gen_reg_rtx (compute_mode);
4706 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4707 {
4708 rtx tem;
4709 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4710 quotient, 1, OPTAB_LIB_WIDEN);
4711 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4712 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4713 remainder, 1, OPTAB_LIB_WIDEN);
4714 }
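/* Convert the truncating quotient to round-to-nearest: increment
   it (and take OP1 back out of the remainder) unless the remainder
   is at most floor ((OP1 - 1) / 2), so halves round upward.  */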
4715 tem = plus_constant (op1, -1);
4716 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4717 build_int_cst (NULL_TREE, 1),
4718 NULL_RTX, 1);
4719 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4720 expand_inc (quotient, const1_rtx);
4721 expand_dec (remainder, op1);
4722 emit_label (label);
4723 }
4724 else
4725 {
4726 rtx abs_rem, abs_op1, tem, mask;
4727 rtx label;
4728 label = gen_label_rtx ();
4729 quotient = gen_reg_rtx (compute_mode);
4730 remainder = gen_reg_rtx (compute_mode);
4731 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4732 {
4733 rtx tem;
4734 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4735 quotient, 0, OPTAB_LIB_WIDEN);
4736 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4737 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4738 remainder, 0, OPTAB_LIB_WIDEN);
4739 }
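/* Signed round-to-nearest: MASK is -1 iff OP0 and OP1 differ in
   sign, so (MASK ^ 1) - MASK is +1 or -1 and (MASK ^ OP1) - MASK
   is OP1 or -OP1; when 2 * |remainder| >= |OP1|, the quotient is
   moved one further from zero and the remainder adjusted by the
   matching signed OP1.  */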
4740 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4741 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4742 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4743 build_int_cst (NULL_TREE, 1),
4744 NULL_RTX, 1);
4745 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4746 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4747 NULL_RTX, 0, OPTAB_WIDEN);
4748 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4749 build_int_cst (NULL_TREE, size - 1),
4750 NULL_RTX, 0);
4751 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4752 NULL_RTX, 0, OPTAB_WIDEN);
4753 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4754 NULL_RTX, 0, OPTAB_WIDEN);
4755 expand_inc (quotient, tem);
4756 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4757 NULL_RTX, 0, OPTAB_WIDEN);
4758 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4759 NULL_RTX, 0, OPTAB_WIDEN);
4760 expand_dec (remainder, tem);
4761 emit_label (label);
4762 }
4763 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4764
4765 default:
4766 gcc_unreachable ();
4767 }
4768
4769 if (quotient == 0)
4770 {
4771 if (target && GET_MODE (target) != compute_mode)
4772 target = 0;
4773
4774 if (rem_flag)
4775 {
4776 /* Try to produce the remainder without producing the quotient.
4777 If we seem to have a divmod pattern that does not require widening,
4778 don't try widening here. We should really have a WIDEN argument
4779 to expand_twoval_binop, since what we'd really like to do here is
4780 1) try a mod insn in compute_mode
4781 2) try a divmod insn in compute_mode
4782 3) try a div insn in compute_mode and multiply-subtract to get
4783 remainder
4784 4) try the same things with widening allowed. */
4785 remainder
4786 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4787 op0, op1, target,
4788 unsignedp,
4789 ((optab2->handlers[compute_mode].insn_code
4790 != CODE_FOR_nothing)
4791 ? OPTAB_DIRECT : OPTAB_WIDEN));
4792 if (remainder == 0)
4793 {
4794 /* No luck there. Can we do remainder and divide at once
4795 without a library call? */
4796 remainder = gen_reg_rtx (compute_mode);
4797 if (! expand_twoval_binop ((unsignedp
4798 ? udivmod_optab
4799 : sdivmod_optab),
4800 op0, op1,
4801 NULL_RTX, remainder, unsignedp))
4802 remainder = 0;
4803 }
4804
4805 if (remainder)
4806 return gen_lowpart (mode, remainder);
4807 }
4808
4809 /* Produce the quotient. Try a quotient insn, but not a library call.
4810 If we have a divmod in this mode, use it in preference to widening
4811 the div (for this test we assume it will not fail). Note that optab2
4812 is set to the one of the two optabs that the call below will use. */
4813 quotient
4814 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4815 op0, op1, rem_flag ? NULL_RTX : target,
4816 unsignedp,
4817 ((optab2->handlers[compute_mode].insn_code
4818 != CODE_FOR_nothing)
4819 ? OPTAB_DIRECT : OPTAB_WIDEN));
4820
4821 if (quotient == 0)
4822 {
4823 /* No luck there. Try a quotient-and-remainder insn,
4824 keeping the quotient alone. */
4825 quotient = gen_reg_rtx (compute_mode);
4826 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4827 op0, op1,
4828 quotient, NULL_RTX, unsignedp))
4829 {
4830 quotient = 0;
4831 if (! rem_flag)
4832 /* Still no luck. If we are not computing the remainder,
4833 use a library call for the quotient. */
4834 quotient = sign_expand_binop (compute_mode,
4835 udiv_optab, sdiv_optab,
4836 op0, op1, target,
4837 unsignedp, OPTAB_LIB_WIDEN);
4838 }
4839 }
4840 }
4841
4842 if (rem_flag)
4843 {
4844 if (target && GET_MODE (target) != compute_mode)
4845 target = 0;
4846
4847 if (quotient == 0)
4848 {
4849 /* No divide instruction either. Use library for remainder. */
4850 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4851 op0, op1, target,
4852 unsignedp, OPTAB_LIB_WIDEN);
4853 /* No remainder function. Try a quotient-and-remainder
4854 function, keeping the remainder. */
4855 if (!remainder)
4856 {
4857 remainder = gen_reg_rtx (compute_mode);
4858 if (!expand_twoval_binop_libfunc
4859 (unsignedp ? udivmod_optab : sdivmod_optab,
4860 op0, op1,
4861 NULL_RTX, remainder,
4862 unsignedp ? UMOD : MOD))
4863 remainder = NULL_RTX;
4864 }
4865 }
4866 else
4867 {
4868 /* We divided. Now finish doing X - Y * (X / Y). */
4869 remainder = expand_mult (compute_mode, quotient, op1,
4870 NULL_RTX, unsignedp);
4871 remainder = expand_binop (compute_mode, sub_optab, op0,
4872 remainder, target, unsignedp,
4873 OPTAB_LIB_WIDEN);
4874 }
4875 }
4876
4877 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4878 }
4879 \f
4880 /* Return a tree node with data type TYPE, describing the value of X.
4881 Usually this is a VAR_DECL, if there is no obvious better choice.
4882 X may be an expression; however, we only support those expressions
4883 generated by loop.c. */
4884
4885 tree
4886 make_tree (tree type, rtx x)
4887 {
4888 tree t;
4889
4890 switch (GET_CODE (x))
4891 {
4892 case CONST_INT:
4893 {
4894 HOST_WIDE_INT hi = 0;
4895
4896 if (INTVAL (x) < 0
4897 && !(TYPE_UNSIGNED (type)
4898 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4899 < HOST_BITS_PER_WIDE_INT)))
4900 hi = -1;
4901
4902 t = build_int_cst_wide (type, INTVAL (x), hi);
4903
4904 return t;
4905 }
4906
4907 case CONST_DOUBLE:
4908 if (GET_MODE (x) == VOIDmode)
4909 t = build_int_cst_wide (type,
4910 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4911 else
4912 {
4913 REAL_VALUE_TYPE d;
4914
4915 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4916 t = build_real (type, d);
4917 }
4918
4919 return t;
4920
4921 case CONST_VECTOR:
4922 {
4923 int i, units;
4924 rtx elt;
4925 tree t = NULL_TREE;
4926
4927 units = CONST_VECTOR_NUNITS (x);
4928
4929 /* Build a tree with vector elements. */
4930 for (i = units - 1; i >= 0; --i)
4931 {
4932 elt = CONST_VECTOR_ELT (x, i);
4933 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4934 }
4935
4936 return build_vector (type, t);
4937 }
4938
4939 case PLUS:
4940 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4941 make_tree (type, XEXP (x, 1)));
4942
4943 case MINUS:
4944 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4945 make_tree (type, XEXP (x, 1)));
4946
4947 case NEG:
4948 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4949
4950 case MULT:
4951 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4952 make_tree (type, XEXP (x, 1)));
4953
4954 case ASHIFT:
4955 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4956 make_tree (type, XEXP (x, 1)));
4957
4958 case LSHIFTRT:
4959 t = lang_hooks.types.unsigned_type (type);
4960 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4961 make_tree (t, XEXP (x, 0)),
4962 make_tree (type, XEXP (x, 1))));
4963
4964 case ASHIFTRT:
4965 t = lang_hooks.types.signed_type (type);
4966 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4967 make_tree (t, XEXP (x, 0)),
4968 make_tree (type, XEXP (x, 1))));
4969
4970 case DIV:
4971 if (TREE_CODE (type) != REAL_TYPE)
4972 t = lang_hooks.types.signed_type (type);
4973 else
4974 t = type;
4975
4976 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4977 make_tree (t, XEXP (x, 0)),
4978 make_tree (t, XEXP (x, 1))));
4979 case UDIV:
4980 t = lang_hooks.types.unsigned_type (type);
4981 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4982 make_tree (t, XEXP (x, 0)),
4983 make_tree (t, XEXP (x, 1))));
4984
4985 case SIGN_EXTEND:
4986 case ZERO_EXTEND:
4987 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4988 GET_CODE (x) == ZERO_EXTEND);
4989 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4990
4991 default:
4992 t = build_decl (VAR_DECL, NULL_TREE, type);
4993
4994 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4995 ptr_mode. So convert. */
4996 if (POINTER_TYPE_P (type))
4997 x = convert_memory_address (TYPE_MODE (type), x);
4998
4999 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5000 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5001 t->decl_with_rtl.rtl = x;
5002
5003 return t;
5004 }
5005 }
5006
5007 /* Check whether the multiplication X * MULT + ADD overflows.
5008 X, MULT and ADD must be CONST_*.
5009 MODE is the machine mode for the computation.
5010 X and MULT must have mode MODE. ADD may have a different mode
5011 (if its mode is VOIDmode, it defaults to MODE).
5012 UNSIGNEDP is nonzero to do unsigned multiplication. */
5013
5014 bool
5015 const_mult_add_overflow_p (rtx x, rtx mult, rtx add,
5016 enum machine_mode mode, int unsignedp)
5017 {
5018 tree type, mult_type, add_type, result;
5019
5020 type = lang_hooks.types.type_for_mode (mode, unsignedp);
5021
5022 /* In order to get a proper overflow indication from an unsigned
5023 type, we have to pretend that it's a sizetype. */
5024 mult_type = type;
5025 if (unsignedp)
5026 {
5027 /* FIXME: It would be nice if we could step directly from this
5028 type to its sizetype equivalent. */
5029 mult_type = build_distinct_type_copy (type);
5030 TYPE_IS_SIZETYPE (mult_type) = 1;
5031 }
5032
5033 add_type = (GET_MODE (add) == VOIDmode ? mult_type
5034 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
5035
5036 result = fold_build2 (PLUS_EXPR, mult_type,
5037 fold_build2 (MULT_EXPR, mult_type,
5038 make_tree (mult_type, x),
5039 make_tree (mult_type, mult)),
5040 make_tree (add_type, add));
5041
5042 return TREE_CONSTANT_OVERFLOW (result);
5043 }
5044
5045 /* Return an rtx representing the value of X * MULT + ADD.
5046 TARGET is a suggestion for where to store the result (an rtx).
5047 MODE is the machine mode for the computation.
5048 X and MULT must have mode MODE. ADD may have a different mode
5049 (if its mode is VOIDmode, it defaults to MODE).
5050 UNSIGNEDP is nonzero to do unsigned multiplication.
5051 This may emit insns. */
5052
5053 rtx
5054 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
5055 int unsignedp)
5056 {
5057 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
5058 tree add_type = (GET_MODE (add) == VOIDmode
5059 ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
5060 unsignedp));
5061 tree result = fold_build2 (PLUS_EXPR, type,
5062 fold_build2 (MULT_EXPR, type,
5063 make_tree (type, x),
5064 make_tree (type, mult)),
5065 make_tree (add_type, add));
5066
5067 return expand_expr (result, target, VOIDmode, 0);
5068 }
5069 \f
5070 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5071 and returning TARGET.
5072
5073 If TARGET is 0, a pseudo-register or constant is returned. */
5074
5075 rtx
5076 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5077 {
5078 rtx tem = 0;
5079
5080 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5081 tem = simplify_binary_operation (AND, mode, op0, op1);
5082 if (tem == 0)
5083 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5084
5085 if (target == 0)
5086 target = tem;
5087 else if (tem != target)
5088 emit_move_insn (target, tem);
5089 return target;
5090 }
5091 \f
5092 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5093 and storing in TARGET. Normally return TARGET.
5094 Return 0 if that cannot be done.
5095
5096 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5097 it is VOIDmode, they cannot both be CONST_INT.
5098
5099 UNSIGNEDP is for the case where we have to widen the operands
5100 to perform the operation. It says to use zero-extension.
5101
5102 NORMALIZEP is 1 if we should convert the result to be either zero
5103 or one.  NORMALIZEP is -1 if we should convert the result to be
5104 either zero or -1. If NORMALIZEP is zero, the result will be left
5105 "raw" out of the scc insn. */
5106
5107 rtx
5108 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5109 enum machine_mode mode, int unsignedp, int normalizep)
5110 {
5111 rtx subtarget;
5112 enum insn_code icode;
5113 enum machine_mode compare_mode;
5114 enum machine_mode target_mode = GET_MODE (target);
5115 rtx tem;
5116 rtx last = get_last_insn ();
5117 rtx pattern, comparison;
5118
5119 if (unsignedp)
5120 code = unsigned_condition (code);
5121
5122 /* If one operand is constant, make it the second one. Only do this
5123 if the other operand is not constant as well. */
5124
5125 if (swap_commutative_operands_p (op0, op1))
5126 {
5127 tem = op0;
5128 op0 = op1;
5129 op1 = tem;
5130 code = swap_condition (code);
5131 }
5132
5133 if (mode == VOIDmode)
5134 mode = GET_MODE (op0);
5135
5136 /* For some comparisons with 1 and -1, we can convert them to
5137 comparisons with zero. This will often produce more opportunities for
5138 store-flag insns. */
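/* These rewrites are exact for integer operands: A < 1 holds
   precisely when A <= 0, and the unsigned A >= 1 (GEU) holds
   precisely when A != 0, since unsigned values are never below
   zero.  */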
5139
5140 switch (code)
5141 {
5142 case LT:
5143 if (op1 == const1_rtx)
5144 op1 = const0_rtx, code = LE;
5145 break;
5146 case LE:
5147 if (op1 == constm1_rtx)
5148 op1 = const0_rtx, code = LT;
5149 break;
5150 case GE:
5151 if (op1 == const1_rtx)
5152 op1 = const0_rtx, code = GT;
5153 break;
5154 case GT:
5155 if (op1 == constm1_rtx)
5156 op1 = const0_rtx, code = GE;
5157 break;
5158 case GEU:
5159 if (op1 == const1_rtx)
5160 op1 = const0_rtx, code = NE;
5161 break;
5162 case LTU:
5163 if (op1 == const1_rtx)
5164 op1 = const0_rtx, code = EQ;
5165 break;
5166 default:
5167 break;
5168 }
5169
5170 /* If we are comparing a double-word integer with zero or -1, we can
5171 convert the comparison into one involving a single word. */
5172 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5173 && GET_MODE_CLASS (mode) == MODE_INT
5174 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5175 {
5176 if ((code == EQ || code == NE)
5177 && (op1 == const0_rtx || op1 == constm1_rtx))
5178 {
5179 rtx op00, op01, op0both;
5180
5181 /* Do a logical OR or AND of the two words and compare the result. */
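/* For instance, a double-word value is zero exactly when the
   inclusive OR of its two words is zero, and is -1 exactly when
   the AND of its two words is all ones, so one word_mode
   store-flag comparison suffices.  */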
5182 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5183 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5184 op0both = expand_binop (word_mode,
5185 op1 == const0_rtx ? ior_optab : and_optab,
5186 op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
5187
5188 if (op0both != 0)
5189 return emit_store_flag (target, code, op0both, op1, word_mode,
5190 unsignedp, normalizep);
5191 }
5192 else if ((code == LT || code == GE) && op1 == const0_rtx)
5193 {
5194 rtx op0h;
5195
5196 /* If testing the sign bit, can just test on high word. */
5197 op0h = simplify_gen_subreg (word_mode, op0, mode,
5198 subreg_highpart_offset (word_mode, mode));
5199 return emit_store_flag (target, code, op0h, op1, word_mode,
5200 unsignedp, normalizep);
5201 }
5202 }
5203
5204 /* From now on, we won't change CODE, so set ICODE now. */
5205 icode = setcc_gen_code[(int) code];
5206
5207 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5208 complement of A (for GE) and shifting the sign bit to the low bit. */
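/* An 8-bit illustration: A = -3 is 0xfd, so a logical shift right
   by 7 yields 1 (the 0/1 form of A < 0) and an arithmetic shift
   yields 0xff, i.e. -1 (the -1/0 form).  For A >= 0 we first take
   ~A, which flips the sign bit, and shift that instead.  */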
5209 if (op1 == const0_rtx && (code == LT || code == GE)
5210 && GET_MODE_CLASS (mode) == MODE_INT
5211 && (normalizep || STORE_FLAG_VALUE == 1
5212 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5213 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5214 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
5215 {
5216 subtarget = target;
5217
5218 /* If the result is to be wider than OP0, it is best to convert it
5219 first. If it is to be narrower, it is *incorrect* to convert it
5220 first. */
5221 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5222 {
5223 op0 = convert_modes (target_mode, mode, op0, 0);
5224 mode = target_mode;
5225 }
5226
5227 if (target_mode != mode)
5228 subtarget = 0;
5229
5230 if (code == GE)
5231 op0 = expand_unop (mode, one_cmpl_optab, op0,
5232 ((STORE_FLAG_VALUE == 1 || normalizep)
5233 ? 0 : subtarget), 0);
5234
5235 if (STORE_FLAG_VALUE == 1 || normalizep)
5236 /* If we are supposed to produce a 0/1 value, we want to do
5237 a logical shift from the sign bit to the low-order bit; for
5238 a -1/0 value, we do an arithmetic shift. */
5239 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5240 size_int (GET_MODE_BITSIZE (mode) - 1),
5241 subtarget, normalizep != -1);
5242
5243 if (mode != target_mode)
5244 op0 = convert_modes (target_mode, mode, op0, 0);
5245
5246 return op0;
5247 }
5248
5249 if (icode != CODE_FOR_nothing)
5250 {
5251 insn_operand_predicate_fn pred;
5252
5253 /* We think we may be able to do this with a scc insn. Emit the
5254 comparison and then the scc insn. */
5255
5256 do_pending_stack_adjust ();
5257 last = get_last_insn ();
5258
5259 comparison
5260 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5261 if (CONSTANT_P (comparison))
5262 {
5263 switch (GET_CODE (comparison))
5264 {
5265 case CONST_INT:
5266 if (comparison == const0_rtx)
5267 return const0_rtx;
5268 break;
5269
5270 #ifdef FLOAT_STORE_FLAG_VALUE
5271 case CONST_DOUBLE:
5272 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5273 return const0_rtx;
5274 break;
5275 #endif
5276 default:
5277 gcc_unreachable ();
5278 }
5279
5280 if (normalizep == 1)
5281 return const1_rtx;
5282 if (normalizep == -1)
5283 return constm1_rtx;
5284 return const_true_rtx;
5285 }
5286
5287 /* The code of COMPARISON may not match CODE if compare_from_rtx
5288 decided to swap its operands and reverse the original code.
5289
5290 We know that compare_from_rtx returns either a CONST_INT or
5291 a new comparison code, so it is safe to just extract the
5292 code from COMPARISON. */
5293 code = GET_CODE (comparison);
5294
5295 /* Get a reference to the target in the proper mode for this insn. */
5296 compare_mode = insn_data[(int) icode].operand[0].mode;
5297 subtarget = target;
5298 pred = insn_data[(int) icode].operand[0].predicate;
5299 if (optimize || ! (*pred) (subtarget, compare_mode))
5300 subtarget = gen_reg_rtx (compare_mode);
5301
5302 pattern = GEN_FCN (icode) (subtarget);
5303 if (pattern)
5304 {
5305 emit_insn (pattern);
5306
5307 /* If we are converting to a wider mode, first convert to
5308 TARGET_MODE, then normalize. This produces better combining
5309 opportunities on machines that have a SIGN_EXTRACT when we are
5310 testing a single bit. This mostly benefits the 68k.
5311
5312 If STORE_FLAG_VALUE does not have the sign bit set when
5313 interpreted in COMPARE_MODE, we can do this conversion as
5314 unsigned, which is usually more efficient. */
5315 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5316 {
5317 convert_move (target, subtarget,
5318 (GET_MODE_BITSIZE (compare_mode)
5319 <= HOST_BITS_PER_WIDE_INT)
5320 && 0 == (STORE_FLAG_VALUE
5321 & ((HOST_WIDE_INT) 1
5322 << (GET_MODE_BITSIZE (compare_mode) - 1))));
5323 op0 = target;
5324 compare_mode = target_mode;
5325 }
5326 else
5327 op0 = subtarget;
5328
5329 /* If we want to keep subexpressions around, don't reuse our
5330 last target. */
5331
5332 if (optimize)
5333 subtarget = 0;
5334
5335 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
5336 we don't have to do anything. */
5337 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5338 ;
5339 /* STORE_FLAG_VALUE might be the most negative number, so write
5340 the comparison this way to avoid a compile-time warning. */
5341 else if (- normalizep == STORE_FLAG_VALUE)
5342 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
5343
5344 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
5345 makes it hard to use a value of just the sign bit due to
5346 ANSI integer constant typing rules. */
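/* E.g. with a 32-bit COMPARE_MODE and STORE_FLAG_VALUE 0x80000000,
   shifting right by 31 yields 0/1 under a logical shift
   (normalizep == 1) and 0/-1 under an arithmetic shift.  */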
5347 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
5348 && (STORE_FLAG_VALUE
5349 & ((HOST_WIDE_INT) 1
5350 << (GET_MODE_BITSIZE (compare_mode) - 1))))
5351 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
5352 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
5353 subtarget, normalizep == 1);
5354 else
5355 {
5356 gcc_assert (STORE_FLAG_VALUE & 1);
5357
5358 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
5359 if (normalizep == -1)
5360 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
5361 }
5362
5363 /* If we were converting to a smaller mode, do the
5364 conversion now. */
5365 if (target_mode != compare_mode)
5366 {
5367 convert_move (target, op0, 0);
5368 return target;
5369 }
5370 else
5371 return op0;
5372 }
5373 }
5374
5375 delete_insns_since (last);
5376
5377 /* If optimizing, use different pseudo registers for each insn, instead
5378 of reusing the same pseudo. This leads to better CSE, but slows
5379 down the compiler, since there are more pseudos.  */
5380 subtarget = (!optimize
5381 && (target_mode == mode)) ? target : NULL_RTX;
5382
5383 /* If we reached here, we can't do this with a scc insn. However, there
5384 are some comparisons that can be done directly. For example, if
5385 this is an equality comparison of integers, we can try to exclusive-or
5386 (or subtract) the two operands and use a recursive call to try the
5387 comparison with zero. Don't do any of these cases if branches are
5388 very cheap. */
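/* That is, A == B holds exactly when A ^ B == 0, and likewise
   when A - B == 0 in modular arithmetic, so either combined
   value can be tested against zero by the recursive call.  */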
5389
5390 if (BRANCH_COST > 0
5391 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5392 && op1 != const0_rtx)
5393 {
5394 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5395 OPTAB_WIDEN);
5396
5397 if (tem == 0)
5398 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5399 OPTAB_WIDEN);
5400 if (tem != 0)
5401 tem = emit_store_flag (target, code, tem, const0_rtx,
5402 mode, unsignedp, normalizep);
5403 if (tem == 0)
5404 delete_insns_since (last);
5405 return tem;
5406 }
5407
5408 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5409 the constant zero. Reject all other comparisons at this point. Only
5410 do LE and GT if branches are expensive since they are expensive on
5411 2-operand machines. */
5412
5413 if (BRANCH_COST == 0
5414 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5415 || (code != EQ && code != NE
5416 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5417 return 0;
5418
5419 /* See what we need to return. We can only return a 1, -1, or the
5420 sign bit. */
5421
5422 if (normalizep == 0)
5423 {
5424 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5425 normalizep = STORE_FLAG_VALUE;
5426
5427 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5428 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5429 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5430 ;
5431 else
5432 return 0;
5433 }
5434
5435 /* Try to put the result of the comparison in the sign bit.  Start by
5436 assuming we cannot do the necessary operation; TEM stays zero then. */
5437
5438 tem = 0;
5439
5440 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5441 the sign bit set. */
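/* An 8-bit illustration: A = 0 gives 0 | 0xff = 0xff (sign bit set,
   A <= 0 holds); A = 5 gives 5 | 4 = 5 (sign bit clear); A = -3,
   i.e. 0xfd, gives 0xfd | 0xfc = 0xfd (sign bit set).  */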
5442
5443 if (code == LE)
5444 {
5445 /* This is destructive, so SUBTARGET can't be OP0. */
5446 if (rtx_equal_p (subtarget, op0))
5447 subtarget = 0;
5448
5449 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5450 OPTAB_WIDEN);
5451 if (tem)
5452 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5453 OPTAB_WIDEN);
5454 }
5455
5456 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5457 number of bits in the mode of OP0, minus one. */
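/* An 8-bit illustration with BITS = 7: A = 5 gives (5 >> 7) - 5
   = -5 (sign bit set, A > 0 holds); A = -3 gives (-3 >> 7) + 3
   = 2 (sign bit clear); A = 0 gives 0 (sign bit clear).  */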
5458
5459 if (code == GT)
5460 {
5461 if (rtx_equal_p (subtarget, op0))
5462 subtarget = 0;
5463
5464 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5465 size_int (GET_MODE_BITSIZE (mode) - 1),
5466 subtarget, 0);
5467 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5468 OPTAB_WIDEN);
5469 }
5470
5471 if (code == EQ || code == NE)
5472 {
5473 /* For EQ or NE, one way to do the comparison is to apply an operation
5474 that converts the operand into a positive number if it is nonzero,
5475 or into zero if it was originally zero.  Then, for EQ, we subtract 1 and
5476 for NE we negate. This puts the result in the sign bit. Then we
5477 normalize with a shift, if needed.
5478
5479 Two operations that can do the above actions are ABS and FFS, so try
5480 them. If that doesn't work, and MODE is smaller than a full word,
5481 we can use zero-extension to the wider mode (an unsigned conversion)
5482 as the operation. */
5483
5484 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5485 that is compensated by the subsequent overflow when subtracting
5486 one / negating. */
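/* For instance, with A = 3, ABS gives 3: negating yields -3 (sign
   bit set, so A != 0 is true), while subtracting 1 yields 2 (sign
   bit clear, so A == 0 is false).  With A = 0, ABS and FFS both
   give 0, so NE yields 0 and EQ yields -1, whose sign bit is set. */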
5487
5488 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5489 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5490 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5491 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5492 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5493 {
5494 tem = convert_modes (word_mode, mode, op0, 1);
5495 mode = word_mode;
5496 }
5497
5498 if (tem != 0)
5499 {
5500 if (code == EQ)
5501 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5502 0, OPTAB_WIDEN);
5503 else
5504 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5505 }
5506
5507 /* If we couldn't do it that way, for NE we can "or" the two's complement
5508 of the value with itself. For EQ, we take the one's complement of
5509 that "or", which is an extra insn, so we only handle EQ if branches
5510 are expensive. */
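/* An 8-bit illustration: A = 5 gives -5 | 5 = 0xfb | 0x05 = 0xff
   (sign bit set, A != 0); A = 0 gives 0 | 0 = 0 (sign bit clear).
   For EQ, the subsequent one's complement flips the sign bit.  */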
5511
5512 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5513 {
5514 if (rtx_equal_p (subtarget, op0))
5515 subtarget = 0;
5516
5517 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5518 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5519 OPTAB_WIDEN);
5520
5521 if (tem && code == EQ)
5522 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5523 }
5524 }
5525
5526 if (tem && normalizep)
5527 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5528 size_int (GET_MODE_BITSIZE (mode) - 1),
5529 subtarget, normalizep == 1);
5530
5531 if (tem)
5532 {
5533 if (GET_MODE (tem) != target_mode)
5534 {
5535 convert_move (target, tem, 0);
5536 tem = target;
5537 }
5538 else if (!subtarget)
5539 {
5540 emit_move_insn (target, tem);
5541 tem = target;
5542 }
5543 }
5544 else
5545 delete_insns_since (last);
5546
5547 return tem;
5548 }
5549
5550 /* Like emit_store_flag, but always succeeds. */
5551
5552 rtx
5553 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5554 enum machine_mode mode, int unsignedp, int normalizep)
5555 {
5556 rtx tem, label;
5557
5558 /* First see if emit_store_flag can do the job. */
5559 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5560 if (tem != 0)
5561 return tem;
5562
5563 if (normalizep == 0)
5564 normalizep = 1;
5565
5566 /* If this failed, we have to do this with set/compare/jump/set code. */
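/* Schematically, the insns emitted below are

     target = 1;
     if (op0 <code> op1) goto label;
     target = 0;
   label:

   so TARGET ends up 1 when the comparison holds and 0 otherwise.  */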
5567
5568 if (!REG_P (target)
5569 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5570 target = gen_reg_rtx (GET_MODE (target));
5571
5572 emit_move_insn (target, const1_rtx);
5573 label = gen_label_rtx ();
5574 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5575 NULL_RTX, label);
5576
5577 emit_move_insn (target, const0_rtx);
5578 emit_label (label);
5579
5580 return target;
5581 }
5582 \f
5583 /* Perform possibly multi-word comparison and conditional jump to LABEL
5584 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
5585
5586 The algorithm is based on the code in expr.c:do_jump.
5587
5588 Note that this does not perform a general comparison. Only
5589 variants generated within expmed.c are handled correctly; other
5590 variants could be added if needed. */
5591
5592 static void
5593 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5594 rtx label)
5595 {
5596 /* If this mode is an integer too wide to compare properly,
5597 compare word by word. Rely on cse to optimize constant cases. */
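/* For example, the LTU case below tests ARG1 < ARG2 by asking
   do_jump_by_parts_greater_rtx whether ARG2 > ARG1, which compares
   the operands a word at a time starting from the most significant
   word and falls through to the less significant words only on
   equality.  */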
5598
5599 if (GET_MODE_CLASS (mode) == MODE_INT
5600 && ! can_compare_p (op, mode, ccp_jump))
5601 {
5602 rtx label2 = gen_label_rtx ();
5603
5604 switch (op)
5605 {
5606 case LTU:
5607 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
5608 break;
5609
5610 case LEU:
5611 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
5612 break;
5613
5614 case LT:
5615 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
5616 break;
5617
5618 case GT:
5619 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
5620 break;
5621
5622 case GE:
5623 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
5624 break;
5625
5626 /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
5627 that is the only equality operation we do.  */
5628 case EQ:
5629 gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
5630 do_jump_by_parts_equality_rtx (arg1, label2, label);
5631 break;
5632
5633 case NE:
5634 gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
5635 do_jump_by_parts_equality_rtx (arg1, label, label2);
5636 break;
5637
5638 default:
5639 gcc_unreachable ();
5640 }
5641
5642 emit_label (label2);
5643 }
5644 else
5645 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
5646 }