re PR target/19690 (ICE with -O3 -march=athlon-xp -mfpmath=sse -mno-80387)
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "optabs.h"
36 #include "real.h"
37 #include "recog.h"
38 #include "langhooks.h"
39
40 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
41 unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT, rtx);
43 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
44 unsigned HOST_WIDE_INT, rtx);
45 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
46 unsigned HOST_WIDE_INT,
47 unsigned HOST_WIDE_INT,
48 unsigned HOST_WIDE_INT, rtx, int);
49 static rtx mask_rtx (enum machine_mode, int, int, int);
50 static rtx lshift_value (enum machine_mode, rtx, int, int);
51 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, int);
53 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
54 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
55 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
56
57 /* Test whether a value is zero or a power of two. */
58 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
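/* For example, 8 is 1000 in binary and 8 - 1 is 0111, so (8 & 7) == 0,
   while 6 is 0110 and 6 - 1 is 0101, so (6 & 5) == 4, which is nonzero.
   Zero itself trivially passes the test, hence the _OR_ZERO in the name. */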
59
60 /* Nonzero means divides or modulus operations are relatively cheap for
61 powers of two, so don't use branches; emit the operation instead.
62 Usually, this will mean that the MD file will emit non-branch
63 sequences. */
64
65 static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
66 static bool smod_pow2_cheap[NUM_MACHINE_MODES];
67
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
71
72 /* For compilers that support multiple targets with different word sizes,
73 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
74 is the H8/300(H) compiler. */
75
76 #ifndef MAX_BITS_PER_WORD
77 #define MAX_BITS_PER_WORD BITS_PER_WORD
78 #endif
79
80 /* Reduce conditional compilation elsewhere. */
81 #ifndef HAVE_insv
82 #define HAVE_insv 0
83 #define CODE_FOR_insv CODE_FOR_nothing
84 #define gen_insv(a,b,c,d) NULL_RTX
85 #endif
86 #ifndef HAVE_extv
87 #define HAVE_extv 0
88 #define CODE_FOR_extv CODE_FOR_nothing
89 #define gen_extv(a,b,c,d) NULL_RTX
90 #endif
91 #ifndef HAVE_extzv
92 #define HAVE_extzv 0
93 #define CODE_FOR_extzv CODE_FOR_nothing
94 #define gen_extzv(a,b,c,d) NULL_RTX
95 #endif
96
97 /* Cost of various pieces of RTL. Note that some of these are indexed by
98 shift count and some by mode. */
99 static int zero_cost;
100 static int add_cost[NUM_MACHINE_MODES];
101 static int neg_cost[NUM_MACHINE_MODES];
102 static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
103 static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
104 static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int mul_cost[NUM_MACHINE_MODES];
106 static int div_cost[NUM_MACHINE_MODES];
107 static int mul_widen_cost[NUM_MACHINE_MODES];
108 static int mul_highpart_cost[NUM_MACHINE_MODES];
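/* For instance, shift_cost[m][n] is the rtx cost of shifting a value of
   mode m left by n bits, and shiftadd_cost[m][n] is the cost of computing
   (x << n) + y as a multiply by 2^n followed by an add; both tables are
   filled in by init_expmed below. */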
109
110 void
111 init_expmed (void)
112 {
113 struct
114 {
115 struct rtx_def reg; rtunion reg_fld[2];
116 struct rtx_def plus; rtunion plus_fld1;
117 struct rtx_def neg;
118 struct rtx_def udiv; rtunion udiv_fld1;
119 struct rtx_def mult; rtunion mult_fld1;
120 struct rtx_def div; rtunion div_fld1;
121 struct rtx_def mod; rtunion mod_fld1;
122 struct rtx_def zext;
123 struct rtx_def wide_mult; rtunion wide_mult_fld1;
124 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
125 struct rtx_def wide_trunc;
126 struct rtx_def shift; rtunion shift_fld1;
127 struct rtx_def shift_mult; rtunion shift_mult_fld1;
128 struct rtx_def shift_add; rtunion shift_add_fld1;
129 struct rtx_def shift_sub; rtunion shift_sub_fld1;
130 } all;
131
132 rtx pow2[MAX_BITS_PER_WORD];
133 rtx cint[MAX_BITS_PER_WORD];
134 int m, n;
135 enum machine_mode mode, wider_mode;
136
137 zero_cost = rtx_cost (const0_rtx, 0);
138
139 for (m = 1; m < MAX_BITS_PER_WORD; m++)
140 {
141 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
142 cint[m] = GEN_INT (m);
143 }
144
145 memset (&all, 0, sizeof all);
146
147 PUT_CODE (&all.reg, REG);
148 REGNO (&all.reg) = 10000;
149
150 PUT_CODE (&all.plus, PLUS);
151 XEXP (&all.plus, 0) = &all.reg;
152 XEXP (&all.plus, 1) = &all.reg;
153
154 PUT_CODE (&all.neg, NEG);
155 XEXP (&all.neg, 0) = &all.reg;
156
157 PUT_CODE (&all.udiv, UDIV);
158 XEXP (&all.udiv, 0) = &all.reg;
159 XEXP (&all.udiv, 1) = &all.reg;
160
161 PUT_CODE (&all.mult, MULT);
162 XEXP (&all.mult, 0) = &all.reg;
163 XEXP (&all.mult, 1) = &all.reg;
164
165 PUT_CODE (&all.div, DIV);
166 XEXP (&all.div, 0) = &all.reg;
167 XEXP (&all.div, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
168
169 PUT_CODE (&all.mod, MOD);
170 XEXP (&all.mod, 0) = &all.reg;
171 XEXP (&all.mod, 1) = XEXP (&all.div, 1);
172
173 PUT_CODE (&all.zext, ZERO_EXTEND);
174 XEXP (&all.zext, 0) = &all.reg;
175
176 PUT_CODE (&all.wide_mult, MULT);
177 XEXP (&all.wide_mult, 0) = &all.zext;
178 XEXP (&all.wide_mult, 1) = &all.zext;
179
180 PUT_CODE (&all.wide_lshr, LSHIFTRT);
181 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
182
183 PUT_CODE (&all.wide_trunc, TRUNCATE);
184 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
185
186 PUT_CODE (&all.shift, ASHIFT);
187 XEXP (&all.shift, 0) = &all.reg;
188
189 PUT_CODE (&all.shift_mult, MULT);
190 XEXP (&all.shift_mult, 0) = &all.reg;
191
192 PUT_CODE (&all.shift_add, PLUS);
193 XEXP (&all.shift_add, 0) = &all.shift_mult;
194 XEXP (&all.shift_add, 1) = &all.reg;
195
196 PUT_CODE (&all.shift_sub, MINUS);
197 XEXP (&all.shift_sub, 0) = &all.shift_mult;
198 XEXP (&all.shift_sub, 1) = &all.reg;
199
200 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
201 mode != VOIDmode;
202 mode = GET_MODE_WIDER_MODE (mode))
203 {
204 PUT_MODE (&all.reg, mode);
205 PUT_MODE (&all.plus, mode);
206 PUT_MODE (&all.neg, mode);
207 PUT_MODE (&all.udiv, mode);
208 PUT_MODE (&all.mult, mode);
209 PUT_MODE (&all.div, mode);
210 PUT_MODE (&all.mod, mode);
211 PUT_MODE (&all.wide_trunc, mode);
212 PUT_MODE (&all.shift, mode);
213 PUT_MODE (&all.shift_mult, mode);
214 PUT_MODE (&all.shift_add, mode);
215 PUT_MODE (&all.shift_sub, mode);
216
217 add_cost[mode] = rtx_cost (&all.plus, SET);
218 neg_cost[mode] = rtx_cost (&all.neg, SET);
219 div_cost[mode] = rtx_cost (&all.udiv, SET);
220 mul_cost[mode] = rtx_cost (&all.mult, SET);
221
222 sdiv_pow2_cheap[mode] = (rtx_cost (&all.div, SET) <= 2 * add_cost[mode]);
223 smod_pow2_cheap[mode] = (rtx_cost (&all.mod, SET) <= 4 * add_cost[mode]);
224
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
227 {
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
232
233 mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
234 mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
235 }
236
237 shift_cost[mode][0] = 0;
238 shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];
239
240 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
241 for (m = 1; m < n; m++)
242 {
243 XEXP (&all.shift, 1) = cint[m];
244 XEXP (&all.shift_mult, 1) = pow2[m];
245
246 shift_cost[mode][m] = rtx_cost (&all.shift, SET);
247 shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
248 shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
249 }
250 }
251 }
252
253 /* Return an rtx representing minus the value of X.
254 MODE is the intended mode of the result,
255 useful if X is a CONST_INT. */
256
257 rtx
258 negate_rtx (enum machine_mode mode, rtx x)
259 {
260 rtx result = simplify_unary_operation (NEG, mode, x, mode);
261
262 if (result == 0)
263 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
264
265 return result;
266 }
267
268 /* Report on the availability of insv/extv/extzv and the desired mode
269 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
270 is false; else the mode of the specified operand. If OPNO is -1,
271 all the caller cares about is whether the insn is available. */
272 enum machine_mode
273 mode_for_extraction (enum extraction_pattern pattern, int opno)
274 {
275 const struct insn_data *data;
276
277 switch (pattern)
278 {
279 case EP_insv:
280 if (HAVE_insv)
281 {
282 data = &insn_data[CODE_FOR_insv];
283 break;
284 }
285 return MAX_MACHINE_MODE;
286
287 case EP_extv:
288 if (HAVE_extv)
289 {
290 data = &insn_data[CODE_FOR_extv];
291 break;
292 }
293 return MAX_MACHINE_MODE;
294
295 case EP_extzv:
296 if (HAVE_extzv)
297 {
298 data = &insn_data[CODE_FOR_extzv];
299 break;
300 }
301 return MAX_MACHINE_MODE;
302
303 default:
304 gcc_unreachable ();
305 }
306
307 if (opno == -1)
308 return VOIDmode;
309
310 /* Everyone who uses this function used to follow it with
311 if (result == VOIDmode) result = word_mode; */
312 if (data->operand[opno].mode == VOIDmode)
313 return word_mode;
314 return data->operand[opno].mode;
315 }
316
317 \f
318 /* Generate code to store value from rtx VALUE
319 into a bit-field within structure STR_RTX
320 containing BITSIZE bits starting at bit BITNUM.
321 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
322 ALIGN is the alignment that STR_RTX is known to have.
323 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
324
325 /* ??? Note that there are two different ideas here for how
326 to determine the size to count bits within, for a register.
327 One is BITS_PER_WORD, and the other is the size of operand 3
328 of the insv pattern.
329
330 If operand 3 of the insv pattern is VOIDmode, then we will use
331 BITS_PER_WORD; otherwise, we use the mode of operand 3. */
332
333 rtx
334 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
335 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
336 rtx value)
337 {
338 unsigned int unit
339 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
340 unsigned HOST_WIDE_INT offset = bitnum / unit;
341 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
342 rtx op0 = str_rtx;
343 int byte_offset;
344 rtx orig_value;
345
346 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
347
348 while (GET_CODE (op0) == SUBREG)
349 {
350 /* The following line once was done only if WORDS_BIG_ENDIAN,
351 but I think that is a mistake. WORDS_BIG_ENDIAN is
352 meaningful at a much higher level; when structures are copied
353 between memory and regs, the higher-numbered regs
354 always get higher addresses. */
355 offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
356 /* We used to adjust BITPOS here, but now we do the whole adjustment
357 right after the loop. */
358 op0 = SUBREG_REG (op0);
359 }
360
361 /* Use vec_set patterns for inserting parts of vectors whenever
362 available. */
363 if (VECTOR_MODE_P (GET_MODE (op0))
364 && !MEM_P (op0)
365 && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
366 != CODE_FOR_nothing)
367 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
368 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
369 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
370 {
371 enum machine_mode outermode = GET_MODE (op0);
372 enum machine_mode innermode = GET_MODE_INNER (outermode);
373 int icode = (int) vec_set_optab->handlers[outermode].insn_code;
374 int pos = bitnum / GET_MODE_BITSIZE (innermode);
375 rtx rtxpos = GEN_INT (pos);
376 rtx src = value;
377 rtx dest = op0;
378 rtx pat, seq;
379 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
380 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
381 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
382
383 start_sequence ();
384
385 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
386 src = copy_to_mode_reg (mode1, src);
387
388 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
389 rtxpos = copy_to_mode_reg (mode2, rtxpos);
390
391 /* We could handle this, but we should always be called with a pseudo
392 for our targets and all insns should take them as outputs. */
393 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
394 && (*insn_data[icode].operand[1].predicate) (src, mode1)
395 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
396 pat = GEN_FCN (icode) (dest, src, rtxpos);
397 seq = get_insns ();
398 end_sequence ();
399 if (pat)
400 {
401 emit_insn (seq);
402 emit_insn (pat);
403 return dest;
404 }
405 }
406
407 if (flag_force_mem)
408 {
409 int old_generating_concat_p = generating_concat_p;
410 generating_concat_p = 0;
411 value = force_not_mem (value);
412 generating_concat_p = old_generating_concat_p;
413 }
414
415 /* If the target is a register, overwriting the entire object, or storing
416 a full-word or multi-word field can be done with just a SUBREG.
417
418 If the target is memory, storing any naturally aligned field can be
419 done with a simple store. For targets that support fast unaligned
420 memory, any naturally sized, unit aligned field can be done directly. */
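/* For example, on a 32-bit target, storing an SImode value at a
   word-aligned bit position within a DImode pseudo register satisfies
   the test below and is emitted as a single SUBREG move rather than as
   mask-and-shift code. */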
421
422 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
423 + (offset * UNITS_PER_WORD);
424
425 if (bitpos == 0
426 && bitsize == GET_MODE_BITSIZE (fieldmode)
427 && (!MEM_P (op0)
428 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
429 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
430 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
431 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
432 || (offset * BITS_PER_UNIT % bitsize == 0
433 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
434 {
435 if (GET_MODE (op0) != fieldmode)
436 {
437 if (MEM_P (op0))
438 op0 = adjust_address (op0, fieldmode, offset);
439 else
440 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
441 byte_offset);
442 }
443 emit_move_insn (op0, value);
444 return value;
445 }
446
447 /* Make sure we are playing with integral modes. Pun with subregs
448 if we aren't. This must come after the entire register case above,
449 since that case is valid for any mode. The following cases are only
450 valid for integral modes. */
451 {
452 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
453 if (imode != GET_MODE (op0))
454 {
455 if (MEM_P (op0))
456 op0 = adjust_address (op0, imode, 0);
457 else
458 {
459 gcc_assert (imode != BLKmode);
460 op0 = gen_lowpart (imode, op0);
461 }
462 }
463 }
464
465 /* We may be accessing data outside the field, which means
466 we can alias adjacent data. */
467 if (MEM_P (op0))
468 {
469 op0 = shallow_copy_rtx (op0);
470 set_mem_alias_set (op0, 0);
471 set_mem_expr (op0, 0);
472 }
473
474 /* If OP0 is a register, BITPOS must count within a word.
475 But as we have it, it counts within whatever size OP0 now has.
476 On a bigendian machine, these are not the same, so convert. */
477 if (BYTES_BIG_ENDIAN
478 && !MEM_P (op0)
479 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
480 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
481
482 /* Storing an lsb-aligned field in a register
483 can be done with a movestrict instruction. */
484
485 if (!MEM_P (op0)
486 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
487 && bitsize == GET_MODE_BITSIZE (fieldmode)
488 && (movstrict_optab->handlers[fieldmode].insn_code
489 != CODE_FOR_nothing))
490 {
491 int icode = movstrict_optab->handlers[fieldmode].insn_code;
492
493 /* Get appropriate low part of the value being stored. */
494 if (GET_CODE (value) == CONST_INT || REG_P (value))
495 value = gen_lowpart (fieldmode, value);
496 else if (!(GET_CODE (value) == SYMBOL_REF
497 || GET_CODE (value) == LABEL_REF
498 || GET_CODE (value) == CONST))
499 value = convert_to_mode (fieldmode, value, 0);
500
501 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
502 value = copy_to_mode_reg (fieldmode, value);
503
504 if (GET_CODE (op0) == SUBREG)
505 {
506 /* Else we've got some float mode source being extracted into
507 a different float mode destination -- this combination of
508 subregs results in Severe Tire Damage. */
509 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
510 || GET_MODE_CLASS (fieldmode) == MODE_INT
511 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
512 op0 = SUBREG_REG (op0);
513 }
514
515 emit_insn (GEN_FCN (icode)
516 (gen_rtx_SUBREG (fieldmode, op0,
517 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
518 + (offset * UNITS_PER_WORD)),
519 value));
520
521 return value;
522 }
523
524 /* Handle fields bigger than a word. */
525
526 if (bitsize > BITS_PER_WORD)
527 {
528 /* Here we transfer the words of the field
529 in the order least significant first.
530 This is because the most significant word is the one which may
531 be less than full.
532 However, only do that if the value is not BLKmode. */
533
534 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
535 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
536 unsigned int i;
537
538 /* This is the mode we must force value to, so that there will be enough
539 subwords to extract. Note that fieldmode will often (always?) be
540 VOIDmode, because that is what store_field uses to indicate that this
541 is a bit field, but passing VOIDmode to operand_subword_force will
542 result in an abort. */
543 fieldmode = GET_MODE (value);
544 if (fieldmode == VOIDmode)
545 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
546
547 for (i = 0; i < nwords; i++)
548 {
549 /* If I is 0, use the low-order word in both field and target;
550 if I is 1, use the next to lowest word; and so on. */
551 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
552 unsigned int bit_offset = (backwards
553 ? MAX ((int) bitsize - ((int) i + 1)
554 * BITS_PER_WORD,
555 0)
556 : (int) i * BITS_PER_WORD);
557
558 store_bit_field (op0, MIN (BITS_PER_WORD,
559 bitsize - i * BITS_PER_WORD),
560 bitnum + bit_offset, word_mode,
561 operand_subword_force (value, wordnum, fieldmode));
562 }
563 return value;
564 }
565
566 /* From here on we can assume that the field to be stored in fits
567 within a single word, since it is shorter than a word. */
568
569 /* OFFSET is the number of words or bytes (UNIT says which)
570 from STR_RTX to the first word or byte containing part of the field. */
571
572 if (!MEM_P (op0))
573 {
574 if (offset != 0
575 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
576 {
577 if (!REG_P (op0))
578 {
579 /* Since this is a destination (lvalue), we can't copy it to a
580 pseudo. We can trivially remove a SUBREG that does not
581 change the size of the operand. Such a SUBREG may have been
582 added above. Otherwise, abort. */
583 gcc_assert (GET_CODE (op0) == SUBREG
584 && (GET_MODE_SIZE (GET_MODE (op0))
585 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
586 op0 = SUBREG_REG (op0);
587 }
588 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
589 op0, (offset * UNITS_PER_WORD));
590 }
591 offset = 0;
592 }
593
594 /* If VALUE has a floating-point or complex mode, access it as an
595 integer of the corresponding size. This can occur on a machine
596 with 64 bit registers that uses SFmode for float. It can also
597 occur for unaligned float or complex fields. */
598 orig_value = value;
599 if (GET_MODE (value) != VOIDmode
600 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
601 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
602 {
603 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
604 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
605 }
606
607 /* Now OFFSET is nonzero only if OP0 is memory
608 and is therefore always measured in bytes. */
609
610 if (HAVE_insv
611 && GET_MODE (value) != BLKmode
612 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
613 /* Ensure insv's size is wide enough for this field. */
614 && (GET_MODE_BITSIZE (op_mode) >= bitsize)
615 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
616 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
617 {
618 int xbitpos = bitpos;
619 rtx value1;
620 rtx xop0 = op0;
621 rtx last = get_last_insn ();
622 rtx pat;
623 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
624 int save_volatile_ok = volatile_ok;
625
626 volatile_ok = 1;
627
628 /* If this machine's insv can only insert into a register, copy OP0
629 into a register and save it back later. */
630 /* This used to check flag_force_mem, but that was a serious
631 de-optimization now that flag_force_mem is enabled by -O2. */
632 if (MEM_P (op0)
633 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
634 (op0, VOIDmode)))
635 {
636 rtx tempreg;
637 enum machine_mode bestmode;
638
639 /* Get the mode to use for inserting into this field. If OP0 is
640 BLKmode, get the smallest mode consistent with the alignment. If
641 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
642 mode. Otherwise, use the smallest mode containing the field. */
643
644 if (GET_MODE (op0) == BLKmode
645 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
646 bestmode
647 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
648 MEM_VOLATILE_P (op0));
649 else
650 bestmode = GET_MODE (op0);
651
652 if (bestmode == VOIDmode
653 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
654 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
655 goto insv_loses;
656
657 /* Adjust address to point to the containing unit of that mode.
658 Compute offset as multiple of this unit, counting in bytes. */
659 unit = GET_MODE_BITSIZE (bestmode);
660 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
661 bitpos = bitnum % unit;
662 op0 = adjust_address (op0, bestmode, offset);
663
664 /* Fetch that unit, store the bitfield in it, then store
665 the unit. */
666 tempreg = copy_to_reg (op0);
667 store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
668 emit_move_insn (op0, tempreg);
669 return value;
670 }
671 volatile_ok = save_volatile_ok;
672
673 /* Add OFFSET into OP0's address. */
674 if (MEM_P (xop0))
675 xop0 = adjust_address (xop0, byte_mode, offset);
676
677 /* If xop0 is a register, we need it in MAXMODE
678 to make it acceptable to the format of insv. */
679 if (GET_CODE (xop0) == SUBREG)
680 /* We can't just change the mode, because this might clobber op0,
681 and we will need the original value of op0 if insv fails. */
682 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
683 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
684 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
685
686 /* On big-endian machines, we count bits from the most significant.
687 If the bit field insn does not, we must invert. */
688
689 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
690 xbitpos = unit - bitsize - xbitpos;
691
692 /* We have been counting XBITPOS within UNIT.
693 Count instead within the size of the register. */
694 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
695 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
696
697 unit = GET_MODE_BITSIZE (maxmode);
698
699 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
700 value1 = value;
701 if (GET_MODE (value) != maxmode)
702 {
703 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
704 {
705 /* Optimization: Don't bother really extending VALUE
706 if it has all the bits we will actually use. However,
707 if we must narrow it, be sure we do it correctly. */
708
709 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
710 {
711 rtx tmp;
712
713 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
714 if (! tmp)
715 tmp = simplify_gen_subreg (maxmode,
716 force_reg (GET_MODE (value),
717 value1),
718 GET_MODE (value), 0);
719 value1 = tmp;
720 }
721 else
722 value1 = gen_lowpart (maxmode, value1);
723 }
724 else if (GET_CODE (value) == CONST_INT)
725 value1 = gen_int_mode (INTVAL (value), maxmode);
726 else
727 /* Parse phase is supposed to make VALUE's data type
728 match that of the component reference, which is a type
729 at least as wide as the field; so VALUE should have
730 a mode that corresponds to that type. */
731 gcc_assert (CONSTANT_P (value));
732 }
733
734 /* If this machine's insv insists on a register,
735 get VALUE1 into a register. */
736 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
737 (value1, maxmode)))
738 value1 = force_reg (maxmode, value1);
739
740 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
741 if (pat)
742 emit_insn (pat);
743 else
744 {
745 delete_insns_since (last);
746 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
747 }
748 }
749 else
750 insv_loses:
751 /* Insv is not available; store using shifts and boolean ops. */
752 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
753 return value;
754 }
755 \f
756 /* Use shifts and boolean operations to store VALUE
757 into a bit field of width BITSIZE
758 in a memory location specified by OP0 except offset by OFFSET bytes.
759 (OFFSET must be 0 if OP0 is a register.)
760 The field starts at position BITPOS within the byte.
761 (If OP0 is a register, it may be a full word or a narrower mode,
762 but BITPOS still counts within a full word,
763 which is significant on bigendian machines.) */
764
765 static void
766 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
767 unsigned HOST_WIDE_INT bitsize,
768 unsigned HOST_WIDE_INT bitpos, rtx value)
769 {
770 enum machine_mode mode;
771 unsigned int total_bits = BITS_PER_WORD;
772 rtx subtarget, temp;
773 int all_zero = 0;
774 int all_one = 0;
775
776 /* There is a case not handled here:
777 a structure with a known alignment of just a halfword
778 and a field split across two aligned halfwords within the structure.
779 Or likewise a structure with a known alignment of just a byte
780 and a field split across two bytes.
781 Such cases are not supposed to be able to occur. */
782
783 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
784 {
785 gcc_assert (!offset);
786 /* Special treatment for a bit field split across two registers. */
787 if (bitsize + bitpos > BITS_PER_WORD)
788 {
789 store_split_bit_field (op0, bitsize, bitpos, value);
790 return;
791 }
792 }
793 else
794 {
795 /* Get the proper mode to use for this field. We want a mode that
796 includes the entire field. If such a mode would be larger than
797 a word, we won't be doing the extraction the normal way.
798 We don't want a mode bigger than the destination. */
799
800 mode = GET_MODE (op0);
801 if (GET_MODE_BITSIZE (mode) == 0
802 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
803 mode = word_mode;
804 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
805 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
806
807 if (mode == VOIDmode)
808 {
809 /* The only way this should occur is if the field spans word
810 boundaries. */
811 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
812 value);
813 return;
814 }
815
816 total_bits = GET_MODE_BITSIZE (mode);
817
818 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
819 be in the range 0 to total_bits-1, and put any excess bytes in
820 OFFSET. */
821 if (bitpos >= total_bits)
822 {
823 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
824 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
825 * BITS_PER_UNIT);
826 }
827
828 /* Get ref to an aligned byte, halfword, or word containing the field.
829 Adjust BITPOS to be position within a word,
830 and OFFSET to be the offset of that word.
831 Then alter OP0 to refer to that word. */
832 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
833 offset -= (offset % (total_bits / BITS_PER_UNIT));
834 op0 = adjust_address (op0, mode, offset);
835 }
836
837 mode = GET_MODE (op0);
838
839 /* Now MODE is either some integral mode for a MEM as OP0,
840 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
841 The bit field is contained entirely within OP0.
842 BITPOS is the starting bit number within OP0.
843 (OP0's mode may actually be narrower than MODE.) */
844
845 if (BYTES_BIG_ENDIAN)
846 /* BITPOS is the distance between our msb
847 and that of the containing datum.
848 Convert it to the distance from the lsb. */
849 bitpos = total_bits - bitsize - bitpos;
850
851 /* Now BITPOS is always the distance between our lsb
852 and that of OP0. */
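/* For example, with total_bits == 32, bitsize == 8 and a big-endian
   bitpos of 4 (counted from the msb), the converted little-endian
   bitpos is 32 - 8 - 4 == 20. */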
853
854 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
855 we must first convert its mode to MODE. */
856
857 if (GET_CODE (value) == CONST_INT)
858 {
859 HOST_WIDE_INT v = INTVAL (value);
860
861 if (bitsize < HOST_BITS_PER_WIDE_INT)
862 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
863
864 if (v == 0)
865 all_zero = 1;
866 else if ((bitsize < HOST_BITS_PER_WIDE_INT
867 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
868 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
869 all_one = 1;
870
871 value = lshift_value (mode, value, bitpos, bitsize);
872 }
873 else
874 {
875 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
876 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
877
878 if (GET_MODE (value) != mode)
879 {
880 if ((REG_P (value) || GET_CODE (value) == SUBREG)
881 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
882 value = gen_lowpart (mode, value);
883 else
884 value = convert_to_mode (mode, value, 1);
885 }
886
887 if (must_and)
888 value = expand_binop (mode, and_optab, value,
889 mask_rtx (mode, 0, bitsize, 0),
890 NULL_RTX, 1, OPTAB_LIB_WIDEN);
891 if (bitpos > 0)
892 value = expand_shift (LSHIFT_EXPR, mode, value,
893 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
894 }
895
896 /* Now clear the chosen bits in OP0,
897 except that if VALUE is -1 we need not bother. */
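/* The overall effect, for example when storing the value 5 into a
   3-bit field at bitpos 4 of a QImode datum, is
   op0 = (op0 & ~(7 << 4)) | (5 << 4), i.e. an AND with 0x8f followed
   by an IOR with 0x50. */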
898
899 subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0;
900
901 if (! all_one)
902 {
903 temp = expand_binop (mode, and_optab, op0,
904 mask_rtx (mode, bitpos, bitsize, 1),
905 subtarget, 1, OPTAB_LIB_WIDEN);
906 subtarget = temp;
907 }
908 else
909 temp = op0;
910
911 /* Now logical-or VALUE into OP0, unless it is zero. */
912
913 if (! all_zero)
914 temp = expand_binop (mode, ior_optab, temp, value,
915 subtarget, 1, OPTAB_LIB_WIDEN);
916 if (op0 != temp)
917 emit_move_insn (op0, temp);
918 }
919 \f
920 /* Store a bit field that is split across multiple accessible memory objects.
921
922 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
923 BITSIZE is the field width; BITPOS the position of its first bit
924 (within the word).
925 VALUE is the value to store.
926
927 This does not yet handle fields wider than BITS_PER_WORD. */
928
929 static void
930 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
931 unsigned HOST_WIDE_INT bitpos, rtx value)
932 {
933 unsigned int unit;
934 unsigned int bitsdone = 0;
935
936 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
937 that much at a time. */
938 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
939 unit = BITS_PER_WORD;
940 else
941 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
942
943 /* If VALUE is a constant other than a CONST_INT, get it into a register in
944 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
945 that VALUE might be a floating-point constant. */
946 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
947 {
948 rtx word = gen_lowpart_common (word_mode, value);
949
950 if (word && (value != word))
951 value = word;
952 else
953 value = gen_lowpart_common (word_mode,
954 force_reg (GET_MODE (value) != VOIDmode
955 ? GET_MODE (value)
956 : word_mode, value));
957 }
958
959 while (bitsdone < bitsize)
960 {
961 unsigned HOST_WIDE_INT thissize;
962 rtx part, word;
963 unsigned HOST_WIDE_INT thispos;
964 unsigned HOST_WIDE_INT offset;
965
966 offset = (bitpos + bitsdone) / unit;
967 thispos = (bitpos + bitsdone) % unit;
968
969 /* THISSIZE must not overrun a word boundary. Otherwise,
970 store_fixed_bit_field will call us again, and we will mutually
971 recurse forever. */
972 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
973 thissize = MIN (thissize, unit - thispos);
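/* For example, a 20-bit field starting at bitpos 28 with unit == 32 is
   stored in two pieces: the first iteration handles the 4 bits that fit
   in the current word (unit - thispos), and the second handles the
   remaining 16 bits starting at bit 0 of the next word. */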
974
975 if (BYTES_BIG_ENDIAN)
976 {
977 int total_bits;
978
979 /* We must do an endian conversion exactly the same way as it is
980 done in extract_bit_field, so that the two calls to
981 extract_fixed_bit_field will have comparable arguments. */
982 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
983 total_bits = BITS_PER_WORD;
984 else
985 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
986
987 /* Fetch successively less significant portions. */
988 if (GET_CODE (value) == CONST_INT)
989 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
990 >> (bitsize - bitsdone - thissize))
991 & (((HOST_WIDE_INT) 1 << thissize) - 1));
992 else
993 /* The args are chosen so that the last part includes the
994 lsb. Give extract_bit_field the value it needs (with
995 endianness compensation) to fetch the piece we want. */
996 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
997 total_bits - bitsize + bitsdone,
998 NULL_RTX, 1);
999 }
1000 else
1001 {
1002 /* Fetch successively more significant portions. */
1003 if (GET_CODE (value) == CONST_INT)
1004 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1005 >> bitsdone)
1006 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1007 else
1008 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1009 bitsdone, NULL_RTX, 1);
1010 }
1011
1012 /* If OP0 is a register, then handle OFFSET here.
1013
1014 When handling multiword bitfields, extract_bit_field may pass
1015 down a word_mode SUBREG of a larger REG for a bitfield that actually
1016 crosses a word boundary. Thus, for a SUBREG, we must find
1017 the current word starting from the base register. */
1018 if (GET_CODE (op0) == SUBREG)
1019 {
1020 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1021 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1022 GET_MODE (SUBREG_REG (op0)));
1023 offset = 0;
1024 }
1025 else if (REG_P (op0))
1026 {
1027 word = operand_subword_force (op0, offset, GET_MODE (op0));
1028 offset = 0;
1029 }
1030 else
1031 word = op0;
1032
1033 /* OFFSET is in UNITs, and UNIT is in bits.
1034 store_fixed_bit_field wants offset in bytes. */
1035 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1036 thispos, part);
1037 bitsdone += thissize;
1038 }
1039 }
1040 \f
1041 /* Generate code to extract a bit-field from STR_RTX
1042 containing BITSIZE bits, starting at BITNUM,
1043 and put it in TARGET if possible (if TARGET is nonzero).
1044 Regardless of TARGET, we return the rtx for where the value is placed.
1045
1046 STR_RTX is the structure containing the byte (a REG or MEM).
1047 UNSIGNEDP is nonzero if this is an unsigned bit field.
1048 MODE is the natural mode of the field value once extracted.
1049 TMODE is the mode the caller would like the value to have;
1050 but the value may be returned with type MODE instead.
1051
1052 TOTAL_SIZE is the size in bytes of the containing structure,
1053 or -1 if varying.
1054
1055 If a TARGET is specified and we can store in it at no extra cost,
1056 we do so, and return TARGET.
1057 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1058 if they are equally easy. */
1059
1060 rtx
1061 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1062 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1063 enum machine_mode mode, enum machine_mode tmode)
1064 {
1065 unsigned int unit
1066 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1067 unsigned HOST_WIDE_INT offset = bitnum / unit;
1068 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
1069 rtx op0 = str_rtx;
1070 rtx spec_target = target;
1071 rtx spec_target_subreg = 0;
1072 enum machine_mode int_mode;
1073 enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1074 enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1075 enum machine_mode mode1;
1076 int byte_offset;
1077
1078 if (tmode == VOIDmode)
1079 tmode = mode;
1080
1081 while (GET_CODE (op0) == SUBREG)
1082 {
1083 bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1084 if (bitpos > unit)
1085 {
1086 offset += (bitpos / unit);
1087 bitpos %= unit;
1088 }
1089 op0 = SUBREG_REG (op0);
1090 }
1091
1092 if (REG_P (op0)
1093 && mode == GET_MODE (op0)
1094 && bitnum == 0
1095 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1096 {
1097 /* We're trying to extract a full register from itself. */
1098 return op0;
1099 }
1100
1101 /* Use vec_extract patterns for extracting parts of vectors whenever
1102 available. */
1103 if (VECTOR_MODE_P (GET_MODE (op0))
1104 && !MEM_P (op0)
1105 && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
1106 != CODE_FOR_nothing)
1107 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1108 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1109 {
1110 enum machine_mode outermode = GET_MODE (op0);
1111 enum machine_mode innermode = GET_MODE_INNER (outermode);
1112 int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
1113 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1114 rtx rtxpos = GEN_INT (pos);
1115 rtx src = op0;
1116 rtx dest = NULL, pat, seq;
1117 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1118 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1119 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1120
1121 if (innermode == tmode || innermode == mode)
1122 dest = target;
1123
1124 if (!dest)
1125 dest = gen_reg_rtx (innermode);
1126
1127 start_sequence ();
1128
1129 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1130 dest = copy_to_mode_reg (mode0, dest);
1131
1132 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1133 src = copy_to_mode_reg (mode1, src);
1134
1135 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1136 rtxpos = copy_to_mode_reg (mode2, rtxpos);
1137
1138 /* We could handle this, but we should always be called with a pseudo
1139 for our targets and all insns should take them as outputs. */
1140 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1141 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1142 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1143
1144 pat = GEN_FCN (icode) (dest, src, rtxpos);
1145 seq = get_insns ();
1146 end_sequence ();
1147 if (pat)
1148 {
1149 emit_insn (seq);
1150 emit_insn (pat);
1151 return dest;
1152 }
1153 }
1154
1155 /* Make sure we are playing with integral modes. Pun with subregs
1156 if we aren't. */
1157 {
1158 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1159 if (imode != GET_MODE (op0))
1160 {
1161 if (MEM_P (op0))
1162 op0 = adjust_address (op0, imode, 0);
1163 else
1164 {
1165 gcc_assert (imode != BLKmode);
1166 op0 = gen_lowpart (imode, op0);
1167
1168 /* If we got a SUBREG, force it into a register since we
1169 aren't going to be able to do another SUBREG on it. */
1170 if (GET_CODE (op0) == SUBREG)
1171 op0 = force_reg (imode, op0);
1172 }
1173 }
1174 }
1175
1176 /* We may be accessing data outside the field, which means
1177 we can alias adjacent data. */
1178 if (MEM_P (op0))
1179 {
1180 op0 = shallow_copy_rtx (op0);
1181 set_mem_alias_set (op0, 0);
1182 set_mem_expr (op0, 0);
1183 }
1184
1185 /* Extraction of a full-word or multi-word value from a structure
1186 in a register or aligned memory can be done with just a SUBREG.
1187 A subword value in the least significant part of a register
1188 can also be extracted with a SUBREG. For this, we need the
1189 byte offset of the value in op0. */
1190
1191 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1192
1193 /* If OP0 is a register, BITPOS must count within a word.
1194 But as we have it, it counts within whatever size OP0 now has.
1195 On a bigendian machine, these are not the same, so convert. */
1196 if (BYTES_BIG_ENDIAN
1197 && !MEM_P (op0)
1198 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1199 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1200
1201 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1202 If that's wrong, the solution is to test for it and set TARGET to 0
1203 if needed. */
1204
1205 /* Only scalar integer modes can be converted via subregs. There is an
1206 additional problem for FP modes here in that they can have a precision
1207 which is different from the size. mode_for_size uses precision, but
1208 we want a mode based on the size, so we must avoid calling it for FP
1209 modes. */
1210 mode1 = (SCALAR_INT_MODE_P (tmode)
1211 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1212 : mode);
1213
1214 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1215 && bitpos % BITS_PER_WORD == 0)
1216 || (mode1 != BLKmode
1217 /* ??? The big endian test here is wrong. This is correct
1218 if the value is in a register, and if mode_for_size is not
1219 the same mode as op0. This causes us to get unnecessarily
1220 inefficient code from the Thumb port when -mbig-endian. */
1221 && (BYTES_BIG_ENDIAN
1222 ? bitpos + bitsize == BITS_PER_WORD
1223 : bitpos == 0)))
1224 && ((!MEM_P (op0)
1225 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1226 GET_MODE_BITSIZE (GET_MODE (op0)))
1227 && GET_MODE_SIZE (mode1) != 0
1228 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1229 || (MEM_P (op0)
1230 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1231 || (offset * BITS_PER_UNIT % bitsize == 0
1232 && MEM_ALIGN (op0) % bitsize == 0)))))
1233 {
1234 if (mode1 != GET_MODE (op0))
1235 {
1236 if (MEM_P (op0))
1237 op0 = adjust_address (op0, mode1, offset);
1238 else
1239 {
1240 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1241 byte_offset);
1242 if (sub == NULL)
1243 goto no_subreg_mode_swap;
1244 op0 = sub;
1245 }
1246 }
1247 if (mode1 != mode)
1248 return convert_to_mode (tmode, op0, unsignedp);
1249 return op0;
1250 }
1251 no_subreg_mode_swap:
1252
1253 /* Handle fields bigger than a word. */
1254
1255 if (bitsize > BITS_PER_WORD)
1256 {
1257 /* Here we transfer the words of the field
1258 in the order least significant first.
1259 This is because the most significant word is the one which may
1260 be less than full. */
1261
1262 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1263 unsigned int i;
1264
1265 if (target == 0 || !REG_P (target))
1266 target = gen_reg_rtx (mode);
1267
1268 /* Indicate for flow that the entire target reg is being set. */
1269 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
1270
1271 for (i = 0; i < nwords; i++)
1272 {
1273 /* If I is 0, use the low-order word in both field and target;
1274 if I is 1, use the next to lowest word; and so on. */
1275 /* Word number in TARGET to use. */
1276 unsigned int wordnum
1277 = (WORDS_BIG_ENDIAN
1278 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1279 : i);
1280 /* Offset from start of field in OP0. */
1281 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1282 ? MAX (0, ((int) bitsize - ((int) i + 1)
1283 * (int) BITS_PER_WORD))
1284 : (int) i * BITS_PER_WORD);
1285 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1286 rtx result_part
1287 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1288 bitsize - i * BITS_PER_WORD),
1289 bitnum + bit_offset, 1, target_part, mode,
1290 word_mode);
1291
1292 gcc_assert (target_part);
1293
1294 if (result_part != target_part)
1295 emit_move_insn (target_part, result_part);
1296 }
1297
1298 if (unsignedp)
1299 {
1300 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1301 need to be zero'd out. */
1302 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1303 {
1304 unsigned int i, total_words;
1305
1306 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1307 for (i = nwords; i < total_words; i++)
1308 emit_move_insn
1309 (operand_subword (target,
1310 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1311 1, VOIDmode),
1312 const0_rtx);
1313 }
1314 return target;
1315 }
1316
1317 /* Signed bit field: sign-extend with two arithmetic shifts. */
1318 target = expand_shift (LSHIFT_EXPR, mode, target,
1319 build_int_cst (NULL_TREE,
1320 GET_MODE_BITSIZE (mode) - bitsize),
1321 NULL_RTX, 0);
1322 return expand_shift (RSHIFT_EXPR, mode, target,
1323 build_int_cst (NULL_TREE,
1324 GET_MODE_BITSIZE (mode) - bitsize),
1325 NULL_RTX, 0);
1326 }
1327
1328 /* From here on we know the desired field is smaller than a word. */
1329
1330 /* Check if there is a correspondingly-sized integer field, so we can
1331 safely extract it as one size of integer, if necessary; then
1332 truncate or extend to the size that is wanted; then use SUBREGs or
1333 convert_to_mode to get one of the modes we really wanted. */
1334
1335 int_mode = int_mode_for_mode (tmode);
1336 if (int_mode == BLKmode)
1337 int_mode = int_mode_for_mode (mode);
1338 /* Should probably push op0 out to memory and then do a load. */
1339 gcc_assert (int_mode != BLKmode);
1340
1341 /* OFFSET is the number of words or bytes (UNIT says which)
1342 from STR_RTX to the first word or byte containing part of the field. */
1343 if (!MEM_P (op0))
1344 {
1345 if (offset != 0
1346 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1347 {
1348 if (!REG_P (op0))
1349 op0 = copy_to_reg (op0);
1350 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1351 op0, (offset * UNITS_PER_WORD));
1352 }
1353 offset = 0;
1354 }
1355
1356 /* Now OFFSET is nonzero only for memory operands. */
1357
1358 if (unsignedp)
1359 {
1360 if (HAVE_extzv
1361 && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
1362 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1363 && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
1364 {
1365 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1366 rtx bitsize_rtx, bitpos_rtx;
1367 rtx last = get_last_insn ();
1368 rtx xop0 = op0;
1369 rtx xtarget = target;
1370 rtx xspec_target = spec_target;
1371 rtx xspec_target_subreg = spec_target_subreg;
1372 rtx pat;
1373 enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
1374
1375 if (MEM_P (xop0))
1376 {
1377 int save_volatile_ok = volatile_ok;
1378 volatile_ok = 1;
1379
1380 /* Is the memory operand acceptable? */
1381 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
1382 (xop0, GET_MODE (xop0))))
1383 {
1384 /* No, load into a reg and extract from there. */
1385 enum machine_mode bestmode;
1386
1387 /* Get the mode to use for inserting into this field. If
1388 OP0 is BLKmode, get the smallest mode consistent with the
1389 alignment. If OP0 is a non-BLKmode object that is no
1390 wider than MAXMODE, use its mode. Otherwise, use the
1391 smallest mode containing the field. */
1392
1393 if (GET_MODE (xop0) == BLKmode
1394 || (GET_MODE_SIZE (GET_MODE (op0))
1395 > GET_MODE_SIZE (maxmode)))
1396 bestmode = get_best_mode (bitsize, bitnum,
1397 MEM_ALIGN (xop0), maxmode,
1398 MEM_VOLATILE_P (xop0));
1399 else
1400 bestmode = GET_MODE (xop0);
1401
1402 if (bestmode == VOIDmode
1403 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1404 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1405 goto extzv_loses;
1406
1407 /* Compute offset as multiple of this unit,
1408 counting in bytes. */
1409 unit = GET_MODE_BITSIZE (bestmode);
1410 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1411 xbitpos = bitnum % unit;
1412 xop0 = adjust_address (xop0, bestmode, xoffset);
1413
1414 /* Fetch it to a register in that size. */
1415 xop0 = force_reg (bestmode, xop0);
1416
1417 /* XBITPOS counts within UNIT, which is what is expected. */
1418 }
1419 else
1420 /* Get ref to first byte containing part of the field. */
1421 xop0 = adjust_address (xop0, byte_mode, xoffset);
1422
1423 volatile_ok = save_volatile_ok;
1424 }
1425
1426 /* If op0 is a register, we need it in MAXMODE (which is usually
1427 SImode) to make it acceptable to the format of extzv. */
1428 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1429 goto extzv_loses;
1430 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1431 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1432
1433 /* On big-endian machines, we count bits from the most significant.
1434 If the bit field insn does not, we must invert. */
1435 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1436 xbitpos = unit - bitsize - xbitpos;
1437
1438 /* Now convert from counting within UNIT to counting in MAXMODE. */
1439 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1440 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1441
1442 unit = GET_MODE_BITSIZE (maxmode);
1443
1444 if (xtarget == 0
1445 || (flag_force_mem && MEM_P (xtarget)))
1446 xtarget = xspec_target = gen_reg_rtx (tmode);
1447
1448 if (GET_MODE (xtarget) != maxmode)
1449 {
1450 if (REG_P (xtarget))
1451 {
1452 int wider = (GET_MODE_SIZE (maxmode)
1453 > GET_MODE_SIZE (GET_MODE (xtarget)));
1454 xtarget = gen_lowpart (maxmode, xtarget);
1455 if (wider)
1456 xspec_target_subreg = xtarget;
1457 }
1458 else
1459 xtarget = gen_reg_rtx (maxmode);
1460 }
1461
1462 /* If this machine's extzv insists on a register target,
1463 make sure we have one. */
1464 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
1465 (xtarget, maxmode)))
1466 xtarget = gen_reg_rtx (maxmode);
1467
1468 bitsize_rtx = GEN_INT (bitsize);
1469 bitpos_rtx = GEN_INT (xbitpos);
1470
1471 pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1472 if (pat)
1473 {
1474 emit_insn (pat);
1475 target = xtarget;
1476 spec_target = xspec_target;
1477 spec_target_subreg = xspec_target_subreg;
1478 }
1479 else
1480 {
1481 delete_insns_since (last);
1482 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1483 bitpos, target, 1);
1484 }
1485 }
1486 else
1487 extzv_loses:
1488 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1489 bitpos, target, 1);
1490 }
1491 else
1492 {
1493 if (HAVE_extv
1494 && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
1495 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1496 && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
1497 {
1498 int xbitpos = bitpos, xoffset = offset;
1499 rtx bitsize_rtx, bitpos_rtx;
1500 rtx last = get_last_insn ();
1501 rtx xop0 = op0, xtarget = target;
1502 rtx xspec_target = spec_target;
1503 rtx xspec_target_subreg = spec_target_subreg;
1504 rtx pat;
1505 enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
1506
1507 if (MEM_P (xop0))
1508 {
1509 /* Is the memory operand acceptable? */
1510 if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
1511 (xop0, GET_MODE (xop0))))
1512 {
1513 /* No, load into a reg and extract from there. */
1514 enum machine_mode bestmode;
1515
1516 /* Get the mode to use for inserting into this field. If
1517 OP0 is BLKmode, get the smallest mode consistent with the
1518 alignment. If OP0 is a non-BLKmode object that is no
1519 wider than MAXMODE, use its mode. Otherwise, use the
1520 smallest mode containing the field. */
1521
1522 if (GET_MODE (xop0) == BLKmode
1523 || (GET_MODE_SIZE (GET_MODE (op0))
1524 > GET_MODE_SIZE (maxmode)))
1525 bestmode = get_best_mode (bitsize, bitnum,
1526 MEM_ALIGN (xop0), maxmode,
1527 MEM_VOLATILE_P (xop0));
1528 else
1529 bestmode = GET_MODE (xop0);
1530
1531 if (bestmode == VOIDmode
1532 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1533 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1534 goto extv_loses;
1535
1536 /* Compute offset as multiple of this unit,
1537 counting in bytes. */
1538 unit = GET_MODE_BITSIZE (bestmode);
1539 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1540 xbitpos = bitnum % unit;
1541 xop0 = adjust_address (xop0, bestmode, xoffset);
1542
1543 /* Fetch it to a register in that size. */
1544 xop0 = force_reg (bestmode, xop0);
1545
1546 /* XBITPOS counts within UNIT, which is what is expected. */
1547 }
1548 else
1549 /* Get ref to first byte containing part of the field. */
1550 xop0 = adjust_address (xop0, byte_mode, xoffset);
1551 }
1552
1553 /* If op0 is a register, we need it in MAXMODE (which is usually
1554 SImode) to make it acceptable to the format of extv. */
1555 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1556 goto extv_loses;
1557 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1558 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1559
1560 /* On big-endian machines, we count bits from the most significant.
1561 If the bit field insn does not, we must invert. */
1562 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1563 xbitpos = unit - bitsize - xbitpos;
1564
1565 /* XBITPOS counts within a size of UNIT.
1566 Adjust to count within a size of MAXMODE. */
1567 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1568 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1569
1570 unit = GET_MODE_BITSIZE (maxmode);
1571
1572 if (xtarget == 0
1573 || (flag_force_mem && MEM_P (xtarget)))
1574 xtarget = xspec_target = gen_reg_rtx (tmode);
1575
1576 if (GET_MODE (xtarget) != maxmode)
1577 {
1578 if (REG_P (xtarget))
1579 {
1580 int wider = (GET_MODE_SIZE (maxmode)
1581 > GET_MODE_SIZE (GET_MODE (xtarget)));
1582 xtarget = gen_lowpart (maxmode, xtarget);
1583 if (wider)
1584 xspec_target_subreg = xtarget;
1585 }
1586 else
1587 xtarget = gen_reg_rtx (maxmode);
1588 }
1589
1590 /* If this machine's extv insists on a register target,
1591 make sure we have one. */
1592 if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
1593 (xtarget, maxmode)))
1594 xtarget = gen_reg_rtx (maxmode);
1595
1596 bitsize_rtx = GEN_INT (bitsize);
1597 bitpos_rtx = GEN_INT (xbitpos);
1598
1599 pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1600 if (pat)
1601 {
1602 emit_insn (pat);
1603 target = xtarget;
1604 spec_target = xspec_target;
1605 spec_target_subreg = xspec_target_subreg;
1606 }
1607 else
1608 {
1609 delete_insns_since (last);
1610 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1611 bitpos, target, 0);
1612 }
1613 }
1614 else
1615 extv_loses:
1616 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1617 bitpos, target, 0);
1618 }
1619 if (target == spec_target)
1620 return target;
1621 if (target == spec_target_subreg)
1622 return spec_target;
1623 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1624 {
1625 /* If the target mode is not a scalar integral, first convert to the
1626 integer mode of that size and then access it as a floating-point
1627 value via a SUBREG. */
1628 if (!SCALAR_INT_MODE_P (tmode))
1629 {
1630 enum machine_mode smode
1631 = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1632 target = convert_to_mode (smode, target, unsignedp);
1633 target = force_reg (smode, target);
1634 return gen_lowpart (tmode, target);
1635 }
1636
1637 return convert_to_mode (tmode, target, unsignedp);
1638 }
1639 return target;
1640 }
1641 \f
1642 /* Extract a bit field using shifts and boolean operations.
1643 Returns an rtx to represent the value.
1644 OP0 addresses a register (word) or memory (byte).
1645 BITPOS says which bit within the word or byte the bit field starts in.
1646 OFFSET says how many bytes farther the bit field starts;
1647 it is 0 if OP0 is a register.
1648 BITSIZE says how many bits long the bit field is.
1649 (If OP0 is a register, it may be narrower than a full word,
1650 but BITPOS still counts within a full word,
1651 which is significant on bigendian machines.)
1652
1653 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1654 If TARGET is nonzero, attempts to store the value there
1655 and return TARGET, but this is not guaranteed.
1656 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1657
1658 static rtx
1659 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1660 unsigned HOST_WIDE_INT offset,
1661 unsigned HOST_WIDE_INT bitsize,
1662 unsigned HOST_WIDE_INT bitpos, rtx target,
1663 int unsignedp)
1664 {
1665 unsigned int total_bits = BITS_PER_WORD;
1666 enum machine_mode mode;
1667
1668 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1669 {
1670 /* Special treatment for a bit field split across two registers. */
1671 if (bitsize + bitpos > BITS_PER_WORD)
1672 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1673 }
1674 else
1675 {
1676 /* Get the proper mode to use for this field. We want a mode that
1677 includes the entire field. If such a mode would be larger than
1678 a word, we won't be doing the extraction the normal way. */
1679
1680 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1681 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1682
1683 if (mode == VOIDmode)
1684 /* The only way this should occur is if the field spans word
1685 boundaries. */
1686 return extract_split_bit_field (op0, bitsize,
1687 bitpos + offset * BITS_PER_UNIT,
1688 unsignedp);
1689
1690 total_bits = GET_MODE_BITSIZE (mode);
1691
1692 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1693 be in the range 0 to total_bits-1, and put any excess bytes in
1694 OFFSET. */
1695 if (bitpos >= total_bits)
1696 {
1697 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1698 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1699 * BITS_PER_UNIT);
1700 }
1701
1702 /* Get ref to an aligned byte, halfword, or word containing the field.
1703 Adjust BITPOS to be position within a word,
1704 and OFFSET to be the offset of that word.
1705 Then alter OP0 to refer to that word. */
1706 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1707 offset -= (offset % (total_bits / BITS_PER_UNIT));
1708 op0 = adjust_address (op0, mode, offset);
1709 }
1710
1711 mode = GET_MODE (op0);
1712
1713 if (BYTES_BIG_ENDIAN)
1714 /* BITPOS is the distance between our msb and that of OP0.
1715 Convert it to the distance from the lsb. */
1716 bitpos = total_bits - bitsize - bitpos;
1717
1718 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1719 We have reduced the big-endian case to the little-endian case. */
1720
1721 if (unsignedp)
1722 {
1723 if (bitpos)
1724 {
1725 /* If the field does not already start at the lsb,
1726 shift it so it does. */
1727 tree amount = build_int_cst (NULL_TREE, bitpos);
1728 /* Maybe propagate the target for the shift. */
1729 /* But not if we will return it--could confuse integrate.c. */
1730 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1731 if (tmode != mode) subtarget = 0;
1732 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1733 }
1734 /* Convert the value to the desired mode. */
1735 if (mode != tmode)
1736 op0 = convert_to_mode (tmode, op0, 1);
1737
1738 /* Unless the msb of the field used to be the msb when we shifted,
1739 mask out the upper bits. */
1740
1741 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1742 return expand_binop (GET_MODE (op0), and_optab, op0,
1743 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1744 target, 1, OPTAB_LIB_WIDEN);
1745 return op0;
1746 }
1747
1748 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1749 then arithmetic-shift its lsb to the lsb of the word. */
1750 op0 = force_reg (mode, op0);
1751 if (mode != tmode)
1752 target = 0;
1753
1754 /* Find the narrowest integer mode that contains the field. */
1755
1756 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1757 mode = GET_MODE_WIDER_MODE (mode))
1758 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1759 {
1760 op0 = convert_to_mode (mode, op0, 0);
1761 break;
1762 }
1763
1764 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1765 {
1766 tree amount
1767 = build_int_cst (NULL_TREE,
1768 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1769 /* Maybe propagate the target for the shift. */
1770 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1771 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1772 }
1773
1774 return expand_shift (RSHIFT_EXPR, mode, op0,
1775 build_int_cst (NULL_TREE,
1776 GET_MODE_BITSIZE (mode) - bitsize),
1777 target, 0);
1778 }
1779 \f
1780 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1781 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1782 complement of that if COMPLEMENT. The mask is truncated if
1783 necessary to the width of mode MODE. The mask is zero-extended if
1784 BITSIZE+BITPOS is too small for MODE. */
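/* Example (illustrative, not from the original comment): in SImode,
   BITSIZE == 5 and BITPOS == 3 give the mask 0xf8 (five ones followed by
   three zeros); with COMPLEMENT nonzero the result is 0xffffff07 instead. */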
1785
1786 static rtx
1787 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1788 {
1789 HOST_WIDE_INT masklow, maskhigh;
1790
1791 if (bitsize == 0)
1792 masklow = 0;
1793 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1794 masklow = (HOST_WIDE_INT) -1 << bitpos;
1795 else
1796 masklow = 0;
1797
1798 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1799 masklow &= ((unsigned HOST_WIDE_INT) -1
1800 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1801
1802 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1803 maskhigh = -1;
1804 else
1805 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1806
1807 if (bitsize == 0)
1808 maskhigh = 0;
1809 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1810 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1811 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1812 else
1813 maskhigh = 0;
1814
1815 if (complement)
1816 {
1817 maskhigh = ~maskhigh;
1818 masklow = ~masklow;
1819 }
1820
1821 return immed_double_const (masklow, maskhigh, mode);
1822 }
1823
1824 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1825 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
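/* Example (illustrative): VALUE == 0x1d, BITSIZE == 3 and BITPOS == 4 give
   (0x1d & 7) << 4 == 0x50. */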
1826
1827 static rtx
1828 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1829 {
1830 unsigned HOST_WIDE_INT v = INTVAL (value);
1831 HOST_WIDE_INT low, high;
1832
1833 if (bitsize < HOST_BITS_PER_WIDE_INT)
1834 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1835
1836 if (bitpos < HOST_BITS_PER_WIDE_INT)
1837 {
1838 low = v << bitpos;
1839 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1840 }
1841 else
1842 {
1843 low = 0;
1844 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1845 }
1846
1847 return immed_double_const (low, high, mode);
1848 }
1849 \f
1850 /* Extract a bit field from a memory by forcing the alignment of the
1851 memory. This is efficient only if the field spans at least 4 boundaries.
1852
1853 OP0 is the MEM.
1854 BITSIZE is the field width; BITPOS is the position of the first bit.
1855 UNSIGNEDP is true if the result should be zero-extended. */
1856
1857 static rtx
1858 extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1859 unsigned HOST_WIDE_INT bitpos,
1860 int unsignedp)
1861 {
1862 enum machine_mode mode, dmode;
1863 unsigned int m_bitsize, m_size;
1864 unsigned int sign_shift_up, sign_shift_dn;
1865 rtx base, a1, a2, v1, v2, comb, shift, result, start;
1866
1867 /* Choose a mode that will fit BITSIZE. */
1868 mode = smallest_mode_for_size (bitsize, MODE_INT);
1869 m_size = GET_MODE_SIZE (mode);
1870 m_bitsize = GET_MODE_BITSIZE (mode);
1871
1872 /* Choose a mode twice as wide. Fail if no such mode exists. */
1873 dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
1874 if (dmode == BLKmode)
1875 return NULL;
1876
1877 do_pending_stack_adjust ();
1878 start = get_last_insn ();
1879
1880 /* At the end, we'll need an additional shift to deal with sign/zero
1881 extension. By default this will be a left+right shift of the
1882 appropriate size. But we may be able to eliminate one of them. */
1883 sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1884
1885 if (STRICT_ALIGNMENT)
1886 {
1887 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1888 bitpos %= BITS_PER_UNIT;
1889
1890 /* We load two values to be concatenated. There's an edge condition
1891 that bears notice -- an aligned value at the end of a page can
1892 only load one value lest we segfault. So the two values we load
1893 are at "base & -size" and "(base + size - 1) & -size". If base
1894 is unaligned, the addresses will be aligned and sequential; if
1895 base is aligned, the addresses will both be equal to base. */
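/* For instance (illustrative numbers): with m_size == 4, base == 0x1003
   gives a1 == 0x1000 and a2 == (0x1003 + 3) & -4 == 0x1004, two adjacent
   aligned words; base == 0x1000 gives a1 == a2 == 0x1000. */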
1896
1897 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1898 GEN_INT (-(HOST_WIDE_INT)m_size),
1899 NULL, true, OPTAB_LIB_WIDEN);
1900 mark_reg_pointer (a1, m_bitsize);
1901 v1 = gen_rtx_MEM (mode, a1);
1902 set_mem_align (v1, m_bitsize);
1903 v1 = force_reg (mode, validize_mem (v1));
1904
1905 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1906 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1907 GEN_INT (-(HOST_WIDE_INT)m_size),
1908 NULL, true, OPTAB_LIB_WIDEN);
1909 v2 = gen_rtx_MEM (mode, a2);
1910 set_mem_align (v2, m_bitsize);
1911 v2 = force_reg (mode, validize_mem (v2));
1912
1913 /* Combine these two values into a double-word value. */
1914 if (m_bitsize == BITS_PER_WORD)
1915 {
1916 comb = gen_reg_rtx (dmode);
1917 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1918 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1919 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1920 }
1921 else
1922 {
1923 if (BYTES_BIG_ENDIAN)
1924 comb = v1, v1 = v2, v2 = comb;
1925 v1 = convert_modes (dmode, mode, v1, true);
1926 if (v1 == NULL)
1927 goto fail;
1928 v2 = convert_modes (dmode, mode, v2, true);
1929 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1930 NULL, true, OPTAB_LIB_WIDEN);
1931 if (v2 == NULL)
1932 goto fail;
1933 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1934 true, OPTAB_LIB_WIDEN);
1935 if (comb == NULL)
1936 goto fail;
1937 }
1938
1939 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1940 NULL, true, OPTAB_LIB_WIDEN);
1941 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1942
1943 if (bitpos != 0)
1944 {
1945 if (sign_shift_up <= bitpos)
1946 bitpos -= sign_shift_up, sign_shift_up = 0;
1947 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1948 NULL, true, OPTAB_LIB_WIDEN);
1949 }
1950 }
1951 else
1952 {
1953 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1954 bitpos %= BITS_PER_UNIT;
1955
1956 /* When strict alignment is not required, we can just load directly
1957 from memory without masking. If the remaining BITPOS offset is
1958 small enough, we may be able to do all operations in MODE as
1959 opposed to DMODE. */
1960 if (bitpos + bitsize <= m_bitsize)
1961 dmode = mode;
1962 comb = adjust_address (op0, dmode, offset);
1963
1964 if (sign_shift_up <= bitpos)
1965 bitpos -= sign_shift_up, sign_shift_up = 0;
1966 shift = GEN_INT (bitpos);
1967 }
1968
1969 /* Shift down the double-word such that the requested value is at bit 0. */
1970 if (shift != const0_rtx)
1971 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
1972 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
1973 if (comb == NULL)
1974 goto fail;
1975
1976 /* If the field exactly matches MODE, then all we need to do is return the
1977 lowpart. Otherwise, shift to get the sign bits set properly. */
1978 result = force_reg (mode, gen_lowpart (mode, comb));
1979
1980 if (sign_shift_up)
1981 result = expand_simple_binop (mode, ASHIFT, result,
1982 GEN_INT (sign_shift_up),
1983 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1984 if (sign_shift_dn)
1985 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
1986 result, GEN_INT (sign_shift_dn),
1987 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1988
1989 return result;
1990
1991 fail:
1992 delete_insns_since (start);
1993 return NULL;
1994 }
1995
1996 /* Extract a bit field that is split across two words
1997 and return an RTX for the result.
1998
1999 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2000 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2001 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
2002
2003 static rtx
2004 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2005 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2006 {
2007 unsigned int unit;
2008 unsigned int bitsdone = 0;
2009 rtx result = NULL_RTX;
2010 int first = 1;
2011
2012 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2013 much at a time. */
2014 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2015 unit = BITS_PER_WORD;
2016 else
2017 {
2018 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2019 if (0 && bitsize / unit > 2)
2020 {
2021 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2022 unsignedp);
2023 if (tmp)
2024 return tmp;
2025 }
2026 }
2027
2028 while (bitsdone < bitsize)
2029 {
2030 unsigned HOST_WIDE_INT thissize;
2031 rtx part, word;
2032 unsigned HOST_WIDE_INT thispos;
2033 unsigned HOST_WIDE_INT offset;
2034
2035 offset = (bitpos + bitsdone) / unit;
2036 thispos = (bitpos + bitsdone) % unit;
2037
2038 /* THISSIZE must not overrun a word boundary. Otherwise,
2039 extract_fixed_bit_field will call us again, and we will mutually
2040 recurse forever. */
2041 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2042 thissize = MIN (thissize, unit - thispos);
2043
2044 /* If OP0 is a register, then handle OFFSET here.
2045
2046 When handling multiword bitfields, extract_bit_field may pass
2047 down a word_mode SUBREG of a larger REG for a bitfield that actually
2048 crosses a word boundary. Thus, for a SUBREG, we must find
2049 the current word starting from the base register. */
2050 if (GET_CODE (op0) == SUBREG)
2051 {
2052 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2053 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2054 GET_MODE (SUBREG_REG (op0)));
2055 offset = 0;
2056 }
2057 else if (REG_P (op0))
2058 {
2059 word = operand_subword_force (op0, offset, GET_MODE (op0));
2060 offset = 0;
2061 }
2062 else
2063 word = op0;
2064
2065 /* Extract the parts in bit-counting order,
2066 whose meaning is determined by BYTES_PER_UNIT.
2067 OFFSET is in UNITs, and UNIT is in bits.
2068 extract_fixed_bit_field wants offset in bytes. */
2069 part = extract_fixed_bit_field (word_mode, word,
2070 offset * unit / BITS_PER_UNIT,
2071 thissize, thispos, 0, 1);
2072 bitsdone += thissize;
2073
2074 /* Shift this part into place for the result. */
2075 if (BYTES_BIG_ENDIAN)
2076 {
2077 if (bitsize != bitsdone)
2078 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2079 build_int_cst (NULL_TREE, bitsize - bitsdone),
2080 0, 1);
2081 }
2082 else
2083 {
2084 if (bitsdone != thissize)
2085 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2086 build_int_cst (NULL_TREE,
2087 bitsdone - thissize), 0, 1);
2088 }
2089
2090 if (first)
2091 result = part;
2092 else
2093 /* Combine the parts with bitwise or. This works
2094 because we extracted each part as an unsigned bit field. */
2095 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2096 OPTAB_LIB_WIDEN);
2097
2098 first = 0;
2099 }
2100
2101 /* Unsigned bit field: we are done. */
2102 if (unsignedp)
2103 return result;
2104 /* Signed bit field: sign-extend with two arithmetic shifts. */
2105 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2106 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2107 NULL_RTX, 0);
2108 return expand_shift (RSHIFT_EXPR, word_mode, result,
2109 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2110 NULL_RTX, 0);
2111 }
2112 \f
2113 /* Add INC into TARGET. */
2114
2115 void
2116 expand_inc (rtx target, rtx inc)
2117 {
2118 rtx value = expand_binop (GET_MODE (target), add_optab,
2119 target, inc,
2120 target, 0, OPTAB_LIB_WIDEN);
2121 if (value != target)
2122 emit_move_insn (target, value);
2123 }
2124
2125 /* Subtract DEC from TARGET. */
2126
2127 void
2128 expand_dec (rtx target, rtx dec)
2129 {
2130 rtx value = expand_binop (GET_MODE (target), sub_optab,
2131 target, dec,
2132 target, 0, OPTAB_LIB_WIDEN);
2133 if (value != target)
2134 emit_move_insn (target, value);
2135 }
2136 \f
2137 /* Output a shift instruction for expression code CODE,
2138 with SHIFTED being the rtx for the value to shift,
2139 and AMOUNT the tree for the amount to shift by.
2140 Store the result in the rtx TARGET, if that is convenient.
2141 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2142 Return the rtx for where the value is. */
2143
2144 rtx
2145 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2146 tree amount, rtx target, int unsignedp)
2147 {
2148 rtx op1, temp = 0;
2149 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2150 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2151 int try;
2152
2153 /* Previously we detected shift-counts computed by NEGATE_EXPR
2154 and shifted in the other direction, but that does not work
2155 on all machines. */
2156
2157 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
2158
2159 if (SHIFT_COUNT_TRUNCATED)
2160 {
2161 if (GET_CODE (op1) == CONST_INT
2162 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2163 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2164 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2165 % GET_MODE_BITSIZE (mode));
2166 else if (GET_CODE (op1) == SUBREG
2167 && subreg_lowpart_p (op1))
2168 op1 = SUBREG_REG (op1);
2169 }
2170
2171 if (op1 == const0_rtx)
2172 return shifted;
2173
2174 /* Check whether it's cheaper to implement a left shift by a constant
2175 bit count as a sequence of additions. */
2176 if (code == LSHIFT_EXPR
2177 && GET_CODE (op1) == CONST_INT
2178 && INTVAL (op1) > 0
2179 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2180 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2181 {
2182 int i;
2183 for (i = 0; i < INTVAL (op1); i++)
2184 {
2185 temp = force_reg (mode, shifted);
2186 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2187 unsignedp, OPTAB_LIB_WIDEN);
2188 }
2189 return shifted;
2190 }
2191
2192 for (try = 0; temp == 0 && try < 3; try++)
2193 {
2194 enum optab_methods methods;
2195
2196 if (try == 0)
2197 methods = OPTAB_DIRECT;
2198 else if (try == 1)
2199 methods = OPTAB_WIDEN;
2200 else
2201 methods = OPTAB_LIB_WIDEN;
2202
2203 if (rotate)
2204 {
2205 /* Widening does not work for rotation. */
2206 if (methods == OPTAB_WIDEN)
2207 continue;
2208 else if (methods == OPTAB_LIB_WIDEN)
2209 {
2210 /* If we have been unable to open-code this by a rotation,
2211 do it as the IOR of two shifts. I.e., to rotate A
2212 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2213 where C is the bitsize of A.
2214
2215 It is theoretically possible that the target machine might
2216 not be able to perform either shift and hence we would
2217 be making two libcalls rather than just the one for the
2218 shift (similarly if IOR could not be done). We will allow
2219 this extremely unlikely lossage to avoid complicating the
2220 code below. */
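/* Example (illustrative): rotating an 8-bit value A left by 3 becomes
   (A << 3) | ((unsigned) A >> 5). */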
2221
2222 rtx subtarget = target == shifted ? 0 : target;
2223 rtx temp1;
2224 tree type = TREE_TYPE (amount);
2225 tree new_amount = make_tree (type, op1);
2226 tree other_amount
2227 = fold (build2 (MINUS_EXPR, type, convert
2228 (type, build_int_cst
2229 (NULL_TREE, GET_MODE_BITSIZE (mode))),
2230 amount));
2231
2232 shifted = force_reg (mode, shifted);
2233
2234 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2235 mode, shifted, new_amount, subtarget, 1);
2236 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2237 mode, shifted, other_amount, 0, 1);
2238 return expand_binop (mode, ior_optab, temp, temp1, target,
2239 unsignedp, methods);
2240 }
2241
2242 temp = expand_binop (mode,
2243 left ? rotl_optab : rotr_optab,
2244 shifted, op1, target, unsignedp, methods);
2245
2246 /* If we don't have the rotate, but we are rotating by a constant
2247 that is in range, try a rotate in the opposite direction. */
2248
2249 if (temp == 0 && GET_CODE (op1) == CONST_INT
2250 && INTVAL (op1) > 0
2251 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2252 temp = expand_binop (mode,
2253 left ? rotr_optab : rotl_optab,
2254 shifted,
2255 GEN_INT (GET_MODE_BITSIZE (mode)
2256 - INTVAL (op1)),
2257 target, unsignedp, methods);
2258 }
2259 else if (unsignedp)
2260 temp = expand_binop (mode,
2261 left ? ashl_optab : lshr_optab,
2262 shifted, op1, target, unsignedp, methods);
2263
2264 /* Do arithmetic shifts.
2265 Also, if we are going to widen the operand, we can just as well
2266 use an arithmetic right-shift instead of a logical one. */
2267 if (temp == 0 && ! rotate
2268 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2269 {
2270 enum optab_methods methods1 = methods;
2271
2272 /* If trying to widen a log shift to an arithmetic shift,
2273 don't accept an arithmetic shift of the same size. */
2274 if (unsignedp)
2275 methods1 = OPTAB_MUST_WIDEN;
2276
2277 /* Arithmetic shift */
2278
2279 temp = expand_binop (mode,
2280 left ? ashl_optab : ashr_optab,
2281 shifted, op1, target, unsignedp, methods1);
2282 }
2283
2284 /* We used to try extzv here for logical right shifts, but that was
2285 only useful for one machine, the VAX, and caused poor code
2286 generation there for lshrdi3, so the code was deleted and a
2287 define_expand for lshrsi3 was added to vax.md. */
2288 }
2289
2290 gcc_assert (temp);
2291 return temp;
2292 }
2293 \f
2294 enum alg_code { alg_unknown, alg_zero, alg_m, alg_shift,
2295 alg_add_t_m2, alg_sub_t_m2,
2296 alg_add_factor, alg_sub_factor,
2297 alg_add_t2_m, alg_sub_t2_m };
2298
2299 /* This structure holds the "cost" of a multiply sequence. The
2300 "cost" field holds the total rtx_cost of every operator in the
2301 synthetic multiplication sequence, hence cost(a op b) is defined
2302 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2303 The "latency" field holds the minimum possible latency of the
2304 synthetic multiply, on a hypothetical infinitely parallel CPU.
2305 This is the critical path, or the maximum height, of the expression
2306 tree which is the sum of rtx_costs on the most expensive path from
2307 any leaf to the root. Hence latency(a op b) is defined as zero for
2308 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
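/* Example (illustrative): for (x << 3) + (x << 1), with each shift and the
   add costing 1, the total cost is 3 but the latency is only 2, since the
   two shifts are independent and may execute in parallel. */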
2309
2310 struct mult_cost {
2311 short cost; /* Total rtx_cost of the multiplication sequence. */
2312 short latency; /* The latency of the multiplication sequence. */
2313 };
2314
2315 /* This macro is used to compare a pointer to a mult_cost against a
2316 single integer "rtx_cost" value. This is equivalent to the macro
2317 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2318 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2319 || ((X)->cost == (Y) && (X)->latency < (Y)))
2320
2321 /* This macro is used to compare two pointers to mult_costs against
2322 each other. The macro returns true if X is cheaper than Y.
2323 Currently, the cheaper of two mult_costs is the one with the
2324 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2325 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2326 || ((X)->cost == (Y)->cost \
2327 && (X)->latency < (Y)->latency))
2328
2329 /* This structure records a sequence of operations.
2330 `ops' is the number of operations recorded.
2331 `cost' is their total cost.
2332 The operations are stored in `op' and the corresponding
2333 logarithms of the integer coefficients in `log'.
2334
2335 These are the operations:
2336 alg_zero total := 0;
2337 alg_m total := multiplicand;
2338 alg_shift total := total * coeff
2339 alg_add_t_m2 total := total + multiplicand * coeff;
2340 alg_sub_t_m2 total := total - multiplicand * coeff;
2341 alg_add_factor total := total * coeff + total;
2342 alg_sub_factor total := total * coeff - total;
2343 alg_add_t2_m total := total * coeff + multiplicand;
2344 alg_sub_t2_m total := total * coeff - multiplicand;
2345
2346 The first operand must be either alg_zero or alg_m. */
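/* Worked example (illustrative, not part of the original comment): a
   multiplication by 10 can be recorded as

   alg_m total := x
   alg_add_t2_m total := total * 4 + x (log = 2, giving 5x)
   alg_shift total := total * 2 (log = 1, giving 10x)

   i.e. ((x << 2) + x) << 1. */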
2347
2348 struct algorithm
2349 {
2350 struct mult_cost cost;
2351 short ops;
2352 /* The size of the OP and LOG fields are not directly related to the
2353 word size, but the worst-case algorithms will be if we have few
2354 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2355 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2356 in total wordsize operations. */
2357 enum alg_code op[MAX_BITS_PER_WORD];
2358 char log[MAX_BITS_PER_WORD];
2359 };
2360
2361 /* The entry for our multiplication cache/hash table. */
2362 struct alg_hash_entry {
2363 /* The number we are multiplying by. */
2364 unsigned int t;
2365
2366 /* The mode in which we are multiplying something by T. */
2367 enum machine_mode mode;
2368
2369 /* The best multiplication algorithm for t. */
2370 enum alg_code alg;
2371 };
2372
2373 /* The number of cache/hash entries. */
2374 #define NUM_ALG_HASH_ENTRIES 307
2375
2376 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2377 actually a hash table. If we have a collision, the older
2378 entry is kicked out. */
2379 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2380
2381 /* Indicates the type of fixup needed after a constant multiplication.
2382 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2383 the result should be negated, and ADD_VARIANT means that the
2384 multiplicand should be added to the result. */
2385 enum mult_variant {basic_variant, negate_variant, add_variant};
2386
2387 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2388 const struct mult_cost *, enum machine_mode mode);
2389 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2390 struct algorithm *, enum mult_variant *, int);
2391 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2392 const struct algorithm *, enum mult_variant);
2393 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2394 int, rtx *, int *, int *);
2395 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2396 static rtx extract_high_half (enum machine_mode, rtx);
2397 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2398 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2399 int, int);
2400 /* Compute and return the best algorithm for multiplying by T.
2401 The algorithm must cost less than COST_LIMIT.
2402 If retval.cost >= COST_LIMIT, no algorithm was found and all
2403 other fields of the returned struct are undefined.
2404 MODE is the machine mode of the multiplication. */
2405
2406 static void
2407 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2408 const struct mult_cost *cost_limit, enum machine_mode mode)
2409 {
2410 int m;
2411 struct algorithm *alg_in, *best_alg;
2412 struct mult_cost best_cost;
2413 struct mult_cost new_limit;
2414 int op_cost, op_latency;
2415 unsigned HOST_WIDE_INT q;
2416 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2417 int hash_index;
2418 bool cache_hit = false;
2419 enum alg_code cache_alg = alg_zero;
2420
2421 /* Indicate that no algorithm is yet found. If no algorithm
2422 is found, this value will be returned and indicate failure. */
2423 alg_out->cost.cost = cost_limit->cost + 1;
2424 alg_out->cost.latency = cost_limit->latency + 1;
2425
2426 if (cost_limit->cost < 0
2427 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2428 return;
2429
2430 /* Restrict the bits of "t" to the multiplication's mode. */
2431 t &= GET_MODE_MASK (mode);
2432
2433 /* t == 1 can be done in zero cost. */
2434 if (t == 1)
2435 {
2436 alg_out->ops = 1;
2437 alg_out->cost.cost = 0;
2438 alg_out->cost.latency = 0;
2439 alg_out->op[0] = alg_m;
2440 return;
2441 }
2442
2443 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2444 fail now. */
2445 if (t == 0)
2446 {
2447 if (MULT_COST_LESS (cost_limit, zero_cost))
2448 return;
2449 else
2450 {
2451 alg_out->ops = 1;
2452 alg_out->cost.cost = zero_cost;
2453 alg_out->cost.latency = zero_cost;
2454 alg_out->op[0] = alg_zero;
2455 return;
2456 }
2457 }
2458
2459 /* We'll be needing a couple extra algorithm structures now. */
2460
2461 alg_in = alloca (sizeof (struct algorithm));
2462 best_alg = alloca (sizeof (struct algorithm));
2463 best_cost = *cost_limit;
2464
2465 /* Compute the hash index. */
2466 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2467
2468 /* See if we already know what to do for T. */
2469 if (alg_hash[hash_index].t == t
2470 && alg_hash[hash_index].mode == mode
2471 && alg_hash[hash_index].alg != alg_unknown)
2472 {
2473 cache_hit = true;
2474 cache_alg = alg_hash[hash_index].alg;
2475 switch (cache_alg)
2476 {
2477 case alg_shift:
2478 goto do_alg_shift;
2479
2480 case alg_add_t_m2:
2481 case alg_sub_t_m2:
2482 goto do_alg_addsub_t_m2;
2483
2484 case alg_add_factor:
2485 case alg_sub_factor:
2486 goto do_alg_addsub_factor;
2487
2488 case alg_add_t2_m:
2489 goto do_alg_add_t2_m;
2490
2491 case alg_sub_t2_m:
2492 goto do_alg_sub_t2_m;
2493
2494 default:
2495 gcc_unreachable ();
2496 }
2497 }
2498
2499 /* If we have a group of zero bits at the low-order part of T, try
2500 multiplying by the remaining bits and then doing a shift. */
2501
2502 if ((t & 1) == 0)
2503 {
2504 do_alg_shift:
2505 m = floor_log2 (t & -t); /* m = number of low zero bits */
2506 if (m < maxm)
2507 {
2508 q = t >> m;
2509 /* The function expand_shift will choose between a shift and
2510 a sequence of additions, so the observed cost is given as
2511 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2512 op_cost = m * add_cost[mode];
2513 if (shift_cost[mode][m] < op_cost)
2514 op_cost = shift_cost[mode][m];
2515 new_limit.cost = best_cost.cost - op_cost;
2516 new_limit.latency = best_cost.latency - op_cost;
2517 synth_mult (alg_in, q, &new_limit, mode);
2518
2519 alg_in->cost.cost += op_cost;
2520 alg_in->cost.latency += op_cost;
2521 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2522 {
2523 struct algorithm *x;
2524 best_cost = alg_in->cost;
2525 x = alg_in, alg_in = best_alg, best_alg = x;
2526 best_alg->log[best_alg->ops] = m;
2527 best_alg->op[best_alg->ops] = alg_shift;
2528 }
2529 }
2530 if (cache_hit)
2531 goto done;
2532 }
2533
2534 /* If we have an odd number, add or subtract one. */
2535 if ((t & 1) != 0)
2536 {
2537 unsigned HOST_WIDE_INT w;
2538
2539 do_alg_addsub_t_m2:
2540 for (w = 1; (w & t) != 0; w <<= 1)
2541 ;
2542 /* If T was -1, then W will be zero after the loop. This is another
2543 case where T ends with ...111. Handling this with (T + 1) and
2544 subtracting 1 produces slightly better code and results in much
2545 faster algorithm selection than treating it like the ...0111 case
2546 below. */
2547 if (w == 0
2548 || (w > 2
2549 /* Reject the case where t is 3.
2550 Thus we prefer addition in that case. */
2551 && t != 3))
2552 {
2553 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2554
2555 op_cost = add_cost[mode];
2556 new_limit.cost = best_cost.cost - op_cost;
2557 new_limit.latency = best_cost.latency - op_cost;
2558 synth_mult (alg_in, t + 1, &new_limit, mode);
2559
2560 alg_in->cost.cost += op_cost;
2561 alg_in->cost.latency += op_cost;
2562 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2563 {
2564 struct algorithm *x;
2565 best_cost = alg_in->cost;
2566 x = alg_in, alg_in = best_alg, best_alg = x;
2567 best_alg->log[best_alg->ops] = 0;
2568 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2569 }
2570 }
2571 else
2572 {
2573 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2574
2575 op_cost = add_cost[mode];
2576 new_limit.cost = best_cost.cost - op_cost;
2577 new_limit.latency = best_cost.latency - op_cost;
2578 synth_mult (alg_in, t - 1, &new_limit, mode);
2579
2580 alg_in->cost.cost += op_cost;
2581 alg_in->cost.latency += op_cost;
2582 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2583 {
2584 struct algorithm *x;
2585 best_cost = alg_in->cost;
2586 x = alg_in, alg_in = best_alg, best_alg = x;
2587 best_alg->log[best_alg->ops] = 0;
2588 best_alg->op[best_alg->ops] = alg_add_t_m2;
2589 }
2590 }
2591 if (cache_hit)
2592 goto done;
2593 }
2594
2595 /* Look for factors of t of the form
2596 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2597 If we find such a factor, we can multiply by t using an algorithm that
2598 multiplies by q, shift the result by m and add/subtract it to itself.
2599
2600 We search for large factors first and loop down, even if large factors
2601 are less probable than small; if we find a large factor we will find a
2602 good sequence quickly, and therefore be able to prune (by decreasing
2603 COST_LIMIT) the search. */
2604
2605 do_alg_addsub_factor:
2606 for (m = floor_log2 (t - 1); m >= 2; m--)
2607 {
2608 unsigned HOST_WIDE_INT d;
2609
2610 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2611 if (t % d == 0 && t > d && m < maxm
2612 && (!cache_hit || cache_alg == alg_add_factor))
2613 {
2614 /* If the target has a cheap shift-and-add instruction use
2615 that in preference to a shift insn followed by an add insn.
2616 Assume that the shift-and-add is "atomic" with a latency
2617 equal to its cost, otherwise assume that on superscalar
2618 hardware the shift may be executed concurrently with the
2619 earlier steps in the algorithm. */
2620 op_cost = add_cost[mode] + shift_cost[mode][m];
2621 if (shiftadd_cost[mode][m] < op_cost)
2622 {
2623 op_cost = shiftadd_cost[mode][m];
2624 op_latency = op_cost;
2625 }
2626 else
2627 op_latency = add_cost[mode];
2628
2629 new_limit.cost = best_cost.cost - op_cost;
2630 new_limit.latency = best_cost.latency - op_latency;
2631 synth_mult (alg_in, t / d, &new_limit, mode);
2632
2633 alg_in->cost.cost += op_cost;
2634 alg_in->cost.latency += op_latency;
2635 if (alg_in->cost.latency < op_cost)
2636 alg_in->cost.latency = op_cost;
2637 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2638 {
2639 struct algorithm *x;
2640 best_cost = alg_in->cost;
2641 x = alg_in, alg_in = best_alg, best_alg = x;
2642 best_alg->log[best_alg->ops] = m;
2643 best_alg->op[best_alg->ops] = alg_add_factor;
2644 }
2645 /* Other factors will have been taken care of in the recursion. */
2646 break;
2647 }
2648
2649 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2650 if (t % d == 0 && t > d && m < maxm
2651 && (!cache_hit || cache_alg == alg_sub_factor))
2652 {
2653 /* If the target has a cheap shift-and-subtract insn use
2654 that in preference to a shift insn followed by a sub insn.
2655 Assume that the shift-and-sub is "atomic" with a latency
2656 equal to its cost, otherwise assume that on superscalar
2657 hardware the shift may be executed concurrently with the
2658 earlier steps in the algorithm. */
2659 op_cost = add_cost[mode] + shift_cost[mode][m];
2660 if (shiftsub_cost[mode][m] < op_cost)
2661 {
2662 op_cost = shiftsub_cost[mode][m];
2663 op_latency = op_cost;
2664 }
2665 else
2666 op_latency = add_cost[mode];
2667
2668 new_limit.cost = best_cost.cost - op_cost;
2669 new_limit.latency = best_cost.latency - op_latency;
2670 synth_mult (alg_in, t / d, &new_limit, mode);
2671
2672 alg_in->cost.cost += op_cost;
2673 alg_in->cost.latency += op_latency;
2674 if (alg_in->cost.latency < op_cost)
2675 alg_in->cost.latency = op_cost;
2676 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2677 {
2678 struct algorithm *x;
2679 best_cost = alg_in->cost;
2680 x = alg_in, alg_in = best_alg, best_alg = x;
2681 best_alg->log[best_alg->ops] = m;
2682 best_alg->op[best_alg->ops] = alg_sub_factor;
2683 }
2684 break;
2685 }
2686 }
2687 if (cache_hit)
2688 goto done;
2689
2690 /* Try shift-and-add (load effective address) instructions,
2691 i.e. do a*3, a*5, a*9. */
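/* Illustrative note (an assumption about one such target): on x86 the lea
   instruction can compute a + a*4 in one step, so a*5 costs a single
   shiftadd. */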
2692 if ((t & 1) != 0)
2693 {
2694 do_alg_add_t2_m:
2695 q = t - 1;
2696 q = q & -q;
2697 m = exact_log2 (q);
2698 if (m >= 0 && m < maxm)
2699 {
2700 op_cost = shiftadd_cost[mode][m];
2701 new_limit.cost = best_cost.cost - op_cost;
2702 new_limit.latency = best_cost.latency - op_cost;
2703 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2704
2705 alg_in->cost.cost += op_cost;
2706 alg_in->cost.latency += op_cost;
2707 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2708 {
2709 struct algorithm *x;
2710 best_cost = alg_in->cost;
2711 x = alg_in, alg_in = best_alg, best_alg = x;
2712 best_alg->log[best_alg->ops] = m;
2713 best_alg->op[best_alg->ops] = alg_add_t2_m;
2714 }
2715 }
2716 if (cache_hit)
2717 goto done;
2718
2719 do_alg_sub_t2_m:
2720 q = t + 1;
2721 q = q & -q;
2722 m = exact_log2 (q);
2723 if (m >= 0 && m < maxm)
2724 {
2725 op_cost = shiftsub_cost[mode][m];
2726 new_limit.cost = best_cost.cost - op_cost;
2727 new_limit.latency = best_cost.latency - op_cost;
2728 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2729
2730 alg_in->cost.cost += op_cost;
2731 alg_in->cost.latency += op_cost;
2732 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2733 {
2734 struct algorithm *x;
2735 best_cost = alg_in->cost;
2736 x = alg_in, alg_in = best_alg, best_alg = x;
2737 best_alg->log[best_alg->ops] = m;
2738 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2739 }
2740 }
2741 if (cache_hit)
2742 goto done;
2743 }
2744
2745 done:
2746 /* If best_cost has not decreased, we have not found any algorithm. */
2747 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2748 return;
2749
2750 /* Cache the result. */
2751 if (!cache_hit)
2752 {
2753 alg_hash[hash_index].t = t;
2754 alg_hash[hash_index].mode = mode;
2755 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2756 }
2757
2758 /* If we are getting too long a sequence for `struct algorithm'
2759 to record, make this search fail. */
2760 if (best_alg->ops == MAX_BITS_PER_WORD)
2761 return;
2762
2763 /* Copy the algorithm from temporary space to the space at alg_out.
2764 We avoid using structure assignment because the majority of
2765 best_alg is normally undefined, and this is a critical function. */
2766 alg_out->ops = best_alg->ops + 1;
2767 alg_out->cost = best_cost;
2768 memcpy (alg_out->op, best_alg->op,
2769 alg_out->ops * sizeof *alg_out->op);
2770 memcpy (alg_out->log, best_alg->log,
2771 alg_out->ops * sizeof *alg_out->log);
2772 }
2773 \f
2774 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2775 Try three variations:
2776
2777 - a shift/add sequence based on VAL itself
2778 - a shift/add sequence based on -VAL, followed by a negation
2779 - a shift/add sequence based on VAL - 1, followed by an addition.
2780
2781 Return true if the cheapest of these cost less than MULT_COST,
2782 describing the algorithm in *ALG and final fixup in *VARIANT. */
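/* Example (illustrative): multiplying by 7 can be done directly as
   (x << 3) - x (basic_variant), while multiplying by -5 may be cheapest
   as (x << 2) + x followed by a negation (negate_variant). */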
2783
2784 static bool
2785 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2786 struct algorithm *alg, enum mult_variant *variant,
2787 int mult_cost)
2788 {
2789 struct algorithm alg2;
2790 struct mult_cost limit;
2791 int op_cost;
2792
2793 *variant = basic_variant;
2794 limit.cost = mult_cost;
2795 limit.latency = mult_cost;
2796 synth_mult (alg, val, &limit, mode);
2797
2798 /* This works only if the inverted value actually fits in an
2799 `unsigned int'. */
2800 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2801 {
2802 op_cost = neg_cost[mode];
2803 if (MULT_COST_LESS (&alg->cost, mult_cost))
2804 {
2805 limit.cost = alg->cost.cost - op_cost;
2806 limit.latency = alg->cost.latency - op_cost;
2807 }
2808 else
2809 {
2810 limit.cost = mult_cost - op_cost;
2811 limit.latency = mult_cost - op_cost;
2812 }
2813
2814 synth_mult (&alg2, -val, &limit, mode);
2815 alg2.cost.cost += op_cost;
2816 alg2.cost.latency += op_cost;
2817 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2818 *alg = alg2, *variant = negate_variant;
2819 }
2820
2821 /* This proves very useful for division-by-constant. */
2822 op_cost = add_cost[mode];
2823 if (MULT_COST_LESS (&alg->cost, mult_cost))
2824 {
2825 limit.cost = alg->cost.cost - op_cost;
2826 limit.latency = alg->cost.latency - op_cost;
2827 }
2828 else
2829 {
2830 limit.cost = mult_cost - op_cost;
2831 limit.latency = mult_cost - op_cost;
2832 }
2833
2834 synth_mult (&alg2, val - 1, &limit, mode);
2835 alg2.cost.cost += op_cost;
2836 alg2.cost.latency += op_cost;
2837 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2838 *alg = alg2, *variant = add_variant;
2839
2840 return MULT_COST_LESS (&alg->cost, mult_cost);
2841 }
2842
2843 /* A subroutine of expand_mult, used for constant multiplications.
2844 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2845 convenient. Use the shift/add sequence described by ALG and apply
2846 the final fixup specified by VARIANT. */
2847
2848 static rtx
2849 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2850 rtx target, const struct algorithm *alg,
2851 enum mult_variant variant)
2852 {
2853 HOST_WIDE_INT val_so_far;
2854 rtx insn, accum, tem;
2855 int opno;
2856 enum machine_mode nmode;
2857
2858 /* Avoid referencing memory over and over.
2859 For speed, but also for correctness when mem is volatile. */
2860 if (MEM_P (op0))
2861 op0 = force_reg (mode, op0);
2862
2863 /* ACCUM starts out either as OP0 or as a zero, depending on
2864 the first operation. */
2865
2866 if (alg->op[0] == alg_zero)
2867 {
2868 accum = copy_to_mode_reg (mode, const0_rtx);
2869 val_so_far = 0;
2870 }
2871 else if (alg->op[0] == alg_m)
2872 {
2873 accum = copy_to_mode_reg (mode, op0);
2874 val_so_far = 1;
2875 }
2876 else
2877 gcc_unreachable ();
2878
2879 for (opno = 1; opno < alg->ops; opno++)
2880 {
2881 int log = alg->log[opno];
2882 rtx shift_subtarget = optimize ? 0 : accum;
2883 rtx add_target
2884 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2885 && !optimize)
2886 ? target : 0;
2887 rtx accum_target = optimize ? 0 : accum;
2888
2889 switch (alg->op[opno])
2890 {
2891 case alg_shift:
2892 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2893 build_int_cst (NULL_TREE, log),
2894 NULL_RTX, 0);
2895 val_so_far <<= log;
2896 break;
2897
2898 case alg_add_t_m2:
2899 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2900 build_int_cst (NULL_TREE, log),
2901 NULL_RTX, 0);
2902 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2903 add_target ? add_target : accum_target);
2904 val_so_far += (HOST_WIDE_INT) 1 << log;
2905 break;
2906
2907 case alg_sub_t_m2:
2908 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2909 build_int_cst (NULL_TREE, log),
2910 NULL_RTX, 0);
2911 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2912 add_target ? add_target : accum_target);
2913 val_so_far -= (HOST_WIDE_INT) 1 << log;
2914 break;
2915
2916 case alg_add_t2_m:
2917 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2918 build_int_cst (NULL_TREE, log),
2919 shift_subtarget,
2920 0);
2921 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2922 add_target ? add_target : accum_target);
2923 val_so_far = (val_so_far << log) + 1;
2924 break;
2925
2926 case alg_sub_t2_m:
2927 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2928 build_int_cst (NULL_TREE, log),
2929 shift_subtarget, 0);
2930 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2931 add_target ? add_target : accum_target);
2932 val_so_far = (val_so_far << log) - 1;
2933 break;
2934
2935 case alg_add_factor:
2936 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2937 build_int_cst (NULL_TREE, log),
2938 NULL_RTX, 0);
2939 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2940 add_target ? add_target : accum_target);
2941 val_so_far += val_so_far << log;
2942 break;
2943
2944 case alg_sub_factor:
2945 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2946 build_int_cst (NULL_TREE, log),
2947 NULL_RTX, 0);
2948 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2949 (add_target
2950 ? add_target : (optimize ? 0 : tem)));
2951 val_so_far = (val_so_far << log) - val_so_far;
2952 break;
2953
2954 default:
2955 gcc_unreachable ();
2956 }
2957
2958 /* Write a REG_EQUAL note on the last insn so that we can cse
2959 multiplication sequences. Note that if ACCUM is a SUBREG,
2960 we've set the inner register and must properly indicate
2961 that. */
2962
2963 tem = op0, nmode = mode;
2964 if (GET_CODE (accum) == SUBREG)
2965 {
2966 nmode = GET_MODE (SUBREG_REG (accum));
2967 tem = gen_lowpart (nmode, op0);
2968 }
2969
2970 insn = get_last_insn ();
2971 set_unique_reg_note (insn, REG_EQUAL,
2972 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
2973 }
2974
2975 if (variant == negate_variant)
2976 {
2977 val_so_far = -val_so_far;
2978 accum = expand_unop (mode, neg_optab, accum, target, 0);
2979 }
2980 else if (variant == add_variant)
2981 {
2982 val_so_far = val_so_far + 1;
2983 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2984 }
2985
2986 /* Compare only the bits of val and val_so_far that are significant
2987 in the result mode, to avoid sign-/zero-extension confusion. */
2988 val &= GET_MODE_MASK (mode);
2989 val_so_far &= GET_MODE_MASK (mode);
2990 gcc_assert (val == val_so_far);
2991
2992 return accum;
2993 }
2994
2995 /* Perform a multiplication and return an rtx for the result.
2996 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2997 TARGET is a suggestion for where to store the result (an rtx).
2998
2999 We check specially for a constant integer as OP1.
3000 If you want this check for OP0 as well, then before calling
3001 you should swap the two operands if OP0 would be constant. */
3002
3003 rtx
3004 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3005 int unsignedp)
3006 {
3007 rtx const_op1 = op1;
3008 enum mult_variant variant;
3009 struct algorithm algorithm;
3010
3011 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3012 less than or equal in size to `unsigned int' this doesn't matter.
3013 If the mode is larger than `unsigned int', then synth_mult works only
3014 if the constant value exactly fits in an `unsigned int' without any
3015 truncation. This means that multiplying by negative values does
3016 not work; results are off by 2^32 on a 32 bit machine. */
3017
3018 /* If we are multiplying in DImode, it may still be a win
3019 to try to work with shifts and adds. */
3020 if (GET_CODE (op1) == CONST_DOUBLE
3021 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
3022 && HOST_BITS_PER_INT >= BITS_PER_WORD
3023 && CONST_DOUBLE_HIGH (op1) == 0)
3024 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
3025 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
3026 && GET_CODE (op1) == CONST_INT
3027 && INTVAL (op1) < 0)
3028 const_op1 = 0;
3029
3030 /* We used to test optimize here, on the grounds that it's better to
3031 produce a smaller program when -O is not used.
3032 But this causes such a terrible slowdown sometimes
3033 that it seems better to use synth_mult always. */
3034
3035 if (const_op1 && GET_CODE (const_op1) == CONST_INT
3036 && (unsignedp || !flag_trapv))
3037 {
3038 HOST_WIDE_INT coeff = INTVAL (const_op1);
3039 int mult_cost;
3040
3041 /* Special case powers of two. */
3042 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3043 {
3044 if (coeff == 0)
3045 return const0_rtx;
3046 if (coeff == 1)
3047 return op0;
3048 return expand_shift (LSHIFT_EXPR, mode, op0,
3049 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3050 target, unsignedp);
3051 }
3052
3053 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
3054 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3055 mult_cost))
3056 return expand_mult_const (mode, op0, coeff, target,
3057 &algorithm, variant);
3058 }
3059
3060 if (GET_CODE (op0) == CONST_DOUBLE)
3061 {
3062 rtx temp = op0;
3063 op0 = op1;
3064 op1 = temp;
3065 }
3066
3067 /* Expand x*2.0 as x+x. */
3068 if (GET_CODE (op1) == CONST_DOUBLE
3069 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3070 {
3071 REAL_VALUE_TYPE d;
3072 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3073
3074 if (REAL_VALUES_EQUAL (d, dconst2))
3075 {
3076 op0 = force_reg (GET_MODE (op0), op0);
3077 return expand_binop (mode, add_optab, op0, op0,
3078 target, unsignedp, OPTAB_LIB_WIDEN);
3079 }
3080 }
3081
3082 /* This used to use umul_optab if unsigned, but for non-widening multiply
3083 there is no difference between signed and unsigned. */
3084 op0 = expand_binop (mode,
3085 ! unsignedp
3086 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3087 ? smulv_optab : smul_optab,
3088 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3089 gcc_assert (op0);
3090 return op0;
3091 }
3092 \f
3093 /* Return the smallest n such that 2**n >= X. */
3094
3095 int
3096 ceil_log2 (unsigned HOST_WIDE_INT x)
3097 {
3098 return floor_log2 (x - 1) + 1;
3099 }
3100
3101 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3102 replace division by D, and put the least significant N bits of the result
3103 in *MULTIPLIER_PTR and return the most significant bit.
3104
3105 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3106 needed precision is in PRECISION (should be <= N).
3107
3108 PRECISION should be as small as possible so this function can choose
3109 multiplier more freely.
3110
3111 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3112 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3113
3114 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3115 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
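/* Example (illustrative, assuming 32-bit operands): for D == 3 with
   N == PRECISION == 32 this chooses the multiplier 0xAAAAAAAB and a post
   shift of 1, so x / 3 == (x * 0xAAAAAAAB) >> 33 for all unsigned x. */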
3116
3117 static
3118 unsigned HOST_WIDE_INT
3119 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3120 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3121 {
3122 HOST_WIDE_INT mhigh_hi, mlow_hi;
3123 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3124 int lgup, post_shift;
3125 int pow, pow2;
3126 unsigned HOST_WIDE_INT nl, dummy1;
3127 HOST_WIDE_INT nh, dummy2;
3128
3129 /* lgup = ceil(log2(divisor)); */
3130 lgup = ceil_log2 (d);
3131
3132 gcc_assert (lgup <= n);
3133
3134 pow = n + lgup;
3135 pow2 = n + lgup - precision;
3136
3137 /* We could handle this with some effort, but this case is much
3138 better handled directly with a scc insn, so rely on caller using
3139 that. */
3140 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3141
3142 /* mlow = 2^(N + lgup)/d */
3143 if (pow >= HOST_BITS_PER_WIDE_INT)
3144 {
3145 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3146 nl = 0;
3147 }
3148 else
3149 {
3150 nh = 0;
3151 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3152 }
3153 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3154 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3155
3156 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3157 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3158 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3159 else
3160 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3161 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3162 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3163
3164 gcc_assert (!mhigh_hi || nh - d < d);
3165 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3166 /* Assert that mlow < mhigh. */
3167 gcc_assert (mlow_hi < mhigh_hi
3168 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3169
3170 /* If precision == N, then mlow, mhigh exceed 2^N
3171 (but they do not exceed 2^(N+1)). */
3172
3173 /* Reduce to lowest terms. */
3174 for (post_shift = lgup; post_shift > 0; post_shift--)
3175 {
3176 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3177 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3178 if (ml_lo >= mh_lo)
3179 break;
3180
3181 mlow_hi = 0;
3182 mlow_lo = ml_lo;
3183 mhigh_hi = 0;
3184 mhigh_lo = mh_lo;
3185 }
3186
3187 *post_shift_ptr = post_shift;
3188 *lgup_ptr = lgup;
3189 if (n < HOST_BITS_PER_WIDE_INT)
3190 {
3191 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3192 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3193 return mhigh_lo >= mask;
3194 }
3195 else
3196 {
3197 *multiplier_ptr = GEN_INT (mhigh_lo);
3198 return mhigh_hi;
3199 }
3200 }
3201
3202 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3203 congruent to 1 (mod 2**N). */
3204
3205 static unsigned HOST_WIDE_INT
3206 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3207 {
3208 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3209
3210 /* The algorithm notes that the choice y = x satisfies
3211 x*y == 1 mod 2^3, since x is assumed odd.
3212 Each iteration doubles the number of bits of significance in y. */
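/* Example (illustrative): for x == 3 and n == 8 the iteration converges to
   y == 0xAB, and indeed 3 * 0xAB == 0x201 == 1 (mod 2^8). */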
3213
3214 unsigned HOST_WIDE_INT mask;
3215 unsigned HOST_WIDE_INT y = x;
3216 int nbit = 3;
3217
3218 mask = (n == HOST_BITS_PER_WIDE_INT
3219 ? ~(unsigned HOST_WIDE_INT) 0
3220 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3221
3222 while (nbit < n)
3223 {
3224 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3225 nbit *= 2;
3226 }
3227 return y;
3228 }
3229
3230 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3231 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3232 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3233 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3234 become signed.
3235
3236 The result is put in TARGET if that is convenient.
3237
3238 MODE is the mode of operation. */
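/* Informal sketch of the identity used (not part of the original comment):
   for N-bit operands a and b, the unsigned high part equals the signed
   high part plus (a < 0 ? b : 0) + (b < 0 ? a : 0), all modulo 2^N; the
   opposite direction subtracts the same two terms. Each conditional term
   is formed below by an arithmetic right shift of N-1 followed by an AND. */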
3239
3240 rtx
3241 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3242 rtx op1, rtx target, int unsignedp)
3243 {
3244 rtx tem;
3245 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3246
3247 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3248 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3249 NULL_RTX, 0);
3250 tem = expand_and (mode, tem, op1, NULL_RTX);
3251 adj_operand
3252 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3253 adj_operand);
3254
3255 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3256 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3257 NULL_RTX, 0);
3258 tem = expand_and (mode, tem, op0, NULL_RTX);
3259 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3260 target);
3261
3262 return target;
3263 }
3264
3265 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3266
3267 static rtx
3268 extract_high_half (enum machine_mode mode, rtx op)
3269 {
3270 enum machine_mode wider_mode;
3271
3272 if (mode == word_mode)
3273 return gen_highpart (mode, op);
3274
3275 wider_mode = GET_MODE_WIDER_MODE (mode);
3276 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3277 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3278 return convert_modes (mode, wider_mode, op, 0);
3279 }
3280
3281 /* Like expand_mult_highpart, but only consider using a multiplication
3282 optab. OP1 is an rtx for the constant operand. */
3283
3284 static rtx
3285 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3286 rtx target, int unsignedp, int max_cost)
3287 {
3288 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3289 enum machine_mode wider_mode;
3290 optab moptab;
3291 rtx tem;
3292 int size;
3293
3294 wider_mode = GET_MODE_WIDER_MODE (mode);
3295 size = GET_MODE_BITSIZE (mode);
3296
3297 /* Firstly, try using a multiplication insn that only generates the needed
3298 high part of the product, and in the sign flavor of unsignedp. */
3299 if (mul_highpart_cost[mode] < max_cost)
3300 {
3301 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3302 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3303 unsignedp, OPTAB_DIRECT);
3304 if (tem)
3305 return tem;
3306 }
3307
3308 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3309 Need to adjust the result after the multiplication. */
3310 if (size - 1 < BITS_PER_WORD
3311 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3312 + 4 * add_cost[mode] < max_cost))
3313 {
3314 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3315 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3316 unsignedp, OPTAB_DIRECT);
3317 if (tem)
3318 /* We used the wrong signedness. Adjust the result. */
3319 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3320 tem, unsignedp);
3321 }
3322
3323 /* Try widening multiplication. */
3324 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3325 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3326 && mul_widen_cost[wider_mode] < max_cost)
3327 {
3328 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3329 unsignedp, OPTAB_WIDEN);
3330 if (tem)
3331 return extract_high_half (mode, tem);
3332 }
3333
3334 /* Try widening the mode and performing a non-widening multiplication. */
3335 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3336 && size - 1 < BITS_PER_WORD
3337 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3338 {
3339 rtx insns, wop0, wop1;
3340
3341 /* We need to widen the operands, for example to ensure the
3342 constant multiplier is correctly sign or zero extended.
3343 Use a sequence to clean-up any instructions emitted by
3344 the conversions if things don't work out. */
3345 start_sequence ();
3346 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3347 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3348 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3349 unsignedp, OPTAB_WIDEN);
3350 insns = get_insns ();
3351 end_sequence ();
3352
3353 if (tem)
3354 {
3355 emit_insn (insns);
3356 return extract_high_half (mode, tem);
3357 }
3358 }
3359
3360 /* Try widening multiplication of opposite signedness, and adjust. */
3361 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3362 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3363 && size - 1 < BITS_PER_WORD
3364 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3365 + 4 * add_cost[mode] < max_cost))
3366 {
3367 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3368 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3369 if (tem != 0)
3370 {
3371 tem = extract_high_half (mode, tem);
3372 /* We used the wrong signedness. Adjust the result. */
3373 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3374 target, unsignedp);
3375 }
3376 }
3377
3378 return 0;
3379 }
3380
3381 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3382 putting the high half of the result in TARGET if that is convenient,
3383 and returning where the result is. If the operation cannot be performed,
3384 0 is returned.
3385
3386 MODE is the mode of operation and result.
3387
3388 UNSIGNEDP nonzero means unsigned multiply.
3389
3390 MAX_COST is the total allowed cost for the expanded RTL. */
3391
3392 static rtx
3393 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3394 rtx target, int unsignedp, int max_cost)
3395 {
3396 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3397 unsigned HOST_WIDE_INT cnst1;
3398 int extra_cost;
3399 bool sign_adjust = false;
3400 enum mult_variant variant;
3401 struct algorithm alg;
3402 rtx tem;
3403
3404 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3405 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3406
3407 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3408
3409 /* We can't optimize modes wider than BITS_PER_WORD.
3410 ??? We might be able to perform double-word arithmetic if
3411 mode == word_mode; however, all the cost calculations in
3412 synth_mult etc. assume single-word operations. */
3413 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3414 return expand_mult_highpart_optab (mode, op0, op1, target,
3415 unsignedp, max_cost);
3416
3417 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3418
3419 /* Check whether we try to multiply by a negative constant. */
3420 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3421 {
3422 sign_adjust = true;
3423 extra_cost += add_cost[mode];
3424 }
3425
3426 /* See whether shift/add multiplication is cheap enough. */
3427 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3428 max_cost - extra_cost))
3429 {
3430 /* See whether the specialized multiplication optabs are
3431 cheaper than the shift/add version. */
3432 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3433 alg.cost.cost + extra_cost);
3434 if (tem)
3435 return tem;
3436
3437 tem = convert_to_mode (wider_mode, op0, unsignedp);
3438 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3439 tem = extract_high_half (mode, tem);
3440
3441 /* Adjust result for signedness. */
3442 if (sign_adjust)
3443 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3444
3445 return tem;
3446 }
3447 return expand_mult_highpart_optab (mode, op0, op1, target,
3448 unsignedp, max_cost);
3449 }
3450
3451
3452 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3453
3454 static rtx
3455 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3456 {
3457 unsigned HOST_WIDE_INT masklow, maskhigh;
3458 rtx result, temp, shift, label;
3459 int logd;
3460
3461 logd = floor_log2 (d);
3462 result = gen_reg_rtx (mode);
3463
3464 /* Avoid conditional branches when they're expensive. */
3465 if (BRANCH_COST >= 2
3466 && !optimize_size)
3467 {
3468 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3469 mode, 0, -1);
3470 if (signmask)
3471 {
3472 signmask = force_reg (mode, signmask);
3473 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3474 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3475
3476 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3477 which instruction sequence to use. If logical right shifts
3478 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3479 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
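/* Editor's illustration (not part of the expander): the two branch-free
sequences above, written in plain C for 32-bit int X and D = 1 << LOGD
(ignoring signed-overflow pedantry; the RTL works on wrapping machine
arithmetic).  SIGNMASK is the -1/0 flag produced by emit_store_flag:

int signmask = -(x < 0);

With 2 XORs, 2 SUBs and an AND:

int r = ((x ^ signmask) - signmask) & (d - 1);
r = (r ^ signmask) - signmask;

With a LSHIFTRT, 1 ADD, 1 SUB and an AND:

unsigned int ofs = (unsigned int) signmask >> (32 - logd);
int r = ((x + (int) ofs) & (d - 1)) - (int) ofs;

Either way R is the truncated remainder x % d, computed without a
branch.  */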
3480
3481 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3482 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3483 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3484 {
3485 temp = expand_binop (mode, xor_optab, op0, signmask,
3486 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3487 temp = expand_binop (mode, sub_optab, temp, signmask,
3488 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3489 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3490 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3491 temp = expand_binop (mode, xor_optab, temp, signmask,
3492 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3493 temp = expand_binop (mode, sub_optab, temp, signmask,
3494 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3495 }
3496 else
3497 {
3498 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3500 signmask = force_reg (mode, signmask);
3501
3502 temp = expand_binop (mode, add_optab, op0, signmask,
3503 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3504 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3505 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3506 temp = expand_binop (mode, sub_optab, temp, signmask,
3507 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3508 }
3509 return temp;
3510 }
3511 }
3512
3513 /* Mask contains the mode's signbit and the significant bits of the
3514 modulus. By including the signbit in the operation, many targets
3515 can avoid an explicit compare operation in the following comparison
3516 against zero. */
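/* Editor's illustration (not part of the expander), for 32-bit int X
and D = 1 << LOGD (again ignoring signed-overflow pedantry):

int r = x & (INT_MIN | (d - 1));
if (r < 0)
r = ((r - 1) | ~(d - 1)) + 1;

Because the sign bit is kept by the AND, the test against zero needs
no separate compare, and the fix-up turns the biased value back into
the truncated remainder x % d.  */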
3517
3518 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3519 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3520 {
3521 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3522 maskhigh = -1;
3523 }
3524 else
3525 maskhigh = (HOST_WIDE_INT) -1
3526 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3527
3528 temp = expand_binop (mode, and_optab, op0,
3529 immed_double_const (masklow, maskhigh, mode),
3530 result, 1, OPTAB_LIB_WIDEN);
3531 if (temp != result)
3532 emit_move_insn (result, temp);
3533
3534 label = gen_label_rtx ();
3535 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3536
3537 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3538 0, OPTAB_LIB_WIDEN);
3539 masklow = (HOST_WIDE_INT) -1 << logd;
3540 maskhigh = -1;
3541 temp = expand_binop (mode, ior_optab, temp,
3542 immed_double_const (masklow, maskhigh, mode),
3543 result, 1, OPTAB_LIB_WIDEN);
3544 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3545 0, OPTAB_LIB_WIDEN);
3546 if (temp != result)
3547 emit_move_insn (result, temp);
3548 emit_label (label);
3549 return result;
3550 }
3551
3552 /* Expand signed division of OP0 by a power of two D in mode MODE.
3553 This routine is only called for positive values of D. */
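/* Editor's illustration (not part of the expander): every branch-free
variant below computes the truncated quotient by biasing negative
dividends before the arithmetic shift.  For 32-bit int X, assuming
arithmetic right shift of negative values:

int bias = (x < 0) ? d - 1 : 0;
int q = (x + bias) >> logd;

The store-flag, conditional-move and mask-based paths differ only in
how BIAS is materialized; the final compare-and-jump fallback makes
the same adjustment with a branch.  */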
3554
3555 static rtx
3556 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3557 {
3558 rtx temp, label;
3559 tree shift;
3560 int logd;
3561
3562 logd = floor_log2 (d);
3563 shift = build_int_cst (NULL_TREE, logd);
3564
3565 if (d == 2 && BRANCH_COST >= 1)
3566 {
3567 temp = gen_reg_rtx (mode);
3568 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3569 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3570 0, OPTAB_LIB_WIDEN);
3571 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3572 }
3573
3574 #ifdef HAVE_conditional_move
3575 if (BRANCH_COST >= 2)
3576 {
3577 rtx temp2;
3578
3579 /* ??? emit_conditional_move forces a stack adjustment via
3580 compare_from_rtx, so if the sequence is discarded, it will
3581 be lost. Do it now instead. */
3582 do_pending_stack_adjust ();
3583
3584 start_sequence ();
3585 temp2 = copy_to_mode_reg (mode, op0);
3586 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3587 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3588 temp = force_reg (mode, temp);
3589
3590 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3591 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3592 mode, temp, temp2, mode, 0);
3593 if (temp2)
3594 {
3595 rtx seq = get_insns ();
3596 end_sequence ();
3597 emit_insn (seq);
3598 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3599 }
3600 end_sequence ();
3601 }
3602 #endif
3603
3604 if (BRANCH_COST >= 2)
3605 {
3606 int ushift = GET_MODE_BITSIZE (mode) - logd;
3607
3608 temp = gen_reg_rtx (mode);
3609 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3610 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3611 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3612 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3613 else
3614 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3615 build_int_cst (NULL_TREE, ushift),
3616 NULL_RTX, 1);
3617 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3618 0, OPTAB_LIB_WIDEN);
3619 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3620 }
3621
3622 label = gen_label_rtx ();
3623 temp = copy_to_mode_reg (mode, op0);
3624 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3625 expand_inc (temp, GEN_INT (d - 1));
3626 emit_label (label);
3627 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3628 }
3629 \f
3630 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3631 if that is convenient, and returning where the result is.
3632 You may request either the quotient or the remainder as the result;
3633 specify REM_FLAG nonzero to get the remainder.
3634
3635 CODE is the expression code for which kind of division this is;
3636 it controls how rounding is done. MODE is the machine mode to use.
3637 UNSIGNEDP nonzero means do unsigned division. */
3638
3639 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3640 and then correct it by or'ing in missing high bits
3641 if result of ANDI is nonzero.
3642 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3643 This could optimize to a bfexts instruction.
3644 But C doesn't use these operations, so their optimizations are
3645 left for later. */
3646 /* ??? For modulo, we don't actually need the highpart of the first product,
3647 the low part will do nicely. And for small divisors, the second multiply
3648 can also be a low-part only multiply or even be completely left out.
3649 E.g. to calculate the remainder of a division by 3 with a 32 bit
3650 multiply, multiply with 0x55555556 and extract the upper two bits;
3651 the result is exact for inputs up to 0x1fffffff.
3652 The input range can be reduced by using cross-sum rules.
3653 For odd divisors >= 3, the following table gives right shift counts
3654 so that if a number is shifted by an integer multiple of the given
3655 amount, the remainder stays the same:
3656 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3657 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3658 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3659 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3660 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3661
3662 Cross-sum rules for even numbers can be derived by leaving as many bits
3663 to the right alone as the divisor has zeros to the right.
3664 E.g. if x is an unsigned 32 bit number:
3665 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3666 */
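/* Editor's illustration (not part of the expander): the divide-by-3
remainder trick above, for a 32-bit unsigned X.  0x55555556 is
(2**32 + 2) / 3, so the low 32 bits of X * 0x55555556 approximate
frac (X / 3) * 2**32 and their top two bits are X mod 3:

unsigned int mod3 (unsigned int x)
{
return (x * 0x55555556u) >> 30;
}

As stated above, the result is exact only for X up to 0x1fffffff.  */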
3667
3668 rtx
3669 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3670 rtx op0, rtx op1, rtx target, int unsignedp)
3671 {
3672 enum machine_mode compute_mode;
3673 rtx tquotient;
3674 rtx quotient = 0, remainder = 0;
3675 rtx last;
3676 int size;
3677 rtx insn, set;
3678 optab optab1, optab2;
3679 int op1_is_constant, op1_is_pow2 = 0;
3680 int max_cost, extra_cost;
3681 static HOST_WIDE_INT last_div_const = 0;
3682 static HOST_WIDE_INT ext_op1;
3683
3684 op1_is_constant = GET_CODE (op1) == CONST_INT;
3685 if (op1_is_constant)
3686 {
3687 ext_op1 = INTVAL (op1);
3688 if (unsignedp)
3689 ext_op1 &= GET_MODE_MASK (mode);
3690 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3691 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3692 }
3693
3694 /*
3695 This is the structure of expand_divmod:
3696
3697 First comes code to fix up the operands so we can perform the operations
3698 correctly and efficiently.
3699
3700 Second comes a switch statement with code specific for each rounding mode.
3701 For some special operands this code emits all RTL for the desired
3702 operation, for other cases, it generates only a quotient and stores it in
3703 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3704 to indicate that it has not done anything.
3705
3706 Last comes code that finishes the operation. If QUOTIENT is set and
3707 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3708 QUOTIENT is not set, it is computed using trunc rounding.
3709
3710 We try to generate special code for division and remainder when OP1 is a
3711 constant. If |OP1| = 2**n we can use shifts and some other fast
3712 operations. For other values of OP1, we compute a carefully selected
3713 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3714 by m.
3715
3716 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3717 half of the product. Different strategies for generating the product are
3718 implemented in expand_mult_highpart.
3719
3720 If what we actually want is the remainder, we generate that by another
3721 by-constant multiplication and a subtraction. */
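/* Editor's illustration (not part of the expander): the by-constant
strategy just described, for unsigned 32-bit division by 5.  The
fixed-point approximation of 1/5 is 0xCCCCCCCD = ceil (2**34 / 5);
the upper half of the widening multiply, shifted right two more bits,
is the exact quotient for every 32-bit X:

unsigned int udiv5 (unsigned int x)
{
unsigned int hi = ((unsigned long long) x * 0xCCCCCCCDu) >> 32;
return hi >> 2;
}

and the remainder, if wanted, is x - udiv5 (x) * 5.  The code below
picks the multiplier and shift with choose_multiplier and emits the
high-part multiply through expand_mult_highpart.  */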
3722
3723 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3724 code below will malfunction if we are, so check here and handle
3725 the special case if so. */
3726 if (op1 == const1_rtx)
3727 return rem_flag ? const0_rtx : op0;
3728
3729 /* When dividing by -1, we could get an overflow.
3730 negv_optab can handle overflows. */
3731 if (! unsignedp && op1 == constm1_rtx)
3732 {
3733 if (rem_flag)
3734 return const0_rtx;
3735 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3736 ? negv_optab : neg_optab, op0, target, 0);
3737 }
3738
3739 if (target
3740 /* Don't use the function value register as a target
3741 since we have to read it as well as write it,
3742 and function-inlining gets confused by this. */
3743 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3744 /* Don't clobber an operand while doing a multi-step calculation. */
3745 || ((rem_flag || op1_is_constant)
3746 && (reg_mentioned_p (target, op0)
3747 || (MEM_P (op0) && MEM_P (target))))
3748 || reg_mentioned_p (target, op1)
3749 || (MEM_P (op1) && MEM_P (target))))
3750 target = 0;
3751
3752 /* Get the mode in which to perform this computation. Normally it will
3753 be MODE, but sometimes we can't do the desired operation in MODE.
3754 If so, pick a wider mode in which we can do the operation. Convert
3755 to that mode at the start to avoid repeated conversions.
3756
3757 First see what operations we need. These depend on the expression
3758 we are evaluating. (We assume that divxx3 insns exist under the
3759 same conditions that modxx3 insns do, and that these insns don't normally
3760 fail. If these assumptions are not correct, we may generate less
3761 efficient code in some cases.)
3762
3763 Then see if we find a mode in which we can open-code that operation
3764 (either a division, modulus, or shift). Finally, check for the smallest
3765 mode for which we can do the operation with a library call. */
3766
3767 /* We might want to refine this now that we have division-by-constant
3768 optimization. Since expand_mult_highpart tries so many variants, it is
3769 not straightforward to generalize this. Maybe we should make an array
3770 of possible modes in init_expmed? Save this for GCC 2.7. */
3771
3772 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3773 ? (unsignedp ? lshr_optab : ashr_optab)
3774 : (unsignedp ? udiv_optab : sdiv_optab));
3775 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3776 ? optab1
3777 : (unsignedp ? udivmod_optab : sdivmod_optab));
3778
3779 for (compute_mode = mode; compute_mode != VOIDmode;
3780 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3781 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3782 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3783 break;
3784
3785 if (compute_mode == VOIDmode)
3786 for (compute_mode = mode; compute_mode != VOIDmode;
3787 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3788 if (optab1->handlers[compute_mode].libfunc
3789 || optab2->handlers[compute_mode].libfunc)
3790 break;
3791
3792 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3793 in expand_binop. */
3794 if (compute_mode == VOIDmode)
3795 compute_mode = mode;
3796
3797 if (target && GET_MODE (target) == compute_mode)
3798 tquotient = target;
3799 else
3800 tquotient = gen_reg_rtx (compute_mode);
3801
3802 size = GET_MODE_BITSIZE (compute_mode);
3803 #if 0
3804 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3805 (mode), and thereby get better code when OP1 is a constant. Do that
3806 later. It will require going over all usages of SIZE below. */
3807 size = GET_MODE_BITSIZE (mode);
3808 #endif
3809
3810 /* Only deduct something for a REM if the last divide done was
3811 for a different constant. Then set the constant of the last
3812 divide. */
3813 max_cost = div_cost[compute_mode]
3814 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3815 && INTVAL (op1) == last_div_const)
3816 ? mul_cost[compute_mode] + add_cost[compute_mode]
3817 : 0);
3818
3819 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3820
3821 /* Now convert to the best mode to use. */
3822 if (compute_mode != mode)
3823 {
3824 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3825 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3826
3827 /* convert_modes may have placed op1 into a register, so we
3828 must recompute the following. */
3829 op1_is_constant = GET_CODE (op1) == CONST_INT;
3830 op1_is_pow2 = (op1_is_constant
3831 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3832 || (! unsignedp
3833 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3834 }
3835
3836 /* If one of the operands is a volatile MEM, copy it into a register. */
3837
3838 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3839 op0 = force_reg (compute_mode, op0);
3840 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3841 op1 = force_reg (compute_mode, op1);
3842
3843 /* If we need the remainder or if OP1 is constant, we need to
3844 put OP0 in a register in case it has any queued subexpressions. */
3845 if (rem_flag || op1_is_constant)
3846 op0 = force_reg (compute_mode, op0);
3847
3848 last = get_last_insn ();
3849
3850 /* Promote floor rounding to trunc rounding for unsigned operations. */
3851 if (unsignedp)
3852 {
3853 if (code == FLOOR_DIV_EXPR)
3854 code = TRUNC_DIV_EXPR;
3855 if (code == FLOOR_MOD_EXPR)
3856 code = TRUNC_MOD_EXPR;
3857 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3858 code = TRUNC_DIV_EXPR;
3859 }
3860
3861 if (op1 != const0_rtx)
3862 switch (code)
3863 {
3864 case TRUNC_MOD_EXPR:
3865 case TRUNC_DIV_EXPR:
3866 if (op1_is_constant)
3867 {
3868 if (unsignedp)
3869 {
3870 unsigned HOST_WIDE_INT mh;
3871 int pre_shift, post_shift;
3872 int dummy;
3873 rtx ml;
3874 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3875 & GET_MODE_MASK (compute_mode));
3876
3877 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3878 {
3879 pre_shift = floor_log2 (d);
3880 if (rem_flag)
3881 {
3882 remainder
3883 = expand_binop (compute_mode, and_optab, op0,
3884 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3885 remainder, 1,
3886 OPTAB_LIB_WIDEN);
3887 if (remainder)
3888 return gen_lowpart (mode, remainder);
3889 }
3890 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3891 build_int_cst (NULL_TREE,
3892 pre_shift),
3893 tquotient, 1);
3894 }
3895 else if (size <= HOST_BITS_PER_WIDE_INT)
3896 {
3897 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3898 {
3899 /* Most significant bit of divisor is set; emit an scc
3900 insn. */
3901 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3902 compute_mode, 1, 1);
3903 if (quotient == 0)
3904 goto fail1;
3905 }
3906 else
3907 {
3908 /* Find a suitable multiplier and right shift count
3909 instead of multiplying with D. */
3910
3911 mh = choose_multiplier (d, size, size,
3912 &ml, &post_shift, &dummy);
3913
3914 /* If the suggested multiplier is more than SIZE bits,
3915 we can do better for even divisors, using an
3916 initial right shift. */
3917 if (mh != 0 && (d & 1) == 0)
3918 {
3919 pre_shift = floor_log2 (d & -d);
3920 mh = choose_multiplier (d >> pre_shift, size,
3921 size - pre_shift,
3922 &ml, &post_shift, &dummy);
3923 gcc_assert (!mh);
3924 }
3925 else
3926 pre_shift = 0;
3927
3928 if (mh != 0)
3929 {
3930 rtx t1, t2, t3, t4;
3931
3932 if (post_shift - 1 >= BITS_PER_WORD)
3933 goto fail1;
3934
3935 extra_cost
3936 = (shift_cost[compute_mode][post_shift - 1]
3937 + shift_cost[compute_mode][1]
3938 + 2 * add_cost[compute_mode]);
3939 t1 = expand_mult_highpart (compute_mode, op0, ml,
3940 NULL_RTX, 1,
3941 max_cost - extra_cost);
3942 if (t1 == 0)
3943 goto fail1;
3944 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3945 op0, t1),
3946 NULL_RTX);
3947 t3 = expand_shift
3948 (RSHIFT_EXPR, compute_mode, t2,
3949 build_int_cst (NULL_TREE, 1),
3950 NULL_RTX,1);
3951 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3952 t1, t3),
3953 NULL_RTX);
3954 quotient = expand_shift
3955 (RSHIFT_EXPR, compute_mode, t4,
3956 build_int_cst (NULL_TREE, post_shift - 1),
3957 tquotient, 1);
3958 }
3959 else
3960 {
3961 rtx t1, t2;
3962
3963 if (pre_shift >= BITS_PER_WORD
3964 || post_shift >= BITS_PER_WORD)
3965 goto fail1;
3966
3967 t1 = expand_shift
3968 (RSHIFT_EXPR, compute_mode, op0,
3969 build_int_cst (NULL_TREE, pre_shift),
3970 NULL_RTX, 1);
3971 extra_cost
3972 = (shift_cost[compute_mode][pre_shift]
3973 + shift_cost[compute_mode][post_shift]);
3974 t2 = expand_mult_highpart (compute_mode, t1, ml,
3975 NULL_RTX, 1,
3976 max_cost - extra_cost);
3977 if (t2 == 0)
3978 goto fail1;
3979 quotient = expand_shift
3980 (RSHIFT_EXPR, compute_mode, t2,
3981 build_int_cst (NULL_TREE, post_shift),
3982 tquotient, 1);
3983 }
3984 }
3985 }
3986 else /* Mode too wide to use the tricky code */
3987 break;
3988
3989 insn = get_last_insn ();
3990 if (insn != last
3991 && (set = single_set (insn)) != 0
3992 && SET_DEST (set) == quotient)
3993 set_unique_reg_note (insn,
3994 REG_EQUAL,
3995 gen_rtx_UDIV (compute_mode, op0, op1));
3996 }
3997 else /* TRUNC_DIV, signed */
3998 {
3999 unsigned HOST_WIDE_INT ml;
4000 int lgup, post_shift;
4001 rtx mlr;
4002 HOST_WIDE_INT d = INTVAL (op1);
4003 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
4004
4005 /* n rem d = n rem -d */
4006 if (rem_flag && d < 0)
4007 {
4008 d = abs_d;
4009 op1 = gen_int_mode (abs_d, compute_mode);
4010 }
4011
4012 if (d == 1)
4013 quotient = op0;
4014 else if (d == -1)
4015 quotient = expand_unop (compute_mode, neg_optab, op0,
4016 tquotient, 0);
4017 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4018 {
4019 /* This case is not handled correctly below. */
4020 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4021 compute_mode, 1, 1);
4022 if (quotient == 0)
4023 goto fail1;
4024 }
4025 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4026 && (rem_flag ? smod_pow2_cheap[compute_mode]
4027 : sdiv_pow2_cheap[compute_mode])
4028 /* We assume that the cheap metric is true if the
4029 optab has an expander for this mode. */
4030 && (((rem_flag ? smod_optab : sdiv_optab)
4031 ->handlers[compute_mode].insn_code
4032 != CODE_FOR_nothing)
4033 || (sdivmod_optab->handlers[compute_mode]
4034 .insn_code != CODE_FOR_nothing)))
4035 ;
4036 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4037 {
4038 if (rem_flag)
4039 {
4040 remainder = expand_smod_pow2 (compute_mode, op0, d);
4041 if (remainder)
4042 return gen_lowpart (mode, remainder);
4043 }
4044
4045 if (sdiv_pow2_cheap[compute_mode]
4046 && ((sdiv_optab->handlers[compute_mode].insn_code
4047 != CODE_FOR_nothing)
4048 || (sdivmod_optab->handlers[compute_mode].insn_code
4049 != CODE_FOR_nothing)))
4050 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4051 compute_mode, op0,
4052 gen_int_mode (abs_d,
4053 compute_mode),
4054 NULL_RTX, 0);
4055 else
4056 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4057
4058 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4059 negate the quotient. */
4060 if (d < 0)
4061 {
4062 insn = get_last_insn ();
4063 if (insn != last
4064 && (set = single_set (insn)) != 0
4065 && SET_DEST (set) == quotient
4066 && abs_d < ((unsigned HOST_WIDE_INT) 1
4067 << (HOST_BITS_PER_WIDE_INT - 1)))
4068 set_unique_reg_note (insn,
4069 REG_EQUAL,
4070 gen_rtx_DIV (compute_mode,
4071 op0,
4072 GEN_INT
4073 (trunc_int_for_mode
4074 (abs_d,
4075 compute_mode))));
4076
4077 quotient = expand_unop (compute_mode, neg_optab,
4078 quotient, quotient, 0);
4079 }
4080 }
4081 else if (size <= HOST_BITS_PER_WIDE_INT)
4082 {
4083 choose_multiplier (abs_d, size, size - 1,
4084 &mlr, &post_shift, &lgup);
4085 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4086 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4087 {
4088 rtx t1, t2, t3;
4089
4090 if (post_shift >= BITS_PER_WORD
4091 || size - 1 >= BITS_PER_WORD)
4092 goto fail1;
4093
4094 extra_cost = (shift_cost[compute_mode][post_shift]
4095 + shift_cost[compute_mode][size - 1]
4096 + add_cost[compute_mode]);
4097 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4098 NULL_RTX, 0,
4099 max_cost - extra_cost);
4100 if (t1 == 0)
4101 goto fail1;
4102 t2 = expand_shift
4103 (RSHIFT_EXPR, compute_mode, t1,
4104 build_int_cst (NULL_TREE, post_shift),
4105 NULL_RTX, 0);
4106 t3 = expand_shift
4107 (RSHIFT_EXPR, compute_mode, op0,
4108 build_int_cst (NULL_TREE, size - 1),
4109 NULL_RTX, 0);
4110 if (d < 0)
4111 quotient
4112 = force_operand (gen_rtx_MINUS (compute_mode,
4113 t3, t2),
4114 tquotient);
4115 else
4116 quotient
4117 = force_operand (gen_rtx_MINUS (compute_mode,
4118 t2, t3),
4119 tquotient);
4120 }
4121 else
4122 {
4123 rtx t1, t2, t3, t4;
4124
4125 if (post_shift >= BITS_PER_WORD
4126 || size - 1 >= BITS_PER_WORD)
4127 goto fail1;
4128
4129 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4130 mlr = gen_int_mode (ml, compute_mode);
4131 extra_cost = (shift_cost[compute_mode][post_shift]
4132 + shift_cost[compute_mode][size - 1]
4133 + 2 * add_cost[compute_mode]);
4134 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4135 NULL_RTX, 0,
4136 max_cost - extra_cost);
4137 if (t1 == 0)
4138 goto fail1;
4139 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4140 t1, op0),
4141 NULL_RTX);
4142 t3 = expand_shift
4143 (RSHIFT_EXPR, compute_mode, t2,
4144 build_int_cst (NULL_TREE, post_shift),
4145 NULL_RTX, 0);
4146 t4 = expand_shift
4147 (RSHIFT_EXPR, compute_mode, op0,
4148 build_int_cst (NULL_TREE, size - 1),
4149 NULL_RTX, 0);
4150 if (d < 0)
4151 quotient
4152 = force_operand (gen_rtx_MINUS (compute_mode,
4153 t4, t3),
4154 tquotient);
4155 else
4156 quotient
4157 = force_operand (gen_rtx_MINUS (compute_mode,
4158 t3, t4),
4159 tquotient);
4160 }
4161 }
4162 else /* Mode too wide to use the tricky code */
4163 break;
4164
4165 insn = get_last_insn ();
4166 if (insn != last
4167 && (set = single_set (insn)) != 0
4168 && SET_DEST (set) == quotient)
4169 set_unique_reg_note (insn,
4170 REG_EQUAL,
4171 gen_rtx_DIV (compute_mode, op0, op1));
4172 }
4173 break;
4174 }
4175 fail1:
4176 delete_insns_since (last);
4177 break;
4178
4179 case FLOOR_DIV_EXPR:
4180 case FLOOR_MOD_EXPR:
4181 /* We will come here only for signed operations. */
4182 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4183 {
4184 unsigned HOST_WIDE_INT mh;
4185 int pre_shift, lgup, post_shift;
4186 HOST_WIDE_INT d = INTVAL (op1);
4187 rtx ml;
4188
4189 if (d > 0)
4190 {
4191 /* We could just as easily deal with negative constants here,
4192 but it does not seem worth the trouble for GCC 2.6. */
4193 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4194 {
4195 pre_shift = floor_log2 (d);
4196 if (rem_flag)
4197 {
4198 remainder = expand_binop (compute_mode, and_optab, op0,
4199 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4200 remainder, 0, OPTAB_LIB_WIDEN);
4201 if (remainder)
4202 return gen_lowpart (mode, remainder);
4203 }
4204 quotient = expand_shift
4205 (RSHIFT_EXPR, compute_mode, op0,
4206 build_int_cst (NULL_TREE, pre_shift),
4207 tquotient, 0);
4208 }
4209 else
4210 {
4211 rtx t1, t2, t3, t4;
4212
4213 mh = choose_multiplier (d, size, size - 1,
4214 &ml, &post_shift, &lgup);
4215 gcc_assert (!mh);
4216
4217 if (post_shift < BITS_PER_WORD
4218 && size - 1 < BITS_PER_WORD)
4219 {
4220 t1 = expand_shift
4221 (RSHIFT_EXPR, compute_mode, op0,
4222 build_int_cst (NULL_TREE, size - 1),
4223 NULL_RTX, 0);
4224 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4225 NULL_RTX, 0, OPTAB_WIDEN);
4226 extra_cost = (shift_cost[compute_mode][post_shift]
4227 + shift_cost[compute_mode][size - 1]
4228 + 2 * add_cost[compute_mode]);
4229 t3 = expand_mult_highpart (compute_mode, t2, ml,
4230 NULL_RTX, 1,
4231 max_cost - extra_cost);
4232 if (t3 != 0)
4233 {
4234 t4 = expand_shift
4235 (RSHIFT_EXPR, compute_mode, t3,
4236 build_int_cst (NULL_TREE, post_shift),
4237 NULL_RTX, 1);
4238 quotient = expand_binop (compute_mode, xor_optab,
4239 t4, t1, tquotient, 0,
4240 OPTAB_WIDEN);
4241 }
4242 }
4243 }
4244 }
4245 else
4246 {
4247 rtx nsign, t1, t2, t3, t4;
4248 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4249 op0, constm1_rtx), NULL_RTX);
4250 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4251 0, OPTAB_WIDEN);
4252 nsign = expand_shift
4253 (RSHIFT_EXPR, compute_mode, t2,
4254 build_int_cst (NULL_TREE, size - 1),
4255 NULL_RTX, 0);
4256 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4257 NULL_RTX);
4258 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4259 NULL_RTX, 0);
4260 if (t4)
4261 {
4262 rtx t5;
4263 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4264 NULL_RTX, 0);
4265 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4266 t4, t5),
4267 tquotient);
4268 }
4269 }
4270 }
4271
4272 if (quotient != 0)
4273 break;
4274 delete_insns_since (last);
4275
4276 /* Try using an instruction that produces both the quotient and
4277 remainder, using truncation. We can easily compensate the quotient
4278 or remainder to get floor rounding, once we have the remainder.
4279 Notice that we also compute the final remainder value here,
4280 and return the result right away. */
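/* Editor's sketch (not part of the expander) of that compensation in
plain C, for signed X and Y and a truncating divmod:

q = x / y;  r = x % y;
if (r != 0 && (x ^ y) < 0)
{
q -= 1;
r += y;
}

This matches the XOR sign test and the quotient/remainder adjustment
emitted just below.  */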
4281 if (target == 0 || GET_MODE (target) != compute_mode)
4282 target = gen_reg_rtx (compute_mode);
4283
4284 if (rem_flag)
4285 {
4286 remainder
4287 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4288 quotient = gen_reg_rtx (compute_mode);
4289 }
4290 else
4291 {
4292 quotient
4293 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4294 remainder = gen_reg_rtx (compute_mode);
4295 }
4296
4297 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4298 quotient, remainder, 0))
4299 {
4300 /* This could be computed with a branch-less sequence.
4301 Save that for later. */
4302 rtx tem;
4303 rtx label = gen_label_rtx ();
4304 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4305 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4306 NULL_RTX, 0, OPTAB_WIDEN);
4307 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4308 expand_dec (quotient, const1_rtx);
4309 expand_inc (remainder, op1);
4310 emit_label (label);
4311 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4312 }
4313
4314 /* No luck with division elimination or divmod. Have to do it
4315 by conditionally adjusting op0 *and* the result. */
4316 {
4317 rtx label1, label2, label3, label4, label5;
4318 rtx adjusted_op0;
4319 rtx tem;
4320
4321 quotient = gen_reg_rtx (compute_mode);
4322 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4323 label1 = gen_label_rtx ();
4324 label2 = gen_label_rtx ();
4325 label3 = gen_label_rtx ();
4326 label4 = gen_label_rtx ();
4327 label5 = gen_label_rtx ();
4328 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4329 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4330 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4331 quotient, 0, OPTAB_LIB_WIDEN);
4332 if (tem != quotient)
4333 emit_move_insn (quotient, tem);
4334 emit_jump_insn (gen_jump (label5));
4335 emit_barrier ();
4336 emit_label (label1);
4337 expand_inc (adjusted_op0, const1_rtx);
4338 emit_jump_insn (gen_jump (label4));
4339 emit_barrier ();
4340 emit_label (label2);
4341 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4342 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4343 quotient, 0, OPTAB_LIB_WIDEN);
4344 if (tem != quotient)
4345 emit_move_insn (quotient, tem);
4346 emit_jump_insn (gen_jump (label5));
4347 emit_barrier ();
4348 emit_label (label3);
4349 expand_dec (adjusted_op0, const1_rtx);
4350 emit_label (label4);
4351 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4352 quotient, 0, OPTAB_LIB_WIDEN);
4353 if (tem != quotient)
4354 emit_move_insn (quotient, tem);
4355 expand_dec (quotient, const1_rtx);
4356 emit_label (label5);
4357 }
4358 break;
4359
4360 case CEIL_DIV_EXPR:
4361 case CEIL_MOD_EXPR:
4362 if (unsignedp)
4363 {
4364 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4365 {
4366 rtx t1, t2, t3;
4367 unsigned HOST_WIDE_INT d = INTVAL (op1);
4368 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4369 build_int_cst (NULL_TREE, floor_log2 (d)),
4370 tquotient, 1);
4371 t2 = expand_binop (compute_mode, and_optab, op0,
4372 GEN_INT (d - 1),
4373 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4374 t3 = gen_reg_rtx (compute_mode);
4375 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4376 compute_mode, 1, 1);
4377 if (t3 == 0)
4378 {
4379 rtx lab;
4380 lab = gen_label_rtx ();
4381 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4382 expand_inc (t1, const1_rtx);
4383 emit_label (lab);
4384 quotient = t1;
4385 }
4386 else
4387 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4388 t1, t3),
4389 tquotient);
4390 break;
4391 }
4392
4393 /* Try using an instruction that produces both the quotient and
4394 remainder, using truncation. We can easily compensate the
4395 quotient or remainder to get ceiling rounding, once we have the
4396 remainder. Notice that we also compute the final remainder
4397 value here, and return the result right away. */
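/* Editor's sketch (not part of the expander): for unsigned operands,
ceiling rounding from a truncating divmod is simply

q = x / y;  r = x % y;
if (r != 0)
{
q += 1;
r -= y;
}

which matches the adjustment emitted below.  */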
4398 if (target == 0 || GET_MODE (target) != compute_mode)
4399 target = gen_reg_rtx (compute_mode);
4400
4401 if (rem_flag)
4402 {
4403 remainder = (REG_P (target)
4404 ? target : gen_reg_rtx (compute_mode));
4405 quotient = gen_reg_rtx (compute_mode);
4406 }
4407 else
4408 {
4409 quotient = (REG_P (target)
4410 ? target : gen_reg_rtx (compute_mode));
4411 remainder = gen_reg_rtx (compute_mode);
4412 }
4413
4414 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4415 remainder, 1))
4416 {
4417 /* This could be computed with a branch-less sequence.
4418 Save that for later. */
4419 rtx label = gen_label_rtx ();
4420 do_cmp_and_jump (remainder, const0_rtx, EQ,
4421 compute_mode, label);
4422 expand_inc (quotient, const1_rtx);
4423 expand_dec (remainder, op1);
4424 emit_label (label);
4425 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4426 }
4427
4428 /* No luck with division elimination or divmod. Have to do it
4429 by conditionally adjusting op0 *and* the result. */
4430 {
4431 rtx label1, label2;
4432 rtx adjusted_op0, tem;
4433
4434 quotient = gen_reg_rtx (compute_mode);
4435 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4436 label1 = gen_label_rtx ();
4437 label2 = gen_label_rtx ();
4438 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4439 compute_mode, label1);
4440 emit_move_insn (quotient, const0_rtx);
4441 emit_jump_insn (gen_jump (label2));
4442 emit_barrier ();
4443 emit_label (label1);
4444 expand_dec (adjusted_op0, const1_rtx);
4445 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4446 quotient, 1, OPTAB_LIB_WIDEN);
4447 if (tem != quotient)
4448 emit_move_insn (quotient, tem);
4449 expand_inc (quotient, const1_rtx);
4450 emit_label (label2);
4451 }
4452 }
4453 else /* signed */
4454 {
4455 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4456 && INTVAL (op1) >= 0)
4457 {
4458 /* This is extremely similar to the code for the unsigned case
4459 above. For 2.7 we should merge these variants, but for
4460 2.6.1 I don't want to touch the code for unsigned since that
4461 gets used in C. The signed case will only be used by other
4462 languages (Ada). */
4463
4464 rtx t1, t2, t3;
4465 unsigned HOST_WIDE_INT d = INTVAL (op1);
4466 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4467 build_int_cst (NULL_TREE, floor_log2 (d)),
4468 tquotient, 0);
4469 t2 = expand_binop (compute_mode, and_optab, op0,
4470 GEN_INT (d - 1),
4471 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4472 t3 = gen_reg_rtx (compute_mode);
4473 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4474 compute_mode, 1, 1);
4475 if (t3 == 0)
4476 {
4477 rtx lab;
4478 lab = gen_label_rtx ();
4479 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4480 expand_inc (t1, const1_rtx);
4481 emit_label (lab);
4482 quotient = t1;
4483 }
4484 else
4485 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4486 t1, t3),
4487 tquotient);
4488 break;
4489 }
4490
4491 /* Try using an instruction that produces both the quotient and
4492 remainder, using truncation. We can easily compensate the
4493 quotient or remainder to get ceiling rounding, once we have the
4494 remainder. Notice that we also compute the final remainder
4495 value here, and return the result right away. */
4496 if (target == 0 || GET_MODE (target) != compute_mode)
4497 target = gen_reg_rtx (compute_mode);
4498 if (rem_flag)
4499 {
4500 remainder = (REG_P (target)
4501 ? target : gen_reg_rtx (compute_mode));
4502 quotient = gen_reg_rtx (compute_mode);
4503 }
4504 else
4505 {
4506 quotient = (REG_P (target)
4507 ? target : gen_reg_rtx (compute_mode));
4508 remainder = gen_reg_rtx (compute_mode);
4509 }
4510
4511 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4512 remainder, 0))
4513 {
4514 /* This could be computed with a branch-less sequence.
4515 Save that for later. */
4516 rtx tem;
4517 rtx label = gen_label_rtx ();
4518 do_cmp_and_jump (remainder, const0_rtx, EQ,
4519 compute_mode, label);
4520 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4521 NULL_RTX, 0, OPTAB_WIDEN);
4522 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4523 expand_inc (quotient, const1_rtx);
4524 expand_dec (remainder, op1);
4525 emit_label (label);
4526 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4527 }
4528
4529 /* No luck with division elimination or divmod. Have to do it
4530 by conditionally adjusting op0 *and* the result. */
4531 {
4532 rtx label1, label2, label3, label4, label5;
4533 rtx adjusted_op0;
4534 rtx tem;
4535
4536 quotient = gen_reg_rtx (compute_mode);
4537 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4538 label1 = gen_label_rtx ();
4539 label2 = gen_label_rtx ();
4540 label3 = gen_label_rtx ();
4541 label4 = gen_label_rtx ();
4542 label5 = gen_label_rtx ();
4543 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4544 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4545 compute_mode, label1);
4546 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4547 quotient, 0, OPTAB_LIB_WIDEN);
4548 if (tem != quotient)
4549 emit_move_insn (quotient, tem);
4550 emit_jump_insn (gen_jump (label5));
4551 emit_barrier ();
4552 emit_label (label1);
4553 expand_dec (adjusted_op0, const1_rtx);
4554 emit_jump_insn (gen_jump (label4));
4555 emit_barrier ();
4556 emit_label (label2);
4557 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4558 compute_mode, label3);
4559 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4560 quotient, 0, OPTAB_LIB_WIDEN);
4561 if (tem != quotient)
4562 emit_move_insn (quotient, tem);
4563 emit_jump_insn (gen_jump (label5));
4564 emit_barrier ();
4565 emit_label (label3);
4566 expand_inc (adjusted_op0, const1_rtx);
4567 emit_label (label4);
4568 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4569 quotient, 0, OPTAB_LIB_WIDEN);
4570 if (tem != quotient)
4571 emit_move_insn (quotient, tem);
4572 expand_inc (quotient, const1_rtx);
4573 emit_label (label5);
4574 }
4575 }
4576 break;
4577
4578 case EXACT_DIV_EXPR:
4579 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4580 {
4581 HOST_WIDE_INT d = INTVAL (op1);
4582 unsigned HOST_WIDE_INT ml;
4583 int pre_shift;
4584 rtx t1;
4585
4586 pre_shift = floor_log2 (d & -d);
4587 ml = invert_mod2n (d >> pre_shift, size);
4588 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4589 build_int_cst (NULL_TREE, pre_shift),
4590 NULL_RTX, unsignedp);
4591 quotient = expand_mult (compute_mode, t1,
4592 gen_int_mode (ml, compute_mode),
4593 NULL_RTX, 1);
4594
4595 insn = get_last_insn ();
4596 set_unique_reg_note (insn,
4597 REG_EQUAL,
4598 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4599 compute_mode,
4600 op0, op1));
4601 }
4602 break;
4603
4604 case ROUND_DIV_EXPR:
4605 case ROUND_MOD_EXPR:
4606 if (unsignedp)
4607 {
4608 rtx tem;
4609 rtx label;
4610 label = gen_label_rtx ();
4611 quotient = gen_reg_rtx (compute_mode);
4612 remainder = gen_reg_rtx (compute_mode);
4613 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4614 {
4615 rtx tem;
4616 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4617 quotient, 1, OPTAB_LIB_WIDEN);
4618 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4619 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4620 remainder, 1, OPTAB_LIB_WIDEN);
4621 }
4622 tem = plus_constant (op1, -1);
4623 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4624 build_int_cst (NULL_TREE, 1),
4625 NULL_RTX, 1);
4626 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4627 expand_inc (quotient, const1_rtx);
4628 expand_dec (remainder, op1);
4629 emit_label (label);
4630 }
4631 else
4632 {
4633 rtx abs_rem, abs_op1, tem, mask;
4634 rtx label;
4635 label = gen_label_rtx ();
4636 quotient = gen_reg_rtx (compute_mode);
4637 remainder = gen_reg_rtx (compute_mode);
4638 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4639 {
4640 rtx tem;
4641 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4642 quotient, 0, OPTAB_LIB_WIDEN);
4643 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4644 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4645 remainder, 0, OPTAB_LIB_WIDEN);
4646 }
4647 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4648 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4649 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4650 build_int_cst (NULL_TREE, 1),
4651 NULL_RTX, 1);
4652 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4653 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4654 NULL_RTX, 0, OPTAB_WIDEN);
4655 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4656 build_int_cst (NULL_TREE, size - 1),
4657 NULL_RTX, 0);
4658 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4659 NULL_RTX, 0, OPTAB_WIDEN);
4660 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4661 NULL_RTX, 0, OPTAB_WIDEN);
4662 expand_inc (quotient, tem);
4663 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4664 NULL_RTX, 0, OPTAB_WIDEN);
4665 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4666 NULL_RTX, 0, OPTAB_WIDEN);
4667 expand_dec (remainder, tem);
4668 emit_label (label);
4669 }
4670 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4671
4672 default:
4673 gcc_unreachable ();
4674 }
4675
4676 if (quotient == 0)
4677 {
4678 if (target && GET_MODE (target) != compute_mode)
4679 target = 0;
4680
4681 if (rem_flag)
4682 {
4683 /* Try to produce the remainder without producing the quotient.
4684 If we seem to have a divmod pattern that does not require widening,
4685 don't try widening here. We should really have a WIDEN argument
4686 to expand_twoval_binop, since what we'd really like to do here is
4687 1) try a mod insn in compute_mode
4688 2) try a divmod insn in compute_mode
4689 3) try a div insn in compute_mode and multiply-subtract to get
4690 remainder
4691 4) try the same things with widening allowed. */
4692 remainder
4693 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4694 op0, op1, target,
4695 unsignedp,
4696 ((optab2->handlers[compute_mode].insn_code
4697 != CODE_FOR_nothing)
4698 ? OPTAB_DIRECT : OPTAB_WIDEN));
4699 if (remainder == 0)
4700 {
4701 /* No luck there. Can we do remainder and divide at once
4702 without a library call? */
4703 remainder = gen_reg_rtx (compute_mode);
4704 if (! expand_twoval_binop ((unsignedp
4705 ? udivmod_optab
4706 : sdivmod_optab),
4707 op0, op1,
4708 NULL_RTX, remainder, unsignedp))
4709 remainder = 0;
4710 }
4711
4712 if (remainder)
4713 return gen_lowpart (mode, remainder);
4714 }
4715
4716 /* Produce the quotient. Try a quotient insn, but not a library call.
4717 If we have a divmod in this mode, use it in preference to widening
4718 the div (for this test we assume it will not fail). Note that optab2
4719 is set to the one of the two optabs that the call below will use. */
4720 quotient
4721 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4722 op0, op1, rem_flag ? NULL_RTX : target,
4723 unsignedp,
4724 ((optab2->handlers[compute_mode].insn_code
4725 != CODE_FOR_nothing)
4726 ? OPTAB_DIRECT : OPTAB_WIDEN));
4727
4728 if (quotient == 0)
4729 {
4730 /* No luck there. Try a quotient-and-remainder insn,
4731 keeping the quotient alone. */
4732 quotient = gen_reg_rtx (compute_mode);
4733 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4734 op0, op1,
4735 quotient, NULL_RTX, unsignedp))
4736 {
4737 quotient = 0;
4738 if (! rem_flag)
4739 /* Still no luck. If we are not computing the remainder,
4740 use a library call for the quotient. */
4741 quotient = sign_expand_binop (compute_mode,
4742 udiv_optab, sdiv_optab,
4743 op0, op1, target,
4744 unsignedp, OPTAB_LIB_WIDEN);
4745 }
4746 }
4747 }
4748
4749 if (rem_flag)
4750 {
4751 if (target && GET_MODE (target) != compute_mode)
4752 target = 0;
4753
4754 if (quotient == 0)
4755 {
4756 /* No divide instruction either. Use library for remainder. */
4757 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4758 op0, op1, target,
4759 unsignedp, OPTAB_LIB_WIDEN);
4760 /* No remainder function. Try a quotient-and-remainder
4761 function, keeping the remainder. */
4762 if (!remainder)
4763 {
4764 remainder = gen_reg_rtx (compute_mode);
4765 if (!expand_twoval_binop_libfunc
4766 (unsignedp ? udivmod_optab : sdivmod_optab,
4767 op0, op1,
4768 NULL_RTX, remainder,
4769 unsignedp ? UMOD : MOD))
4770 remainder = NULL_RTX;
4771 }
4772 }
4773 else
4774 {
4775 /* We divided. Now finish doing X - Y * (X / Y). */
4776 remainder = expand_mult (compute_mode, quotient, op1,
4777 NULL_RTX, unsignedp);
4778 remainder = expand_binop (compute_mode, sub_optab, op0,
4779 remainder, target, unsignedp,
4780 OPTAB_LIB_WIDEN);
4781 }
4782 }
4783
4784 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4785 }
4786 \f
4787 /* Return a tree node with data type TYPE, describing the value of X.
4788 Usually this is a VAR_DECL, if there is no obvious better choice.
4789 X may be an expression; however, we only support those expressions
4790 generated by loop.c. */
4791
4792 tree
4793 make_tree (tree type, rtx x)
4794 {
4795 tree t;
4796
4797 switch (GET_CODE (x))
4798 {
4799 case CONST_INT:
4800 {
4801 HOST_WIDE_INT hi = 0;
4802
4803 if (INTVAL (x) < 0
4804 && !(TYPE_UNSIGNED (type)
4805 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4806 < HOST_BITS_PER_WIDE_INT)))
4807 hi = -1;
4808
4809 t = build_int_cst_wide (type, INTVAL (x), hi);
4810
4811 return t;
4812 }
4813
4814 case CONST_DOUBLE:
4815 if (GET_MODE (x) == VOIDmode)
4816 t = build_int_cst_wide (type,
4817 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4818 else
4819 {
4820 REAL_VALUE_TYPE d;
4821
4822 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4823 t = build_real (type, d);
4824 }
4825
4826 return t;
4827
4828 case CONST_VECTOR:
4829 {
4830 int i, units;
4831 rtx elt;
4832 tree t = NULL_TREE;
4833
4834 units = CONST_VECTOR_NUNITS (x);
4835
4836 /* Build a tree with vector elements. */
4837 for (i = units - 1; i >= 0; --i)
4838 {
4839 elt = CONST_VECTOR_ELT (x, i);
4840 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4841 }
4842
4843 return build_vector (type, t);
4844 }
4845
4846 case PLUS:
4847 return fold (build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4848 make_tree (type, XEXP (x, 1))));
4849
4850 case MINUS:
4851 return fold (build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4852 make_tree (type, XEXP (x, 1))));
4853
4854 case NEG:
4855 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4856
4857 case MULT:
4858 return fold (build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4859 make_tree (type, XEXP (x, 1))));
4860
4861 case ASHIFT:
4862 return fold (build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4863 make_tree (type, XEXP (x, 1))));
4864
4865 case LSHIFTRT:
4866 t = lang_hooks.types.unsigned_type (type);
4867 return fold (convert (type,
4868 build2 (RSHIFT_EXPR, t,
4869 make_tree (t, XEXP (x, 0)),
4870 make_tree (type, XEXP (x, 1)))));
4871
4872 case ASHIFTRT:
4873 t = lang_hooks.types.signed_type (type);
4874 return fold (convert (type,
4875 build2 (RSHIFT_EXPR, t,
4876 make_tree (t, XEXP (x, 0)),
4877 make_tree (type, XEXP (x, 1)))));
4878
4879 case DIV:
4880 if (TREE_CODE (type) != REAL_TYPE)
4881 t = lang_hooks.types.signed_type (type);
4882 else
4883 t = type;
4884
4885 return fold (convert (type,
4886 build2 (TRUNC_DIV_EXPR, t,
4887 make_tree (t, XEXP (x, 0)),
4888 make_tree (t, XEXP (x, 1)))));
4889 case UDIV:
4890 t = lang_hooks.types.unsigned_type (type);
4891 return fold (convert (type,
4892 build2 (TRUNC_DIV_EXPR, t,
4893 make_tree (t, XEXP (x, 0)),
4894 make_tree (t, XEXP (x, 1)))));
4895
4896 case SIGN_EXTEND:
4897 case ZERO_EXTEND:
4898 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4899 GET_CODE (x) == ZERO_EXTEND);
4900 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4901
4902 default:
4903 t = build_decl (VAR_DECL, NULL_TREE, type);
4904
4905 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4906 ptr_mode. So convert. */
4907 if (POINTER_TYPE_P (type))
4908 x = convert_memory_address (TYPE_MODE (type), x);
4909
4910 /* Note that we do *not* use SET_DECL_RTL here, because we do not
4911 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
4912 t->decl.rtl = x;
4913
4914 return t;
4915 }
4916 }
4917
4918 /* Check whether the multiplication X * MULT + ADD overflows.
4919 X, MULT and ADD must be CONST_*.
4920 MODE is the machine mode for the computation.
4921 X and MULT must have mode MODE. ADD may have a different mode,
4922 or VOIDmode, in which case it is treated as having mode MODE.
4923 UNSIGNEDP is nonzero to do unsigned multiplication. */
4924
4925 bool
4926 const_mult_add_overflow_p (rtx x, rtx mult, rtx add,
4927 enum machine_mode mode, int unsignedp)
4928 {
4929 tree type, mult_type, add_type, result;
4930
4931 type = lang_hooks.types.type_for_mode (mode, unsignedp);
4932
4933 /* In order to get a proper overflow indication from an unsigned
4934 type, we have to pretend that it's a sizetype. */
4935 mult_type = type;
4936 if (unsignedp)
4937 {
4938 /* FIXME: It would be nice if we could step directly from this
4939 type to its sizetype equivalent. */
4940 mult_type = build_distinct_type_copy (type);
4941 TYPE_IS_SIZETYPE (mult_type) = 1;
4942 }
4943
4944 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4945 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
4946
4947 result = fold (build2 (PLUS_EXPR, mult_type,
4948 fold (build2 (MULT_EXPR, mult_type,
4949 make_tree (mult_type, x),
4950 make_tree (mult_type, mult))),
4951 make_tree (add_type, add)));
4952
4953 return TREE_CONSTANT_OVERFLOW (result);
4954 }
4955
4956 /* Return an rtx representing the value of X * MULT + ADD.
4957 TARGET is a suggestion for where to store the result (an rtx).
4958 MODE is the machine mode for the computation.
4959 X and MULT must have mode MODE. ADD may have a different mode,
4960 or VOIDmode, in which case it is treated as having mode MODE.
4961 UNSIGNEDP is nonzero to do unsigned multiplication.
4962 This may emit insns. */
4963
4964 rtx
4965 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4966 int unsignedp)
4967 {
4968 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
4969 tree add_type = (GET_MODE (add) == VOIDmode
4970 ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
4971 unsignedp));
4972 tree result = fold (build2 (PLUS_EXPR, type,
4973 fold (build2 (MULT_EXPR, type,
4974 make_tree (type, x),
4975 make_tree (type, mult))),
4976 make_tree (add_type, add)));
4977
4978 return expand_expr (result, target, VOIDmode, 0);
4979 }
4980 \f
4981 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4982 and returning TARGET.
4983
4984 If TARGET is 0, a pseudo-register or constant is returned. */
4985
4986 rtx
4987 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4988 {
4989 rtx tem = 0;
4990
4991 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4992 tem = simplify_binary_operation (AND, mode, op0, op1);
4993 if (tem == 0)
4994 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4995
4996 if (target == 0)
4997 target = tem;
4998 else if (tem != target)
4999 emit_move_insn (target, tem);
5000 return target;
5001 }
5002 \f
5003 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5004 and storing in TARGET. Normally return TARGET.
5005 Return 0 if that cannot be done.
5006
5007 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5008 it is VOIDmode, they cannot both be CONST_INT.
5009
5010 UNSIGNEDP is for the case where we have to widen the operands
5011 to perform the operation. It says to use zero-extension.
5012
5013 NORMALIZEP is 1 if we should convert the result to be either zero
5014 or one. NORMALIZEP is -1 if we should convert the result to be
5015 either zero or -1. If NORMALIZEP is zero, the result will be left
5016 "raw" out of the scc insn. */
5017
5018 rtx
5019 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5020 enum machine_mode mode, int unsignedp, int normalizep)
5021 {
5022 rtx subtarget;
5023 enum insn_code icode;
5024 enum machine_mode compare_mode;
5025 enum machine_mode target_mode = GET_MODE (target);
5026 rtx tem;
5027 rtx last = get_last_insn ();
5028 rtx pattern, comparison;
5029
5030 if (unsignedp)
5031 code = unsigned_condition (code);
5032
5033 /* If one operand is constant, make it the second one. Only do this
5034 if the other operand is not constant as well. */
5035
5036 if (swap_commutative_operands_p (op0, op1))
5037 {
5038 tem = op0;
5039 op0 = op1;
5040 op1 = tem;
5041 code = swap_condition (code);
5042 }
5043
5044 if (mode == VOIDmode)
5045 mode = GET_MODE (op0);
5046
5047 /* For some comparisons with 1 and -1, we can convert this to
5048 comparisons with zero. This will often produce more opportunities for
5049 store-flag insns. */
5050
5051 switch (code)
5052 {
5053 case LT:
5054 if (op1 == const1_rtx)
5055 op1 = const0_rtx, code = LE;
5056 break;
5057 case LE:
5058 if (op1 == constm1_rtx)
5059 op1 = const0_rtx, code = LT;
5060 break;
5061 case GE:
5062 if (op1 == const1_rtx)
5063 op1 = const0_rtx, code = GT;
5064 break;
5065 case GT:
5066 if (op1 == constm1_rtx)
5067 op1 = const0_rtx, code = GE;
5068 break;
5069 case GEU:
5070 if (op1 == const1_rtx)
5071 op1 = const0_rtx, code = NE;
5072 break;
5073 case LTU:
5074 if (op1 == const1_rtx)
5075 op1 = const0_rtx, code = EQ;
5076 break;
5077 default:
5078 break;
5079 }
5080
5081 /* If we are comparing a double-word integer with zero or -1, we can
5082 convert the comparison into one involving a single word. */
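/* Editor's sketch (not part of the expander), with the double-word X
split into words LO and HI:

x == 0    is  (lo | hi) == 0
x == -1   is  (lo & hi) == -1
x <  0    is  hi < 0

so a single word-sized comparison (and its negation for NE and GE)
suffices.  */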
5083 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5084 && GET_MODE_CLASS (mode) == MODE_INT
5085 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5086 {
5087 if ((code == EQ || code == NE)
5088 && (op1 == const0_rtx || op1 == constm1_rtx))
5089 {
5090 rtx op00, op01, op0both;
5091
5092 /* Do a logical OR or AND of the two words and compare the result. */
5093 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5094 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5095 op0both = expand_binop (word_mode,
5096 op1 == const0_rtx ? ior_optab : and_optab,
5097 op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
5098
5099 if (op0both != 0)
5100 return emit_store_flag (target, code, op0both, op1, word_mode,
5101 unsignedp, normalizep);
5102 }
5103 else if ((code == LT || code == GE) && op1 == const0_rtx)
5104 {
5105 rtx op0h;
5106
5107 /* If testing the sign bit, can just test on high word. */
5108 op0h = simplify_gen_subreg (word_mode, op0, mode,
5109 subreg_highpart_offset (word_mode, mode));
5110 return emit_store_flag (target, code, op0h, op1, word_mode,
5111 unsignedp, normalizep);
5112 }
5113 }
5114
5115 /* From now on, we won't change CODE, so set ICODE now. */
5116 icode = setcc_gen_code[(int) code];
5117
5118 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5119 complement of A (for GE) and shifting the sign bit to the low bit. */
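/* For a 32-bit A, A < 0 is computed as (unsigned) A >> 31, i.e. the
sign bit moved down to bit 0; for A >= 0 we first take the ones
complement so the shifted-down bit is inverted. When a 0/-1 result is
wanted instead, an arithmetic shift is used below. */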
5120 if (op1 == const0_rtx && (code == LT || code == GE)
5121 && GET_MODE_CLASS (mode) == MODE_INT
5122 && (normalizep || STORE_FLAG_VALUE == 1
5123 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5124 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5125 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
5126 {
5127 subtarget = target;
5128
5129 /* If the result is to be wider than OP0, it is best to convert it
5130 first. If it is to be narrower, it is *incorrect* to convert it
5131 first. */
5132 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5133 {
5134 op0 = convert_modes (target_mode, mode, op0, 0);
5135 mode = target_mode;
5136 }
5137
5138 if (target_mode != mode)
5139 subtarget = 0;
5140
5141 if (code == GE)
5142 op0 = expand_unop (mode, one_cmpl_optab, op0,
5143 ((STORE_FLAG_VALUE == 1 || normalizep)
5144 ? 0 : subtarget), 0);
5145
5146 if (STORE_FLAG_VALUE == 1 || normalizep)
5147 /* If we are supposed to produce a 0/1 value, we want to do
5148 a logical shift from the sign bit to the low-order bit; for
5149 a -1/0 value, we do an arithmetic shift. */
5150 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5151 size_int (GET_MODE_BITSIZE (mode) - 1),
5152 subtarget, normalizep != -1);
5153
5154 if (mode != target_mode)
5155 op0 = convert_modes (target_mode, mode, op0, 0);
5156
5157 return op0;
5158 }
5159
5160 if (icode != CODE_FOR_nothing)
5161 {
5162 insn_operand_predicate_fn pred;
5163
5164 /* We think we may be able to do this with a scc insn. Emit the
5165 comparison and then the scc insn. */
5166
5167 do_pending_stack_adjust ();
5168 last = get_last_insn ();
5169
5170 comparison
5171 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5172 if (CONSTANT_P (comparison))
5173 {
5174 switch (GET_CODE (comparison))
5175 {
5176 case CONST_INT:
5177 if (comparison == const0_rtx)
5178 return const0_rtx;
5179 break;
5180
5181 #ifdef FLOAT_STORE_FLAG_VALUE
5182 case CONST_DOUBLE:
5183 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5184 return const0_rtx;
5185 break;
5186 #endif
5187 default:
5188 gcc_unreachable ();
5189 }
5190
5191 if (normalizep == 1)
5192 return const1_rtx;
5193 if (normalizep == -1)
5194 return constm1_rtx;
5195 return const_true_rtx;
5196 }
5197
5198 /* The code of COMPARISON may not match CODE if compare_from_rtx
5199 decided to swap its operands and reverse the original code.
5200
5201 We know that compare_from_rtx returns either a CONST_INT or
5202 a new comparison code, so it is safe to just extract the
5203 code from COMPARISON. */
5204 code = GET_CODE (comparison);
5205
5206 /* Get a reference to the target in the proper mode for this insn. */
5207 compare_mode = insn_data[(int) icode].operand[0].mode;
5208 subtarget = target;
5209 pred = insn_data[(int) icode].operand[0].predicate;
5210 if (optimize || ! (*pred) (subtarget, compare_mode))
5211 subtarget = gen_reg_rtx (compare_mode);
5212
5213 pattern = GEN_FCN (icode) (subtarget);
5214 if (pattern)
5215 {
5216 emit_insn (pattern);
5217
5218 /* If we are converting to a wider mode, first convert to
5219 TARGET_MODE, then normalize. This produces better combining
5220 opportunities on machines that have a SIGN_EXTRACT when we are
5221 testing a single bit. This mostly benefits the 68k.
5222
5223 If STORE_FLAG_VALUE does not have the sign bit set when
5224 interpreted in COMPARE_MODE, we can do this conversion as
5225 unsigned, which is usually more efficient. */
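/* The value computed below is the UNSIGNEDP argument of convert_move:
it is nonzero when the sign bit of STORE_FLAG_VALUE is clear in
COMPARE_MODE, in which case zero extension preserves the value. */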
5226 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5227 {
5228 convert_move (target, subtarget,
5229 (GET_MODE_BITSIZE (compare_mode)
5230 <= HOST_BITS_PER_WIDE_INT)
5231 && 0 == (STORE_FLAG_VALUE
5232 & ((HOST_WIDE_INT) 1
5233 << (GET_MODE_BITSIZE (compare_mode) -1))));
5234 op0 = target;
5235 compare_mode = target_mode;
5236 }
5237 else
5238 op0 = subtarget;
5239
5240 /* If we want to keep subexpressions around, don't reuse our
5241 last target. */
5242
5243 if (optimize)
5244 subtarget = 0;
5245
5246 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
5247 we don't have to do anything. */
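/* For example, if the scc insn produced STORE_FLAG_VALUE == -1 and a
0/1 result is wanted (NORMALIZEP == 1), a single negation suffices; if
STORE_FLAG_VALUE is just the sign bit, a right shift by the mode width
minus one gives 0/1 (logical shift) or 0/-1 (arithmetic shift). */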
5248 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5249 ;
5250 /* STORE_FLAG_VALUE might be the most negative number, so write
5251 the comparison this way to avoid a compile-time warning. */
5252 else if (- normalizep == STORE_FLAG_VALUE)
5253 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
5254
5255 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
5256 makes it hard to use a value of just the sign bit due to
5257 ANSI integer constant typing rules. */
5258 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
5259 && (STORE_FLAG_VALUE
5260 & ((HOST_WIDE_INT) 1
5261 << (GET_MODE_BITSIZE (compare_mode) - 1))))
5262 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
5263 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
5264 subtarget, normalizep == 1);
5265 else
5266 {
5267 gcc_assert (STORE_FLAG_VALUE & 1);
5268
5269 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
5270 if (normalizep == -1)
5271 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
5272 }
5273
5274 /* If we were converting to a smaller mode, do the
5275 conversion now. */
5276 if (target_mode != compare_mode)
5277 {
5278 convert_move (target, op0, 0);
5279 return target;
5280 }
5281 else
5282 return op0;
5283 }
5284 }
5285
5286 delete_insns_since (last);
5287
5288 /* If optimizing, use different pseudo registers for each insn, instead
5289 of reusing the same pseudo. This leads to better CSE, but slows
5290 down the compiler, since there are more pseudos. */
5291 subtarget = (!optimize
5292 && (target_mode == mode)) ? target : NULL_RTX;
5293
5294 /* If we reached here, we can't do this with a scc insn. However, there
5295 are some comparisons that can be done directly. For example, if
5296 this is an equality comparison of integers, we can try to exclusive-or
5297 (or subtract) the two operands and use a recursive call to try the
5298 comparison with zero. Don't do any of these cases if branches are
5299 very cheap. */
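/* That is, A == B is rewritten as (A ^ B) == 0 (or, if the xor cannot
be expanded, as (A - B) == 0), leaving only a comparison against zero
for the recursive emit_store_flag call below. */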
5300
5301 if (BRANCH_COST > 0
5302 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5303 && op1 != const0_rtx)
5304 {
5305 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5306 OPTAB_WIDEN);
5307
5308 if (tem == 0)
5309 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5310 OPTAB_WIDEN);
5311 if (tem != 0)
5312 tem = emit_store_flag (target, code, tem, const0_rtx,
5313 mode, unsignedp, normalizep);
5314 if (tem == 0)
5315 delete_insns_since (last);
5316 return tem;
5317 }
5318
5319 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5320 the constant zero. Reject all other comparisons at this point. Only
5321 do LE and GT if branches are expensive since they are expensive on
5322 2-operand machines. */
5323
5324 if (BRANCH_COST == 0
5325 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5326 || (code != EQ && code != NE
5327 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5328 return 0;
5329
5330 /* See what we need to return. We can only return a 1, -1, or the
5331 sign bit. */
5332
5333 if (normalizep == 0)
5334 {
5335 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5336 normalizep = STORE_FLAG_VALUE;
5337
5338 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5339 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5340 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5341 ;
5342 else
5343 return 0;
5344 }
5345
5346 /* Try to put the result of the comparison in the sign bit. Start by
5347 assuming we can't; the cases below set TEM if they succeed. */
5348
5349 tem = 0;
5350
5351 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5352 the sign bit set. */
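/* For example, with 8-bit values: A = 0 gives 0 | 0xff = 0xff (sign
bit set), A = 3 gives 3 | 2 = 3 (sign bit clear), and any negative A
already has its sign bit set, which OR cannot clear. */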
5353
5354 if (code == LE)
5355 {
5356 /* This is destructive, so SUBTARGET can't be OP0. */
5357 if (rtx_equal_p (subtarget, op0))
5358 subtarget = 0;
5359
5360 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5361 OPTAB_WIDEN);
5362 if (tem)
5363 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5364 OPTAB_WIDEN);
5365 }
5366
5367 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5368 number of bits in the mode of OP0, minus one. */
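/* For example, with 8-bit values: A = 5 gives (5 >> 7) - 5 = -5 (sign
bit set), A = 0 gives 0 - 0 = 0, and A = -3 gives -1 - (-3) = 2 (sign
bit clear), so the sign bit of the result is set exactly when A > 0. */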
5369
5370 if (code == GT)
5371 {
5372 if (rtx_equal_p (subtarget, op0))
5373 subtarget = 0;
5374
5375 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5376 size_int (GET_MODE_BITSIZE (mode) - 1),
5377 subtarget, 0);
5378 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5379 OPTAB_WIDEN);
5380 }
5381
5382 if (code == EQ || code == NE)
5383 {
5384 /* For EQ or NE, one way to do the comparison is to apply an operation
5385 that converts the operand into a positive number if it is nonzero
5386 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5387 for NE we negate. This puts the result in the sign bit. Then we
5388 normalize with a shift, if needed.
5389
5390 Two operations that can do the above actions are ABS and FFS, so try
5391 them. If that doesn't work, and MODE is smaller than a full word,
5392 we can use zero-extension to the wider mode (an unsigned conversion)
5393 as the operation. */
5394
5395 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5396 that is compensated by the subsequent overflow when subtracting
5397 one / negating. */
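/* In other words, once TEM is abs (A), ffs (A), or A zero-extended to
word_mode, TEM is zero iff A was zero; TEM - 1 then has the sign bit
set only for A == 0 (giving EQ), while -TEM has the sign bit set only
for A != 0 (giving NE). */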
5398
5399 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5400 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5401 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5402 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5403 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5404 {
5405 tem = convert_modes (word_mode, mode, op0, 1);
5406 mode = word_mode;
5407 }
5408
5409 if (tem != 0)
5410 {
5411 if (code == EQ)
5412 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5413 0, OPTAB_WIDEN);
5414 else
5415 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5416 }
5417
5418 /* If we couldn't do it that way, for NE we can "or" the two's complement
5419 of the value with itself. For EQ, we take the one's complement of
5420 that "or", which is an extra insn, so we only handle EQ if branches
5421 are expensive. */
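/* (-A) | A has the sign bit set for every nonzero A, since at least
one of A and -A is negative (both are when A is the minimum value),
and it is zero only for A == 0. */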
5422
5423 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5424 {
5425 if (rtx_equal_p (subtarget, op0))
5426 subtarget = 0;
5427
5428 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5429 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5430 OPTAB_WIDEN);
5431
5432 if (tem && code == EQ)
5433 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5434 }
5435 }
5436
5437 if (tem && normalizep)
5438 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5439 size_int (GET_MODE_BITSIZE (mode) - 1),
5440 subtarget, normalizep == 1);
5441
5442 if (tem)
5443 {
5444 if (GET_MODE (tem) != target_mode)
5445 {
5446 convert_move (target, tem, 0);
5447 tem = target;
5448 }
5449 else if (!subtarget)
5450 {
5451 emit_move_insn (target, tem);
5452 tem = target;
5453 }
5454 }
5455 else
5456 delete_insns_since (last);
5457
5458 return tem;
5459 }
5460
5461 /* Like emit_store_flag, but always succeeds. */
5462
5463 rtx
5464 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5465 enum machine_mode mode, int unsignedp, int normalizep)
5466 {
5467 rtx tem, label;
5468
5469 /* First see if emit_store_flag can do the job. */
5470 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5471 if (tem != 0)
5472 return tem;
5473
5474 if (normalizep == 0)
5475 normalizep = 1;
5476
5477 /* If this failed, we have to do this with set/compare/jump/set code. */
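/* The sequence emitted below is: TARGET = 1; if (OP0 CODE OP1) goto
LABEL; TARGET = 0; LABEL:. Hence TARGET must not appear in OP0 or
OP1; the check below falls back to a fresh pseudo if it does. */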
5478
5479 if (!REG_P (target)
5480 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5481 target = gen_reg_rtx (GET_MODE (target));
5482
5483 emit_move_insn (target, const1_rtx);
5484 label = gen_label_rtx ();
5485 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5486 NULL_RTX, label);
5487
5488 emit_move_insn (target, const0_rtx);
5489 emit_label (label);
5490
5491 return target;
5492 }
5493 \f
5494 /* Perform possibly multi-word comparison and conditional jump to LABEL
5495 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
5496
5497 The algorithm is based on the code in expr.c:do_jump.
5498
5499 Note that this does not perform a general comparison. Only variants
5500 generated within expmed.c are correctly handled; others abort (but could
5501 be handled if needed). */
5502
5503 static void
5504 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5505 rtx label)
5506 {
5507 /* If this mode is an integer too wide to compare properly,
5508 compare word by word. Rely on cse to optimize constant cases. */
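/* The ordered cases below recast the requested condition as a
"greater than" test for do_jump_by_parts_greater_rtx, swapping the
operands and the true/false labels as needed, while EQ and NE (against
zero only) use the equality helper; LABEL2 is the fall-through point
just after the branch. */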
5509
5510 if (GET_MODE_CLASS (mode) == MODE_INT
5511 && ! can_compare_p (op, mode, ccp_jump))
5512 {
5513 rtx label2 = gen_label_rtx ();
5514
5515 switch (op)
5516 {
5517 case LTU:
5518 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
5519 break;
5520
5521 case LEU:
5522 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
5523 break;
5524
5525 case LT:
5526 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
5527 break;
5528
5529 case GT:
5530 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
5531 break;
5532
5533 case GE:
5534 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
5535 break;
5536
5537 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
5538 those are the only equality operations we do. */
5539 case EQ:
5540 gcc_assert (arg2 == const0_rtx && mode == GET_MODE(arg1));
5541 do_jump_by_parts_equality_rtx (arg1, label2, label);
5542 break;
5543
5544 case NE:
5545 gcc_assert (arg2 == const0_rtx && mode == GET_MODE(arg1));
5546 do_jump_by_parts_equality_rtx (arg1, label, label2);
5547 break;
5548
5549 default:
5550 gcc_unreachable ();
5551 }
5552
5553 emit_label (label2);
5554 }
5555 else
5556 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
5557 }