/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap[NUM_MACHINE_MODES];
static int smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  rtx shift_pat, shiftadd_pat, shiftsub_pat;
  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int dummy;
  int m, n;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  zero_cost = rtx_cost (const0_rtx, 0);

  init_recog ();

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      add_cost[mode] = rtx_cost (gen_rtx_PLUS (mode, reg, reg), SET);
      neg_cost[mode] = rtx_cost (gen_rtx_NEG (mode, reg), SET);
      div_cost[mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);

      sdiv_pow2_cheap[mode]
        = (rtx_cost (gen_rtx_DIV (mode, reg, GEN_INT (32)), SET)
           <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode]
        = (rtx_cost (gen_rtx_MOD (mode, reg, GEN_INT (32)), SET)
           <= 2 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }

      shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                           gen_rtx_ASHIFT (mode, reg,
                                                           const0_rtx)));

      shiftadd_insn
        = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                  gen_rtx_PLUS (mode,
                                                gen_rtx_MULT (mode,
                                                              reg,
                                                              const0_rtx),
                                                reg)));

      shiftsub_insn
        = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                  gen_rtx_MINUS (mode,
                                                 gen_rtx_MULT (mode,
                                                               reg,
                                                               const0_rtx),
                                                 reg)));

      shift_pat = PATTERN (shift_insn);
      shiftadd_pat = PATTERN (shiftadd_insn);
      shiftsub_pat = PATTERN (shiftsub_insn);

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          shift_cost[mode][m] = 32000;
          XEXP (SET_SRC (shift_pat), 1) = cint[m];
          if (recog (shift_pat, shift_insn, &dummy) >= 0)
            shift_cost[mode][m] = rtx_cost (SET_SRC (shift_pat), SET);

          shiftadd_cost[mode][m] = 32000;
          XEXP (XEXP (SET_SRC (shiftadd_pat), 0), 1) = pow2[m];
          if (recog (shiftadd_pat, shiftadd_insn, &dummy) >= 0)
            shiftadd_cost[mode][m] = rtx_cost (SET_SRC (shiftadd_pat), SET);

          shiftsub_cost[mode][m] = 32000;
          XEXP (XEXP (SET_SRC (shiftsub_pat), 0), 1) = pow2[m];
          if (recog (shiftsub_pat, shiftsub_insn, &dummy) >= 0)
            shiftsub_cost[mode][m] = rtx_cost (SET_SRC (shiftsub_pat), SET);
        }
    }

  end_sequence ();
}

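/* Illustrative sketch (editor's addition, not part of GCC): the tables
   filled in by init_expmed are consulted by the synthesis code later in
   this file when it weighs, say, a shift-based sequence against a
   hardware multiply.  The hypothetical helper below shows the kind of
   comparison involved; it is compiled out.  */
#if 0
static int
shift_beats_multiply_p (enum machine_mode mode, int m)
{
  /* A multiply by 2**m is better done as a single shift when the
     recorded shift cost is below the general multiply cost.  */
  return shift_cost[mode][m] < mul_cost[mode];
}
#endif
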
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

\f
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  /* Use vec_set patterns for storing parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (REG_P (op0))
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode),
                           total_size);
        }
      return value;
    }
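
  /* Worked example (editor's note, hypothetical values): storing a
     40-bit field at BITNUM 0 on a 32-bit little-endian target takes the
     branch above with nwords == 2; iteration 0 stores bits 0-31 into
     word 0 and iteration 1 stores bits 32-39 into word 1, least
     significant word first.  */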

  /* From here on we can assume that the field to be stored
     fits within a single word; fields wider than a word were
     handled above.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
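
/* Example use (editor's sketch; the constants are hypothetical):

     store_bit_field (reg, 8, 5, QImode, val, -1);

   stores the 8-bit value VAL into bits 5..12 of REG; the trailing -1
   says the size of the containing structure is unknown.  The main
   callers are the expansion routines in expr.c, such as store_field.  */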
\f
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
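
/* A minimal scalar model of what store_fixed_bit_field emits (editor's
   sketch, compiled out; it assumes the field fits in one word and that
   BITSIZE is less than the width of HOST_WIDE_INT):  */
#if 0
static unsigned HOST_WIDE_INT
store_fixed_bit_field_model (unsigned HOST_WIDE_INT word,
                             unsigned HOST_WIDE_INT value,
                             int bitsize, int bitpos)
{
  unsigned HOST_WIDE_INT mask
    = (((unsigned HOST_WIDE_INT) 1 << bitsize) - 1) << bitpos;

  /* The AND clears the field and the IOR merges in the shifted value;
     the real code skips the AND when VALUE is all-ones and the IOR
     when VALUE is zero.  */
  return (word & ~mask) | ((value << bitpos) & mask);
}
#endif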
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
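
/* Worked example (editor's note, hypothetical values): with 32-bit
   units, storing a 10-bit field at BITPOS 28 makes two trips through
   the loop above.  On a little-endian target the first trip stores the
   4 low-order bits of VALUE at bit 28 of word 0; the second stores the
   remaining 6 bits at bit 0 of word 1.  Big-endian fetches the pieces
   in the opposite order.  */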
\f
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the field (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
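
/* Example use (editor's sketch; the constants are hypothetical):

     rtx x = extract_bit_field (mem, 12, 4, 1, NULL_RTX,
                                SImode, SImode, -1);

   extracts bits 4..15 of MEM zero-extended to SImode, letting the
   function choose where the result lives.  */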

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode,
                   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (GET_CODE (op0) == MEM
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (REG_P (op0))
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
1649 OP0 addresses a register (word) or memory (byte).
1650 BITPOS says which bit within the word or byte the bit field starts in.
1651 OFFSET says how many bytes farther the bit field starts;
1652 it is 0 if OP0 is a register.
1653 BITSIZE says how many bits long the bit field is.
1654 (If OP0 is a register, it may be narrower than a full word,
1655 but BITPOS still counts within a full word,
1656 which is significant on bigendian machines.)
1657
1658 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1659 If TARGET is nonzero, attempts to store the value there
1660 and return TARGET, but this is not guaranteed.
1661 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1662
1663 static rtx
1664 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1665 unsigned HOST_WIDE_INT offset,
1666 unsigned HOST_WIDE_INT bitsize,
1667 unsigned HOST_WIDE_INT bitpos, rtx target,
1668 int unsignedp)
1669 {
1670 unsigned int total_bits = BITS_PER_WORD;
1671 enum machine_mode mode;
1672
1673 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1674 {
1675 /* Special treatment for a bit field split across two registers. */
1676 if (bitsize + bitpos > BITS_PER_WORD)
1677 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1678 }
1679 else
1680 {
1681 /* Get the proper mode to use for this field. We want a mode that
1682 includes the entire field. If such a mode would be larger than
1683 a word, we won't be doing the extraction the normal way. */
1684
1685 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1686 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1687
1688 if (mode == VOIDmode)
1689 /* The only way this should occur is if the field spans word
1690 boundaries. */
1691 return extract_split_bit_field (op0, bitsize,
1692 bitpos + offset * BITS_PER_UNIT,
1693 unsignedp);
1694
1695 total_bits = GET_MODE_BITSIZE (mode);
1696
1697 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1698 be in the range 0 to total_bits-1, and put any excess bytes in
1699 OFFSET. */
1700 if (bitpos >= total_bits)
1701 {
1702 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1703 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1704 * BITS_PER_UNIT);
1705 }
1706
1707 /* Get ref to an aligned byte, halfword, or word containing the field.
1708 Adjust BITPOS to be position within a word,
1709 and OFFSET to be the offset of that word.
1710 Then alter OP0 to refer to that word. */
1711 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1712 offset -= (offset % (total_bits / BITS_PER_UNIT));
1713 op0 = adjust_address (op0, mode, offset);
1714 }
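/* A worked instance (illustrative only): with a byte-aligned MEM,
total_bits == 32, OFFSET == 5 and BITPOS == 7, the adjustment above
yields BITPOS == 15 and OFFSET == 4, i.e. the field is re-addressed
relative to the aligned SImode word that starts at byte 4. */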
1715
1716 mode = GET_MODE (op0);
1717
1718 if (BYTES_BIG_ENDIAN)
1719 /* BITPOS is the distance between our msb and that of OP0.
1720 Convert it to the distance from the lsb. */
1721 bitpos = total_bits - bitsize - bitpos;
1722
1723 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1724 We have reduced the big-endian case to the little-endian case. */
1725
1726 if (unsignedp)
1727 {
1728 if (bitpos)
1729 {
1730 /* If the field does not already start at the lsb,
1731 shift it so it does. */
1732 tree amount = build_int_2 (bitpos, 0);
1733 /* Maybe propagate the target for the shift. */
1734 /* But not if we will return it--could confuse integrate.c. */
1735 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1736 if (tmode != mode) subtarget = 0;
1737 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1738 }
1739 /* Convert the value to the desired mode. */
1740 if (mode != tmode)
1741 op0 = convert_to_mode (tmode, op0, 1);
1742
1743 /* Unless the msb of the field used to be the msb when we shifted,
1744 mask out the upper bits. */
1745
1746 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1747 return expand_binop (GET_MODE (op0), and_optab, op0,
1748 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1749 target, 1, OPTAB_LIB_WIDEN);
1750 return op0;
1751 }
1752
1753 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1754 then arithmetic-shift its lsb to the lsb of the word. */
1755 op0 = force_reg (mode, op0);
1756 if (mode != tmode)
1757 target = 0;
1758
1759 /* Find the narrowest integer mode that contains the field. */
1760
1761 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1762 mode = GET_MODE_WIDER_MODE (mode))
1763 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1764 {
1765 op0 = convert_to_mode (mode, op0, 0);
1766 break;
1767 }
1768
1769 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1770 {
1771 tree amount
1772 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1773 /* Maybe propagate the target for the shift. */
1774 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1775 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1776 }
1777
1778 return expand_shift (RSHIFT_EXPR, mode, op0,
1779 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1780 target, 0);
1781 }
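/* Example (illustrative, not from the original sources): extracting a
signed 4-bit field at BITPOS 2 of a QImode value V uses the two shifts
above. With GET_MODE_BITSIZE == 8 the field is first moved to the msb
by a left shift of 8 - (4 + 2) == 2, then an arithmetic right shift by
8 - 4 == 4 replicates the sign bit; so V == 0xb4 (field bits 1101)
yields 0xfd, i.e. -3. */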
1782 \f
1783 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1784 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1785 complement of that if COMPLEMENT. The mask is truncated if
1786 necessary to the width of mode MODE. The mask is zero-extended if
1787 BITSIZE+BITPOS is too small for MODE. */
1788
1789 static rtx
1790 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1791 {
1792 HOST_WIDE_INT masklow, maskhigh;
1793
1794 if (bitsize == 0)
1795 masklow = 0;
1796 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1797 masklow = (HOST_WIDE_INT) -1 << bitpos;
1798 else
1799 masklow = 0;
1800
1801 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1802 masklow &= ((unsigned HOST_WIDE_INT) -1
1803 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1804
1805 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1806 maskhigh = -1;
1807 else
1808 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1809
1810 if (bitsize == 0)
1811 maskhigh = 0;
1812 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1813 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1814 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1815 else
1816 maskhigh = 0;
1817
1818 if (complement)
1819 {
1820 maskhigh = ~maskhigh;
1821 masklow = ~masklow;
1822 }
1823
1824 return immed_double_const (masklow, maskhigh, mode);
1825 }
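/* For illustration (assuming a 64-bit HOST_WIDE_INT): mask_rtx
(SImode, 4, 8, 0) computes masklow == ((HOST_WIDE_INT) -1 << 4)
masked down to the low 12 bits, giving 0xff0 -- eight ones followed
by four zeros -- while mask_rtx (SImode, 4, 8, 1) yields the
complement, 0xfffff00f after truncation to the 32-bit mode. */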
1826
1827 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1828 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1829
1830 static rtx
1831 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1832 {
1833 unsigned HOST_WIDE_INT v = INTVAL (value);
1834 HOST_WIDE_INT low, high;
1835
1836 if (bitsize < HOST_BITS_PER_WIDE_INT)
1837 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1838
1839 if (bitpos < HOST_BITS_PER_WIDE_INT)
1840 {
1841 low = v << bitpos;
1842 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1843 }
1844 else
1845 {
1846 low = 0;
1847 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1848 }
1849
1850 return immed_double_const (low, high, mode);
1851 }
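/* For example (illustrative values): lshift_value (DImode,
GEN_INT (0x2a), 8, 4) first truncates 0x2a to its low 4 bits,
giving 0xa, then shifts that into place, producing the constant
0xa00. */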
1852 \f
1853 /* Extract a bit field that is split across two words
1854 and return an RTX for the result.
1855
1856 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1857 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1858 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1859
1860 static rtx
1861 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1862 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1863 {
1864 unsigned int unit;
1865 unsigned int bitsdone = 0;
1866 rtx result = NULL_RTX;
1867 int first = 1;
1868
1869 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1870 much at a time. */
1871 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1872 unit = BITS_PER_WORD;
1873 else
1874 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1875
1876 while (bitsdone < bitsize)
1877 {
1878 unsigned HOST_WIDE_INT thissize;
1879 rtx part, word;
1880 unsigned HOST_WIDE_INT thispos;
1881 unsigned HOST_WIDE_INT offset;
1882
1883 offset = (bitpos + bitsdone) / unit;
1884 thispos = (bitpos + bitsdone) % unit;
1885
1886 /* THISSIZE must not overrun a word boundary. Otherwise,
1887 extract_fixed_bit_field will call us again, and we will mutually
1888 recurse forever. */
1889 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1890 thissize = MIN (thissize, unit - thispos);
1891
1892 /* If OP0 is a register, then handle OFFSET here.
1893
1894 When handling multiword bitfields, extract_bit_field may pass
1895 down a word_mode SUBREG of a larger REG for a bitfield that actually
1896 crosses a word boundary. Thus, for a SUBREG, we must find
1897 the current word starting from the base register. */
1898 if (GET_CODE (op0) == SUBREG)
1899 {
1900 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1901 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1902 GET_MODE (SUBREG_REG (op0)));
1903 offset = 0;
1904 }
1905 else if (REG_P (op0))
1906 {
1907 word = operand_subword_force (op0, offset, GET_MODE (op0));
1908 offset = 0;
1909 }
1910 else
1911 word = op0;
1912
1913 /* Extract the parts in bit-counting order,
1914 whose meaning is determined by BYTES_BIG_ENDIAN.
1915 OFFSET is in UNITs, and UNIT is in bits.
1916 extract_fixed_bit_field wants offset in bytes. */
1917 part = extract_fixed_bit_field (word_mode, word,
1918 offset * unit / BITS_PER_UNIT,
1919 thissize, thispos, 0, 1);
1920 bitsdone += thissize;
1921
1922 /* Shift this part into place for the result. */
1923 if (BYTES_BIG_ENDIAN)
1924 {
1925 if (bitsize != bitsdone)
1926 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1927 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1928 }
1929 else
1930 {
1931 if (bitsdone != thissize)
1932 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1933 build_int_2 (bitsdone - thissize, 0), 0, 1);
1934 }
1935
1936 if (first)
1937 result = part;
1938 else
1939 /* Combine the parts with bitwise or. This works
1940 because we extracted each part as an unsigned bit field. */
1941 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1942 OPTAB_LIB_WIDEN);
1943
1944 first = 0;
1945 }
1946
1947 /* Unsigned bit field: we are done. */
1948 if (unsignedp)
1949 return result;
1950 /* Signed bit field: sign-extend with two arithmetic shifts. */
1951 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1952 build_int_2 (BITS_PER_WORD - bitsize, 0),
1953 NULL_RTX, 0);
1954 return expand_shift (RSHIFT_EXPR, word_mode, result,
1955 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1956 }
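/* A worked example (illustrative, assuming BITS_PER_WORD == 32 and
little-endian bit numbering): for BITSIZE == 20 and BITPOS == 24, the
first iteration extracts the 8 bits that fit in the first word, and
the second extracts the remaining 12 bits from the next word and
shifts them left by 8 before the IOR, so the reassembled field has
the first part in bits 0-7 and the second in bits 8-19. */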
1957 \f
1958 /* Add INC into TARGET. */
1959
1960 void
1961 expand_inc (rtx target, rtx inc)
1962 {
1963 rtx value = expand_binop (GET_MODE (target), add_optab,
1964 target, inc,
1965 target, 0, OPTAB_LIB_WIDEN);
1966 if (value != target)
1967 emit_move_insn (target, value);
1968 }
1969
1970 /* Subtract DEC from TARGET. */
1971
1972 void
1973 expand_dec (rtx target, rtx dec)
1974 {
1975 rtx value = expand_binop (GET_MODE (target), sub_optab,
1976 target, dec,
1977 target, 0, OPTAB_LIB_WIDEN);
1978 if (value != target)
1979 emit_move_insn (target, value);
1980 }
1981 \f
1982 /* Output a shift instruction for expression code CODE,
1983 with SHIFTED being the rtx for the value to shift,
1984 and AMOUNT the tree for the amount to shift by.
1985 Store the result in the rtx TARGET, if that is convenient.
1986 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1987 Return the rtx for where the value is. */
1988
1989 rtx
1990 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
1991 tree amount, rtx target, int unsignedp)
1992 {
1993 rtx op1, temp = 0;
1994 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1995 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1996 int try;
1997
1998 /* Previously, shift counts computed by NEGATE_EXPR were detected
1999 and shifted in the other direction, but that does not work
2000 on all machines. */
2001
2002 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
2003
2004 if (SHIFT_COUNT_TRUNCATED)
2005 {
2006 if (GET_CODE (op1) == CONST_INT
2007 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2008 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2009 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2010 % GET_MODE_BITSIZE (mode));
2011 else if (GET_CODE (op1) == SUBREG
2012 && subreg_lowpart_p (op1))
2013 op1 = SUBREG_REG (op1);
2014 }
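/* E.g. when SHIFT_COUNT_TRUNCATED holds and GET_MODE_BITSIZE (mode)
== 32, a constant shift count of 37 is reduced to 37 % 32 == 5 here,
matching what such hardware would do with the full count. */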
2015
2016 if (op1 == const0_rtx)
2017 return shifted;
2018
2019 for (try = 0; temp == 0 && try < 3; try++)
2020 {
2021 enum optab_methods methods;
2022
2023 if (try == 0)
2024 methods = OPTAB_DIRECT;
2025 else if (try == 1)
2026 methods = OPTAB_WIDEN;
2027 else
2028 methods = OPTAB_LIB_WIDEN;
2029
2030 if (rotate)
2031 {
2032 /* Widening does not work for rotation. */
2033 if (methods == OPTAB_WIDEN)
2034 continue;
2035 else if (methods == OPTAB_LIB_WIDEN)
2036 {
2037 /* If we have been unable to open-code this by a rotation,
2038 do it as the IOR of two shifts. I.e., to rotate A
2039 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2040 where C is the bitsize of A.
2041
2042 It is theoretically possible that the target machine might
2043 not be able to perform either shift and hence we would
2044 be making two libcalls rather than just the one for the
2045 shift (similarly if IOR could not be done). We will allow
2046 this extremely unlikely lossage to avoid complicating the
2047 code below. */
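/* Concretely: with C == 32 and a rotate-left by N == 3, this emits
(A << 3) | ((unsigned) A >> 29), the standard rotate expansion. */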
2048
2049 rtx subtarget = target == shifted ? 0 : target;
2050 rtx temp1;
2051 tree type = TREE_TYPE (amount);
2052 tree new_amount = make_tree (type, op1);
2053 tree other_amount
2054 = fold (build (MINUS_EXPR, type,
2055 convert (type,
2056 build_int_2 (GET_MODE_BITSIZE (mode),
2057 0)),
2058 amount));
2059
2060 shifted = force_reg (mode, shifted);
2061
2062 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2063 mode, shifted, new_amount, subtarget, 1);
2064 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2065 mode, shifted, other_amount, 0, 1);
2066 return expand_binop (mode, ior_optab, temp, temp1, target,
2067 unsignedp, methods);
2068 }
2069
2070 temp = expand_binop (mode,
2071 left ? rotl_optab : rotr_optab,
2072 shifted, op1, target, unsignedp, methods);
2073
2074 /* If we don't have the rotate, but we are rotating by a constant
2075 that is in range, try a rotate in the opposite direction. */
2076
2077 if (temp == 0 && GET_CODE (op1) == CONST_INT
2078 && INTVAL (op1) > 0
2079 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2080 temp = expand_binop (mode,
2081 left ? rotr_optab : rotl_optab,
2082 shifted,
2083 GEN_INT (GET_MODE_BITSIZE (mode)
2084 - INTVAL (op1)),
2085 target, unsignedp, methods);
2086 }
2087 else if (unsignedp)
2088 temp = expand_binop (mode,
2089 left ? ashl_optab : lshr_optab,
2090 shifted, op1, target, unsignedp, methods);
2091
2092 /* Do arithmetic shifts.
2093 Also, if we are going to widen the operand, we can just as well
2094 use an arithmetic right-shift instead of a logical one. */
2095 if (temp == 0 && ! rotate
2096 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2097 {
2098 enum optab_methods methods1 = methods;
2099
2100 /* If trying to widen a log shift to an arithmetic shift,
2101 don't accept an arithmetic shift of the same size. */
2102 if (unsignedp)
2103 methods1 = OPTAB_MUST_WIDEN;
2104
2105 /* Arithmetic shift */
2106
2107 temp = expand_binop (mode,
2108 left ? ashl_optab : ashr_optab,
2109 shifted, op1, target, unsignedp, methods1);
2110 }
2111
2112 /* We used to try extzv here for logical right shifts, but that was
2113 only useful for one machine, the VAX, and caused poor code
2114 generation there for lshrdi3, so the code was deleted and a
2115 define_expand for lshrsi3 was added to vax.md. */
2116 }
2117
2118 if (temp == 0)
2119 abort ();
2120 return temp;
2121 }
2122 \f
2123 enum alg_code { alg_zero, alg_m, alg_shift,
2124 alg_add_t_m2, alg_sub_t_m2,
2125 alg_add_factor, alg_sub_factor,
2126 alg_add_t2_m, alg_sub_t2_m,
2127 alg_add, alg_subtract, alg_factor, alg_shiftop };
2128
2129 /* This structure records a sequence of operations.
2130 `ops' is the number of operations recorded.
2131 `cost' is their total cost.
2132 The operations are stored in `op' and the corresponding
2133 logarithms of the integer coefficients in `log'.
2134
2135 These are the operations:
2136 alg_zero total := 0;
2137 alg_m total := multiplicand;
2138 alg_shift total := total * coeff
2139 alg_add_t_m2 total := total + multiplicand * coeff;
2140 alg_sub_t_m2 total := total - multiplicand * coeff;
2141 alg_add_factor total := total * coeff + total;
2142 alg_sub_factor total := total * coeff - total;
2143 alg_add_t2_m total := total * coeff + multiplicand;
2144 alg_sub_t2_m total := total * coeff - multiplicand;
2145
2146 The first operand must be either alg_zero or alg_m. */
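/* For instance (an illustrative decomposition, not a recorded one):
multiplying by 10 can be expressed as op = { alg_m,
alg_add_t2_m (log 2), alg_shift (log 1) }: total := multiplicand;
total := total * 4 + multiplicand; total := total * 2 -- i.e.
10x == ((x << 2) + x) << 1. */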
2147
2148 struct algorithm
2149 {
2150 short cost;
2151 short ops;
2152 /* The sizes of the OP and LOG fields are not directly related to the
2153 word size, but the worst-case algorithms arise when we have few
2154 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2155 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2156 wordsize operations in total. */
2157 enum alg_code op[MAX_BITS_PER_WORD];
2158 char log[MAX_BITS_PER_WORD];
2159 };
2160
2161 /* Indicates the type of fixup needed after a constant multiplication.
2162 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2163 the result should be negated, and ADD_VARIANT means that the
2164 multiplicand should be added to the result. */
2165 enum mult_variant {basic_variant, negate_variant, add_variant};
2166
2167 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2168 int, enum machine_mode mode);
2169 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2170 struct algorithm *, enum mult_variant *, int);
2171 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2172 const struct algorithm *, enum mult_variant);
2173 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2174 int, unsigned HOST_WIDE_INT *,
2175 int *, int *);
2176 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2177 static rtx extract_high_half (enum machine_mode, rtx);
2178 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2179 int, int);
2180 /* Compute and return the best algorithm for multiplying by T.
2181 The algorithm must cost less than COST_LIMIT.
2182 If retval.cost >= COST_LIMIT, no algorithm was found and all
2183 other fields of the returned struct are undefined.
2184 MODE is the machine mode of the multiplication. */
2185
2186 static void
2187 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2188 int cost_limit, enum machine_mode mode)
2189 {
2190 int m;
2191 struct algorithm *alg_in, *best_alg;
2192 int cost;
2193 unsigned HOST_WIDE_INT q;
2194 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2195
2196 /* Indicate that no algorithm is yet found. If no algorithm
2197 is found, this value will be returned and indicate failure. */
2198 alg_out->cost = cost_limit;
2199
2200 if (cost_limit <= 0)
2201 return;
2202
2203 /* Restrict the bits of "t" to the multiplication's mode. */
2204 t &= GET_MODE_MASK (mode);
2205
2206 /* t == 1 can be done in zero cost. */
2207 if (t == 1)
2208 {
2209 alg_out->ops = 1;
2210 alg_out->cost = 0;
2211 alg_out->op[0] = alg_m;
2212 return;
2213 }
2214
2215 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2216 fail now. */
2217 if (t == 0)
2218 {
2219 if (zero_cost >= cost_limit)
2220 return;
2221 else
2222 {
2223 alg_out->ops = 1;
2224 alg_out->cost = zero_cost;
2225 alg_out->op[0] = alg_zero;
2226 return;
2227 }
2228 }
2229
2230 /* We'll be needing a couple extra algorithm structures now. */
2231
2232 alg_in = alloca (sizeof (struct algorithm));
2233 best_alg = alloca (sizeof (struct algorithm));
2234
2235 /* If we have a group of zero bits at the low-order part of T, try
2236 multiplying by the remaining bits and then doing a shift. */
2237
2238 if ((t & 1) == 0)
2239 {
2240 m = floor_log2 (t & -t); /* m = number of low zero bits */
2241 if (m < maxm)
2242 {
2243 q = t >> m;
2244 cost = shift_cost[mode][m];
2245 synth_mult (alg_in, q, cost_limit - cost, mode);
2246
2247 cost += alg_in->cost;
2248 if (cost < cost_limit)
2249 {
2250 struct algorithm *x;
2251 x = alg_in, alg_in = best_alg, best_alg = x;
2252 best_alg->log[best_alg->ops] = m;
2253 best_alg->op[best_alg->ops] = alg_shift;
2254 cost_limit = cost;
2255 }
2256 }
2257 }
2258
2259 /* If we have an odd number, add or subtract one. */
2260 if ((t & 1) != 0)
2261 {
2262 unsigned HOST_WIDE_INT w;
2263
2264 for (w = 1; (w & t) != 0; w <<= 1)
2265 ;
2266 /* If T was -1, then W will be zero after the loop. This is another
2267 case where T ends with ...111. Handling this as (T + 1) and
2268 subtracting 1 produces slightly better code and makes algorithm
2269 selection much faster than treating it like the ...0111 case
2270 below. */
2271 if (w == 0
2272 || (w > 2
2273 /* Reject the case where t is 3.
2274 Thus we prefer addition in that case. */
2275 && t != 3))
2276 {
2277 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2278
2279 cost = add_cost[mode];
2280 synth_mult (alg_in, t + 1, cost_limit - cost, mode);
2281
2282 cost += alg_in->cost;
2283 if (cost < cost_limit)
2284 {
2285 struct algorithm *x;
2286 x = alg_in, alg_in = best_alg, best_alg = x;
2287 best_alg->log[best_alg->ops] = 0;
2288 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2289 cost_limit = cost;
2290 }
2291 }
2292 else
2293 {
2294 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2295
2296 cost = add_cost[mode];
2297 synth_mult (alg_in, t - 1, cost_limit - cost, mode);
2298
2299 cost += alg_in->cost;
2300 if (cost < cost_limit)
2301 {
2302 struct algorithm *x;
2303 x = alg_in, alg_in = best_alg, best_alg = x;
2304 best_alg->log[best_alg->ops] = 0;
2305 best_alg->op[best_alg->ops] = alg_add_t_m2;
2306 cost_limit = cost;
2307 }
2308 }
2309 }
2310
2311 /* Look for factors of t of the form
2312 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2313 If we find such a factor, we can multiply by t using an algorithm that
2314 multiplies by q, shift the result by m and add/subtract it to itself.
2315
2316 We search for large factors first and loop down, even if large factors
2317 are less probable than small; if we find a large factor we will find a
2318 good sequence quickly, and therefore be able to prune (by decreasing
2319 COST_LIMIT) the search. */
2320
2321 for (m = floor_log2 (t - 1); m >= 2; m--)
2322 {
2323 unsigned HOST_WIDE_INT d;
2324
2325 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2326 if (t % d == 0 && t > d && m < maxm)
2327 {
2328 cost = add_cost[mode] + shift_cost[mode][m];
2329 if (shiftadd_cost[mode][m] < cost)
2330 cost = shiftadd_cost[mode][m];
2331 synth_mult (alg_in, t / d, cost_limit - cost, mode);
2332
2333 cost += alg_in->cost;
2334 if (cost < cost_limit)
2335 {
2336 struct algorithm *x;
2337 x = alg_in, alg_in = best_alg, best_alg = x;
2338 best_alg->log[best_alg->ops] = m;
2339 best_alg->op[best_alg->ops] = alg_add_factor;
2340 cost_limit = cost;
2341 }
2342 /* Other factors will have been taken care of in the recursion. */
2343 break;
2344 }
2345
2346 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2347 if (t % d == 0 && t > d && m < maxm)
2348 {
2349 cost = add_cost[mode] + shift_cost[mode][m];
2350 if (shiftsub_cost[mode][m] < cost)
2351 cost = shiftsub_cost[mode][m];
2352 synth_mult (alg_in, t / d, cost_limit - cost, mode);
2353
2354 cost += alg_in->cost;
2355 if (cost < cost_limit)
2356 {
2357 struct algorithm *x;
2358 x = alg_in, alg_in = best_alg, best_alg = x;
2359 best_alg->log[best_alg->ops] = m;
2360 best_alg->op[best_alg->ops] = alg_sub_factor;
2361 cost_limit = cost;
2362 }
2363 break;
2364 }
2365 }
2366
2367 /* Try shift-and-add (load effective address) instructions,
2368 i.e. do a*3, a*5, a*9. */
2369 if ((t & 1) != 0)
2370 {
2371 q = t - 1;
2372 q = q & -q;
2373 m = exact_log2 (q);
2374 if (m >= 0 && m < maxm)
2375 {
2376 cost = shiftadd_cost[mode][m];
2377 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost, mode);
2378
2379 cost += alg_in->cost;
2380 if (cost < cost_limit)
2381 {
2382 struct algorithm *x;
2383 x = alg_in, alg_in = best_alg, best_alg = x;
2384 best_alg->log[best_alg->ops] = m;
2385 best_alg->op[best_alg->ops] = alg_add_t2_m;
2386 cost_limit = cost;
2387 }
2388 }
2389
2390 q = t + 1;
2391 q = q & -q;
2392 m = exact_log2 (q);
2393 if (m >= 0 && m < maxm)
2394 {
2395 cost = shiftsub_cost[mode][m];
2396 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost, mode);
2397
2398 cost += alg_in->cost;
2399 if (cost < cost_limit)
2400 {
2401 struct algorithm *x;
2402 x = alg_in, alg_in = best_alg, best_alg = x;
2403 best_alg->log[best_alg->ops] = m;
2404 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2405 cost_limit = cost;
2406 }
2407 }
2408 }
2409
2410 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2411 we have not found any algorithm. */
2412 if (cost_limit == alg_out->cost)
2413 return;
2414
2415 /* If we are getting a too long sequence for `struct algorithm'
2416 to record, make this search fail. */
2417 if (best_alg->ops == MAX_BITS_PER_WORD)
2418 return;
2419
2420 /* Copy the algorithm from temporary space to the space at alg_out.
2421 We avoid using structure assignment because the majority of
2422 best_alg is normally undefined, and this is a critical function. */
2423 alg_out->ops = best_alg->ops + 1;
2424 alg_out->cost = cost_limit;
2425 memcpy (alg_out->op, best_alg->op,
2426 alg_out->ops * sizeof *alg_out->op);
2427 memcpy (alg_out->log, best_alg->log,
2428 alg_out->ops * sizeof *alg_out->log);
2429 }
2430 \f
2431 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2432 Try three variations:
2433
2434 - a shift/add sequence based on VAL itself
2435 - a shift/add sequence based on -VAL, followed by a negation
2436 - a shift/add sequence based on VAL - 1, followed by an addition.
2437
2438 Return true if the cheapest of these costs less than MULT_COST,
2439 describing the algorithm in *ALG and final fixup in *VARIANT. */
2440
2441 static bool
2442 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2443 struct algorithm *alg, enum mult_variant *variant,
2444 int mult_cost)
2445 {
2446 struct algorithm alg2;
2447
2448 *variant = basic_variant;
2449 synth_mult (alg, val, mult_cost, mode);
2450
2451 /* This works only if the inverted value actually fits in an
2452 `unsigned int'. */
2453 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2454 {
2455 synth_mult (&alg2, -val, MIN (alg->cost, mult_cost) - neg_cost[mode],
2456 mode);
2457 alg2.cost += neg_cost[mode];
2458 if (alg2.cost < alg->cost)
2459 *alg = alg2, *variant = negate_variant;
2460 }
2461
2462 /* This proves very useful for division-by-constant. */
2463 synth_mult (&alg2, val - 1, MIN (alg->cost, mult_cost) - add_cost[mode],
2464 mode);
2465 alg2.cost += add_cost[mode];
2466 if (alg2.cost < alg->cost)
2467 *alg = alg2, *variant = add_variant;
2468
2469 return alg->cost < mult_cost;
2470 }
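/* As an illustration of the three variants: for VAL == 7 the basic
algorithm (x << 3) - x usually wins; for VAL == -7 it can be cheaper
to synthesize 7x and negate it (negate_variant); and for
VAL == 2**n + 1 the VAL - 1 form, a single shift followed by the
final addition of the multiplicand (add_variant), is often best. */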
2471
2472 /* A subroutine of expand_mult, used for constant multiplications.
2473 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2474 convenient. Use the shift/add sequence described by ALG and apply
2475 the final fixup specified by VARIANT. */
2476
2477 static rtx
2478 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2479 rtx target, const struct algorithm *alg,
2480 enum mult_variant variant)
2481 {
2482 HOST_WIDE_INT val_so_far;
2483 rtx insn, accum, tem;
2484 int opno;
2485 enum machine_mode nmode;
2486
2487 /* op0 must be register to make mult_cost match the precomputed
2488 shiftadd_cost array. */
2489 op0 = protect_from_queue (op0, 0);
2490
2491 /* Avoid referencing memory over and over.
2492 For speed, but also for correctness when mem is volatile. */
2493 if (GET_CODE (op0) == MEM)
2494 op0 = force_reg (mode, op0);
2495
2496 /* ACCUM starts out either as OP0 or as a zero, depending on
2497 the first operation. */
2498
2499 if (alg->op[0] == alg_zero)
2500 {
2501 accum = copy_to_mode_reg (mode, const0_rtx);
2502 val_so_far = 0;
2503 }
2504 else if (alg->op[0] == alg_m)
2505 {
2506 accum = copy_to_mode_reg (mode, op0);
2507 val_so_far = 1;
2508 }
2509 else
2510 abort ();
2511
2512 for (opno = 1; opno < alg->ops; opno++)
2513 {
2514 int log = alg->log[opno];
2515 int preserve = preserve_subexpressions_p ();
2516 rtx shift_subtarget = preserve ? 0 : accum;
2517 rtx add_target
2518 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2519 && ! preserve)
2520 ? target : 0;
2521 rtx accum_target = preserve ? 0 : accum;
2522
2523 switch (alg->op[opno])
2524 {
2525 case alg_shift:
2526 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2527 build_int_2 (log, 0), NULL_RTX, 0);
2528 val_so_far <<= log;
2529 break;
2530
2531 case alg_add_t_m2:
2532 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2533 build_int_2 (log, 0), NULL_RTX, 0);
2534 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2535 add_target ? add_target : accum_target);
2536 val_so_far += (HOST_WIDE_INT) 1 << log;
2537 break;
2538
2539 case alg_sub_t_m2:
2540 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2541 build_int_2 (log, 0), NULL_RTX, 0);
2542 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2543 add_target ? add_target : accum_target);
2544 val_so_far -= (HOST_WIDE_INT) 1 << log;
2545 break;
2546
2547 case alg_add_t2_m:
2548 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2549 build_int_2 (log, 0), shift_subtarget,
2550 0);
2551 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2552 add_target ? add_target : accum_target);
2553 val_so_far = (val_so_far << log) + 1;
2554 break;
2555
2556 case alg_sub_t2_m:
2557 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2558 build_int_2 (log, 0), shift_subtarget, 0);
2559 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2560 add_target ? add_target : accum_target);
2561 val_so_far = (val_so_far << log) - 1;
2562 break;
2563
2564 case alg_add_factor:
2565 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2566 build_int_2 (log, 0), NULL_RTX, 0);
2567 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2568 add_target ? add_target : accum_target);
2569 val_so_far += val_so_far << log;
2570 break;
2571
2572 case alg_sub_factor:
2573 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2574 build_int_2 (log, 0), NULL_RTX, 0);
2575 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2576 (add_target ? add_target
2577 : preserve ? 0 : tem));
2578 val_so_far = (val_so_far << log) - val_so_far;
2579 break;
2580
2581 default:
2582 abort ();
2583 }
2584
2585 /* Write a REG_EQUAL note on the last insn so that we can cse
2586 multiplication sequences. Note that if ACCUM is a SUBREG,
2587 we've set the inner register and must properly indicate
2588 that. */
2589
2590 tem = op0, nmode = mode;
2591 if (GET_CODE (accum) == SUBREG)
2592 {
2593 nmode = GET_MODE (SUBREG_REG (accum));
2594 tem = gen_lowpart (nmode, op0);
2595 }
2596
2597 insn = get_last_insn ();
2598 set_unique_reg_note (insn, REG_EQUAL,
2599 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
2600 }
2601
2602 if (variant == negate_variant)
2603 {
2604 val_so_far = -val_so_far;
2605 accum = expand_unop (mode, neg_optab, accum, target, 0);
2606 }
2607 else if (variant == add_variant)
2608 {
2609 val_so_far = val_so_far + 1;
2610 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2611 }
2612
2613 /* Compare only the bits of val and val_so_far that are significant
2614 in the result mode, to avoid sign-/zero-extension confusion. */
2615 val &= GET_MODE_MASK (mode);
2616 val_so_far &= GET_MODE_MASK (mode);
2617 if (val != val_so_far)
2618 abort ();
2619
2620 return accum;
2621 }
2622
2623 /* Perform a multiplication and return an rtx for the result.
2624 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2625 TARGET is a suggestion for where to store the result (an rtx).
2626
2627 We check specially for a constant integer as OP1.
2628 If you want this check for OP0 as well, then before calling
2629 you should swap the two operands if OP0 would be constant. */
2630
2631 rtx
2632 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2633 int unsignedp)
2634 {
2635 rtx const_op1 = op1;
2636 enum mult_variant variant;
2637 struct algorithm algorithm;
2638
2639 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2640 less than or equal in size to `unsigned int' this doesn't matter.
2641 If the mode is larger than `unsigned int', then synth_mult works only
2642 if the constant value exactly fits in an `unsigned int' without any
2643 truncation. This means that multiplying by negative values does
2644 not work; results are off by 2^32 on a 32 bit machine. */
2645
2646 /* If we are multiplying in DImode, it may still be a win
2647 to try to work with shifts and adds. */
2648 if (GET_CODE (op1) == CONST_DOUBLE
2649 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2650 && HOST_BITS_PER_INT >= BITS_PER_WORD
2651 && CONST_DOUBLE_HIGH (op1) == 0)
2652 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2653 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2654 && GET_CODE (op1) == CONST_INT
2655 && INTVAL (op1) < 0)
2656 const_op1 = 0;
2657
2658 /* We used to test optimize here, on the grounds that it's better to
2659 produce a smaller program when -O is not used.
2660 But this causes such a terrible slowdown sometimes
2661 that it seems better to use synth_mult always. */
2662
2663 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2664 && (unsignedp || !flag_trapv))
2665 {
2666 int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2667 mult_cost = MIN (12 * add_cost[mode], mult_cost);
2668
2669 if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
2670 mult_cost))
2671 return expand_mult_const (mode, op0, INTVAL (const_op1), target,
2672 &algorithm, variant);
2673 }
2674
2675 if (GET_CODE (op0) == CONST_DOUBLE)
2676 {
2677 rtx temp = op0;
2678 op0 = op1;
2679 op1 = temp;
2680 }
2681
2682 /* Expand x*2.0 as x+x. */
2683 if (GET_CODE (op1) == CONST_DOUBLE
2684 && GET_MODE_CLASS (mode) == MODE_FLOAT)
2685 {
2686 REAL_VALUE_TYPE d;
2687 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
2688
2689 if (REAL_VALUES_EQUAL (d, dconst2))
2690 {
2691 op0 = force_reg (GET_MODE (op0), op0);
2692 return expand_binop (mode, add_optab, op0, op0,
2693 target, unsignedp, OPTAB_LIB_WIDEN);
2694 }
2695 }
2696
2697 /* This used to use umul_optab if unsigned, but for non-widening multiply
2698 there is no difference between signed and unsigned. */
2699 op0 = expand_binop (mode,
2700 ! unsignedp
2701 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2702 ? smulv_optab : smul_optab,
2703 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2704 if (op0 == 0)
2705 abort ();
2706 return op0;
2707 }
2708 \f
2709 /* Return the smallest n such that 2**n >= X. */
2710
2711 int
2712 ceil_log2 (unsigned HOST_WIDE_INT x)
2713 {
2714 return floor_log2 (x - 1) + 1;
2715 }
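/* E.g. ceil_log2 (4) == 2 and ceil_log2 (5) == 3; ceil_log2 (1)
== 0, since floor_log2 (0) is -1. */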
2716
2717 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2718 replace division by D, and put the least significant N bits of the result
2719 in *MULTIPLIER_PTR and return the most significant bit.
2720
2721 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2722 needed precision is in PRECISION (should be <= N).
2723
2724 PRECISION should be as small as possible so this function can choose
2725 multiplier more freely.
2726
2727 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2728 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2729
2730 Using this function, x/D will be equal to (x * m) >> N >> (*POST_SHIFT_PTR),
2731 where m is the full N + 1 bit multiplier. */
2732
2733 static
2734 unsigned HOST_WIDE_INT
2735 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
2736 unsigned HOST_WIDE_INT *multiplier_ptr,
2737 int *post_shift_ptr, int *lgup_ptr)
2738 {
2739 HOST_WIDE_INT mhigh_hi, mlow_hi;
2740 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2741 int lgup, post_shift;
2742 int pow, pow2;
2743 unsigned HOST_WIDE_INT nl, dummy1;
2744 HOST_WIDE_INT nh, dummy2;
2745
2746 /* lgup = ceil(log2(divisor)); */
2747 lgup = ceil_log2 (d);
2748
2749 if (lgup > n)
2750 abort ();
2751
2752 pow = n + lgup;
2753 pow2 = n + lgup - precision;
2754
2755 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2756 {
2757 /* We could handle this with some effort, but this case is much better
2758 handled directly with a scc insn, so rely on caller using that. */
2759 abort ();
2760 }
2761
2762 /* mlow = 2^(N + lgup)/d */
2763 if (pow >= HOST_BITS_PER_WIDE_INT)
2764 {
2765 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2766 nl = 0;
2767 }
2768 else
2769 {
2770 nh = 0;
2771 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2772 }
2773 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2774 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2775
2776 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2777 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2778 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2779 else
2780 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2781 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2782 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2783
2784 if (mhigh_hi && nh - d >= d)
2785 abort ();
2786 if (mhigh_hi > 1 || mlow_hi > 1)
2787 abort ();
2788 /* Assert that mlow < mhigh. */
2789 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2790 abort ();
2791
2792 /* If precision == N, then mlow, mhigh exceed 2^N
2793 (but they do not exceed 2^(N+1)). */
2794
2795 /* Reduce to lowest terms. */
2796 for (post_shift = lgup; post_shift > 0; post_shift--)
2797 {
2798 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2799 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2800 if (ml_lo >= mh_lo)
2801 break;
2802
2803 mlow_hi = 0;
2804 mlow_lo = ml_lo;
2805 mhigh_hi = 0;
2806 mhigh_lo = mh_lo;
2807 }
2808
2809 *post_shift_ptr = post_shift;
2810 *lgup_ptr = lgup;
2811 if (n < HOST_BITS_PER_WIDE_INT)
2812 {
2813 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2814 *multiplier_ptr = mhigh_lo & mask;
2815 return mhigh_lo >= mask;
2816 }
2817 else
2818 {
2819 *multiplier_ptr = mhigh_lo;
2820 return mhigh_hi;
2821 }
2822 }
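/* A familiar instance (illustrative): for D == 3 and N == PRECISION
== 32, this computes mlow == 2^34/3 and mhigh == (2^34 + 4)/3,
reduces them until their shifted images coincide, and ends up with
multiplier 0xaaaaaaab, *POST_SHIFT_PTR == 1 and a zero return, so an
unsigned 32-bit x/3 becomes (x * 0xaaaaaaab) >> 32 >> 1. */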
2823
2824 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2825 congruent to 1 (mod 2**N). */
2826
2827 static unsigned HOST_WIDE_INT
2828 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
2829 {
2830 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2831
2832 /* The algorithm notes that the choice y = x satisfies
2833 x*y == 1 mod 2^3, since x is assumed odd.
2834 Each iteration doubles the number of bits of significance in y. */
2835
2836 unsigned HOST_WIDE_INT mask;
2837 unsigned HOST_WIDE_INT y = x;
2838 int nbit = 3;
2839
2840 mask = (n == HOST_BITS_PER_WIDE_INT
2841 ? ~(unsigned HOST_WIDE_INT) 0
2842 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2843
2844 while (nbit < n)
2845 {
2846 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2847 nbit *= 2;
2848 }
2849 return y;
2850 }
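/* Example (illustrative): invert_mod2n (3, 8) starts with y == 3,
already an inverse mod 2^3, and needs two refinement steps:
y = 3 * (2 - 3*3) & 0xff == 0xeb, then y == 0xab; indeed
3 * 0xab == 0x201 == 1 (mod 2^8). */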
2851
2852 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
2853 signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high
2854 half of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the
2855 signed product to become unsigned; if UNSIGNEDP is zero, adjust the
2856 unsigned product to become signed.
2857
2858 The result is put in TARGET if that is convenient.
2859
2860 MODE is the mode of operation. */
2861
2862 rtx
2863 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
2864 rtx op1, rtx target, int unsignedp)
2865 {
2866 rtx tem;
2867 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2868
2869 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2870 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2871 NULL_RTX, 0);
2872 tem = expand_and (mode, tem, op1, NULL_RTX);
2873 adj_operand
2874 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2875 adj_operand);
2876
2877 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2878 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2879 NULL_RTX, 0);
2880 tem = expand_and (mode, tem, op0, NULL_RTX);
2881 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2882 target);
2883
2884 return target;
2885 }
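/* The identity being used here (a standard one): for N-bit values
interpreted both ways, high_u (a, b) == high_s (a, b)
+ (a < 0 ? b : 0) + (b < 0 ? a : 0) modulo 2^N. Each
(x >> (N-1)) & y computation above materializes one of the two
conditional addends, which is then added or subtracted according to
ADJ_CODE. */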
2886
2887 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
2888
2889 static rtx
2890 extract_high_half (enum machine_mode mode, rtx op)
2891 {
2892 enum machine_mode wider_mode;
2893
2894 if (mode == word_mode)
2895 return gen_highpart (mode, op);
2896
2897 wider_mode = GET_MODE_WIDER_MODE (mode);
2898 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
2899 build_int_2 (GET_MODE_BITSIZE (mode), 0), 0, 1);
2900 return convert_modes (mode, wider_mode, op, 0);
2901 }
2902
2903 /* Like expand_mult_highpart, but only consider using a multiplication
2904 optab. OP1 is an rtx for the constant operand. */
2905
2906 static rtx
2907 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
2908 rtx target, int unsignedp, int max_cost)
2909 {
2910 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
2911 enum machine_mode wider_mode;
2912 optab moptab;
2913 rtx tem;
2914 int size;
2915
2916 wider_mode = GET_MODE_WIDER_MODE (mode);
2917 size = GET_MODE_BITSIZE (mode);
2918
2919 /* Firstly, try using a multiplication insn that only generates the needed
2920 high part of the product, and in the sign flavor of unsignedp. */
2921 if (mul_highpart_cost[mode] < max_cost)
2922 {
2923 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2924 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
2925 unsignedp, OPTAB_DIRECT);
2926 if (tem)
2927 return tem;
2928 }
2929
2930 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2931 Need to adjust the result after the multiplication. */
2932 if (size - 1 < BITS_PER_WORD
2933 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
2934 + 4 * add_cost[mode] < max_cost))
2935 {
2936 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2937 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
2938 unsignedp, OPTAB_DIRECT);
2939 if (tem)
2940 /* We used the wrong signedness. Adjust the result. */
2941 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
2942 tem, unsignedp);
2943 }
2944
2945 /* Try widening multiplication. */
2946 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2947 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2948 && mul_widen_cost[wider_mode] < max_cost)
2949 {
2950 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
2951 unsignedp, OPTAB_WIDEN);
2952 if (tem)
2953 return extract_high_half (mode, tem);
2954 }
2955
2956 /* Try widening the mode and performing a non-widening multiplication. */
2957 moptab = smul_optab;
2958 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2959 && size - 1 < BITS_PER_WORD
2960 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
2961 {
2962 tem = expand_binop (wider_mode, moptab, op0, op1, 0,
2963 unsignedp, OPTAB_WIDEN);
2964 if (tem)
2965 return extract_high_half (mode, tem);
2966 }
2967
2968 /* Try widening multiplication of opposite signedness, and adjust. */
2969 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2970 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2971 && size - 1 < BITS_PER_WORD
2972 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
2973 + 4 * add_cost[mode] < max_cost))
2974 {
2975 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
2976 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2977 if (tem != 0)
2978 {
2979 tem = extract_high_half (mode, tem);
2980 /* We used the wrong signedness. Adjust the result. */
2981 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
2982 target, unsignedp);
2983 }
2984 }
2985
2986 return 0;
2987 }
2988
2989 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2990 in TARGET if that is convenient, and returning where the result is. If the
2991 operation cannot be performed, 0 is returned.
2992
2993 MODE is the mode of operation and result.
2994
2995 UNSIGNEDP nonzero means unsigned multiply.
2996
2997 MAX_COST is the total allowed cost for the expanded RTL. */
2998
2999 rtx
3000 expand_mult_highpart (enum machine_mode mode, rtx op0,
3001 unsigned HOST_WIDE_INT cnst1, rtx target,
3002 int unsignedp, int max_cost)
3003 {
3004 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3005 int extra_cost;
3006 bool sign_adjust = false;
3007 enum mult_variant variant;
3008 struct algorithm alg;
3009 rtx op1, tem;
3010
3011 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3012 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3013 abort ();
3014
3015 op1 = gen_int_mode (cnst1, wider_mode);
3016 cnst1 &= GET_MODE_MASK (mode);
3017
3018 /* We can't optimize modes wider than BITS_PER_WORD.
3019 ??? We might be able to perform double-word arithmetic if
3020 mode == word_mode, however all the cost calculations in
3021 synth_mult etc. assume single-word operations. */
3022 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3023 return expand_mult_highpart_optab (mode, op0, op1, target,
3024 unsignedp, max_cost);
3025
3026 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3027
3028 /* Check whether we try to multiply by a negative constant. */
3029 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3030 {
3031 sign_adjust = true;
3032 extra_cost += add_cost[mode];
3033 }
3034
3035 /* See whether shift/add multiplication is cheap enough. */
3036 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3037 max_cost - extra_cost))
3038 {
3039 /* See whether the specialized multiplication optabs are
3040 cheaper than the shift/add version. */
3041 tem = expand_mult_highpart_optab (mode, op0, op1, target,
3042 unsignedp, alg.cost + extra_cost);
3043 if (tem)
3044 return tem;
3045
3046 tem = convert_to_mode (wider_mode, op0, unsignedp);
3047 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3048 tem = extract_high_half (mode, tem);
3049
3050 /* Adjust result for signedness. */
3051 if (sign_adjust)
3052 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3053
3054 return tem;
3055 }
3056 return expand_mult_highpart_optab (mode, op0, op1, target,
3057 unsignedp, max_cost);
3058 }
3059 \f
3060 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3061 if that is convenient, and returning where the result is.
3062 You may request either the quotient or the remainder as the result;
3063 specify REM_FLAG nonzero to get the remainder.
3064
3065 CODE is the expression code for which kind of division this is;
3066 it controls how rounding is done. MODE is the machine mode to use.
3067 UNSIGNEDP nonzero means do unsigned division. */
3068
3069 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3070 and then correct it by or'ing in missing high bits
3071 if result of ANDI is nonzero.
3072 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3073 This could optimize to a bfexts instruction.
3074 But C doesn't use these operations, so their optimizations are
3075 left for later. */
3076 /* ??? For modulo, we don't actually need the highpart of the first product;
3077 the low part will do nicely. And for small divisors, the second multiply
3078 can also be a low-part only multiply or even be completely left out.
3079 E.g. to calculate the remainder of a division by 3 with a 32 bit
3080 multiply, multiply with 0x55555556 and extract the upper two bits;
3081 the result is exact for inputs up to 0x1fffffff.
3082 The input range can be reduced by using cross-sum rules.
3083 For odd divisors >= 3, the following table gives right shift counts
3084 so that if a number is shifted by an integer multiple of the given
3085 amount, the remainder stays the same:
3086 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3087 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3088 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3089 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3090 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3091
3092 Cross-sum rules for even numbers can be derived by leaving as many bits
3093 to the right alone as the divisor has zeros to the right.
3094 E.g. if x is an unsigned 32 bit number:
3095 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3096 */
3097
3098 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
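/* The usual bit trick: clearing the lowest set bit of a power of two
leaves zero, so the macro accepts 1, 2, 4, 8, ... -- and also 0,
hence the OR_ZERO in its name. */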
3099
3100 rtx
3101 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3102 rtx op0, rtx op1, rtx target, int unsignedp)
3103 {
3104 enum machine_mode compute_mode;
3105 rtx tquotient;
3106 rtx quotient = 0, remainder = 0;
3107 rtx last;
3108 int size;
3109 rtx insn, set;
3110 optab optab1, optab2;
3111 int op1_is_constant, op1_is_pow2 = 0;
3112 int max_cost, extra_cost;
3113 static HOST_WIDE_INT last_div_const = 0;
3114 static HOST_WIDE_INT ext_op1;
3115
3116 op1_is_constant = GET_CODE (op1) == CONST_INT;
3117 if (op1_is_constant)
3118 {
3119 ext_op1 = INTVAL (op1);
3120 if (unsignedp)
3121 ext_op1 &= GET_MODE_MASK (mode);
3122 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3123 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3124 }
3125
3126 /*
3127 This is the structure of expand_divmod:
3128
3129 First comes code to fix up the operands so we can perform the operations
3130 correctly and efficiently.
3131
3132 Second comes a switch statement with code specific for each rounding mode.
3133 For some special operands this code emits all RTL for the desired
3134 operation, for other cases, it generates only a quotient and stores it in
3135 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3136 to indicate that it has not done anything.
3137
3138 Last comes code that finishes the operation. If QUOTIENT is set and
3139 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3140 QUOTIENT is not set, it is computed using trunc rounding.
3141
3142 We try to generate special code for division and remainder when OP1 is a
3143 constant. If |OP1| = 2**n we can use shifts and some other fast
3144 operations. For other values of OP1, we compute a carefully selected
3145 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3146 by m.
3147
3148 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3149 half of the product. Different strategies for generating the product are
3150 implemented in expand_mult_highpart.
3151
3152 If what we actually want is the remainder, we generate that by another
3153 by-constant multiplication and a subtraction. */
3154
3155 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3156 code below will malfunction if we are, so check here and handle
3157 the special case if so. */
3158 if (op1 == const1_rtx)
3159 return rem_flag ? const0_rtx : op0;
3160
3161 /* When dividing by -1, we could get an overflow.
3162 negv_optab can handle overflows. */
3163 if (! unsignedp && op1 == constm1_rtx)
3164 {
3165 if (rem_flag)
3166 return const0_rtx;
3167 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3168 ? negv_optab : neg_optab, op0, target, 0);
3169 }
3170
3171 if (target
3172 /* Don't use the function value register as a target
3173 since we have to read it as well as write it,
3174 and function-inlining gets confused by this. */
3175 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3176 /* Don't clobber an operand while doing a multi-step calculation. */
3177 || ((rem_flag || op1_is_constant)
3178 && (reg_mentioned_p (target, op0)
3179 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3180 || reg_mentioned_p (target, op1)
3181 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3182 target = 0;
3183
3184 /* Get the mode in which to perform this computation. Normally it will
3185 be MODE, but sometimes we can't do the desired operation in MODE.
3186 If so, pick a wider mode in which we can do the operation. Convert
3187 to that mode at the start to avoid repeated conversions.
3188
3189 First see what operations we need. These depend on the expression
3190 we are evaluating. (We assume that divxx3 insns exist under the
3191 same conditions as modxx3 insns and that these insns don't normally
3192 fail. If these assumptions are not correct, we may generate less
3193 efficient code in some cases.)
3194
3195 Then see if we find a mode in which we can open-code that operation
3196 (either a division, modulus, or shift). Finally, check for the smallest
3197 mode for which we can do the operation with a library call. */
3198
3199 /* We might want to refine this now that we have division-by-constant
3200 optimization. Since expand_mult_highpart tries so many variants, it is
3201 not straightforward to generalize this. Maybe we should make an array
3202 of possible modes in init_expmed? Save this for GCC 2.7. */
3203
3204 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3205 ? (unsignedp ? lshr_optab : ashr_optab)
3206 : (unsignedp ? udiv_optab : sdiv_optab));
3207 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3208 ? optab1
3209 : (unsignedp ? udivmod_optab : sdivmod_optab));
3210
3211 for (compute_mode = mode; compute_mode != VOIDmode;
3212 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3213 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3214 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3215 break;
3216
3217 if (compute_mode == VOIDmode)
3218 for (compute_mode = mode; compute_mode != VOIDmode;
3219 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3220 if (optab1->handlers[compute_mode].libfunc
3221 || optab2->handlers[compute_mode].libfunc)
3222 break;
3223
3224 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3225 in expand_binop. */
3226 if (compute_mode == VOIDmode)
3227 compute_mode = mode;
3228
3229 if (target && GET_MODE (target) == compute_mode)
3230 tquotient = target;
3231 else
3232 tquotient = gen_reg_rtx (compute_mode);
3233
3234 size = GET_MODE_BITSIZE (compute_mode);
3235 #if 0
3236 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3237 (mode), and thereby get better code when OP1 is a constant. Do that
3238 later. It will require going over all usages of SIZE below. */
3239 size = GET_MODE_BITSIZE (mode);
3240 #endif
3241
3242 /* Only deduct something for a REM if the last divide done was
3243 for a different constant. Then set the constant of the last
3244 divide. */
3245 max_cost = div_cost[compute_mode]
3246 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3247 && INTVAL (op1) == last_div_const)
3248 ? mul_cost[compute_mode] + add_cost[compute_mode]
3249 : 0);
3250
3251 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3252
3253 /* Now convert to the best mode to use. */
3254 if (compute_mode != mode)
3255 {
3256 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3257 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3258
3259 /* convert_modes may have placed op1 into a register, so we
3260 must recompute the following. */
3261 op1_is_constant = GET_CODE (op1) == CONST_INT;
3262 op1_is_pow2 = (op1_is_constant
3263 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3264 || (! unsignedp
3265 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3266 }
3267
3268 /* If one of the operands is a volatile MEM, copy it into a register. */
3269
3270 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3271 op0 = force_reg (compute_mode, op0);
3272 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3273 op1 = force_reg (compute_mode, op1);
3274
3275 /* If we need the remainder or if OP1 is constant, we need to
3276 put OP0 in a register in case it has any queued subexpressions. */
3277 if (rem_flag || op1_is_constant)
3278 op0 = force_reg (compute_mode, op0);
3279
3280 last = get_last_insn ();
3281
3282 /* Promote floor rounding to trunc rounding for unsigned operations. */
3283 if (unsignedp)
3284 {
3285 if (code == FLOOR_DIV_EXPR)
3286 code = TRUNC_DIV_EXPR;
3287 if (code == FLOOR_MOD_EXPR)
3288 code = TRUNC_MOD_EXPR;
3289 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3290 code = TRUNC_DIV_EXPR;
3291 }
3292
3293 if (op1 != const0_rtx)
3294 switch (code)
3295 {
3296 case TRUNC_MOD_EXPR:
3297 case TRUNC_DIV_EXPR:
3298 if (op1_is_constant)
3299 {
3300 if (unsignedp)
3301 {
3302 unsigned HOST_WIDE_INT mh, ml;
3303 int pre_shift, post_shift;
3304 int dummy;
3305 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3306 & GET_MODE_MASK (compute_mode));
3307
3308 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3309 {
3310 pre_shift = floor_log2 (d);
3311 if (rem_flag)
3312 {
3313 remainder
3314 = expand_binop (compute_mode, and_optab, op0,
3315 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3316 remainder, 1,
3317 OPTAB_LIB_WIDEN);
3318 if (remainder)
3319 return gen_lowpart (mode, remainder);
3320 }
3321 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3322 build_int_2 (pre_shift, 0),
3323 tquotient, 1);
3324 }
3325 else if (size <= HOST_BITS_PER_WIDE_INT)
3326 {
3327 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3328 {
3329 /* Most significant bit of divisor is set; emit an scc
3330 insn. */
3331 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3332 compute_mode, 1, 1);
3333 if (quotient == 0)
3334 goto fail1;
3335 }
3336 else
3337 {
3338 /* Find a suitable multiplier and right shift count
3339 instead of multiplying with D. */
3340
3341 mh = choose_multiplier (d, size, size,
3342 &ml, &post_shift, &dummy);
3343
3344 /* If the suggested multiplier is more than SIZE bits,
3345 we can do better for even divisors, using an
3346 initial right shift. */
3347 if (mh != 0 && (d & 1) == 0)
3348 {
3349 pre_shift = floor_log2 (d & -d);
3350 mh = choose_multiplier (d >> pre_shift, size,
3351 size - pre_shift,
3352 &ml, &post_shift, &dummy);
3353 if (mh)
3354 abort ();
3355 }
3356 else
3357 pre_shift = 0;
3358
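/* If the multiplier does not fit in SIZE bits (mh != 0), use the
   add-back fixup from the Granlund/Montgomery scheme: multiply by
   the low SIZE bits of the multiplier, then fold the dividend back
   in.  A sketch, assuming a 32-bit mode: for d == 7 we get
   ml == 0x24924925 and post_shift == 3, and the sequence below
   computes roughly
     t1 = high32 (op0 * 0x24924925);
     t4 = ((op0 - t1) >> 1) + t1;
     quotient = t4 >> 2;  */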
3359 if (mh != 0)
3360 {
3361 rtx t1, t2, t3, t4;
3362
3363 if (post_shift - 1 >= BITS_PER_WORD)
3364 goto fail1;
3365
3366 extra_cost
3367 = (shift_cost[compute_mode][post_shift - 1]
3368 + shift_cost[compute_mode][1]
3369 + 2 * add_cost[compute_mode]);
3370 t1 = expand_mult_highpart (compute_mode, op0, ml,
3371 NULL_RTX, 1,
3372 max_cost - extra_cost);
3373 if (t1 == 0)
3374 goto fail1;
3375 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3376 op0, t1),
3377 NULL_RTX);
3378 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3379 build_int_2 (1, 0), NULL_RTX, 1);
3380 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3381 t1, t3),
3382 NULL_RTX);
3383 quotient
3384 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3385 build_int_2 (post_shift - 1, 0),
3386 tquotient, 1);
3387 }
3388 else
3389 {
3390 rtx t1, t2;
3391
3392 if (pre_shift >= BITS_PER_WORD
3393 || post_shift >= BITS_PER_WORD)
3394 goto fail1;
3395
3396 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3397 build_int_2 (pre_shift, 0),
3398 NULL_RTX, 1);
3399 extra_cost
3400 = (shift_cost[compute_mode][pre_shift]
3401 + shift_cost[compute_mode][post_shift]);
3402 t2 = expand_mult_highpart (compute_mode, t1, ml,
3403 NULL_RTX, 1,
3404 max_cost - extra_cost);
3405 if (t2 == 0)
3406 goto fail1;
3407 quotient
3408 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3409 build_int_2 (post_shift, 0),
3410 tquotient, 1);
3411 }
3412 }
3413 }
3414 else /* Mode too wide for the tricky code above.  */
3415 break;
3416
3417 insn = get_last_insn ();
3418 if (insn != last
3419 && (set = single_set (insn)) != 0
3420 && SET_DEST (set) == quotient)
3421 set_unique_reg_note (insn,
3422 REG_EQUAL,
3423 gen_rtx_UDIV (compute_mode, op0, op1));
3424 }
3425 else /* TRUNC_DIV, signed */
3426 {
3427 unsigned HOST_WIDE_INT ml;
3428 int lgup, post_shift;
3429 HOST_WIDE_INT d = INTVAL (op1);
3430 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3431
3432 /* n rem d = n rem -d */
3433 if (rem_flag && d < 0)
3434 {
3435 d = abs_d;
3436 op1 = gen_int_mode (abs_d, compute_mode);
3437 }
3438
3439 if (d == 1)
3440 quotient = op0;
3441 else if (d == -1)
3442 quotient = expand_unop (compute_mode, neg_optab, op0,
3443 tquotient, 0);
3444 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3445 {
3446 /* This case is not handled correctly below: the quotient is (OP0 == OP1).  */
3447 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3448 compute_mode, 1, 1);
3449 if (quotient == 0)
3450 goto fail1;
3451 }
3452 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3453 && (rem_flag ? smod_pow2_cheap[compute_mode]
3454 : sdiv_pow2_cheap[compute_mode])
3455 /* ??? The cheap metric is computed only for
3456 word_mode. If this operation is wider, this may
3457 not be so. Assume true if the optab has an
3458 expander for this mode. */
3459 && (((rem_flag ? smod_optab : sdiv_optab)
3460 ->handlers[compute_mode].insn_code
3461 != CODE_FOR_nothing)
3462 || (sdivmod_optab->handlers[compute_mode]
3463 .insn_code != CODE_FOR_nothing)))
3464 ;
3465 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3466 {
3467 lgup = floor_log2 (abs_d);
3468 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3469 {
3470 rtx label = gen_label_rtx ();
3471 rtx t1;
3472
3473 t1 = copy_to_mode_reg (compute_mode, op0);
3474 do_cmp_and_jump (t1, const0_rtx, GE,
3475 compute_mode, label);
3476 expand_inc (t1, gen_int_mode (abs_d - 1,
3477 compute_mode));
3478 emit_label (label);
3479 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3480 build_int_2 (lgup, 0),
3481 tquotient, 0);
3482 }
3483 else
3484 {
3485 rtx t1, t2, t3;
3486 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3487 build_int_2 (size - 1, 0),
3488 NULL_RTX, 0);
3489 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3490 build_int_2 (size - lgup, 0),
3491 NULL_RTX, 1);
3492 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3493 op0, t2),
3494 NULL_RTX);
3495 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3496 build_int_2 (lgup, 0),
3497 tquotient, 0);
3498 }
3499
3500 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3501 the quotient. */
3502 if (d < 0)
3503 {
3504 insn = get_last_insn ();
3505 if (insn != last
3506 && (set = single_set (insn)) != 0
3507 && SET_DEST (set) == quotient
3508 && abs_d < ((unsigned HOST_WIDE_INT) 1
3509 << (HOST_BITS_PER_WIDE_INT - 1)))
3510 set_unique_reg_note (insn,
3511 REG_EQUAL,
3512 gen_rtx_DIV (compute_mode,
3513 op0,
3514 GEN_INT
3515 (trunc_int_for_mode
3516 (abs_d,
3517 compute_mode))));
3518
3519 quotient = expand_unop (compute_mode, neg_optab,
3520 quotient, quotient, 0);
3521 }
3522 }
3523 else if (size <= HOST_BITS_PER_WIDE_INT)
3524 {
3525 choose_multiplier (abs_d, size, size - 1,
3526 &ml, &post_shift, &lgup);
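/* When the chosen multiplier ML fits in SIZE - 1 bits, the signed
   quotient is high (op0 * ml) >> post_shift, corrected by
   subtracting the sign word op0 >> (size - 1) so that negative
   dividends still round toward zero.  A sketch, assuming a 32-bit
   mode: for d == 5, ml == 0x66666667 and post_shift == 1, giving
     t1 = high32 (op0 * 0x66666667);
     quotient = (t1 >> 1) - (op0 >> 31);
   Otherwise op0 is added back in after the multiply (the second
   branch below).  */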
3527 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3528 {
3529 rtx t1, t2, t3;
3530
3531 if (post_shift >= BITS_PER_WORD
3532 || size - 1 >= BITS_PER_WORD)
3533 goto fail1;
3534
3535 extra_cost = (shift_cost[compute_mode][post_shift]
3536 + shift_cost[compute_mode][size - 1]
3537 + add_cost[compute_mode]);
3538 t1 = expand_mult_highpart (compute_mode, op0, ml,
3539 NULL_RTX, 0,
3540 max_cost - extra_cost);
3541 if (t1 == 0)
3542 goto fail1;
3543 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3544 build_int_2 (post_shift, 0), NULL_RTX, 0);
3545 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3546 build_int_2 (size - 1, 0), NULL_RTX, 0);
3547 if (d < 0)
3548 quotient
3549 = force_operand (gen_rtx_MINUS (compute_mode,
3550 t3, t2),
3551 tquotient);
3552 else
3553 quotient
3554 = force_operand (gen_rtx_MINUS (compute_mode,
3555 t2, t3),
3556 tquotient);
3557 }
3558 else
3559 {
3560 rtx t1, t2, t3, t4;
3561
3562 if (post_shift >= BITS_PER_WORD
3563 || size - 1 >= BITS_PER_WORD)
3564 goto fail1;
3565
3566 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3567 extra_cost = (shift_cost[compute_mode][post_shift]
3568 + shift_cost[compute_mode][size - 1]
3569 + 2 * add_cost[compute_mode]);
3570 t1 = expand_mult_highpart (compute_mode, op0, ml,
3571 NULL_RTX, 0,
3572 max_cost - extra_cost);
3573 if (t1 == 0)
3574 goto fail1;
3575 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3576 t1, op0),
3577 NULL_RTX);
3578 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3579 build_int_2 (post_shift, 0),
3580 NULL_RTX, 0);
3581 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3582 build_int_2 (size - 1, 0),
3583 NULL_RTX, 0);
3584 if (d < 0)
3585 quotient
3586 = force_operand (gen_rtx_MINUS (compute_mode,
3587 t4, t3),
3588 tquotient);
3589 else
3590 quotient
3591 = force_operand (gen_rtx_MINUS (compute_mode,
3592 t3, t4),
3593 tquotient);
3594 }
3595 }
3596 else /* Mode too wide for the tricky code above.  */
3597 break;
3598
3599 insn = get_last_insn ();
3600 if (insn != last
3601 && (set = single_set (insn)) != 0
3602 && SET_DEST (set) == quotient)
3603 set_unique_reg_note (insn,
3604 REG_EQUAL,
3605 gen_rtx_DIV (compute_mode, op0, op1));
3606 }
3607 break;
3608 }
3609 fail1:
3610 delete_insns_since (last);
3611 break;
3612
3613 case FLOOR_DIV_EXPR:
3614 case FLOOR_MOD_EXPR:
3615 /* We will come here only for signed operations. */
3616 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3617 {
3618 unsigned HOST_WIDE_INT mh, ml;
3619 int pre_shift, lgup, post_shift;
3620 HOST_WIDE_INT d = INTVAL (op1);
3621
3622 if (d > 0)
3623 {
3624 /* We could just as easily deal with negative constants here,
3625 but it does not seem worth the trouble for GCC 2.6. */
3626 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3627 {
3628 pre_shift = floor_log2 (d);
3629 if (rem_flag)
3630 {
3631 remainder = expand_binop (compute_mode, and_optab, op0,
3632 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3633 remainder, 0, OPTAB_LIB_WIDEN);
3634 if (remainder)
3635 return gen_lowpart (mode, remainder);
3636 }
3637 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3638 build_int_2 (pre_shift, 0),
3639 tquotient, 0);
3640 }
3641 else
3642 {
3643 rtx t1, t2, t3, t4;
3644
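/* Floor division by a positive constant via a one's-complement
   "absolute value": t1 is the sign word, t2 = op0 ^ t1 maps a
   negative x to -x - 1, the unsigned multiply and shift divide
   that, and the final XOR with t1 complements the result back.
   The off-by-one of the complement is exactly what turns
   truncation into rounding toward minus infinity.  */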
3645 mh = choose_multiplier (d, size, size - 1,
3646 &ml, &post_shift, &lgup);
3647 if (mh)
3648 abort ();
3649
3650 if (post_shift < BITS_PER_WORD
3651 && size - 1 < BITS_PER_WORD)
3652 {
3653 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3654 build_int_2 (size - 1, 0),
3655 NULL_RTX, 0);
3656 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3657 NULL_RTX, 0, OPTAB_WIDEN);
3658 extra_cost = (shift_cost[compute_mode][post_shift]
3659 + shift_cost[compute_mode][size - 1]
3660 + 2 * add_cost[compute_mode]);
3661 t3 = expand_mult_highpart (compute_mode, t2, ml,
3662 NULL_RTX, 1,
3663 max_cost - extra_cost);
3664 if (t3 != 0)
3665 {
3666 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3667 build_int_2 (post_shift, 0),
3668 NULL_RTX, 1);
3669 quotient = expand_binop (compute_mode, xor_optab,
3670 t4, t1, tquotient, 0,
3671 OPTAB_WIDEN);
3672 }
3673 }
3674 }
3675 }
3676 else
3677 {
3678 rtx nsign, t1, t2, t3, t4;
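/* Floor division by a non-positive constant divisor: NSIGN is -1
   when op0 <= 0 and 0 otherwise, so t3 = op0 - 1 - nsign is
   op0 - 1 for positive op0 and op0 otherwise; truncating t3 / op1
   and adding ~nsign then yields the floor.  E.g. 7 / -2:
   nsign == 0, t3 == 6, 6 / -2 == -3, and -3 + ~0 == -4
   == floor (-3.5).  */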
3679 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3680 op0, constm1_rtx), NULL_RTX);
3681 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3682 0, OPTAB_WIDEN);
3683 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3684 build_int_2 (size - 1, 0), NULL_RTX, 0);
3685 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3686 NULL_RTX);
3687 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3688 NULL_RTX, 0);
3689 if (t4)
3690 {
3691 rtx t5;
3692 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3693 NULL_RTX, 0);
3694 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3695 t4, t5),
3696 tquotient);
3697 }
3698 }
3699 }
3700
3701 if (quotient != 0)
3702 break;
3703 delete_insns_since (last);
3704
3705 /* Try using an instruction that produces both the quotient and
3706 remainder, using truncation. We can easily compensate the quotient
3707 or remainder to get floor rounding, once we have the remainder.
3708 Notice that we also compute the final remainder value here,
3709 and return the result right away. */
3710 if (target == 0 || GET_MODE (target) != compute_mode)
3711 target = gen_reg_rtx (compute_mode);
3712
3713 if (rem_flag)
3714 {
3715 remainder
3716 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
3717 quotient = gen_reg_rtx (compute_mode);
3718 }
3719 else
3720 {
3721 quotient
3722 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
3723 remainder = gen_reg_rtx (compute_mode);
3724 }
3725
3726 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3727 quotient, remainder, 0))
3728 {
3729 /* This could be computed with a branch-less sequence.
3730 Save that for later. */
3731 rtx tem;
3732 rtx label = gen_label_rtx ();
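/* Truncating division rounds toward zero, so when the remainder is
   nonzero and op0 ^ op1 is negative (operands of opposite sign,
   true quotient negative), floor rounding needs quotient - 1 and
   remainder + op1.  E.g. -7 / 2: trunc gives -3 rem -1, floor is
   -4 rem 1.  */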
3733 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3734 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3735 NULL_RTX, 0, OPTAB_WIDEN);
3736 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3737 expand_dec (quotient, const1_rtx);
3738 expand_inc (remainder, op1);
3739 emit_label (label);
3740 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3741 }
3742
3743 /* No luck with division elimination or divmod. Have to do it
3744 by conditionally adjusting op0 *and* the result. */
3745 {
3746 rtx label1, label2, label3, label4, label5;
3747 rtx adjusted_op0;
3748 rtx tem;
3749
3750 quotient = gen_reg_rtx (compute_mode);
3751 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3752 label1 = gen_label_rtx ();
3753 label2 = gen_label_rtx ();
3754 label3 = gen_label_rtx ();
3755 label4 = gen_label_rtx ();
3756 label5 = gen_label_rtx ();
3757 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3758 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3759 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3760 quotient, 0, OPTAB_LIB_WIDEN);
3761 if (tem != quotient)
3762 emit_move_insn (quotient, tem);
3763 emit_jump_insn (gen_jump (label5));
3764 emit_barrier ();
3765 emit_label (label1);
3766 expand_inc (adjusted_op0, const1_rtx);
3767 emit_jump_insn (gen_jump (label4));
3768 emit_barrier ();
3769 emit_label (label2);
3770 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3771 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3772 quotient, 0, OPTAB_LIB_WIDEN);
3773 if (tem != quotient)
3774 emit_move_insn (quotient, tem);
3775 emit_jump_insn (gen_jump (label5));
3776 emit_barrier ();
3777 emit_label (label3);
3778 expand_dec (adjusted_op0, const1_rtx);
3779 emit_label (label4);
3780 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3781 quotient, 0, OPTAB_LIB_WIDEN);
3782 if (tem != quotient)
3783 emit_move_insn (quotient, tem);
3784 expand_dec (quotient, const1_rtx);
3785 emit_label (label5);
3786 }
3787 break;
3788
3789 case CEIL_DIV_EXPR:
3790 case CEIL_MOD_EXPR:
3791 if (unsignedp)
3792 {
3793 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3794 {
3795 rtx t1, t2, t3;
3796 unsigned HOST_WIDE_INT d = INTVAL (op1);
3797 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3798 build_int_2 (floor_log2 (d), 0),
3799 tquotient, 1);
3800 t2 = expand_binop (compute_mode, and_optab, op0,
3801 GEN_INT (d - 1),
3802 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3803 t3 = gen_reg_rtx (compute_mode);
3804 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3805 compute_mode, 1, 1);
3806 if (t3 == 0)
3807 {
3808 rtx lab;
3809 lab = gen_label_rtx ();
3810 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3811 expand_inc (t1, const1_rtx);
3812 emit_label (lab);
3813 quotient = t1;
3814 }
3815 else
3816 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3817 t1, t3),
3818 tquotient);
3819 break;
3820 }
3821
3822 /* Try using an instruction that produces both the quotient and
3823 remainder, using truncation. We can easily compensate the
3824 quotient or remainder to get ceiling rounding, once we have the
3825 remainder. Notice that we also compute the final remainder
3826 value here, and return the result right away. */
3827 if (target == 0 || GET_MODE (target) != compute_mode)
3828 target = gen_reg_rtx (compute_mode);
3829
3830 if (rem_flag)
3831 {
3832 remainder = (REG_P (target)
3833 ? target : gen_reg_rtx (compute_mode));
3834 quotient = gen_reg_rtx (compute_mode);
3835 }
3836 else
3837 {
3838 quotient = (REG_P (target)
3839 ? target : gen_reg_rtx (compute_mode));
3840 remainder = gen_reg_rtx (compute_mode);
3841 }
3842
3843 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3844 remainder, 1))
3845 {
3846 /* This could be computed with a branch-less sequence.
3847 Save that for later. */
3848 rtx label = gen_label_rtx ();
3849 do_cmp_and_jump (remainder, const0_rtx, EQ,
3850 compute_mode, label);
3851 expand_inc (quotient, const1_rtx);
3852 expand_dec (remainder, op1);
3853 emit_label (label);
3854 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3855 }
3856
3857 /* No luck with division elimination or divmod. Have to do it
3858 by conditionally adjusting op0 *and* the result. */
3859 {
3860 rtx label1, label2;
3861 rtx adjusted_op0, tem;
3862
3863 quotient = gen_reg_rtx (compute_mode);
3864 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3865 label1 = gen_label_rtx ();
3866 label2 = gen_label_rtx ();
3867 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3868 compute_mode, label1);
3869 emit_move_insn (quotient, const0_rtx);
3870 emit_jump_insn (gen_jump (label2));
3871 emit_barrier ();
3872 emit_label (label1);
3873 expand_dec (adjusted_op0, const1_rtx);
3874 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3875 quotient, 1, OPTAB_LIB_WIDEN);
3876 if (tem != quotient)
3877 emit_move_insn (quotient, tem);
3878 expand_inc (quotient, const1_rtx);
3879 emit_label (label2);
3880 }
3881 }
3882 else /* signed */
3883 {
3884 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3885 && INTVAL (op1) >= 0)
3886 {
3887 /* This is extremely similar to the code for the unsigned case
3888 above. For 2.7 we should merge these variants, but for
3889 2.6.1 I don't want to touch the code for unsigned since that
3890 gets used in C. The signed case will only be used by other
3891 languages (Ada). */
3892
3893 rtx t1, t2, t3;
3894 unsigned HOST_WIDE_INT d = INTVAL (op1);
3895 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3896 build_int_2 (floor_log2 (d), 0),
3897 tquotient, 0);
3898 t2 = expand_binop (compute_mode, and_optab, op0,
3899 GEN_INT (d - 1),
3900 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3901 t3 = gen_reg_rtx (compute_mode);
3902 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3903 compute_mode, 1, 1);
3904 if (t3 == 0)
3905 {
3906 rtx lab;
3907 lab = gen_label_rtx ();
3908 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3909 expand_inc (t1, const1_rtx);
3910 emit_label (lab);
3911 quotient = t1;
3912 }
3913 else
3914 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3915 t1, t3),
3916 tquotient);
3917 break;
3918 }
3919
3920 /* Try using an instruction that produces both the quotient and
3921 remainder, using truncation. We can easily compensate the
3922 quotient or remainder to get ceiling rounding, once we have the
3923 remainder. Notice that we also compute the final remainder
3924 value here, and return the result right away. */
3925 if (target == 0 || GET_MODE (target) != compute_mode)
3926 target = gen_reg_rtx (compute_mode);
3927 if (rem_flag)
3928 {
3929 remainder = (REG_P (target)
3930 ? target : gen_reg_rtx (compute_mode));
3931 quotient = gen_reg_rtx (compute_mode);
3932 }
3933 else
3934 {
3935 quotient = (REG_P (target)
3936 ? target : gen_reg_rtx (compute_mode));
3937 remainder = gen_reg_rtx (compute_mode);
3938 }
3939
3940 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3941 remainder, 0))
3942 {
3943 /* This could be computed with a branch-less sequence.
3944 Save that for later. */
3945 rtx tem;
3946 rtx label = gen_label_rtx ();
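/* For ceiling rounding the adjustment goes the other way: when the
   remainder is nonzero and op0 ^ op1 is non-negative (true quotient
   positive), use quotient + 1 and remainder - op1.  */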
3947 do_cmp_and_jump (remainder, const0_rtx, EQ,
3948 compute_mode, label);
3949 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3950 NULL_RTX, 0, OPTAB_WIDEN);
3951 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3952 expand_inc (quotient, const1_rtx);
3953 expand_dec (remainder, op1);
3954 emit_label (label);
3955 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3956 }
3957
3958 /* No luck with division elimination or divmod. Have to do it
3959 by conditionally adjusting op0 *and* the result. */
3960 {
3961 rtx label1, label2, label3, label4, label5;
3962 rtx adjusted_op0;
3963 rtx tem;
3964
3965 quotient = gen_reg_rtx (compute_mode);
3966 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3967 label1 = gen_label_rtx ();
3968 label2 = gen_label_rtx ();
3969 label3 = gen_label_rtx ();
3970 label4 = gen_label_rtx ();
3971 label5 = gen_label_rtx ();
3972 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3973 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3974 compute_mode, label1);
3975 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3976 quotient, 0, OPTAB_LIB_WIDEN);
3977 if (tem != quotient)
3978 emit_move_insn (quotient, tem);
3979 emit_jump_insn (gen_jump (label5));
3980 emit_barrier ();
3981 emit_label (label1);
3982 expand_dec (adjusted_op0, const1_rtx);
3983 emit_jump_insn (gen_jump (label4));
3984 emit_barrier ();
3985 emit_label (label2);
3986 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3987 compute_mode, label3);
3988 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3989 quotient, 0, OPTAB_LIB_WIDEN);
3990 if (tem != quotient)
3991 emit_move_insn (quotient, tem);
3992 emit_jump_insn (gen_jump (label5));
3993 emit_barrier ();
3994 emit_label (label3);
3995 expand_inc (adjusted_op0, const1_rtx);
3996 emit_label (label4);
3997 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3998 quotient, 0, OPTAB_LIB_WIDEN);
3999 if (tem != quotient)
4000 emit_move_insn (quotient, tem);
4001 expand_inc (quotient, const1_rtx);
4002 emit_label (label5);
4003 }
4004 }
4005 break;
4006
4007 case EXACT_DIV_EXPR:
4008 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4009 {
4010 HOST_WIDE_INT d = INTVAL (op1);
4011 unsigned HOST_WIDE_INT ml;
4012 int pre_shift;
4013 rtx t1;
4014
4015 pre_shift = floor_log2 (d & -d);
4016 ml = invert_mod2n (d >> pre_shift, size);
4017 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4018 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
4019 quotient = expand_mult (compute_mode, t1,
4020 gen_int_mode (ml, compute_mode),
4021 NULL_RTX, 1);
4022
4023 insn = get_last_insn ();
4024 set_unique_reg_note (insn,
4025 REG_EQUAL,
4026 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4027 compute_mode,
4028 op0, op1));
4029 }
4030 break;
4031
4032 case ROUND_DIV_EXPR:
4033 case ROUND_MOD_EXPR:
4034 if (unsignedp)
4035 {
4036 rtx tem;
4037 rtx label;
4038 label = gen_label_rtx ();
4039 quotient = gen_reg_rtx (compute_mode);
4040 remainder = gen_reg_rtx (compute_mode);
4041 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4042 {
4043 rtx tem;
4044 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4045 quotient, 1, OPTAB_LIB_WIDEN);
4046 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4047 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4048 remainder, 1, OPTAB_LIB_WIDEN);
4049 }
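/* Round to nearest: the quotient is bumped exactly when the
   remainder exceeds floor ((op1 - 1) / 2), so exact halves round
   upward.  E.g. 13 / 4: rem 1 <= 1, quotient stays 3; 14 / 4:
   rem 2 > 1, quotient becomes 4.  */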
4050 tem = plus_constant (op1, -1);
4051 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4052 build_int_2 (1, 0), NULL_RTX, 1);
4053 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4054 expand_inc (quotient, const1_rtx);
4055 expand_dec (remainder, op1);
4056 emit_label (label);
4057 }
4058 else
4059 {
4060 rtx abs_rem, abs_op1, tem, mask;
4061 rtx label;
4062 label = gen_label_rtx ();
4063 quotient = gen_reg_rtx (compute_mode);
4064 remainder = gen_reg_rtx (compute_mode);
4065 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4066 {
4067 rtx tem;
4068 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4069 quotient, 0, OPTAB_LIB_WIDEN);
4070 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4071 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4072 remainder, 0, OPTAB_LIB_WIDEN);
4073 }
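/* Signed round to nearest: adjust when 2 * abs (remainder) is at
   least abs (op1).  MASK below is (op0 ^ op1) >> (size - 1), i.e.
   0 when the signs agree and -1 when they differ, so
   (mask ^ 1) - mask is +1 or -1 and (mask ^ op1) - mask is op1 or
   -op1, giving the branch-free adjustment of both results.  */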
4074 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4075 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4076 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4077 build_int_2 (1, 0), NULL_RTX, 1);
4078 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4079 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4080 NULL_RTX, 0, OPTAB_WIDEN);
4081 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4082 build_int_2 (size - 1, 0), NULL_RTX, 0);
4083 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4084 NULL_RTX, 0, OPTAB_WIDEN);
4085 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4086 NULL_RTX, 0, OPTAB_WIDEN);
4087 expand_inc (quotient, tem);
4088 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4089 NULL_RTX, 0, OPTAB_WIDEN);
4090 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4091 NULL_RTX, 0, OPTAB_WIDEN);
4092 expand_dec (remainder, tem);
4093 emit_label (label);
4094 }
4095 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4096
4097 default:
4098 abort ();
4099 }
4100
4101 if (quotient == 0)
4102 {
4103 if (target && GET_MODE (target) != compute_mode)
4104 target = 0;
4105
4106 if (rem_flag)
4107 {
4108 /* Try to produce the remainder without producing the quotient.
4109 If we seem to have a divmod pattern that does not require widening,
4110 don't try widening here. We should really have a WIDEN argument
4111 to expand_twoval_binop, since what we'd really like to do here is
4112 1) try a mod insn in compute_mode
4113 2) try a divmod insn in compute_mode
4114 3) try a div insn in compute_mode and multiply-subtract to get
4115 remainder
4116 4) try the same things with widening allowed. */
4117 remainder
4118 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4119 op0, op1, target,
4120 unsignedp,
4121 ((optab2->handlers[compute_mode].insn_code
4122 != CODE_FOR_nothing)
4123 ? OPTAB_DIRECT : OPTAB_WIDEN));
4124 if (remainder == 0)
4125 {
4126 /* No luck there. Can we do remainder and divide at once
4127 without a library call? */
4128 remainder = gen_reg_rtx (compute_mode);
4129 if (! expand_twoval_binop ((unsignedp
4130 ? udivmod_optab
4131 : sdivmod_optab),
4132 op0, op1,
4133 NULL_RTX, remainder, unsignedp))
4134 remainder = 0;
4135 }
4136
4137 if (remainder)
4138 return gen_lowpart (mode, remainder);
4139 }
4140
4141 /* Produce the quotient. Try a quotient insn, but not a library call.
4142 If we have a divmod in this mode, use it in preference to widening
4143 the div (for this test we assume it will not fail). Note that optab2
4144 is set to whichever of the two optabs the call below will use. */
4145 quotient
4146 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4147 op0, op1, rem_flag ? NULL_RTX : target,
4148 unsignedp,
4149 ((optab2->handlers[compute_mode].insn_code
4150 != CODE_FOR_nothing)
4151 ? OPTAB_DIRECT : OPTAB_WIDEN));
4152
4153 if (quotient == 0)
4154 {
4155 /* No luck there. Try a quotient-and-remainder insn,
4156 keeping the quotient alone. */
4157 quotient = gen_reg_rtx (compute_mode);
4158 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4159 op0, op1,
4160 quotient, NULL_RTX, unsignedp))
4161 {
4162 quotient = 0;
4163 if (! rem_flag)
4164 /* Still no luck. If we are not computing the remainder,
4165 use a library call for the quotient. */
4166 quotient = sign_expand_binop (compute_mode,
4167 udiv_optab, sdiv_optab,
4168 op0, op1, target,
4169 unsignedp, OPTAB_LIB_WIDEN);
4170 }
4171 }
4172 }
4173
4174 if (rem_flag)
4175 {
4176 if (target && GET_MODE (target) != compute_mode)
4177 target = 0;
4178
4179 if (quotient == 0)
4180 /* No divide instruction either. Use library for remainder. */
4181 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4182 op0, op1, target,
4183 unsignedp, OPTAB_LIB_WIDEN);
4184 else
4185 {
4186 /* We divided. Now finish doing X - Y * (X / Y). */
4187 remainder = expand_mult (compute_mode, quotient, op1,
4188 NULL_RTX, unsignedp);
4189 remainder = expand_binop (compute_mode, sub_optab, op0,
4190 remainder, target, unsignedp,
4191 OPTAB_LIB_WIDEN);
4192 }
4193 }
4194
4195 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4196 }
4197 \f
4198 /* Return a tree node with data type TYPE, describing the value of X.
4199 Usually this is an RTL_EXPR, if there is no obvious better choice.
4200 X may be an expression; however, we only support those expressions
4201 generated by loop.c. */
4202
4203 tree
4204 make_tree (tree type, rtx x)
4205 {
4206 tree t;
4207
4208 switch (GET_CODE (x))
4209 {
4210 case CONST_INT:
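/* The second argument of build_int_2 is the high half of the
   constant: zero when the value is non-negative or the unsigned
   type is narrower than HOST_WIDE_INT, -1 to sign-extend
   otherwise.  */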
4211 t = build_int_2 (INTVAL (x),
4212 (TYPE_UNSIGNED (type)
4213 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4214 < HOST_BITS_PER_WIDE_INT))
4215 || INTVAL (x) >= 0 ? 0 : -1);
4216 TREE_TYPE (t) = type;
4217 return t;
4218
4219 case CONST_DOUBLE:
4220 if (GET_MODE (x) == VOIDmode)
4221 {
4222 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4223 TREE_TYPE (t) = type;
4224 }
4225 else
4226 {
4227 REAL_VALUE_TYPE d;
4228
4229 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4230 t = build_real (type, d);
4231 }
4232
4233 return t;
4234
4235 case CONST_VECTOR:
4236 {
4237 int i, units;
4238 rtx elt;
4239 tree t = NULL_TREE;
4240
4241 units = CONST_VECTOR_NUNITS (x);
4242
4243 /* Build a tree with vector elements. */
4244 for (i = units - 1; i >= 0; --i)
4245 {
4246 elt = CONST_VECTOR_ELT (x, i);
4247 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4248 }
4249
4250 return build_vector (type, t);
4251 }
4252
4253 case PLUS:
4254 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4255 make_tree (type, XEXP (x, 1))));
4256
4257 case MINUS:
4258 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4259 make_tree (type, XEXP (x, 1))));
4260
4261 case NEG:
4262 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4263
4264 case MULT:
4265 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4266 make_tree (type, XEXP (x, 1))));
4267
4268 case ASHIFT:
4269 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4270 make_tree (type, XEXP (x, 1))));
4271
4272 case LSHIFTRT:
4273 t = lang_hooks.types.unsigned_type (type);
4274 return fold (convert (type,
4275 build (RSHIFT_EXPR, t,
4276 make_tree (t, XEXP (x, 0)),
4277 make_tree (type, XEXP (x, 1)))));
4278
4279 case ASHIFTRT:
4280 t = lang_hooks.types.signed_type (type);
4281 return fold (convert (type,
4282 build (RSHIFT_EXPR, t,
4283 make_tree (t, XEXP (x, 0)),
4284 make_tree (type, XEXP (x, 1)))));
4285
4286 case DIV:
4287 if (TREE_CODE (type) != REAL_TYPE)
4288 t = lang_hooks.types.signed_type (type);
4289 else
4290 t = type;
4291
4292 return fold (convert (type,
4293 build (TRUNC_DIV_EXPR, t,
4294 make_tree (t, XEXP (x, 0)),
4295 make_tree (t, XEXP (x, 1)))));
4296 case UDIV:
4297 t = lang_hooks.types.unsigned_type (type);
4298 return fold (convert (type,
4299 build (TRUNC_DIV_EXPR, t,
4300 make_tree (t, XEXP (x, 0)),
4301 make_tree (t, XEXP (x, 1)))));
4302
4303 case SIGN_EXTEND:
4304 case ZERO_EXTEND:
4305 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4306 GET_CODE (x) == ZERO_EXTEND);
4307 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4308
4309 default:
4310 t = make_node (RTL_EXPR);
4311 TREE_TYPE (t) = type;
4312
4313 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4314 ptr_mode. So convert. */
4315 if (POINTER_TYPE_P (type))
4316 x = convert_memory_address (TYPE_MODE (type), x);
4317
4318 RTL_EXPR_RTL (t) = x;
4319 /* There are no insns to be output
4320 when this rtl_expr is used. */
4321 RTL_EXPR_SEQUENCE (t) = 0;
4322 return t;
4323 }
4324 }
4325
4326 /* Check whether the multiplication X * MULT + ADD overflows.
4327 X, MULT and ADD must be CONST_*.
4328 MODE is the machine mode for the computation.
4329 X and MULT must have mode MODE. ADD may have a different mode
4330 (it defaults to MODE when its mode is VOIDmode).
4331 UNSIGNEDP is nonzero to do unsigned multiplication. */
4332
4333 bool
4334 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4335 {
4336 tree type, mult_type, add_type, result;
4337
4338 type = lang_hooks.types.type_for_mode (mode, unsignedp);
4339
4340 /* In order to get a proper overflow indication from an unsigned
4341 type, we have to pretend that it's a sizetype. */
4342 mult_type = type;
4343 if (unsignedp)
4344 {
4345 mult_type = copy_node (type);
4346 TYPE_IS_SIZETYPE (mult_type) = 1;
4347 }
4348
4349 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4350 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
4351
4352 result = fold (build (PLUS_EXPR, mult_type,
4353 fold (build (MULT_EXPR, mult_type,
4354 make_tree (mult_type, x),
4355 make_tree (mult_type, mult))),
4356 make_tree (add_type, add)));
4357
4358 return TREE_CONSTANT_OVERFLOW (result);
4359 }
4360
4361 /* Return an rtx representing the value of X * MULT + ADD.
4362 TARGET is a suggestion for where to store the result (an rtx).
4363 MODE is the machine mode for the computation.
4364 X and MULT must have mode MODE. ADD may have a different mode
4365 (it defaults to MODE when its mode is VOIDmode).
4366 UNSIGNEDP is nonzero to do unsigned multiplication.
4367 This may emit insns. */
4368
4369 rtx
4370 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4371 int unsignedp)
4372 {
4373 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
4374 tree add_type = (GET_MODE (add) == VOIDmode
4375 ? type : lang_hooks.types.type_for_mode (GET_MODE (add),
4376 unsignedp));
4377 tree result = fold (build (PLUS_EXPR, type,
4378 fold (build (MULT_EXPR, type,
4379 make_tree (type, x),
4380 make_tree (type, mult))),
4381 make_tree (add_type, add)));
4382
4383 return expand_expr (result, target, VOIDmode, 0);
4384 }
4385 \f
4386 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4387 and returning TARGET.
4388
4389 If TARGET is 0, a pseudo-register or constant is returned. */
4390
4391 rtx
4392 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4393 {
4394 rtx tem = 0;
4395
4396 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4397 tem = simplify_binary_operation (AND, mode, op0, op1);
4398 if (tem == 0)
4399 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4400
4401 if (target == 0)
4402 target = tem;
4403 else if (tem != target)
4404 emit_move_insn (target, tem);
4405 return target;
4406 }
4407 \f
4408 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4409 and storing in TARGET. Normally return TARGET.
4410 Return 0 if that cannot be done.
4411
4412 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4413 it is VOIDmode, they cannot both be CONST_INT.
4414
4415 UNSIGNEDP is for the case where we have to widen the operands
4416 to perform the operation. It says to use zero-extension.
4417
4418 NORMALIZEP is 1 if we should convert the result to be either zero
4419 or one. NORMALIZEP is -1 if we should convert the result to be
4420 either zero or -1. If NORMALIZEP is zero, the result will be left
4421 "raw" out of the scc insn. */
4422
4423 rtx
4424 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4425 enum machine_mode mode, int unsignedp, int normalizep)
4426 {
4427 rtx subtarget;
4428 enum insn_code icode;
4429 enum machine_mode compare_mode;
4430 enum machine_mode target_mode = GET_MODE (target);
4431 rtx tem;
4432 rtx last = get_last_insn ();
4433 rtx pattern, comparison;
4434
4435 /* ??? Ok to do this and then fail? */
4436 op0 = protect_from_queue (op0, 0);
4437 op1 = protect_from_queue (op1, 0);
4438
4439 if (unsignedp)
4440 code = unsigned_condition (code);
4441
4442 /* If one operand is constant, make it the second one. Only do this
4443 if the other operand is not constant as well. */
4444
4445 if (swap_commutative_operands_p (op0, op1))
4446 {
4447 tem = op0;
4448 op0 = op1;
4449 op1 = tem;
4450 code = swap_condition (code);
4451 }
4452
4453 if (mode == VOIDmode)
4454 mode = GET_MODE (op0);
4455
4456 /* For some comparisons with 1 and -1, we can convert this to
4457 comparisons with zero. This will often produce more opportunities for
4458 store-flag insns. */
4459
4460 switch (code)
4461 {
4462 case LT:
4463 if (op1 == const1_rtx)
4464 op1 = const0_rtx, code = LE;
4465 break;
4466 case LE:
4467 if (op1 == constm1_rtx)
4468 op1 = const0_rtx, code = LT;
4469 break;
4470 case GE:
4471 if (op1 == const1_rtx)
4472 op1 = const0_rtx, code = GT;
4473 break;
4474 case GT:
4475 if (op1 == constm1_rtx)
4476 op1 = const0_rtx, code = GE;
4477 break;
4478 case GEU:
4479 if (op1 == const1_rtx)
4480 op1 = const0_rtx, code = NE;
4481 break;
4482 case LTU:
4483 if (op1 == const1_rtx)
4484 op1 = const0_rtx, code = EQ;
4485 break;
4486 default:
4487 break;
4488 }
4489
4490 /* If we are comparing a double-word integer with zero, we can convert
4491 the comparison into one involving a single word. */
4492 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4493 && GET_MODE_CLASS (mode) == MODE_INT
4494 && op1 == const0_rtx
4495 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4496 {
4497 if (code == EQ || code == NE)
4498 {
4499 rtx op00, op01, op0both;
4500
4501 /* Do a logical OR of the two words and compare the result. */
4502 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4503 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4504 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4505 NULL_RTX, unsignedp, OPTAB_DIRECT);
4506 if (op0both != 0)
4507 return emit_store_flag (target, code, op0both, op1, word_mode,
4508 unsignedp, normalizep);
4509 }
4510 else if (code == LT || code == GE)
4511 {
4512 rtx op0h;
4513
4514 /* If testing the sign bit, we can just test the high word. */
4515 op0h = simplify_gen_subreg (word_mode, op0, mode,
4516 subreg_highpart_offset (word_mode, mode));
4517 return emit_store_flag (target, code, op0h, op1, word_mode,
4518 unsignedp, normalizep);
4519 }
4520 }
4521
4522 /* From now on, we won't change CODE, so set ICODE now. */
4523 icode = setcc_gen_code[(int) code];
4524
4525 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4526 complement of A (for GE) and shifting the sign bit to the low bit. */
4527 if (op1 == const0_rtx && (code == LT || code == GE)
4528 && GET_MODE_CLASS (mode) == MODE_INT
4529 && (normalizep || STORE_FLAG_VALUE == 1
4530 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4531 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4532 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4533 {
4534 subtarget = target;
4535
4536 /* If the result is to be wider than OP0, it is best to convert it
4537 first. If it is to be narrower, it is *incorrect* to convert it
4538 first. */
4539 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4540 {
4541 op0 = protect_from_queue (op0, 0);
4542 op0 = convert_modes (target_mode, mode, op0, 0);
4543 mode = target_mode;
4544 }
4545
4546 if (target_mode != mode)
4547 subtarget = 0;
4548
4549 if (code == GE)
4550 op0 = expand_unop (mode, one_cmpl_optab, op0,
4551 ((STORE_FLAG_VALUE == 1 || normalizep)
4552 ? 0 : subtarget), 0);
4553
4554 if (STORE_FLAG_VALUE == 1 || normalizep)
4555 /* If we are supposed to produce a 0/1 value, we want to do
4556 a logical shift from the sign bit to the low-order bit; for
4557 a -1/0 value, we do an arithmetic shift. */
4558 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4559 size_int (GET_MODE_BITSIZE (mode) - 1),
4560 subtarget, normalizep != -1);
4561
4562 if (mode != target_mode)
4563 op0 = convert_modes (target_mode, mode, op0, 0);
4564
4565 return op0;
4566 }
4567
4568 if (icode != CODE_FOR_nothing)
4569 {
4570 insn_operand_predicate_fn pred;
4571
4572 /* We think we may be able to do this with a scc insn. Emit the
4573 comparison and then the scc insn.
4574
4575 compare_from_rtx may call emit_queue, which would be deleted below
4576 if the scc insn fails. So call it ourselves before setting LAST.
4577 Likewise for do_pending_stack_adjust. */
4578
4579 emit_queue ();
4580 do_pending_stack_adjust ();
4581 last = get_last_insn ();
4582
4583 comparison
4584 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4585 if (CONSTANT_P (comparison))
4586 {
4587 if (GET_CODE (comparison) == CONST_INT)
4588 {
4589 if (comparison == const0_rtx)
4590 return const0_rtx;
4591 }
4592 #ifdef FLOAT_STORE_FLAG_VALUE
4593 else if (GET_CODE (comparison) == CONST_DOUBLE)
4594 {
4595 if (comparison == CONST0_RTX (GET_MODE (comparison)))
4596 return const0_rtx;
4597 }
4598 #endif
4599 else
4600 abort ();
4601 if (normalizep == 1)
4602 return const1_rtx;
4603 if (normalizep == -1)
4604 return constm1_rtx;
4605 return const_true_rtx;
4606 }
4607
4608 /* The code of COMPARISON may not match CODE if compare_from_rtx
4609 decided to swap its operands and reverse the original code.
4610
4611 We know that compare_from_rtx returns either a CONST_INT or
4612 a new comparison code, so it is safe to just extract the
4613 code from COMPARISON. */
4614 code = GET_CODE (comparison);
4615
4616 /* Get a reference to the target in the proper mode for this insn. */
4617 compare_mode = insn_data[(int) icode].operand[0].mode;
4618 subtarget = target;
4619 pred = insn_data[(int) icode].operand[0].predicate;
4620 if (preserve_subexpressions_p ()
4621 || ! (*pred) (subtarget, compare_mode))
4622 subtarget = gen_reg_rtx (compare_mode);
4623
4624 pattern = GEN_FCN (icode) (subtarget);
4625 if (pattern)
4626 {
4627 emit_insn (pattern);
4628
4629 /* If we are converting to a wider mode, first convert to
4630 TARGET_MODE, then normalize. This produces better combining
4631 opportunities on machines that have a SIGN_EXTRACT when we are
4632 testing a single bit. This mostly benefits the 68k.
4633
4634 If STORE_FLAG_VALUE does not have the sign bit set when
4635 interpreted in COMPARE_MODE, we can do this conversion as
4636 unsigned, which is usually more efficient. */
4637 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4638 {
4639 convert_move (target, subtarget,
4640 (GET_MODE_BITSIZE (compare_mode)
4641 <= HOST_BITS_PER_WIDE_INT)
4642 && 0 == (STORE_FLAG_VALUE
4643 & ((HOST_WIDE_INT) 1
4644 << (GET_MODE_BITSIZE (compare_mode) -1))));
4645 op0 = target;
4646 compare_mode = target_mode;
4647 }
4648 else
4649 op0 = subtarget;
4650
4651 /* If we want to keep subexpressions around, don't reuse our
4652 last target. */
4653
4654 if (preserve_subexpressions_p ())
4655 subtarget = 0;
4656
4657 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4658 we don't have to do anything. */
4659 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4660 ;
4661 /* STORE_FLAG_VALUE might be the most negative number, so write
4662 the comparison this way to avoid a compile-time warning. */
4663 else if (- normalizep == STORE_FLAG_VALUE)
4664 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4665
4666 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4667 makes it hard to use a value of just the sign bit due to
4668 ANSI integer constant typing rules. */
4669 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4670 && (STORE_FLAG_VALUE
4671 & ((HOST_WIDE_INT) 1
4672 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4673 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4674 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4675 subtarget, normalizep == 1);
4676 else if (STORE_FLAG_VALUE & 1)
4677 {
4678 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4679 if (normalizep == -1)
4680 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4681 }
4682 else
4683 abort ();
4684
4685 /* If we were converting to a smaller mode, do the
4686 conversion now. */
4687 if (target_mode != compare_mode)
4688 {
4689 convert_move (target, op0, 0);
4690 return target;
4691 }
4692 else
4693 return op0;
4694 }
4695 }
4696
4697 delete_insns_since (last);
4698
4699 /* If expensive optimizations are enabled, use different pseudo registers
4700 for each insn, instead of reusing the same pseudo. This leads to better
4701 CSE, but slows down the compiler, since there are more pseudos. */
4702 subtarget = (!flag_expensive_optimizations
4703 && (target_mode == mode)) ? target : NULL_RTX;
4704
4705 /* If we reached here, we can't do this with a scc insn. However, there
4706 are some comparisons that can be done directly. For example, if
4707 this is an equality comparison of integers, we can try to exclusive-or
4708 (or subtract) the two operands and use a recursive call to try the
4709 comparison with zero. Don't do any of these cases if branches are
4710 very cheap. */
4711
4712 if (BRANCH_COST > 0
4713 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4714 && op1 != const0_rtx)
4715 {
4716 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4717 OPTAB_WIDEN);
4718
4719 if (tem == 0)
4720 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4721 OPTAB_WIDEN);
4722 if (tem != 0)
4723 tem = emit_store_flag (target, code, tem, const0_rtx,
4724 mode, unsignedp, normalizep);
4725 if (tem == 0)
4726 delete_insns_since (last);
4727 return tem;
4728 }
4729
4730 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4731 the constant zero. Reject all other comparisons at this point. Only
4732 do LE and GT if branches are expensive since they are expensive on
4733 2-operand machines. */
4734
4735 if (BRANCH_COST == 0
4736 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4737 || (code != EQ && code != NE
4738 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4739 return 0;
4740
4741 /* See what we need to return. We can only return a 1, -1, or the
4742 sign bit. */
4743
4744 if (normalizep == 0)
4745 {
4746 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4747 normalizep = STORE_FLAG_VALUE;
4748
4749 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4750 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4751 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4752 ;
4753 else
4754 return 0;
4755 }
4756
4757 /* Try to put the result of the comparison in the sign bit. Assume we can't
4758 do the necessary operation below. */
4759
4760 tem = 0;
4761
4762 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4763 the sign bit set: A < 0 sets it directly, and A == 0 makes A - 1 negative. */
4764
4765 if (code == LE)
4766 {
4767 /* This is destructive, so SUBTARGET can't be OP0. */
4768 if (rtx_equal_p (subtarget, op0))
4769 subtarget = 0;
4770
4771 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4772 OPTAB_WIDEN);
4773 if (tem)
4774 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4775 OPTAB_WIDEN);
4776 }
4777
4778 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4779 number of bits in the mode of OP0, minus one; the result is negative exactly when A > 0. */
4780
4781 if (code == GT)
4782 {
4783 if (rtx_equal_p (subtarget, op0))
4784 subtarget = 0;
4785
4786 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4787 size_int (GET_MODE_BITSIZE (mode) - 1),
4788 subtarget, 0);
4789 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4790 OPTAB_WIDEN);
4791 }
4792
4793 if (code == EQ || code == NE)
4794 {
4795 /* For EQ or NE, one way to do the comparison is to apply an operation
4796 that converts the operand into a positive number if it is nonzero
4797 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4798 for NE we negate. This puts the result in the sign bit. Then we
4799 normalize with a shift, if needed.
4800
4801 Two operations that can do the above actions are ABS and FFS, so try
4802 them. If that doesn't work, and MODE is smaller than a full word,
4803 we can use zero-extension to the wider mode (an unsigned conversion)
4804 as the operation. */
4805
4806 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4807 that is compensated by the subsequent overflow when subtracting
4808 one / negating. */
4809
4810 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
4811 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4812 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
4813 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4814 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4815 {
4816 op0 = protect_from_queue (op0, 0);
4817 tem = convert_modes (word_mode, mode, op0, 1);
4818 mode = word_mode;
4819 }
4820
4821 if (tem != 0)
4822 {
4823 if (code == EQ)
4824 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4825 0, OPTAB_WIDEN);
4826 else
4827 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4828 }
4829
4830 /* If we couldn't do it that way, for NE we can "or" the two's complement
4831 of the value with itself. For EQ, we take the one's complement of
4832 that "or", which is an extra insn, so we only handle EQ if branches
4833 are expensive. */
4834
4835 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4836 {
4837 if (rtx_equal_p (subtarget, op0))
4838 subtarget = 0;
4839
4840 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4841 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4842 OPTAB_WIDEN);
4843
4844 if (tem && code == EQ)
4845 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4846 }
4847 }
4848
4849 if (tem && normalizep)
4850 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4851 size_int (GET_MODE_BITSIZE (mode) - 1),
4852 subtarget, normalizep == 1);
4853
4854 if (tem)
4855 {
4856 if (GET_MODE (tem) != target_mode)
4857 {
4858 convert_move (target, tem, 0);
4859 tem = target;
4860 }
4861 else if (!subtarget)
4862 {
4863 emit_move_insn (target, tem);
4864 tem = target;
4865 }
4866 }
4867 else
4868 delete_insns_since (last);
4869
4870 return tem;
4871 }
4872
4873 /* Like emit_store_flag, but always succeeds. */
4874
4875 rtx
4876 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4877 enum machine_mode mode, int unsignedp, int normalizep)
4878 {
4879 rtx tem, label;
4880
4881 /* First see if emit_store_flag can do the job. */
4882 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4883 if (tem != 0)
4884 return tem;
4885
4886 if (normalizep == 0)
4887 normalizep = 1;
4888
4889 /* If this failed, we have to do this with set/compare/jump/set code. */
4890
4891 if (!REG_P (target)
4892 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4893 target = gen_reg_rtx (GET_MODE (target));
4894
4895 emit_move_insn (target, const1_rtx);
4896 label = gen_label_rtx ();
4897 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4898 NULL_RTX, label);
4899
4900 emit_move_insn (target, const0_rtx);
4901 emit_label (label);
4902
4903 return target;
4904 }
4905 \f
4906 /* Perform possibly multi-word comparison and conditional jump to LABEL
4907 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4908
4909 The algorithm is based on the code in expr.c:do_jump.
4910
4911 Note that this does not perform a general comparison. Only variants
4912 generated within expmed.c are correctly handled; others abort (but could
4913 be handled if needed). */
4914
4915 static void
4916 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4917 rtx label)
4918 {
4919 /* If this mode is an integer too wide to compare properly,
4920 compare word by word. Rely on cse to optimize constant cases. */
4921
4922 if (GET_MODE_CLASS (mode) == MODE_INT
4923 && ! can_compare_p (op, mode, ccp_jump))
4924 {
4925 rtx label2 = gen_label_rtx ();
4926
4927 switch (op)
4928 {
4929 case LTU:
4930 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4931 break;
4932
4933 case LEU:
4934 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4935 break;
4936
4937 case LT:
4938 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4939 break;
4940
4941 case GT:
4942 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4943 break;
4944
4945 case GE:
4946 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4947 break;
4948
4949 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4950 those are the only equality operations we do. */
4951 case EQ:
4952 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4953 abort ();
4954 do_jump_by_parts_equality_rtx (arg1, label2, label);
4955 break;
4956
4957 case NE:
4958 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4959 abort ();
4960 do_jump_by_parts_equality_rtx (arg1, label, label2);
4961 break;
4962
4963 default:
4964 abort ();
4965 }
4966
4967 emit_label (label2);
4968 }
4969 else
4970 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
4971 }