/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static void store_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 rtx, int, unsigned int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, int,
						 unsigned int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));
/* Non-zero means divide or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

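  /* Probe the target for each shift count M: splice M (or the multiplier
     1 << M) into the skeleton insns built above and ask recog whether
     the MD file accepts the resulting pattern.  When it does, record
     that pattern's rtx_cost; otherwise the cost keeps the effectively
     infinite value 32000 assigned at the top of each iteration.  */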
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
\f
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; otherwise, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  unsigned HOST_WIDE_INT insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  /* It is wrong to have align==0, since every object is aligned at
     least at a bit boundary.  This usually means a bug elsewhere.  */
  if (align == 0)
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  if ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
       || (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)
	   && GET_MODE_SIZE (fieldmode) != 0))
      && (GET_CODE (op0) != MEM
	  || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
	  || (offset * BITS_PER_UNIT % bitsize == 0
	      && align % GET_MODE_BITSIZE (fieldmode) == 0))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  Also, storing in the entire object
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0, offset), value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

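      /* As a concrete illustration: on a 32-bit target a 50-bit field
	 gives nwords = 2 and fieldmode = DImode, so the loop below
	 stores a full 32-bit word followed by the remaining 18 bits.  */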
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored fits
     within a single word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	}
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
	maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.
	     If OP0 is a non-BLKmode object that is no wider than MAXMODE,
	     use its mode.  Otherwise, use the smallest mode containing the
	     field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
		  && GET_MODE_BITSIZE (bestmode) > align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
\f
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in the memory location specified by OP0, offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have.

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
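
  /* For instance, with BYTES_BIG_ENDIAN, TOTAL_BITS = 32, BITSIZE = 8
     and an incoming BITPOS of 8, the field occupies bits 16..23 counted
     from the lsb, so BITPOS becomes 32 - 8 - 8 = 16.  */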

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
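
/* In effect, the function above computes, as a rough C sketch,

     op0 = (op0 & ~(MASK << BITPOS)) | ((VALUE & MASK) << BITPOS);

   where MASK has BITSIZE low-order one bits, and the AND or IOR is
   skipped when VALUE is known to be all ones or all zeros.  */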
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
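
/* As a worked example: with BITS_PER_WORD == 32, storing a 12-bit field
   at BITPOS 26 of a register pair takes two passes.  The first stores
   the 6 low-order bits of VALUE at bits 26..31 of word 0; the second
   stores the remaining 6 bits at bits 0..5 of word 1 (in little-endian
   bit numbering).  */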
\f
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  unsigned HOST_WIDE_INT extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  unsigned HOST_WIDE_INT extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, align)
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= (VECTOR_MODE_P (tmode) ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1,
						     VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();			/* Should probably push op0 out to memory
				   and then do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	}
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (extzv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extzv_bitsize)))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (extv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
			    word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
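
/* For the signed path above: a 5-bit field at BITPOS 3 extracted in
   QImode needs no left shift (8 == 5 + 3), and a 3-bit arithmetic right
   shift both moves the field to the lsb and sign-extends it.  In
   general the pair of shifts computes, as a sketch,
   (OP0 << (M - BITSIZE - BITPOS)) >> (M - BITSIZE), where M is the
   width of MODE and >> is arithmetic.  */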
\f
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
1716
1717 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1718 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
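/* For example, VALUE == 0x1234 with BITSIZE == 8 and BITPOS == 4 yields
0x340: only the low 8 bits (0x34) survive the truncation before the
shift. */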
1719
1720 static rtx
1721 lshift_value (mode, value, bitpos, bitsize)
1722 enum machine_mode mode;
1723 rtx value;
1724 int bitpos, bitsize;
1725 {
1726 unsigned HOST_WIDE_INT v = INTVAL (value);
1727 HOST_WIDE_INT low, high;
1728
1729 if (bitsize < HOST_BITS_PER_WIDE_INT)
1730 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1731
1732 if (bitpos < HOST_BITS_PER_WIDE_INT)
1733 {
1734 low = v << bitpos;
1735 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1736 }
1737 else
1738 {
1739 low = 0;
1740 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1741 }
1742
1743 return immed_double_const (low, high, mode);
1744 }
1745 \f
1746 /* Extract a bit field that is split across two words
1747 and return an RTX for the result.
1748
1749 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1750 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1751 UNSIGNEDP is 1 if the contents should be zero-extended; else sign-extended.
1752
1753 ALIGN is the known alignment of OP0. This is also the size of the
1754 memory objects to be used. */
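/* As an illustration, on a 32-bit little-endian target a 16-bit field
starting at bit 24 spans two words: the first word supplies the low
8 bits of the result and the second word the high 8 bits, and the
loop below recombines the two parts with a bitwise ior. */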
1755
1756 static rtx
1757 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1758 rtx op0;
1759 unsigned HOST_WIDE_INT bitsize, bitpos;
1760 int unsignedp;
1761 unsigned int align;
1762 {
1763 unsigned int unit;
1764 unsigned int bitsdone = 0;
1765 rtx result = NULL_RTX;
1766 int first = 1;
1767
1768 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1769 much at a time. */
1770 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1771 unit = BITS_PER_WORD;
1772 else
1773 unit = MIN (align, BITS_PER_WORD);
1774
1775 while (bitsdone < bitsize)
1776 {
1777 unsigned HOST_WIDE_INT thissize;
1778 rtx part, word;
1779 unsigned HOST_WIDE_INT thispos;
1780 unsigned HOST_WIDE_INT offset;
1781
1782 offset = (bitpos + bitsdone) / unit;
1783 thispos = (bitpos + bitsdone) % unit;
1784
1785 /* THISSIZE must not overrun a word boundary. Otherwise,
1786 extract_fixed_bit_field will call us again, and we will mutually
1787 recurse forever. */
1788 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1789 thissize = MIN (thissize, unit - thispos);
1790
1791 /* If OP0 is a register, then handle OFFSET here.
1792
1793 When handling multiword bitfields, extract_bit_field may pass
1794 down a word_mode SUBREG of a larger REG for a bitfield that actually
1795 crosses a word boundary. Thus, for a SUBREG, we must find
1796 the current word starting from the base register. */
1797 if (GET_CODE (op0) == SUBREG)
1798 {
1799 word = operand_subword_force (SUBREG_REG (op0),
1800 SUBREG_WORD (op0) + offset,
1801 GET_MODE (SUBREG_REG (op0)));
1802 offset = 0;
1803 }
1804 else if (GET_CODE (op0) == REG)
1805 {
1806 word = operand_subword_force (op0, offset, GET_MODE (op0));
1807 offset = 0;
1808 }
1809 else
1810 word = op0;
1811
1812 /* Extract the parts in bit-counting order,
1813 whose meaning is determined by BYTES_PER_UNIT.
1814 OFFSET is in UNITs, and UNIT is in bits.
1815 extract_fixed_bit_field wants offset in bytes. */
1816 part = extract_fixed_bit_field (word_mode, word,
1817 offset * unit / BITS_PER_UNIT,
1818 thissize, thispos, 0, 1, align);
1819 bitsdone += thissize;
1820
1821 /* Shift this part into place for the result. */
1822 if (BYTES_BIG_ENDIAN)
1823 {
1824 if (bitsize != bitsdone)
1825 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1826 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1827 }
1828 else
1829 {
1830 if (bitsdone != thissize)
1831 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1832 build_int_2 (bitsdone - thissize, 0), 0, 1);
1833 }
1834
1835 if (first)
1836 result = part;
1837 else
1838 /* Combine the parts with bitwise or. This works
1839 because we extracted each part as an unsigned bit field. */
1840 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1841 OPTAB_LIB_WIDEN);
1842
1843 first = 0;
1844 }
1845
1846 /* Unsigned bit field: we are done. */
1847 if (unsignedp)
1848 return result;
1849 /* Signed bit field: sign-extend with two arithmetic shifts. */
1850 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1851 build_int_2 (BITS_PER_WORD - bitsize, 0),
1852 NULL_RTX, 0);
1853 return expand_shift (RSHIFT_EXPR, word_mode, result,
1854 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1855 }
1856 \f
1857 /* Add INC into TARGET. */
1858
1859 void
1860 expand_inc (target, inc)
1861 rtx target, inc;
1862 {
1863 rtx value = expand_binop (GET_MODE (target), add_optab,
1864 target, inc,
1865 target, 0, OPTAB_LIB_WIDEN);
1866 if (value != target)
1867 emit_move_insn (target, value);
1868 }
1869
1870 /* Subtract DEC from TARGET. */
1871
1872 void
1873 expand_dec (target, dec)
1874 rtx target, dec;
1875 {
1876 rtx value = expand_binop (GET_MODE (target), sub_optab,
1877 target, dec,
1878 target, 0, OPTAB_LIB_WIDEN);
1879 if (value != target)
1880 emit_move_insn (target, value);
1881 }
1882 \f
1883 /* Output a shift instruction for expression code CODE,
1884 with SHIFTED being the rtx for the value to shift,
1885 and AMOUNT the tree for the amount to shift by.
1886 Store the result in the rtx TARGET, if that is convenient.
1887 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1888 Return the rtx for where the value is. */
1889
1890 rtx
1891 expand_shift (code, mode, shifted, amount, target, unsignedp)
1892 enum tree_code code;
1893 register enum machine_mode mode;
1894 rtx shifted;
1895 tree amount;
1896 register rtx target;
1897 int unsignedp;
1898 {
1899 register rtx op1, temp = 0;
1900 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1901 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1902 int try;
1903
1904 /* We previously detected shift counts computed by NEGATE_EXPR
1905 and shifted in the other direction, but that does not work
1906 on all machines. */
1907
1908 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1909
1910 #ifdef SHIFT_COUNT_TRUNCATED
1911 if (SHIFT_COUNT_TRUNCATED)
1912 {
1913 if (GET_CODE (op1) == CONST_INT
1914 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1915 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1916 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1917 % GET_MODE_BITSIZE (mode));
1918 else if (GET_CODE (op1) == SUBREG
1919 && SUBREG_WORD (op1) == 0)
1920 op1 = SUBREG_REG (op1);
1921 }
1922 #endif
1923
1924 if (op1 == const0_rtx)
1925 return shifted;
1926
1927 for (try = 0; temp == 0 && try < 3; try++)
1928 {
1929 enum optab_methods methods;
1930
1931 if (try == 0)
1932 methods = OPTAB_DIRECT;
1933 else if (try == 1)
1934 methods = OPTAB_WIDEN;
1935 else
1936 methods = OPTAB_LIB_WIDEN;
1937
1938 if (rotate)
1939 {
1940 /* Widening does not work for rotation. */
1941 if (methods == OPTAB_WIDEN)
1942 continue;
1943 else if (methods == OPTAB_LIB_WIDEN)
1944 {
1945 /* If we have been unable to open-code this by a rotation,
1946 do it as the IOR of two shifts. I.e., to rotate A
1947 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1948 where C is the bitsize of A.
1949
1950 It is theoretically possible that the target machine might
1951 not be able to perform either shift and hence we would
1952 be making two libcalls rather than just the one for the
1953 shift (similarly if IOR could not be done). We will allow
1954 this extremely unlikely lossage to avoid complicating the
1955 code below. */
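/* Concretely, for a 32-bit left rotate by 8 this emits
(A << 8) | ((unsigned) A >> 24). */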
1956
1957 rtx subtarget = target == shifted ? 0 : target;
1958 rtx temp1;
1959 tree type = TREE_TYPE (amount);
1960 tree new_amount = make_tree (type, op1);
1961 tree other_amount
1962 = fold (build (MINUS_EXPR, type,
1963 convert (type,
1964 build_int_2 (GET_MODE_BITSIZE (mode),
1965 0)),
1966 amount));
1967
1968 shifted = force_reg (mode, shifted);
1969
1970 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1971 mode, shifted, new_amount, subtarget, 1);
1972 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1973 mode, shifted, other_amount, 0, 1);
1974 return expand_binop (mode, ior_optab, temp, temp1, target,
1975 unsignedp, methods);
1976 }
1977
1978 temp = expand_binop (mode,
1979 left ? rotl_optab : rotr_optab,
1980 shifted, op1, target, unsignedp, methods);
1981
1982 /* If we don't have the rotate, but we are rotating by a constant
1983 that is in range, try a rotate in the opposite direction. */
1984
1985 if (temp == 0 && GET_CODE (op1) == CONST_INT
1986 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1987 temp = expand_binop (mode,
1988 left ? rotr_optab : rotl_optab,
1989 shifted,
1990 GEN_INT (GET_MODE_BITSIZE (mode)
1991 - INTVAL (op1)),
1992 target, unsignedp, methods);
1993 }
1994 else if (unsignedp)
1995 temp = expand_binop (mode,
1996 left ? ashl_optab : lshr_optab,
1997 shifted, op1, target, unsignedp, methods);
1998
1999 /* Do arithmetic shifts.
2000 Also, if we are going to widen the operand, we can just as well
2001 use an arithmetic right-shift instead of a logical one. */
2002 if (temp == 0 && ! rotate
2003 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2004 {
2005 enum optab_methods methods1 = methods;
2006
2007 /* If trying to widen a logical shift to an arithmetic shift,
2008 don't accept an arithmetic shift of the same size. */
2009 if (unsignedp)
2010 methods1 = OPTAB_MUST_WIDEN;
2011
2012 /* Arithmetic shift */
2013
2014 temp = expand_binop (mode,
2015 left ? ashl_optab : ashr_optab,
2016 shifted, op1, target, unsignedp, methods1);
2017 }
2018
2019 /* We used to try extzv here for logical right shifts, but that was
2020 only useful for one machine, the VAX, and caused poor code
2021 generation there for lshrdi3, so the code was deleted and a
2022 define_expand for lshrsi3 was added to vax.md. */
2023 }
2024
2025 if (temp == 0)
2026 abort ();
2027 return temp;
2028 }
2029 \f
2030 enum alg_code { alg_zero, alg_m, alg_shift,
2031 alg_add_t_m2, alg_sub_t_m2,
2032 alg_add_factor, alg_sub_factor,
2033 alg_add_t2_m, alg_sub_t2_m,
2034 alg_add, alg_subtract, alg_factor, alg_shiftop };
2035
2036 /* This structure records a sequence of operations.
2037 `ops' is the number of operations recorded.
2038 `cost' is their total cost.
2039 The operations are stored in `op' and the corresponding
2040 logarithms of the integer coefficients in `log'.
2041
2042 These are the operations:
2043 alg_zero total := 0;
2044 alg_m total := multiplicand;
2045 alg_shift total := total * coeff;
2046 alg_add_t_m2 total := total + multiplicand * coeff;
2047 alg_sub_t_m2 total := total - multiplicand * coeff;
2048 alg_add_factor total := total * coeff + total;
2049 alg_sub_factor total := total * coeff - total;
2050 alg_add_t2_m total := total * coeff + multiplicand;
2051 alg_sub_t2_m total := total * coeff - multiplicand;
2052
2053 The first operation must be either alg_zero or alg_m. */
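/* As an illustration, multiplying by 10 can be recorded as alg_m
(total := multiplicand), then alg_add_factor with log 2
(total := total * 4 + total, i.e. 5 * multiplicand), then alg_shift
with log 1 (total := total * 2), giving 10 * multiplicand. */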
2054
2055 struct algorithm
2056 {
2057 short cost;
2058 short ops;
2059 /* The size of the OP and LOG fields are not directly related to the
2060 word size, but the worst-case algorithms will be if we have few
2061 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2062 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2063 in total wordsize operations. */
2064 enum alg_code op[MAX_BITS_PER_WORD];
2065 char log[MAX_BITS_PER_WORD];
2066 };
2067
2068 static void synth_mult PARAMS ((struct algorithm *,
2069 unsigned HOST_WIDE_INT,
2070 int));
2071 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2072 int, int,
2073 unsigned HOST_WIDE_INT *,
2074 int *, int *));
2075 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2076 int));
2077 /* Compute the best algorithm for multiplying by T, storing it in *ALG_OUT.
2078 The algorithm must cost less than COST_LIMIT.
2079 If ALG_OUT->cost >= COST_LIMIT, no algorithm was found and all
2080 other fields of the returned struct are undefined. */
2081
2082 static void
2083 synth_mult (alg_out, t, cost_limit)
2084 struct algorithm *alg_out;
2085 unsigned HOST_WIDE_INT t;
2086 int cost_limit;
2087 {
2088 int m;
2089 struct algorithm *alg_in, *best_alg;
2090 int cost;
2091 unsigned HOST_WIDE_INT q;
2092
2093 /* Indicate that no algorithm is yet found. If no algorithm
2094 is found, this value will be returned and indicate failure. */
2095 alg_out->cost = cost_limit;
2096
2097 if (cost_limit <= 0)
2098 return;
2099
2100 /* t == 1 can be done at zero cost. */
2101 if (t == 1)
2102 {
2103 alg_out->ops = 1;
2104 alg_out->cost = 0;
2105 alg_out->op[0] = alg_m;
2106 return;
2107 }
2108
2109 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2110 fail now. */
2111 if (t == 0)
2112 {
2113 if (zero_cost >= cost_limit)
2114 return;
2115 else
2116 {
2117 alg_out->ops = 1;
2118 alg_out->cost = zero_cost;
2119 alg_out->op[0] = alg_zero;
2120 return;
2121 }
2122 }
2123
2124 /* We'll need a couple of extra algorithm structures now. */
2125
2126 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2127 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2128
2129 /* If we have a group of zero bits at the low-order part of T, try
2130 multiplying by the remaining bits and then doing a shift. */
2131
2132 if ((t & 1) == 0)
2133 {
2134 m = floor_log2 (t & -t); /* m = number of low zero bits */
2135 q = t >> m;
2136 cost = shift_cost[m];
2137 synth_mult (alg_in, q, cost_limit - cost);
2138
2139 cost += alg_in->cost;
2140 if (cost < cost_limit)
2141 {
2142 struct algorithm *x;
2143 x = alg_in, alg_in = best_alg, best_alg = x;
2144 best_alg->log[best_alg->ops] = m;
2145 best_alg->op[best_alg->ops] = alg_shift;
2146 cost_limit = cost;
2147 }
2148 }
2149
2150 /* If we have an odd number, add or subtract one. */
2151 if ((t & 1) != 0)
2152 {
2153 unsigned HOST_WIDE_INT w;
2154
2155 for (w = 1; (w & t) != 0; w <<= 1)
2156 ;
2157 /* If T was -1, then W will be zero after the loop. This is another
2158 case where T ends with ...111. Handling this by multiplying by
2159 (T + 1) and subtracting 1 produces slightly better code and makes
2160 algorithm selection much faster than treating it like the ...0111
2161 case below. */
2162 if (w == 0
2163 || (w > 2
2164 /* Reject the case where t is 3.
2165 Thus we prefer addition in that case. */
2166 && t != 3))
2167 {
2168 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2169
2170 cost = add_cost;
2171 synth_mult (alg_in, t + 1, cost_limit - cost);
2172
2173 cost += alg_in->cost;
2174 if (cost < cost_limit)
2175 {
2176 struct algorithm *x;
2177 x = alg_in, alg_in = best_alg, best_alg = x;
2178 best_alg->log[best_alg->ops] = 0;
2179 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2180 cost_limit = cost;
2181 }
2182 }
2183 else
2184 {
2185 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2186
2187 cost = add_cost;
2188 synth_mult (alg_in, t - 1, cost_limit - cost);
2189
2190 cost += alg_in->cost;
2191 if (cost < cost_limit)
2192 {
2193 struct algorithm *x;
2194 x = alg_in, alg_in = best_alg, best_alg = x;
2195 best_alg->log[best_alg->ops] = 0;
2196 best_alg->op[best_alg->ops] = alg_add_t_m2;
2197 cost_limit = cost;
2198 }
2199 }
2200 }
2201
2202 /* Look for factors of t of the form
2203 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2204 If we find such a factor, we can multiply by t using an algorithm that
2205 multiplies by q, shifts the result left by m, and adds or subtracts the unshifted result.
2206
2207 We search for large factors first and loop down, even if large factors
2208 are less probable than small ones; if we find a large factor we will find a
2209 good sequence quickly, and therefore be able to prune (by decreasing
2210 COST_LIMIT) the search. */
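/* For instance, t = 15 factors as 5 * (2**2 - 1): we synthesize 5*x
recursively and record alg_sub_factor with m == 2, computing
(5*x << 2) - 5*x == 15*x. */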
2211
2212 for (m = floor_log2 (t - 1); m >= 2; m--)
2213 {
2214 unsigned HOST_WIDE_INT d;
2215
2216 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2217 if (t % d == 0 && t > d)
2218 {
2219 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2220 synth_mult (alg_in, t / d, cost_limit - cost);
2221
2222 cost += alg_in->cost;
2223 if (cost < cost_limit)
2224 {
2225 struct algorithm *x;
2226 x = alg_in, alg_in = best_alg, best_alg = x;
2227 best_alg->log[best_alg->ops] = m;
2228 best_alg->op[best_alg->ops] = alg_add_factor;
2229 cost_limit = cost;
2230 }
2231 /* Other factors will have been taken care of in the recursion. */
2232 break;
2233 }
2234
2235 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2236 if (t % d == 0 && t > d)
2237 {
2238 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2239 synth_mult (alg_in, t / d, cost_limit - cost);
2240
2241 cost += alg_in->cost;
2242 if (cost < cost_limit)
2243 {
2244 struct algorithm *x;
2245 x = alg_in, alg_in = best_alg, best_alg = x;
2246 best_alg->log[best_alg->ops] = m;
2247 best_alg->op[best_alg->ops] = alg_sub_factor;
2248 cost_limit = cost;
2249 }
2250 break;
2251 }
2252 }
2253
2254 /* Try shift-and-add (load effective address) instructions,
2255 i.e. do a*3, a*5, a*9. */
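/* E.g. for t == 9, q == t - 1 == 8 gives m == 3; the recursion on
(t - 1) >> m == 1 leaves alg_m, and the recorded alg_add_t2_m step
computes total := (total << 3) + multiplicand == 9 * multiplicand. */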
2256 if ((t & 1) != 0)
2257 {
2258 q = t - 1;
2259 q = q & -q;
2260 m = exact_log2 (q);
2261 if (m >= 0)
2262 {
2263 cost = shiftadd_cost[m];
2264 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2265
2266 cost += alg_in->cost;
2267 if (cost < cost_limit)
2268 {
2269 struct algorithm *x;
2270 x = alg_in, alg_in = best_alg, best_alg = x;
2271 best_alg->log[best_alg->ops] = m;
2272 best_alg->op[best_alg->ops] = alg_add_t2_m;
2273 cost_limit = cost;
2274 }
2275 }
2276
2277 q = t + 1;
2278 q = q & -q;
2279 m = exact_log2 (q);
2280 if (m >= 0)
2281 {
2282 cost = shiftsub_cost[m];
2283 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2284
2285 cost += alg_in->cost;
2286 if (cost < cost_limit)
2287 {
2288 struct algorithm *x;
2289 x = alg_in, alg_in = best_alg, best_alg = x;
2290 best_alg->log[best_alg->ops] = m;
2291 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2292 cost_limit = cost;
2293 }
2294 }
2295 }
2296
2297 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2298 we have not found any algorithm. */
2299 if (cost_limit == alg_out->cost)
2300 return;
2301
2302 /* If the sequence is getting too long for `struct algorithm'
2303 to record, make this search fail. */
2304 if (best_alg->ops == MAX_BITS_PER_WORD)
2305 return;
2306
2307 /* Copy the algorithm from temporary space to the space at alg_out.
2308 We avoid using structure assignment because the majority of
2309 best_alg is normally undefined, and this is a critical function. */
2310 alg_out->ops = best_alg->ops + 1;
2311 alg_out->cost = cost_limit;
2312 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2313 alg_out->ops * sizeof *alg_out->op);
2314 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2315 alg_out->ops * sizeof *alg_out->log);
2316 }
2317 \f
2318 /* Perform a multiplication and return an rtx for the result.
2319 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2320 TARGET is a suggestion for where to store the result (an rtx).
2321
2322 We check specially for a constant integer as OP1.
2323 If you want this check for OP0 as well, then before calling
2324 you should swap the two operands if OP0 would be constant. */
2325
2326 rtx
2327 expand_mult (mode, op0, op1, target, unsignedp)
2328 enum machine_mode mode;
2329 register rtx op0, op1, target;
2330 int unsignedp;
2331 {
2332 rtx const_op1 = op1;
2333
2334 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2335 less than or equal in size to `unsigned int' this doesn't matter.
2336 If the mode is larger than `unsigned int', then synth_mult works only
2337 if the constant value exactly fits in an `unsigned int' without any
2338 truncation. This means that multiplying by negative values does
2339 not work; results are off by 2^32 on a 32 bit machine. */
2340
2341 /* If we are multiplying in DImode, it may still be a win
2342 to try to work with shifts and adds. */
2343 if (GET_CODE (op1) == CONST_DOUBLE
2344 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2345 && HOST_BITS_PER_INT >= BITS_PER_WORD
2346 && CONST_DOUBLE_HIGH (op1) == 0)
2347 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2348 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2349 && GET_CODE (op1) == CONST_INT
2350 && INTVAL (op1) < 0)
2351 const_op1 = 0;
2352
2353 /* We used to test optimize here, on the grounds that it's better to
2354 produce a smaller program when -O is not used.
2355 But this causes such a terrible slowdown sometimes
2356 that it seems better to use synth_mult always. */
2357
2358 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2359 {
2360 struct algorithm alg;
2361 struct algorithm alg2;
2362 HOST_WIDE_INT val = INTVAL (op1);
2363 HOST_WIDE_INT val_so_far;
2364 rtx insn;
2365 int mult_cost;
2366 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2367
2368 /* Try to do the computation three ways: multiply by the negative of OP1
2369 and then negate, do the multiplication directly, or do multiplication
2370 by OP1 - 1. */
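/* E.g. a multiplication by -3 may be done as -(x * 3) (negate_variant),
and one by 2**k + 1 as (x * 2**k) + x (add_variant). */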
2371
2372 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2373 mult_cost = MIN (12 * add_cost, mult_cost);
2374
2375 synth_mult (&alg, val, mult_cost);
2376
2377 /* This works only if the inverted value actually fits in an
2378 `unsigned int'. */
2379 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2380 {
2381 synth_mult (&alg2, - val,
2382 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2383 if (alg2.cost + negate_cost < alg.cost)
2384 alg = alg2, variant = negate_variant;
2385 }
2386
2387 /* This proves very useful for division-by-constant. */
2388 synth_mult (&alg2, val - 1,
2389 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2390 if (alg2.cost + add_cost < alg.cost)
2391 alg = alg2, variant = add_variant;
2392
2393 if (alg.cost < mult_cost)
2394 {
2395 /* We found something cheaper than a multiply insn. */
2396 int opno;
2397 rtx accum, tem;
2398
2399 op0 = protect_from_queue (op0, 0);
2400
2401 /* Avoid referencing memory over and over.
2402 For speed, but also for correctness when mem is volatile. */
2403 if (GET_CODE (op0) == MEM)
2404 op0 = force_reg (mode, op0);
2405
2406 /* ACCUM starts out either as OP0 or as a zero, depending on
2407 the first operation. */
2408
2409 if (alg.op[0] == alg_zero)
2410 {
2411 accum = copy_to_mode_reg (mode, const0_rtx);
2412 val_so_far = 0;
2413 }
2414 else if (alg.op[0] == alg_m)
2415 {
2416 accum = copy_to_mode_reg (mode, op0);
2417 val_so_far = 1;
2418 }
2419 else
2420 abort ();
2421
2422 for (opno = 1; opno < alg.ops; opno++)
2423 {
2424 int log = alg.log[opno];
2425 int preserve = preserve_subexpressions_p ();
2426 rtx shift_subtarget = preserve ? 0 : accum;
2427 rtx add_target
2428 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2429 && ! preserve)
2430 ? target : 0;
2431 rtx accum_target = preserve ? 0 : accum;
2432
2433 switch (alg.op[opno])
2434 {
2435 case alg_shift:
2436 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2437 build_int_2 (log, 0), NULL_RTX, 0);
2438 val_so_far <<= log;
2439 break;
2440
2441 case alg_add_t_m2:
2442 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2443 build_int_2 (log, 0), NULL_RTX, 0);
2444 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2445 add_target
2446 ? add_target : accum_target);
2447 val_so_far += (HOST_WIDE_INT) 1 << log;
2448 break;
2449
2450 case alg_sub_t_m2:
2451 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2452 build_int_2 (log, 0), NULL_RTX, 0);
2453 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2454 add_target
2455 ? add_target : accum_target);
2456 val_so_far -= (HOST_WIDE_INT) 1 << log;
2457 break;
2458
2459 case alg_add_t2_m:
2460 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2461 build_int_2 (log, 0), shift_subtarget,
2462 0);
2463 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2464 add_target
2465 ? add_target : accum_target);
2466 val_so_far = (val_so_far << log) + 1;
2467 break;
2468
2469 case alg_sub_t2_m:
2470 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2471 build_int_2 (log, 0), shift_subtarget,
2472 0);
2473 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2474 add_target
2475 ? add_target : accum_target);
2476 val_so_far = (val_so_far << log) - 1;
2477 break;
2478
2479 case alg_add_factor:
2480 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2481 build_int_2 (log, 0), NULL_RTX, 0);
2482 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2483 add_target
2484 ? add_target : accum_target);
2485 val_so_far += val_so_far << log;
2486 break;
2487
2488 case alg_sub_factor:
2489 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2490 build_int_2 (log, 0), NULL_RTX, 0);
2491 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2492 (add_target ? add_target
2493 : preserve ? 0 : tem));
2494 val_so_far = (val_so_far << log) - val_so_far;
2495 break;
2496
2497 default:
2498 abort ();
2499 }
2500
2501 /* Write a REG_EQUAL note on the last insn so that we can cse
2502 multiplication sequences. */
2503
2504 insn = get_last_insn ();
2505 set_unique_reg_note (insn,
2506 REG_EQUAL,
2507 gen_rtx_MULT (mode, op0,
2508 GEN_INT (val_so_far)));
2509 }
2510
2511 if (variant == negate_variant)
2512 {
2513 val_so_far = - val_so_far;
2514 accum = expand_unop (mode, neg_optab, accum, target, 0);
2515 }
2516 else if (variant == add_variant)
2517 {
2518 val_so_far = val_so_far + 1;
2519 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2520 }
2521
2522 if (val != val_so_far)
2523 abort ();
2524
2525 return accum;
2526 }
2527 }
2528
2529 /* This used to use umul_optab if unsigned, but for non-widening multiply
2530 there is no difference between signed and unsigned. */
2531 op0 = expand_binop (mode, smul_optab,
2532 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2533 if (op0 == 0)
2534 abort ();
2535 return op0;
2536 }
2537 \f
2538 /* Return the smallest n such that 2**n >= X. */
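/* E.g. ceil_log2 (5) == 3 and ceil_log2 (8) == 3. */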
2539
2540 int
2541 ceil_log2 (x)
2542 unsigned HOST_WIDE_INT x;
2543 {
2544 return floor_log2 (x - 1) + 1;
2545 }
2546
2547 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2548 replace division by D, and put the least significant N bits of the result
2549 in *MULTIPLIER_PTR and return the most significant bit.
2550
2551 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2552 needed precision is in PRECISION (should be <= N).
2553
2554 PRECISION should be as small as possible so this function can choose a
2555 multiplier more freely.
2556
2557 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2558 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2559
2560 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2561 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
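/* For instance, for D == 3, N == 32 and PRECISION == 32 this yields the
well-known multiplier 0xaaaaaaab with *POST_SHIFT_PTR == 1, so that
for any 32-bit unsigned x, x / 3 == (x * 0xaaaaaaab) >> 33. */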
2562
2563 static
2564 unsigned HOST_WIDE_INT
2565 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2566 unsigned HOST_WIDE_INT d;
2567 int n;
2568 int precision;
2569 unsigned HOST_WIDE_INT *multiplier_ptr;
2570 int *post_shift_ptr;
2571 int *lgup_ptr;
2572 {
2573 HOST_WIDE_INT mhigh_hi, mlow_hi;
2574 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2575 int lgup, post_shift;
2576 int pow, pow2;
2577 unsigned HOST_WIDE_INT nl, dummy1;
2578 HOST_WIDE_INT nh, dummy2;
2579
2580 /* lgup = ceil(log2(divisor)); */
2581 lgup = ceil_log2 (d);
2582
2583 if (lgup > n)
2584 abort ();
2585
2586 pow = n + lgup;
2587 pow2 = n + lgup - precision;
2588
2589 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2590 {
2591 /* We could handle this with some effort, but this case is much better
2592 handled directly with a scc insn, so rely on caller using that. */
2593 abort ();
2594 }
2595
2596 /* mlow = 2^(N + lgup)/d */
2597 if (pow >= HOST_BITS_PER_WIDE_INT)
2598 {
2599 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2600 nl = 0;
2601 }
2602 else
2603 {
2604 nh = 0;
2605 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2606 }
2607 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2608 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2609
2610 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2611 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2612 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2613 else
2614 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2615 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2616 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2617
2618 if (mhigh_hi && nh - d >= d)
2619 abort ();
2620 if (mhigh_hi > 1 || mlow_hi > 1)
2621 abort ();
2622 /* Assert that mlow < mhigh. */
2623 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2624 abort ();
2625
2626 /* If precision == N, then mlow, mhigh exceed 2^N
2627 (but they do not exceed 2^(N+1)). */
2628
2629 /* Reduce to lowest terms */
2630 for (post_shift = lgup; post_shift > 0; post_shift--)
2631 {
2632 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2633 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2634 if (ml_lo >= mh_lo)
2635 break;
2636
2637 mlow_hi = 0;
2638 mlow_lo = ml_lo;
2639 mhigh_hi = 0;
2640 mhigh_lo = mh_lo;
2641 }
2642
2643 *post_shift_ptr = post_shift;
2644 *lgup_ptr = lgup;
2645 if (n < HOST_BITS_PER_WIDE_INT)
2646 {
2647 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2648 *multiplier_ptr = mhigh_lo & mask;
2649 return mhigh_lo >= mask;
2650 }
2651 else
2652 {
2653 *multiplier_ptr = mhigh_lo;
2654 return mhigh_hi;
2655 }
2656 }
2657
2658 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2659 congruent to 1 (mod 2**N). */
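/* E.g. invert_mod2n (3, 8) == 0xab, since 3 * 0xab == 0x201
== 1 (mod 0x100). */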
2660
2661 static unsigned HOST_WIDE_INT
2662 invert_mod2n (x, n)
2663 unsigned HOST_WIDE_INT x;
2664 int n;
2665 {
2666 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2667
2668 /* The algorithm notes that the choice y = x satisfies
2669 x*y == 1 mod 2^3, since x is assumed odd.
2670 Each iteration doubles the number of bits of significance in y. */
2671
2672 unsigned HOST_WIDE_INT mask;
2673 unsigned HOST_WIDE_INT y = x;
2674 int nbit = 3;
2675
2676 mask = (n == HOST_BITS_PER_WIDE_INT
2677 ? ~(unsigned HOST_WIDE_INT) 0
2678 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2679
2680 while (nbit < n)
2681 {
2682 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2683 nbit *= 2;
2684 }
2685 return y;
2686 }
2687
2688 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
2689 signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half
2690 of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed
2691 product to become unsigned; if UNSIGNEDP is zero, adjust the unsigned
2692 product to become signed.
2693
2694 The result is put in TARGET if that is convenient.
2695
2696 MODE is the mode of operation. */
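/* The adjustment is based on the identity, modulo 2**GET_MODE_BITSIZE (MODE):
unsigned_high (op0 * op1)
== signed_high (op0 * op1) + (op0 < 0 ? op1 : 0) + (op1 < 0 ? op0 : 0).
The arithmetic right shifts by size - 1 below produce the all-ones
masks that select OP1 and OP0 for the two conditional terms. */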
2697
2698 rtx
2699 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2700 enum machine_mode mode;
2701 register rtx adj_operand, op0, op1, target;
2702 int unsignedp;
2703 {
2704 rtx tem;
2705 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2706
2707 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2708 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2709 NULL_RTX, 0);
2710 tem = expand_and (tem, op1, NULL_RTX);
2711 adj_operand
2712 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2713 adj_operand);
2714
2715 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2716 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2717 NULL_RTX, 0);
2718 tem = expand_and (tem, op0, NULL_RTX);
2719 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2720 target);
2721
2722 return target;
2723 }
2724
2725 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2726 in TARGET if that is convenient, and return where the result is. If the
2727 operation cannot be performed, 0 is returned.
2728
2729 MODE is the mode of operation and result.
2730
2731 UNSIGNEDP nonzero means unsigned multiply.
2732
2733 MAX_COST is the total allowed cost for the expanded RTL. */
2734
2735 rtx
2736 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2737 enum machine_mode mode;
2738 register rtx op0, target;
2739 unsigned HOST_WIDE_INT cnst1;
2740 int unsignedp;
2741 int max_cost;
2742 {
2743 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2744 optab mul_highpart_optab;
2745 optab moptab;
2746 rtx tem;
2747 int size = GET_MODE_BITSIZE (mode);
2748 rtx op1, wide_op1;
2749
2750 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2751 if (size > HOST_BITS_PER_WIDE_INT)
2752 abort ();
2753
2754 op1 = GEN_INT (cnst1);
2755
2756 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2757 wide_op1 = op1;
2758 else
2759 wide_op1
2760 = immed_double_const (cnst1,
2761 (unsignedp
2762 ? (HOST_WIDE_INT) 0
2763 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2764 wider_mode);
2765
2766 /* expand_mult handles constant multiplication of word_mode
2767 or narrower. It does a poor job for large modes. */
2768 if (size < BITS_PER_WORD
2769 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2770 {
2771 /* We have to do this, since expand_binop doesn't do conversion for
2772 multiply. Maybe change expand_binop to handle widening multiply? */
2773 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2774
2775 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2776 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2777 build_int_2 (size, 0), NULL_RTX, 1);
2778 return convert_modes (mode, wider_mode, tem, unsignedp);
2779 }
2780
2781 if (target == 0)
2782 target = gen_reg_rtx (mode);
2783
2784 /* First, try using a multiplication insn that generates only the needed
2785 high part of the product, in the signedness flavor of UNSIGNEDP. */
2786 if (mul_highpart_cost[(int) mode] < max_cost)
2787 {
2788 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2789 target = expand_binop (mode, mul_highpart_optab,
2790 op0, op1, target, unsignedp, OPTAB_DIRECT);
2791 if (target)
2792 return target;
2793 }
2794
2795 /* Second, the same as above, but use the signedness flavor opposite to UNSIGNEDP.
2796 Need to adjust the result after the multiplication. */
2797 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2798 {
2799 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2800 target = expand_binop (mode, mul_highpart_optab,
2801 op0, op1, target, unsignedp, OPTAB_DIRECT);
2802 if (target)
2803 /* We used the wrong signedness. Adjust the result. */
2804 return expand_mult_highpart_adjust (mode, target, op0,
2805 op1, target, unsignedp);
2806 }
2807
2808 /* Try widening multiplication. */
2809 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2810 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2811 && mul_widen_cost[(int) wider_mode] < max_cost)
2812 {
2813 op1 = force_reg (mode, op1);
2814 goto try;
2815 }
2816
2817 /* Try widening the mode and performing a non-widening multiplication. */
2818 moptab = smul_optab;
2819 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2820 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2821 {
2822 op1 = wide_op1;
2823 goto try;
2824 }
2825
2826 /* Try widening multiplication of opposite signedness, and adjust. */
2827 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2828 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2829 && (mul_widen_cost[(int) wider_mode]
2830 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2831 {
2832 rtx regop1 = force_reg (mode, op1);
2833 tem = expand_binop (wider_mode, moptab, op0, regop1,
2834 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2835 if (tem != 0)
2836 {
2837 /* Extract the high half of the just generated product. */
2838 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2839 build_int_2 (size, 0), NULL_RTX, 1);
2840 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2841 /* We used the wrong signedness. Adjust the result. */
2842 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2843 target, unsignedp);
2844 }
2845 }
2846
2847 return 0;
2848
2849 try:
2850 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2851 tem = expand_binop (wider_mode, moptab, op0, op1,
2852 NULL_RTX, unsignedp, OPTAB_WIDEN);
2853 if (tem == 0)
2854 return 0;
2855
2856 /* Extract the high half of the just generated product. */
2857 if (mode == word_mode)
2858 {
2859 return gen_highpart (mode, tem);
2860 }
2861 else
2862 {
2863 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2864 build_int_2 (size, 0), NULL_RTX, 1);
2865 return convert_modes (mode, wider_mode, tem, unsignedp);
2866 }
2867 }
2868 \f
2869 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2870 if that is convenient, and returning where the result is.
2871 You may request either the quotient or the remainder as the result;
2872 specify REM_FLAG nonzero to get the remainder.
2873
2874 CODE is the expression code for which kind of division this is;
2875 it controls how rounding is done. MODE is the machine mode to use.
2876 UNSIGNEDP nonzero means do unsigned division. */
2877
2878 /* ??? For CEIL_MOD_EXPR, we can compute an incorrect remainder with ANDI
2879 and then correct it by or'ing in the missing high bits
2880 if the result of ANDI is nonzero.
2881 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2882 This could optimize to a bfexts instruction.
2883 But C doesn't use these operations, so their optimizations are
2884 left for later. */
2885 /* ??? For modulo, we don't actually need the highpart of the first product,
2886 the low part will do nicely. And for small divisors, the second multiply
2887 can also be a low-part only multiply or even be completely left out.
2888 E.g. to calculate the remainder of a division by 3 with a 32 bit
2889 multiply, multiply with 0x55555556 and extract the upper two bits;
2890 the result is exact for inputs up to 0x1fffffff.
2891 The input range can be reduced by using cross-sum rules.
2892 For odd divisors >= 3, the following table gives right shift counts
2893 so that if a number is shifted by an integer multiple of the given
2894 amount, the remainder stays the same:
2895 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2896 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2897 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2898 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2899 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2900
2901 Cross-sum rules for even numbers can be derived by leaving as many bits
2902 to the right alone as the divisor has zeros to the right.
2903 E.g. if x is an unsigned 32 bit number:
2904 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2905 */
2906
2907 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
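/* x & (x - 1) clears the lowest set bit of x, so this is true exactly
when at most one bit of x is set, e.g. for 0, 1, 2, 4, 8, ... */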
2908
2909 rtx
2910 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2911 int rem_flag;
2912 enum tree_code code;
2913 enum machine_mode mode;
2914 register rtx op0, op1, target;
2915 int unsignedp;
2916 {
2917 enum machine_mode compute_mode;
2918 register rtx tquotient;
2919 rtx quotient = 0, remainder = 0;
2920 rtx last;
2921 int size;
2922 rtx insn, set;
2923 optab optab1, optab2;
2924 int op1_is_constant, op1_is_pow2;
2925 int max_cost, extra_cost;
2926 static HOST_WIDE_INT last_div_const = 0;
2927
2928 op1_is_constant = GET_CODE (op1) == CONST_INT;
2929 op1_is_pow2 = (op1_is_constant
2930 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2931 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2932
2933 /*
2934 This is the structure of expand_divmod:
2935
2936 First comes code to fix up the operands so we can perform the operations
2937 correctly and efficiently.
2938
2939 Second comes a switch statement with code specific for each rounding mode.
2940 For some special operands this code emits all RTL for the desired
2941 operation, for other cases, it generates only a quotient and stores it in
2942 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2943 to indicate that it has not done anything.
2944
2945 Last comes code that finishes the operation. If QUOTIENT is set and
2946 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2947 QUOTIENT is not set, it is computed using trunc rounding.
2948
2949 We try to generate special code for division and remainder when OP1 is a
2950 constant. If |OP1| = 2**n we can use shifts and some other fast
2951 operations. For other values of OP1, we compute a carefully selected
2952 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2953 by m.
2954
2955 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2956 half of the product. Different strategies for generating the product are
2957 implemented in expand_mult_highpart.
2958
2959 If what we actually want is the remainder, we generate that by another
2960 by-constant multiplication and a subtraction. */
2961
2962 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2963 code below will malfunction if we are, so check here and handle
2964 the special case if so. */
2965 if (op1 == const1_rtx)
2966 return rem_flag ? const0_rtx : op0;
2967
2968 if (target
2969 /* Don't use the function value register as a target
2970 since we have to read it as well as write it,
2971 and function-inlining gets confused by this. */
2972 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2973 /* Don't clobber an operand while doing a multi-step calculation. */
2974 || ((rem_flag || op1_is_constant)
2975 && (reg_mentioned_p (target, op0)
2976 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2977 || reg_mentioned_p (target, op1)
2978 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2979 target = 0;
2980
2981 /* Get the mode in which to perform this computation. Normally it will
2982 be MODE, but sometimes we can't do the desired operation in MODE.
2983 If so, pick a wider mode in which we can do the operation. Convert
2984 to that mode at the start to avoid repeated conversions.
2985
2986 First see what operations we need. These depend on the expression
2987 we are evaluating. (We assume that divxx3 insns exist under the
2988 same conditions as modxx3 insns do, and that these insns don't normally
2989 fail. If these assumptions are not correct, we may generate less
2990 efficient code in some cases.)
2991
2992 Then see if we find a mode in which we can open-code that operation
2993 (either a division, modulus, or shift). Finally, check for the smallest
2994 mode for which we can do the operation with a library call. */
2995
2996 /* We might want to refine this now that we have division-by-constant
2997 optimization. Since expand_mult_highpart tries so many variants, it is
2998 not straightforward to generalize this. Maybe we should make an array
2999 of possible modes in init_expmed? Save this for GCC 2.7. */
3000
3001 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3002 : (unsignedp ? udiv_optab : sdiv_optab));
3003 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3004
3005 for (compute_mode = mode; compute_mode != VOIDmode;
3006 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3007 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3008 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3009 break;
3010
3011 if (compute_mode == VOIDmode)
3012 for (compute_mode = mode; compute_mode != VOIDmode;
3013 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3014 if (optab1->handlers[(int) compute_mode].libfunc
3015 || optab2->handlers[(int) compute_mode].libfunc)
3016 break;
3017
3018 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3019 in expand_binop. */
3020 if (compute_mode == VOIDmode)
3021 compute_mode = mode;
3022
3023 if (target && GET_MODE (target) == compute_mode)
3024 tquotient = target;
3025 else
3026 tquotient = gen_reg_rtx (compute_mode);
3027
3028 size = GET_MODE_BITSIZE (compute_mode);
3029 #if 0
3030 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3031 (mode), and thereby get better code when OP1 is a constant. Do that
3032 later. It will require going over all usages of SIZE below. */
3033 size = GET_MODE_BITSIZE (mode);
3034 #endif
3035
3036 /* Only deduct something for a REM if the last divide done was
3037 for a different constant. Then set the constant of the last
3038 divide. */
3039 max_cost = div_cost[(int) compute_mode]
3040 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3041 && INTVAL (op1) == last_div_const)
3042 ? mul_cost[(int) compute_mode] + add_cost : 0);
3043
3044 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3045
3046 /* Now convert to the best mode to use. */
3047 if (compute_mode != mode)
3048 {
3049 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3050 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3051
3052 /* convert_modes may have placed op1 into a register, so we
3053 must recompute the following. */
3054 op1_is_constant = GET_CODE (op1) == CONST_INT;
3055 op1_is_pow2 = (op1_is_constant
3056 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3057 || (! unsignedp
3058 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3059 }
3060
3061 /* If one of the operands is a volatile MEM, copy it into a register. */
3062
3063 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3064 op0 = force_reg (compute_mode, op0);
3065 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3066 op1 = force_reg (compute_mode, op1);
3067
3068 /* If we need the remainder or if OP1 is constant, we need to
3069 put OP0 in a register in case it has any queued subexpressions. */
3070 if (rem_flag || op1_is_constant)
3071 op0 = force_reg (compute_mode, op0);
3072
3073 last = get_last_insn ();
3074
3075 /* Promote floor rounding to trunc rounding for unsigned operations. */
3076 if (unsignedp)
3077 {
3078 if (code == FLOOR_DIV_EXPR)
3079 code = TRUNC_DIV_EXPR;
3080 if (code == FLOOR_MOD_EXPR)
3081 code = TRUNC_MOD_EXPR;
3082 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3083 code = TRUNC_DIV_EXPR;
3084 }
3085
3086 if (op1 != const0_rtx)
3087 switch (code)
3088 {
3089 case TRUNC_MOD_EXPR:
3090 case TRUNC_DIV_EXPR:
3091 if (op1_is_constant)
3092 {
3093 if (unsignedp)
3094 {
3095 unsigned HOST_WIDE_INT mh, ml;
3096 int pre_shift, post_shift;
3097 int dummy;
3098 unsigned HOST_WIDE_INT d = INTVAL (op1);
3099
3100 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3101 {
3102 pre_shift = floor_log2 (d);
3103 if (rem_flag)
3104 {
3105 remainder
3106 = expand_binop (compute_mode, and_optab, op0,
3107 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3108 remainder, 1,
3109 OPTAB_LIB_WIDEN);
3110 if (remainder)
3111 return gen_lowpart (mode, remainder);
3112 }
3113 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3114 build_int_2 (pre_shift, 0),
3115 tquotient, 1);
3116 }
3117 else if (size <= HOST_BITS_PER_WIDE_INT)
3118 {
3119 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3120 {
3121 /* Most significant bit of divisor is set; emit an scc
3122 insn. */
3123 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3124 compute_mode, 1, 1);
3125 if (quotient == 0)
3126 goto fail1;
3127 }
3128 else
3129 {
3130 /* Find a suitable multiplier and right shift count
3131 instead of multiplying with D. */
3132
3133 mh = choose_multiplier (d, size, size,
3134 &ml, &post_shift, &dummy);
3135
3136 /* If the suggested multiplier is more than SIZE bits,
3137 we can do better for even divisors, using an
3138 initial right shift. */
3139 if (mh != 0 && (d & 1) == 0)
3140 {
3141 pre_shift = floor_log2 (d & -d);
3142 mh = choose_multiplier (d >> pre_shift, size,
3143 size - pre_shift,
3144 &ml, &post_shift, &dummy);
3145 if (mh)
3146 abort ();
3147 }
3148 else
3149 pre_shift = 0;
3150
3151 if (mh != 0)
3152 {
3153 rtx t1, t2, t3, t4;
3154
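/* The multiplier needs size + 1 bits here, so a plain high-part
multiply is not enough. The sequence below computes the quotient as
(((n - high (n * ml)) >> 1) + high (n * ml)) >> (post_shift - 1),
keeping every intermediate value within size bits. */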
3155 extra_cost = (shift_cost[post_shift - 1]
3156 + shift_cost[1] + 2 * add_cost);
3157 t1 = expand_mult_highpart (compute_mode, op0, ml,
3158 NULL_RTX, 1,
3159 max_cost - extra_cost);
3160 if (t1 == 0)
3161 goto fail1;
3162 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3163 op0, t1),
3164 NULL_RTX);
3165 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3166 build_int_2 (1, 0), NULL_RTX,1);
3167 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3168 t1, t3),
3169 NULL_RTX);
3170 quotient
3171 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3172 build_int_2 (post_shift - 1, 0),
3173 tquotient, 1);
3174 }
3175 else
3176 {
3177 rtx t1, t2;
3178
3179 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3180 build_int_2 (pre_shift, 0),
3181 NULL_RTX, 1);
3182 extra_cost = (shift_cost[pre_shift]
3183 + shift_cost[post_shift]);
3184 t2 = expand_mult_highpart (compute_mode, t1, ml,
3185 NULL_RTX, 1,
3186 max_cost - extra_cost);
3187 if (t2 == 0)
3188 goto fail1;
3189 quotient
3190 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3191 build_int_2 (post_shift, 0),
3192 tquotient, 1);
3193 }
3194 }
3195 }
3196 else /* Mode too wide to use the tricky code */
3197 break;
3198
3199 insn = get_last_insn ();
3200 if (insn != last
3201 && (set = single_set (insn)) != 0
3202 && SET_DEST (set) == quotient)
3203 set_unique_reg_note (insn,
3204 REG_EQUAL,
3205 gen_rtx_UDIV (compute_mode, op0, op1));
3206 }
3207 else /* TRUNC_DIV, signed */
3208 {
3209 unsigned HOST_WIDE_INT ml;
3210 int lgup, post_shift;
3211 HOST_WIDE_INT d = INTVAL (op1);
3212 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3213
3214 /* n rem d = n rem -d */
3215 if (rem_flag && d < 0)
3216 {
3217 d = abs_d;
3218 op1 = GEN_INT (abs_d);
3219 }
3220
3221 if (d == 1)
3222 quotient = op0;
3223 else if (d == -1)
3224 quotient = expand_unop (compute_mode, neg_optab, op0,
3225 tquotient, 0);
3226 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3227 {
3228 /* This case is not handled correctly below. */
3229 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3230 compute_mode, 1, 1);
3231 if (quotient == 0)
3232 goto fail1;
3233 }
3234 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3235 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3236 ;
3237 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3238 {
3239 lgup = floor_log2 (abs_d);
3240 if (abs_d != 2 && BRANCH_COST < 3)
3241 {
3242 rtx label = gen_label_rtx ();
3243 rtx t1;
3244
3245 t1 = copy_to_mode_reg (compute_mode, op0);
3246 do_cmp_and_jump (t1, const0_rtx, GE,
3247 compute_mode, label);
3248 expand_inc (t1, GEN_INT (abs_d - 1));
3249 emit_label (label);
3250 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3251 build_int_2 (lgup, 0),
3252 tquotient, 0);
3253 }
3254 else
3255 {
3256 rtx t1, t2, t3;
3257 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3258 build_int_2 (size - 1, 0),
3259 NULL_RTX, 0);
3260 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3261 build_int_2 (size - lgup, 0),
3262 NULL_RTX, 1);
3263 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3264 op0, t2),
3265 NULL_RTX);
3266 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3267 build_int_2 (lgup, 0),
3268 tquotient, 0);
3269 }
3270
3271 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3272 the quotient. */
3273 if (d < 0)
3274 {
3275 insn = get_last_insn ();
3276 if (insn != last
3277 && (set = single_set (insn)) != 0
3278 && SET_DEST (set) == quotient
3279 && abs_d < ((unsigned HOST_WIDE_INT) 1
3280 << (HOST_BITS_PER_WIDE_INT - 1)))
3281 set_unique_reg_note (insn,
3282 REG_EQUAL,
3283 gen_rtx_DIV (compute_mode,
3284 op0,
3285 GEN_INT (abs_d)));
3286
3287 quotient = expand_unop (compute_mode, neg_optab,
3288 quotient, quotient, 0);
3289 }
3290 }
3291 else if (size <= HOST_BITS_PER_WIDE_INT)
3292 {
3293 choose_multiplier (abs_d, size, size - 1,
3294 &ml, &post_shift, &lgup);
3295 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3296 {
3297 rtx t1, t2, t3;
3298
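/* Signed case with a multiplier that fits in size bits: the quotient
is high (n * ml) >> post_shift minus the sign word of n (t3, which
is 0 or -1); for d < 0 the operands of the final subtraction are
swapped, which also folds in the negation of the quotient. */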
3299 extra_cost = (shift_cost[post_shift]
3300 + shift_cost[size - 1] + add_cost);
3301 t1 = expand_mult_highpart (compute_mode, op0, ml,
3302 NULL_RTX, 0,
3303 max_cost - extra_cost);
3304 if (t1 == 0)
3305 goto fail1;
3306 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3307 build_int_2 (post_shift, 0), NULL_RTX, 0);
3308 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3309 build_int_2 (size - 1, 0), NULL_RTX, 0);
3310 if (d < 0)
3311 quotient
3312 = force_operand (gen_rtx_MINUS (compute_mode,
3313 t3, t2),
3314 tquotient);
3315 else
3316 quotient
3317 = force_operand (gen_rtx_MINUS (compute_mode,
3318 t2, t3),
3319 tquotient);
3320 }
3321 else
3322 {
3323 rtx t1, t2, t3, t4;
3324
3325 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3326 extra_cost = (shift_cost[post_shift]
3327 + shift_cost[size - 1] + 2 * add_cost);
3328 t1 = expand_mult_highpart (compute_mode, op0, ml,
3329 NULL_RTX, 0,
3330 max_cost - extra_cost);
3331 if (t1 == 0)
3332 goto fail1;
3333 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3334 t1, op0),
3335 NULL_RTX);
3336 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3337 build_int_2 (post_shift, 0),
3338 NULL_RTX, 0);
3339 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3340 build_int_2 (size - 1, 0),
3341 NULL_RTX, 0);
3342 if (d < 0)
3343 quotient
3344 = force_operand (gen_rtx_MINUS (compute_mode,
3345 t4, t3),
3346 tquotient);
3347 else
3348 quotient
3349 = force_operand (gen_rtx_MINUS (compute_mode,
3350 t3, t4),
3351 tquotient);
3352 }
3353 }
3354 else /* Mode too wide to use the tricky code */
3355 break;
3356
3357 insn = get_last_insn ();
3358 if (insn != last
3359 && (set = single_set (insn)) != 0
3360 && SET_DEST (set) == quotient)
3361 set_unique_reg_note (insn,
3362 REG_EQUAL,
3363 gen_rtx_DIV (compute_mode, op0, op1));
3364 }
3365 break;
3366 }
3367 fail1:
3368 delete_insns_since (last);
3369 break;
3370
3371 case FLOOR_DIV_EXPR:
3372 case FLOOR_MOD_EXPR:
3373 /* We will come here only for signed operations. */
3374 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3375 {
3376 unsigned HOST_WIDE_INT mh, ml;
3377 int pre_shift, lgup, post_shift;
3378 HOST_WIDE_INT d = INTVAL (op1);
3379
3380 if (d > 0)
3381 {
3382 /* We could just as easily deal with negative constants here,
3383 but it does not seem worth the trouble for GCC 2.6. */
3384 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3385 {
3386 pre_shift = floor_log2 (d);
3387 if (rem_flag)
3388 {
3389 remainder = expand_binop (compute_mode, and_optab, op0,
3390 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3391 remainder, 0, OPTAB_LIB_WIDEN);
3392 if (remainder)
3393 return gen_lowpart (mode, remainder);
3394 }
3395 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3396 build_int_2 (pre_shift, 0),
3397 tquotient, 0);
3398 }
3399 else
3400 {
3401 rtx t1, t2, t3, t4;
3402
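/* Floor division by positive d, using the identity
floor (n / d) == ~(~n / d) for n < 0: XORing with the sign mask t1
maps negative n to ~n, a nonnegative value, the multiply-high
sequence divides that as unsigned, and the final XOR with t1
restores the sign while converting truncation to floor rounding. */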
3403 mh = choose_multiplier (d, size, size - 1,
3404 &ml, &post_shift, &lgup);
3405 if (mh)
3406 abort ();
3407
3408 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3409 build_int_2 (size - 1, 0), NULL_RTX, 0);
3410 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3411 NULL_RTX, 0, OPTAB_WIDEN);
3412 extra_cost = (shift_cost[post_shift]
3413 + shift_cost[size - 1] + 2 * add_cost);
3414 t3 = expand_mult_highpart (compute_mode, t2, ml,
3415 NULL_RTX, 1,
3416 max_cost - extra_cost);
3417 if (t3 != 0)
3418 {
3419 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3420 build_int_2 (post_shift, 0),
3421 NULL_RTX, 1);
3422 quotient = expand_binop (compute_mode, xor_optab,
3423 t4, t1, tquotient, 0,
3424 OPTAB_WIDEN);
3425 }
3426 }
3427 }
3428 else
3429 {
3430 rtx nsign, t1, t2, t3, t4;
3431 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3432 op0, constm1_rtx), NULL_RTX);
3433 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3434 0, OPTAB_WIDEN);
3435 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3436 build_int_2 (size - 1, 0), NULL_RTX, 0);
3437 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3438 NULL_RTX);
3439 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3440 NULL_RTX, 0);
3441 if (t4)
3442 {
3443 rtx t5;
3444 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3445 NULL_RTX, 0);
3446 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3447 t4, t5),
3448 tquotient);
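/* An illustrative trace for a negative divisor, op0 == 7, op1 == -2:
   t1 == 6, nsign == 0 since op0 | t1 is positive, t3 == 6,
   t4 == trunc (6 / -2) == -3, t5 == ~0 == -1, so the quotient is
   -3 + -1 == -4 == floor (7 / -2). When op0 <= 0, nsign == -1,
   t3 == op0 and t5 == 0, so the truncating quotient is already
   the floor quotient. */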
3449 }
3450 }
3451 }
3452
3453 if (quotient != 0)
3454 break;
3455 delete_insns_since (last);
3456
3457 /* Try using an instruction that produces both the quotient and
3458 remainder, using truncation. We can easily compensate the quotient
3459 or remainder to get floor rounding, once we have the remainder.
3460 Notice that we also compute the final remainder value here,
3461 and return the result right away. */
3462 if (target == 0 || GET_MODE (target) != compute_mode)
3463 target = gen_reg_rtx (compute_mode);
3464
3465 if (rem_flag)
3466 {
3467 remainder
3468 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3469 quotient = gen_reg_rtx (compute_mode);
3470 }
3471 else
3472 {
3473 quotient
3474 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3475 remainder = gen_reg_rtx (compute_mode);
3476 }
3477
3478 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3479 quotient, remainder, 0))
3480 {
3481 /* This could be computed with a branch-less sequence.
3482 Save that for later. */
3483 rtx tem;
3484 rtx label = gen_label_rtx ();
3485 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3486 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3487 NULL_RTX, 0, OPTAB_WIDEN);
3488 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3489 expand_dec (quotient, const1_rtx);
3490 expand_inc (remainder, op1);
3491 emit_label (label);
3492 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3493 }
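/* A small example of the fixup above: trunc (-7 / 2) gives quotient
   -3 and remainder -1; the remainder is non-zero and the operands
   differ in sign, so the quotient becomes -4 and the remainder
   -1 + 2 == 1, matching floor (-7 / 2) == -4 with remainder 1. */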
3494
3495 /* No luck with division elimination or divmod. Have to do it
3496 by conditionally adjusting op0 *and* the result. */
3497 {
3498 rtx label1, label2, label3, label4, label5;
3499 rtx adjusted_op0;
3500 rtx tem;
3501
3502 quotient = gen_reg_rtx (compute_mode);
3503 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3504 label1 = gen_label_rtx ();
3505 label2 = gen_label_rtx ();
3506 label3 = gen_label_rtx ();
3507 label4 = gen_label_rtx ();
3508 label5 = gen_label_rtx ();
3509 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3510 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3511 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3512 quotient, 0, OPTAB_LIB_WIDEN);
3513 if (tem != quotient)
3514 emit_move_insn (quotient, tem);
3515 emit_jump_insn (gen_jump (label5));
3516 emit_barrier ();
3517 emit_label (label1);
3518 expand_inc (adjusted_op0, const1_rtx);
3519 emit_jump_insn (gen_jump (label4));
3520 emit_barrier ();
3521 emit_label (label2);
3522 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3523 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3524 quotient, 0, OPTAB_LIB_WIDEN);
3525 if (tem != quotient)
3526 emit_move_insn (quotient, tem);
3527 emit_jump_insn (gen_jump (label5));
3528 emit_barrier ();
3529 emit_label (label3);
3530 expand_dec (adjusted_op0, const1_rtx);
3531 emit_label (label4);
3532 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3533 quotient, 0, OPTAB_LIB_WIDEN);
3534 if (tem != quotient)
3535 emit_move_insn (quotient, tem);
3536 expand_dec (quotient, const1_rtx);
3537 emit_label (label5);
3538 }
3539 break;
3540
3541 case CEIL_DIV_EXPR:
3542 case CEIL_MOD_EXPR:
3543 if (unsignedp)
3544 {
3545 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3546 {
3547 rtx t1, t2, t3;
3548 unsigned HOST_WIDE_INT d = INTVAL (op1);
3549 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3550 build_int_2 (floor_log2 (d), 0),
3551 tquotient, 1);
3552 t2 = expand_binop (compute_mode, and_optab, op0,
3553 GEN_INT (d - 1),
3554 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3555 t3 = gen_reg_rtx (compute_mode);
3556 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3557 compute_mode, 1, 1);
3558 if (t3 == 0)
3559 {
3560 rtx lab;
3561 lab = gen_label_rtx ();
3562 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3563 expand_inc (t1, const1_rtx);
3564 emit_label (lab);
3565 quotient = t1;
3566 }
3567 else
3568 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3569 t1, t3),
3570 tquotient);
3571 break;
3572 }
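/* An illustrative trace of the power-of-2 case above: for op0 == 13,
   d == 4, t1 == 13 >> 2 == 3 and t2 == 13 & 3 == 1, so the
   store-flag value t3 is 1 and the quotient is 3 + 1 == 4
   == ceil (13 / 4). */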
3573
3574 /* Try using an instruction that produces both the quotient and
3575 remainder, using truncation. We can easily compensate the
3576 quotient or remainder to get ceiling rounding, once we have the
3577 remainder. Notice that we also compute the final remainder
3578 value here, and return the result right away. */
3579 if (target == 0 || GET_MODE (target) != compute_mode)
3580 target = gen_reg_rtx (compute_mode);
3581
3582 if (rem_flag)
3583 {
3584 remainder = (GET_CODE (target) == REG
3585 ? target : gen_reg_rtx (compute_mode));
3586 quotient = gen_reg_rtx (compute_mode);
3587 }
3588 else
3589 {
3590 quotient = (GET_CODE (target) == REG
3591 ? target : gen_reg_rtx (compute_mode));
3592 remainder = gen_reg_rtx (compute_mode);
3593 }
3594
3595 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3596 remainder, 1))
3597 {
3598 /* This could be computed with a branch-less sequence.
3599 Save that for later. */
3600 rtx label = gen_label_rtx ();
3601 do_cmp_and_jump (remainder, const0_rtx, EQ,
3602 compute_mode, label);
3603 expand_inc (quotient, const1_rtx);
3604 expand_dec (remainder, op1);
3605 emit_label (label);
3606 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3607 }
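/* E.g. for op0 == 13, op1 == 4 the truncating divmod gives quotient 3
   and remainder 1; the remainder is non-zero, so the quotient becomes
   4 and the remainder 1 - 4 == -3, matching 13 == 4 * 4 - 3. */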
3608
3609 /* No luck with division elimination or divmod. Have to do it
3610 by conditionally adjusting op0 *and* the result. */
3611 {
3612 rtx label1, label2;
3613 rtx adjusted_op0, tem;
3614
3615 quotient = gen_reg_rtx (compute_mode);
3616 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3617 label1 = gen_label_rtx ();
3618 label2 = gen_label_rtx ();
3619 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3620 compute_mode, label1);
3621 emit_move_insn (quotient, const0_rtx);
3622 emit_jump_insn (gen_jump (label2));
3623 emit_barrier ();
3624 emit_label (label1);
3625 expand_dec (adjusted_op0, const1_rtx);
3626 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3627 quotient, 1, OPTAB_LIB_WIDEN);
3628 if (tem != quotient)
3629 emit_move_insn (quotient, tem);
3630 expand_inc (quotient, const1_rtx);
3631 emit_label (label2);
3632 }
3633 }
3634 else /* signed */
3635 {
3636 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3637 && INTVAL (op1) >= 0)
3638 {
3639 /* This is extremely similar to the code for the unsigned case
3640 above. For 2.7 we should merge these variants, but for
3641 2.6.1 I don't want to touch the code for unsigned since that
3642 gets used in C. The signed case will only be used by other
3643 languages (Ada). */
3644
3645 rtx t1, t2, t3;
3646 unsigned HOST_WIDE_INT d = INTVAL (op1);
3647 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3648 build_int_2 (floor_log2 (d), 0),
3649 tquotient, 0);
3650 t2 = expand_binop (compute_mode, and_optab, op0,
3651 GEN_INT (d - 1),
3652 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3653 t3 = gen_reg_rtx (compute_mode);
3654 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3655 compute_mode, 1, 1);
3656 if (t3 == 0)
3657 {
3658 rtx lab;
3659 lab = gen_label_rtx ();
3660 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3661 expand_inc (t1, const1_rtx);
3662 emit_label (lab);
3663 quotient = t1;
3664 }
3665 else
3666 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3667 t1, t3),
3668 tquotient);
3669 break;
3670 }
3671
3672 /* Try using an instruction that produces both the quotient and
3673 remainder, using truncation. We can easily compensate the
3674 quotient or remainder to get ceiling rounding, once we have the
3675 remainder. Notice that we also compute the final remainder
3676 value here, and return the result right away. */
3677 if (target == 0 || GET_MODE (target) != compute_mode)
3678 target = gen_reg_rtx (compute_mode);
3679 if (rem_flag)
3680 {
3681 remainder = (GET_CODE (target) == REG
3682 ? target : gen_reg_rtx (compute_mode));
3683 quotient = gen_reg_rtx (compute_mode);
3684 }
3685 else
3686 {
3687 quotient = (GET_CODE (target) == REG
3688 ? target : gen_reg_rtx (compute_mode));
3689 remainder = gen_reg_rtx (compute_mode);
3690 }
3691
3692 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3693 remainder, 0))
3694 {
3695 /* This could be computed with a branch-less sequence.
3696 Save that for later. */
3697 rtx tem;
3698 rtx label = gen_label_rtx ();
3699 do_cmp_and_jump (remainder, const0_rtx, EQ,
3700 compute_mode, label);
3701 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3702 NULL_RTX, 0, OPTAB_WIDEN);
3703 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3704 expand_inc (quotient, const1_rtx);
3705 expand_dec (remainder, op1);
3706 emit_label (label);
3707 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3708 }
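/* Here the quotient is only bumped when the remainder is non-zero and
   the operands have the same sign, i.e. the xor is non-negative: e.g.
   trunc (7 / 2) == 3 rem 1 becomes 4 rem -1, while trunc (-7 / 2)
   == -3 rem -1 is left alone, since ceil (-7 / 2) == -3. */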
3709
3710 /* No luck with division elimination or divmod. Have to do it
3711 by conditionally adjusting op0 *and* the result. */
3712 {
3713 rtx label1, label2, label3, label4, label5;
3714 rtx adjusted_op0;
3715 rtx tem;
3716
3717 quotient = gen_reg_rtx (compute_mode);
3718 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3719 label1 = gen_label_rtx ();
3720 label2 = gen_label_rtx ();
3721 label3 = gen_label_rtx ();
3722 label4 = gen_label_rtx ();
3723 label5 = gen_label_rtx ();
3724 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3725 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3726 compute_mode, label1);
3727 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3728 quotient, 0, OPTAB_LIB_WIDEN);
3729 if (tem != quotient)
3730 emit_move_insn (quotient, tem);
3731 emit_jump_insn (gen_jump (label5));
3732 emit_barrier ();
3733 emit_label (label1);
3734 expand_dec (adjusted_op0, const1_rtx);
3735 emit_jump_insn (gen_jump (label4));
3736 emit_barrier ();
3737 emit_label (label2);
3738 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3739 compute_mode, label3);
3740 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3741 quotient, 0, OPTAB_LIB_WIDEN);
3742 if (tem != quotient)
3743 emit_move_insn (quotient, tem);
3744 emit_jump_insn (gen_jump (label5));
3745 emit_barrier ();
3746 emit_label (label3);
3747 expand_inc (adjusted_op0, const1_rtx);
3748 emit_label (label4);
3749 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3750 quotient, 0, OPTAB_LIB_WIDEN);
3751 if (tem != quotient)
3752 emit_move_insn (quotient, tem);
3753 expand_inc (quotient, const1_rtx);
3754 emit_label (label5);
3755 }
3756 }
3757 break;
3758
3759 case EXACT_DIV_EXPR:
3760 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3761 {
3762 HOST_WIDE_INT d = INTVAL (op1);
3763 unsigned HOST_WIDE_INT ml;
3764 int post_shift;
3765 rtx t1;
3766
3767 post_shift = floor_log2 (d & -d);
3768 ml = invert_mod2n (d >> post_shift, size);
3769 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3770 unsignedp);
3771 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3772 build_int_2 (post_shift, 0),
3773 NULL_RTX, unsignedp);
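/* A worked example, assuming 32-bit values: for d == 24 we get
   post_shift == 3 and ml == invert_mod2n (3, 32) == 0xAAAAAAAB,
   since 3 * 0xAAAAAAAB == 1 (mod 2^32). Dividing 72 exactly:
   72 * 0xAAAAAAAB == 24 (mod 2^32), and 24 >> 3 == 3 == 72 / 24. */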
3774
3775 insn = get_last_insn ();
3776 set_unique_reg_note (insn,
3777 REG_EQUAL,
3778 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3779 compute_mode,
3780 op0, op1));
3781 }
3782 break;
3783
3784 case ROUND_DIV_EXPR:
3785 case ROUND_MOD_EXPR:
3786 if (unsignedp)
3787 {
3788 rtx tem;
3789 rtx label;
3790 label = gen_label_rtx ();
3791 quotient = gen_reg_rtx (compute_mode);
3792 remainder = gen_reg_rtx (compute_mode);
3793 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3794 {
3795 rtx tem;
3796 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3797 quotient, 1, OPTAB_LIB_WIDEN);
3798 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3799 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3800 remainder, 1, OPTAB_LIB_WIDEN);
3801 }
3802 tem = plus_constant (op1, -1);
3803 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3804 build_int_2 (1, 0), NULL_RTX, 1);
3805 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3806 expand_inc (quotient, const1_rtx);
3807 expand_dec (remainder, op1);
3808 emit_label (label);
3809 }
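/* For illustration: 8 / 3 truncates to quotient 2, remainder 2;
   (3 - 1) >> 1 == 1 and 2 > 1, so the quotient is rounded up to 3
   and the remainder becomes 2 - 3 == -1, i.e. 8 == 3 * 3 - 1. */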
3810 else
3811 {
3812 rtx abs_rem, abs_op1, tem, mask;
3813 rtx label;
3814 label = gen_label_rtx ();
3815 quotient = gen_reg_rtx (compute_mode);
3816 remainder = gen_reg_rtx (compute_mode);
3817 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3818 {
3819 rtx tem;
3820 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3821 quotient, 0, OPTAB_LIB_WIDEN);
3822 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3823 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3824 remainder, 0, OPTAB_LIB_WIDEN);
3825 }
3826 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3827 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3828 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3829 build_int_2 (1, 0), NULL_RTX, 1);
3830 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3831 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3832 NULL_RTX, 0, OPTAB_WIDEN);
3833 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3834 build_int_2 (size - 1, 0), NULL_RTX, 0);
3835 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3836 NULL_RTX, 0, OPTAB_WIDEN);
3837 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3838 NULL_RTX, 0, OPTAB_WIDEN);
3839 expand_inc (quotient, tem);
3840 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3841 NULL_RTX, 0, OPTAB_WIDEN);
3842 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3843 NULL_RTX, 0, OPTAB_WIDEN);
3844 expand_dec (remainder, tem);
3845 emit_label (label);
3846 }
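/* The mask trick avoids a second branch: mask == (op0 ^ op1) >> (size - 1)
   is 0 when the signs agree and -1 when they differ, so
   (mask ^ 1) - mask is +1 or -1, the amount to add to the quotient.
   E.g. -7 / 2 truncates to -3 rem -1; 2 * |rem| >= |2| and mask == -1,
   so the quotient becomes -4 == round (-7 / 2) and the remainder 1. */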
3847 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3848
3849 default:
3850 abort ();
3851 }
3852
3853 if (quotient == 0)
3854 {
3855 if (target && GET_MODE (target) != compute_mode)
3856 target = 0;
3857
3858 if (rem_flag)
3859 {
3860 /* Try to produce the remainder without producing the quotient.
3861 If we seem to have a divmod pattern that does not require widening,
3862 don't try widening here. We should really have a WIDEN argument
3863 to expand_twoval_binop, since what we'd really like to do here is
3864 1) try a mod insn in compute_mode
3865 2) try a divmod insn in compute_mode
3866 3) try a div insn in compute_mode and multiply-subtract to get
3867 remainder
3868 4) try the same things with widening allowed. */
3869 remainder
3870 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3871 op0, op1, target,
3872 unsignedp,
3873 ((optab2->handlers[(int) compute_mode].insn_code
3874 != CODE_FOR_nothing)
3875 ? OPTAB_DIRECT : OPTAB_WIDEN));
3876 if (remainder == 0)
3877 {
3878 /* No luck there. Can we do remainder and divide at once
3879 without a library call? */
3880 remainder = gen_reg_rtx (compute_mode);
3881 if (! expand_twoval_binop ((unsignedp
3882 ? udivmod_optab
3883 : sdivmod_optab),
3884 op0, op1,
3885 NULL_RTX, remainder, unsignedp))
3886 remainder = 0;
3887 }
3888
3889 if (remainder)
3890 return gen_lowpart (mode, remainder);
3891 }
3892
3893 /* Produce the quotient. Try a quotient insn, but not a library call.
3894 If we have a divmod in this mode, use it in preference to widening
3895 the div (for this test we assume it will not fail). Note that optab2
3896 is set to the one of the two optabs that the call below will use. */
3897 quotient
3898 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3899 op0, op1, rem_flag ? NULL_RTX : target,
3900 unsignedp,
3901 ((optab2->handlers[(int) compute_mode].insn_code
3902 != CODE_FOR_nothing)
3903 ? OPTAB_DIRECT : OPTAB_WIDEN));
3904
3905 if (quotient == 0)
3906 {
3907 /* No luck there. Try a quotient-and-remainder insn,
3908 keeping the quotient alone. */
3909 quotient = gen_reg_rtx (compute_mode);
3910 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3911 op0, op1,
3912 quotient, NULL_RTX, unsignedp))
3913 {
3914 quotient = 0;
3915 if (! rem_flag)
3916 /* Still no luck. If we are not computing the remainder,
3917 use a library call for the quotient. */
3918 quotient = sign_expand_binop (compute_mode,
3919 udiv_optab, sdiv_optab,
3920 op0, op1, target,
3921 unsignedp, OPTAB_LIB_WIDEN);
3922 }
3923 }
3924 }
3925
3926 if (rem_flag)
3927 {
3928 if (target && GET_MODE (target) != compute_mode)
3929 target = 0;
3930
3931 if (quotient == 0)
3932 /* No divide instruction either. Use library for remainder. */
3933 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3934 op0, op1, target,
3935 unsignedp, OPTAB_LIB_WIDEN);
3936 else
3937 {
3938 /* We divided. Now finish doing X - Y * (X / Y). */
3939 remainder = expand_mult (compute_mode, quotient, op1,
3940 NULL_RTX, unsignedp);
3941 remainder = expand_binop (compute_mode, sub_optab, op0,
3942 remainder, target, unsignedp,
3943 OPTAB_LIB_WIDEN);
3944 }
3945 }
3946
3947 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3948 }
3949 \f
3950 /* Return a tree node with data type TYPE, describing the value of X.
3951 Usually this is an RTL_EXPR, if there is no obviously better choice.
3952 X may be an expression; however, we only support those expressions
3953 generated by loop.c. */
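/* For example, (plus:SI (reg:SI 100) (const_int 4)) becomes a folded
   PLUS_EXPR of an RTL_EXPR wrapping the register and the integer
   constant 4; any rtx with no direct tree analogue falls through to
   the RTL_EXPR default below. */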
3954
3955 tree
3956 make_tree (type, x)
3957 tree type;
3958 rtx x;
3959 {
3960 tree t;
3961
3962 switch (GET_CODE (x))
3963 {
3964 case CONST_INT:
3965 t = build_int_2 (INTVAL (x),
3966 (TREE_UNSIGNED (type)
3967 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3968 || INTVAL (x) >= 0 ? 0 : -1);
3969 TREE_TYPE (t) = type;
3970 return t;
3971
3972 case CONST_DOUBLE:
3973 if (GET_MODE (x) == VOIDmode)
3974 {
3975 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3976 TREE_TYPE (t) = type;
3977 }
3978 else
3979 {
3980 REAL_VALUE_TYPE d;
3981
3982 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3983 t = build_real (type, d);
3984 }
3985
3986 return t;
3987
3988 case PLUS:
3989 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3990 make_tree (type, XEXP (x, 1))));
3991
3992 case MINUS:
3993 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3994 make_tree (type, XEXP (x, 1))));
3995
3996 case NEG:
3997 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3998
3999 case MULT:
4000 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4001 make_tree (type, XEXP (x, 1))));
4002
4003 case ASHIFT:
4004 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4005 make_tree (type, XEXP (x, 1))));
4006
4007 case LSHIFTRT:
4008 return fold (convert (type,
4009 build (RSHIFT_EXPR, unsigned_type (type),
4010 make_tree (unsigned_type (type),
4011 XEXP (x, 0)),
4012 make_tree (type, XEXP (x, 1)))));
4013
4014 case ASHIFTRT:
4015 return fold (convert (type,
4016 build (RSHIFT_EXPR, signed_type (type),
4017 make_tree (signed_type (type), XEXP (x, 0)),
4018 make_tree (type, XEXP (x, 1)))));
4019
4020 case DIV:
4021 if (TREE_CODE (type) != REAL_TYPE)
4022 t = signed_type (type);
4023 else
4024 t = type;
4025
4026 return fold (convert (type,
4027 build (TRUNC_DIV_EXPR, t,
4028 make_tree (t, XEXP (x, 0)),
4029 make_tree (t, XEXP (x, 1)))));
4030 case UDIV:
4031 t = unsigned_type (type);
4032 return fold (convert (type,
4033 build (TRUNC_DIV_EXPR, t,
4034 make_tree (t, XEXP (x, 0)),
4035 make_tree (t, XEXP (x, 1)))));
4036 default:
4037 t = make_node (RTL_EXPR);
4038 TREE_TYPE (t) = type;
4039 RTL_EXPR_RTL (t) = x;
4040 /* There are no insns to be output
4041 when this rtl_expr is used. */
4042 RTL_EXPR_SEQUENCE (t) = 0;
4043 return t;
4044 }
4045 }
4046
4047 /* Return an rtx representing the value of X * MULT + ADD.
4048 TARGET is a suggestion for where to store the result (an rtx).
4049 MODE is the machine mode for the computation.
4050 X and MULT must have mode MODE. ADD may have a different mode;
4051 if ADD has VOIDmode, it is taken to have mode MODE as well.
4052 UNSIGNEDP is non-zero to do unsigned multiplication.
4053 This may emit insns. */
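/* A hypothetical call, for illustration:
   expand_mult_add (x, NULL_RTX, GEN_INT (4), y, SImode, 0)
   emits insns computing x * 4 + y and returns the result rtx; the
   work is done by folding the equivalent tree and expanding it. */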
4054
4055 rtx
4056 expand_mult_add (x, target, mult, add, mode, unsignedp)
4057 rtx x, target, mult, add;
4058 enum machine_mode mode;
4059 int unsignedp;
4060 {
4061 tree type = type_for_mode (mode, unsignedp);
4062 tree add_type = (GET_MODE (add) == VOIDmode
4063 ? type : type_for_mode (GET_MODE (add), unsignedp));
4064 tree result = fold (build (PLUS_EXPR, type,
4065 fold (build (MULT_EXPR, type,
4066 make_tree (type, x),
4067 make_tree (type, mult))),
4068 make_tree (add_type, add)));
4069
4070 return expand_expr (result, target, VOIDmode, 0);
4071 }
4072 \f
4073 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4074 and returning TARGET.
4075
4076 If TARGET is 0, a pseudo-register or constant is returned. */
4077
4078 rtx
4079 expand_and (op0, op1, target)
4080 rtx op0, op1, target;
4081 {
4082 enum machine_mode mode = VOIDmode;
4083 rtx tem;
4084
4085 if (GET_MODE (op0) != VOIDmode)
4086 mode = GET_MODE (op0);
4087 else if (GET_MODE (op1) != VOIDmode)
4088 mode = GET_MODE (op1);
4089
4090 if (mode != VOIDmode)
4091 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4092 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4093 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4094 else
4095 abort ();
4096
4097 if (target == 0)
4098 target = tem;
4099 else if (tem != target)
4100 emit_move_insn (target, tem);
4101 return target;
4102 }
4103 \f
4104 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4105 and storing in TARGET. Normally return TARGET.
4106 Return 0 if that cannot be done.
4107
4108 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4109 it is VOIDmode, they cannot both be CONST_INT.
4110
4111 UNSIGNEDP is for the case where we have to widen the operands
4112 to perform the operation. It says to use zero-extension.
4113
4114 NORMALIZEP is 1 if we should convert the result to be either zero
4115 or one. NORMALIZEP is -1 if we should convert the result to be
4116 either zero or -1. If NORMALIZEP is zero, the result will be left
4117 "raw" out of the scc insn. */
4118
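/* A hypothetical call, for illustration:
   emit_store_flag (target, EQ, x, y, SImode, 0, 1)
   tries to set TARGET to 1 if x == y and 0 otherwise, returning 0
   if no suitable branch-free sequence can be found. */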
4119 rtx
4120 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4121 rtx target;
4122 enum rtx_code code;
4123 rtx op0, op1;
4124 enum machine_mode mode;
4125 int unsignedp;
4126 int normalizep;
4127 {
4128 rtx subtarget;
4129 enum insn_code icode;
4130 enum machine_mode compare_mode;
4131 enum machine_mode target_mode = GET_MODE (target);
4132 rtx tem;
4133 rtx last = get_last_insn ();
4134 rtx pattern, comparison;
4135
4136 if (unsignedp)
4137 code = unsigned_condition (code);
4138
4139 /* If one operand is constant, make it the second one. Only do this
4140 if the other operand is not constant as well. */
4141
4142 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4143 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4144 {
4145 tem = op0;
4146 op0 = op1;
4147 op1 = tem;
4148 code = swap_condition (code);
4149 }
4150
4151 if (mode == VOIDmode)
4152 mode = GET_MODE (op0);
4153
4154 /* For some comparisons with 1 and -1, we can convert this to
4155 comparisons with zero. This will often produce more opportunities for
4156 store-flag insns. */
4157
4158 switch (code)
4159 {
4160 case LT:
4161 if (op1 == const1_rtx)
4162 op1 = const0_rtx, code = LE;
4163 break;
4164 case LE:
4165 if (op1 == constm1_rtx)
4166 op1 = const0_rtx, code = LT;
4167 break;
4168 case GE:
4169 if (op1 == const1_rtx)
4170 op1 = const0_rtx, code = GT;
4171 break;
4172 case GT:
4173 if (op1 == constm1_rtx)
4174 op1 = const0_rtx, code = GE;
4175 break;
4176 case GEU:
4177 if (op1 == const1_rtx)
4178 op1 = const0_rtx, code = NE;
4179 break;
4180 case LTU:
4181 if (op1 == const1_rtx)
4182 op1 = const0_rtx, code = EQ;
4183 break;
4184 default:
4185 break;
4186 }
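/* E.g. the signed comparison A < 1 becomes A <= 0, and the unsigned
   comparison A >= 1 becomes A != 0; comparisons against zero are far
   more likely to match a store-flag pattern. */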
4187
4188 /* If we are comparing a double-word integer with zero, we can convert
4189 the comparison into one involving a single word. */
4190 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4191 && GET_MODE_CLASS (mode) == MODE_INT
4192 && op1 == const0_rtx)
4193 {
4194 if (code == EQ || code == NE)
4195 {
4196 /* Do a logical OR of the two words and compare the result. */
4197 rtx op0h = gen_highpart (word_mode, op0);
4198 rtx op0l = gen_lowpart (word_mode, op0);
4199 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4200 NULL_RTX, unsignedp, OPTAB_DIRECT);
4201 if (op0both != 0)
4202 return emit_store_flag (target, code, op0both, op1, word_mode,
4203 unsignedp, normalizep);
4204 }
4205 else if (code == LT || code == GE)
4206 /* If testing the sign bit, can just test on high word. */
4207 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4208 op1, word_mode, unsignedp, normalizep);
4209 }
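/* On a 32-bit target, for example, a DImode X == 0 becomes
   (highpart | lowpart) == 0 computed in SImode, and DImode X < 0
   just tests the sign of the high word. */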
4210
4211 /* From now on, we won't change CODE, so set ICODE now. */
4212 icode = setcc_gen_code[(int) code];
4213
4214 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4215 complement of A (for GE) and shifting the sign bit to the low bit. */
4216 if (op1 == const0_rtx && (code == LT || code == GE)
4217 && GET_MODE_CLASS (mode) == MODE_INT
4218 && (normalizep || STORE_FLAG_VALUE == 1
4219 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4220 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4221 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4222 {
4223 subtarget = target;
4224
4225 /* If the result is to be wider than OP0, it is best to convert it
4226 first. If it is to be narrower, it is *incorrect* to convert it
4227 first. */
4228 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4229 {
4230 op0 = protect_from_queue (op0, 0);
4231 op0 = convert_modes (target_mode, mode, op0, 0);
4232 mode = target_mode;
4233 }
4234
4235 if (target_mode != mode)
4236 subtarget = 0;
4237
4238 if (code == GE)
4239 op0 = expand_unop (mode, one_cmpl_optab, op0,
4240 ((STORE_FLAG_VALUE == 1 || normalizep)
4241 ? 0 : subtarget), 0);
4242
4243 if (STORE_FLAG_VALUE == 1 || normalizep)
4244 /* If we are supposed to produce a 0/1 value, we want to do
4245 a logical shift from the sign bit to the low-order bit; for
4246 a -1/0 value, we do an arithmetic shift. */
4247 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4248 size_int (GET_MODE_BITSIZE (mode) - 1),
4249 subtarget, normalizep != -1);
4250
4251 if (mode != target_mode)
4252 op0 = convert_modes (target_mode, mode, op0, 0);
4253
4254 return op0;
4255 }
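/* As a sketch, assuming a 32-bit mode: for LT, a logical right shift
   of A by 31 leaves 1 exactly when A < 0 (NORMALIZEP == 1), while an
   arithmetic shift leaves -1 (NORMALIZEP == -1); for GE the same
   shifts are applied to ~A. */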
4256
4257 if (icode != CODE_FOR_nothing)
4258 {
4259 insn_operand_predicate_fn pred;
4260
4261 /* We think we may be able to do this with a scc insn. Emit the
4262 comparison and then the scc insn.
4263
4264 compare_from_rtx may call emit_queue, which would be deleted below
4265 if the scc insn fails. So call it ourselves before setting LAST.
4266 Likewise for do_pending_stack_adjust. */
4267
4268 emit_queue ();
4269 do_pending_stack_adjust ();
4270 last = get_last_insn ();
4271
4272 comparison
4273 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4274 if (GET_CODE (comparison) == CONST_INT)
4275 return (comparison == const0_rtx ? const0_rtx
4276 : normalizep == 1 ? const1_rtx
4277 : normalizep == -1 ? constm1_rtx
4278 : const_true_rtx);
4279
4280 /* If the code of COMPARISON doesn't match CODE, something is
4281 wrong; we can no longer be sure that we have the operation.
4282 We could handle this case, but it should not happen. */
4283
4284 if (GET_CODE (comparison) != code)
4285 abort ();
4286
4287 /* Get a reference to the target in the proper mode for this insn. */
4288 compare_mode = insn_data[(int) icode].operand[0].mode;
4289 subtarget = target;
4290 pred = insn_data[(int) icode].operand[0].predicate;
4291 if (preserve_subexpressions_p ()
4292 || ! (*pred) (subtarget, compare_mode))
4293 subtarget = gen_reg_rtx (compare_mode);
4294
4295 pattern = GEN_FCN (icode) (subtarget);
4296 if (pattern)
4297 {
4298 emit_insn (pattern);
4299
4300 /* If we are converting to a wider mode, first convert to
4301 TARGET_MODE, then normalize. This produces better combining
4302 opportunities on machines that have a SIGN_EXTRACT when we are
4303 testing a single bit. This mostly benefits the 68k.
4304
4305 If STORE_FLAG_VALUE does not have the sign bit set when
4306 interpreted in COMPARE_MODE, we can do this conversion as
4307 unsigned, which is usually more efficient. */
4308 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4309 {
4310 convert_move (target, subtarget,
4311 (GET_MODE_BITSIZE (compare_mode)
4312 <= HOST_BITS_PER_WIDE_INT)
4313 && 0 == (STORE_FLAG_VALUE
4314 & ((HOST_WIDE_INT) 1
4315 << (GET_MODE_BITSIZE (compare_mode) -1))));
4316 op0 = target;
4317 compare_mode = target_mode;
4318 }
4319 else
4320 op0 = subtarget;
4321
4322 /* If we want to keep subexpressions around, don't reuse our
4323 last target. */
4324
4325 if (preserve_subexpressions_p ())
4326 subtarget = 0;
4327
4328 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4329 we don't have to do anything. */
4330 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4331 ;
4332 /* STORE_FLAG_VALUE might be the most negative number, so write
4333 the comparison this way to avoid a compile-time warning. */
4334 else if (- normalizep == STORE_FLAG_VALUE)
4335 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4336
4337 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4338 makes it hard to use a value of just the sign bit due to
4339 ANSI integer constant typing rules. */
4340 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4341 && (STORE_FLAG_VALUE
4342 & ((HOST_WIDE_INT) 1
4343 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4344 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4345 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4346 subtarget, normalizep == 1);
4347 else if (STORE_FLAG_VALUE & 1)
4348 {
4349 op0 = expand_and (op0, const1_rtx, subtarget);
4350 if (normalizep == -1)
4351 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4352 }
4353 else
4354 abort ();
4355
4356 /* If we were converting to a smaller mode, do the
4357 conversion now. */
4358 if (target_mode != compare_mode)
4359 {
4360 convert_move (target, op0, 0);
4361 return target;
4362 }
4363 else
4364 return op0;
4365 }
4366 }
4367
4368 delete_insns_since (last);
4369
4370 /* If expensive optimizations are enabled, use different pseudo registers
4371 for each insn, instead of reusing the same pseudo. This leads to better
4372 CSE, but slows down the compiler, since there are more pseudos. */
4373 subtarget = (!flag_expensive_optimizations
4374 && (target_mode == mode)) ? target : NULL_RTX;
4375
4376 /* If we reached here, we can't do this with a scc insn. However, there
4377 are some comparisons that can be done directly. For example, if
4378 this is an equality comparison of integers, we can try to exclusive-or
4379 (or subtract) the two operands and use a recursive call to try the
4380 comparison with zero. Don't do any of these cases if branches are
4381 very cheap. */
4382
4383 if (BRANCH_COST > 0
4384 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4385 && op1 != const0_rtx)
4386 {
4387 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4388 OPTAB_WIDEN);
4389
4390 if (tem == 0)
4391 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4392 OPTAB_WIDEN);
4393 if (tem != 0)
4394 tem = emit_store_flag (target, code, tem, const0_rtx,
4395 mode, unsignedp, normalizep);
4396 if (tem == 0)
4397 delete_insns_since (last);
4398 return tem;
4399 }
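/* That is, A == B is reduced to (A ^ B) == 0, or to (A - B) == 0
   when no xor pattern is available, and the recursive call can then
   use the zero-comparison tricks below. */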
4400
4401 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4402 the constant zero. Reject all other comparisons at this point. Only
4403 do LE and GT if branches are expensive since they are expensive on
4404 2-operand machines. */
4405
4406 if (BRANCH_COST == 0
4407 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4408 || (code != EQ && code != NE
4409 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4410 return 0;
4411
4412 /* See what we need to return. We can only return a 1, -1, or the
4413 sign bit. */
4414
4415 if (normalizep == 0)
4416 {
4417 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4418 normalizep = STORE_FLAG_VALUE;
4419
4420 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4421 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4422 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4423 ;
4424 else
4425 return 0;
4426 }
4427
4428 /* Try to put the result of the comparison in the sign bit. Assume we can't
4429 do the necessary operation below. */
4430
4431 tem = 0;
4432
4433 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4434 the sign bit set. */
4435
4436 if (code == LE)
4437 {
4438 /* This is destructive, so SUBTARGET can't be OP0. */
4439 if (rtx_equal_p (subtarget, op0))
4440 subtarget = 0;
4441
4442 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4443 OPTAB_WIDEN);
4444 if (tem)
4445 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4446 OPTAB_WIDEN);
4447 }
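/* A couple of examples of why A | (A - 1) works: 0 | -1 == -1 and
   -3 | -4 == -3 both have the sign bit set, while 5 | 4 == 5 does
   not, so the sign bit is set exactly when A <= 0. */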
4448
4449 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4450 number of bits in the mode of OP0, minus one. */
4451
4452 if (code == GT)
4453 {
4454 if (rtx_equal_p (subtarget, op0))
4455 subtarget = 0;
4456
4457 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4458 size_int (GET_MODE_BITSIZE (mode) - 1),
4459 subtarget, 0);
4460 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4461 OPTAB_WIDEN);
4462 }
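/* E.g. with 32-bit values: for A == 5, (5 >> 31) - 5 == -5 has the
   sign bit set; for A == 0 the result is 0, and for A == -3 it is
   -1 - -3 == 2, sign bit clear, so the sign bit is set exactly
   when A > 0. */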
4463
4464 if (code == EQ || code == NE)
4465 {
4466 /* For EQ or NE, one way to do the comparison is to apply an operation
4467 that converts the operand into a positive number if it is non-zero
4468 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4469 for NE we negate. This puts the result in the sign bit. Then we
4470 normalize with a shift, if needed.
4471
4472 Two operations that can do the above actions are ABS and FFS, so try
4473 them. If that doesn't work, and MODE is smaller than a full word,
4474 we can use zero-extension to the wider mode (an unsigned conversion)
4475 as the operation. */
4476
4477 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4478 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4479 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4480 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4481 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4482 {
4483 op0 = protect_from_queue (op0, 0);
4484 tem = convert_modes (word_mode, mode, op0, 1);
4485 mode = word_mode;
4486 }
4487
4488 if (tem != 0)
4489 {
4490 if (code == EQ)
4491 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4492 0, OPTAB_WIDEN);
4493 else
4494 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4495 }
4496
4497 /* If we couldn't do it that way, for NE we can "or" the two's complement
4498 of the value with itself. For EQ, we take the one's complement of
4499 that "or", which is an extra insn, so we only handle EQ if branches
4500 are expensive. */
4501
4502 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4503 {
4504 if (rtx_equal_p (subtarget, op0))
4505 subtarget = 0;
4506
4507 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4508 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4509 OPTAB_WIDEN);
4510
4511 if (tem && code == EQ)
4512 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4513 }
4514 }
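/* For example, with ABS on 32-bit values: abs (A) - 1 is negative
   exactly when A == 0, which handles EQ, while -abs (A) is negative
   exactly when A != 0, which handles NE (even for A == INT_MIN,
   where abs leaves the sign bit set). */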
4515
4516 if (tem && normalizep)
4517 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4518 size_int (GET_MODE_BITSIZE (mode) - 1),
4519 subtarget, normalizep == 1);
4520
4521 if (tem)
4522 {
4523 if (GET_MODE (tem) != target_mode)
4524 {
4525 convert_move (target, tem, 0);
4526 tem = target;
4527 }
4528 else if (!subtarget)
4529 {
4530 emit_move_insn (target, tem);
4531 tem = target;
4532 }
4533 }
4534 else
4535 delete_insns_since (last);
4536
4537 return tem;
4538 }
4539
4540 /* Like emit_store_flag, but always succeeds. */
4541
4542 rtx
4543 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4544 rtx target;
4545 enum rtx_code code;
4546 rtx op0, op1;
4547 enum machine_mode mode;
4548 int unsignedp;
4549 int normalizep;
4550 {
4551 rtx tem, label;
4552
4553 /* First see if emit_store_flag can do the job. */
4554 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4555 if (tem != 0)
4556 return tem;
4557
4558 if (normalizep == 0)
4559 normalizep = 1;
4560
4561 /* If this failed, we have to do this with set/compare/jump/set code. */
4562
4563 if (GET_CODE (target) != REG
4564 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4565 target = gen_reg_rtx (GET_MODE (target));
4566
4567 emit_move_insn (target, const1_rtx);
4568 label = gen_label_rtx ();
4569 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4570 NULL_RTX, label);
4571
4572 emit_move_insn (target, const0_rtx);
4573 emit_label (label);
4574
4575 return target;
4576 }
4577 \f
4578 /* Perform possibly multi-word comparison and conditional jump to LABEL
4579 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4580
4581 The algorithm is based on the code in expr.c:do_jump.
4582
4583 Note that this does not perform a general comparison. Only variants
4584 generated within expmed.c are correctly handled; others abort (but could
4585 be handled if needed). */
4586
4587 static void
4588 do_cmp_and_jump (arg1, arg2, op, mode, label)
4589 rtx arg1, arg2, label;
4590 enum rtx_code op;
4591 enum machine_mode mode;
4592 {
4593 /* If this mode is an integer too wide to compare properly,
4594 compare word by word. Rely on cse to optimize constant cases. */
4595
4596 if (GET_MODE_CLASS (mode) == MODE_INT
4597 && ! can_compare_p (op, mode, ccp_jump))
4598 {
4599 rtx label2 = gen_label_rtx ();
4600
4601 switch (op)
4602 {
4603 case LTU:
4604 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4605 break;
4606
4607 case LEU:
4608 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4609 break;
4610
4611 case LT:
4612 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4613 break;
4614
4615 case GT:
4616 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4617 break;
4618
4619 case GE:
4620 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4621 break;
4622
4623 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4624 those are the only equality operations we do. */
4625 case EQ:
4626 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4627 abort ();
4628 do_jump_by_parts_equality_rtx (arg1, label2, label);
4629 break;
4630
4631 case NE:
4632 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4633 abort ();
4634 do_jump_by_parts_equality_rtx (arg1, label, label2);
4635 break;
4636
4637 default:
4638 abort ();
4639 }
4640
4641 emit_label (label2);
4642 }
4643 else
4644 {
4645 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);
4646 }
4647 }