re PR middle-end/47968 (ICE: in gen_lowpart_general, at rtlhooks.c:51 when converting...
[gcc.git] / gcc / expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 2011
6 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
41 #include "expmed.h"
42
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
47
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx);
51 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, rtx);
53 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
54 unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT,
56 unsigned HOST_WIDE_INT, rtx, int, bool);
57 static rtx mask_rtx (enum machine_mode, int, int, int);
58 static rtx lshift_value (enum machine_mode, rtx, int, int);
59 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
60 unsigned HOST_WIDE_INT, int);
61 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
62 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
63 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
64
65 /* Test whether a value is zero or a power of two. */
66 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
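/* Worked example: x = 8 is binary 1000 and x - 1 = 0111, so
   8 & 7 == 0 and the macro is true; x = 6 is 0110 and 6 & 5 = 0100,
   nonzero, so it is false.  Zero also passes, hence the name.  */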
67
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
71
72
73 /* Reduce conditional compilation elsewhere. */
74 #ifndef HAVE_insv
75 #define HAVE_insv 0
76 #define CODE_FOR_insv CODE_FOR_nothing
77 #define gen_insv(a,b,c,d) NULL_RTX
78 #endif
79 #ifndef HAVE_extv
80 #define HAVE_extv 0
81 #define CODE_FOR_extv CODE_FOR_nothing
82 #define gen_extv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extzv
85 #define HAVE_extzv 0
86 #define CODE_FOR_extzv CODE_FOR_nothing
87 #define gen_extzv(a,b,c,d) NULL_RTX
88 #endif
89
90 void
91 init_expmed (void)
92 {
93 struct
94 {
95 struct rtx_def reg; rtunion reg_fld[2];
96 struct rtx_def plus; rtunion plus_fld1;
97 struct rtx_def neg;
98 struct rtx_def mult; rtunion mult_fld1;
99 struct rtx_def sdiv; rtunion sdiv_fld1;
100 struct rtx_def udiv; rtunion udiv_fld1;
101 struct rtx_def zext;
102 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
103 struct rtx_def smod_32; rtunion smod_32_fld1;
104 struct rtx_def wide_mult; rtunion wide_mult_fld1;
105 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
106 struct rtx_def wide_trunc;
107 struct rtx_def shift; rtunion shift_fld1;
108 struct rtx_def shift_mult; rtunion shift_mult_fld1;
109 struct rtx_def shift_add; rtunion shift_add_fld1;
110 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
111 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
112 } all;
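/* ALL packs one stack-allocated skeleton rtx of each shape we need
   to price.  The loops below re-stamp their modes with PUT_MODE and
   ask rtx_cost for each shape, so no rtxes have to be allocated from
   the heap while the cost tables are filled in.  */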
113
114 rtx pow2[MAX_BITS_PER_WORD];
115 rtx cint[MAX_BITS_PER_WORD];
116 int m, n;
117 enum machine_mode mode, wider_mode;
118 int speed;
119
120
121 for (m = 1; m < MAX_BITS_PER_WORD; m++)
122 {
123 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
124 cint[m] = GEN_INT (m);
125 }
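/* pow2[m] now holds the constant 1 << m and cint[m] the constant m,
   ready to be spliced into the shift and shift-add templates below.  */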
126 memset (&all, 0, sizeof all);
127
128 PUT_CODE (&all.reg, REG);
129 /* Avoid using hard regs in ways which may be unsupported. */
130 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
131
132 PUT_CODE (&all.plus, PLUS);
133 XEXP (&all.plus, 0) = &all.reg;
134 XEXP (&all.plus, 1) = &all.reg;
135
136 PUT_CODE (&all.neg, NEG);
137 XEXP (&all.neg, 0) = &all.reg;
138
139 PUT_CODE (&all.mult, MULT);
140 XEXP (&all.mult, 0) = &all.reg;
141 XEXP (&all.mult, 1) = &all.reg;
142
143 PUT_CODE (&all.sdiv, DIV);
144 XEXP (&all.sdiv, 0) = &all.reg;
145 XEXP (&all.sdiv, 1) = &all.reg;
146
147 PUT_CODE (&all.udiv, UDIV);
148 XEXP (&all.udiv, 0) = &all.reg;
149 XEXP (&all.udiv, 1) = &all.reg;
150
151 PUT_CODE (&all.sdiv_32, DIV);
152 XEXP (&all.sdiv_32, 0) = &all.reg;
153 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
154
155 PUT_CODE (&all.smod_32, MOD);
156 XEXP (&all.smod_32, 0) = &all.reg;
157 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
158
159 PUT_CODE (&all.zext, ZERO_EXTEND);
160 XEXP (&all.zext, 0) = &all.reg;
161
162 PUT_CODE (&all.wide_mult, MULT);
163 XEXP (&all.wide_mult, 0) = &all.zext;
164 XEXP (&all.wide_mult, 1) = &all.zext;
165
166 PUT_CODE (&all.wide_lshr, LSHIFTRT);
167 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
168
169 PUT_CODE (&all.wide_trunc, TRUNCATE);
170 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
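/* Chained together, zext/wide_mult/wide_lshr/wide_trunc form
   (truncate (lshiftrt (mult (zero_extend x) (zero_extend x)) N)),
   the canonical RTL shape of a highpart multiply; it is priced
   below as mul_highpart_cost.  */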
171
172 PUT_CODE (&all.shift, ASHIFT);
173 XEXP (&all.shift, 0) = &all.reg;
174
175 PUT_CODE (&all.shift_mult, MULT);
176 XEXP (&all.shift_mult, 0) = &all.reg;
177
178 PUT_CODE (&all.shift_add, PLUS);
179 XEXP (&all.shift_add, 0) = &all.shift_mult;
180 XEXP (&all.shift_add, 1) = &all.reg;
181
182 PUT_CODE (&all.shift_sub0, MINUS);
183 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
184 XEXP (&all.shift_sub0, 1) = &all.reg;
185
186 PUT_CODE (&all.shift_sub1, MINUS);
187 XEXP (&all.shift_sub1, 0) = &all.reg;
188 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
189
190 for (speed = 0; speed < 2; speed++)
191 {
192 crtl->maybe_hot_insn_p = speed;
193 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
194
195 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
196 mode != VOIDmode;
197 mode = GET_MODE_WIDER_MODE (mode))
198 {
199 PUT_MODE (&all.reg, mode);
200 PUT_MODE (&all.plus, mode);
201 PUT_MODE (&all.neg, mode);
202 PUT_MODE (&all.mult, mode);
203 PUT_MODE (&all.sdiv, mode);
204 PUT_MODE (&all.udiv, mode);
205 PUT_MODE (&all.sdiv_32, mode);
206 PUT_MODE (&all.smod_32, mode);
207 PUT_MODE (&all.wide_trunc, mode);
208 PUT_MODE (&all.shift, mode);
209 PUT_MODE (&all.shift_mult, mode);
210 PUT_MODE (&all.shift_add, mode);
211 PUT_MODE (&all.shift_sub0, mode);
212 PUT_MODE (&all.shift_sub1, mode);
213
214 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
215 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
216 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
217 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
218 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
219
220 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
221 <= 2 * add_cost[speed][mode]);
222 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
223 <= 4 * add_cost[speed][mode]);
224
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
227 {
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
232
233 mul_widen_cost[speed][wider_mode]
234 = rtx_cost (&all.wide_mult, SET, speed);
235 mul_highpart_cost[speed][mode]
236 = rtx_cost (&all.wide_trunc, SET, speed);
237 }
238
239 shift_cost[speed][mode][0] = 0;
240 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
241 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
242
243 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
244 for (m = 1; m < n; m++)
245 {
246 XEXP (&all.shift, 1) = cint[m];
247 XEXP (&all.shift_mult, 1) = pow2[m];
248
249 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
250 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
251 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
252 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
253 }
254 }
255 }
256 if (alg_hash_used_p)
257 memset (alg_hash, 0, sizeof (alg_hash));
258 else
259 alg_hash_used_p = true;
260 default_rtl_profile ();
261 }
262
263 /* Return an rtx representing minus the value of X.
264 MODE is the intended mode of the result,
265 useful if X is a CONST_INT. */
266
267 rtx
268 negate_rtx (enum machine_mode mode, rtx x)
269 {
270 rtx result = simplify_unary_operation (NEG, mode, x, mode);
271
272 if (result == 0)
273 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
274
275 return result;
276 }
277
278 /* Report on the availability of insv/extv/extzv and the desired mode
279 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
280 is false; else the mode of the specified operand. If OPNO is -1,
281 all the caller cares about is whether the insn is available. */
282 enum machine_mode
283 mode_for_extraction (enum extraction_pattern pattern, int opno)
284 {
285 const struct insn_data_d *data;
286
287 switch (pattern)
288 {
289 case EP_insv:
290 if (HAVE_insv)
291 {
292 data = &insn_data[CODE_FOR_insv];
293 break;
294 }
295 return MAX_MACHINE_MODE;
296
297 case EP_extv:
298 if (HAVE_extv)
299 {
300 data = &insn_data[CODE_FOR_extv];
301 break;
302 }
303 return MAX_MACHINE_MODE;
304
305 case EP_extzv:
306 if (HAVE_extzv)
307 {
308 data = &insn_data[CODE_FOR_extzv];
309 break;
310 }
311 return MAX_MACHINE_MODE;
312
313 default:
314 gcc_unreachable ();
315 }
316
317 if (opno == -1)
318 return VOIDmode;
319
320 /* Everyone who uses this function used to follow it with
321 if (result == VOIDmode) result = word_mode; */
322 if (data->operand[opno].mode == VOIDmode)
323 return word_mode;
324 return data->operand[opno].mode;
325 }
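/* For example, mode_for_extraction (EP_insv, 3) yields the mode the
   insv pattern wants for its value operand, defaulting to word_mode
   when the pattern leaves that operand's mode unspecified; this is
   how OP_MODE is obtained in store_bit_field_1 below.  */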
326
327 /* Return true if X, of mode MODE, matches the predicate for operand
328 OPNO of instruction ICODE. Allow volatile memories, regardless of
329 the ambient volatile_ok setting. */
330
331 static bool
332 check_predicate_volatile_ok (enum insn_code icode, int opno,
333 rtx x, enum machine_mode mode)
334 {
335 bool save_volatile_ok, result;
336
337 save_volatile_ok = volatile_ok;
338 result = insn_data[(int) icode].operand[opno].predicate (x, mode);
339 volatile_ok = save_volatile_ok;
340 return result;
341 }
342 \f
343 /* A subroutine of store_bit_field, with the same arguments. Return true
344 if the operation could be implemented.
345
346 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
347 no other way of implementing the operation. If FALLBACK_P is false,
348 return false instead. */
349
350 static bool
351 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
352 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
353 rtx value, bool fallback_p)
354 {
355 unsigned int unit
356 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
357 unsigned HOST_WIDE_INT offset, bitpos;
358 rtx op0 = str_rtx;
359 int byte_offset;
360 rtx orig_value;
361
362 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
363
364 while (GET_CODE (op0) == SUBREG)
365 {
366 /* The following line once was done only if WORDS_BIG_ENDIAN,
367 but I think that is a mistake. WORDS_BIG_ENDIAN is
368 meaningful at a much higher level; when structures are copied
369 between memory and regs, the higher-numbered regs
370 always get higher addresses. */
371 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
372 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
373
374 byte_offset = 0;
375
376 /* Paradoxical subregs need special handling on big endian machines. */
377 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
378 {
379 int difference = inner_mode_size - outer_mode_size;
380
381 if (WORDS_BIG_ENDIAN)
382 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
383 if (BYTES_BIG_ENDIAN)
384 byte_offset += difference % UNITS_PER_WORD;
385 }
386 else
387 byte_offset = SUBREG_BYTE (op0);
388
389 bitnum += byte_offset * BITS_PER_UNIT;
390 op0 = SUBREG_REG (op0);
391 }
392
393 /* No action is needed if the target is a register and if the field
394 lies completely outside that register. This can occur if the source
395 code contains an out-of-bounds access to a small array. */
396 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
397 return true;
398
399 /* Use vec_set patterns for inserting parts of vectors whenever
400 available. */
401 if (VECTOR_MODE_P (GET_MODE (op0))
402 && !MEM_P (op0)
403 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
404 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
405 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
406 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
407 {
408 enum machine_mode outermode = GET_MODE (op0);
409 enum machine_mode innermode = GET_MODE_INNER (outermode);
410 int icode = (int) optab_handler (vec_set_optab, outermode);
411 int pos = bitnum / GET_MODE_BITSIZE (innermode);
412 rtx rtxpos = GEN_INT (pos);
413 rtx src = value;
414 rtx dest = op0;
415 rtx pat, seq;
416 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
417 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
418 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
419
420 start_sequence ();
421
422 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
423 src = copy_to_mode_reg (mode1, src);
424
425 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
426 rtxpos = copy_to_mode_reg (mode2, rtxpos);
427
428 /* We could handle this, but we should always be called with a pseudo
429 for our targets and all insns should take them as outputs. */
430 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
431 && (*insn_data[icode].operand[1].predicate) (src, mode1)
432 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
433 pat = GEN_FCN (icode) (dest, src, rtxpos);
434 seq = get_insns ();
435 end_sequence ();
436 if (pat)
437 {
438 emit_insn (seq);
439 emit_insn (pat);
440 return true;
441 }
442 }
443
444 /* If the target is a register, then overwriting the entire object or
445 storing a full-word or multi-word field can be done with just a SUBREG.
446
447 If the target is memory, storing any naturally aligned field can be
448 done with a simple store. For targets that support fast unaligned
449 memory, any naturally sized, unit aligned field can be done directly. */
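/* For instance, storing a full SImode value at bit 0 of an SImode
   pseudo needs no bit-field machinery at all: it becomes a plain
   emit_move_insn, possibly through a subreg.  */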
450
451 offset = bitnum / unit;
452 bitpos = bitnum % unit;
453 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
454 + (offset * UNITS_PER_WORD);
455
456 if (bitpos == 0
457 && bitsize == GET_MODE_BITSIZE (fieldmode)
458 && (!MEM_P (op0)
459 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
460 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
461 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
462 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
463 || (offset * BITS_PER_UNIT % bitsize == 0
464 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
465 {
466 if (MEM_P (op0))
467 op0 = adjust_address (op0, fieldmode, offset);
468 else if (GET_MODE (op0) != fieldmode)
469 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
470 byte_offset);
471 emit_move_insn (op0, value);
472 return true;
473 }
474
475 /* Make sure we are playing with integral modes. Pun with subregs
476 if we aren't. This must come after the entire register case above,
477 since that case is valid for any mode. The following cases are only
478 valid for integral modes. */
479 {
480 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
481 if (imode != GET_MODE (op0))
482 {
483 if (MEM_P (op0))
484 op0 = adjust_address (op0, imode, 0);
485 else
486 {
487 gcc_assert (imode != BLKmode);
488 op0 = gen_lowpart (imode, op0);
489 }
490 }
491 }
492
493 /* We may be accessing data outside the field, which means
494 we can alias adjacent data. */
495 if (MEM_P (op0))
496 {
497 op0 = shallow_copy_rtx (op0);
498 set_mem_alias_set (op0, 0);
499 set_mem_expr (op0, 0);
500 }
501
502 /* If OP0 is a register, BITPOS must count within a word.
503 But as we have it, it counts within whatever size OP0 now has.
504 On a bigendian machine, these are not the same, so convert. */
505 if (BYTES_BIG_ENDIAN
506 && !MEM_P (op0)
507 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
508 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
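/* Worked illustration, assuming a 32-bit word: with OP0 in QImode,
   big-endian bit 0 of the byte is bit 24 of the containing word
   (the byte occupies the word's least significant end), hence the
   bias of 32 - 8 = 24.  */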
509
510 /* Storing an lsb-aligned field in a register
511 can be done with a movestrict instruction. */
512
513 if (!MEM_P (op0)
514 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
515 && bitsize == GET_MODE_BITSIZE (fieldmode)
516 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
517 {
518 int icode = optab_handler (movstrict_optab, fieldmode);
519 rtx insn;
520 rtx start = get_last_insn ();
521 rtx arg0 = op0;
522
523 /* Get appropriate low part of the value being stored. */
524 if (CONST_INT_P (value) || REG_P (value))
525 value = gen_lowpart (fieldmode, value);
526 else if (!(GET_CODE (value) == SYMBOL_REF
527 || GET_CODE (value) == LABEL_REF
528 || GET_CODE (value) == CONST))
529 value = convert_to_mode (fieldmode, value, 0);
530
531 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
532 value = copy_to_mode_reg (fieldmode, value);
533
534 if (GET_CODE (op0) == SUBREG)
535 {
536 /* Else we've got some float mode source being extracted into
537 a different float mode destination -- this combination of
538 subregs results in Severe Tire Damage. */
539 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
540 || GET_MODE_CLASS (fieldmode) == MODE_INT
541 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
542 arg0 = SUBREG_REG (op0);
543 }
544
545 insn = (GEN_FCN (icode)
546 (gen_rtx_SUBREG (fieldmode, arg0,
547 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
548 + (offset * UNITS_PER_WORD)),
549 value));
550 if (insn)
551 {
552 emit_insn (insn);
553 return true;
554 }
555 delete_insns_since (start);
556 }
557
558 /* Handle fields bigger than a word. */
559
560 if (bitsize > BITS_PER_WORD)
561 {
562 /* Here we transfer the words of the field
563 in the order least significant first.
564 This is because the most significant word is the one which may
565 be less than full.
566 However, only do that if the value is not BLKmode. */
567
568 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
569 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
570 unsigned int i;
571 rtx last;
572
573 /* This is the mode we must force value to, so that there will be enough
574 subwords to extract. Note that fieldmode will often (always?) be
575 VOIDmode, because that is what store_field uses to indicate that this
576 is a bit field, but passing VOIDmode to operand_subword_force
577 is not allowed. */
578 fieldmode = GET_MODE (value);
579 if (fieldmode == VOIDmode)
580 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
581
582 last = get_last_insn ();
583 for (i = 0; i < nwords; i++)
584 {
585 /* If I is 0, use the low-order word in both field and target;
586 if I is 1, use the next to lowest word; and so on. */
587 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
588 unsigned int bit_offset = (backwards
589 ? MAX ((int) bitsize - ((int) i + 1)
590 * BITS_PER_WORD,
591 0)
592 : (int) i * BITS_PER_WORD);
593 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
594
595 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
596 bitsize - i * BITS_PER_WORD),
597 bitnum + bit_offset, word_mode,
598 value_word, fallback_p))
599 {
600 delete_insns_since (last);
601 return false;
602 }
603 }
604 return true;
605 }
606
607 /* From here on we can assume that the field to be stored is no wider
608 than a word, since wider fields were handled above. */
609
610 /* OFFSET is the number of words or bytes (UNIT says which)
611 from STR_RTX to the first word or byte containing part of the field. */
612
613 if (!MEM_P (op0))
614 {
615 if (offset != 0
616 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
617 {
618 if (!REG_P (op0))
619 {
620 /* Since this is a destination (lvalue), we can't copy
621 it to a pseudo. We can remove a SUBREG that does not
622 change the size of the operand. Such a SUBREG may
623 have been added above. */
624 gcc_assert (GET_CODE (op0) == SUBREG
625 && (GET_MODE_SIZE (GET_MODE (op0))
626 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
627 op0 = SUBREG_REG (op0);
628 }
629 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
630 op0, (offset * UNITS_PER_WORD));
631 }
632 offset = 0;
633 }
634
635 /* If VALUE has a floating-point or complex mode, access it as an
636 integer of the corresponding size. This can occur on a machine
637 with 64 bit registers that uses SFmode for float. It can also
638 occur for unaligned float or complex fields. */
639 orig_value = value;
640 if (GET_MODE (value) != VOIDmode
641 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
642 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
643 {
644 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
645 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
646 }
647
648 /* Now OFFSET is nonzero only if OP0 is memory
649 and is therefore always measured in bytes. */
650
651 if (HAVE_insv
652 && GET_MODE (value) != BLKmode
653 && bitsize > 0
654 && GET_MODE_BITSIZE (op_mode) >= bitsize
655 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
656 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
657 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
658 VOIDmode)
659 && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
660 {
661 int xbitpos = bitpos;
662 rtx value1;
663 rtx xop0 = op0;
664 rtx last = get_last_insn ();
665 rtx pat;
666 bool copy_back = false;
667
668 /* Add OFFSET into OP0's address. */
669 if (MEM_P (xop0))
670 xop0 = adjust_address (xop0, byte_mode, offset);
671
672 /* If xop0 is a register, we need it in OP_MODE
673 to make it acceptable to the format of insv. */
674 if (GET_CODE (xop0) == SUBREG)
675 /* We can't just change the mode, because this might clobber op0,
676 and we will need the original value of op0 if insv fails. */
677 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
678 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
679 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
680
681 /* If the destination is a paradoxical subreg such that we need a
682 truncate to the inner mode, perform the insertion on a temporary and
683 truncate the result to the original destination. Note that we can't
684 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
685 X) 0)) is (reg:N X). */
686 if (GET_CODE (xop0) == SUBREG
687 && REG_P (SUBREG_REG (xop0))
688 && (!TRULY_NOOP_TRUNCATION
689 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
690 GET_MODE_BITSIZE (op_mode))))
691 {
692 rtx tem = gen_reg_rtx (op_mode);
693 emit_move_insn (tem, xop0);
694 xop0 = tem;
695 copy_back = true;
696 }
697
698 /* On big-endian machines, we count bits from the most significant.
699 If the bit field insn does not, we must invert. */
700
701 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
702 xbitpos = unit - bitsize - xbitpos;
703
704 /* We have been counting XBITPOS within UNIT.
705 Count instead within the size of the register. */
706 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
707 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
708
709 unit = GET_MODE_BITSIZE (op_mode);
710
711 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
712 value1 = value;
713 if (GET_MODE (value) != op_mode)
714 {
715 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
716 {
717 /* Optimization: Don't bother really extending VALUE
718 if it has all the bits we will actually use. However,
719 if we must narrow it, be sure we do it correctly. */
720
721 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
722 {
723 rtx tmp;
724
725 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
726 if (! tmp)
727 tmp = simplify_gen_subreg (op_mode,
728 force_reg (GET_MODE (value),
729 value1),
730 GET_MODE (value), 0);
731 value1 = tmp;
732 }
733 else
734 value1 = gen_lowpart (op_mode, value1);
735 }
736 else if (CONST_INT_P (value))
737 value1 = gen_int_mode (INTVAL (value), op_mode);
738 else
739 /* Parse phase is supposed to make VALUE's data type
740 match that of the component reference, which is a type
741 at least as wide as the field; so VALUE should have
742 a mode that corresponds to that type. */
743 gcc_assert (CONSTANT_P (value));
744 }
745
746 /* If this machine's insv insists on a register,
747 get VALUE1 into a register. */
748 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
749 (value1, op_mode)))
750 value1 = force_reg (op_mode, value1);
751
752 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
753 if (pat)
754 {
755 emit_insn (pat);
756
757 if (copy_back)
758 convert_move (op0, xop0, true);
759 return true;
760 }
761 delete_insns_since (last);
762 }
763
764 /* If OP0 is a memory, try copying it to a register and seeing if a
765 cheap register alternative is available. */
766 if (HAVE_insv && MEM_P (op0))
767 {
768 enum machine_mode bestmode;
769
770 /* Get the mode to use for inserting into this field. If OP0 is
771 BLKmode, get the smallest mode consistent with the alignment. If
772 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
773 mode. Otherwise, use the smallest mode containing the field. */
774
775 if (GET_MODE (op0) == BLKmode
776 || (op_mode != MAX_MACHINE_MODE
777 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
778 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
779 (op_mode == MAX_MACHINE_MODE
780 ? VOIDmode : op_mode),
781 MEM_VOLATILE_P (op0));
782 else
783 bestmode = GET_MODE (op0);
784
785 if (bestmode != VOIDmode
786 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
787 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
788 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
789 {
790 rtx last, tempreg, xop0;
791 unsigned HOST_WIDE_INT xoffset, xbitpos;
792
793 last = get_last_insn ();
794
795 /* Adjust address to point to the containing unit of
796 that mode. Compute the offset as a multiple of this unit,
797 counting in bytes. */
798 unit = GET_MODE_BITSIZE (bestmode);
799 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
800 xbitpos = bitnum % unit;
801 xop0 = adjust_address (op0, bestmode, xoffset);
802
803 /* Fetch that unit, store the bitfield in it, then store
804 the unit. */
805 tempreg = copy_to_reg (xop0);
806 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
807 fieldmode, orig_value, false))
808 {
809 emit_move_insn (xop0, tempreg);
810 return true;
811 }
812 delete_insns_since (last);
813 }
814 }
815
816 if (!fallback_p)
817 return false;
818
819 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
820 return true;
821 }
822
823 /* Generate code to store value from rtx VALUE
824 into a bit-field within structure STR_RTX
825 containing BITSIZE bits starting at bit BITNUM.
826 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
827
828 void
829 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
830 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
831 rtx value)
832 {
833 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
834 gcc_unreachable ();
835 }
836 \f
837 /* Use shifts and boolean operations to store VALUE
838 into a bit field of width BITSIZE
839 in a memory location specified by OP0, but offset by OFFSET bytes.
840 (OFFSET must be 0 if OP0 is a register.)
841 The field starts at position BITPOS within the byte.
842 (If OP0 is a register, it may be a full word or a narrower mode,
843 but BITPOS still counts within a full word,
844 which is significant on bigendian machines.) */
845
846 static void
847 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
848 unsigned HOST_WIDE_INT bitsize,
849 unsigned HOST_WIDE_INT bitpos, rtx value)
850 {
851 enum machine_mode mode;
852 unsigned int total_bits = BITS_PER_WORD;
853 rtx temp;
854 int all_zero = 0;
855 int all_one = 0;
856
857 /* There is a case not handled here:
858 a structure with a known alignment of just a halfword
859 and a field split across two aligned halfwords within the structure.
860 Or likewise a structure with a known alignment of just a byte
861 and a field split across two bytes.
862 Such cases are not supposed to be able to occur. */
863
864 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
865 {
866 gcc_assert (!offset);
867 /* Special treatment for a bit field split across two registers. */
868 if (bitsize + bitpos > BITS_PER_WORD)
869 {
870 store_split_bit_field (op0, bitsize, bitpos, value);
871 return;
872 }
873 }
874 else
875 {
876 /* Get the proper mode to use for this field. We want a mode that
877 includes the entire field. If such a mode would be larger than
878 a word, we won't be doing the extraction the normal way.
879 We don't want a mode bigger than the destination. */
880
881 mode = GET_MODE (op0);
882 if (GET_MODE_BITSIZE (mode) == 0
883 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
884 mode = word_mode;
885
886 if (MEM_VOLATILE_P (op0)
887 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
888 && flag_strict_volatile_bitfields > 0)
889 mode = GET_MODE (op0);
890 else
891 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
892 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
893
894 if (mode == VOIDmode)
895 {
896 /* The only way this should occur is if the field spans word
897 boundaries. */
898 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
899 value);
900 return;
901 }
902
903 total_bits = GET_MODE_BITSIZE (mode);
904
905 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
906 be in the range 0 to total_bits-1, and put any excess bytes in
907 OFFSET. */
908 if (bitpos >= total_bits)
909 {
910 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
911 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
912 * BITS_PER_UNIT);
913 }
914
915 /* Get ref to an aligned byte, halfword, or word containing the field.
916 Adjust BITPOS to be position within a word,
917 and OFFSET to be the offset of that word.
918 Then alter OP0 to refer to that word. */
919 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
920 offset -= (offset % (total_bits / BITS_PER_UNIT));
921 op0 = adjust_address (op0, mode, offset);
922 }
923
924 mode = GET_MODE (op0);
925
926 /* Now MODE is either some integral mode for a MEM as OP0,
927 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
928 The bit field is contained entirely within OP0.
929 BITPOS is the starting bit number within OP0.
930 (OP0's mode may actually be narrower than MODE.) */
931
932 if (BYTES_BIG_ENDIAN)
933 /* BITPOS is the distance between our msb
934 and that of the containing datum.
935 Convert it to the distance from the lsb. */
936 bitpos = total_bits - bitsize - bitpos;
937
938 /* Now BITPOS is always the distance between our lsb
939 and that of OP0. */
940
941 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
942 we must first convert its mode to MODE. */
943
944 if (CONST_INT_P (value))
945 {
946 HOST_WIDE_INT v = INTVAL (value);
947
948 if (bitsize < HOST_BITS_PER_WIDE_INT)
949 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
950
951 if (v == 0)
952 all_zero = 1;
953 else if ((bitsize < HOST_BITS_PER_WIDE_INT
954 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
955 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
956 all_one = 1;
957
958 value = lshift_value (mode, value, bitpos, bitsize);
959 }
960 else
961 {
962 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
963 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
964
965 if (GET_MODE (value) != mode)
966 value = convert_to_mode (mode, value, 1);
967
968 if (must_and)
969 value = expand_binop (mode, and_optab, value,
970 mask_rtx (mode, 0, bitsize, 0),
971 NULL_RTX, 1, OPTAB_LIB_WIDEN);
972 if (bitpos > 0)
973 value = expand_shift (LSHIFT_EXPR, mode, value,
974 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
975 }
976
977 /* Now clear the chosen bits in OP0,
978 except that if VALUE is -1 we need not bother. */
979 /* We keep the intermediates in registers to allow CSE to combine
980 consecutive bitfield assignments. */
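/* Concretely, the sequence built below computes
     op0 = (op0 & ~(MASK << BITPOS)) | (VALUE << BITPOS)
   e.g. a 4-bit field at BITPOS 8 in an SImode word is cleared with
   AND ~0xf00 and the shifted value is then OR'd in.  */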
981
982 temp = force_reg (mode, op0);
983
984 if (! all_one)
985 {
986 temp = expand_binop (mode, and_optab, temp,
987 mask_rtx (mode, bitpos, bitsize, 1),
988 NULL_RTX, 1, OPTAB_LIB_WIDEN);
989 temp = force_reg (mode, temp);
990 }
991
992 /* Now logical-or VALUE into OP0, unless it is zero. */
993
994 if (! all_zero)
995 {
996 temp = expand_binop (mode, ior_optab, temp, value,
997 NULL_RTX, 1, OPTAB_LIB_WIDEN);
998 temp = force_reg (mode, temp);
999 }
1000
1001 if (op0 != temp)
1002 {
1003 op0 = copy_rtx (op0);
1004 emit_move_insn (op0, temp);
1005 }
1006 }
1007 \f
1008 /* Store a bit field that is split across multiple accessible memory objects.
1009
1010 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1011 BITSIZE is the field width; BITPOS the position of its first bit
1012 (within the word).
1013 VALUE is the value to store.
1014
1015 This does not yet handle fields wider than BITS_PER_WORD. */
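/* For example, on a 32-bit target a 12-bit field starting at bit 26
   of a word is stored as two pieces: the 6 bits that fit in the first
   word and the remaining 6 bits in the next word, each piece going
   through store_fixed_bit_field.  */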
1016
1017 static void
1018 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1019 unsigned HOST_WIDE_INT bitpos, rtx value)
1020 {
1021 unsigned int unit;
1022 unsigned int bitsdone = 0;
1023
1024 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1025 much at a time. */
1026 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1027 unit = BITS_PER_WORD;
1028 else
1029 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1030
1031 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1032 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1033 that VALUE might be a floating-point constant. */
1034 if (CONSTANT_P (value) && !CONST_INT_P (value))
1035 {
1036 rtx word = gen_lowpart_common (word_mode, value);
1037
1038 if (word && (value != word))
1039 value = word;
1040 else
1041 value = gen_lowpart_common (word_mode,
1042 force_reg (GET_MODE (value) != VOIDmode
1043 ? GET_MODE (value)
1044 : word_mode, value));
1045 }
1046
1047 while (bitsdone < bitsize)
1048 {
1049 unsigned HOST_WIDE_INT thissize;
1050 rtx part, word;
1051 unsigned HOST_WIDE_INT thispos;
1052 unsigned HOST_WIDE_INT offset;
1053
1054 offset = (bitpos + bitsdone) / unit;
1055 thispos = (bitpos + bitsdone) % unit;
1056
1057 /* THISSIZE must not overrun a word boundary. Otherwise,
1058 store_fixed_bit_field will call us again, and we will mutually
1059 recurse forever. */
1060 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1061 thissize = MIN (thissize, unit - thispos);
1062
1063 if (BYTES_BIG_ENDIAN)
1064 {
1065 int total_bits;
1066
1067 /* We must do an endian conversion exactly the same way as it is
1068 done in extract_bit_field, so that the two calls to
1069 extract_fixed_bit_field will have comparable arguments. */
1070 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1071 total_bits = BITS_PER_WORD;
1072 else
1073 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1074
1075 /* Fetch successively less significant portions. */
1076 if (CONST_INT_P (value))
1077 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1078 >> (bitsize - bitsdone - thissize))
1079 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1080 else
1081 /* The args are chosen so that the last part includes the
1082 lsb. Give extract_bit_field the value it needs (with
1083 endianness compensation) to fetch the piece we want. */
1084 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1085 total_bits - bitsize + bitsdone,
1086 NULL_RTX, 1, false);
1087 }
1088 else
1089 {
1090 /* Fetch successively more significant portions. */
1091 if (CONST_INT_P (value))
1092 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1093 >> bitsdone)
1094 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1095 else
1096 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1097 bitsdone, NULL_RTX, 1, false);
1098 }
1099
1100 /* If OP0 is a register, then handle OFFSET here.
1101
1102 When handling multiword bitfields, extract_bit_field may pass
1103 down a word_mode SUBREG of a larger REG for a bitfield that actually
1104 crosses a word boundary. Thus, for a SUBREG, we must find
1105 the current word starting from the base register. */
1106 if (GET_CODE (op0) == SUBREG)
1107 {
1108 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1109 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1110 GET_MODE (SUBREG_REG (op0)));
1111 offset = 0;
1112 }
1113 else if (REG_P (op0))
1114 {
1115 word = operand_subword_force (op0, offset, GET_MODE (op0));
1116 offset = 0;
1117 }
1118 else
1119 word = op0;
1120
1121 /* OFFSET is in UNITs, and UNIT is in bits.
1122 store_fixed_bit_field wants offset in bytes. */
1123 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1124 thispos, part);
1125 bitsdone += thissize;
1126 }
1127 }
1128 \f
1129 /* A subroutine of extract_bit_field_1 that converts return value X
1130 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1131 to extract_bit_field. */
1132
1133 static rtx
1134 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1135 enum machine_mode tmode, bool unsignedp)
1136 {
1137 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1138 return x;
1139
1140 /* If TMODE is not a scalar integral mode, first convert X to an
1141 integer mode of that size and then reinterpret those bits as
1142 TMODE (e.g. a float mode) via a SUBREG. */
1143 if (!SCALAR_INT_MODE_P (tmode))
1144 {
1145 enum machine_mode smode;
1146
1147 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1148 x = convert_to_mode (smode, x, unsignedp);
1149 x = force_reg (smode, x);
1150 return gen_lowpart (tmode, x);
1151 }
1152
1153 return convert_to_mode (tmode, x, unsignedp);
1154 }
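/* For instance, a request for an SFmode result goes through SImode:
   the raw field bits are converted to an SImode register and then
   reinterpreted as SFmode with gen_lowpart, since a genuine
   int-to-float conversion would change the bit pattern.  */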
1155
1156 /* A subroutine of extract_bit_field, with the same arguments.
1157 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1158 if we can find no other means of implementing the operation.
1159 If FALLBACK_P is false, return NULL instead. */
1160
1161 static rtx
1162 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1163 unsigned HOST_WIDE_INT bitnum,
1164 int unsignedp, bool packedp, rtx target,
1165 enum machine_mode mode, enum machine_mode tmode,
1166 bool fallback_p)
1167 {
1168 unsigned int unit
1169 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1170 unsigned HOST_WIDE_INT offset, bitpos;
1171 rtx op0 = str_rtx;
1172 enum machine_mode int_mode;
1173 enum machine_mode ext_mode;
1174 enum machine_mode mode1;
1175 enum insn_code icode;
1176 int byte_offset;
1177
1178 if (tmode == VOIDmode)
1179 tmode = mode;
1180
1181 while (GET_CODE (op0) == SUBREG)
1182 {
1183 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1184 op0 = SUBREG_REG (op0);
1185 }
1186
1187 /* If we have an out-of-bounds access to a register, just return an
1188 uninitialized register of the required mode. This can occur if the
1189 source code contains an out-of-bounds access to a small array. */
1190 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1191 return gen_reg_rtx (tmode);
1192
1193 if (REG_P (op0)
1194 && mode == GET_MODE (op0)
1195 && bitnum == 0
1196 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1197 {
1198 /* We're trying to extract a full register from itself. */
1199 return op0;
1200 }
1201
1202 /* See if we can get a better vector mode before extracting. */
1203 if (VECTOR_MODE_P (GET_MODE (op0))
1204 && !MEM_P (op0)
1205 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1206 {
1207 enum machine_mode new_mode;
1208
1209 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1210 new_mode = MIN_MODE_VECTOR_FLOAT;
1211 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1212 new_mode = MIN_MODE_VECTOR_FRACT;
1213 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1214 new_mode = MIN_MODE_VECTOR_UFRACT;
1215 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1216 new_mode = MIN_MODE_VECTOR_ACCUM;
1217 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1218 new_mode = MIN_MODE_VECTOR_UACCUM;
1219 else
1220 new_mode = MIN_MODE_VECTOR_INT;
1221
1222 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1223 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1224 && targetm.vector_mode_supported_p (new_mode))
1225 break;
1226 if (new_mode != VOIDmode)
1227 op0 = gen_lowpart (new_mode, op0);
1228 }
1229
1230 /* Use vec_extract patterns for extracting parts of vectors whenever
1231 available. */
1232 if (VECTOR_MODE_P (GET_MODE (op0))
1233 && !MEM_P (op0)
1234 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1235 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1236 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1237 {
1238 enum machine_mode outermode = GET_MODE (op0);
1239 enum machine_mode innermode = GET_MODE_INNER (outermode);
1240 int icode = (int) optab_handler (vec_extract_optab, outermode);
1241 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1242 rtx rtxpos = GEN_INT (pos);
1243 rtx src = op0;
1244 rtx dest = NULL, pat, seq;
1245 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1246 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1247 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1248
1249 if (innermode == tmode || innermode == mode)
1250 dest = target;
1251
1252 if (!dest)
1253 dest = gen_reg_rtx (innermode);
1254
1255 start_sequence ();
1256
1257 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1258 dest = copy_to_mode_reg (mode0, dest);
1259
1260 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1261 src = copy_to_mode_reg (mode1, src);
1262
1263 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1264 rtxpos = copy_to_mode_reg (mode2, rtxpos);
1265
1266 /* We could handle this, but we should always be called with a pseudo
1267 for our targets and all insns should take them as outputs. */
1268 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1269 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1270 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1271
1272 pat = GEN_FCN (icode) (dest, src, rtxpos);
1273 seq = get_insns ();
1274 end_sequence ();
1275 if (pat)
1276 {
1277 emit_insn (seq);
1278 emit_insn (pat);
1279 if (mode0 != mode)
1280 return gen_lowpart (tmode, dest);
1281 return dest;
1282 }
1283 }
1284
1285 /* Make sure we are playing with integral modes. Pun with subregs
1286 if we aren't. */
1287 {
1288 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1289 if (imode != GET_MODE (op0))
1290 {
1291 if (MEM_P (op0))
1292 op0 = adjust_address (op0, imode, 0);
1293 else if (imode != BLKmode)
1294 {
1295 op0 = gen_lowpart (imode, op0);
1296
1297 /* If we got a SUBREG, force it into a register since we
1298 aren't going to be able to do another SUBREG on it. */
1299 if (GET_CODE (op0) == SUBREG)
1300 op0 = force_reg (imode, op0);
1301 }
1302 else if (REG_P (op0))
1303 {
1304 rtx reg, subreg;
1305 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1306 MODE_INT);
1307 reg = gen_reg_rtx (imode);
1308 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1309 emit_move_insn (subreg, op0);
1310 op0 = reg;
1311 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1312 }
1313 else
1314 {
1315 rtx mem = assign_stack_temp (GET_MODE (op0),
1316 GET_MODE_SIZE (GET_MODE (op0)), 0);
1317 emit_move_insn (mem, op0);
1318 op0 = adjust_address (mem, BLKmode, 0);
1319 }
1320 }
1321 }
1322
1323 /* We may be accessing data outside the field, which means
1324 we can alias adjacent data. */
1325 if (MEM_P (op0))
1326 {
1327 op0 = shallow_copy_rtx (op0);
1328 set_mem_alias_set (op0, 0);
1329 set_mem_expr (op0, 0);
1330 }
1331
1332 /* Extraction of a full-word or multi-word value from a structure
1333 in a register or aligned memory can be done with just a SUBREG.
1334 A subword value in the least significant part of a register
1335 can also be extracted with a SUBREG. For this, we need the
1336 byte offset of the value in op0. */
1337
1338 bitpos = bitnum % unit;
1339 offset = bitnum / unit;
1340 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1341
1342 /* If OP0 is a register, BITPOS must count within a word.
1343 But as we have it, it counts within whatever size OP0 now has.
1344 On a bigendian machine, these are not the same, so convert. */
1345 if (BYTES_BIG_ENDIAN
1346 && !MEM_P (op0)
1347 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1348 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1349
1350 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1351 If that's wrong, the solution is to test for it and set TARGET to 0
1352 if needed. */
1353
1354 /* Only scalar integer modes can be converted via subregs. There is an
1355 additional problem for FP modes here in that they can have a precision
1356 which is different from the size. mode_for_size uses precision, but
1357 we want a mode based on the size, so we must avoid calling it for FP
1358 modes. */
1359 mode1 = (SCALAR_INT_MODE_P (tmode)
1360 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1361 : mode);
1362
1363 /* If the bitfield is volatile, we need to make sure the access
1364 remains on a type-aligned boundary. */
1365 if (GET_CODE (op0) == MEM
1366 && MEM_VOLATILE_P (op0)
1367 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1368 && flag_strict_volatile_bitfields > 0)
1369 goto no_subreg_mode_swap;
1370
1371 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1372 && bitpos % BITS_PER_WORD == 0)
1373 || (mode1 != BLKmode
1374 /* ??? The big endian test here is wrong. This is correct
1375 if the value is in a register, and if mode_for_size is not
1376 the same mode as op0. This causes us to get unnecessarily
1377 inefficient code from the Thumb port when -mbig-endian. */
1378 && (BYTES_BIG_ENDIAN
1379 ? bitpos + bitsize == BITS_PER_WORD
1380 : bitpos == 0)))
1381 && ((!MEM_P (op0)
1382 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1383 GET_MODE_BITSIZE (GET_MODE (op0)))
1384 && GET_MODE_SIZE (mode1) != 0
1385 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1386 || (MEM_P (op0)
1387 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1388 || (offset * BITS_PER_UNIT % bitsize == 0
1389 && MEM_ALIGN (op0) % bitsize == 0)))))
1390 {
1391 if (MEM_P (op0))
1392 op0 = adjust_address (op0, mode1, offset);
1393 else if (mode1 != GET_MODE (op0))
1394 {
1395 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1396 byte_offset);
1397 if (sub == NULL)
1398 goto no_subreg_mode_swap;
1399 op0 = sub;
1400 }
1401 if (mode1 != mode)
1402 return convert_to_mode (tmode, op0, unsignedp);
1403 return op0;
1404 }
1405 no_subreg_mode_swap:
1406
1407 /* Handle fields bigger than a word. */
1408
1409 if (bitsize > BITS_PER_WORD)
1410 {
1411 /* Here we transfer the words of the field
1412 in the order least significant first.
1413 This is because the most significant word is the one which may
1414 be less than full. */
1415
1416 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1417 unsigned int i;
1418
1419 if (target == 0 || !REG_P (target))
1420 target = gen_reg_rtx (mode);
1421
1422 /* Indicate for flow that the entire target reg is being set. */
1423 emit_clobber (target);
1424
1425 for (i = 0; i < nwords; i++)
1426 {
1427 /* If I is 0, use the low-order word in both field and target;
1428 if I is 1, use the next to lowest word; and so on. */
1429 /* Word number in TARGET to use. */
1430 unsigned int wordnum
1431 = (WORDS_BIG_ENDIAN
1432 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1433 : i);
1434 /* Offset from start of field in OP0. */
1435 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1436 ? MAX (0, ((int) bitsize - ((int) i + 1)
1437 * (int) BITS_PER_WORD))
1438 : (int) i * BITS_PER_WORD);
1439 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1440 rtx result_part
1441 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1442 bitsize - i * BITS_PER_WORD),
1443 bitnum + bit_offset, 1, false, target_part, mode,
1444 word_mode);
1445
1446 gcc_assert (target_part);
1447
1448 if (result_part != target_part)
1449 emit_move_insn (target_part, result_part);
1450 }
1451
1452 if (unsignedp)
1453 {
1454 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1455 need to be zero'd out. */
1456 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1457 {
1458 unsigned int i, total_words;
1459
1460 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1461 for (i = nwords; i < total_words; i++)
1462 emit_move_insn
1463 (operand_subword (target,
1464 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1465 1, VOIDmode),
1466 const0_rtx);
1467 }
1468 return target;
1469 }
1470
1471 /* Signed bit field: sign-extend with two arithmetic shifts. */
1472 target = expand_shift (LSHIFT_EXPR, mode, target,
1473 build_int_cst (NULL_TREE,
1474 GET_MODE_BITSIZE (mode) - bitsize),
1475 NULL_RTX, 0);
1476 return expand_shift (RSHIFT_EXPR, mode, target,
1477 build_int_cst (NULL_TREE,
1478 GET_MODE_BITSIZE (mode) - bitsize),
1479 NULL_RTX, 0);
1480 }
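/* (The two shifts above sign-extend in place: e.g. extracting a
   signed 40-bit field into DImode shifts left by 64 - 40 = 24 to put
   the field's sign bit at bit 63, then arithmetic-shifts right by
   24.)  */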
1481
1482 /* From here on we know the desired field is smaller than a word. */
1483
1484 /* Check if there is a correspondingly-sized integer field, so we can
1485 safely extract it as one size of integer, if necessary; then
1486 truncate or extend to the size that is wanted; then use SUBREGs or
1487 convert_to_mode to get one of the modes we really wanted. */
1488
1489 int_mode = int_mode_for_mode (tmode);
1490 if (int_mode == BLKmode)
1491 int_mode = int_mode_for_mode (mode);
1492 /* Should probably push op0 out to memory and then do a load. */
1493 gcc_assert (int_mode != BLKmode);
1494
1495 /* OFFSET is the number of words or bytes (UNIT says which)
1496 from STR_RTX to the first word or byte containing part of the field. */
1497 if (!MEM_P (op0))
1498 {
1499 if (offset != 0
1500 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1501 {
1502 if (!REG_P (op0))
1503 op0 = copy_to_reg (op0);
1504 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1505 op0, (offset * UNITS_PER_WORD));
1506 }
1507 offset = 0;
1508 }
1509
1510 /* Now OFFSET is nonzero only for memory operands. */
1511 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1512 icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
1513 if (ext_mode != MAX_MACHINE_MODE
1514 && bitsize > 0
1515 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1516 /* If op0 is a register, we need it in EXT_MODE to make it
1517 acceptable to the format of ext(z)v. */
1518 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1519 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1520 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
1521 && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
1522 {
1523 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1524 rtx bitsize_rtx, bitpos_rtx;
1525 rtx last = get_last_insn ();
1526 rtx xop0 = op0;
1527 rtx xtarget = target;
1528 rtx xspec_target = target;
1529 rtx xspec_target_subreg = 0;
1530 rtx pat;
1531
1532 /* If op0 is a register, we need it in EXT_MODE to make it
1533 acceptable to the format of ext(z)v. */
1534 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1535 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1536 if (MEM_P (xop0))
1537 /* Get ref to first byte containing part of the field. */
1538 xop0 = adjust_address (xop0, byte_mode, xoffset);
1539
1540 /* On big-endian machines, we count bits from the most significant.
1541 If the bit field insn does not, we must invert. */
1542 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1543 xbitpos = unit - bitsize - xbitpos;
1544
1545 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1546 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1547 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1548
1549 unit = GET_MODE_BITSIZE (ext_mode);
1550
1551 if (xtarget == 0)
1552 xtarget = xspec_target = gen_reg_rtx (tmode);
1553
1554 if (GET_MODE (xtarget) != ext_mode)
1555 {
1556 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1557 between the mode of the extraction (word_mode) and the target
1558 mode. Instead, create a temporary and use convert_move to set
1559 the target. */
1560 if (REG_P (xtarget)
1561 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1562 GET_MODE_BITSIZE (ext_mode)))
1563 {
1564 xtarget = gen_lowpart (ext_mode, xtarget);
1565 if (GET_MODE_SIZE (ext_mode)
1566 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1567 xspec_target_subreg = xtarget;
1568 }
1569 else
1570 xtarget = gen_reg_rtx (ext_mode);
1571 }
1572
1573 /* If this machine's ext(z)v insists on a register target,
1574 make sure we have one. */
1575 if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
1576 xtarget = gen_reg_rtx (ext_mode);
1577
1578 bitsize_rtx = GEN_INT (bitsize);
1579 bitpos_rtx = GEN_INT (xbitpos);
1580
1581 pat = (unsignedp
1582 ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
1583 : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
1584 if (pat)
1585 {
1586 emit_insn (pat);
1587 if (xtarget == xspec_target)
1588 return xtarget;
1589 if (xtarget == xspec_target_subreg)
1590 return xspec_target;
1591 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1592 }
1593 delete_insns_since (last);
1594 }
1595
1596 /* If OP0 is a memory, try copying it to a register and seeing if a
1597 cheap register alternative is available. */
1598 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1599 {
1600 enum machine_mode bestmode;
1601
1602 /* Get the mode to use for inserting into this field. If
1603 OP0 is BLKmode, get the smallest mode consistent with the
1604 alignment. If OP0 is a non-BLKmode object that is no
1605 wider than EXT_MODE, use its mode. Otherwise, use the
1606 smallest mode containing the field. */
1607
1608 if (GET_MODE (op0) == BLKmode
1609 || (ext_mode != MAX_MACHINE_MODE
1610 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1611 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1612 (ext_mode == MAX_MACHINE_MODE
1613 ? VOIDmode : ext_mode),
1614 MEM_VOLATILE_P (op0));
1615 else
1616 bestmode = GET_MODE (op0);
1617
1618 if (bestmode != VOIDmode
1619 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1620 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1621 {
1622 unsigned HOST_WIDE_INT xoffset, xbitpos;
1623
1624 /* Compute the offset as a multiple of this unit,
1625 counting in bytes. */
1626 unit = GET_MODE_BITSIZE (bestmode);
1627 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1628 xbitpos = bitnum % unit;
1629
1630 /* Make sure the register is big enough for the whole field. */
1631 if (xoffset * BITS_PER_UNIT + unit
1632 >= offset * BITS_PER_UNIT + bitsize)
1633 {
1634 rtx last, result, xop0;
1635
1636 last = get_last_insn ();
1637
1638 /* Fetch it to a register in that size. */
1639 xop0 = adjust_address (op0, bestmode, xoffset);
1640 xop0 = force_reg (bestmode, xop0);
1641 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1642 unsignedp, packedp, target,
1643 mode, tmode, false);
1644 if (result)
1645 return result;
1646
1647 delete_insns_since (last);
1648 }
1649 }
1650 }
1651
1652 if (!fallback_p)
1653 return NULL;
1654
1655 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1656 bitpos, target, unsignedp, packedp);
1657 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1658 }
1659
1660 /* Generate code to extract a byte-field from STR_RTX
1661 containing BITSIZE bits, starting at BITNUM,
1662 and put it in TARGET if possible (if TARGET is nonzero).
1663 Regardless of TARGET, we return the rtx for where the value is placed.
1664
1665 STR_RTX is the structure containing the byte (a REG or MEM).
1666 UNSIGNEDP is nonzero if this is an unsigned bit field.
1667 PACKEDP is nonzero if the field has the packed attribute.
1668 MODE is the natural mode of the field value once extracted.
1669 TMODE is the mode the caller would like the value to have;
1670 but the value may be returned with type MODE instead.
1671
1672 If a TARGET is specified and we can store in it at no extra cost,
1673 we do so, and return TARGET.
1674 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1675 if they are equally easy. */
1676
1677 rtx
1678 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1679 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1680 rtx target, enum machine_mode mode, enum machine_mode tmode)
1681 {
1682 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1683 target, mode, tmode, true);
1684 }
1685 \f
1686 /* Extract a bit field using shifts and boolean operations.
1687 Returns an rtx to represent the value.
1688 OP0 addresses a register (word) or memory (byte).
1689 BITPOS says which bit within the word or byte the bit field starts in.
1690 OFFSET says how many bytes farther the bit field starts;
1691 it is 0 if OP0 is a register.
1692 BITSIZE says how many bits long the bit field is.
1693 (If OP0 is a register, it may be narrower than a full word,
1694 but BITPOS still counts within a full word,
1695    which is significant on big-endian machines.)
1696
1697 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1698 PACKEDP is true if the field has the packed attribute.
1699
1700 If TARGET is nonzero, attempts to store the value there
1701 and return TARGET, but this is not guaranteed.
1702 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1703
1704 static rtx
1705 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1706 unsigned HOST_WIDE_INT offset,
1707 unsigned HOST_WIDE_INT bitsize,
1708 unsigned HOST_WIDE_INT bitpos, rtx target,
1709 int unsignedp, bool packedp)
1710 {
1711 unsigned int total_bits = BITS_PER_WORD;
1712 enum machine_mode mode;
1713
1714 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1715 {
1716 /* Special treatment for a bit field split across two registers. */
1717 if (bitsize + bitpos > BITS_PER_WORD)
1718 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1719 }
1720 else
1721 {
1722 /* Get the proper mode to use for this field. We want a mode that
1723 includes the entire field. If such a mode would be larger than
1724 a word, we won't be doing the extraction the normal way. */
1725
1726 if (MEM_VOLATILE_P (op0)
1727 && flag_strict_volatile_bitfields > 0)
1728 {
1729 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1730 mode = GET_MODE (op0);
1731 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1732 mode = GET_MODE (target);
1733 else
1734 mode = tmode;
1735 }
1736 else
1737 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1738 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1739
1740 if (mode == VOIDmode)
1741 /* The only way this should occur is if the field spans word
1742 boundaries. */
1743 return extract_split_bit_field (op0, bitsize,
1744 bitpos + offset * BITS_PER_UNIT,
1745 unsignedp);
1746
1747 total_bits = GET_MODE_BITSIZE (mode);
1748
1749 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1750 be in the range 0 to total_bits-1, and put any excess bytes in
1751 OFFSET. */
1752 if (bitpos >= total_bits)
1753 {
1754 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1755 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1756 * BITS_PER_UNIT);
1757 }
1758
1759 /* If we're accessing a volatile MEM, we can't do the next
1760 alignment step if it results in a multi-word access where we
1761 otherwise wouldn't have one. So, check for that case
1762 here. */
1763 if (MEM_P (op0)
1764 && MEM_VOLATILE_P (op0)
1765 && flag_strict_volatile_bitfields > 0
1766 && bitpos + bitsize <= total_bits
1767 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1768 {
1769 if (STRICT_ALIGNMENT)
1770 {
1771 static bool informed_about_misalignment = false;
1772 bool warned;
1773
1774 if (packedp)
1775 {
1776 if (bitsize == total_bits)
1777 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1778 "multiple accesses to volatile structure member"
1779 " because of packed attribute");
1780 else
1781 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1782 "multiple accesses to volatile structure bitfield"
1783 " because of packed attribute");
1784
1785 return extract_split_bit_field (op0, bitsize,
1786 bitpos + offset * BITS_PER_UNIT,
1787 unsignedp);
1788 }
1789
1790 if (bitsize == total_bits)
1791 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1792 "mis-aligned access used for structure member");
1793 else
1794 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1795 "mis-aligned access used for structure bitfield");
1796
1797 if (! informed_about_misalignment && warned)
1798 {
1799 informed_about_misalignment = true;
1800 inform (input_location,
1801 "when a volatile object spans multiple type-sized locations,"
1802 " the compiler must choose between using a single mis-aligned access to"
1803 " preserve the volatility, or using multiple aligned accesses to avoid"
1804 " runtime faults; this code may fail at runtime if the hardware does"
1805 " not allow this access");
1806 }
1807 }
1808 }
1809 else
1810 {
1811
1812 /* Get ref to an aligned byte, halfword, or word containing the field.
1813 Adjust BITPOS to be position within a word,
1814 and OFFSET to be the offset of that word.
1815 Then alter OP0 to refer to that word. */
1816 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1817 offset -= (offset % (total_bits / BITS_PER_UNIT));
1818 }
1819
1820 op0 = adjust_address (op0, mode, offset);
1821 }
1822
1823 mode = GET_MODE (op0);
1824
1825 if (BYTES_BIG_ENDIAN)
1826 /* BITPOS is the distance between our msb and that of OP0.
1827 Convert it to the distance from the lsb. */
1828 bitpos = total_bits - bitsize - bitpos;
1829
1830 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1831 We have reduced the big-endian case to the little-endian case. */
1832
1833 if (unsignedp)
1834 {
1835 if (bitpos)
1836 {
1837 /* If the field does not already start at the lsb,
1838 shift it so it does. */
1839 tree amount = build_int_cst (NULL_TREE, bitpos);
1840 /* Maybe propagate the target for the shift. */
1841 /* But not if we will return it--could confuse integrate.c. */
1842 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1843 if (tmode != mode) subtarget = 0;
1844 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1845 }
1846 /* Convert the value to the desired mode. */
1847 if (mode != tmode)
1848 op0 = convert_to_mode (tmode, op0, 1);
1849
1850 /* Unless the msb of the field used to be the msb when we shifted,
1851 mask out the upper bits. */
1852
1853 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1854 return expand_binop (GET_MODE (op0), and_optab, op0,
1855 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1856 target, 1, OPTAB_LIB_WIDEN);
1857 return op0;
1858 }
1859
1860 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1861 then arithmetic-shift its lsb to the lsb of the word. */
1862 op0 = force_reg (mode, op0);
1863 if (mode != tmode)
1864 target = 0;
1865
1866 /* Find the narrowest integer mode that contains the field. */
1867
1868 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1869 mode = GET_MODE_WIDER_MODE (mode))
1870 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1871 {
1872 op0 = convert_to_mode (mode, op0, 0);
1873 break;
1874 }
1875
1876 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1877 {
1878 tree amount
1879 = build_int_cst (NULL_TREE,
1880 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1881 /* Maybe propagate the target for the shift. */
1882 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1883 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1884 }
1885
1886 return expand_shift (RSHIFT_EXPR, mode, op0,
1887 build_int_cst (NULL_TREE,
1888 GET_MODE_BITSIZE (mode) - bitsize),
1889 target, 0);
1890 }
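/* Illustration (not part of GCC): for the unsigned path above, the
   generated code amounts to the classic shift-and-mask idiom.  In C,
   assuming BITSIZE is smaller than the width of the word:

       unsigned int
       extract_unsigned (unsigned int w, int bitpos, int bitsize)
       {
         return (w >> bitpos) & ((1u << bitsize) - 1u);
       }

   The signed path instead shifts the field's msb up to the word's msb
   and arithmetic-shifts it back down, so one pair of shifts both
   positions the field and sign-extends it.  */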
1891 \f
1892 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1893 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1894 complement of that if COMPLEMENT. The mask is truncated if
1895 necessary to the width of mode MODE. The mask is zero-extended if
1896 BITSIZE+BITPOS is too small for MODE. */
1897
1898 static rtx
1899 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1900 {
1901 double_int mask;
1902
1903 mask = double_int_mask (bitsize);
1904 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1905
1906 if (complement)
1907 mask = double_int_not (mask);
1908
1909 return immed_double_int_const (mask, mode);
1910 }
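/* For illustration (not part of GCC): in a mode of at least 8 bits,
   mask_rtx (mode, 3, 5, 0) yields the constant 0xf8 (five ones
   shifted left by three), and mask_rtx (mode, 3, 5, 1) yields its
   complement, truncated to the width of MODE.  */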
1911
1912 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1913 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1914
1915 static rtx
1916 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1917 {
1918 double_int val;
1919
1920 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1921 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1922
1923 return immed_double_int_const (val, mode);
1924 }
1925 \f
1926 /* Extract a bit field that is split across two words
1927 and return an RTX for the result.
1928
1929 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1930 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1931    UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */
1932
1933 static rtx
1934 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1935 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1936 {
1937 unsigned int unit;
1938 unsigned int bitsdone = 0;
1939 rtx result = NULL_RTX;
1940 int first = 1;
1941
1942 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1943 much at a time. */
1944 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1945 unit = BITS_PER_WORD;
1946 else
1947 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1948
1949 while (bitsdone < bitsize)
1950 {
1951 unsigned HOST_WIDE_INT thissize;
1952 rtx part, word;
1953 unsigned HOST_WIDE_INT thispos;
1954 unsigned HOST_WIDE_INT offset;
1955
1956 offset = (bitpos + bitsdone) / unit;
1957 thispos = (bitpos + bitsdone) % unit;
1958
1959 /* THISSIZE must not overrun a word boundary. Otherwise,
1960 extract_fixed_bit_field will call us again, and we will mutually
1961 recurse forever. */
1962 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1963 thissize = MIN (thissize, unit - thispos);
1964
1965 /* If OP0 is a register, then handle OFFSET here.
1966
1967 When handling multiword bitfields, extract_bit_field may pass
1968 down a word_mode SUBREG of a larger REG for a bitfield that actually
1969 crosses a word boundary. Thus, for a SUBREG, we must find
1970 the current word starting from the base register. */
1971 if (GET_CODE (op0) == SUBREG)
1972 {
1973 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1974 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1975 GET_MODE (SUBREG_REG (op0)));
1976 offset = 0;
1977 }
1978 else if (REG_P (op0))
1979 {
1980 word = operand_subword_force (op0, offset, GET_MODE (op0));
1981 offset = 0;
1982 }
1983 else
1984 word = op0;
1985
1986 /* Extract the parts in bit-counting order,
1987 	 whose meaning is determined by BYTES_BIG_ENDIAN.
1988 OFFSET is in UNITs, and UNIT is in bits.
1989 extract_fixed_bit_field wants offset in bytes. */
1990 part = extract_fixed_bit_field (word_mode, word,
1991 offset * unit / BITS_PER_UNIT,
1992 thissize, thispos, 0, 1, false);
1993 bitsdone += thissize;
1994
1995 /* Shift this part into place for the result. */
1996 if (BYTES_BIG_ENDIAN)
1997 {
1998 if (bitsize != bitsdone)
1999 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2000 build_int_cst (NULL_TREE, bitsize - bitsdone),
2001 0, 1);
2002 }
2003 else
2004 {
2005 if (bitsdone != thissize)
2006 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2007 build_int_cst (NULL_TREE,
2008 bitsdone - thissize), 0, 1);
2009 }
2010
2011 if (first)
2012 result = part;
2013 else
2014 	/* Combine the parts with bitwise OR.  This works
2015 	   because we extracted each part as an unsigned bit field.  */
2016 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2017 OPTAB_LIB_WIDEN);
2018
2019 first = 0;
2020 }
2021
2022 /* Unsigned bit field: we are done. */
2023 if (unsignedp)
2024 return result;
2025 /* Signed bit field: sign-extend with two arithmetic shifts. */
2026 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2027 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2028 NULL_RTX, 0);
2029 return expand_shift (RSHIFT_EXPR, word_mode, result,
2030 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2031 NULL_RTX, 0);
2032 }
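/* Worked example (not part of GCC): with BITS_PER_WORD == 32, a
   20-bit field at bit position 24 of a double-word register is
   extracted in two passes:

       pass 1: offset 0, thispos 24, thissize 8:  bits 24..31 of word 0
       pass 2: offset 1, thispos 0,  thissize 12: bits 0..11 of word 1,
               shifted left by 8 and IORed into the result

   (little-endian ordering; BYTES_BIG_ENDIAN places the parts the
   other way around).  The final two shifts sign-extend the 20-bit
   value when UNSIGNEDP is zero.  */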
2033 \f
2034 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2035 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2036 MODE, fill the upper bits with zeros. Fail if the layout of either
2037 mode is unknown (as for CC modes) or if the extraction would involve
2038 unprofitable mode punning. Return the value on success, otherwise
2039 return null.
2040
2041 This is different from gen_lowpart* in these respects:
2042
2043 - the returned value must always be considered an rvalue
2044
2045 - when MODE is wider than SRC_MODE, the extraction involves
2046 a zero extension
2047
2048 - when MODE is smaller than SRC_MODE, the extraction involves
2049 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2050
2051 In other words, this routine performs a computation, whereas the
2052 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2053 operations. */
2054
2055 rtx
2056 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2057 {
2058 enum machine_mode int_mode, src_int_mode;
2059
2060 if (mode == src_mode)
2061 return src;
2062
2063 if (CONSTANT_P (src))
2064 {
2065 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2066 fails, it will happily create (subreg (symbol_ref)) or similar
2067 invalid SUBREGs. */
2068 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2069 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2070 if (ret)
2071 return ret;
2072
2073 if (GET_MODE (src) == VOIDmode
2074 || !validate_subreg (mode, src_mode, src, byte))
2075 return NULL_RTX;
2076
2077 src = force_reg (GET_MODE (src), src);
2078 return gen_rtx_SUBREG (mode, src, byte);
2079 }
2080
2081 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2082 return NULL_RTX;
2083
2084 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2085 && MODES_TIEABLE_P (mode, src_mode))
2086 {
2087 rtx x = gen_lowpart_common (mode, src);
2088 if (x)
2089 return x;
2090 }
2091
2092 src_int_mode = int_mode_for_mode (src_mode);
2093 int_mode = int_mode_for_mode (mode);
2094 if (src_int_mode == BLKmode || int_mode == BLKmode)
2095 return NULL_RTX;
2096
2097 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2098 return NULL_RTX;
2099 if (!MODES_TIEABLE_P (int_mode, mode))
2100 return NULL_RTX;
2101
2102 src = gen_lowpart (src_int_mode, src);
2103 src = convert_modes (int_mode, src_int_mode, src, true);
2104 src = gen_lowpart (mode, src);
2105 return src;
2106 }
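/* Usage sketch (hypothetical, not from GCC itself): reinterpreting the
   bits of a 32-bit float SRC as an integer rvalue could be written

       rtx bits = extract_low_bits (SImode, SFmode, src);

   which goes through the tieable integer modes and convert_modes when
   no direct lowpart exists, and fails cleanly with NULL_RTX instead of
   creating an invalid SUBREG.  */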
2107 \f
2108 /* Add INC into TARGET. */
2109
2110 void
2111 expand_inc (rtx target, rtx inc)
2112 {
2113 rtx value = expand_binop (GET_MODE (target), add_optab,
2114 target, inc,
2115 target, 0, OPTAB_LIB_WIDEN);
2116 if (value != target)
2117 emit_move_insn (target, value);
2118 }
2119
2120 /* Subtract DEC from TARGET. */
2121
2122 void
2123 expand_dec (rtx target, rtx dec)
2124 {
2125 rtx value = expand_binop (GET_MODE (target), sub_optab,
2126 target, dec,
2127 target, 0, OPTAB_LIB_WIDEN);
2128 if (value != target)
2129 emit_move_insn (target, value);
2130 }
2131 \f
2132 /* Output a shift instruction for expression code CODE,
2133 with SHIFTED being the rtx for the value to shift,
2134 and AMOUNT the tree for the amount to shift by.
2135 Store the result in the rtx TARGET, if that is convenient.
2136 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2137 Return the rtx for where the value is. */
2138
2139 rtx
2140 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2141 tree amount, rtx target, int unsignedp)
2142 {
2143 rtx op1, temp = 0;
2144 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2145 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2146 optab lshift_optab = ashl_optab;
2147 optab rshift_arith_optab = ashr_optab;
2148 optab rshift_uns_optab = lshr_optab;
2149 optab lrotate_optab = rotl_optab;
2150 optab rrotate_optab = rotr_optab;
2151 enum machine_mode op1_mode;
2152 int attempt;
2153 bool speed = optimize_insn_for_speed_p ();
2154
2155 op1 = expand_normal (amount);
2156 op1_mode = GET_MODE (op1);
2157
2158   /* Determine whether the shift/rotate amount is a vector or a scalar.  If the
2159 shift amount is a vector, use the vector/vector shift patterns. */
2160 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2161 {
2162 lshift_optab = vashl_optab;
2163 rshift_arith_optab = vashr_optab;
2164 rshift_uns_optab = vlshr_optab;
2165 lrotate_optab = vrotl_optab;
2166 rrotate_optab = vrotr_optab;
2167 }
2168
2169   /* We used to detect shift counts computed by NEGATE_EXPR
2170      and shift in the other direction, but that does not work
2171      on all machines.  */
2172
2173 if (SHIFT_COUNT_TRUNCATED)
2174 {
2175 if (CONST_INT_P (op1)
2176 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2177 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2178 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2179 % GET_MODE_BITSIZE (mode));
2180 else if (GET_CODE (op1) == SUBREG
2181 && subreg_lowpart_p (op1)
2182 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2183 op1 = SUBREG_REG (op1);
2184 }
2185
2186 if (op1 == const0_rtx)
2187 return shifted;
2188
2189   /* Check whether it's cheaper to implement a left shift by a constant
2190      bit count as a sequence of additions.  */
2191 if (code == LSHIFT_EXPR
2192 && CONST_INT_P (op1)
2193 && INTVAL (op1) > 0
2194 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2195 && INTVAL (op1) < MAX_BITS_PER_WORD
2196 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2197 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2198 {
2199 int i;
2200 for (i = 0; i < INTVAL (op1); i++)
2201 {
2202 temp = force_reg (mode, shifted);
2203 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2204 unsignedp, OPTAB_LIB_WIDEN);
2205 }
2206 return shifted;
2207 }
2208
2209 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2210 {
2211 enum optab_methods methods;
2212
2213 if (attempt == 0)
2214 methods = OPTAB_DIRECT;
2215 else if (attempt == 1)
2216 methods = OPTAB_WIDEN;
2217 else
2218 methods = OPTAB_LIB_WIDEN;
2219
2220 if (rotate)
2221 {
2222 /* Widening does not work for rotation. */
2223 if (methods == OPTAB_WIDEN)
2224 continue;
2225 else if (methods == OPTAB_LIB_WIDEN)
2226 {
2227 /* If we have been unable to open-code this by a rotation,
2228 do it as the IOR of two shifts. I.e., to rotate A
2229 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2230 where C is the bitsize of A.
2231
2232 It is theoretically possible that the target machine might
2233 not be able to perform either shift and hence we would
2234 be making two libcalls rather than just the one for the
2235 shift (similarly if IOR could not be done). We will allow
2236 this extremely unlikely lossage to avoid complicating the
2237 code below. */
2238
2239 rtx subtarget = target == shifted ? 0 : target;
2240 tree new_amount, other_amount;
2241 rtx temp1;
2242 tree type = TREE_TYPE (amount);
2243 if (GET_MODE (op1) != TYPE_MODE (type)
2244 && GET_MODE (op1) != VOIDmode)
2245 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2246 new_amount = make_tree (type, op1);
2247 other_amount
2248 = fold_build2 (MINUS_EXPR, type,
2249 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2250 new_amount);
2251
2252 shifted = force_reg (mode, shifted);
2253
2254 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2255 mode, shifted, new_amount, 0, 1);
2256 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2257 mode, shifted, other_amount, subtarget, 1);
2258 return expand_binop (mode, ior_optab, temp, temp1, target,
2259 unsignedp, methods);
2260 }
2261
2262 temp = expand_binop (mode,
2263 left ? lrotate_optab : rrotate_optab,
2264 shifted, op1, target, unsignedp, methods);
2265 }
2266 else if (unsignedp)
2267 temp = expand_binop (mode,
2268 left ? lshift_optab : rshift_uns_optab,
2269 shifted, op1, target, unsignedp, methods);
2270
2271 /* Do arithmetic shifts.
2272 Also, if we are going to widen the operand, we can just as well
2273 use an arithmetic right-shift instead of a logical one. */
2274 if (temp == 0 && ! rotate
2275 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2276 {
2277 enum optab_methods methods1 = methods;
2278
2279 /* If trying to widen a log shift to an arithmetic shift,
2280 don't accept an arithmetic shift of the same size. */
2281 if (unsignedp)
2282 methods1 = OPTAB_MUST_WIDEN;
2283
2284 /* Arithmetic shift */
2285
2286 temp = expand_binop (mode,
2287 left ? lshift_optab : rshift_arith_optab,
2288 shifted, op1, target, unsignedp, methods1);
2289 }
2290
2291 /* We used to try extzv here for logical right shifts, but that was
2292 only useful for one machine, the VAX, and caused poor code
2293 generation there for lshrdi3, so the code was deleted and a
2294 define_expand for lshrsi3 was added to vax.md. */
2295 }
2296
2297 gcc_assert (temp);
2298 return temp;
2299 }
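/* Illustration (not part of GCC): the OPTAB_LIB_WIDEN rotate fallback
   above is the standard decomposition of a rotate into two shifts and
   an IOR.  For a 32-bit left rotate in C, assuming 0 < n < 32 so that
   neither shift count is out of range:

       unsigned int
       rotl32 (unsigned int a, int n)
       {
         return (a << n) | (a >> (32 - n));
       }
*/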
2300 \f
2301 /* Indicates the type of fixup needed after a constant multiplication.
2302 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2303 the result should be negated, and ADD_VARIANT means that the
2304 multiplicand should be added to the result. */
2305 enum mult_variant {basic_variant, negate_variant, add_variant};
2306
2307 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2308 const struct mult_cost *, enum machine_mode mode);
2309 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2310 struct algorithm *, enum mult_variant *, int);
2311 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2312 const struct algorithm *, enum mult_variant);
2313 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2314 int, rtx *, int *, int *);
2315 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2316 static rtx extract_high_half (enum machine_mode, rtx);
2317 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2318 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2319 int, int);
2320 /* Compute and return the best algorithm for multiplying by T.
2321    The algorithm must cost less than COST_LIMIT.
2322    If retval.cost >= COST_LIMIT, no algorithm was found and all
2323    other fields of the returned struct are undefined.
2324    MODE is the machine mode of the multiplication.  */
2325
2326 static void
2327 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2328 const struct mult_cost *cost_limit, enum machine_mode mode)
2329 {
2330 int m;
2331 struct algorithm *alg_in, *best_alg;
2332 struct mult_cost best_cost;
2333 struct mult_cost new_limit;
2334 int op_cost, op_latency;
2335 unsigned HOST_WIDE_INT orig_t = t;
2336 unsigned HOST_WIDE_INT q;
2337 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2338 int hash_index;
2339 bool cache_hit = false;
2340 enum alg_code cache_alg = alg_zero;
2341 bool speed = optimize_insn_for_speed_p ();
2342
2343 /* Indicate that no algorithm is yet found. If no algorithm
2344 is found, this value will be returned and indicate failure. */
2345 alg_out->cost.cost = cost_limit->cost + 1;
2346 alg_out->cost.latency = cost_limit->latency + 1;
2347
2348 if (cost_limit->cost < 0
2349 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2350 return;
2351
2352 /* Restrict the bits of "t" to the multiplication's mode. */
2353 t &= GET_MODE_MASK (mode);
2354
2355 /* t == 1 can be done in zero cost. */
2356 if (t == 1)
2357 {
2358 alg_out->ops = 1;
2359 alg_out->cost.cost = 0;
2360 alg_out->cost.latency = 0;
2361 alg_out->op[0] = alg_m;
2362 return;
2363 }
2364
2365 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2366 fail now. */
2367 if (t == 0)
2368 {
2369 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2370 return;
2371 else
2372 {
2373 alg_out->ops = 1;
2374 alg_out->cost.cost = zero_cost[speed];
2375 alg_out->cost.latency = zero_cost[speed];
2376 alg_out->op[0] = alg_zero;
2377 return;
2378 }
2379 }
2380
2381 /* We'll be needing a couple extra algorithm structures now. */
2382
2383 alg_in = XALLOCA (struct algorithm);
2384 best_alg = XALLOCA (struct algorithm);
2385 best_cost = *cost_limit;
2386
2387 /* Compute the hash index. */
2388 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2389
2390 /* See if we already know what to do for T. */
2391   if (alg_hash[hash_index].t == t
2392       && alg_hash[hash_index].mode == mode
2394 && alg_hash[hash_index].speed == speed
2395 && alg_hash[hash_index].alg != alg_unknown)
2396 {
2397 cache_alg = alg_hash[hash_index].alg;
2398
2399 if (cache_alg == alg_impossible)
2400 {
2401 /* The cache tells us that it's impossible to synthesize
2402 multiplication by T within alg_hash[hash_index].cost. */
2403 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2404 /* COST_LIMIT is at least as restrictive as the one
2405 recorded in the hash table, in which case we have no
2406 hope of synthesizing a multiplication. Just
2407 return. */
2408 return;
2409
2410 /* If we get here, COST_LIMIT is less restrictive than the
2411 one recorded in the hash table, so we may be able to
2412 synthesize a multiplication. Proceed as if we didn't
2413 have the cache entry. */
2414 }
2415 else
2416 {
2417 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2418 /* The cached algorithm shows that this multiplication
2419 requires more cost than COST_LIMIT. Just return. This
2420 way, we don't clobber this cache entry with
2421 alg_impossible but retain useful information. */
2422 return;
2423
2424 cache_hit = true;
2425
2426 switch (cache_alg)
2427 {
2428 case alg_shift:
2429 goto do_alg_shift;
2430
2431 case alg_add_t_m2:
2432 case alg_sub_t_m2:
2433 goto do_alg_addsub_t_m2;
2434
2435 case alg_add_factor:
2436 case alg_sub_factor:
2437 goto do_alg_addsub_factor;
2438
2439 case alg_add_t2_m:
2440 goto do_alg_add_t2_m;
2441
2442 case alg_sub_t2_m:
2443 goto do_alg_sub_t2_m;
2444
2445 default:
2446 gcc_unreachable ();
2447 }
2448 }
2449 }
2450
2451 /* If we have a group of zero bits at the low-order part of T, try
2452 multiplying by the remaining bits and then doing a shift. */
2453
2454 if ((t & 1) == 0)
2455 {
2456 do_alg_shift:
2457 m = floor_log2 (t & -t); /* m = number of low zero bits */
2458 if (m < maxm)
2459 {
2460 q = t >> m;
2461 /* The function expand_shift will choose between a shift and
2462 a sequence of additions, so the observed cost is given as
2463 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2464 op_cost = m * add_cost[speed][mode];
2465 if (shift_cost[speed][mode][m] < op_cost)
2466 op_cost = shift_cost[speed][mode][m];
2467 new_limit.cost = best_cost.cost - op_cost;
2468 new_limit.latency = best_cost.latency - op_cost;
2469 synth_mult (alg_in, q, &new_limit, mode);
2470
2471 alg_in->cost.cost += op_cost;
2472 alg_in->cost.latency += op_cost;
2473 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2474 {
2475 struct algorithm *x;
2476 best_cost = alg_in->cost;
2477 x = alg_in, alg_in = best_alg, best_alg = x;
2478 best_alg->log[best_alg->ops] = m;
2479 best_alg->op[best_alg->ops] = alg_shift;
2480 }
2481
2482 /* See if treating ORIG_T as a signed number yields a better
2483 sequence. Try this sequence only for a negative ORIG_T
2484 as it would be useless for a non-negative ORIG_T. */
2485 if ((HOST_WIDE_INT) orig_t < 0)
2486 {
2487 /* Shift ORIG_T as follows because a right shift of a
2488 negative-valued signed type is implementation
2489 defined. */
2490 q = ~(~orig_t >> m);
2491 /* The function expand_shift will choose between a shift
2492 and a sequence of additions, so the observed cost is
2493 given as MIN (m * add_cost[speed][mode],
2494 shift_cost[speed][mode][m]). */
2495 op_cost = m * add_cost[speed][mode];
2496 if (shift_cost[speed][mode][m] < op_cost)
2497 op_cost = shift_cost[speed][mode][m];
2498 new_limit.cost = best_cost.cost - op_cost;
2499 new_limit.latency = best_cost.latency - op_cost;
2500 synth_mult (alg_in, q, &new_limit, mode);
2501
2502 alg_in->cost.cost += op_cost;
2503 alg_in->cost.latency += op_cost;
2504 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2505 {
2506 struct algorithm *x;
2507 best_cost = alg_in->cost;
2508 x = alg_in, alg_in = best_alg, best_alg = x;
2509 best_alg->log[best_alg->ops] = m;
2510 best_alg->op[best_alg->ops] = alg_shift;
2511 }
2512 }
2513 }
2514 if (cache_hit)
2515 goto done;
2516 }
2517
2518 /* If we have an odd number, add or subtract one. */
2519 if ((t & 1) != 0)
2520 {
2521 unsigned HOST_WIDE_INT w;
2522
2523 do_alg_addsub_t_m2:
2524 for (w = 1; (w & t) != 0; w <<= 1)
2525 ;
2526       /* If T was -1, then W will be zero after the loop.  This is another
2527 	 case where T ends with ...111.  Handling it as (T + 1) followed by
2528 	 a subtraction of 1 produces slightly better code and selects an
2529 	 algorithm much faster than treating it like the ...0111 case
2530 	 below.  */
2531 if (w == 0
2532 || (w > 2
2533 /* Reject the case where t is 3.
2534 Thus we prefer addition in that case. */
2535 && t != 3))
2536 {
2537 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2538
2539 op_cost = add_cost[speed][mode];
2540 new_limit.cost = best_cost.cost - op_cost;
2541 new_limit.latency = best_cost.latency - op_cost;
2542 synth_mult (alg_in, t + 1, &new_limit, mode);
2543
2544 alg_in->cost.cost += op_cost;
2545 alg_in->cost.latency += op_cost;
2546 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2547 {
2548 struct algorithm *x;
2549 best_cost = alg_in->cost;
2550 x = alg_in, alg_in = best_alg, best_alg = x;
2551 best_alg->log[best_alg->ops] = 0;
2552 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2553 }
2554 }
2555 else
2556 {
2557 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2558
2559 op_cost = add_cost[speed][mode];
2560 new_limit.cost = best_cost.cost - op_cost;
2561 new_limit.latency = best_cost.latency - op_cost;
2562 synth_mult (alg_in, t - 1, &new_limit, mode);
2563
2564 alg_in->cost.cost += op_cost;
2565 alg_in->cost.latency += op_cost;
2566 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2567 {
2568 struct algorithm *x;
2569 best_cost = alg_in->cost;
2570 x = alg_in, alg_in = best_alg, best_alg = x;
2571 best_alg->log[best_alg->ops] = 0;
2572 best_alg->op[best_alg->ops] = alg_add_t_m2;
2573 }
2574 }
2575
2576       /* We may be able to calculate a * -7, a * -15, a * -31, etc.
2577 	 quickly with a - a * n for some appropriate constant n.  */
2578 m = exact_log2 (-orig_t + 1);
2579 if (m >= 0 && m < maxm)
2580 {
2581 op_cost = shiftsub1_cost[speed][mode][m];
2582 new_limit.cost = best_cost.cost - op_cost;
2583 new_limit.latency = best_cost.latency - op_cost;
2584 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2585
2586 alg_in->cost.cost += op_cost;
2587 alg_in->cost.latency += op_cost;
2588 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2589 {
2590 struct algorithm *x;
2591 best_cost = alg_in->cost;
2592 x = alg_in, alg_in = best_alg, best_alg = x;
2593 best_alg->log[best_alg->ops] = m;
2594 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2595 }
2596 }
2597
2598 if (cache_hit)
2599 goto done;
2600 }
2601
2602 /* Look for factors of t of the form
2603 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2604 If we find such a factor, we can multiply by t using an algorithm that
2605 multiplies by q, shift the result by m and add/subtract it to itself.
2606
2607 We search for large factors first and loop down, even if large factors
2608 are less probable than small; if we find a large factor we will find a
2609 good sequence quickly, and therefore be able to prune (by decreasing
2610 COST_LIMIT) the search. */
2611
2612 do_alg_addsub_factor:
2613 for (m = floor_log2 (t - 1); m >= 2; m--)
2614 {
2615 unsigned HOST_WIDE_INT d;
2616
2617 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2618 if (t % d == 0 && t > d && m < maxm
2619 && (!cache_hit || cache_alg == alg_add_factor))
2620 {
2621 /* If the target has a cheap shift-and-add instruction use
2622 that in preference to a shift insn followed by an add insn.
2623 Assume that the shift-and-add is "atomic" with a latency
2624 	     equal to its cost; otherwise assume that on superscalar
2625 hardware the shift may be executed concurrently with the
2626 earlier steps in the algorithm. */
2627 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2628 if (shiftadd_cost[speed][mode][m] < op_cost)
2629 {
2630 op_cost = shiftadd_cost[speed][mode][m];
2631 op_latency = op_cost;
2632 }
2633 else
2634 op_latency = add_cost[speed][mode];
2635
2636 new_limit.cost = best_cost.cost - op_cost;
2637 new_limit.latency = best_cost.latency - op_latency;
2638 synth_mult (alg_in, t / d, &new_limit, mode);
2639
2640 alg_in->cost.cost += op_cost;
2641 alg_in->cost.latency += op_latency;
2642 if (alg_in->cost.latency < op_cost)
2643 alg_in->cost.latency = op_cost;
2644 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2645 {
2646 struct algorithm *x;
2647 best_cost = alg_in->cost;
2648 x = alg_in, alg_in = best_alg, best_alg = x;
2649 best_alg->log[best_alg->ops] = m;
2650 best_alg->op[best_alg->ops] = alg_add_factor;
2651 }
2652 /* Other factors will have been taken care of in the recursion. */
2653 break;
2654 }
2655
2656 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2657 if (t % d == 0 && t > d && m < maxm
2658 && (!cache_hit || cache_alg == alg_sub_factor))
2659 {
2660 /* If the target has a cheap shift-and-subtract insn use
2661 that in preference to a shift insn followed by a sub insn.
2662 	     Assume that the shift-and-sub is "atomic" with a latency
2663 	     equal to its cost; otherwise assume that on superscalar
2664 hardware the shift may be executed concurrently with the
2665 earlier steps in the algorithm. */
2666 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2667 if (shiftsub0_cost[speed][mode][m] < op_cost)
2668 {
2669 op_cost = shiftsub0_cost[speed][mode][m];
2670 op_latency = op_cost;
2671 }
2672 else
2673 op_latency = add_cost[speed][mode];
2674
2675 new_limit.cost = best_cost.cost - op_cost;
2676 new_limit.latency = best_cost.latency - op_latency;
2677 synth_mult (alg_in, t / d, &new_limit, mode);
2678
2679 alg_in->cost.cost += op_cost;
2680 alg_in->cost.latency += op_latency;
2681 if (alg_in->cost.latency < op_cost)
2682 alg_in->cost.latency = op_cost;
2683 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2684 {
2685 struct algorithm *x;
2686 best_cost = alg_in->cost;
2687 x = alg_in, alg_in = best_alg, best_alg = x;
2688 best_alg->log[best_alg->ops] = m;
2689 best_alg->op[best_alg->ops] = alg_sub_factor;
2690 }
2691 break;
2692 }
2693 }
2694 if (cache_hit)
2695 goto done;
2696
2697 /* Try shift-and-add (load effective address) instructions,
2698 i.e. do a*3, a*5, a*9. */
2699 if ((t & 1) != 0)
2700 {
2701 do_alg_add_t2_m:
2702 q = t - 1;
2703 q = q & -q;
2704 m = exact_log2 (q);
2705 if (m >= 0 && m < maxm)
2706 {
2707 op_cost = shiftadd_cost[speed][mode][m];
2708 new_limit.cost = best_cost.cost - op_cost;
2709 new_limit.latency = best_cost.latency - op_cost;
2710 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2711
2712 alg_in->cost.cost += op_cost;
2713 alg_in->cost.latency += op_cost;
2714 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2715 {
2716 struct algorithm *x;
2717 best_cost = alg_in->cost;
2718 x = alg_in, alg_in = best_alg, best_alg = x;
2719 best_alg->log[best_alg->ops] = m;
2720 best_alg->op[best_alg->ops] = alg_add_t2_m;
2721 }
2722 }
2723 if (cache_hit)
2724 goto done;
2725
2726 do_alg_sub_t2_m:
2727 q = t + 1;
2728 q = q & -q;
2729 m = exact_log2 (q);
2730 if (m >= 0 && m < maxm)
2731 {
2732 op_cost = shiftsub0_cost[speed][mode][m];
2733 new_limit.cost = best_cost.cost - op_cost;
2734 new_limit.latency = best_cost.latency - op_cost;
2735 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2736
2737 alg_in->cost.cost += op_cost;
2738 alg_in->cost.latency += op_cost;
2739 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2740 {
2741 struct algorithm *x;
2742 best_cost = alg_in->cost;
2743 x = alg_in, alg_in = best_alg, best_alg = x;
2744 best_alg->log[best_alg->ops] = m;
2745 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2746 }
2747 }
2748 if (cache_hit)
2749 goto done;
2750 }
2751
2752 done:
2753 /* If best_cost has not decreased, we have not found any algorithm. */
2754 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2755 {
2756 /* We failed to find an algorithm. Record alg_impossible for
2757 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2758 we are asked to find an algorithm for T within the same or
2759 lower COST_LIMIT, we can immediately return to the
2760 caller. */
2761 alg_hash[hash_index].t = t;
2762 alg_hash[hash_index].mode = mode;
2763 alg_hash[hash_index].speed = speed;
2764 alg_hash[hash_index].alg = alg_impossible;
2765 alg_hash[hash_index].cost = *cost_limit;
2766 return;
2767 }
2768
2769 /* Cache the result. */
2770 if (!cache_hit)
2771 {
2772 alg_hash[hash_index].t = t;
2773 alg_hash[hash_index].mode = mode;
2774 alg_hash[hash_index].speed = speed;
2775 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2776 alg_hash[hash_index].cost.cost = best_cost.cost;
2777 alg_hash[hash_index].cost.latency = best_cost.latency;
2778 }
2779
2780   /* If the sequence is too long for `struct algorithm'
2781      to record, make this search fail.  */
2782 if (best_alg->ops == MAX_BITS_PER_WORD)
2783 return;
2784
2785 /* Copy the algorithm from temporary space to the space at alg_out.
2786 We avoid using structure assignment because the majority of
2787 best_alg is normally undefined, and this is a critical function. */
2788 alg_out->ops = best_alg->ops + 1;
2789 alg_out->cost = best_cost;
2790 memcpy (alg_out->op, best_alg->op,
2791 alg_out->ops * sizeof *alg_out->op);
2792 memcpy (alg_out->log, best_alg->log,
2793 alg_out->ops * sizeof *alg_out->log);
2794 }
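/* Worked example (not part of GCC): for t = 45 one sequence that
   synth_mult can discover is

       x5  = (x << 2) + x      alg_add_t2_m,   log 2:  val_so_far 5
       x45 = (x5 << 3) + x5    alg_add_factor, log 3:  5 * (2^3 + 1)

   i.e. two shift-and-add steps in place of a multiply; whether this
   beats a mult insn depends on the target's add_cost, shift_cost and
   shiftadd_cost entries.  */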
2795 \f
2796 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2797 Try three variations:
2798
2799 - a shift/add sequence based on VAL itself
2800 - a shift/add sequence based on -VAL, followed by a negation
2801 - a shift/add sequence based on VAL - 1, followed by an addition.
2802
2803 Return true if the cheapest of these cost less than MULT_COST,
2804 describing the algorithm in *ALG and final fixup in *VARIANT. */
2805
2806 static bool
2807 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2808 struct algorithm *alg, enum mult_variant *variant,
2809 int mult_cost)
2810 {
2811 struct algorithm alg2;
2812 struct mult_cost limit;
2813 int op_cost;
2814 bool speed = optimize_insn_for_speed_p ();
2815
2816 /* Fail quickly for impossible bounds. */
2817 if (mult_cost < 0)
2818 return false;
2819
2820 /* Ensure that mult_cost provides a reasonable upper bound.
2821 Any constant multiplication can be performed with less
2822 than 2 * bits additions. */
2823 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2824 if (mult_cost > op_cost)
2825 mult_cost = op_cost;
2826
2827 *variant = basic_variant;
2828 limit.cost = mult_cost;
2829 limit.latency = mult_cost;
2830 synth_mult (alg, val, &limit, mode);
2831
2832   /* This works only if the inverted value actually fits in an
2833      `unsigned int'.  */
2834 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2835 {
2836 op_cost = neg_cost[speed][mode];
2837 if (MULT_COST_LESS (&alg->cost, mult_cost))
2838 {
2839 limit.cost = alg->cost.cost - op_cost;
2840 limit.latency = alg->cost.latency - op_cost;
2841 }
2842 else
2843 {
2844 limit.cost = mult_cost - op_cost;
2845 limit.latency = mult_cost - op_cost;
2846 }
2847
2848 synth_mult (&alg2, -val, &limit, mode);
2849 alg2.cost.cost += op_cost;
2850 alg2.cost.latency += op_cost;
2851 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2852 *alg = alg2, *variant = negate_variant;
2853 }
2854
2855 /* This proves very useful for division-by-constant. */
2856 op_cost = add_cost[speed][mode];
2857 if (MULT_COST_LESS (&alg->cost, mult_cost))
2858 {
2859 limit.cost = alg->cost.cost - op_cost;
2860 limit.latency = alg->cost.latency - op_cost;
2861 }
2862 else
2863 {
2864 limit.cost = mult_cost - op_cost;
2865 limit.latency = mult_cost - op_cost;
2866 }
2867
2868 synth_mult (&alg2, val - 1, &limit, mode);
2869 alg2.cost.cost += op_cost;
2870 alg2.cost.latency += op_cost;
2871 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2872 *alg = alg2, *variant = add_variant;
2873
2874 return MULT_COST_LESS (&alg->cost, mult_cost);
2875 }
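/* Worked example (not part of GCC): a multiply by -7 has no cheap
   direct shift/add sequence, but synth_mult on -(-7) == 7 finds
   x * 7 == (x << 3) - x, so negate_variant gives

       t = (x << 3) - x;      (this is x * 7)
       r = -t;                (this is x * -7)

   add_variant similarly retries with VAL - 1, so a multiply by 7 can
   also be done as x*6 + x when that happens to be cheaper.  */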
2876
2877 /* A subroutine of expand_mult, used for constant multiplications.
2878 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2879 convenient. Use the shift/add sequence described by ALG and apply
2880 the final fixup specified by VARIANT. */
2881
2882 static rtx
2883 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2884 rtx target, const struct algorithm *alg,
2885 enum mult_variant variant)
2886 {
2887 HOST_WIDE_INT val_so_far;
2888 rtx insn, accum, tem;
2889 int opno;
2890 enum machine_mode nmode;
2891
2892   /* Avoid referencing memory over and over, and avoid invalid rtl
2893      sharing on SUBREGs.  */
2894 op0 = force_reg (mode, op0);
2895
2896 /* ACCUM starts out either as OP0 or as a zero, depending on
2897 the first operation. */
2898
2899 if (alg->op[0] == alg_zero)
2900 {
2901 accum = copy_to_mode_reg (mode, const0_rtx);
2902 val_so_far = 0;
2903 }
2904 else if (alg->op[0] == alg_m)
2905 {
2906 accum = copy_to_mode_reg (mode, op0);
2907 val_so_far = 1;
2908 }
2909 else
2910 gcc_unreachable ();
2911
2912 for (opno = 1; opno < alg->ops; opno++)
2913 {
2914 int log = alg->log[opno];
2915 rtx shift_subtarget = optimize ? 0 : accum;
2916 rtx add_target
2917 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2918 && !optimize)
2919 ? target : 0;
2920 rtx accum_target = optimize ? 0 : accum;
2921
2922 switch (alg->op[opno])
2923 {
2924 case alg_shift:
2925 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2926 build_int_cst (NULL_TREE, log),
2927 NULL_RTX, 0);
2928 /* REG_EQUAL note will be attached to the following insn. */
2929 emit_move_insn (accum, tem);
2930 val_so_far <<= log;
2931 break;
2932
2933 case alg_add_t_m2:
2934 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2935 build_int_cst (NULL_TREE, log),
2936 NULL_RTX, 0);
2937 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2938 add_target ? add_target : accum_target);
2939 val_so_far += (HOST_WIDE_INT) 1 << log;
2940 break;
2941
2942 case alg_sub_t_m2:
2943 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2944 build_int_cst (NULL_TREE, log),
2945 NULL_RTX, 0);
2946 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2947 add_target ? add_target : accum_target);
2948 val_so_far -= (HOST_WIDE_INT) 1 << log;
2949 break;
2950
2951 case alg_add_t2_m:
2952 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2953 build_int_cst (NULL_TREE, log),
2954 shift_subtarget,
2955 0);
2956 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2957 add_target ? add_target : accum_target);
2958 val_so_far = (val_so_far << log) + 1;
2959 break;
2960
2961 case alg_sub_t2_m:
2962 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2963 build_int_cst (NULL_TREE, log),
2964 shift_subtarget, 0);
2965 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2966 add_target ? add_target : accum_target);
2967 val_so_far = (val_so_far << log) - 1;
2968 break;
2969
2970 case alg_add_factor:
2971 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2972 build_int_cst (NULL_TREE, log),
2973 NULL_RTX, 0);
2974 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2975 add_target ? add_target : accum_target);
2976 val_so_far += val_so_far << log;
2977 break;
2978
2979 case alg_sub_factor:
2980 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2981 build_int_cst (NULL_TREE, log),
2982 NULL_RTX, 0);
2983 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2984 (add_target
2985 ? add_target : (optimize ? 0 : tem)));
2986 val_so_far = (val_so_far << log) - val_so_far;
2987 break;
2988
2989 default:
2990 gcc_unreachable ();
2991 }
2992
2993 /* Write a REG_EQUAL note on the last insn so that we can cse
2994 multiplication sequences. Note that if ACCUM is a SUBREG,
2995 we've set the inner register and must properly indicate
2996 that. */
2997
2998 tem = op0, nmode = mode;
2999 if (GET_CODE (accum) == SUBREG)
3000 {
3001 nmode = GET_MODE (SUBREG_REG (accum));
3002 tem = gen_lowpart (nmode, op0);
3003 }
3004
3005 insn = get_last_insn ();
3006 set_unique_reg_note (insn, REG_EQUAL,
3007 gen_rtx_MULT (nmode, tem,
3008 GEN_INT (val_so_far)));
3009 }
3010
3011 if (variant == negate_variant)
3012 {
3013 val_so_far = -val_so_far;
3014 accum = expand_unop (mode, neg_optab, accum, target, 0);
3015 }
3016 else if (variant == add_variant)
3017 {
3018 val_so_far = val_so_far + 1;
3019 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3020 }
3021
3022 /* Compare only the bits of val and val_so_far that are significant
3023 in the result mode, to avoid sign-/zero-extension confusion. */
3024 val &= GET_MODE_MASK (mode);
3025 val_so_far &= GET_MODE_MASK (mode);
3026 gcc_assert (val == val_so_far);
3027
3028 return accum;
3029 }
3030
3031 /* Perform a multiplication and return an rtx for the result.
3032 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3033 TARGET is a suggestion for where to store the result (an rtx).
3034
3035 We check specially for a constant integer as OP1.
3036 If you want this check for OP0 as well, then before calling
3037 you should swap the two operands if OP0 would be constant. */
3038
3039 rtx
3040 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3041 int unsignedp)
3042 {
3043 enum mult_variant variant;
3044 struct algorithm algorithm;
3045 int max_cost;
3046 bool speed = optimize_insn_for_speed_p ();
3047
3048 /* Handling const0_rtx here allows us to use zero as a rogue value for
3049 coeff below. */
3050 if (op1 == const0_rtx)
3051 return const0_rtx;
3052 if (op1 == const1_rtx)
3053 return op0;
3054 if (op1 == constm1_rtx)
3055 return expand_unop (mode,
3056 GET_MODE_CLASS (mode) == MODE_INT
3057 && !unsignedp && flag_trapv
3058 ? negv_optab : neg_optab,
3059 op0, target, 0);
3060
3061 /* These are the operations that are potentially turned into a sequence
3062 of shifts and additions. */
3063 if (SCALAR_INT_MODE_P (mode)
3064 && (unsignedp || !flag_trapv))
3065 {
3066 HOST_WIDE_INT coeff = 0;
3067 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3068
3069 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3070 less than or equal in size to `unsigned int' this doesn't matter.
3071 If the mode is larger than `unsigned int', then synth_mult works
3072 only if the constant value exactly fits in an `unsigned int' without
3073 any truncation. This means that multiplying by negative values does
3074 not work; results are off by 2^32 on a 32 bit machine. */
3075
3076 if (CONST_INT_P (op1))
3077 {
3078 /* Attempt to handle multiplication of DImode values by negative
3079 coefficients, by performing the multiplication by a positive
3080 multiplier and then inverting the result. */
3081 if (INTVAL (op1) < 0
3082 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3083 {
3084 	      /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3085 result is interpreted as an unsigned coefficient.
3086 Exclude cost of op0 from max_cost to match the cost
3087 calculation of the synth_mult. */
3088 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3089 - neg_cost[speed][mode];
3090 if (max_cost > 0
3091 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3092 &variant, max_cost))
3093 {
3094 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3095 NULL_RTX, &algorithm,
3096 variant);
3097 return expand_unop (mode, neg_optab, temp, target, 0);
3098 }
3099 }
3100 else coeff = INTVAL (op1);
3101 }
3102 else if (GET_CODE (op1) == CONST_DOUBLE)
3103 {
3104 /* If we are multiplying in DImode, it may still be a win
3105 to try to work with shifts and adds. */
3106 if (CONST_DOUBLE_HIGH (op1) == 0
3107 && CONST_DOUBLE_LOW (op1) > 0)
3108 coeff = CONST_DOUBLE_LOW (op1);
3109 else if (CONST_DOUBLE_LOW (op1) == 0
3110 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3111 {
3112 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3113 + HOST_BITS_PER_WIDE_INT;
3114 return expand_shift (LSHIFT_EXPR, mode, op0,
3115 build_int_cst (NULL_TREE, shift),
3116 target, unsignedp);
3117 }
3118 }
3119
3120 /* We used to test optimize here, on the grounds that it's better to
3121 produce a smaller program when -O is not used. But this causes
3122 such a terrible slowdown sometimes that it seems better to always
3123 use synth_mult. */
3124 if (coeff != 0)
3125 {
3126 /* Special case powers of two. */
3127 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3128 return expand_shift (LSHIFT_EXPR, mode, op0,
3129 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3130 target, unsignedp);
3131
3132 /* Exclude cost of op0 from max_cost to match the cost
3133 calculation of the synth_mult. */
3134 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3135 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3136 max_cost))
3137 return expand_mult_const (mode, op0, coeff, target,
3138 &algorithm, variant);
3139 }
3140 }
3141
3142 if (GET_CODE (op0) == CONST_DOUBLE)
3143 {
3144 rtx temp = op0;
3145 op0 = op1;
3146 op1 = temp;
3147 }
3148
3149 /* Expand x*2.0 as x+x. */
3150 if (GET_CODE (op1) == CONST_DOUBLE
3151 && SCALAR_FLOAT_MODE_P (mode))
3152 {
3153 REAL_VALUE_TYPE d;
3154 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3155
3156 if (REAL_VALUES_EQUAL (d, dconst2))
3157 {
3158 op0 = force_reg (GET_MODE (op0), op0);
3159 return expand_binop (mode, add_optab, op0, op0,
3160 target, unsignedp, OPTAB_LIB_WIDEN);
3161 }
3162 }
3163
3164 /* This used to use umul_optab if unsigned, but for non-widening multiply
3165 there is no difference between signed and unsigned. */
3166 op0 = expand_binop (mode,
3167 ! unsignedp
3168 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3169 ? smulv_optab : smul_optab,
3170 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3171 gcc_assert (op0);
3172 return op0;
3173 }
3174
3175 /* Perform a widening multiplication and return an rtx for the result.
3176 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3177 TARGET is a suggestion for where to store the result (an rtx).
3178 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3179 or smul_widen_optab.
3180
3181 We check specially for a constant integer as OP1, comparing the
3182 cost of a widening multiply against the cost of a sequence of shifts
3183 and adds. */
3184
3185 rtx
3186 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3187 int unsignedp, optab this_optab)
3188 {
3189 bool speed = optimize_insn_for_speed_p ();
3190 rtx cop1;
3191
3192 if (CONST_INT_P (op1)
3193 && GET_MODE (op0) != VOIDmode
3194 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3195 this_optab == umul_widen_optab))
3196 && CONST_INT_P (cop1)
3197 && (INTVAL (cop1) >= 0
3198 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3199 {
3200 HOST_WIDE_INT coeff = INTVAL (cop1);
3201 int max_cost;
3202 enum mult_variant variant;
3203 struct algorithm algorithm;
3204
3205 /* Special case powers of two. */
3206 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3207 {
3208 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3209 return expand_shift (LSHIFT_EXPR, mode, op0,
3210 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3211 target, unsignedp);
3212 }
3213
3214 /* Exclude cost of op0 from max_cost to match the cost
3215 calculation of the synth_mult. */
3216 max_cost = mul_widen_cost[speed][mode];
3217 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3218 max_cost))
3219 {
3220 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3221 return expand_mult_const (mode, op0, coeff, target,
3222 &algorithm, variant);
3223 }
3224 }
3225 return expand_binop (mode, this_optab, op0, op1, target,
3226 unsignedp, OPTAB_LIB_WIDEN);
3227 }
3228 \f
3229 /* Return the smallest n such that 2**n >= X. */
3230
3231 int
3232 ceil_log2 (unsigned HOST_WIDE_INT x)
3233 {
3234 return floor_log2 (x - 1) + 1;
3235 }
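/* For illustration (not part of GCC): ceil_log2 (1) == 0,
   ceil_log2 (7) == 3, ceil_log2 (8) == 3 and ceil_log2 (9) == 4,
   relying on floor_log2 (0) being -1.  */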
3236
3237 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3238 replace division by D, and put the least significant N bits of the result
3239 in *MULTIPLIER_PTR and return the most significant bit.
3240
3241 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3242 needed precision is in PRECISION (should be <= N).
3243
3244    PRECISION should be as small as possible so this function can choose
3245    the multiplier more freely.
3246
3247    The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
3248    is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3249
3250 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3251 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3252
3253 static
3254 unsigned HOST_WIDE_INT
3255 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3256 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3257 {
3258 HOST_WIDE_INT mhigh_hi, mlow_hi;
3259 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3260 int lgup, post_shift;
3261 int pow, pow2;
3262 unsigned HOST_WIDE_INT nl, dummy1;
3263 HOST_WIDE_INT nh, dummy2;
3264
3265 /* lgup = ceil(log2(divisor)); */
3266 lgup = ceil_log2 (d);
3267
3268 gcc_assert (lgup <= n);
3269
3270 pow = n + lgup;
3271 pow2 = n + lgup - precision;
3272
3273 /* We could handle this with some effort, but this case is much
3274 better handled directly with a scc insn, so rely on caller using
3275 that. */
3276 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3277
3278 /* mlow = 2^(N + lgup)/d */
3279 if (pow >= HOST_BITS_PER_WIDE_INT)
3280 {
3281 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3282 nl = 0;
3283 }
3284 else
3285 {
3286 nh = 0;
3287 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3288 }
3289 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3290 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3291
3292   /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3293 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3294 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3295 else
3296 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3297 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3298 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3299
3300 gcc_assert (!mhigh_hi || nh - d < d);
3301 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3302 /* Assert that mlow < mhigh. */
3303 gcc_assert (mlow_hi < mhigh_hi
3304 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3305
3306 /* If precision == N, then mlow, mhigh exceed 2^N
3307 (but they do not exceed 2^(N+1)). */
3308
3309 /* Reduce to lowest terms. */
3310 for (post_shift = lgup; post_shift > 0; post_shift--)
3311 {
3312 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3313 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3314 if (ml_lo >= mh_lo)
3315 break;
3316
3317 mlow_hi = 0;
3318 mlow_lo = ml_lo;
3319 mhigh_hi = 0;
3320 mhigh_lo = mh_lo;
3321 }
3322
3323 *post_shift_ptr = post_shift;
3324 *lgup_ptr = lgup;
3325 if (n < HOST_BITS_PER_WIDE_INT)
3326 {
3327 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3328 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3329 return mhigh_lo >= mask;
3330 }
3331 else
3332 {
3333 *multiplier_ptr = GEN_INT (mhigh_lo);
3334 return mhigh_hi;
3335 }
3336 }
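/* Worked example (not part of GCC): for d = 7 with n = precision = 32
   on a 64-bit HOST_WIDE_INT host, lgup == 3 and
   mhigh == (2^35 + 2^3)/7 == 0x124924925, which needs 33 bits.  The
   low 32 bits 0x24924925 are stored in *MULTIPLIER_PTR,
   *POST_SHIFT_PTR becomes 3, and the return value 1 flags the 33rd
   bit, so conceptually x / 7 == (x * 0x124924925) >> 35 for every
   32-bit unsigned x.  Callers such as expand_divmod keep the 33-bit
   product within one word with roughly

       t = mulhi (x, 0x24924925);
       q = (t + ((x - t) >> 1)) >> 2;
*/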
3337
3338 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3339 congruent to 1 (mod 2**N). */
3340
3341 static unsigned HOST_WIDE_INT
3342 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3343 {
3344 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3345
3346 /* The algorithm notes that the choice y = x satisfies
3347 x*y == 1 mod 2^3, since x is assumed odd.
3348 Each iteration doubles the number of bits of significance in y. */
3349
3350 unsigned HOST_WIDE_INT mask;
3351 unsigned HOST_WIDE_INT y = x;
3352 int nbit = 3;
3353
3354 mask = (n == HOST_BITS_PER_WIDE_INT
3355 ? ~(unsigned HOST_WIDE_INT) 0
3356 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3357
3358 while (nbit < n)
3359 {
3360 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3361 nbit *= 2;
3362 }
3363 return y;
3364 }
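/* Illustration (assuming n = 8): for x = 7 the seed y = x satisfies
   7*7 = 49 == 1 (mod 8).  The first iteration computes
   y = 7 * (2 - 49) & 0xff = 183, and indeed 7 * 183 = 1281 = 5*256 + 1,
   so 183 is the inverse of 7 mod 2^8.  Since NBIT doubles each time,
   the loop runs at most ceil(log2(n/3)) times.  */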
3365
3366 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong-signedness
3367 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3368 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3369 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3370 become signed.
3371
3372 The result is put in TARGET if that is convenient.
3373
3374 MODE is the mode of operation. */
3375
3376 rtx
3377 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3378 rtx op1, rtx target, int unsignedp)
3379 {
3380 rtx tem;
3381 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3382
3383 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3384 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3385 NULL_RTX, 0);
3386 tem = expand_and (mode, tem, op1, NULL_RTX);
3387 adj_operand
3388 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3389 adj_operand);
3390
3391 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3392 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3393 NULL_RTX, 0);
3394 tem = expand_and (mode, tem, op0, NULL_RTX);
3395 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3396 target);
3397
3398 return target;
3399 }
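/* This is the standard identity relating the two flavors of high part
   (for N-bit operands, all arithmetic mod 2^N):

       highpart_signed (a, b) = highpart_unsigned (a, b)
                                - (a < 0 ? b : 0) - (b < 0 ? a : 0)

   The arithmetic right shift by N - 1 above yields an all-ones mask
   exactly when its operand is negative, so the AND produces each
   conditional term without a branch.  Illustration for N = 8,
   a = 0xff (-1), b = 2: highpart_unsigned = 1, and 1 - 2 - 0 = -1,
   which is highpart_signed (-1 * 2 = 0xfffe).  */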
3400
3401 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3402
3403 static rtx
3404 extract_high_half (enum machine_mode mode, rtx op)
3405 {
3406 enum machine_mode wider_mode;
3407
3408 if (mode == word_mode)
3409 return gen_highpart (mode, op);
3410
3411 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3412
3413 wider_mode = GET_MODE_WIDER_MODE (mode);
3414 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3415 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3416 return convert_modes (mode, wider_mode, op, 0);
3417 }
3418
3419 /* Like expand_mult_highpart, but only consider using a multiplication
3420 optab. OP1 is an rtx for the constant operand. */
3421
3422 static rtx
3423 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3424 rtx target, int unsignedp, int max_cost)
3425 {
3426 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3427 enum machine_mode wider_mode;
3428 optab moptab;
3429 rtx tem;
3430 int size;
3431 bool speed = optimize_insn_for_speed_p ();
3432
3433 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3434
3435 wider_mode = GET_MODE_WIDER_MODE (mode);
3436 size = GET_MODE_BITSIZE (mode);
3437
3438 /* Firstly, try using a multiplication insn that only generates the needed
3439 high part of the product, and in the sign flavor of unsignedp. */
3440 if (mul_highpart_cost[speed][mode] < max_cost)
3441 {
3442 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3443 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3444 unsignedp, OPTAB_DIRECT);
3445 if (tem)
3446 return tem;
3447 }
3448
3449 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3450 Need to adjust the result after the multiplication. */
3451 if (size - 1 < BITS_PER_WORD
3452 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3453 + 4 * add_cost[speed][mode] < max_cost))
3454 {
3455 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3456 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3457 unsignedp, OPTAB_DIRECT);
3458 if (tem)
3459 /* We used the wrong signedness. Adjust the result. */
3460 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3461 tem, unsignedp);
3462 }
3463
3464 /* Try widening multiplication. */
3465 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3466 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3467 && mul_widen_cost[speed][wider_mode] < max_cost)
3468 {
3469 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3470 unsignedp, OPTAB_WIDEN);
3471 if (tem)
3472 return extract_high_half (mode, tem);
3473 }
3474
3475 /* Try widening the mode and performing a non-widening multiplication. */
3476 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3477 && size - 1 < BITS_PER_WORD
3478 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3479 {
3480 rtx insns, wop0, wop1;
3481
3482 /* We need to widen the operands, for example to ensure the
3483 constant multiplier is correctly sign or zero extended.
3484 Use a sequence to clean up any instructions emitted by
3485 the conversions if things don't work out. */
3486 start_sequence ();
3487 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3488 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3489 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3490 unsignedp, OPTAB_WIDEN);
3491 insns = get_insns ();
3492 end_sequence ();
3493
3494 if (tem)
3495 {
3496 emit_insn (insns);
3497 return extract_high_half (mode, tem);
3498 }
3499 }
3500
3501 /* Try widening multiplication of opposite signedness, and adjust. */
3502 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3503 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3504 && size - 1 < BITS_PER_WORD
3505 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3506 + 4 * add_cost[speed][mode] < max_cost))
3507 {
3508 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3509 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3510 if (tem != 0)
3511 {
3512 tem = extract_high_half (mode, tem);
3513 /* We used the wrong signedness. Adjust the result. */
3514 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3515 target, unsignedp);
3516 }
3517 }
3518
3519 return 0;
3520 }
3521
3522 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3523 putting the high half of the result in TARGET if that is convenient,
3524 and return where the result is. If the operation cannot be performed,
3525 0 is returned.
3526
3527 MODE is the mode of operation and result.
3528
3529 UNSIGNEDP nonzero means unsigned multiply.
3530
3531 MAX_COST is the total allowed cost for the expanded RTL. */
3532
3533 static rtx
3534 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3535 rtx target, int unsignedp, int max_cost)
3536 {
3537 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3538 unsigned HOST_WIDE_INT cnst1;
3539 int extra_cost;
3540 bool sign_adjust = false;
3541 enum mult_variant variant;
3542 struct algorithm alg;
3543 rtx tem;
3544 bool speed = optimize_insn_for_speed_p ();
3545
3546 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3547 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3548 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3549
3550 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3551
3552 /* We can't optimize modes wider than BITS_PER_WORD.
3553 ??? We might be able to perform double-word arithmetic if
3554 mode == word_mode, however all the cost calculations in
3555 synth_mult etc. assume single-word operations. */
3556 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3557 return expand_mult_highpart_optab (mode, op0, op1, target,
3558 unsignedp, max_cost);
3559
3560 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3561
3562 /* Check whether we try to multiply by a negative constant. */
3563 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3564 {
3565 sign_adjust = true;
3566 extra_cost += add_cost[speed][mode];
3567 }
3568
3569 /* See whether shift/add multiplication is cheap enough. */
3570 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3571 max_cost - extra_cost))
3572 {
3573 /* See whether the specialized multiplication optabs are
3574 cheaper than the shift/add version. */
3575 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3576 alg.cost.cost + extra_cost);
3577 if (tem)
3578 return tem;
3579
3580 tem = convert_to_mode (wider_mode, op0, unsignedp);
3581 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3582 tem = extract_high_half (mode, tem);
3583
3584 /* Adjust result for signedness. */
3585 if (sign_adjust)
3586 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3587
3588 return tem;
3589 }
3590 return expand_mult_highpart_optab (mode, op0, op1, target,
3591 unsignedp, max_cost);
3592 }
3593
3594
3595 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3596
3597 static rtx
3598 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3599 {
3600 unsigned HOST_WIDE_INT masklow, maskhigh;
3601 rtx result, temp, shift, label;
3602 int logd;
3603
3604 logd = floor_log2 (d);
3605 result = gen_reg_rtx (mode);
3606
3607 /* Avoid conditional branches when they're expensive. */
3608 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3609 && optimize_insn_for_speed_p ())
3610 {
3611 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3612 mode, 0, -1);
3613 if (signmask)
3614 {
3615 signmask = force_reg (mode, signmask);
3616 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3617 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3618
3619 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3620 which instruction sequence to use. If logical right shifts
3621 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3622 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3623
3624 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3625 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3626 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3627 {
3628 temp = expand_binop (mode, xor_optab, op0, signmask,
3629 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3630 temp = expand_binop (mode, sub_optab, temp, signmask,
3631 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3632 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3633 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3634 temp = expand_binop (mode, xor_optab, temp, signmask,
3635 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3636 temp = expand_binop (mode, sub_optab, temp, signmask,
3637 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3638 }
3639 else
3640 {
3641 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3642 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3643 signmask = force_reg (mode, signmask);
3644
3645 temp = expand_binop (mode, add_optab, op0, signmask,
3646 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3647 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3648 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3649 temp = expand_binop (mode, sub_optab, temp, signmask,
3650 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3651 }
3652 return temp;
3653 }
3654 }
3655
3656 /* Mask contains the mode's signbit and the significant bits of the
3657 modulus. By including the signbit in the operation, many targets
3658 can avoid an explicit compare operation in the following comparison
3659 against zero. */
3660
3661 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3662 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3663 {
3664 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3665 maskhigh = -1;
3666 }
3667 else
3668 maskhigh = (HOST_WIDE_INT) -1
3669 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3670
3671 temp = expand_binop (mode, and_optab, op0,
3672 immed_double_const (masklow, maskhigh, mode),
3673 result, 1, OPTAB_LIB_WIDEN);
3674 if (temp != result)
3675 emit_move_insn (result, temp);
3676
3677 label = gen_label_rtx ();
3678 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3679
3680 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3681 0, OPTAB_LIB_WIDEN);
3682 masklow = (HOST_WIDE_INT) -1 << logd;
3683 maskhigh = -1;
3684 temp = expand_binop (mode, ior_optab, temp,
3685 immed_double_const (masklow, maskhigh, mode),
3686 result, 1, OPTAB_LIB_WIDEN);
3687 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3688 0, OPTAB_LIB_WIDEN);
3689 if (temp != result)
3690 emit_move_insn (result, temp);
3691 emit_label (label);
3692 return result;
3693 }
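/* Illustration of the branch-less path (assuming a 32-bit mode and
   d = 8, so logd = 3): for op0 = -11 the store-flag sets signmask = -1,
   and the XOR/SUB variant evaluates

       ((((-11 ^ -1) - -1) & 7) ^ -1) - -1  ==  ((11 & 7) ^ -1) + 1  ==  -3,

   which is -11 rem 8 under truncating division.  For non-negative
   op0 the mask is zero and the sequence degenerates to op0 & 7.  */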
3694
3695 /* Expand signed division of OP0 by a power of two D in mode MODE.
3696 This routine is only called for positive values of D. */
3697
3698 static rtx
3699 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3700 {
3701 rtx temp, label;
3702 tree shift;
3703 int logd;
3704
3705 logd = floor_log2 (d);
3706 shift = build_int_cst (NULL_TREE, logd);
3707
3708 if (d == 2
3709 && BRANCH_COST (optimize_insn_for_speed_p (),
3710 false) >= 1)
3711 {
3712 temp = gen_reg_rtx (mode);
3713 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3714 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3715 0, OPTAB_LIB_WIDEN);
3716 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3717 }
3718
3719 #ifdef HAVE_conditional_move
3720 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3721 >= 2)
3722 {
3723 rtx temp2;
3724
3725 /* ??? emit_conditional_move forces a stack adjustment via
3726 compare_from_rtx so, if the sequence is discarded, it will
3727 be lost. Do it now instead. */
3728 do_pending_stack_adjust ();
3729
3730 start_sequence ();
3731 temp2 = copy_to_mode_reg (mode, op0);
3732 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3733 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3734 temp = force_reg (mode, temp);
3735
3736 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3737 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3738 mode, temp, temp2, mode, 0);
3739 if (temp2)
3740 {
3741 rtx seq = get_insns ();
3742 end_sequence ();
3743 emit_insn (seq);
3744 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3745 }
3746 end_sequence ();
3747 }
3748 #endif
3749
3750 if (BRANCH_COST (optimize_insn_for_speed_p (),
3751 false) >= 2)
3752 {
3753 int ushift = GET_MODE_BITSIZE (mode) - logd;
3754
3755 temp = gen_reg_rtx (mode);
3756 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3757 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3758 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3759 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3760 else
3761 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3762 build_int_cst (NULL_TREE, ushift),
3763 NULL_RTX, 1);
3764 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3765 0, OPTAB_LIB_WIDEN);
3766 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3767 }
3768
3769 label = gen_label_rtx ();
3770 temp = copy_to_mode_reg (mode, op0);
3771 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3772 expand_inc (temp, GEN_INT (d - 1));
3773 emit_label (label);
3774 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3775 }
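/* Illustration (assuming a 32-bit mode): truncating signed division
   by d = 4 adds the bias d - 1 = 3 to negative dividends before the
   arithmetic shift:

       (-9 + 3) >> 2  ==  -6 >> 2  ==  -2     (matches C's -9 / 4)
        -9 >> 2                    ==  -3     (floor; would be wrong)

   The branches above differ only in how the conditional bias is
   materialized: a store-flag, a conditional move, or a logical right
   shift of the sign mask by GET_MODE_BITSIZE (mode) - logd.  */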
3776 \f
3777 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3778 if that is convenient, and returning where the result is.
3779 You may request either the quotient or the remainder as the result;
3780 specify REM_FLAG nonzero to get the remainder.
3781
3782 CODE is the expression code for which kind of division this is;
3783 it controls how rounding is done. MODE is the machine mode to use.
3784 UNSIGNEDP nonzero means do unsigned division. */
3785
3786 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3787 and then correct it by or'ing in missing high bits
3788 if result of ANDI is nonzero.
3789 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3790 This could optimize to a bfexts instruction.
3791 But C doesn't use these operations, so their optimizations are
3792 left for later. */
3793 /* ??? For modulo, we don't actually need the highpart of the first product,
3794 the low part will do nicely. And for small divisors, the second multiply
3795 can also be a low-part only multiply or even be completely left out.
3796 E.g. to calculate the remainder of a division by 3 with a 32 bit
3797 multiply, multiply with 0x55555556 and extract the upper two bits;
3798 the result is exact for inputs up to 0x1fffffff.
3799 The input range can be reduced by using cross-sum rules.
3800 For odd divisors >= 3, the following table gives right shift counts
3801 so that if a number is shifted by an integer multiple of the given
3802 amount, the remainder stays the same:
3803 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3804 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3805 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3806 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3807 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3808
3809 Cross-sum rules for even numbers can be derived by leaving as many bits
3810 to the right alone as the divisor has zeros to the right.
3811 E.g. if x is an unsigned 32 bit number:
3812 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3813 */
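/* To make the 0x55555556 example concrete: 0x55555556 = (2^32 + 2)/3,
   so for x = 3*k + r the low word of x * 0x55555556 equals
   r * 0x55555556 + 2*k, and for x <= 0x1fffffff the 2*k term never
   reaches the top two bits:

       x = 5:  low word 0xaaaaaaae, top bits 10  ->  remainder 2
       x = 7:  low word 0x5555555a, top bits 01  ->  remainder 1  */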
3814
3815 rtx
3816 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3817 rtx op0, rtx op1, rtx target, int unsignedp)
3818 {
3819 enum machine_mode compute_mode;
3820 rtx tquotient;
3821 rtx quotient = 0, remainder = 0;
3822 rtx last;
3823 int size;
3824 rtx insn, set;
3825 optab optab1, optab2;
3826 int op1_is_constant, op1_is_pow2 = 0;
3827 int max_cost, extra_cost;
3828 static HOST_WIDE_INT last_div_const = 0;
3829 static HOST_WIDE_INT ext_op1;
3830 bool speed = optimize_insn_for_speed_p ();
3831
3832 op1_is_constant = CONST_INT_P (op1);
3833 if (op1_is_constant)
3834 {
3835 ext_op1 = INTVAL (op1);
3836 if (unsignedp)
3837 ext_op1 &= GET_MODE_MASK (mode);
3838 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3839 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3840 }
3841
3842 /*
3843 This is the structure of expand_divmod:
3844
3845 First comes code to fix up the operands so we can perform the operations
3846 correctly and efficiently.
3847
3848 Second comes a switch statement with code specific for each rounding mode.
3849 For some special operands this code emits all RTL for the desired
3850 operation, for other cases, it generates only a quotient and stores it in
3851 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3852 to indicate that it has not done anything.
3853
3854 Last comes code that finishes the operation. If QUOTIENT is set and
3855 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3856 QUOTIENT is not set, it is computed using trunc rounding.
3857
3858 We try to generate special code for division and remainder when OP1 is a
3859 constant. If |OP1| = 2**n we can use shifts and some other fast
3860 operations. For other values of OP1, we compute a carefully selected
3861 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3862 by m.
3863
3864 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3865 half of the product. Different strategies for generating the product are
3866 implemented in expand_mult_highpart.
3867
3868 If what we actually want is the remainder, we generate that by another
3869 by-constant multiplication and a subtraction. */
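/* For instance (assuming 32-bit operations), unsigned division by 7
   picks the 33-bit approximation 0x124924925 to 2^32/7, and the
   TRUNC_DIV code below then emits the classic fix-up sequence

       t1 = mulhi (x, 0x24924925);
       q  = (((x - t1) >> 1) + t1) >> 2;

   which yields floor (x / 7) without needing the 33rd bit.  */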
3870
3871 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3872 code below will malfunction if we are, so check here and handle
3873 the special case if so. */
3874 if (op1 == const1_rtx)
3875 return rem_flag ? const0_rtx : op0;
3876
3877 /* When dividing by -1, we could get an overflow.
3878 negv_optab can handle overflows. */
3879 if (! unsignedp && op1 == constm1_rtx)
3880 {
3881 if (rem_flag)
3882 return const0_rtx;
3883 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3884 ? negv_optab : neg_optab, op0, target, 0);
3885 }
3886
3887 if (target
3888 /* Don't use the function value register as a target
3889 since we have to read it as well as write it,
3890 and function-inlining gets confused by this. */
3891 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3892 /* Don't clobber an operand while doing a multi-step calculation. */
3893 || ((rem_flag || op1_is_constant)
3894 && (reg_mentioned_p (target, op0)
3895 || (MEM_P (op0) && MEM_P (target))))
3896 || reg_mentioned_p (target, op1)
3897 || (MEM_P (op1) && MEM_P (target))))
3898 target = 0;
3899
3900 /* Get the mode in which to perform this computation. Normally it will
3901 be MODE, but sometimes we can't do the desired operation in MODE.
3902 If so, pick a wider mode in which we can do the operation. Convert
3903 to that mode at the start to avoid repeated conversions.
3904
3905 First see what operations we need. These depend on the expression
3906 we are evaluating. (We assume that divxx3 insns exist under the
3907 same conditions that modxx3 insns do and that these insns don't normally
3908 fail. If these assumptions are not correct, we may generate less
3909 efficient code in some cases.)
3910
3911 Then see if we find a mode in which we can open-code that operation
3912 (either a division, modulus, or shift). Finally, check for the smallest
3913 mode for which we can do the operation with a library call. */
3914
3915 /* We might want to refine this now that we have division-by-constant
3916 optimization. Since expand_mult_highpart tries so many variants, it is
3917 not straightforward to generalize this. Maybe we should make an array
3918 of possible modes in init_expmed? Save this for GCC 2.7. */
3919
3920 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3921 ? (unsignedp ? lshr_optab : ashr_optab)
3922 : (unsignedp ? udiv_optab : sdiv_optab));
3923 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3924 ? optab1
3925 : (unsignedp ? udivmod_optab : sdivmod_optab));
3926
3927 for (compute_mode = mode; compute_mode != VOIDmode;
3928 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3929 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3930 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3931 break;
3932
3933 if (compute_mode == VOIDmode)
3934 for (compute_mode = mode; compute_mode != VOIDmode;
3935 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3936 if (optab_libfunc (optab1, compute_mode)
3937 || optab_libfunc (optab2, compute_mode))
3938 break;
3939
3940 /* If we still couldn't find a mode, use MODE, but expand_binop will
3941 probably die. */
3942 if (compute_mode == VOIDmode)
3943 compute_mode = mode;
3944
3945 if (target && GET_MODE (target) == compute_mode)
3946 tquotient = target;
3947 else
3948 tquotient = gen_reg_rtx (compute_mode);
3949
3950 size = GET_MODE_BITSIZE (compute_mode);
3951 #if 0
3952 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3953 (mode), and thereby get better code when OP1 is a constant. Do that
3954 later. It will require going over all usages of SIZE below. */
3955 size = GET_MODE_BITSIZE (mode);
3956 #endif
3957
3958 /* Only deduct something for a REM if the last divide done was
3959 for a different constant. Then set the constant of the last
3960 divide. */
3961 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3962 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3963 && INTVAL (op1) == last_div_const))
3964 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3965
3966 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3967
3968 /* Now convert to the best mode to use. */
3969 if (compute_mode != mode)
3970 {
3971 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3972 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3973
3974 /* convert_modes may have placed op1 into a register, so we
3975 must recompute the following. */
3976 op1_is_constant = CONST_INT_P (op1);
3977 op1_is_pow2 = (op1_is_constant
3978 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3979 || (! unsignedp
3980 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3981 }
3982
3983 /* If one of the operands is a volatile MEM, copy it into a register. */
3984
3985 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3986 op0 = force_reg (compute_mode, op0);
3987 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3988 op1 = force_reg (compute_mode, op1);
3989
3990 /* If we need the remainder or if OP1 is constant, we need to
3991 put OP0 in a register in case it has any queued subexpressions. */
3992 if (rem_flag || op1_is_constant)
3993 op0 = force_reg (compute_mode, op0);
3994
3995 last = get_last_insn ();
3996
3997 /* Promote floor rounding to trunc rounding for unsigned operations. */
3998 if (unsignedp)
3999 {
4000 if (code == FLOOR_DIV_EXPR)
4001 code = TRUNC_DIV_EXPR;
4002 if (code == FLOOR_MOD_EXPR)
4003 code = TRUNC_MOD_EXPR;
4004 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4005 code = TRUNC_DIV_EXPR;
4006 }
4007
4008 if (op1 != const0_rtx)
4009 switch (code)
4010 {
4011 case TRUNC_MOD_EXPR:
4012 case TRUNC_DIV_EXPR:
4013 if (op1_is_constant)
4014 {
4015 if (unsignedp)
4016 {
4017 unsigned HOST_WIDE_INT mh;
4018 int pre_shift, post_shift;
4019 int dummy;
4020 rtx ml;
4021 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4022 & GET_MODE_MASK (compute_mode));
4023
4024 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4025 {
4026 pre_shift = floor_log2 (d);
4027 if (rem_flag)
4028 {
4029 remainder
4030 = expand_binop (compute_mode, and_optab, op0,
4031 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4032 remainder, 1,
4033 OPTAB_LIB_WIDEN);
4034 if (remainder)
4035 return gen_lowpart (mode, remainder);
4036 }
4037 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4038 build_int_cst (NULL_TREE,
4039 pre_shift),
4040 tquotient, 1);
4041 }
4042 else if (size <= HOST_BITS_PER_WIDE_INT)
4043 {
4044 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4045 {
4046 /* Most significant bit of divisor is set; emit an scc
4047 insn. */
4048 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4049 compute_mode, 1, 1);
4050 }
4051 else
4052 {
4053 /* Find a suitable multiplier and right shift count
4054 instead of multiplying with D. */
4055
4056 mh = choose_multiplier (d, size, size,
4057 &ml, &post_shift, &dummy);
4058
4059 /* If the suggested multiplier is more than SIZE bits,
4060 we can do better for even divisors, using an
4061 initial right shift. */
4062 if (mh != 0 && (d & 1) == 0)
4063 {
4064 pre_shift = floor_log2 (d & -d);
4065 mh = choose_multiplier (d >> pre_shift, size,
4066 size - pre_shift,
4067 &ml, &post_shift, &dummy);
4068 gcc_assert (!mh);
4069 }
4070 else
4071 pre_shift = 0;
4072
4073 if (mh != 0)
4074 {
4075 rtx t1, t2, t3, t4;
4076
4077 if (post_shift - 1 >= BITS_PER_WORD)
4078 goto fail1;
4079
4080 extra_cost
4081 = (shift_cost[speed][compute_mode][post_shift - 1]
4082 + shift_cost[speed][compute_mode][1]
4083 + 2 * add_cost[speed][compute_mode]);
4084 t1 = expand_mult_highpart (compute_mode, op0, ml,
4085 NULL_RTX, 1,
4086 max_cost - extra_cost);
4087 if (t1 == 0)
4088 goto fail1;
4089 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4090 op0, t1),
4091 NULL_RTX);
4092 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
4093 integer_one_node, NULL_RTX, 1);
4094 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4095 t1, t3),
4096 NULL_RTX);
4097 quotient = expand_shift
4098 (RSHIFT_EXPR, compute_mode, t4,
4099 build_int_cst (NULL_TREE, post_shift - 1),
4100 tquotient, 1);
4101 }
4102 else
4103 {
4104 rtx t1, t2;
4105
4106 if (pre_shift >= BITS_PER_WORD
4107 || post_shift >= BITS_PER_WORD)
4108 goto fail1;
4109
4110 t1 = expand_shift
4111 (RSHIFT_EXPR, compute_mode, op0,
4112 build_int_cst (NULL_TREE, pre_shift),
4113 NULL_RTX, 1);
4114 extra_cost
4115 = (shift_cost[speed][compute_mode][pre_shift]
4116 + shift_cost[speed][compute_mode][post_shift]);
4117 t2 = expand_mult_highpart (compute_mode, t1, ml,
4118 NULL_RTX, 1,
4119 max_cost - extra_cost);
4120 if (t2 == 0)
4121 goto fail1;
4122 quotient = expand_shift
4123 (RSHIFT_EXPR, compute_mode, t2,
4124 build_int_cst (NULL_TREE, post_shift),
4125 tquotient, 1);
4126 }
4127 }
4128 }
4129 else /* Too wide mode to use tricky code */
4130 break;
4131
4132 insn = get_last_insn ();
4133 if (insn != last
4134 && (set = single_set (insn)) != 0
4135 && SET_DEST (set) == quotient)
4136 set_unique_reg_note (insn,
4137 REG_EQUAL,
4138 gen_rtx_UDIV (compute_mode, op0, op1));
4139 }
4140 else /* TRUNC_DIV, signed */
4141 {
4142 unsigned HOST_WIDE_INT ml;
4143 int lgup, post_shift;
4144 rtx mlr;
4145 HOST_WIDE_INT d = INTVAL (op1);
4146 unsigned HOST_WIDE_INT abs_d;
4147
4148 /* Since d might be INT_MIN, we have to cast to
4149 unsigned HOST_WIDE_INT before negating to avoid
4150 undefined signed overflow. */
4151 abs_d = (d >= 0
4152 ? (unsigned HOST_WIDE_INT) d
4153 : - (unsigned HOST_WIDE_INT) d);
4154
4155 /* n rem d = n rem -d */
4156 if (rem_flag && d < 0)
4157 {
4158 d = abs_d;
4159 op1 = gen_int_mode (abs_d, compute_mode);
4160 }
4161
4162 if (d == 1)
4163 quotient = op0;
4164 else if (d == -1)
4165 quotient = expand_unop (compute_mode, neg_optab, op0,
4166 tquotient, 0);
4167 else if (HOST_BITS_PER_WIDE_INT >= size
4168 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4169 {
4170 /* This case is not handled correctly below. */
4171 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4172 compute_mode, 1, 1);
4173 if (quotient == 0)
4174 goto fail1;
4175 }
4176 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4177 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4178 : sdiv_pow2_cheap[speed][compute_mode])
4179 /* We assume that the cheap metric is true if the
4180 optab has an expander for this mode. */
4181 && ((optab_handler ((rem_flag ? smod_optab
4182 : sdiv_optab),
4183 compute_mode)
4184 != CODE_FOR_nothing)
4185 || (optab_handler (sdivmod_optab,
4186 compute_mode)
4187 != CODE_FOR_nothing)))
4188 ;
4189 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4190 {
4191 if (rem_flag)
4192 {
4193 remainder = expand_smod_pow2 (compute_mode, op0, d);
4194 if (remainder)
4195 return gen_lowpart (mode, remainder);
4196 }
4197
4198 if (sdiv_pow2_cheap[speed][compute_mode]
4199 && ((optab_handler (sdiv_optab, compute_mode)
4200 != CODE_FOR_nothing)
4201 || (optab_handler (sdivmod_optab, compute_mode)
4202 != CODE_FOR_nothing)))
4203 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4204 compute_mode, op0,
4205 gen_int_mode (abs_d,
4206 compute_mode),
4207 NULL_RTX, 0);
4208 else
4209 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4210
4211 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4212 negate the quotient. */
4213 if (d < 0)
4214 {
4215 insn = get_last_insn ();
4216 if (insn != last
4217 && (set = single_set (insn)) != 0
4218 && SET_DEST (set) == quotient
4219 && abs_d < ((unsigned HOST_WIDE_INT) 1
4220 << (HOST_BITS_PER_WIDE_INT - 1)))
4221 set_unique_reg_note (insn,
4222 REG_EQUAL,
4223 gen_rtx_DIV (compute_mode,
4224 op0,
4225 GEN_INT
4226 (trunc_int_for_mode
4227 (abs_d,
4228 compute_mode))));
4229
4230 quotient = expand_unop (compute_mode, neg_optab,
4231 quotient, quotient, 0);
4232 }
4233 }
4234 else if (size <= HOST_BITS_PER_WIDE_INT)
4235 {
4236 choose_multiplier (abs_d, size, size - 1,
4237 &mlr, &post_shift, &lgup);
4238 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4239 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4240 {
4241 rtx t1, t2, t3;
4242
4243 if (post_shift >= BITS_PER_WORD
4244 || size - 1 >= BITS_PER_WORD)
4245 goto fail1;
4246
4247 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4248 + shift_cost[speed][compute_mode][size - 1]
4249 + add_cost[speed][compute_mode]);
4250 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4251 NULL_RTX, 0,
4252 max_cost - extra_cost);
4253 if (t1 == 0)
4254 goto fail1;
4255 t2 = expand_shift
4256 (RSHIFT_EXPR, compute_mode, t1,
4257 build_int_cst (NULL_TREE, post_shift),
4258 NULL_RTX, 0);
4259 t3 = expand_shift
4260 (RSHIFT_EXPR, compute_mode, op0,
4261 build_int_cst (NULL_TREE, size - 1),
4262 NULL_RTX, 0);
4263 if (d < 0)
4264 quotient
4265 = force_operand (gen_rtx_MINUS (compute_mode,
4266 t3, t2),
4267 tquotient);
4268 else
4269 quotient
4270 = force_operand (gen_rtx_MINUS (compute_mode,
4271 t2, t3),
4272 tquotient);
4273 }
4274 else
4275 {
4276 rtx t1, t2, t3, t4;
4277
4278 if (post_shift >= BITS_PER_WORD
4279 || size - 1 >= BITS_PER_WORD)
4280 goto fail1;
4281
4282 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4283 mlr = gen_int_mode (ml, compute_mode);
4284 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4285 + shift_cost[speed][compute_mode][size - 1]
4286 + 2 * add_cost[speed][compute_mode]);
4287 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4288 NULL_RTX, 0,
4289 max_cost - extra_cost);
4290 if (t1 == 0)
4291 goto fail1;
4292 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4293 t1, op0),
4294 NULL_RTX);
4295 t3 = expand_shift
4296 (RSHIFT_EXPR, compute_mode, t2,
4297 build_int_cst (NULL_TREE, post_shift),
4298 NULL_RTX, 0);
4299 t4 = expand_shift
4300 (RSHIFT_EXPR, compute_mode, op0,
4301 build_int_cst (NULL_TREE, size - 1),
4302 NULL_RTX, 0);
4303 if (d < 0)
4304 quotient
4305 = force_operand (gen_rtx_MINUS (compute_mode,
4306 t4, t3),
4307 tquotient);
4308 else
4309 quotient
4310 = force_operand (gen_rtx_MINUS (compute_mode,
4311 t3, t4),
4312 tquotient);
4313 }
4314 }
4315 else /* Too wide mode to use tricky code */
4316 break;
4317
4318 insn = get_last_insn ();
4319 if (insn != last
4320 && (set = single_set (insn)) != 0
4321 && SET_DEST (set) == quotient)
4322 set_unique_reg_note (insn,
4323 REG_EQUAL,
4324 gen_rtx_DIV (compute_mode, op0, op1));
4325 }
4326 break;
4327 }
4328 fail1:
4329 delete_insns_since (last);
4330 break;
4331
4332 case FLOOR_DIV_EXPR:
4333 case FLOOR_MOD_EXPR:
4334 /* We will come here only for signed operations. */
4335 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4336 {
4337 unsigned HOST_WIDE_INT mh;
4338 int pre_shift, lgup, post_shift;
4339 HOST_WIDE_INT d = INTVAL (op1);
4340 rtx ml;
4341
4342 if (d > 0)
4343 {
4344 /* We could just as easily deal with negative constants here,
4345 but it does not seem worth the trouble for GCC 2.6. */
4346 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4347 {
4348 pre_shift = floor_log2 (d);
4349 if (rem_flag)
4350 {
4351 remainder = expand_binop (compute_mode, and_optab, op0,
4352 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4353 remainder, 0, OPTAB_LIB_WIDEN);
4354 if (remainder)
4355 return gen_lowpart (mode, remainder);
4356 }
4357 quotient = expand_shift
4358 (RSHIFT_EXPR, compute_mode, op0,
4359 build_int_cst (NULL_TREE, pre_shift),
4360 tquotient, 0);
4361 }
4362 else
4363 {
4364 rtx t1, t2, t3, t4;
4365
4366 mh = choose_multiplier (d, size, size - 1,
4367 &ml, &post_shift, &lgup);
4368 gcc_assert (!mh);
4369
4370 if (post_shift < BITS_PER_WORD
4371 && size - 1 < BITS_PER_WORD)
4372 {
4373 t1 = expand_shift
4374 (RSHIFT_EXPR, compute_mode, op0,
4375 build_int_cst (NULL_TREE, size - 1),
4376 NULL_RTX, 0);
4377 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4378 NULL_RTX, 0, OPTAB_WIDEN);
4379 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4380 + shift_cost[speed][compute_mode][size - 1]
4381 + 2 * add_cost[speed][compute_mode]);
4382 t3 = expand_mult_highpart (compute_mode, t2, ml,
4383 NULL_RTX, 1,
4384 max_cost - extra_cost);
4385 if (t3 != 0)
4386 {
4387 t4 = expand_shift
4388 (RSHIFT_EXPR, compute_mode, t3,
4389 build_int_cst (NULL_TREE, post_shift),
4390 NULL_RTX, 1);
4391 quotient = expand_binop (compute_mode, xor_optab,
4392 t4, t1, tquotient, 0,
4393 OPTAB_WIDEN);
4394 }
4395 }
4396 }
4397 }
4398 else
4399 {
4400 rtx nsign, t1, t2, t3, t4;
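/* Branch-free floor division by a negative constant:
   nsign = (x | (x - 1)) >> (size - 1) is -1 exactly when x <= 0,
   t3 = (x - 1) - nsign biases only positive x, and adding
   t5 = ~nsign (-1 for x > 0, else 0) turns the truncating quotient
   into the floor.  E.g. x = 7, d = -2: t3 = 6, trunc (6 / -2) = -3,
   and q = -3 + -1 = -4 = floor (7 / -2).  */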
4401 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4402 op0, constm1_rtx), NULL_RTX);
4403 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4404 0, OPTAB_WIDEN);
4405 nsign = expand_shift
4406 (RSHIFT_EXPR, compute_mode, t2,
4407 build_int_cst (NULL_TREE, size - 1),
4408 NULL_RTX, 0);
4409 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4410 NULL_RTX);
4411 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4412 NULL_RTX, 0);
4413 if (t4)
4414 {
4415 rtx t5;
4416 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4417 NULL_RTX, 0);
4418 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4419 t4, t5),
4420 tquotient);
4421 }
4422 }
4423 }
4424
4425 if (quotient != 0)
4426 break;
4427 delete_insns_since (last);
4428
4429 /* Try using an instruction that produces both the quotient and
4430 remainder, using truncation. We can easily compensate the quotient
4431 or remainder to get floor rounding, once we have the remainder.
4432 Notice that we also compute the final remainder value here,
4433 and return the result right away. */
4434 if (target == 0 || GET_MODE (target) != compute_mode)
4435 target = gen_reg_rtx (compute_mode);
4436
4437 if (rem_flag)
4438 {
4439 remainder
4440 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4441 quotient = gen_reg_rtx (compute_mode);
4442 }
4443 else
4444 {
4445 quotient
4446 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4447 remainder = gen_reg_rtx (compute_mode);
4448 }
4449
4450 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4451 quotient, remainder, 0))
4452 {
4453 /* This could be computed with a branch-less sequence.
4454 Save that for later. */
4455 rtx tem;
4456 rtx label = gen_label_rtx ();
4457 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4458 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4459 NULL_RTX, 0, OPTAB_WIDEN);
4460 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4461 expand_dec (quotient, const1_rtx);
4462 expand_inc (remainder, op1);
4463 emit_label (label);
4464 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4465 }
4466
4467 /* No luck with division elimination or divmod. Have to do it
4468 by conditionally adjusting op0 *and* the result. */
4469 {
4470 rtx label1, label2, label3, label4, label5;
4471 rtx adjusted_op0;
4472 rtx tem;
4473
4474 quotient = gen_reg_rtx (compute_mode);
4475 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4476 label1 = gen_label_rtx ();
4477 label2 = gen_label_rtx ();
4478 label3 = gen_label_rtx ();
4479 label4 = gen_label_rtx ();
4480 label5 = gen_label_rtx ();
4481 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4482 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4483 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4484 quotient, 0, OPTAB_LIB_WIDEN);
4485 if (tem != quotient)
4486 emit_move_insn (quotient, tem);
4487 emit_jump_insn (gen_jump (label5));
4488 emit_barrier ();
4489 emit_label (label1);
4490 expand_inc (adjusted_op0, const1_rtx);
4491 emit_jump_insn (gen_jump (label4));
4492 emit_barrier ();
4493 emit_label (label2);
4494 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4495 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4496 quotient, 0, OPTAB_LIB_WIDEN);
4497 if (tem != quotient)
4498 emit_move_insn (quotient, tem);
4499 emit_jump_insn (gen_jump (label5));
4500 emit_barrier ();
4501 emit_label (label3);
4502 expand_dec (adjusted_op0, const1_rtx);
4503 emit_label (label4);
4504 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4505 quotient, 0, OPTAB_LIB_WIDEN);
4506 if (tem != quotient)
4507 emit_move_insn (quotient, tem);
4508 expand_dec (quotient, const1_rtx);
4509 emit_label (label5);
4510 }
4511 break;
4512
4513 case CEIL_DIV_EXPR:
4514 case CEIL_MOD_EXPR:
4515 if (unsignedp)
4516 {
4517 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4518 {
4519 rtx t1, t2, t3;
4520 unsigned HOST_WIDE_INT d = INTVAL (op1);
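/* ceil (x / 2^k) == (x >> k) + (low k bits of x nonzero), computed
   here with a shift, an AND, and a store-flag; e.g. x = 13, d = 4:
   (13 >> 2) + 1 == 4 == ceil (13 / 4.0).  */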
4521 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4522 build_int_cst (NULL_TREE, floor_log2 (d)),
4523 tquotient, 1);
4524 t2 = expand_binop (compute_mode, and_optab, op0,
4525 GEN_INT (d - 1),
4526 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4527 t3 = gen_reg_rtx (compute_mode);
4528 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4529 compute_mode, 1, 1);
4530 if (t3 == 0)
4531 {
4532 rtx lab;
4533 lab = gen_label_rtx ();
4534 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4535 expand_inc (t1, const1_rtx);
4536 emit_label (lab);
4537 quotient = t1;
4538 }
4539 else
4540 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4541 t1, t3),
4542 tquotient);
4543 break;
4544 }
4545
4546 /* Try using an instruction that produces both the quotient and
4547 remainder, using truncation. We can easily compensate the
4548 quotient or remainder to get ceiling rounding, once we have the
4549 remainder.  Notice that we also compute the final remainder
4550 value here, and return the result right away. */
4551 if (target == 0 || GET_MODE (target) != compute_mode)
4552 target = gen_reg_rtx (compute_mode);
4553
4554 if (rem_flag)
4555 {
4556 remainder = (REG_P (target)
4557 ? target : gen_reg_rtx (compute_mode));
4558 quotient = gen_reg_rtx (compute_mode);
4559 }
4560 else
4561 {
4562 quotient = (REG_P (target)
4563 ? target : gen_reg_rtx (compute_mode));
4564 remainder = gen_reg_rtx (compute_mode);
4565 }
4566
4567 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4568 remainder, 1))
4569 {
4570 /* This could be computed with a branch-less sequence.
4571 Save that for later. */
4572 rtx label = gen_label_rtx ();
4573 do_cmp_and_jump (remainder, const0_rtx, EQ,
4574 compute_mode, label);
4575 expand_inc (quotient, const1_rtx);
4576 expand_dec (remainder, op1);
4577 emit_label (label);
4578 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4579 }
4580
4581 /* No luck with division elimination or divmod. Have to do it
4582 by conditionally adjusting op0 *and* the result. */
4583 {
4584 rtx label1, label2;
4585 rtx adjusted_op0, tem;
4586
4587 quotient = gen_reg_rtx (compute_mode);
4588 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4589 label1 = gen_label_rtx ();
4590 label2 = gen_label_rtx ();
4591 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4592 compute_mode, label1);
4593 emit_move_insn (quotient, const0_rtx);
4594 emit_jump_insn (gen_jump (label2));
4595 emit_barrier ();
4596 emit_label (label1);
4597 expand_dec (adjusted_op0, const1_rtx);
4598 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4599 quotient, 1, OPTAB_LIB_WIDEN);
4600 if (tem != quotient)
4601 emit_move_insn (quotient, tem);
4602 expand_inc (quotient, const1_rtx);
4603 emit_label (label2);
4604 }
4605 }
4606 else /* signed */
4607 {
4608 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4609 && INTVAL (op1) >= 0)
4610 {
4611 /* This is extremely similar to the code for the unsigned case
4612 above. For 2.7 we should merge these variants, but for
4613 2.6.1 I don't want to touch the code for unsigned since that
4614 gets used in C. The signed case will only be used by other
4615 languages (Ada). */
4616
4617 rtx t1, t2, t3;
4618 unsigned HOST_WIDE_INT d = INTVAL (op1);
4619 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4620 build_int_cst (NULL_TREE, floor_log2 (d)),
4621 tquotient, 0);
4622 t2 = expand_binop (compute_mode, and_optab, op0,
4623 GEN_INT (d - 1),
4624 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4625 t3 = gen_reg_rtx (compute_mode);
4626 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4627 compute_mode, 1, 1);
4628 if (t3 == 0)
4629 {
4630 rtx lab;
4631 lab = gen_label_rtx ();
4632 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4633 expand_inc (t1, const1_rtx);
4634 emit_label (lab);
4635 quotient = t1;
4636 }
4637 else
4638 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4639 t1, t3),
4640 tquotient);
4641 break;
4642 }
4643
4644 /* Try using an instruction that produces both the quotient and
4645 remainder, using truncation. We can easily compensate the
4646 quotient or remainder to get ceiling rounding, once we have the
4647 remainder.  Notice that we also compute the final remainder
4648 value here, and return the result right away. */
4649 if (target == 0 || GET_MODE (target) != compute_mode)
4650 target = gen_reg_rtx (compute_mode);
4651 if (rem_flag)
4652 {
4653 remainder = (REG_P (target)
4654 ? target : gen_reg_rtx (compute_mode));
4655 quotient = gen_reg_rtx (compute_mode);
4656 }
4657 else
4658 {
4659 quotient = (REG_P (target)
4660 ? target : gen_reg_rtx (compute_mode));
4661 remainder = gen_reg_rtx (compute_mode);
4662 }
4663
4664 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4665 remainder, 0))
4666 {
4667 /* This could be computed with a branch-less sequence.
4668 Save that for later. */
4669 rtx tem;
4670 rtx label = gen_label_rtx ();
4671 do_cmp_and_jump (remainder, const0_rtx, EQ,
4672 compute_mode, label);
4673 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4674 NULL_RTX, 0, OPTAB_WIDEN);
4675 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4676 expand_inc (quotient, const1_rtx);
4677 expand_dec (remainder, op1);
4678 emit_label (label);
4679 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4680 }
4681
4682 /* No luck with division elimination or divmod. Have to do it
4683 by conditionally adjusting op0 *and* the result. */
4684 {
4685 rtx label1, label2, label3, label4, label5;
4686 rtx adjusted_op0;
4687 rtx tem;
4688
4689 quotient = gen_reg_rtx (compute_mode);
4690 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4691 label1 = gen_label_rtx ();
4692 label2 = gen_label_rtx ();
4693 label3 = gen_label_rtx ();
4694 label4 = gen_label_rtx ();
4695 label5 = gen_label_rtx ();
4696 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4697 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4698 compute_mode, label1);
4699 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4700 quotient, 0, OPTAB_LIB_WIDEN);
4701 if (tem != quotient)
4702 emit_move_insn (quotient, tem);
4703 emit_jump_insn (gen_jump (label5));
4704 emit_barrier ();
4705 emit_label (label1);
4706 expand_dec (adjusted_op0, const1_rtx);
4707 emit_jump_insn (gen_jump (label4));
4708 emit_barrier ();
4709 emit_label (label2);
4710 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4711 compute_mode, label3);
4712 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4713 quotient, 0, OPTAB_LIB_WIDEN);
4714 if (tem != quotient)
4715 emit_move_insn (quotient, tem);
4716 emit_jump_insn (gen_jump (label5));
4717 emit_barrier ();
4718 emit_label (label3);
4719 expand_inc (adjusted_op0, const1_rtx);
4720 emit_label (label4);
4721 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4722 quotient, 0, OPTAB_LIB_WIDEN);
4723 if (tem != quotient)
4724 emit_move_insn (quotient, tem);
4725 expand_inc (quotient, const1_rtx);
4726 emit_label (label5);
4727 }
4728 }
4729 break;
4730
4731 case EXACT_DIV_EXPR:
4732 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4733 {
4734 HOST_WIDE_INT d = INTVAL (op1);
4735 unsigned HOST_WIDE_INT ml;
4736 int pre_shift;
4737 rtx t1;
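/* Because the division is known to be exact, multiplying by the
   inverse of the odd part of D modulo 2^SIZE recovers the quotient.
   Illustration for size = 32, d = 6: pre_shift = 1 and
   ml = invert_mod2n (3, 32) = 0xaaaaaaab, since
   3 * 0xaaaaaaab = 2^33 + 1 == 1 (mod 2^32), so
   q = (x >> 1) * 0xaaaaaaab.  */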
4738
4739 pre_shift = floor_log2 (d & -d);
4740 ml = invert_mod2n (d >> pre_shift, size);
4741 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4742 build_int_cst (NULL_TREE, pre_shift),
4743 NULL_RTX, unsignedp);
4744 quotient = expand_mult (compute_mode, t1,
4745 gen_int_mode (ml, compute_mode),
4746 NULL_RTX, 1);
4747
4748 insn = get_last_insn ();
4749 set_unique_reg_note (insn,
4750 REG_EQUAL,
4751 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4752 compute_mode,
4753 op0, op1));
4754 }
4755 break;
4756
4757 case ROUND_DIV_EXPR:
4758 case ROUND_MOD_EXPR:
4759 if (unsignedp)
4760 {
4761 rtx tem;
4762 rtx label;
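/* Round-to-nearest from a truncating divmod: increment the
   quotient iff remainder > (d - 1) / 2.  E.g. x = 13, d = 5:
   q = 2, r = 3 > 2, so q becomes 3, matching round (13/5.0).
   For even d a tie (r == d/2) also increments, so exact halves
   round up.  */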
4763 label = gen_label_rtx ();
4764 quotient = gen_reg_rtx (compute_mode);
4765 remainder = gen_reg_rtx (compute_mode);
4766 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4767 {
4768 rtx tem;
4769 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4770 quotient, 1, OPTAB_LIB_WIDEN);
4771 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4772 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4773 remainder, 1, OPTAB_LIB_WIDEN);
4774 }
4775 tem = plus_constant (op1, -1);
4776 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4777 integer_one_node, NULL_RTX, 1);
4778 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4779 expand_inc (quotient, const1_rtx);
4780 expand_dec (remainder, op1);
4781 emit_label (label);
4782 }
4783 else
4784 {
4785 rtx abs_rem, abs_op1, tem, mask;
4786 rtx label;
4787 label = gen_label_rtx ();
4788 quotient = gen_reg_rtx (compute_mode);
4789 remainder = gen_reg_rtx (compute_mode);
4790 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4791 {
4792 rtx tem;
4793 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4794 quotient, 0, OPTAB_LIB_WIDEN);
4795 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4796 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4797 remainder, 0, OPTAB_LIB_WIDEN);
4798 }
4799 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4800 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4801 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4802 integer_one_node, NULL_RTX, 1);
4803 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4804 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4805 NULL_RTX, 0, OPTAB_WIDEN);
4806 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4807 build_int_cst (NULL_TREE, size - 1),
4808 NULL_RTX, 0);
4809 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4810 NULL_RTX, 0, OPTAB_WIDEN);
4811 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4812 NULL_RTX, 0, OPTAB_WIDEN);
4813 expand_inc (quotient, tem);
4814 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4815 NULL_RTX, 0, OPTAB_WIDEN);
4816 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4817 NULL_RTX, 0, OPTAB_WIDEN);
4818 expand_dec (remainder, tem);
4819 emit_label (label);
4820 }
4821 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4822
4823 default:
4824 gcc_unreachable ();
4825 }
4826
4827 if (quotient == 0)
4828 {
4829 if (target && GET_MODE (target) != compute_mode)
4830 target = 0;
4831
4832 if (rem_flag)
4833 {
4834 /* Try to produce the remainder without producing the quotient.
4835 If we seem to have a divmod pattern that does not require widening,
4836 don't try widening here. We should really have a WIDEN argument
4837 to expand_twoval_binop, since what we'd really like to do here is
4838 1) try a mod insn in compute_mode
4839 2) try a divmod insn in compute_mode
4840 3) try a div insn in compute_mode and multiply-subtract to get
4841 remainder
4842 4) try the same things with widening allowed. */
4843 remainder
4844 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4845 op0, op1, target,
4846 unsignedp,
4847 ((optab_handler (optab2, compute_mode)
4848 != CODE_FOR_nothing)
4849 ? OPTAB_DIRECT : OPTAB_WIDEN));
4850 if (remainder == 0)
4851 {
4852 /* No luck there. Can we do remainder and divide at once
4853 without a library call? */
4854 remainder = gen_reg_rtx (compute_mode);
4855 if (! expand_twoval_binop ((unsignedp
4856 ? udivmod_optab
4857 : sdivmod_optab),
4858 op0, op1,
4859 NULL_RTX, remainder, unsignedp))
4860 remainder = 0;
4861 }
4862
4863 if (remainder)
4864 return gen_lowpart (mode, remainder);
4865 }
4866
4867 /* Produce the quotient. Try a quotient insn, but not a library call.
4868 If we have a divmod in this mode, use it in preference to widening
4869 the div (for this test we assume it will not fail). Note that optab2
4870 is set to the one of the two optabs that the call below will use. */
4871 quotient
4872 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4873 op0, op1, rem_flag ? NULL_RTX : target,
4874 unsignedp,
4875 ((optab_handler (optab2, compute_mode)
4876 != CODE_FOR_nothing)
4877 ? OPTAB_DIRECT : OPTAB_WIDEN));
4878
4879 if (quotient == 0)
4880 {
4881 /* No luck there. Try a quotient-and-remainder insn,
4882 keeping the quotient alone. */
4883 quotient = gen_reg_rtx (compute_mode);
4884 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4885 op0, op1,
4886 quotient, NULL_RTX, unsignedp))
4887 {
4888 quotient = 0;
4889 if (! rem_flag)
4890 /* Still no luck. If we are not computing the remainder,
4891 use a library call for the quotient. */
4892 quotient = sign_expand_binop (compute_mode,
4893 udiv_optab, sdiv_optab,
4894 op0, op1, target,
4895 unsignedp, OPTAB_LIB_WIDEN);
4896 }
4897 }
4898 }
4899
4900 if (rem_flag)
4901 {
4902 if (target && GET_MODE (target) != compute_mode)
4903 target = 0;
4904
4905 if (quotient == 0)
4906 {
4907 /* No divide instruction either. Use library for remainder. */
4908 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4909 op0, op1, target,
4910 unsignedp, OPTAB_LIB_WIDEN);
4911 /* No remainder function. Try a quotient-and-remainder
4912 function, keeping the remainder. */
4913 if (!remainder)
4914 {
4915 remainder = gen_reg_rtx (compute_mode);
4916 if (!expand_twoval_binop_libfunc
4917 (unsignedp ? udivmod_optab : sdivmod_optab,
4918 op0, op1,
4919 NULL_RTX, remainder,
4920 unsignedp ? UMOD : MOD))
4921 remainder = NULL_RTX;
4922 }
4923 }
4924 else
4925 {
4926 /* We divided. Now finish doing X - Y * (X / Y). */
4927 remainder = expand_mult (compute_mode, quotient, op1,
4928 NULL_RTX, unsignedp);
4929 remainder = expand_binop (compute_mode, sub_optab, op0,
4930 remainder, target, unsignedp,
4931 OPTAB_LIB_WIDEN);
4932 }
4933 }
4934
4935 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4936 }
4937 \f
4938 /* Return a tree node with data type TYPE, describing the value of X.
4939 Usually this is a VAR_DECL, if there is no obvious better choice.
4940 X may be an expression; however, we only support those expressions
4941 generated by loop.c. */
4942
4943 tree
4944 make_tree (tree type, rtx x)
4945 {
4946 tree t;
4947
4948 switch (GET_CODE (x))
4949 {
4950 case CONST_INT:
4951 {
4952 HOST_WIDE_INT hi = 0;
4953
4954 if (INTVAL (x) < 0
4955 && !(TYPE_UNSIGNED (type)
4956 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4957 < HOST_BITS_PER_WIDE_INT)))
4958 hi = -1;
4959
4960 t = build_int_cst_wide (type, INTVAL (x), hi);
4961
4962 return t;
4963 }
4964
4965 case CONST_DOUBLE:
4966 if (GET_MODE (x) == VOIDmode)
4967 t = build_int_cst_wide (type,
4968 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4969 else
4970 {
4971 REAL_VALUE_TYPE d;
4972
4973 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4974 t = build_real (type, d);
4975 }
4976
4977 return t;
4978
4979 case CONST_VECTOR:
4980 {
4981 int units = CONST_VECTOR_NUNITS (x);
4982 tree itype = TREE_TYPE (type);
4983 tree t = NULL_TREE;
4984 int i;
4985
4986
4987 /* Build a tree with vector elements. */
4988 for (i = units - 1; i >= 0; --i)
4989 {
4990 rtx elt = CONST_VECTOR_ELT (x, i);
4991 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4992 }
4993
4994 return build_vector (type, t);
4995 }
4996
4997 case PLUS:
4998 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4999 make_tree (type, XEXP (x, 1)));
5000
5001 case MINUS:
5002 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5003 make_tree (type, XEXP (x, 1)));
5004
5005 case NEG:
5006 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5007
5008 case MULT:
5009 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5010 make_tree (type, XEXP (x, 1)));
5011
5012 case ASHIFT:
5013 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5014 make_tree (type, XEXP (x, 1)));
5015
5016 case LSHIFTRT:
5017 t = unsigned_type_for (type);
5018 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5019 make_tree (t, XEXP (x, 0)),
5020 make_tree (type, XEXP (x, 1))));
5021
5022 case ASHIFTRT:
5023 t = signed_type_for (type);
5024 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5025 make_tree (t, XEXP (x, 0)),
5026 make_tree (type, XEXP (x, 1))));
5027
5028 case DIV:
5029 if (TREE_CODE (type) != REAL_TYPE)
5030 t = signed_type_for (type);
5031 else
5032 t = type;
5033
5034 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5035 make_tree (t, XEXP (x, 0)),
5036 make_tree (t, XEXP (x, 1))));
5037 case UDIV:
5038 t = unsigned_type_for (type);
5039 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5040 make_tree (t, XEXP (x, 0)),
5041 make_tree (t, XEXP (x, 1))));
5042
5043 case SIGN_EXTEND:
5044 case ZERO_EXTEND:
5045 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5046 GET_CODE (x) == ZERO_EXTEND);
5047 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5048
5049 case CONST:
5050 return make_tree (type, XEXP (x, 0));
5051
5052 case SYMBOL_REF:
5053 t = SYMBOL_REF_DECL (x);
5054 if (t)
5055 return fold_convert (type, build_fold_addr_expr (t));
5056 /* else fall through. */
5057
5058 default:
5059 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5060
5061 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5062 address mode to pointer mode. */
5063 if (POINTER_TYPE_P (type))
5064 x = convert_memory_address_addr_space
5065 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5066
5067 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5068 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5069 t->decl_with_rtl.rtl = x;
5070
5071 return t;
5072 }
5073 }
5074 \f
5075 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5076 and returning TARGET.
5077
5078 If TARGET is 0, a pseudo-register or constant is returned. */
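   A typical use appears in emit_cstore below:
   expand_and (result_mode, op0, const1_rtx, subtarget)
   masks the store-flag result down to its low bit.  */
/* (See the doc comment above for the contract of this function.)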
5079
5080 rtx
5081 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5082 {
5083 rtx tem = 0;
5084
5085 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5086 tem = simplify_binary_operation (AND, mode, op0, op1);
5087 if (tem == 0)
5088 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5089
5090 if (target == 0)
5091 target = tem;
5092 else if (tem != target)
5093 emit_move_insn (target, tem);
5094 return target;
5095 }
5096
5097 /* Helper function for emit_store_flag. */
5098 static rtx
5099 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5100 enum machine_mode mode, enum machine_mode compare_mode,
5101 int unsignedp, rtx x, rtx y, int normalizep,
5102 enum machine_mode target_mode)
5103 {
5104 rtx op0, last, comparison, subtarget, pattern;
5105 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5106
5107 last = get_last_insn ();
5108 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5109 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5110 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5111 if (!x || !y
5112 || !insn_data[icode].operand[2].predicate
5113 (x, insn_data[icode].operand[2].mode)
5114 || !insn_data[icode].operand[3].predicate
5115 (y, insn_data[icode].operand[3].mode)
5116 || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
5117 {
5118 delete_insns_since (last);
5119 return NULL_RTX;
5120 }
5121
5122 if (target_mode == VOIDmode)
5123 target_mode = result_mode;
5124 if (!target)
5125 target = gen_reg_rtx (target_mode);
5126
5127 if (optimize
5128 || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
5129 subtarget = gen_reg_rtx (result_mode);
5130 else
5131 subtarget = target;
5132
5133 pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
5134 if (!pattern)
5135 return NULL_RTX;
5136 emit_insn (pattern);
5137
5138 /* If we are converting to a wider mode, first convert to
5139 TARGET_MODE, then normalize. This produces better combining
5140 opportunities on machines that have a SIGN_EXTRACT when we are
5141 testing a single bit. This mostly benefits the 68k.
5142
5143 If STORE_FLAG_VALUE does not have the sign bit set when
5144 interpreted in MODE, we can do this conversion as unsigned, which
5145 is usually more efficient. */
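  /* For instance, if STORE_FLAG_VALUE is 1 and RESULT_MODE is QImode,
     the value 0x01 has the QImode sign bit (0x80) clear, so zero- and
     sign-extension agree and the cheaper unsigned conversion is used.
     If STORE_FLAG_VALUE is -1 (0xff in QImode), we must sign-extend to
     keep the result equal to -1 in the wider mode.  */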
5146 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5147 {
5148 convert_move (target, subtarget,
5149 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5150 && 0 == (STORE_FLAG_VALUE
5151 & ((HOST_WIDE_INT) 1
5152 << (GET_MODE_BITSIZE (result_mode) - 1))));
5153 op0 = target;
5154 result_mode = target_mode;
5155 }
5156 else
5157 op0 = subtarget;
5158
5159 /* If we want to keep subexpressions around, don't reuse our last
5160 target. */
5161 if (optimize)
5162 subtarget = 0;
5163
5164 /* Now normalize to the proper value in MODE. Sometimes we don't
5165 have to do anything. */
5166 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5167 ;
5168 /* STORE_FLAG_VALUE might be the most negative number, so write
5169 the comparison this way to avoid a compile-time warning.  */
5170 else if (- normalizep == STORE_FLAG_VALUE)
5171 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5172
5173 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5174 it hard to use a value of just the sign bit due to ANSI integer
5175 constant typing rules. */
5176 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5177 && (STORE_FLAG_VALUE
5178 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5179 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5180 size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
5181 normalizep == 1);
5182 else
5183 {
5184 gcc_assert (STORE_FLAG_VALUE & 1);
5185
5186 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5187 if (normalizep == -1)
5188 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5189 }
5190
5191 /* If we were converting to a smaller mode, do the conversion now. */
5192 if (target_mode != result_mode)
5193 {
5194 convert_move (target, op0, 0);
5195 return target;
5196 }
5197 else
5198 return op0;
5199 }
5200
5201
5202 /* A subroutine of emit_store_flag only including "tricks" that do not
5203 need a recursive call. These are kept separate to avoid infinite
5204 loops. */
5205
5206 static rtx
5207 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5208 enum machine_mode mode, int unsignedp, int normalizep,
5209 enum machine_mode target_mode)
5210 {
5211 rtx subtarget;
5212 enum insn_code icode;
5213 enum machine_mode compare_mode;
5214 enum mode_class mclass;
5215 enum rtx_code scode;
5216 rtx tem;
5217
5218 if (unsignedp)
5219 code = unsigned_condition (code);
5220 scode = swap_condition (code);
5221
5222 /* If one operand is constant, make it the second one. Only do this
5223 if the other operand is not constant as well. */
5224
5225 if (swap_commutative_operands_p (op0, op1))
5226 {
5227 tem = op0;
5228 op0 = op1;
5229 op1 = tem;
5230 code = swap_condition (code);
5231 }
5232
5233 if (mode == VOIDmode)
5234 mode = GET_MODE (op0);
5235
5236 /* For some comparisons with 1 and -1, we can convert this to
5237 comparisons with zero. This will often produce more opportunities for
5238 store-flag insns. */
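  /* For instance, the signed test X < 1 is the same as X <= 0, and the
     unsigned tests X >= 1 and X < 1 are the same as X != 0 and X == 0
     respectively.  */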
5239
5240 switch (code)
5241 {
5242 case LT:
5243 if (op1 == const1_rtx)
5244 op1 = const0_rtx, code = LE;
5245 break;
5246 case LE:
5247 if (op1 == constm1_rtx)
5248 op1 = const0_rtx, code = LT;
5249 break;
5250 case GE:
5251 if (op1 == const1_rtx)
5252 op1 = const0_rtx, code = GT;
5253 break;
5254 case GT:
5255 if (op1 == constm1_rtx)
5256 op1 = const0_rtx, code = GE;
5257 break;
5258 case GEU:
5259 if (op1 == const1_rtx)
5260 op1 = const0_rtx, code = NE;
5261 break;
5262 case LTU:
5263 if (op1 == const1_rtx)
5264 op1 = const0_rtx, code = EQ;
5265 break;
5266 default:
5267 break;
5268 }
5269
5270 /* If we are comparing a double-word integer with zero or -1, we can
5271 convert the comparison into one involving a single word. */
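  /* For instance, with 64-bit DImode on a 32-bit target: X == 0 iff
     (low | high) == 0, X == -1 iff (low & high) == -1, and X < 0 iff
     the sign bit of the high word is set.  */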
5272 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5273 && GET_MODE_CLASS (mode) == MODE_INT
5274 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5275 {
5276 if ((code == EQ || code == NE)
5277 && (op1 == const0_rtx || op1 == constm1_rtx))
5278 {
5279 rtx op00, op01;
5280
5281 /* Do a logical OR or AND of the two words and compare the
5282 result. */
5283 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5284 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5285 tem = expand_binop (word_mode,
5286 op1 == const0_rtx ? ior_optab : and_optab,
5287 op00, op01, NULL_RTX, unsignedp,
5288 OPTAB_DIRECT);
5289
5290 if (tem != 0)
5291 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5292 unsignedp, normalizep);
5293 }
5294 else if ((code == LT || code == GE) && op1 == const0_rtx)
5295 {
5296 rtx op0h;
5297
5298 /* If testing the sign bit, we can just test the high word.  */
5299 op0h = simplify_gen_subreg (word_mode, op0, mode,
5300 subreg_highpart_offset (word_mode,
5301 mode));
5302 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5303 unsignedp, normalizep);
5304 }
5305 else
5306 tem = NULL_RTX;
5307
5308 if (tem)
5309 {
5310 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5311 return tem;
5312 if (!target)
5313 target = gen_reg_rtx (target_mode);
5314
5315 convert_move (target, tem,
5316 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5317 & ((HOST_WIDE_INT) 1
5318 << (GET_MODE_BITSIZE (word_mode) - 1))));
5319 return target;
5320 }
5321 }
5322
5323 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5324 complement of A (for GE) and shifting the sign bit to the low bit. */
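  /* For instance, in a 32-bit mode, A < 0 becomes (unsigned) A >> 31,
     which is 1 exactly when the sign bit of A is set; for A >= 0 we
     complement first, since ~A has its sign bit set exactly when
     A >= 0.  An arithmetic shift is used instead when a -1/0 result
     is wanted.  */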
5325 if (op1 == const0_rtx && (code == LT || code == GE)
5326 && GET_MODE_CLASS (mode) == MODE_INT
5327 && (normalizep || STORE_FLAG_VALUE == 1
5328 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5329 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5330 == ((unsigned HOST_WIDE_INT) 1
5331 << (GET_MODE_BITSIZE (mode) - 1))))))
5332 {
5333 subtarget = target;
5334
5335 if (!target)
5336 target_mode = mode;
5337
5338 /* If the result is to be wider than OP0, it is best to convert it
5339 first. If it is to be narrower, it is *incorrect* to convert it
5340 first. */
5341 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5342 {
5343 op0 = convert_modes (target_mode, mode, op0, 0);
5344 mode = target_mode;
5345 }
5346
5347 if (target_mode != mode)
5348 subtarget = 0;
5349
5350 if (code == GE)
5351 op0 = expand_unop (mode, one_cmpl_optab, op0,
5352 ((STORE_FLAG_VALUE == 1 || normalizep)
5353 ? 0 : subtarget), 0);
5354
5355 if (STORE_FLAG_VALUE == 1 || normalizep)
5356 /* If we are supposed to produce a 0/1 value, we want to do
5357 a logical shift from the sign bit to the low-order bit; for
5358 a -1/0 value, we do an arithmetic shift. */
5359 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5360 size_int (GET_MODE_BITSIZE (mode) - 1),
5361 subtarget, normalizep != -1);
5362
5363 if (mode != target_mode)
5364 op0 = convert_modes (target_mode, mode, op0, 0);
5365
5366 return op0;
5367 }
5368
5369 mclass = GET_MODE_CLASS (mode);
5370 for (compare_mode = mode; compare_mode != VOIDmode;
5371 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5372 {
5373 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5374 icode = optab_handler (cstore_optab, optab_mode);
5375 if (icode != CODE_FOR_nothing)
5376 {
5377 do_pending_stack_adjust ();
5378 tem = emit_cstore (target, icode, code, mode, compare_mode,
5379 unsignedp, op0, op1, normalizep, target_mode);
5380 if (tem)
5381 return tem;
5382
5383 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5384 {
5385 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5386 unsignedp, op1, op0, normalizep, target_mode);
5387 if (tem)
5388 return tem;
5389 }
5390 break;
5391 }
5392 }
5393
5394 return 0;
5395 }
5396
5397 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5398 and storing in TARGET. Normally return TARGET.
5399 Return 0 if that cannot be done.
5400
5401 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5402 it is VOIDmode, they cannot both be CONST_INT.
5403
5404 UNSIGNEDP is for the case where we have to widen the operands
5405 to perform the operation. It says to use zero-extension.
5406
5407 NORMALIZEP is 1 if we should convert the result to be either zero
5408 or one.  NORMALIZEP is -1 if we should convert the result to be
5409 either zero or -1. If NORMALIZEP is zero, the result will be left
5410 "raw" out of the scc insn. */
5411
5412 rtx
5413 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5414 enum machine_mode mode, int unsignedp, int normalizep)
5415 {
5416 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5417 enum rtx_code rcode;
5418 rtx subtarget;
5419 rtx tem, last, trueval;
5420
5421 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5422 target_mode);
5423 if (tem)
5424 return tem;
5425
5426 /* If we reached here, we can't do this with a scc insn; however, there
5427 are some comparisons that can be done in other ways. Don't do any
5428 of these cases if branches are very cheap. */
5429 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5430 return 0;
5431
5432 /* See what we need to return. We can only return a 1, -1, or the
5433 sign bit. */
5434
5435 if (normalizep == 0)
5436 {
5437 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5438 normalizep = STORE_FLAG_VALUE;
5439
5440 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5441 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5442 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5443 ;
5444 else
5445 return 0;
5446 }
5447
5448 last = get_last_insn ();
5449
5450 /* If optimizing, use different pseudo registers for each insn, instead
5451 of reusing the same pseudo. This leads to better CSE, but slows
5452 down the compiler, since there are more pseudos.  */
5453 subtarget = (!optimize
5454 && (target_mode == mode)) ? target : NULL_RTX;
5455 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5456
5457 /* For floating-point comparisons, try the reverse comparison or try
5458 changing the "orderedness" of the comparison. */
5459 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5460 {
5461 enum rtx_code first_code;
5462 bool and_them;
5463
5464 rcode = reverse_condition_maybe_unordered (code);
5465 if (can_compare_p (rcode, mode, ccp_store_flag)
5466 && (code == ORDERED || code == UNORDERED
5467 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5468 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5469 {
5470 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5471 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5472
5473 /* For the reverse comparison, use either an addition or a XOR. */
5474 if (want_add
5475 && rtx_cost (GEN_INT (normalizep), PLUS,
5476 optimize_insn_for_speed_p ()) == 0)
5477 {
5478 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5479 STORE_FLAG_VALUE, target_mode);
5480 if (tem)
5481 return expand_binop (target_mode, add_optab, tem,
5482 GEN_INT (normalizep),
5483 target, 0, OPTAB_WIDEN);
5484 }
5485 else if (!want_add
5486 && rtx_cost (trueval, XOR,
5487 optimize_insn_for_speed_p ()) == 0)
5488 {
5489 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5490 normalizep, target_mode);
5491 if (tem)
5492 return expand_binop (target_mode, xor_optab, tem, trueval,
5493 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5494 }
5495 }
5496
5497 delete_insns_since (last);
5498
5499 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5500 if (code == ORDERED || code == UNORDERED)
5501 return 0;
5502
5503 and_them = split_comparison (code, mode, &first_code, &code);
5504
5505 /* If there are no NaNs, the first comparison should always fall through.
5506 Effectively change the comparison to the other one. */
5507 if (!HONOR_NANS (mode))
5508 {
5509 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5510 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5511 target_mode);
5512 }
5513
5514 #ifdef HAVE_conditional_move
5515 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5516 conditional move. */
5517 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5518 normalizep, target_mode);
5519 if (tem == 0)
5520 return 0;
5521
5522 if (and_them)
5523 tem = emit_conditional_move (target, code, op0, op1, mode,
5524 tem, const0_rtx, GET_MODE (tem), 0);
5525 else
5526 tem = emit_conditional_move (target, code, op0, op1, mode,
5527 trueval, tem, GET_MODE (tem), 0);
5528
5529 if (tem == 0)
5530 delete_insns_since (last);
5531 return tem;
5532 #else
5533 return 0;
5534 #endif
5535 }
5536
5537 /* The remaining tricks only apply to integer comparisons. */
5538
5539 if (GET_MODE_CLASS (mode) != MODE_INT)
5540 return 0;
5541
5542 /* If this is an equality comparison of integers, we can try to exclusive-or
5543 (or subtract) the two operands and use a recursive call to try the
5544 comparison with zero. Don't do any of these cases if branches are
5545 very cheap. */
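  /* For instance, A == B can be tested as (A ^ B) == 0, or as
     (A - B) == 0 when no XOR sequence is available.  */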
5546
5547 if ((code == EQ || code == NE) && op1 != const0_rtx)
5548 {
5549 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5550 OPTAB_WIDEN);
5551
5552 if (tem == 0)
5553 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5554 OPTAB_WIDEN);
5555 if (tem != 0)
5556 tem = emit_store_flag (target, code, tem, const0_rtx,
5557 mode, unsignedp, normalizep);
5558 if (tem != 0)
5559 return tem;
5560
5561 delete_insns_since (last);
5562 }
5563
5564 /* For integer comparisons, try the reverse comparison. However, for
5565 small X, if we would have to extend it anyway, implementing "X != 0"
5566 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5567 rcode = reverse_condition (code);
5568 if (can_compare_p (rcode, mode, ccp_store_flag)
5569 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5570 && code == NE
5571 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5572 && op1 == const0_rtx))
5573 {
5574 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5575 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5576
5577 /* Again, for the reverse comparison, use either an addition or a XOR. */
5578 if (want_add
5579 && rtx_cost (GEN_INT (normalizep), PLUS,
5580 optimize_insn_for_speed_p ()) == 0)
5581 {
5582 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5583 STORE_FLAG_VALUE, target_mode);
5584 if (tem != 0)
5585 tem = expand_binop (target_mode, add_optab, tem,
5586 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5587 }
5588 else if (!want_add
5589 && rtx_cost (trueval, XOR,
5590 optimize_insn_for_speed_p ()) == 0)
5591 {
5592 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5593 normalizep, target_mode);
5594 if (tem != 0)
5595 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5596 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5597 }
5598
5599 if (tem != 0)
5600 return tem;
5601 delete_insns_since (last);
5602 }
5603
5604 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5605 the constant zero. Reject all other comparisons at this point. Only
5606 do LE and GT if branches are expensive since they are expensive on
5607 2-operand machines. */
5608
5609 if (op1 != const0_rtx
5610 || (code != EQ && code != NE
5611 && (BRANCH_COST (optimize_insn_for_speed_p (),
5612 false) <= 1 || (code != LE && code != GT))))
5613 return 0;
5614
5615 /* Try to put the result of the comparison in the sign bit. Assume we can't
5616 do the necessary operation below. */
5617
5618 tem = 0;
5619
5620 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5621 the sign bit set. */
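  /* For instance, in 8-bit arithmetic: A = 6 gives 6 | 5 = 7, sign bit
     clear, so A > 0; A = 0 gives 0 | -1 = -1 and A = -4 gives
     -4 | -5 = -1, both with the sign bit set, so A <= 0.  */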
5622
5623 if (code == LE)
5624 {
5625 /* This is destructive, so SUBTARGET can't be OP0. */
5626 if (rtx_equal_p (subtarget, op0))
5627 subtarget = 0;
5628
5629 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5630 OPTAB_WIDEN);
5631 if (tem)
5632 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5633 OPTAB_WIDEN);
5634 }
5635
5636 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5637 number of bits in the mode of OP0, minus one. */
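  /* For instance, with 32-bit A: A = 5 gives (5 >> 31) - 5 = -5, sign bit
     set, so A > 0; A = 0 gives 0 - 0 = 0 and A = -7 gives -1 + 7 = 6,
     both with the sign bit clear.  Even A = INT_MIN behaves: -1 - INT_MIN
     equals INT_MAX, sign bit clear.  */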
5638
5639 if (code == GT)
5640 {
5641 if (rtx_equal_p (subtarget, op0))
5642 subtarget = 0;
5643
5644 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5645 size_int (GET_MODE_BITSIZE (mode) - 1),
5646 subtarget, 0);
5647 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5648 OPTAB_WIDEN);
5649 }
5650
5651 if (code == EQ || code == NE)
5652 {
5653 /* For EQ or NE, one way to do the comparison is to apply an operation
5654 that converts the operand into a positive number if it is nonzero
5655 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5656 for NE we negate. This puts the result in the sign bit. Then we
5657 normalize with a shift, if needed.
5658
5659 Two operations that can do the above actions are ABS and FFS, so try
5660 them. If that doesn't work, and MODE is smaller than a full word,
5661 we can use zero-extension to the wider mode (an unsigned conversion)
5662 as the operation. */
5663
5664 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5665 that is compensated by the subsequent overflow when subtracting
5666 one / negating. */
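  /* Worked example with ABS and EQ: x = 0 gives 0 - 1 = -1 (sign bit set,
     comparison true); x = 5 gives 5 - 1 = 4 (clear, false); x = INT_MIN
     gives ABS = INT_MIN, and INT_MIN - 1 wraps to INT_MAX (clear, false),
     as desired.  For NE we negate instead: -ABS (x) has the sign bit set
     exactly when x != 0.  */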
5667
5668 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5669 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5670 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5671 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5672 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5673 {
5674 tem = convert_modes (word_mode, mode, op0, 1);
5675 mode = word_mode;
5676 }
5677
5678 if (tem != 0)
5679 {
5680 if (code == EQ)
5681 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5682 0, OPTAB_WIDEN);
5683 else
5684 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5685 }
5686
5687 /* If we couldn't do it that way, for NE we can "or" the two's complement
5688 of the value with itself. For EQ, we take the one's complement of
5689 that "or", which is an extra insn, so we only handle EQ if branches
5690 are expensive. */
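  /* For instance, in 8-bit arithmetic: A = 3 gives -3 | 3 = -1 (sign bit
     set, so A != 0), while A = 0 gives 0 | 0 = 0.  For EQ, the extra
     one's complement turns 0 into -1 and -1 into 0.  */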
5691
5692 if (tem == 0
5693 && (code == NE
5694 || BRANCH_COST (optimize_insn_for_speed_p (),
5695 false) > 1))
5696 {
5697 if (rtx_equal_p (subtarget, op0))
5698 subtarget = 0;
5699
5700 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5701 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5702 OPTAB_WIDEN);
5703
5704 if (tem && code == EQ)
5705 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5706 }
5707 }
5708
5709 if (tem && normalizep)
5710 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5711 size_int (GET_MODE_BITSIZE (mode) - 1),
5712 subtarget, normalizep == 1);
5713
5714 if (tem)
5715 {
5716 if (!target)
5717 ;
5718 else if (GET_MODE (tem) != target_mode)
5719 {
5720 convert_move (target, tem, 0);
5721 tem = target;
5722 }
5723 else if (!subtarget)
5724 {
5725 emit_move_insn (target, tem);
5726 tem = target;
5727 }
5728 }
5729 else
5730 delete_insns_since (last);
5731
5732 return tem;
5733 }
5734
5735 /* Like emit_store_flag, but always succeeds. */
5736
5737 rtx
5738 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5739 enum machine_mode mode, int unsignedp, int normalizep)
5740 {
5741 rtx tem, label;
5742 rtx trueval, falseval;
5743
5744 /* First see if emit_store_flag can do the job. */
5745 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5746 if (tem != 0)
5747 return tem;
5748
5749 if (!target)
5750 target = gen_reg_rtx (word_mode);
5751
5752 /* If this failed, we have to do this with set/compare/jump/set code.
5753 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5754 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5755 if (code == NE
5756 && GET_MODE_CLASS (mode) == MODE_INT
5757 && REG_P (target)
5758 && op0 == target
5759 && op1 == const0_rtx)
5760 {
5761 label = gen_label_rtx ();
5762 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5763 mode, NULL_RTX, NULL_RTX, label, -1);
5764 emit_move_insn (target, trueval);
5765 emit_label (label);
5766 return target;
5767 }
5768
5769 if (!REG_P (target)
5770 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5771 target = gen_reg_rtx (GET_MODE (target));
5772
5773 /* Jump in the right direction if the target cannot implement CODE
5774 but can jump on its reverse condition. */
5775 falseval = const0_rtx;
5776 if (! can_compare_p (code, mode, ccp_jump)
5777 && (! FLOAT_MODE_P (mode)
5778 || code == ORDERED || code == UNORDERED
5779 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5780 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5781 {
5782 enum rtx_code rcode;
5783 if (FLOAT_MODE_P (mode))
5784 rcode = reverse_condition_maybe_unordered (code);
5785 else
5786 rcode = reverse_condition (code);
5787
5788 /* Canonicalize to UNORDERED for the libcall. */
5789 if (can_compare_p (rcode, mode, ccp_jump)
5790 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5791 {
5792 falseval = trueval;
5793 trueval = const0_rtx;
5794 code = rcode;
5795 }
5796 }
5797
5798 emit_move_insn (target, trueval);
5799 label = gen_label_rtx ();
5800 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5801 NULL_RTX, label, -1);
5802
5803 emit_move_insn (target, falseval);
5804 emit_label (label);
5805
5806 return target;
5807 }
5808 \f
5809 /* Perform possibly multi-word comparison and conditional jump to LABEL
5810 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5811 now a thin wrapper around do_compare_rtx_and_jump. */
5812
5813 static void
5814 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5815 rtx label)
5816 {
5817 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5818 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5819 NULL_RTX, NULL_RTX, label, -1);
5820 }