[gcc.git] / gcc / expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 2011
6 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
41 #include "expmed.h"
42
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
47
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx);
51 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, rtx);
53 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
54 unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT,
56 unsigned HOST_WIDE_INT, rtx, int, bool);
57 static rtx mask_rtx (enum machine_mode, int, int, int);
58 static rtx lshift_value (enum machine_mode, rtx, int, int);
59 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
60 unsigned HOST_WIDE_INT, int);
61 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
62 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
63 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
64
 65 /* Test whether a value is zero or a power of two.  */
66 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
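/* For illustration, a brief worked example of the test above (the values
   are hypothetical, not taken from this file): x & (x - 1) clears the
   lowest set bit of x, so the result is zero exactly when x has at most
   one bit set.

     EXACT_POWER_OF_2_OR_ZERO_P (0)   is ((0 & -1) == 0)   -> true
     EXACT_POWER_OF_2_OR_ZERO_P (8)   is ((8 & 7) == 0)    -> true
     EXACT_POWER_OF_2_OR_ZERO_P (12)  is ((12 & 11) == 0)  -> false (12 & 11 == 8)  */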
67
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
71
72
73 /* Reduce conditional compilation elsewhere. */
74 #ifndef HAVE_insv
75 #define HAVE_insv 0
76 #define CODE_FOR_insv CODE_FOR_nothing
77 #define gen_insv(a,b,c,d) NULL_RTX
78 #endif
79 #ifndef HAVE_extv
80 #define HAVE_extv 0
81 #define CODE_FOR_extv CODE_FOR_nothing
82 #define gen_extv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extzv
85 #define HAVE_extzv 0
86 #define CODE_FOR_extzv CODE_FOR_nothing
87 #define gen_extzv(a,b,c,d) NULL_RTX
88 #endif
89
90 void
91 init_expmed (void)
92 {
93 struct
94 {
95 struct rtx_def reg; rtunion reg_fld[2];
96 struct rtx_def plus; rtunion plus_fld1;
97 struct rtx_def neg;
98 struct rtx_def mult; rtunion mult_fld1;
99 struct rtx_def sdiv; rtunion sdiv_fld1;
100 struct rtx_def udiv; rtunion udiv_fld1;
101 struct rtx_def zext;
102 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
103 struct rtx_def smod_32; rtunion smod_32_fld1;
104 struct rtx_def wide_mult; rtunion wide_mult_fld1;
105 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
106 struct rtx_def wide_trunc;
107 struct rtx_def shift; rtunion shift_fld1;
108 struct rtx_def shift_mult; rtunion shift_mult_fld1;
109 struct rtx_def shift_add; rtunion shift_add_fld1;
110 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
111 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
112 } all;
113
114 rtx pow2[MAX_BITS_PER_WORD];
115 rtx cint[MAX_BITS_PER_WORD];
116 int m, n;
117 enum machine_mode mode, wider_mode;
118 int speed;
119
120
121 for (m = 1; m < MAX_BITS_PER_WORD; m++)
122 {
123 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
124 cint[m] = GEN_INT (m);
125 }
126 memset (&all, 0, sizeof all);
127
128 PUT_CODE (&all.reg, REG);
129 /* Avoid using hard regs in ways which may be unsupported. */
130 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
131
132 PUT_CODE (&all.plus, PLUS);
133 XEXP (&all.plus, 0) = &all.reg;
134 XEXP (&all.plus, 1) = &all.reg;
135
136 PUT_CODE (&all.neg, NEG);
137 XEXP (&all.neg, 0) = &all.reg;
138
139 PUT_CODE (&all.mult, MULT);
140 XEXP (&all.mult, 0) = &all.reg;
141 XEXP (&all.mult, 1) = &all.reg;
142
143 PUT_CODE (&all.sdiv, DIV);
144 XEXP (&all.sdiv, 0) = &all.reg;
145 XEXP (&all.sdiv, 1) = &all.reg;
146
147 PUT_CODE (&all.udiv, UDIV);
148 XEXP (&all.udiv, 0) = &all.reg;
149 XEXP (&all.udiv, 1) = &all.reg;
150
151 PUT_CODE (&all.sdiv_32, DIV);
152 XEXP (&all.sdiv_32, 0) = &all.reg;
153 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
154
155 PUT_CODE (&all.smod_32, MOD);
156 XEXP (&all.smod_32, 0) = &all.reg;
157 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
158
159 PUT_CODE (&all.zext, ZERO_EXTEND);
160 XEXP (&all.zext, 0) = &all.reg;
161
162 PUT_CODE (&all.wide_mult, MULT);
163 XEXP (&all.wide_mult, 0) = &all.zext;
164 XEXP (&all.wide_mult, 1) = &all.zext;
165
166 PUT_CODE (&all.wide_lshr, LSHIFTRT);
167 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
168
169 PUT_CODE (&all.wide_trunc, TRUNCATE);
170 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
171
172 PUT_CODE (&all.shift, ASHIFT);
173 XEXP (&all.shift, 0) = &all.reg;
174
175 PUT_CODE (&all.shift_mult, MULT);
176 XEXP (&all.shift_mult, 0) = &all.reg;
177
178 PUT_CODE (&all.shift_add, PLUS);
179 XEXP (&all.shift_add, 0) = &all.shift_mult;
180 XEXP (&all.shift_add, 1) = &all.reg;
181
182 PUT_CODE (&all.shift_sub0, MINUS);
183 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
184 XEXP (&all.shift_sub0, 1) = &all.reg;
185
186 PUT_CODE (&all.shift_sub1, MINUS);
187 XEXP (&all.shift_sub1, 0) = &all.reg;
188 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
189
190 for (speed = 0; speed < 2; speed++)
191 {
192 crtl->maybe_hot_insn_p = speed;
193 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
194
195 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
196 mode != VOIDmode;
197 mode = GET_MODE_WIDER_MODE (mode))
198 {
199 PUT_MODE (&all.reg, mode);
200 PUT_MODE (&all.plus, mode);
201 PUT_MODE (&all.neg, mode);
202 PUT_MODE (&all.mult, mode);
203 PUT_MODE (&all.sdiv, mode);
204 PUT_MODE (&all.udiv, mode);
205 PUT_MODE (&all.sdiv_32, mode);
206 PUT_MODE (&all.smod_32, mode);
207 PUT_MODE (&all.wide_trunc, mode);
208 PUT_MODE (&all.shift, mode);
209 PUT_MODE (&all.shift_mult, mode);
210 PUT_MODE (&all.shift_add, mode);
211 PUT_MODE (&all.shift_sub0, mode);
212 PUT_MODE (&all.shift_sub1, mode);
213
214 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
215 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
216 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
217 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
218 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
219
220 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
221 <= 2 * add_cost[speed][mode]);
222 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
223 <= 4 * add_cost[speed][mode]);
224
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
227 {
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
232
233 mul_widen_cost[speed][wider_mode]
234 = rtx_cost (&all.wide_mult, SET, speed);
235 mul_highpart_cost[speed][mode]
236 = rtx_cost (&all.wide_trunc, SET, speed);
237 }
238
239 shift_cost[speed][mode][0] = 0;
240 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
241 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
242
243 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
244 for (m = 1; m < n; m++)
245 {
246 XEXP (&all.shift, 1) = cint[m];
247 XEXP (&all.shift_mult, 1) = pow2[m];
248
249 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
250 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
251 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
252 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
253 }
254 }
255 }
256 if (alg_hash_used_p)
257 memset (alg_hash, 0, sizeof (alg_hash));
258 else
259 alg_hash_used_p = true;
260 default_rtl_profile ();
261 }
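/* A hedged sketch of how the tables filled in above are consumed by the
   multiply and divide expanders later in this file (the concrete
   multiplier 9 is hypothetical): a multiplication can be replaced by a
   shift-and-add when the per-mode, per-speed costs say that is cheaper:

     if (shiftadd_cost[speed][mode][3] < mul_cost[speed][mode])
       ... expand x * 9 as (x << 3) + x ...  */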
262
263 /* Return an rtx representing minus the value of X.
264 MODE is the intended mode of the result,
265 useful if X is a CONST_INT. */
266
267 rtx
268 negate_rtx (enum machine_mode mode, rtx x)
269 {
270 rtx result = simplify_unary_operation (NEG, mode, x, mode);
271
272 if (result == 0)
273 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
274
275 return result;
276 }
277
278 /* Report on the availability of insv/extv/extzv and the desired mode
279 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
280 is false; else the mode of the specified operand. If OPNO is -1,
281 all the caller cares about is whether the insn is available. */
282 enum machine_mode
283 mode_for_extraction (enum extraction_pattern pattern, int opno)
284 {
285 const struct insn_data_d *data;
286
287 switch (pattern)
288 {
289 case EP_insv:
290 if (HAVE_insv)
291 {
292 data = &insn_data[CODE_FOR_insv];
293 break;
294 }
295 return MAX_MACHINE_MODE;
296
297 case EP_extv:
298 if (HAVE_extv)
299 {
300 data = &insn_data[CODE_FOR_extv];
301 break;
302 }
303 return MAX_MACHINE_MODE;
304
305 case EP_extzv:
306 if (HAVE_extzv)
307 {
308 data = &insn_data[CODE_FOR_extzv];
309 break;
310 }
311 return MAX_MACHINE_MODE;
312
313 default:
314 gcc_unreachable ();
315 }
316
317 if (opno == -1)
318 return VOIDmode;
319
320 /* Everyone who uses this function used to follow it with
321 if (result == VOIDmode) result = word_mode; */
322 if (data->operand[opno].mode == VOIDmode)
323 return word_mode;
324 return data->operand[opno].mode;
325 }
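/* A typical use, mirroring store_bit_field_1 below: ask for the mode the
   insv pattern wants for its source operand (operand 3); the caller then
   compares against MAX_MACHINE_MODE, which means the target has no insv
   pattern at all:

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
     if (op_mode == MAX_MACHINE_MODE)
       ... no usable insv pattern ...  */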
326
327 /* Return true if X, of mode MODE, matches the predicate for operand
328 OPNO of instruction ICODE. Allow volatile memories, regardless of
329 the ambient volatile_ok setting. */
330
331 static bool
332 check_predicate_volatile_ok (enum insn_code icode, int opno,
333 rtx x, enum machine_mode mode)
334 {
335 bool save_volatile_ok, result;
336
337 save_volatile_ok = volatile_ok;
338 result = insn_data[(int) icode].operand[opno].predicate (x, mode);
339 volatile_ok = save_volatile_ok;
340 return result;
341 }
342 \f
343 /* A subroutine of store_bit_field, with the same arguments. Return true
344 if the operation could be implemented.
345
346 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
347 no other way of implementing the operation. If FALLBACK_P is false,
348 return false instead. */
349
350 static bool
351 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
352 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
353 rtx value, bool fallback_p)
354 {
355 unsigned int unit
356 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
357 unsigned HOST_WIDE_INT offset, bitpos;
358 rtx op0 = str_rtx;
359 int byte_offset;
360 rtx orig_value;
361
362 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
363
364 while (GET_CODE (op0) == SUBREG)
365 {
366 /* The following line once was done only if WORDS_BIG_ENDIAN,
367 but I think that is a mistake. WORDS_BIG_ENDIAN is
368 meaningful at a much higher level; when structures are copied
369 between memory and regs, the higher-numbered regs
370 always get higher addresses. */
371 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
372 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
373
374 byte_offset = 0;
375
376 /* Paradoxical subregs need special handling on big endian machines. */
377 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
378 {
379 int difference = inner_mode_size - outer_mode_size;
380
381 if (WORDS_BIG_ENDIAN)
382 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
383 if (BYTES_BIG_ENDIAN)
384 byte_offset += difference % UNITS_PER_WORD;
385 }
386 else
387 byte_offset = SUBREG_BYTE (op0);
388
389 bitnum += byte_offset * BITS_PER_UNIT;
390 op0 = SUBREG_REG (op0);
391 }
392
393 /* No action is needed if the target is a register and if the field
394 lies completely outside that register. This can occur if the source
395 code contains an out-of-bounds access to a small array. */
396 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
397 return true;
398
399 /* Use vec_set patterns for inserting parts of vectors whenever
400 available. */
401 if (VECTOR_MODE_P (GET_MODE (op0))
402 && !MEM_P (op0)
403 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
404 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
405 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
406 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
407 {
408 enum machine_mode outermode = GET_MODE (op0);
409 enum machine_mode innermode = GET_MODE_INNER (outermode);
410 int icode = (int) optab_handler (vec_set_optab, outermode);
411 int pos = bitnum / GET_MODE_BITSIZE (innermode);
412 rtx rtxpos = GEN_INT (pos);
413 rtx src = value;
414 rtx dest = op0;
415 rtx pat, seq;
416 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
417 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
418 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
419
420 start_sequence ();
421
422 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
423 src = copy_to_mode_reg (mode1, src);
424
425 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
426 rtxpos = copy_to_mode_reg (mode1, rtxpos);
427
428 /* We could handle this, but we should always be called with a pseudo
429 for our targets and all insns should take them as outputs. */
430 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
431 && (*insn_data[icode].operand[1].predicate) (src, mode1)
432 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
433 pat = GEN_FCN (icode) (dest, src, rtxpos);
434 seq = get_insns ();
435 end_sequence ();
436 if (pat)
437 {
438 emit_insn (seq);
439 emit_insn (pat);
440 return true;
441 }
442 }
443
444 /* If the target is a register, overwriting the entire object, or storing
445 a full-word or multi-word field can be done with just a SUBREG.
446
447 If the target is memory, storing any naturally aligned field can be
448 done with a simple store. For targets that support fast unaligned
449 memory, any naturally sized, unit aligned field can be done directly. */
450
451 offset = bitnum / unit;
452 bitpos = bitnum % unit;
453 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
454 + (offset * UNITS_PER_WORD);
455
456 if (bitpos == 0
457 && bitsize == GET_MODE_BITSIZE (fieldmode)
458 && (!MEM_P (op0)
459 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
460 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
461 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
462 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
463 || (offset * BITS_PER_UNIT % bitsize == 0
464 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
465 {
466 if (MEM_P (op0))
467 op0 = adjust_address (op0, fieldmode, offset);
468 else if (GET_MODE (op0) != fieldmode)
469 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
470 byte_offset);
471 emit_move_insn (op0, value);
472 return true;
473 }
474
475 /* Make sure we are playing with integral modes. Pun with subregs
476 if we aren't. This must come after the entire register case above,
477 since that case is valid for any mode. The following cases are only
478 valid for integral modes. */
479 {
480 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
481 if (imode != GET_MODE (op0))
482 {
483 if (MEM_P (op0))
484 op0 = adjust_address (op0, imode, 0);
485 else
486 {
487 gcc_assert (imode != BLKmode);
488 op0 = gen_lowpart (imode, op0);
489 }
490 }
491 }
492
493 /* We may be accessing data outside the field, which means
494 we can alias adjacent data. */
495 if (MEM_P (op0))
496 {
497 op0 = shallow_copy_rtx (op0);
498 set_mem_alias_set (op0, 0);
499 set_mem_expr (op0, 0);
500 }
501
502 /* If OP0 is a register, BITPOS must count within a word.
503 But as we have it, it counts within whatever size OP0 now has.
504 On a bigendian machine, these are not the same, so convert. */
505 if (BYTES_BIG_ENDIAN
506 && !MEM_P (op0)
507 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
508 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
509
510 /* Storing an lsb-aligned field in a register
511 can be done with a movestrict instruction. */
512
513 if (!MEM_P (op0)
514 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
515 && bitsize == GET_MODE_BITSIZE (fieldmode)
516 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
517 {
518 int icode = optab_handler (movstrict_optab, fieldmode);
519 rtx insn;
520 rtx start = get_last_insn ();
521 rtx arg0 = op0;
522
523 /* Get appropriate low part of the value being stored. */
524 if (CONST_INT_P (value) || REG_P (value))
525 value = gen_lowpart (fieldmode, value);
526 else if (!(GET_CODE (value) == SYMBOL_REF
527 || GET_CODE (value) == LABEL_REF
528 || GET_CODE (value) == CONST))
529 value = convert_to_mode (fieldmode, value, 0);
530
531 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
532 value = copy_to_mode_reg (fieldmode, value);
533
534 if (GET_CODE (op0) == SUBREG)
535 {
536 /* Else we've got some float mode source being extracted into
537 a different float mode destination -- this combination of
538 subregs results in Severe Tire Damage. */
539 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
540 || GET_MODE_CLASS (fieldmode) == MODE_INT
541 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
542 arg0 = SUBREG_REG (op0);
543 }
544
545 insn = (GEN_FCN (icode)
546 (gen_rtx_SUBREG (fieldmode, arg0,
547 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
548 + (offset * UNITS_PER_WORD)),
549 value));
550 if (insn)
551 {
552 emit_insn (insn);
553 return true;
554 }
555 delete_insns_since (start);
556 }
557
558 /* Handle fields bigger than a word. */
559
560 if (bitsize > BITS_PER_WORD)
561 {
562 /* Here we transfer the words of the field
563 in the order least significant first.
564 This is because the most significant word is the one which may
565 be less than full.
566 However, only do that if the value is not BLKmode. */
567
568 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
569 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
570 unsigned int i;
571 rtx last;
572
573 /* This is the mode we must force value to, so that there will be enough
574 subwords to extract. Note that fieldmode will often (always?) be
575 VOIDmode, because that is what store_field uses to indicate that this
576 is a bit field, but passing VOIDmode to operand_subword_force
577 is not allowed. */
578 fieldmode = GET_MODE (value);
579 if (fieldmode == VOIDmode)
580 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
581
582 last = get_last_insn ();
583 for (i = 0; i < nwords; i++)
584 {
585 /* If I is 0, use the low-order word in both field and target;
586 if I is 1, use the next to lowest word; and so on. */
587 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
588 unsigned int bit_offset = (backwards
589 ? MAX ((int) bitsize - ((int) i + 1)
590 * BITS_PER_WORD,
591 0)
592 : (int) i * BITS_PER_WORD);
593 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
594
595 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
596 bitsize - i * BITS_PER_WORD),
597 bitnum + bit_offset, word_mode,
598 value_word, fallback_p))
599 {
600 delete_insns_since (last);
601 return false;
602 }
603 }
604 return true;
605 }
606
 607 /* From here on we can assume that the field to be stored is no wider
 608 than a word, since fields bigger than a word were handled above. */
609
610 /* OFFSET is the number of words or bytes (UNIT says which)
611 from STR_RTX to the first word or byte containing part of the field. */
612
613 if (!MEM_P (op0))
614 {
615 if (offset != 0
616 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
617 {
618 if (!REG_P (op0))
619 {
620 /* Since this is a destination (lvalue), we can't copy
621 it to a pseudo. We can remove a SUBREG that does not
622 change the size of the operand. Such a SUBREG may
623 have been added above. */
624 gcc_assert (GET_CODE (op0) == SUBREG
625 && (GET_MODE_SIZE (GET_MODE (op0))
626 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
627 op0 = SUBREG_REG (op0);
628 }
629 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
630 op0, (offset * UNITS_PER_WORD));
631 }
632 offset = 0;
633 }
634
635 /* If VALUE has a floating-point or complex mode, access it as an
636 integer of the corresponding size. This can occur on a machine
637 with 64 bit registers that uses SFmode for float. It can also
638 occur for unaligned float or complex fields. */
639 orig_value = value;
640 if (GET_MODE (value) != VOIDmode
641 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
642 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
643 {
644 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
645 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
646 }
647
648 /* Now OFFSET is nonzero only if OP0 is memory
649 and is therefore always measured in bytes. */
650
651 if (HAVE_insv
652 && GET_MODE (value) != BLKmode
653 && bitsize > 0
654 && GET_MODE_BITSIZE (op_mode) >= bitsize
655 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
656 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
657 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
658 VOIDmode)
659 && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
660 {
661 int xbitpos = bitpos;
662 rtx value1;
663 rtx xop0 = op0;
664 rtx last = get_last_insn ();
665 rtx pat;
666 bool copy_back = false;
667
668 /* Add OFFSET into OP0's address. */
669 if (MEM_P (xop0))
670 xop0 = adjust_address (xop0, byte_mode, offset);
671
672 /* If xop0 is a register, we need it in OP_MODE
673 to make it acceptable to the format of insv. */
674 if (GET_CODE (xop0) == SUBREG)
675 /* We can't just change the mode, because this might clobber op0,
676 and we will need the original value of op0 if insv fails. */
677 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
678 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
679 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
680
681 /* If the destination is a paradoxical subreg such that we need a
682 truncate to the inner mode, perform the insertion on a temporary and
683 truncate the result to the original destination. Note that we can't
684 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
685 X) 0)) is (reg:N X). */
686 if (GET_CODE (xop0) == SUBREG
687 && REG_P (SUBREG_REG (xop0))
688 && (!TRULY_NOOP_TRUNCATION
689 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
690 GET_MODE_BITSIZE (op_mode))))
691 {
692 rtx tem = gen_reg_rtx (op_mode);
693 emit_move_insn (tem, xop0);
694 xop0 = tem;
695 copy_back = true;
696 }
697
698 /* On big-endian machines, we count bits from the most significant.
699 If the bit field insn does not, we must invert. */
700
701 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
702 xbitpos = unit - bitsize - xbitpos;
703
704 /* We have been counting XBITPOS within UNIT.
705 Count instead within the size of the register. */
706 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
707 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
708
709 unit = GET_MODE_BITSIZE (op_mode);
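      /* A worked example of the two adjustments above, assuming a memory
	 operand (so UNIT started as BITS_PER_UNIT == 8), BITSIZE == 3 and
	 BITPOS == 2: if bytes are numbered little-endian but the insv
	 pattern counts bits from the most significant end, the first
	 adjustment gives XBITPOS = 8 - 3 - 2 == 3, i.e. the field starts
	 3 bits below the msb of the byte; the second adjustment applies
	 only to register operands.  */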
710
711 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
712 value1 = value;
713 if (GET_MODE (value) != op_mode)
714 {
715 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
716 {
717 /* Optimization: Don't bother really extending VALUE
718 if it has all the bits we will actually use. However,
719 if we must narrow it, be sure we do it correctly. */
720
721 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
722 {
723 rtx tmp;
724
725 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
726 if (! tmp)
727 tmp = simplify_gen_subreg (op_mode,
728 force_reg (GET_MODE (value),
729 value1),
730 GET_MODE (value), 0);
731 value1 = tmp;
732 }
733 else
734 value1 = gen_lowpart (op_mode, value1);
735 }
736 else if (CONST_INT_P (value))
737 value1 = gen_int_mode (INTVAL (value), op_mode);
738 else
739 /* Parse phase is supposed to make VALUE's data type
740 match that of the component reference, which is a type
741 at least as wide as the field; so VALUE should have
742 a mode that corresponds to that type. */
743 gcc_assert (CONSTANT_P (value));
744 }
745
746 /* If this machine's insv insists on a register,
747 get VALUE1 into a register. */
748 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
749 (value1, op_mode)))
750 value1 = force_reg (op_mode, value1);
751
752 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
753 if (pat)
754 {
755 emit_insn (pat);
756
757 if (copy_back)
758 convert_move (op0, xop0, true);
759 return true;
760 }
761 delete_insns_since (last);
762 }
763
764 /* If OP0 is a memory, try copying it to a register and seeing if a
765 cheap register alternative is available. */
766 if (HAVE_insv && MEM_P (op0))
767 {
768 enum machine_mode bestmode;
769
770 /* Get the mode to use for inserting into this field. If OP0 is
771 BLKmode, get the smallest mode consistent with the alignment. If
772 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
773 mode. Otherwise, use the smallest mode containing the field. */
774
775 if (GET_MODE (op0) == BLKmode
776 || (op_mode != MAX_MACHINE_MODE
777 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
778 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
779 (op_mode == MAX_MACHINE_MODE
780 ? VOIDmode : op_mode),
781 MEM_VOLATILE_P (op0));
782 else
783 bestmode = GET_MODE (op0);
784
785 if (bestmode != VOIDmode
786 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
787 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
788 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
789 {
790 rtx last, tempreg, xop0;
791 unsigned HOST_WIDE_INT xoffset, xbitpos;
792
793 last = get_last_insn ();
794
795 /* Adjust address to point to the containing unit of
796 that mode. Compute the offset as a multiple of this unit,
797 counting in bytes. */
798 unit = GET_MODE_BITSIZE (bestmode);
799 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
800 xbitpos = bitnum % unit;
801 xop0 = adjust_address (op0, bestmode, xoffset);
802
803 /* Fetch that unit, store the bitfield in it, then store
804 the unit. */
805 tempreg = copy_to_reg (xop0);
806 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
807 fieldmode, orig_value, false))
808 {
809 emit_move_insn (xop0, tempreg);
810 return true;
811 }
812 delete_insns_since (last);
813 }
814 }
815
816 if (!fallback_p)
817 return false;
818
819 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
820 return true;
821 }
822
823 /* Generate code to store value from rtx VALUE
824 into a bit-field within structure STR_RTX
825 containing BITSIZE bits starting at bit BITNUM.
826 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
827
828 void
829 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
830 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
831 rtx value)
832 {
833 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
834 gcc_unreachable ();
835 }
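/* A minimal usage sketch (the pseudo and the constants are hypothetical,
   not taken from this file): store the constant 5 into a 3-bit field that
   starts at bit 4 of a word_mode pseudo-register:

     rtx reg = gen_reg_rtx (word_mode);
     store_bit_field (reg, 3, 4, word_mode, GEN_INT (5));

   A small field like this typically ends up either in the target's insv
   pattern, if it has one, or in store_fixed_bit_field, which masks out
   the old bits and IORs the shifted value into place.  */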
836 \f
837 /* Use shifts and boolean operations to store VALUE
838 into a bit field of width BITSIZE
 839 in a memory location specified by OP0, offset by OFFSET bytes.
840 (OFFSET must be 0 if OP0 is a register.)
841 The field starts at position BITPOS within the byte.
842 (If OP0 is a register, it may be a full word or a narrower mode,
843 but BITPOS still counts within a full word,
844 which is significant on bigendian machines.) */
845
846 static void
847 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
848 unsigned HOST_WIDE_INT bitsize,
849 unsigned HOST_WIDE_INT bitpos, rtx value)
850 {
851 enum machine_mode mode;
852 unsigned int total_bits = BITS_PER_WORD;
853 rtx temp;
854 int all_zero = 0;
855 int all_one = 0;
856
857 /* There is a case not handled here:
858 a structure with a known alignment of just a halfword
859 and a field split across two aligned halfwords within the structure.
860 Or likewise a structure with a known alignment of just a byte
861 and a field split across two bytes.
862 Such cases are not supposed to be able to occur. */
863
864 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
865 {
866 gcc_assert (!offset);
867 /* Special treatment for a bit field split across two registers. */
868 if (bitsize + bitpos > BITS_PER_WORD)
869 {
870 store_split_bit_field (op0, bitsize, bitpos, value);
871 return;
872 }
873 }
874 else
875 {
876 /* Get the proper mode to use for this field. We want a mode that
877 includes the entire field. If such a mode would be larger than
878 a word, we won't be doing the extraction the normal way.
879 We don't want a mode bigger than the destination. */
880
881 mode = GET_MODE (op0);
882 if (GET_MODE_BITSIZE (mode) == 0
883 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
884 mode = word_mode;
885
886 if (MEM_VOLATILE_P (op0)
887 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
888 && flag_strict_volatile_bitfields > 0)
889 mode = GET_MODE (op0);
890 else
891 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
892 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
893
894 if (mode == VOIDmode)
895 {
896 /* The only way this should occur is if the field spans word
897 boundaries. */
898 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
899 value);
900 return;
901 }
902
903 total_bits = GET_MODE_BITSIZE (mode);
904
905 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
906 be in the range 0 to total_bits-1, and put any excess bytes in
907 OFFSET. */
908 if (bitpos >= total_bits)
909 {
910 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
911 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
912 * BITS_PER_UNIT);
913 }
914
915 /* Get ref to an aligned byte, halfword, or word containing the field.
916 Adjust BITPOS to be position within a word,
917 and OFFSET to be the offset of that word.
918 Then alter OP0 to refer to that word. */
919 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
920 offset -= (offset % (total_bits / BITS_PER_UNIT));
921 op0 = adjust_address (op0, mode, offset);
922 }
923
924 mode = GET_MODE (op0);
925
926 /* Now MODE is either some integral mode for a MEM as OP0,
927 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
928 The bit field is contained entirely within OP0.
929 BITPOS is the starting bit number within OP0.
930 (OP0's mode may actually be narrower than MODE.) */
931
932 if (BYTES_BIG_ENDIAN)
933 /* BITPOS is the distance between our msb
934 and that of the containing datum.
935 Convert it to the distance from the lsb. */
936 bitpos = total_bits - bitsize - bitpos;
937
938 /* Now BITPOS is always the distance between our lsb
939 and that of OP0. */
940
941 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
942 we must first convert its mode to MODE. */
943
944 if (CONST_INT_P (value))
945 {
946 HOST_WIDE_INT v = INTVAL (value);
947
948 if (bitsize < HOST_BITS_PER_WIDE_INT)
949 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
950
951 if (v == 0)
952 all_zero = 1;
953 else if ((bitsize < HOST_BITS_PER_WIDE_INT
954 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
955 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
956 all_one = 1;
957
958 value = lshift_value (mode, value, bitpos, bitsize);
959 }
960 else
961 {
962 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
963 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
964
965 if (GET_MODE (value) != mode)
966 value = convert_to_mode (mode, value, 1);
967
968 if (must_and)
969 value = expand_binop (mode, and_optab, value,
970 mask_rtx (mode, 0, bitsize, 0),
971 NULL_RTX, 1, OPTAB_LIB_WIDEN);
972 if (bitpos > 0)
973 value = expand_shift (LSHIFT_EXPR, mode, value,
974 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
975 }
976
977 /* Now clear the chosen bits in OP0,
978 except that if VALUE is -1 we need not bother. */
979 /* We keep the intermediates in registers to allow CSE to combine
980 consecutive bitfield assignments. */
981
982 temp = force_reg (mode, op0);
983
984 if (! all_one)
985 {
986 temp = expand_binop (mode, and_optab, temp,
987 mask_rtx (mode, bitpos, bitsize, 1),
988 NULL_RTX, 1, OPTAB_LIB_WIDEN);
989 temp = force_reg (mode, temp);
990 }
991
992 /* Now logical-or VALUE into OP0, unless it is zero. */
993
994 if (! all_zero)
995 {
996 temp = expand_binop (mode, ior_optab, temp, value,
997 NULL_RTX, 1, OPTAB_LIB_WIDEN);
998 temp = force_reg (mode, temp);
999 }
1000
1001 if (op0 != temp)
1002 {
1003 op0 = copy_rtx (op0);
1004 emit_move_insn (op0, temp);
1005 }
1006 }
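/* A worked example of the sequence above (the field parameters are
   illustrative): with MODE == QImode, BITSIZE == 4 and BITPOS == 2 the
   field occupies bits 2..5, so the field mask is 0x3c.  The destination
   is ANDed with the complement 0xc3 to clear the field; VALUE, if it
   might carry stray high bits, is ANDed with 0x0f, shifted left by 2,
   and finally IORed in before the single move back to OP0.  */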
1007 \f
1008 /* Store a bit field that is split across multiple accessible memory objects.
1009
1010 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1011 BITSIZE is the field width; BITPOS the position of its first bit
1012 (within the word).
1013 VALUE is the value to store.
1014
1015 This does not yet handle fields wider than BITS_PER_WORD. */
1016
1017 static void
1018 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1019 unsigned HOST_WIDE_INT bitpos, rtx value)
1020 {
1021 unsigned int unit;
1022 unsigned int bitsdone = 0;
1023
 1024 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1025 much at a time. */
1026 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1027 unit = BITS_PER_WORD;
1028 else
1029 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1030
1031 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1032 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1033 that VALUE might be a floating-point constant. */
1034 if (CONSTANT_P (value) && !CONST_INT_P (value))
1035 {
1036 rtx word = gen_lowpart_common (word_mode, value);
1037
1038 if (word && (value != word))
1039 value = word;
1040 else
1041 value = gen_lowpart_common (word_mode,
1042 force_reg (GET_MODE (value) != VOIDmode
1043 ? GET_MODE (value)
1044 : word_mode, value));
1045 }
1046
1047 while (bitsdone < bitsize)
1048 {
1049 unsigned HOST_WIDE_INT thissize;
1050 rtx part, word;
1051 unsigned HOST_WIDE_INT thispos;
1052 unsigned HOST_WIDE_INT offset;
1053
1054 offset = (bitpos + bitsdone) / unit;
1055 thispos = (bitpos + bitsdone) % unit;
1056
1057 /* THISSIZE must not overrun a word boundary. Otherwise,
1058 store_fixed_bit_field will call us again, and we will mutually
1059 recurse forever. */
1060 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1061 thissize = MIN (thissize, unit - thispos);
1062
1063 if (BYTES_BIG_ENDIAN)
1064 {
1065 int total_bits;
1066
1067 /* We must do an endian conversion exactly the same way as it is
1068 done in extract_bit_field, so that the two calls to
1069 extract_fixed_bit_field will have comparable arguments. */
1070 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1071 total_bits = BITS_PER_WORD;
1072 else
1073 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1074
1075 /* Fetch successively less significant portions. */
1076 if (CONST_INT_P (value))
1077 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1078 >> (bitsize - bitsdone - thissize))
1079 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1080 else
1081 /* The args are chosen so that the last part includes the
1082 lsb. Give extract_bit_field the value it needs (with
1083 endianness compensation) to fetch the piece we want. */
1084 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1085 total_bits - bitsize + bitsdone,
1086 NULL_RTX, 1, false);
1087 }
1088 else
1089 {
1090 /* Fetch successively more significant portions. */
1091 if (CONST_INT_P (value))
1092 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1093 >> bitsdone)
1094 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1095 else
1096 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1097 bitsdone, NULL_RTX, 1, false);
1098 }
1099
1100 /* If OP0 is a register, then handle OFFSET here.
1101
1102 When handling multiword bitfields, extract_bit_field may pass
1103 down a word_mode SUBREG of a larger REG for a bitfield that actually
1104 crosses a word boundary. Thus, for a SUBREG, we must find
1105 the current word starting from the base register. */
1106 if (GET_CODE (op0) == SUBREG)
1107 {
1108 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1109 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1110 GET_MODE (SUBREG_REG (op0)));
1111 offset = 0;
1112 }
1113 else if (REG_P (op0))
1114 {
1115 word = operand_subword_force (op0, offset, GET_MODE (op0));
1116 offset = 0;
1117 }
1118 else
1119 word = op0;
1120
1121 /* OFFSET is in UNITs, and UNIT is in bits.
1122 store_fixed_bit_field wants offset in bytes. */
1123 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1124 thispos, part);
1125 bitsdone += thissize;
1126 }
1127 }
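/* A worked example of the loop above (the sizes are illustrative): storing
   a 20-bit field at BITPOS 28 into word-aligned memory with 32-bit words
   takes two iterations.  The first stores the 4 bits that still fit in the
   first word (THISSIZE == 4, THISPOS == 28); the second stores the
   remaining 16 bits at THISPOS == 0 of the following word.  */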
1128 \f
1129 /* A subroutine of extract_bit_field_1 that converts return value X
1130 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1131 to extract_bit_field. */
1132
1133 static rtx
1134 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1135 enum machine_mode tmode, bool unsignedp)
1136 {
1137 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1138 return x;
1139
 1140 /* If the desired mode TMODE is not a scalar integer mode, first convert
 1141 X to a scalar integer mode of the same size and then access the result
 1142 as TMODE (e.g. a floating-point mode) via a SUBREG. */
1143 if (!SCALAR_INT_MODE_P (tmode))
1144 {
1145 enum machine_mode smode;
1146
1147 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1148 x = convert_to_mode (smode, x, unsignedp);
1149 x = force_reg (smode, x);
1150 return gen_lowpart (tmode, x);
1151 }
1152
1153 return convert_to_mode (tmode, x, unsignedp);
1154 }
1155
1156 /* A subroutine of extract_bit_field, with the same arguments.
1157 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1158 if we can find no other means of implementing the operation.
 1159 If FALLBACK_P is false, return NULL instead. */
1160
1161 static rtx
1162 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1163 unsigned HOST_WIDE_INT bitnum,
1164 int unsignedp, bool packedp, rtx target,
1165 enum machine_mode mode, enum machine_mode tmode,
1166 bool fallback_p)
1167 {
1168 unsigned int unit
1169 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1170 unsigned HOST_WIDE_INT offset, bitpos;
1171 rtx op0 = str_rtx;
1172 enum machine_mode int_mode;
1173 enum machine_mode ext_mode;
1174 enum machine_mode mode1;
1175 enum insn_code icode;
1176 int byte_offset;
1177
1178 if (tmode == VOIDmode)
1179 tmode = mode;
1180
1181 while (GET_CODE (op0) == SUBREG)
1182 {
1183 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1184 op0 = SUBREG_REG (op0);
1185 }
1186
1187 /* If we have an out-of-bounds access to a register, just return an
1188 uninitialized register of the required mode. This can occur if the
1189 source code contains an out-of-bounds access to a small array. */
1190 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1191 return gen_reg_rtx (tmode);
1192
1193 if (REG_P (op0)
1194 && mode == GET_MODE (op0)
1195 && bitnum == 0
1196 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1197 {
1198 /* We're trying to extract a full register from itself. */
1199 return op0;
1200 }
1201
1202 /* See if we can get a better vector mode before extracting. */
1203 if (VECTOR_MODE_P (GET_MODE (op0))
1204 && !MEM_P (op0)
1205 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1206 {
1207 enum machine_mode new_mode;
1208 int nunits = GET_MODE_NUNITS (GET_MODE (op0));
1209
1210 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1211 new_mode = MIN_MODE_VECTOR_FLOAT;
1212 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1213 new_mode = MIN_MODE_VECTOR_FRACT;
1214 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1215 new_mode = MIN_MODE_VECTOR_UFRACT;
1216 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1217 new_mode = MIN_MODE_VECTOR_ACCUM;
1218 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1219 new_mode = MIN_MODE_VECTOR_UACCUM;
1220 else
1221 new_mode = MIN_MODE_VECTOR_INT;
1222
1223 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1224 if (GET_MODE_NUNITS (new_mode) == nunits
1225 && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1226 && targetm.vector_mode_supported_p (new_mode))
1227 break;
1228 if (new_mode != VOIDmode)
1229 op0 = gen_lowpart (new_mode, op0);
1230 }
1231
1232 /* Use vec_extract patterns for extracting parts of vectors whenever
1233 available. */
1234 if (VECTOR_MODE_P (GET_MODE (op0))
1235 && !MEM_P (op0)
1236 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1237 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1238 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1239 {
1240 enum machine_mode outermode = GET_MODE (op0);
1241 enum machine_mode innermode = GET_MODE_INNER (outermode);
1242 int icode = (int) optab_handler (vec_extract_optab, outermode);
1243 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1244 rtx rtxpos = GEN_INT (pos);
1245 rtx src = op0;
1246 rtx dest = NULL, pat, seq;
1247 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1248 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1249 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1250
1251 if (innermode == tmode || innermode == mode)
1252 dest = target;
1253
1254 if (!dest)
1255 dest = gen_reg_rtx (innermode);
1256
1257 start_sequence ();
1258
1259 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1260 dest = copy_to_mode_reg (mode0, dest);
1261
1262 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1263 src = copy_to_mode_reg (mode1, src);
1264
1265 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1266 rtxpos = copy_to_mode_reg (mode1, rtxpos);
1267
1268 /* We could handle this, but we should always be called with a pseudo
1269 for our targets and all insns should take them as outputs. */
1270 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1271 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1272 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1273
1274 pat = GEN_FCN (icode) (dest, src, rtxpos);
1275 seq = get_insns ();
1276 end_sequence ();
1277 if (pat)
1278 {
1279 emit_insn (seq);
1280 emit_insn (pat);
1281 if (mode0 != mode)
1282 return gen_lowpart (tmode, dest);
1283 return dest;
1284 }
1285 }
1286
1287 /* Make sure we are playing with integral modes. Pun with subregs
1288 if we aren't. */
1289 {
1290 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1291 if (imode != GET_MODE (op0))
1292 {
1293 if (MEM_P (op0))
1294 op0 = adjust_address (op0, imode, 0);
1295 else if (imode != BLKmode)
1296 {
1297 op0 = gen_lowpart (imode, op0);
1298
1299 /* If we got a SUBREG, force it into a register since we
1300 aren't going to be able to do another SUBREG on it. */
1301 if (GET_CODE (op0) == SUBREG)
1302 op0 = force_reg (imode, op0);
1303 }
1304 else if (REG_P (op0))
1305 {
1306 rtx reg, subreg;
1307 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1308 MODE_INT);
1309 reg = gen_reg_rtx (imode);
1310 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1311 emit_move_insn (subreg, op0);
1312 op0 = reg;
1313 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1314 }
1315 else
1316 {
1317 rtx mem = assign_stack_temp (GET_MODE (op0),
1318 GET_MODE_SIZE (GET_MODE (op0)), 0);
1319 emit_move_insn (mem, op0);
1320 op0 = adjust_address (mem, BLKmode, 0);
1321 }
1322 }
1323 }
1324
1325 /* We may be accessing data outside the field, which means
1326 we can alias adjacent data. */
1327 if (MEM_P (op0))
1328 {
1329 op0 = shallow_copy_rtx (op0);
1330 set_mem_alias_set (op0, 0);
1331 set_mem_expr (op0, 0);
1332 }
1333
1334 /* Extraction of a full-word or multi-word value from a structure
1335 in a register or aligned memory can be done with just a SUBREG.
1336 A subword value in the least significant part of a register
1337 can also be extracted with a SUBREG. For this, we need the
1338 byte offset of the value in op0. */
1339
1340 bitpos = bitnum % unit;
1341 offset = bitnum / unit;
1342 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1343
1344 /* If OP0 is a register, BITPOS must count within a word.
1345 But as we have it, it counts within whatever size OP0 now has.
1346 On a bigendian machine, these are not the same, so convert. */
1347 if (BYTES_BIG_ENDIAN
1348 && !MEM_P (op0)
1349 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1350 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1351
1352 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1353 If that's wrong, the solution is to test for it and set TARGET to 0
1354 if needed. */
1355
1356 /* Only scalar integer modes can be converted via subregs. There is an
1357 additional problem for FP modes here in that they can have a precision
1358 which is different from the size. mode_for_size uses precision, but
1359 we want a mode based on the size, so we must avoid calling it for FP
1360 modes. */
1361 mode1 = (SCALAR_INT_MODE_P (tmode)
1362 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1363 : mode);
1364
1365 /* If the bitfield is volatile, we need to make sure the access
1366 remains on a type-aligned boundary. */
1367 if (GET_CODE (op0) == MEM
1368 && MEM_VOLATILE_P (op0)
1369 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1370 && flag_strict_volatile_bitfields > 0)
1371 goto no_subreg_mode_swap;
1372
1373 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1374 && bitpos % BITS_PER_WORD == 0)
1375 || (mode1 != BLKmode
1376 /* ??? The big endian test here is wrong. This is correct
1377 if the value is in a register, and if mode_for_size is not
1378 the same mode as op0. This causes us to get unnecessarily
1379 inefficient code from the Thumb port when -mbig-endian. */
1380 && (BYTES_BIG_ENDIAN
1381 ? bitpos + bitsize == BITS_PER_WORD
1382 : bitpos == 0)))
1383 && ((!MEM_P (op0)
1384 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1385 GET_MODE_BITSIZE (GET_MODE (op0)))
1386 && GET_MODE_SIZE (mode1) != 0
1387 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1388 || (MEM_P (op0)
1389 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1390 || (offset * BITS_PER_UNIT % bitsize == 0
1391 && MEM_ALIGN (op0) % bitsize == 0)))))
1392 {
1393 if (MEM_P (op0))
1394 op0 = adjust_address (op0, mode1, offset);
1395 else if (mode1 != GET_MODE (op0))
1396 {
1397 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1398 byte_offset);
1399 if (sub == NULL)
1400 goto no_subreg_mode_swap;
1401 op0 = sub;
1402 }
1403 if (mode1 != mode)
1404 return convert_to_mode (tmode, op0, unsignedp);
1405 return op0;
1406 }
1407 no_subreg_mode_swap:
1408
1409 /* Handle fields bigger than a word. */
1410
1411 if (bitsize > BITS_PER_WORD)
1412 {
1413 /* Here we transfer the words of the field
1414 in the order least significant first.
1415 This is because the most significant word is the one which may
1416 be less than full. */
1417
1418 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1419 unsigned int i;
1420
1421 if (target == 0 || !REG_P (target))
1422 target = gen_reg_rtx (mode);
1423
1424 /* Indicate for flow that the entire target reg is being set. */
1425 emit_clobber (target);
1426
1427 for (i = 0; i < nwords; i++)
1428 {
1429 /* If I is 0, use the low-order word in both field and target;
1430 if I is 1, use the next to lowest word; and so on. */
1431 /* Word number in TARGET to use. */
1432 unsigned int wordnum
1433 = (WORDS_BIG_ENDIAN
1434 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1435 : i);
1436 /* Offset from start of field in OP0. */
1437 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1438 ? MAX (0, ((int) bitsize - ((int) i + 1)
1439 * (int) BITS_PER_WORD))
1440 : (int) i * BITS_PER_WORD);
1441 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1442 rtx result_part
1443 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1444 bitsize - i * BITS_PER_WORD),
1445 bitnum + bit_offset, 1, false, target_part, mode,
1446 word_mode);
1447
1448 gcc_assert (target_part);
1449
1450 if (result_part != target_part)
1451 emit_move_insn (target_part, result_part);
1452 }
1453
1454 if (unsignedp)
1455 {
1456 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1457 need to be zero'd out. */
1458 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1459 {
1460 unsigned int i, total_words;
1461
1462 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1463 for (i = nwords; i < total_words; i++)
1464 emit_move_insn
1465 (operand_subword (target,
1466 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1467 1, VOIDmode),
1468 const0_rtx);
1469 }
1470 return target;
1471 }
1472
1473 /* Signed bit field: sign-extend with two arithmetic shifts. */
1474 target = expand_shift (LSHIFT_EXPR, mode, target,
1475 build_int_cst (NULL_TREE,
1476 GET_MODE_BITSIZE (mode) - bitsize),
1477 NULL_RTX, 0);
1478 return expand_shift (RSHIFT_EXPR, mode, target,
1479 build_int_cst (NULL_TREE,
1480 GET_MODE_BITSIZE (mode) - bitsize),
1481 NULL_RTX, 0);
1482 }
1483
1484 /* From here on we know the desired field is smaller than a word. */
1485
1486 /* Check if there is a correspondingly-sized integer field, so we can
1487 safely extract it as one size of integer, if necessary; then
1488 truncate or extend to the size that is wanted; then use SUBREGs or
1489 convert_to_mode to get one of the modes we really wanted. */
1490
1491 int_mode = int_mode_for_mode (tmode);
1492 if (int_mode == BLKmode)
1493 int_mode = int_mode_for_mode (mode);
1494 /* Should probably push op0 out to memory and then do a load. */
1495 gcc_assert (int_mode != BLKmode);
1496
1497 /* OFFSET is the number of words or bytes (UNIT says which)
1498 from STR_RTX to the first word or byte containing part of the field. */
1499 if (!MEM_P (op0))
1500 {
1501 if (offset != 0
1502 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1503 {
1504 if (!REG_P (op0))
1505 op0 = copy_to_reg (op0);
1506 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1507 op0, (offset * UNITS_PER_WORD));
1508 }
1509 offset = 0;
1510 }
1511
1512 /* Now OFFSET is nonzero only for memory operands. */
1513 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1514 icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
1515 if (ext_mode != MAX_MACHINE_MODE
1516 && bitsize > 0
1517 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1518 /* If op0 is a register, we need it in EXT_MODE to make it
1519 acceptable to the format of ext(z)v. */
1520 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1521 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1522 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
1523 && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
1524 {
1525 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1526 rtx bitsize_rtx, bitpos_rtx;
1527 rtx last = get_last_insn ();
1528 rtx xop0 = op0;
1529 rtx xtarget = target;
1530 rtx xspec_target = target;
1531 rtx xspec_target_subreg = 0;
1532 rtx pat;
1533
1534 /* If op0 is a register, we need it in EXT_MODE to make it
1535 acceptable to the format of ext(z)v. */
1536 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1537 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1538 if (MEM_P (xop0))
1539 /* Get ref to first byte containing part of the field. */
1540 xop0 = adjust_address (xop0, byte_mode, xoffset);
1541
1542 /* On big-endian machines, we count bits from the most significant.
1543 If the bit field insn does not, we must invert. */
1544 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1545 xbitpos = unit - bitsize - xbitpos;
1546
1547 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1548 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1549 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1550
1551 unit = GET_MODE_BITSIZE (ext_mode);
1552
1553 if (xtarget == 0)
1554 xtarget = xspec_target = gen_reg_rtx (tmode);
1555
1556 if (GET_MODE (xtarget) != ext_mode)
1557 {
1558 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1559 between the mode of the extraction (word_mode) and the target
1560 mode. Instead, create a temporary and use convert_move to set
1561 the target. */
1562 if (REG_P (xtarget)
1563 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1564 GET_MODE_BITSIZE (ext_mode)))
1565 {
1566 xtarget = gen_lowpart (ext_mode, xtarget);
1567 if (GET_MODE_SIZE (ext_mode)
1568 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1569 xspec_target_subreg = xtarget;
1570 }
1571 else
1572 xtarget = gen_reg_rtx (ext_mode);
1573 }
1574
1575 /* If this machine's ext(z)v insists on a register target,
1576 make sure we have one. */
1577 if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
1578 xtarget = gen_reg_rtx (ext_mode);
1579
1580 bitsize_rtx = GEN_INT (bitsize);
1581 bitpos_rtx = GEN_INT (xbitpos);
1582
1583 pat = (unsignedp
1584 ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
1585 : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
1586 if (pat)
1587 {
1588 emit_insn (pat);
1589 if (xtarget == xspec_target)
1590 return xtarget;
1591 if (xtarget == xspec_target_subreg)
1592 return xspec_target;
1593 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1594 }
1595 delete_insns_since (last);
1596 }
1597
1598 /* If OP0 is a memory, try copying it to a register and seeing if a
1599 cheap register alternative is available. */
1600 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1601 {
1602 enum machine_mode bestmode;
1603
 1604 /* Get the mode to use for extracting this field. If
1605 OP0 is BLKmode, get the smallest mode consistent with the
1606 alignment. If OP0 is a non-BLKmode object that is no
1607 wider than EXT_MODE, use its mode. Otherwise, use the
1608 smallest mode containing the field. */
1609
1610 if (GET_MODE (op0) == BLKmode
1611 || (ext_mode != MAX_MACHINE_MODE
1612 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1613 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1614 (ext_mode == MAX_MACHINE_MODE
1615 ? VOIDmode : ext_mode),
1616 MEM_VOLATILE_P (op0));
1617 else
1618 bestmode = GET_MODE (op0);
1619
1620 if (bestmode != VOIDmode
1621 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1622 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1623 {
1624 unsigned HOST_WIDE_INT xoffset, xbitpos;
1625
1626 /* Compute the offset as a multiple of this unit,
1627 counting in bytes. */
1628 unit = GET_MODE_BITSIZE (bestmode);
1629 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1630 xbitpos = bitnum % unit;
1631
1632 /* Make sure the register is big enough for the whole field. */
1633 if (xoffset * BITS_PER_UNIT + unit
1634 >= offset * BITS_PER_UNIT + bitsize)
1635 {
1636 rtx last, result, xop0;
1637
1638 last = get_last_insn ();
1639
1640 /* Fetch it to a register in that size. */
1641 xop0 = adjust_address (op0, bestmode, xoffset);
1642 xop0 = force_reg (bestmode, xop0);
1643 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1644 unsignedp, packedp, target,
1645 mode, tmode, false);
1646 if (result)
1647 return result;
1648
1649 delete_insns_since (last);
1650 }
1651 }
1652 }
1653
1654 if (!fallback_p)
1655 return NULL;
1656
1657 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1658 bitpos, target, unsignedp, packedp);
1659 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1660 }
1661
 1662 /* Generate code to extract a bit-field from STR_RTX
1663 containing BITSIZE bits, starting at BITNUM,
1664 and put it in TARGET if possible (if TARGET is nonzero).
1665 Regardless of TARGET, we return the rtx for where the value is placed.
1666
1667 STR_RTX is the structure containing the byte (a REG or MEM).
1668 UNSIGNEDP is nonzero if this is an unsigned bit field.
1669 PACKEDP is nonzero if the field has the packed attribute.
1670 MODE is the natural mode of the field value once extracted.
1671 TMODE is the mode the caller would like the value to have;
1672 but the value may be returned with type MODE instead.
1673
1674 If a TARGET is specified and we can store in it at no extra cost,
1675 we do so, and return TARGET.
1676 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1677 if they are equally easy. */
1678
1679 rtx
1680 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1681 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1682 rtx target, enum machine_mode mode, enum machine_mode tmode)
1683 {
1684 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1685 target, mode, tmode, true);
1686 }
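/* As a minimal usage sketch (hypothetical caller, names illustrative only):
   to fetch the unsigned 8-bit field that starts at bit 4 of an SImode
   pseudo SRC, a caller could write

     rtx src = gen_reg_rtx (SImode);
     rtx val = extract_bit_field (src, 8, 4, 1, false,
                                  NULL_RTX, QImode, QImode);

   On a target where bit 0 is the least significant bit of SRC, VAL then
   holds (src >> 4) & 0xff as a QImode rvalue; it may be a new pseudo or
   some other rtx, so the caller must use the returned value rather than
   any particular TARGET.  */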
1687 \f
1688 /* Extract a bit field using shifts and boolean operations.
1689 Returns an rtx to represent the value.
1690 OP0 addresses a register (word) or memory (byte).
1691 BITPOS says which bit within the word or byte the bit field starts in.
1692 OFFSET says how many bytes farther the bit field starts;
1693 it is 0 if OP0 is a register.
1694 BITSIZE says how many bits long the bit field is.
1695 (If OP0 is a register, it may be narrower than a full word,
1696 but BITPOS still counts within a full word,
1697 which is significant on bigendian machines.)
1698
1699 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1700 PACKEDP is true if the field has the packed attribute.
1701
1702 If TARGET is nonzero, attempts to store the value there
1703 and return TARGET, but this is not guaranteed.
1704 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1705
1706 static rtx
1707 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1708 unsigned HOST_WIDE_INT offset,
1709 unsigned HOST_WIDE_INT bitsize,
1710 unsigned HOST_WIDE_INT bitpos, rtx target,
1711 int unsignedp, bool packedp)
1712 {
1713 unsigned int total_bits = BITS_PER_WORD;
1714 enum machine_mode mode;
1715
1716 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1717 {
1718 /* Special treatment for a bit field split across two registers. */
1719 if (bitsize + bitpos > BITS_PER_WORD)
1720 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1721 }
1722 else
1723 {
1724 /* Get the proper mode to use for this field. We want a mode that
1725 includes the entire field. If such a mode would be larger than
1726 a word, we won't be doing the extraction the normal way. */
1727
1728 if (MEM_VOLATILE_P (op0)
1729 && flag_strict_volatile_bitfields > 0)
1730 {
1731 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1732 mode = GET_MODE (op0);
1733 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1734 mode = GET_MODE (target);
1735 else
1736 mode = tmode;
1737 }
1738 else
1739 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1740 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1741
1742 if (mode == VOIDmode)
1743 /* The only way this should occur is if the field spans word
1744 boundaries. */
1745 return extract_split_bit_field (op0, bitsize,
1746 bitpos + offset * BITS_PER_UNIT,
1747 unsignedp);
1748
1749 total_bits = GET_MODE_BITSIZE (mode);
1750
1751 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1752 be in the range 0 to total_bits-1, and put any excess bytes in
1753 OFFSET. */
1754 if (bitpos >= total_bits)
1755 {
1756 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1757 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1758 * BITS_PER_UNIT);
1759 }
1760
1761 /* If we're accessing a volatile MEM, we can't do the next
1762 alignment step if it results in a multi-word access where we
1763 otherwise wouldn't have one. So, check for that case
1764 here. */
1765 if (MEM_P (op0)
1766 && MEM_VOLATILE_P (op0)
1767 && flag_strict_volatile_bitfields > 0
1768 && bitpos + bitsize <= total_bits
1769 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1770 {
1771 if (STRICT_ALIGNMENT)
1772 {
1773 static bool informed_about_misalignment = false;
1774 bool warned;
1775
1776 if (packedp)
1777 {
1778 if (bitsize == total_bits)
1779 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1780 "multiple accesses to volatile structure member"
1781 " because of packed attribute");
1782 else
1783 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1784 "multiple accesses to volatile structure bitfield"
1785 " because of packed attribute");
1786
1787 return extract_split_bit_field (op0, bitsize,
1788 bitpos + offset * BITS_PER_UNIT,
1789 unsignedp);
1790 }
1791
1792 if (bitsize == total_bits)
1793 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1794 "mis-aligned access used for structure member");
1795 else
1796 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1797 "mis-aligned access used for structure bitfield");
1798
1799 if (! informed_about_misalignment && warned)
1800 {
1801 informed_about_misalignment = true;
1802 inform (input_location,
1803 "when a volatile object spans multiple type-sized locations,"
1804 " the compiler must choose between using a single mis-aligned access to"
1805 " preserve the volatility, or using multiple aligned accesses to avoid"
1806 " runtime faults; this code may fail at runtime if the hardware does"
1807 " not allow this access");
1808 }
1809 }
1810 }
1811 else
1812 {
1813
1814 /* Get ref to an aligned byte, halfword, or word containing the field.
1815 Adjust BITPOS to be position within a word,
1816 and OFFSET to be the offset of that word.
1817 Then alter OP0 to refer to that word. */
1818 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1819 offset -= (offset % (total_bits / BITS_PER_UNIT));
1820 }
1821
1822 op0 = adjust_address (op0, mode, offset);
1823 }
1824
1825 mode = GET_MODE (op0);
1826
1827 if (BYTES_BIG_ENDIAN)
1828 /* BITPOS is the distance between our msb and that of OP0.
1829 Convert it to the distance from the lsb. */
1830 bitpos = total_bits - bitsize - bitpos;
1831
1832 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1833 We have reduced the big-endian case to the little-endian case. */
1834
1835 if (unsignedp)
1836 {
1837 if (bitpos)
1838 {
1839 /* If the field does not already start at the lsb,
1840 shift it so it does. */
1841 tree amount = build_int_cst (NULL_TREE, bitpos);
1842 /* Maybe propagate the target for the shift. */
1843 /* But not if we will return it--could confuse integrate.c. */
1844 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1845 if (tmode != mode) subtarget = 0;
1846 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1847 }
1848 /* Convert the value to the desired mode. */
1849 if (mode != tmode)
1850 op0 = convert_to_mode (tmode, op0, 1);
1851
1852 /* Unless the msb of the field used to be the msb when we shifted,
1853 mask out the upper bits. */
1854
1855 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1856 return expand_binop (GET_MODE (op0), and_optab, op0,
1857 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1858 target, 1, OPTAB_LIB_WIDEN);
1859 return op0;
1860 }
1861
1862 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1863 then arithmetic-shift its lsb to the lsb of the word. */
1864 op0 = force_reg (mode, op0);
1865 if (mode != tmode)
1866 target = 0;
1867
1868 /* Find the narrowest integer mode that contains the field. */
1869
1870 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1871 mode = GET_MODE_WIDER_MODE (mode))
1872 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1873 {
1874 op0 = convert_to_mode (mode, op0, 0);
1875 break;
1876 }
1877
1878 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1879 {
1880 tree amount
1881 = build_int_cst (NULL_TREE,
1882 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1883 /* Maybe propagate the target for the shift. */
1884 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1885 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1886 }
1887
1888 return expand_shift (RSHIFT_EXPR, mode, op0,
1889 build_int_cst (NULL_TREE,
1890 GET_MODE_BITSIZE (mode) - bitsize),
1891 target, 0);
1892 }
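/* A concrete sketch of the two paths above, assuming a little-endian bit
   numbering and an 8-bit unit: take OP0 = 0xb4 (1011 0100) and a 4-bit
   field at BITPOS 2, i.e. bits 5..2 = 1101.

     unsigned:  (0xb4 >> 2) & 0xf                 = 0xd   (13)
     signed:    ((signed char) (0xb4 << 2)) >> 4  = -3

   which is the shift-then-mask sequence for the unsigned case and the
   shift-up-then-arithmetic-shift-down sequence for the signed case.  */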
1893 \f
1894 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1895 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1896 complement of that if COMPLEMENT. The mask is truncated if
1897 necessary to the width of mode MODE. The mask is zero-extended if
1898 BITSIZE+BITPOS is too small for MODE. */
1899
1900 static rtx
1901 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1902 {
1903 double_int mask;
1904
1905 mask = double_int_mask (bitsize);
1906 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1907
1908 if (complement)
1909 mask = double_int_not (mask);
1910
1911 return immed_double_int_const (mask, mode);
1912 }
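/* For example, mask_rtx (SImode, 3, 4, 0) yields the SImode constant
   0x78 (four ones shifted left by three), and mask_rtx (SImode, 3, 4, 1)
   yields its complement 0xffffff87.  */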
1913
1914 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1915 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1916
1917 static rtx
1918 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1919 {
1920 double_int val;
1921
1922 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1923 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1924
1925 return immed_double_int_const (val, mode);
1926 }
1927 \f
1928 /* Extract a bit field that is split across two words
1929 and return an RTX for the result.
1930
1931 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1932 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1933 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1934
1935 static rtx
1936 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1937 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1938 {
1939 unsigned int unit;
1940 unsigned int bitsdone = 0;
1941 rtx result = NULL_RTX;
1942 int first = 1;
1943
1944 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1945 much at a time. */
1946 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1947 unit = BITS_PER_WORD;
1948 else
1949 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1950
1951 while (bitsdone < bitsize)
1952 {
1953 unsigned HOST_WIDE_INT thissize;
1954 rtx part, word;
1955 unsigned HOST_WIDE_INT thispos;
1956 unsigned HOST_WIDE_INT offset;
1957
1958 offset = (bitpos + bitsdone) / unit;
1959 thispos = (bitpos + bitsdone) % unit;
1960
1961 /* THISSIZE must not overrun a word boundary. Otherwise,
1962 extract_fixed_bit_field will call us again, and we will mutually
1963 recurse forever. */
1964 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1965 thissize = MIN (thissize, unit - thispos);
1966
1967 /* If OP0 is a register, then handle OFFSET here.
1968
1969 When handling multiword bitfields, extract_bit_field may pass
1970 down a word_mode SUBREG of a larger REG for a bitfield that actually
1971 crosses a word boundary. Thus, for a SUBREG, we must find
1972 the current word starting from the base register. */
1973 if (GET_CODE (op0) == SUBREG)
1974 {
1975 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1976 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1977 GET_MODE (SUBREG_REG (op0)));
1978 offset = 0;
1979 }
1980 else if (REG_P (op0))
1981 {
1982 word = operand_subword_force (op0, offset, GET_MODE (op0));
1983 offset = 0;
1984 }
1985 else
1986 word = op0;
1987
1988 /* Extract the parts in bit-counting order,
1989 whose meaning is determined by BYTES_PER_UNIT.
1990 OFFSET is in UNITs, and UNIT is in bits.
1991 extract_fixed_bit_field wants offset in bytes. */
1992 part = extract_fixed_bit_field (word_mode, word,
1993 offset * unit / BITS_PER_UNIT,
1994 thissize, thispos, 0, 1, false);
1995 bitsdone += thissize;
1996
1997 /* Shift this part into place for the result. */
1998 if (BYTES_BIG_ENDIAN)
1999 {
2000 if (bitsize != bitsdone)
2001 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2002 build_int_cst (NULL_TREE, bitsize - bitsdone),
2003 0, 1);
2004 }
2005 else
2006 {
2007 if (bitsdone != thissize)
2008 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2009 build_int_cst (NULL_TREE,
2010 bitsdone - thissize), 0, 1);
2011 }
2012
2013 if (first)
2014 result = part;
2015 else
2016 /* Combine the parts with bitwise or. This works
2017 because we extracted each part as an unsigned bit field. */
2018 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2019 OPTAB_LIB_WIDEN);
2020
2021 first = 0;
2022 }
2023
2024 /* Unsigned bit field: we are done. */
2025 if (unsignedp)
2026 return result;
2027 /* Signed bit field: sign-extend with two arithmetic shifts. */
2028 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2029 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2030 NULL_RTX, 0);
2031 return expand_shift (RSHIFT_EXPR, word_mode, result,
2032 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2033 NULL_RTX, 0);
2034 }
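/* A worked example of the loop above, assuming 32-bit words and a
   little-endian bit numbering: a 10-bit field at BITPOS 28 is fetched as

     part0 = 4 bits at position 28 of word 0    (bitsdone 0 -> 4)
     part1 = 6 bits at position 0 of word 1     (bitsdone 4 -> 10)
     result = part0 | (part1 << 4)

   and, if the field is signed, the final pair of shifts moves bit 9 of
   RESULT into the sign position and arithmetic-shifts it back down.  */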
2035 \f
2036 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2037 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2038 MODE, fill the upper bits with zeros. Fail if the layout of either
2039 mode is unknown (as for CC modes) or if the extraction would involve
2040 unprofitable mode punning. Return the value on success, otherwise
2041 return null.
2042
2043 This is different from gen_lowpart* in these respects:
2044
2045 - the returned value must always be considered an rvalue
2046
2047 - when MODE is wider than SRC_MODE, the extraction involves
2048 a zero extension
2049
2050 - when MODE is smaller than SRC_MODE, the extraction involves
2051 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2052
2053 In other words, this routine performs a computation, whereas the
2054 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2055 operations. */
2056
2057 rtx
2058 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2059 {
2060 enum machine_mode int_mode, src_int_mode;
2061
2062 if (mode == src_mode)
2063 return src;
2064
2065 if (CONSTANT_P (src))
2066 {
2067 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2068 fails, it will happily create (subreg (symbol_ref)) or similar
2069 invalid SUBREGs. */
2070 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2071 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2072 if (ret)
2073 return ret;
2074
2075 if (GET_MODE (src) == VOIDmode
2076 || !validate_subreg (mode, src_mode, src, byte))
2077 return NULL_RTX;
2078
2079 src = force_reg (GET_MODE (src), src);
2080 return gen_rtx_SUBREG (mode, src, byte);
2081 }
2082
2083 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2084 return NULL_RTX;
2085
2086 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2087 && MODES_TIEABLE_P (mode, src_mode))
2088 {
2089 rtx x = gen_lowpart_common (mode, src);
2090 if (x)
2091 return x;
2092 }
2093
2094 src_int_mode = int_mode_for_mode (src_mode);
2095 int_mode = int_mode_for_mode (mode);
2096 if (src_int_mode == BLKmode || int_mode == BLKmode)
2097 return NULL_RTX;
2098
2099 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2100 return NULL_RTX;
2101 if (!MODES_TIEABLE_P (int_mode, mode))
2102 return NULL_RTX;
2103
2104 src = gen_lowpart (src_int_mode, src);
2105 src = convert_modes (int_mode, src_int_mode, src, true);
2106 src = gen_lowpart (mode, src);
2107 return src;
2108 }
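/* Informally, and assuming the usual 32-bit SImode / 64-bit DImode layout:
   extract_low_bits (SImode, DImode, x) behaves like a truncation of X,
   extract_low_bits (DImode, SImode, x) like a zero extension, and
   extract_low_bits (SImode, SFmode, x) reinterprets the float's bit
   pattern through the equal-sized integer mode, provided the modes are
   tieable on the target; in each case NULL_RTX is returned instead when
   the punning would be invalid or unprofitable.  */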
2109 \f
2110 /* Add INC into TARGET. */
2111
2112 void
2113 expand_inc (rtx target, rtx inc)
2114 {
2115 rtx value = expand_binop (GET_MODE (target), add_optab,
2116 target, inc,
2117 target, 0, OPTAB_LIB_WIDEN);
2118 if (value != target)
2119 emit_move_insn (target, value);
2120 }
2121
2122 /* Subtract DEC from TARGET. */
2123
2124 void
2125 expand_dec (rtx target, rtx dec)
2126 {
2127 rtx value = expand_binop (GET_MODE (target), sub_optab,
2128 target, dec,
2129 target, 0, OPTAB_LIB_WIDEN);
2130 if (value != target)
2131 emit_move_insn (target, value);
2132 }
2133 \f
2134 /* Output a shift instruction for expression code CODE,
2135 with SHIFTED being the rtx for the value to shift,
2136 and AMOUNT the tree for the amount to shift by.
2137 Store the result in the rtx TARGET, if that is convenient.
2138 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2139 Return the rtx for where the value is. */
2140
2141 rtx
2142 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2143 tree amount, rtx target, int unsignedp)
2144 {
2145 rtx op1, temp = 0;
2146 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2147 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2148 optab lshift_optab = ashl_optab;
2149 optab rshift_arith_optab = ashr_optab;
2150 optab rshift_uns_optab = lshr_optab;
2151 optab lrotate_optab = rotl_optab;
2152 optab rrotate_optab = rotr_optab;
2153 enum machine_mode op1_mode;
2154 int attempt;
2155 bool speed = optimize_insn_for_speed_p ();
2156
2157 op1 = expand_normal (amount);
2158 op1_mode = GET_MODE (op1);
2159
2160 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2161 shift amount is a vector, use the vector/vector shift patterns. */
2162 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2163 {
2164 lshift_optab = vashl_optab;
2165 rshift_arith_optab = vashr_optab;
2166 rshift_uns_optab = vlshr_optab;
2167 lrotate_optab = vrotl_optab;
2168 rrotate_optab = vrotr_optab;
2169 }
2170
2171 /* Previously detected shift-counts computed by NEGATE_EXPR
2172 and shifted in the other direction; but that does not work
2173 on all machines. */
2174
2175 if (SHIFT_COUNT_TRUNCATED)
2176 {
2177 if (CONST_INT_P (op1)
2178 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2179 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2180 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2181 % GET_MODE_BITSIZE (mode));
2182 else if (GET_CODE (op1) == SUBREG
2183 && subreg_lowpart_p (op1)
2184 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2185 op1 = SUBREG_REG (op1);
2186 }
2187
2188 if (op1 == const0_rtx)
2189 return shifted;
2190
2191 /* Check whether it's cheaper to implement a left shift by a constant
2192 bit count by a sequence of additions. */
2193 if (code == LSHIFT_EXPR
2194 && CONST_INT_P (op1)
2195 && INTVAL (op1) > 0
2196 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2197 && INTVAL (op1) < MAX_BITS_PER_WORD
2198 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2199 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2200 {
2201 int i;
2202 for (i = 0; i < INTVAL (op1); i++)
2203 {
2204 temp = force_reg (mode, shifted);
2205 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2206 unsignedp, OPTAB_LIB_WIDEN);
2207 }
2208 return shifted;
2209 }
2210
2211 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2212 {
2213 enum optab_methods methods;
2214
2215 if (attempt == 0)
2216 methods = OPTAB_DIRECT;
2217 else if (attempt == 1)
2218 methods = OPTAB_WIDEN;
2219 else
2220 methods = OPTAB_LIB_WIDEN;
2221
2222 if (rotate)
2223 {
2224 /* Widening does not work for rotation. */
2225 if (methods == OPTAB_WIDEN)
2226 continue;
2227 else if (methods == OPTAB_LIB_WIDEN)
2228 {
2229 /* If we have been unable to open-code this by a rotation,
2230 do it as the IOR of two shifts. I.e., to rotate A
2231 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2232 where C is the bitsize of A.
2233
2234 It is theoretically possible that the target machine might
2235 not be able to perform either shift and hence we would
2236 be making two libcalls rather than just the one for the
2237 shift (similarly if IOR could not be done). We will allow
2238 this extremely unlikely lossage to avoid complicating the
2239 code below. */
2240
2241 rtx subtarget = target == shifted ? 0 : target;
2242 tree new_amount, other_amount;
2243 rtx temp1;
2244 tree type = TREE_TYPE (amount);
2245 if (GET_MODE (op1) != TYPE_MODE (type)
2246 && GET_MODE (op1) != VOIDmode)
2247 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2248 new_amount = make_tree (type, op1);
2249 other_amount
2250 = fold_build2 (MINUS_EXPR, type,
2251 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2252 new_amount);
2253
2254 shifted = force_reg (mode, shifted);
2255
2256 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2257 mode, shifted, new_amount, 0, 1);
2258 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2259 mode, shifted, other_amount, subtarget, 1);
2260 return expand_binop (mode, ior_optab, temp, temp1, target,
2261 unsignedp, methods);
2262 }
2263
2264 temp = expand_binop (mode,
2265 left ? lrotate_optab : rrotate_optab,
2266 shifted, op1, target, unsignedp, methods);
2267 }
2268 else if (unsignedp)
2269 temp = expand_binop (mode,
2270 left ? lshift_optab : rshift_uns_optab,
2271 shifted, op1, target, unsignedp, methods);
2272
2273 /* Do arithmetic shifts.
2274 Also, if we are going to widen the operand, we can just as well
2275 use an arithmetic right-shift instead of a logical one. */
2276 if (temp == 0 && ! rotate
2277 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2278 {
2279 enum optab_methods methods1 = methods;
2280
2281 /* If trying to widen a log shift to an arithmetic shift,
2282 don't accept an arithmetic shift of the same size. */
2283 if (unsignedp)
2284 methods1 = OPTAB_MUST_WIDEN;
2285
2286 /* Arithmetic shift */
2287
2288 temp = expand_binop (mode,
2289 left ? lshift_optab : rshift_arith_optab,
2290 shifted, op1, target, unsignedp, methods1);
2291 }
2292
2293 /* We used to try extzv here for logical right shifts, but that was
2294 only useful for one machine, the VAX, and caused poor code
2295 generation there for lshrdi3, so the code was deleted and a
2296 define_expand for lshrsi3 was added to vax.md. */
2297 }
2298
2299 gcc_assert (temp);
2300 return temp;
2301 }
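/* The open-coded rotate fallback above amounts to the usual C idiom,
   sketched here for a 32-bit value and a rotate count in the range 1..31
   (names are illustrative only):

     unsigned int
     rotl32 (unsigned int a, unsigned int n)
     {
       return (a << n) | (a >> (32 - n));
     }

   i.e. (A << N) | ((unsigned) A >> (C - N)) with C the bitsize of A,
   built as an IOR of two shifts when no rotate pattern or libcall is
   available.  */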
2302 \f
2303 /* Indicates the type of fixup needed after a constant multiplication.
2304 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2305 the result should be negated, and ADD_VARIANT means that the
2306 multiplicand should be added to the result. */
2307 enum mult_variant {basic_variant, negate_variant, add_variant};
2308
2309 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2310 const struct mult_cost *, enum machine_mode mode);
2311 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2312 struct algorithm *, enum mult_variant *, int);
2313 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2314 const struct algorithm *, enum mult_variant);
2315 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2316 int, rtx *, int *, int *);
2317 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2318 static rtx extract_high_half (enum machine_mode, rtx);
2319 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2320 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2321 int, int);
2322 /* Compute and return the best algorithm for multiplying by T.
2323 The algorithm must cost less than cost_limit.
2324 If retval.cost >= COST_LIMIT, no algorithm was found and all
2325 other fields of the returned struct are undefined.
2326 MODE is the machine mode of the multiplication. */
2327
2328 static void
2329 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2330 const struct mult_cost *cost_limit, enum machine_mode mode)
2331 {
2332 int m;
2333 struct algorithm *alg_in, *best_alg;
2334 struct mult_cost best_cost;
2335 struct mult_cost new_limit;
2336 int op_cost, op_latency;
2337 unsigned HOST_WIDE_INT orig_t = t;
2338 unsigned HOST_WIDE_INT q;
2339 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2340 int hash_index;
2341 bool cache_hit = false;
2342 enum alg_code cache_alg = alg_zero;
2343 bool speed = optimize_insn_for_speed_p ();
2344
2345 /* Indicate that no algorithm is yet found. If no algorithm
2346 is found, this value will be returned and indicate failure. */
2347 alg_out->cost.cost = cost_limit->cost + 1;
2348 alg_out->cost.latency = cost_limit->latency + 1;
2349
2350 if (cost_limit->cost < 0
2351 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2352 return;
2353
2354 /* Restrict the bits of "t" to the multiplication's mode. */
2355 t &= GET_MODE_MASK (mode);
2356
2357 /* t == 1 can be done in zero cost. */
2358 if (t == 1)
2359 {
2360 alg_out->ops = 1;
2361 alg_out->cost.cost = 0;
2362 alg_out->cost.latency = 0;
2363 alg_out->op[0] = alg_m;
2364 return;
2365 }
2366
2367 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2368 fail now. */
2369 if (t == 0)
2370 {
2371 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2372 return;
2373 else
2374 {
2375 alg_out->ops = 1;
2376 alg_out->cost.cost = zero_cost[speed];
2377 alg_out->cost.latency = zero_cost[speed];
2378 alg_out->op[0] = alg_zero;
2379 return;
2380 }
2381 }
2382
2383 /* We'll be needing a couple extra algorithm structures now. */
2384
2385 alg_in = XALLOCA (struct algorithm);
2386 best_alg = XALLOCA (struct algorithm);
2387 best_cost = *cost_limit;
2388
2389 /* Compute the hash index. */
2390 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2391
2392 /* See if we already know what to do for T. */
2393 if (alg_hash[hash_index].t == t
2394 && alg_hash[hash_index].mode == mode
2396 && alg_hash[hash_index].speed == speed
2397 && alg_hash[hash_index].alg != alg_unknown)
2398 {
2399 cache_alg = alg_hash[hash_index].alg;
2400
2401 if (cache_alg == alg_impossible)
2402 {
2403 /* The cache tells us that it's impossible to synthesize
2404 multiplication by T within alg_hash[hash_index].cost. */
2405 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2406 /* COST_LIMIT is at least as restrictive as the one
2407 recorded in the hash table, in which case we have no
2408 hope of synthesizing a multiplication. Just
2409 return. */
2410 return;
2411
2412 /* If we get here, COST_LIMIT is less restrictive than the
2413 one recorded in the hash table, so we may be able to
2414 synthesize a multiplication. Proceed as if we didn't
2415 have the cache entry. */
2416 }
2417 else
2418 {
2419 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2420 /* The cached algorithm shows that this multiplication
2421 requires more cost than COST_LIMIT. Just return. This
2422 way, we don't clobber this cache entry with
2423 alg_impossible but retain useful information. */
2424 return;
2425
2426 cache_hit = true;
2427
2428 switch (cache_alg)
2429 {
2430 case alg_shift:
2431 goto do_alg_shift;
2432
2433 case alg_add_t_m2:
2434 case alg_sub_t_m2:
2435 goto do_alg_addsub_t_m2;
2436
2437 case alg_add_factor:
2438 case alg_sub_factor:
2439 goto do_alg_addsub_factor;
2440
2441 case alg_add_t2_m:
2442 goto do_alg_add_t2_m;
2443
2444 case alg_sub_t2_m:
2445 goto do_alg_sub_t2_m;
2446
2447 default:
2448 gcc_unreachable ();
2449 }
2450 }
2451 }
2452
2453 /* If we have a group of zero bits at the low-order part of T, try
2454 multiplying by the remaining bits and then doing a shift. */
2455
2456 if ((t & 1) == 0)
2457 {
2458 do_alg_shift:
2459 m = floor_log2 (t & -t); /* m = number of low zero bits */
2460 if (m < maxm)
2461 {
2462 q = t >> m;
2463 /* The function expand_shift will choose between a shift and
2464 a sequence of additions, so the observed cost is given as
2465 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2466 op_cost = m * add_cost[speed][mode];
2467 if (shift_cost[speed][mode][m] < op_cost)
2468 op_cost = shift_cost[speed][mode][m];
2469 new_limit.cost = best_cost.cost - op_cost;
2470 new_limit.latency = best_cost.latency - op_cost;
2471 synth_mult (alg_in, q, &new_limit, mode);
2472
2473 alg_in->cost.cost += op_cost;
2474 alg_in->cost.latency += op_cost;
2475 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2476 {
2477 struct algorithm *x;
2478 best_cost = alg_in->cost;
2479 x = alg_in, alg_in = best_alg, best_alg = x;
2480 best_alg->log[best_alg->ops] = m;
2481 best_alg->op[best_alg->ops] = alg_shift;
2482 }
2483
2484 /* See if treating ORIG_T as a signed number yields a better
2485 sequence. Try this sequence only for a negative ORIG_T
2486 as it would be useless for a non-negative ORIG_T. */
2487 if ((HOST_WIDE_INT) orig_t < 0)
2488 {
2489 /* Shift ORIG_T as follows because a right shift of a
2490 negative-valued signed type is implementation
2491 defined. */
2492 q = ~(~orig_t >> m);
2493 /* The function expand_shift will choose between a shift
2494 and a sequence of additions, so the observed cost is
2495 given as MIN (m * add_cost[speed][mode],
2496 shift_cost[speed][mode][m]). */
2497 op_cost = m * add_cost[speed][mode];
2498 if (shift_cost[speed][mode][m] < op_cost)
2499 op_cost = shift_cost[speed][mode][m];
2500 new_limit.cost = best_cost.cost - op_cost;
2501 new_limit.latency = best_cost.latency - op_cost;
2502 synth_mult (alg_in, q, &new_limit, mode);
2503
2504 alg_in->cost.cost += op_cost;
2505 alg_in->cost.latency += op_cost;
2506 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2507 {
2508 struct algorithm *x;
2509 best_cost = alg_in->cost;
2510 x = alg_in, alg_in = best_alg, best_alg = x;
2511 best_alg->log[best_alg->ops] = m;
2512 best_alg->op[best_alg->ops] = alg_shift;
2513 }
2514 }
2515 }
2516 if (cache_hit)
2517 goto done;
2518 }
2519
2520 /* If we have an odd number, add or subtract one. */
2521 if ((t & 1) != 0)
2522 {
2523 unsigned HOST_WIDE_INT w;
2524
2525 do_alg_addsub_t_m2:
2526 for (w = 1; (w & t) != 0; w <<= 1)
2527 ;
2528 /* If T was -1, then W will be zero after the loop. This is another
2529 case where T ends with ...111. Handling this with (T + 1) and
2530 subtracting 1 produces slightly better code and results in much
2531 faster algorithm selection than treating it like the ...0111 case
2532 below. */
2533 if (w == 0
2534 || (w > 2
2535 /* Reject the case where t is 3.
2536 Thus we prefer addition in that case. */
2537 && t != 3))
2538 {
2539 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2540
2541 op_cost = add_cost[speed][mode];
2542 new_limit.cost = best_cost.cost - op_cost;
2543 new_limit.latency = best_cost.latency - op_cost;
2544 synth_mult (alg_in, t + 1, &new_limit, mode);
2545
2546 alg_in->cost.cost += op_cost;
2547 alg_in->cost.latency += op_cost;
2548 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2549 {
2550 struct algorithm *x;
2551 best_cost = alg_in->cost;
2552 x = alg_in, alg_in = best_alg, best_alg = x;
2553 best_alg->log[best_alg->ops] = 0;
2554 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2555 }
2556 }
2557 else
2558 {
2559 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2560
2561 op_cost = add_cost[speed][mode];
2562 new_limit.cost = best_cost.cost - op_cost;
2563 new_limit.latency = best_cost.latency - op_cost;
2564 synth_mult (alg_in, t - 1, &new_limit, mode);
2565
2566 alg_in->cost.cost += op_cost;
2567 alg_in->cost.latency += op_cost;
2568 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2569 {
2570 struct algorithm *x;
2571 best_cost = alg_in->cost;
2572 x = alg_in, alg_in = best_alg, best_alg = x;
2573 best_alg->log[best_alg->ops] = 0;
2574 best_alg->op[best_alg->ops] = alg_add_t_m2;
2575 }
2576 }
2577
2578 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2579 quickly with a - a * n for some appropriate constant n. */
2580 m = exact_log2 (-orig_t + 1);
2581 if (m >= 0 && m < maxm)
2582 {
2583 op_cost = shiftsub1_cost[speed][mode][m];
2584 new_limit.cost = best_cost.cost - op_cost;
2585 new_limit.latency = best_cost.latency - op_cost;
2586 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2587
2588 alg_in->cost.cost += op_cost;
2589 alg_in->cost.latency += op_cost;
2590 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2591 {
2592 struct algorithm *x;
2593 best_cost = alg_in->cost;
2594 x = alg_in, alg_in = best_alg, best_alg = x;
2595 best_alg->log[best_alg->ops] = m;
2596 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2597 }
2598 }
2599
2600 if (cache_hit)
2601 goto done;
2602 }
2603
2604 /* Look for factors of t of the form
2605 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2606 If we find such a factor, we can multiply by t using an algorithm that
2607 multiplies by q, shift the result by m and add/subtract it to itself.
2608
2609 We search for large factors first and loop down, even if large factors
2610 are less probable than small; if we find a large factor we will find a
2611 good sequence quickly, and therefore be able to prune (by decreasing
2612 COST_LIMIT) the search. */
2613
2614 do_alg_addsub_factor:
2615 for (m = floor_log2 (t - 1); m >= 2; m--)
2616 {
2617 unsigned HOST_WIDE_INT d;
2618
2619 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2620 if (t % d == 0 && t > d && m < maxm
2621 && (!cache_hit || cache_alg == alg_add_factor))
2622 {
2623 /* If the target has a cheap shift-and-add instruction use
2624 that in preference to a shift insn followed by an add insn.
2625 Assume that the shift-and-add is "atomic" with a latency
2626 equal to its cost, otherwise assume that on superscalar
2627 hardware the shift may be executed concurrently with the
2628 earlier steps in the algorithm. */
2629 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2630 if (shiftadd_cost[speed][mode][m] < op_cost)
2631 {
2632 op_cost = shiftadd_cost[speed][mode][m];
2633 op_latency = op_cost;
2634 }
2635 else
2636 op_latency = add_cost[speed][mode];
2637
2638 new_limit.cost = best_cost.cost - op_cost;
2639 new_limit.latency = best_cost.latency - op_latency;
2640 synth_mult (alg_in, t / d, &new_limit, mode);
2641
2642 alg_in->cost.cost += op_cost;
2643 alg_in->cost.latency += op_latency;
2644 if (alg_in->cost.latency < op_cost)
2645 alg_in->cost.latency = op_cost;
2646 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2647 {
2648 struct algorithm *x;
2649 best_cost = alg_in->cost;
2650 x = alg_in, alg_in = best_alg, best_alg = x;
2651 best_alg->log[best_alg->ops] = m;
2652 best_alg->op[best_alg->ops] = alg_add_factor;
2653 }
2654 /* Other factors will have been taken care of in the recursion. */
2655 break;
2656 }
2657
2658 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2659 if (t % d == 0 && t > d && m < maxm
2660 && (!cache_hit || cache_alg == alg_sub_factor))
2661 {
2662 /* If the target has a cheap shift-and-subtract insn use
2663 that in preference to a shift insn followed by a sub insn.
2664 Assume that the shift-and-sub is "atomic" with a latency
2665 equal to its cost, otherwise assume that on superscalar
2666 hardware the shift may be executed concurrently with the
2667 earlier steps in the algorithm. */
2668 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2669 if (shiftsub0_cost[speed][mode][m] < op_cost)
2670 {
2671 op_cost = shiftsub0_cost[speed][mode][m];
2672 op_latency = op_cost;
2673 }
2674 else
2675 op_latency = add_cost[speed][mode];
2676
2677 new_limit.cost = best_cost.cost - op_cost;
2678 new_limit.latency = best_cost.latency - op_latency;
2679 synth_mult (alg_in, t / d, &new_limit, mode);
2680
2681 alg_in->cost.cost += op_cost;
2682 alg_in->cost.latency += op_latency;
2683 if (alg_in->cost.latency < op_cost)
2684 alg_in->cost.latency = op_cost;
2685 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2686 {
2687 struct algorithm *x;
2688 best_cost = alg_in->cost;
2689 x = alg_in, alg_in = best_alg, best_alg = x;
2690 best_alg->log[best_alg->ops] = m;
2691 best_alg->op[best_alg->ops] = alg_sub_factor;
2692 }
2693 break;
2694 }
2695 }
2696 if (cache_hit)
2697 goto done;
2698
2699 /* Try shift-and-add (load effective address) instructions,
2700 i.e. do a*3, a*5, a*9. */
2701 if ((t & 1) != 0)
2702 {
2703 do_alg_add_t2_m:
2704 q = t - 1;
2705 q = q & -q;
2706 m = exact_log2 (q);
2707 if (m >= 0 && m < maxm)
2708 {
2709 op_cost = shiftadd_cost[speed][mode][m];
2710 new_limit.cost = best_cost.cost - op_cost;
2711 new_limit.latency = best_cost.latency - op_cost;
2712 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2713
2714 alg_in->cost.cost += op_cost;
2715 alg_in->cost.latency += op_cost;
2716 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2717 {
2718 struct algorithm *x;
2719 best_cost = alg_in->cost;
2720 x = alg_in, alg_in = best_alg, best_alg = x;
2721 best_alg->log[best_alg->ops] = m;
2722 best_alg->op[best_alg->ops] = alg_add_t2_m;
2723 }
2724 }
2725 if (cache_hit)
2726 goto done;
2727
2728 do_alg_sub_t2_m:
2729 q = t + 1;
2730 q = q & -q;
2731 m = exact_log2 (q);
2732 if (m >= 0 && m < maxm)
2733 {
2734 op_cost = shiftsub0_cost[speed][mode][m];
2735 new_limit.cost = best_cost.cost - op_cost;
2736 new_limit.latency = best_cost.latency - op_cost;
2737 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2738
2739 alg_in->cost.cost += op_cost;
2740 alg_in->cost.latency += op_cost;
2741 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2742 {
2743 struct algorithm *x;
2744 best_cost = alg_in->cost;
2745 x = alg_in, alg_in = best_alg, best_alg = x;
2746 best_alg->log[best_alg->ops] = m;
2747 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2748 }
2749 }
2750 if (cache_hit)
2751 goto done;
2752 }
2753
2754 done:
2755 /* If best_cost has not decreased, we have not found any algorithm. */
2756 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2757 {
2758 /* We failed to find an algorithm. Record alg_impossible for
2759 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2760 we are asked to find an algorithm for T within the same or
2761 lower COST_LIMIT, we can immediately return to the
2762 caller. */
2763 alg_hash[hash_index].t = t;
2764 alg_hash[hash_index].mode = mode;
2765 alg_hash[hash_index].speed = speed;
2766 alg_hash[hash_index].alg = alg_impossible;
2767 alg_hash[hash_index].cost = *cost_limit;
2768 return;
2769 }
2770
2771 /* Cache the result. */
2772 if (!cache_hit)
2773 {
2774 alg_hash[hash_index].t = t;
2775 alg_hash[hash_index].mode = mode;
2776 alg_hash[hash_index].speed = speed;
2777 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2778 alg_hash[hash_index].cost.cost = best_cost.cost;
2779 alg_hash[hash_index].cost.latency = best_cost.latency;
2780 }
2781
2782 /* If we are getting too long a sequence for `struct algorithm'
2783 to record, make this search fail. */
2784 if (best_alg->ops == MAX_BITS_PER_WORD)
2785 return;
2786
2787 /* Copy the algorithm from temporary space to the space at alg_out.
2788 We avoid using structure assignment because the majority of
2789 best_alg is normally undefined, and this is a critical function. */
2790 alg_out->ops = best_alg->ops + 1;
2791 alg_out->cost = best_cost;
2792 memcpy (alg_out->op, best_alg->op,
2793 alg_out->ops * sizeof *alg_out->op);
2794 memcpy (alg_out->log, best_alg->log,
2795 alg_out->ops * sizeof *alg_out->log);
2796 }
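/* A worked example of the search above: for t = 45 one cheap recipe it
   can find uses the factorization 45 = 3 * 15, with 15 of the 2**m - 1
   form, so that each step is a single shift-and-add / shift-and-subtract:

     x3  = (x << 1) + x        alg_add_t2_m,   log = 1
     x45 = (x3 << 4) - x3      alg_sub_factor, log = 4   (15 = 2**4 - 1)

   Whether this (or some other) recipe is actually returned, and whether
   it beats the hardware multiply, depends on the add/shift/shift-add
   costs for MODE and on COST_LIMIT.  */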
2797 \f
2798 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2799 Try three variations:
2800
2801 - a shift/add sequence based on VAL itself
2802 - a shift/add sequence based on -VAL, followed by a negation
2803 - a shift/add sequence based on VAL - 1, followed by an addition.
2804
2805 Return true if the cheapest of these cost less than MULT_COST,
2806 describing the algorithm in *ALG and final fixup in *VARIANT. */
2807
2808 static bool
2809 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2810 struct algorithm *alg, enum mult_variant *variant,
2811 int mult_cost)
2812 {
2813 struct algorithm alg2;
2814 struct mult_cost limit;
2815 int op_cost;
2816 bool speed = optimize_insn_for_speed_p ();
2817
2818 /* Fail quickly for impossible bounds. */
2819 if (mult_cost < 0)
2820 return false;
2821
2822 /* Ensure that mult_cost provides a reasonable upper bound.
2823 Any constant multiplication can be performed with less
2824 than 2 * bits additions. */
2825 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2826 if (mult_cost > op_cost)
2827 mult_cost = op_cost;
2828
2829 *variant = basic_variant;
2830 limit.cost = mult_cost;
2831 limit.latency = mult_cost;
2832 synth_mult (alg, val, &limit, mode);
2833
2834 /* This works only if the inverted value actually fits in an
2835 `unsigned int'. */
2836 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2837 {
2838 op_cost = neg_cost[speed][mode];
2839 if (MULT_COST_LESS (&alg->cost, mult_cost))
2840 {
2841 limit.cost = alg->cost.cost - op_cost;
2842 limit.latency = alg->cost.latency - op_cost;
2843 }
2844 else
2845 {
2846 limit.cost = mult_cost - op_cost;
2847 limit.latency = mult_cost - op_cost;
2848 }
2849
2850 synth_mult (&alg2, -val, &limit, mode);
2851 alg2.cost.cost += op_cost;
2852 alg2.cost.latency += op_cost;
2853 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2854 *alg = alg2, *variant = negate_variant;
2855 }
2856
2857 /* This proves very useful for division-by-constant. */
2858 op_cost = add_cost[speed][mode];
2859 if (MULT_COST_LESS (&alg->cost, mult_cost))
2860 {
2861 limit.cost = alg->cost.cost - op_cost;
2862 limit.latency = alg->cost.latency - op_cost;
2863 }
2864 else
2865 {
2866 limit.cost = mult_cost - op_cost;
2867 limit.latency = mult_cost - op_cost;
2868 }
2869
2870 synth_mult (&alg2, val - 1, &limit, mode);
2871 alg2.cost.cost += op_cost;
2872 alg2.cost.latency += op_cost;
2873 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2874 *alg = alg2, *variant = add_variant;
2875
2876 return MULT_COST_LESS (&alg->cost, mult_cost);
2877 }
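/* For instance, in a 32-bit mode the constant 0xfffffffd is x * -3 modulo
   2**32, so synthesizing the multiplication by 3 (a single shift-add) and
   negating the result is normally far cheaper than attacking the bit
   pattern of 0xfffffffd directly; when the costs work out that way this
   function reports negate_variant.  The VAL - 1 attempt exists mainly for
   the multipliers produced by division-by-constant, as noted above.  */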
2878
2879 /* A subroutine of expand_mult, used for constant multiplications.
2880 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2881 convenient. Use the shift/add sequence described by ALG and apply
2882 the final fixup specified by VARIANT. */
2883
2884 static rtx
2885 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2886 rtx target, const struct algorithm *alg,
2887 enum mult_variant variant)
2888 {
2889 HOST_WIDE_INT val_so_far;
2890 rtx insn, accum, tem;
2891 int opno;
2892 enum machine_mode nmode;
2893
2894 /* Avoid referencing memory over and over and invalid sharing
2895 on SUBREGs. */
2896 op0 = force_reg (mode, op0);
2897
2898 /* ACCUM starts out either as OP0 or as a zero, depending on
2899 the first operation. */
2900
2901 if (alg->op[0] == alg_zero)
2902 {
2903 accum = copy_to_mode_reg (mode, const0_rtx);
2904 val_so_far = 0;
2905 }
2906 else if (alg->op[0] == alg_m)
2907 {
2908 accum = copy_to_mode_reg (mode, op0);
2909 val_so_far = 1;
2910 }
2911 else
2912 gcc_unreachable ();
2913
2914 for (opno = 1; opno < alg->ops; opno++)
2915 {
2916 int log = alg->log[opno];
2917 rtx shift_subtarget = optimize ? 0 : accum;
2918 rtx add_target
2919 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2920 && !optimize)
2921 ? target : 0;
2922 rtx accum_target = optimize ? 0 : accum;
2923
2924 switch (alg->op[opno])
2925 {
2926 case alg_shift:
2927 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2928 build_int_cst (NULL_TREE, log),
2929 NULL_RTX, 0);
2930 /* REG_EQUAL note will be attached to the following insn. */
2931 emit_move_insn (accum, tem);
2932 val_so_far <<= log;
2933 break;
2934
2935 case alg_add_t_m2:
2936 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2937 build_int_cst (NULL_TREE, log),
2938 NULL_RTX, 0);
2939 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2940 add_target ? add_target : accum_target);
2941 val_so_far += (HOST_WIDE_INT) 1 << log;
2942 break;
2943
2944 case alg_sub_t_m2:
2945 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2946 build_int_cst (NULL_TREE, log),
2947 NULL_RTX, 0);
2948 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2949 add_target ? add_target : accum_target);
2950 val_so_far -= (HOST_WIDE_INT) 1 << log;
2951 break;
2952
2953 case alg_add_t2_m:
2954 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2955 build_int_cst (NULL_TREE, log),
2956 shift_subtarget,
2957 0);
2958 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2959 add_target ? add_target : accum_target);
2960 val_so_far = (val_so_far << log) + 1;
2961 break;
2962
2963 case alg_sub_t2_m:
2964 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2965 build_int_cst (NULL_TREE, log),
2966 shift_subtarget, 0);
2967 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2968 add_target ? add_target : accum_target);
2969 val_so_far = (val_so_far << log) - 1;
2970 break;
2971
2972 case alg_add_factor:
2973 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2974 build_int_cst (NULL_TREE, log),
2975 NULL_RTX, 0);
2976 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2977 add_target ? add_target : accum_target);
2978 val_so_far += val_so_far << log;
2979 break;
2980
2981 case alg_sub_factor:
2982 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2983 build_int_cst (NULL_TREE, log),
2984 NULL_RTX, 0);
2985 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2986 (add_target
2987 ? add_target : (optimize ? 0 : tem)));
2988 val_so_far = (val_so_far << log) - val_so_far;
2989 break;
2990
2991 default:
2992 gcc_unreachable ();
2993 }
2994
2995 /* Write a REG_EQUAL note on the last insn so that we can cse
2996 multiplication sequences. Note that if ACCUM is a SUBREG,
2997 we've set the inner register and must properly indicate
2998 that. */
2999
3000 tem = op0, nmode = mode;
3001 if (GET_CODE (accum) == SUBREG)
3002 {
3003 nmode = GET_MODE (SUBREG_REG (accum));
3004 tem = gen_lowpart (nmode, op0);
3005 }
3006
3007 insn = get_last_insn ();
3008 set_unique_reg_note (insn, REG_EQUAL,
3009 gen_rtx_MULT (nmode, tem,
3010 GEN_INT (val_so_far)));
3011 }
3012
3013 if (variant == negate_variant)
3014 {
3015 val_so_far = -val_so_far;
3016 accum = expand_unop (mode, neg_optab, accum, target, 0);
3017 }
3018 else if (variant == add_variant)
3019 {
3020 val_so_far = val_so_far + 1;
3021 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3022 }
3023
3024 /* Compare only the bits of val and val_so_far that are significant
3025 in the result mode, to avoid sign-/zero-extension confusion. */
3026 val &= GET_MODE_MASK (mode);
3027 val_so_far &= GET_MODE_MASK (mode);
3028 gcc_assert (val == val_so_far);
3029
3030 return accum;
3031 }
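/* Continuing the t = 45 sketch from synth_mult, the loop above would
   execute

     op[0] = alg_m                   accum = x     val_so_far = 1
     op[1] = alg_add_t2_m   (log 1)  accum = 3x    val_so_far = 3
     op[2] = alg_sub_factor (log 4)  accum = 45x   val_so_far = 45

   attaching REG_EQUAL notes of (mult x 3) and then (mult x 45) so later
   passes can still see the multiplication, and the final assertion checks
   that VAL_SO_FAR reproduced VAL modulo the mode mask.  */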
3032
3033 /* Perform a multiplication and return an rtx for the result.
3034 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3035 TARGET is a suggestion for where to store the result (an rtx).
3036
3037 We check specially for a constant integer as OP1.
3038 If you want this check for OP0 as well, then before calling
3039 you should swap the two operands if OP0 would be constant. */
3040
3041 rtx
3042 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3043 int unsignedp)
3044 {
3045 enum mult_variant variant;
3046 struct algorithm algorithm;
3047 int max_cost;
3048 bool speed = optimize_insn_for_speed_p ();
3049
3050 /* Handling const0_rtx here allows us to use zero as a rogue value for
3051 coeff below. */
3052 if (op1 == const0_rtx)
3053 return const0_rtx;
3054 if (op1 == const1_rtx)
3055 return op0;
3056 if (op1 == constm1_rtx)
3057 return expand_unop (mode,
3058 GET_MODE_CLASS (mode) == MODE_INT
3059 && !unsignedp && flag_trapv
3060 ? negv_optab : neg_optab,
3061 op0, target, 0);
3062
3063 /* These are the operations that are potentially turned into a sequence
3064 of shifts and additions. */
3065 if (SCALAR_INT_MODE_P (mode)
3066 && (unsignedp || !flag_trapv))
3067 {
3068 HOST_WIDE_INT coeff = 0;
3069 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3070
3071 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3072 less than or equal in size to `unsigned int' this doesn't matter.
3073 If the mode is larger than `unsigned int', then synth_mult works
3074 only if the constant value exactly fits in an `unsigned int' without
3075 any truncation. This means that multiplying by negative values does
3076 not work; results are off by 2^32 on a 32 bit machine. */
3077
3078 if (CONST_INT_P (op1))
3079 {
3080 /* Attempt to handle multiplication of DImode values by negative
3081 coefficients, by performing the multiplication by a positive
3082 multiplier and then negating the result. */
3083 if (INTVAL (op1) < 0
3084 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3085 {
3086 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3087 result is interpreted as an unsigned coefficient.
3088 Exclude cost of op0 from max_cost to match the cost
3089 calculation of the synth_mult. */
3090 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3091 - neg_cost[speed][mode];
3092 if (max_cost > 0
3093 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3094 &variant, max_cost))
3095 {
3096 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3097 NULL_RTX, &algorithm,
3098 variant);
3099 return expand_unop (mode, neg_optab, temp, target, 0);
3100 }
3101 }
3102 else coeff = INTVAL (op1);
3103 }
3104 else if (GET_CODE (op1) == CONST_DOUBLE)
3105 {
3106 /* If we are multiplying in DImode, it may still be a win
3107 to try to work with shifts and adds. */
3108 if (CONST_DOUBLE_HIGH (op1) == 0
3109 && CONST_DOUBLE_LOW (op1) > 0)
3110 coeff = CONST_DOUBLE_LOW (op1);
3111 else if (CONST_DOUBLE_LOW (op1) == 0
3112 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3113 {
3114 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3115 + HOST_BITS_PER_WIDE_INT;
3116 return expand_shift (LSHIFT_EXPR, mode, op0,
3117 build_int_cst (NULL_TREE, shift),
3118 target, unsignedp);
3119 }
3120 }
3121
3122 /* We used to test optimize here, on the grounds that it's better to
3123 produce a smaller program when -O is not used. But this causes
3124 such a terrible slowdown sometimes that it seems better to always
3125 use synth_mult. */
3126 if (coeff != 0)
3127 {
3128 /* Special case powers of two. */
3129 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3130 return expand_shift (LSHIFT_EXPR, mode, op0,
3131 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3132 target, unsignedp);
3133
3134 /* Exclude cost of op0 from max_cost to match the cost
3135 calculation of the synth_mult. */
3136 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3137 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3138 max_cost))
3139 return expand_mult_const (mode, op0, coeff, target,
3140 &algorithm, variant);
3141 }
3142 }
3143
3144 if (GET_CODE (op0) == CONST_DOUBLE)
3145 {
3146 rtx temp = op0;
3147 op0 = op1;
3148 op1 = temp;
3149 }
3150
3151 /* Expand x*2.0 as x+x. */
3152 if (GET_CODE (op1) == CONST_DOUBLE
3153 && SCALAR_FLOAT_MODE_P (mode))
3154 {
3155 REAL_VALUE_TYPE d;
3156 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3157
3158 if (REAL_VALUES_EQUAL (d, dconst2))
3159 {
3160 op0 = force_reg (GET_MODE (op0), op0);
3161 return expand_binop (mode, add_optab, op0, op0,
3162 target, unsignedp, OPTAB_LIB_WIDEN);
3163 }
3164 }
3165
3166 /* This used to use umul_optab if unsigned, but for non-widening multiply
3167 there is no difference between signed and unsigned. */
3168 op0 = expand_binop (mode,
3169 ! unsignedp
3170 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3171 ? smulv_optab : smul_optab,
3172 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3173 gcc_assert (op0);
3174 return op0;
3175 }
3176
3177 /* Perform a widening multiplication and return an rtx for the result.
3178 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3179 TARGET is a suggestion for where to store the result (an rtx).
3180 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3181 or smul_widen_optab.
3182
3183 We check specially for a constant integer as OP1, comparing the
3184 cost of a widening multiply against the cost of a sequence of shifts
3185 and adds. */
3186
3187 rtx
3188 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3189 int unsignedp, optab this_optab)
3190 {
3191 bool speed = optimize_insn_for_speed_p ();
3192 rtx cop1;
3193
3194 if (CONST_INT_P (op1)
3195 && GET_MODE (op0) != VOIDmode
3196 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3197 this_optab == umul_widen_optab))
3198 && CONST_INT_P (cop1)
3199 && (INTVAL (cop1) >= 0
3200 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3201 {
3202 HOST_WIDE_INT coeff = INTVAL (cop1);
3203 int max_cost;
3204 enum mult_variant variant;
3205 struct algorithm algorithm;
3206
3207 /* Special case powers of two. */
3208 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3209 {
3210 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3211 return expand_shift (LSHIFT_EXPR, mode, op0,
3212 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3213 target, unsignedp);
3214 }
3215
3216 /* Exclude cost of op0 from max_cost to match the cost
3217 calculation of the synth_mult. */
3218 max_cost = mul_widen_cost[speed][mode];
3219 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3220 max_cost))
3221 {
3222 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3223 return expand_mult_const (mode, op0, coeff, target,
3224 &algorithm, variant);
3225 }
3226 }
3227 return expand_binop (mode, this_optab, op0, op1, target,
3228 unsignedp, OPTAB_LIB_WIDEN);
3229 }
3230 \f
3231 /* Return the smallest n such that 2**n >= X. */
3232
3233 int
3234 ceil_log2 (unsigned HOST_WIDE_INT x)
3235 {
3236 return floor_log2 (x - 1) + 1;
3237 }
3238
3239 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3240 replace division by D, and put the least significant N bits of the result
3241 in *MULTIPLIER_PTR and return the most significant bit.
3242
3243 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3244 needed precision is in PRECISION (should be <= N).
3245
3246 PRECISION should be as small as possible so this function can choose
3247 multiplier more freely.
3248
3249 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3250 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3251
3252 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3253 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3254
3255 static
3256 unsigned HOST_WIDE_INT
3257 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3258 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3259 {
3260 HOST_WIDE_INT mhigh_hi, mlow_hi;
3261 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3262 int lgup, post_shift;
3263 int pow, pow2;
3264 unsigned HOST_WIDE_INT nl, dummy1;
3265 HOST_WIDE_INT nh, dummy2;
3266
3267 /* lgup = ceil(log2(divisor)); */
3268 lgup = ceil_log2 (d);
3269
3270 gcc_assert (lgup <= n);
3271
3272 pow = n + lgup;
3273 pow2 = n + lgup - precision;
3274
3275 /* We could handle this with some effort, but this case is much
3276 better handled directly with a scc insn, so rely on caller using
3277 that. */
3278 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3279
3280 /* mlow = 2^(N + lgup)/d */
3281 if (pow >= HOST_BITS_PER_WIDE_INT)
3282 {
3283 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3284 nl = 0;
3285 }
3286 else
3287 {
3288 nh = 0;
3289 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3290 }
3291 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3292 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3293
3294 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3295 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3296 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3297 else
3298 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3299 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3300 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3301
3302 gcc_assert (!mhigh_hi || nh - d < d);
3303 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3304 /* Assert that mlow < mhigh. */
3305 gcc_assert (mlow_hi < mhigh_hi
3306 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3307
3308 /* If precision == N, then mlow, mhigh exceed 2^N
3309 (but they do not exceed 2^(N+1)). */
3310
3311 /* Reduce to lowest terms. */
3312 for (post_shift = lgup; post_shift > 0; post_shift--)
3313 {
3314 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3315 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3316 if (ml_lo >= mh_lo)
3317 break;
3318
3319 mlow_hi = 0;
3320 mlow_lo = ml_lo;
3321 mhigh_hi = 0;
3322 mhigh_lo = mh_lo;
3323 }
3324
3325 *post_shift_ptr = post_shift;
3326 *lgup_ptr = lgup;
3327 if (n < HOST_BITS_PER_WIDE_INT)
3328 {
3329 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3330 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3331 return mhigh_lo >= mask;
3332 }
3333 else
3334 {
3335 *multiplier_ptr = GEN_INT (mhigh_lo);
3336 return mhigh_hi;
3337 }
3338 }
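/* A worked example, assuming n = precision = 32 and a 64-bit
   HOST_WIDE_INT: for d = 3 we get lgup = 2, mlow = 2**34 / 3 and
   mhigh = (2**34 + 2**2) / 3, which the reduction loop shrinks to the
   multiplier 0xaaaaaaab with *POST_SHIFT_PTR = 1 and a zero return value
   (the multiplier already fits in 32 bits).  So, for a 32-bit unsigned X,

     x / 3 == (64-bit product x * 0xaaaaaaab) >> 33

   i.e. the high half of the widening multiply, shifted right once more.  */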
3339
3340 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3341 congruent to 1 (mod 2**N). */
3342
3343 static unsigned HOST_WIDE_INT
3344 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3345 {
3346 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3347
3348 /* The algorithm notes that the choice y = x satisfies
3349 x*y == 1 mod 2^3, since x is assumed odd.
3350 Each iteration doubles the number of bits of significance in y. */
3351
3352 unsigned HOST_WIDE_INT mask;
3353 unsigned HOST_WIDE_INT y = x;
3354 int nbit = 3;
3355
3356 mask = (n == HOST_BITS_PER_WIDE_INT
3357 ? ~(unsigned HOST_WIDE_INT) 0
3358 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3359
3360 while (nbit < n)
3361 {
3362 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3363 nbit *= 2;
3364 }
3365 return y;
3366 }
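/* For instance, invert_mod2n (7, 32) is 0xb6db6db7, since
   7 * 0xb6db6db7 == 5 * 2^32 + 1. The EXACT_DIV_EXPR case of expand_divmod
   relies on this: when X is known to be a multiple of 7, X / 7 is just the
   low-part multiplication (X * 0xb6db6db7) mod 2^32, with no shift or
   correction step. As a C sketch (hypothetical helper, 32-bit unsigned int
   assumed):

     unsigned int
     exact_div7 (unsigned int x)   /* X must be a multiple of 7.  */
     {
       return x * 0xb6db6db7u;
     }
 */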
3367
3368 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1 done
3369 in the wrong signedness flavor. ADJ_OPERAND is already the high half of the
3370 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3371 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3372 become signed.
3373
3374 The result is put in TARGET if that is convenient.
3375
3376 MODE is the mode of operation. */
3377
3378 rtx
3379 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3380 rtx op1, rtx target, int unsignedp)
3381 {
3382 rtx tem;
3383 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3384
3385 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3386 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3387 NULL_RTX, 0);
3388 tem = expand_and (mode, tem, op1, NULL_RTX);
3389 adj_operand
3390 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3391 adj_operand);
3392
3393 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3394 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3395 NULL_RTX, 0);
3396 tem = expand_and (mode, tem, op0, NULL_RTX);
3397 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3398 target);
3399
3400 return target;
3401 }
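/* The identity behind this adjustment: if Xs and Xu denote the signed and
   unsigned readings of an N-bit value X (so Xu = Xs + 2^N when Xs < 0),
   then modulo 2^N

     highpart_u (X, Y) = highpart_s (X, Y) + (Xs < 0 ? Y : 0) + (Ys < 0 ? X : 0).

   The two correction terms are exactly the (X >> (N-1)) & Y and
   (Y >> (N-1)) & X values formed with the arithmetic shifts above; they are
   added when converting a signed product to unsigned, and subtracted for the
   reverse direction.  */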
3402
3403 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3404
3405 static rtx
3406 extract_high_half (enum machine_mode mode, rtx op)
3407 {
3408 enum machine_mode wider_mode;
3409
3410 if (mode == word_mode)
3411 return gen_highpart (mode, op);
3412
3413 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3414
3415 wider_mode = GET_MODE_WIDER_MODE (mode);
3416 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3417 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3418 return convert_modes (mode, wider_mode, op, 0);
3419 }
3420
3421 /* Like expand_mult_highpart, but only consider using a multiplication
3422 optab. OP1 is an rtx for the constant operand. */
3423
3424 static rtx
3425 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3426 rtx target, int unsignedp, int max_cost)
3427 {
3428 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3429 enum machine_mode wider_mode;
3430 optab moptab;
3431 rtx tem;
3432 int size;
3433 bool speed = optimize_insn_for_speed_p ();
3434
3435 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3436
3437 wider_mode = GET_MODE_WIDER_MODE (mode);
3438 size = GET_MODE_BITSIZE (mode);
3439
3440 /* Firstly, try using a multiplication insn that only generates the needed
3441 high part of the product, and in the sign flavor of unsignedp. */
3442 if (mul_highpart_cost[speed][mode] < max_cost)
3443 {
3444 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3445 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3446 unsignedp, OPTAB_DIRECT);
3447 if (tem)
3448 return tem;
3449 }
3450
3451 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3452 Need to adjust the result after the multiplication. */
3453 if (size - 1 < BITS_PER_WORD
3454 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3455 + 4 * add_cost[speed][mode] < max_cost))
3456 {
3457 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3458 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3459 unsignedp, OPTAB_DIRECT);
3460 if (tem)
3461 /* We used the wrong signedness. Adjust the result. */
3462 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3463 tem, unsignedp);
3464 }
3465
3466 /* Try widening multiplication. */
3467 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3468 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3469 && mul_widen_cost[speed][wider_mode] < max_cost)
3470 {
3471 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3472 unsignedp, OPTAB_WIDEN);
3473 if (tem)
3474 return extract_high_half (mode, tem);
3475 }
3476
3477 /* Try widening the mode and performing a non-widening multiplication. */
3478 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3479 && size - 1 < BITS_PER_WORD
3480 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3481 {
3482 rtx insns, wop0, wop1;
3483
3484 /* We need to widen the operands, for example to ensure the
3485 constant multiplier is correctly sign or zero extended.
3486 Use a sequence to clean up any instructions emitted by
3487 the conversions if things don't work out. */
3488 start_sequence ();
3489 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3490 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3491 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3492 unsignedp, OPTAB_WIDEN);
3493 insns = get_insns ();
3494 end_sequence ();
3495
3496 if (tem)
3497 {
3498 emit_insn (insns);
3499 return extract_high_half (mode, tem);
3500 }
3501 }
3502
3503 /* Try widening multiplication of opposite signedness, and adjust. */
3504 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3505 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3506 && size - 1 < BITS_PER_WORD
3507 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3508 + 4 * add_cost[speed][mode] < max_cost))
3509 {
3510 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3511 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3512 if (tem != 0)
3513 {
3514 tem = extract_high_half (mode, tem);
3515 /* We used the wrong signedness. Adjust the result. */
3516 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3517 target, unsignedp);
3518 }
3519 }
3520
3521 return 0;
3522 }
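/* The probes above correspond to the standard expander names in the GCC
   internals manual: smulm3_highpart and umulm3_highpart (e.g.
   smulsi3_highpart for SImode) for a direct high-part multiply, and widening
   patterns such as mulsidi3 / umulsidi3 for the variants that form the full
   double-width product and let extract_high_half take its top half. Which
   strategy wins is decided purely by the per-mode cost tables initialized in
   init_expmed, measured against MAX_COST.  */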
3523
3524 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3525 putting the high half of the result in TARGET if that is convenient,
3526 and returning where the result is. If the operation cannot be performed,
3527 0 is returned.
3528
3529 MODE is the mode of operation and result.
3530
3531 UNSIGNEDP nonzero means unsigned multiply.
3532
3533 MAX_COST is the total allowed cost for the expanded RTL. */
3534
3535 static rtx
3536 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3537 rtx target, int unsignedp, int max_cost)
3538 {
3539 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3540 unsigned HOST_WIDE_INT cnst1;
3541 int extra_cost;
3542 bool sign_adjust = false;
3543 enum mult_variant variant;
3544 struct algorithm alg;
3545 rtx tem;
3546 bool speed = optimize_insn_for_speed_p ();
3547
3548 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3549 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3550 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3551
3552 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3553
3554 /* We can't optimize modes wider than BITS_PER_WORD.
3555 ??? We might be able to perform double-word arithmetic if
3556 mode == word_mode, however all the cost calculations in
3557 synth_mult etc. assume single-word operations. */
3558 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3559 return expand_mult_highpart_optab (mode, op0, op1, target,
3560 unsignedp, max_cost);
3561
3562 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3563
3564 /* Check whether we try to multiply by a negative constant. */
3565 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3566 {
3567 sign_adjust = true;
3568 extra_cost += add_cost[speed][mode];
3569 }
3570
3571 /* See whether shift/add multiplication is cheap enough. */
3572 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3573 max_cost - extra_cost))
3574 {
3575 /* See whether the specialized multiplication optabs are
3576 cheaper than the shift/add version. */
3577 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3578 alg.cost.cost + extra_cost);
3579 if (tem)
3580 return tem;
3581
3582 tem = convert_to_mode (wider_mode, op0, unsignedp);
3583 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3584 tem = extract_high_half (mode, tem);
3585
3586 /* Adjust result for signedness. */
3587 if (sign_adjust)
3588 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3589
3590 return tem;
3591 }
3592 return expand_mult_highpart_optab (mode, op0, op1, target,
3593 unsignedp, max_cost);
3594 }
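/* Illustrative outcome (the constants are computed, not hard-coded): for
   signed SImode division by 3, choose_multiplier (3, 32, 31) yields
   multiplier 0x55555556 with post_shift 0, so the caller expand_divmod ends
   up with

     q = highpart_s (x, 0x55555556) - (x >> 31)

   i.e. one high-part multiply produced here plus an arithmetic shift and a
   subtraction, instead of a divide instruction.  */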
3595
3596
3597 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3598
3599 static rtx
3600 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3601 {
3602 unsigned HOST_WIDE_INT masklow, maskhigh;
3603 rtx result, temp, shift, label;
3604 int logd;
3605
3606 logd = floor_log2 (d);
3607 result = gen_reg_rtx (mode);
3608
3609 /* Avoid conditional branches when they're expensive. */
3610 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3611 && optimize_insn_for_speed_p ())
3612 {
3613 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3614 mode, 0, -1);
3615 if (signmask)
3616 {
3617 signmask = force_reg (mode, signmask);
3618 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3619 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3620
3621 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3622 which instruction sequence to use. If logical right shifts
3623 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3624 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3625
3626 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3627 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3628 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3629 {
3630 temp = expand_binop (mode, xor_optab, op0, signmask,
3631 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3632 temp = expand_binop (mode, sub_optab, temp, signmask,
3633 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3634 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3635 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3636 temp = expand_binop (mode, xor_optab, temp, signmask,
3637 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3638 temp = expand_binop (mode, sub_optab, temp, signmask,
3639 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3640 }
3641 else
3642 {
3643 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3644 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3645 signmask = force_reg (mode, signmask);
3646
3647 temp = expand_binop (mode, add_optab, op0, signmask,
3648 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3649 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3650 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3651 temp = expand_binop (mode, sub_optab, temp, signmask,
3652 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3653 }
3654 return temp;
3655 }
3656 }
3657
3658 /* Mask contains the mode's signbit and the significant bits of the
3659 modulus. By including the signbit in the operation, many targets
3660 can avoid an explicit compare operation in the following comparison
3661 against zero. */
3662
3663 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3664 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3665 {
3666 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3667 maskhigh = -1;
3668 }
3669 else
3670 maskhigh = (HOST_WIDE_INT) -1
3671 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3672
3673 temp = expand_binop (mode, and_optab, op0,
3674 immed_double_const (masklow, maskhigh, mode),
3675 result, 1, OPTAB_LIB_WIDEN);
3676 if (temp != result)
3677 emit_move_insn (result, temp);
3678
3679 label = gen_label_rtx ();
3680 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3681
3682 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3683 0, OPTAB_LIB_WIDEN);
3684 masklow = (HOST_WIDE_INT) -1 << logd;
3685 maskhigh = -1;
3686 temp = expand_binop (mode, ior_optab, temp,
3687 immed_double_const (masklow, maskhigh, mode),
3688 result, 1, OPTAB_LIB_WIDEN);
3689 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3690 0, OPTAB_LIB_WIDEN);
3691 if (temp != result)
3692 emit_move_insn (result, temp);
3693 emit_label (label);
3694 return result;
3695 }
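/* For example, a signed 32-bit X % 8 (logd == 3) through the cheap-shift
   variant above amounts to this C sketch (hypothetical helper; 32-bit int
   and arithmetic right shift of negative values assumed):

     int
     smod8 (int x)
     {
       unsigned int s = (unsigned int) (x >> 31) >> 29;  /* 7 if x < 0, else 0.  */
       return (int) (((unsigned int) x + s) & 7) - (int) s;
     }

   matching C truncation semantics, e.g. smod8 (-9) == -1 and smod8 (9) == 1,
   with no conditional branch.  */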
3696
3697 /* Expand signed division of OP0 by a power of two D in mode MODE.
3698 This routine is only called for positive values of D. */
3699
3700 static rtx
3701 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3702 {
3703 rtx temp, label;
3704 tree shift;
3705 int logd;
3706
3707 logd = floor_log2 (d);
3708 shift = build_int_cst (NULL_TREE, logd);
3709
3710 if (d == 2
3711 && BRANCH_COST (optimize_insn_for_speed_p (),
3712 false) >= 1)
3713 {
3714 temp = gen_reg_rtx (mode);
3715 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3716 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3717 0, OPTAB_LIB_WIDEN);
3718 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3719 }
3720
3721 #ifdef HAVE_conditional_move
3722 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3723 >= 2)
3724 {
3725 rtx temp2;
3726
3727 /* ??? emit_conditional_move forces a stack adjustment via
3728 compare_from_rtx so, if the sequence is discarded, it will
3729 be lost. Do it now instead. */
3730 do_pending_stack_adjust ();
3731
3732 start_sequence ();
3733 temp2 = copy_to_mode_reg (mode, op0);
3734 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3735 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3736 temp = force_reg (mode, temp);
3737
3738 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3739 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3740 mode, temp, temp2, mode, 0);
3741 if (temp2)
3742 {
3743 rtx seq = get_insns ();
3744 end_sequence ();
3745 emit_insn (seq);
3746 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3747 }
3748 end_sequence ();
3749 }
3750 #endif
3751
3752 if (BRANCH_COST (optimize_insn_for_speed_p (),
3753 false) >= 2)
3754 {
3755 int ushift = GET_MODE_BITSIZE (mode) - logd;
3756
3757 temp = gen_reg_rtx (mode);
3758 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3759 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3760 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3761 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3762 else
3763 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3764 build_int_cst (NULL_TREE, ushift),
3765 NULL_RTX, 1);
3766 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3767 0, OPTAB_LIB_WIDEN);
3768 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3769 }
3770
3771 label = gen_label_rtx ();
3772 temp = copy_to_mode_reg (mode, op0);
3773 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3774 expand_inc (temp, GEN_INT (d - 1));
3775 emit_label (label);
3776 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3777 }
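/* For example, a signed 32-bit X / 4 (logd == 2) through the branch-free
   path above is, as a C sketch (hypothetical helper; 32-bit int and
   arithmetic right shift of negative values assumed):

     int
     sdiv4 (int x)
     {
       int bias = (int) ((unsigned int) (x >> 31) >> 30);  /* 3 if x < 0.  */
       return (x + bias) >> 2;
     }

   so sdiv4 (-7) == -1 and sdiv4 (7) == 1, as TRUNC_DIV_EXPR requires,
   without a conditional branch.  */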
3778 \f
3779 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3780 if that is convenient, and returning where the result is.
3781 You may request either the quotient or the remainder as the result;
3782 specify REM_FLAG nonzero to get the remainder.
3783
3784 CODE is the expression code for which kind of division this is;
3785 it controls how rounding is done. MODE is the machine mode to use.
3786 UNSIGNEDP nonzero means do unsigned division. */
3787
3788 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3789 and then correct it by or'ing in missing high bits
3790 if result of ANDI is nonzero.
3791 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3792 This could optimize to a bfexts instruction.
3793 But C doesn't use these operations, so their optimizations are
3794 left for later. */
3795 /* ??? For modulo, we don't actually need the highpart of the first product,
3796 the low part will do nicely. And for small divisors, the second multiply
3797 can also be a low-part only multiply or even be completely left out.
3798 E.g. to calculate the remainder of a division by 3 with a 32 bit
3799 multiply, multiply with 0x55555556 and extract the upper two bits;
3800 the result is exact for inputs up to 0x1fffffff.
3801 The input range can be reduced by using cross-sum rules.
3802 For odd divisors >= 3, the following table gives right shift counts
3803 so that if a number is shifted by an integer multiple of the given
3804 amount, the remainder stays the same:
3805 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3806 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3807 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3808 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3809 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3810
3811 Cross-sum rules for even numbers can be derived by leaving as many bits
3812 to the right alone as the divisor has zeros to the right.
3813 E.g. if x is an unsigned 32 bit number:
3814 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3815 */
3816
3817 rtx
3818 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3819 rtx op0, rtx op1, rtx target, int unsignedp)
3820 {
3821 enum machine_mode compute_mode;
3822 rtx tquotient;
3823 rtx quotient = 0, remainder = 0;
3824 rtx last;
3825 int size;
3826 rtx insn, set;
3827 optab optab1, optab2;
3828 int op1_is_constant, op1_is_pow2 = 0;
3829 int max_cost, extra_cost;
3830 static HOST_WIDE_INT last_div_const = 0;
3831 static HOST_WIDE_INT ext_op1;
3832 bool speed = optimize_insn_for_speed_p ();
3833
3834 op1_is_constant = CONST_INT_P (op1);
3835 if (op1_is_constant)
3836 {
3837 ext_op1 = INTVAL (op1);
3838 if (unsignedp)
3839 ext_op1 &= GET_MODE_MASK (mode);
3840 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3841 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3842 }
3843
3844 /*
3845 This is the structure of expand_divmod:
3846
3847 First comes code to fix up the operands so we can perform the operations
3848 correctly and efficiently.
3849
3850 Second comes a switch statement with code specific for each rounding mode.
3851 For some special operands this code emits all RTL for the desired
3852 operation, for other cases, it generates only a quotient and stores it in
3853 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3854 to indicate that it has not done anything.
3855
3856 Last comes code that finishes the operation. If QUOTIENT is set and
3857 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3858 QUOTIENT is not set, it is computed using trunc rounding.
3859
3860 We try to generate special code for division and remainder when OP1 is a
3861 constant. If |OP1| = 2**n we can use shifts and some other fast
3862 operations. For other values of OP1, we compute a carefully selected
3863 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3864 by m.
3865
3866 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3867 half of the product. Different strategies for generating the product are
3868 implemented in expand_mult_highpart.
3869
3870 If what we actually want is the remainder, we generate that by another
3871 by-constant multiplication and a subtraction. */
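/* Concretely (the constants below are derived by choose_multiplier, not
   hard-coded anywhere): for unsigned SImode division by 10 it yields
   multiplier 0xcccccccd and post_shift 3, so X / 10 is expanded as

     q = highpart_u (X, 0xcccccccd) >> 3

   and, when the remainder is wanted, r = X - q * 10 via a further low-part
   multiply and subtraction.  */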
3872
3873 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3874 code below will malfunction if we are, so check here and handle
3875 the special case if so. */
3876 if (op1 == const1_rtx)
3877 return rem_flag ? const0_rtx : op0;
3878
3879 /* When dividing by -1, we could get an overflow.
3880 negv_optab can handle overflows. */
3881 if (! unsignedp && op1 == constm1_rtx)
3882 {
3883 if (rem_flag)
3884 return const0_rtx;
3885 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3886 ? negv_optab : neg_optab, op0, target, 0);
3887 }
3888
3889 if (target
3890 /* Don't use the function value register as a target
3891 since we have to read it as well as write it,
3892 and function-inlining gets confused by this. */
3893 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3894 /* Don't clobber an operand while doing a multi-step calculation. */
3895 || ((rem_flag || op1_is_constant)
3896 && (reg_mentioned_p (target, op0)
3897 || (MEM_P (op0) && MEM_P (target))))
3898 || reg_mentioned_p (target, op1)
3899 || (MEM_P (op1) && MEM_P (target))))
3900 target = 0;
3901
3902 /* Get the mode in which to perform this computation. Normally it will
3903 be MODE, but sometimes we can't do the desired operation in MODE.
3904 If so, pick a wider mode in which we can do the operation. Convert
3905 to that mode at the start to avoid repeated conversions.
3906
3907 First see what operations we need. These depend on the expression
3908 we are evaluating. (We assume that divxx3 insns exist under the
3909 same conditions as modxx3 insns do, and that these insns don't normally
3910 fail. If these assumptions are not correct, we may generate less
3911 efficient code in some cases.)
3912
3913 Then see if we find a mode in which we can open-code that operation
3914 (either a division, modulus, or shift). Finally, check for the smallest
3915 mode for which we can do the operation with a library call. */
3916
3917 /* We might want to refine this now that we have division-by-constant
3918 optimization. Since expand_mult_highpart tries so many variants, it is
3919 not straightforward to generalize this. Maybe we should make an array
3920 of possible modes in init_expmed? Save this for GCC 2.7. */
3921
3922 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3923 ? (unsignedp ? lshr_optab : ashr_optab)
3924 : (unsignedp ? udiv_optab : sdiv_optab));
3925 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3926 ? optab1
3927 : (unsignedp ? udivmod_optab : sdivmod_optab));
3928
3929 for (compute_mode = mode; compute_mode != VOIDmode;
3930 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3931 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3932 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3933 break;
3934
3935 if (compute_mode == VOIDmode)
3936 for (compute_mode = mode; compute_mode != VOIDmode;
3937 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3938 if (optab_libfunc (optab1, compute_mode)
3939 || optab_libfunc (optab2, compute_mode))
3940 break;
3941
3942 /* If we still couldn't find a mode, use MODE, but expand_binop will
3943 probably die. */
3944 if (compute_mode == VOIDmode)
3945 compute_mode = mode;
3946
3947 if (target && GET_MODE (target) == compute_mode)
3948 tquotient = target;
3949 else
3950 tquotient = gen_reg_rtx (compute_mode);
3951
3952 size = GET_MODE_BITSIZE (compute_mode);
3953 #if 0
3954 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3955 (mode), and thereby get better code when OP1 is a constant. Do that
3956 later. It will require going over all usages of SIZE below. */
3957 size = GET_MODE_BITSIZE (mode);
3958 #endif
3959
3960 /* Only deduct something for a REM if the last divide done was
3961 for a different constant. Then set the constant of the last
3962 divide. */
3963 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3964 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3965 && INTVAL (op1) == last_div_const))
3966 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3967
3968 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3969
3970 /* Now convert to the best mode to use. */
3971 if (compute_mode != mode)
3972 {
3973 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3974 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3975
3976 /* convert_modes may have placed op1 into a register, so we
3977 must recompute the following. */
3978 op1_is_constant = CONST_INT_P (op1);
3979 op1_is_pow2 = (op1_is_constant
3980 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3981 || (! unsignedp
3982 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3983 }
3984
3985 /* If one of the operands is a volatile MEM, copy it into a register. */
3986
3987 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3988 op0 = force_reg (compute_mode, op0);
3989 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3990 op1 = force_reg (compute_mode, op1);
3991
3992 /* If we need the remainder or if OP1 is constant, we need to
3993 put OP0 in a register in case it has any queued subexpressions. */
3994 if (rem_flag || op1_is_constant)
3995 op0 = force_reg (compute_mode, op0);
3996
3997 last = get_last_insn ();
3998
3999 /* Promote floor rounding to trunc rounding for unsigned operations. */
4000 if (unsignedp)
4001 {
4002 if (code == FLOOR_DIV_EXPR)
4003 code = TRUNC_DIV_EXPR;
4004 if (code == FLOOR_MOD_EXPR)
4005 code = TRUNC_MOD_EXPR;
4006 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4007 code = TRUNC_DIV_EXPR;
4008 }
4009
4010 if (op1 != const0_rtx)
4011 switch (code)
4012 {
4013 case TRUNC_MOD_EXPR:
4014 case TRUNC_DIV_EXPR:
4015 if (op1_is_constant)
4016 {
4017 if (unsignedp)
4018 {
4019 unsigned HOST_WIDE_INT mh;
4020 int pre_shift, post_shift;
4021 int dummy;
4022 rtx ml;
4023 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4024 & GET_MODE_MASK (compute_mode));
4025
4026 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4027 {
4028 pre_shift = floor_log2 (d);
4029 if (rem_flag)
4030 {
4031 remainder
4032 = expand_binop (compute_mode, and_optab, op0,
4033 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4034 remainder, 1,
4035 OPTAB_LIB_WIDEN);
4036 if (remainder)
4037 return gen_lowpart (mode, remainder);
4038 }
4039 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4040 build_int_cst (NULL_TREE,
4041 pre_shift),
4042 tquotient, 1);
4043 }
4044 else if (size <= HOST_BITS_PER_WIDE_INT)
4045 {
4046 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4047 {
4048 /* Most significant bit of divisor is set; emit an scc
4049 insn. */
4050 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4051 compute_mode, 1, 1);
4052 }
4053 else
4054 {
4055 /* Find a suitable multiplier and right shift count
4056 instead of multiplying with D. */
4057
4058 mh = choose_multiplier (d, size, size,
4059 &ml, &post_shift, &dummy);
4060
4061 /* If the suggested multiplier is more than SIZE bits,
4062 we can do better for even divisors, using an
4063 initial right shift. */
4064 if (mh != 0 && (d & 1) == 0)
4065 {
4066 pre_shift = floor_log2 (d & -d);
4067 mh = choose_multiplier (d >> pre_shift, size,
4068 size - pre_shift,
4069 &ml, &post_shift, &dummy);
4070 gcc_assert (!mh);
4071 }
4072 else
4073 pre_shift = 0;
4074
4075 if (mh != 0)
4076 {
4077 rtx t1, t2, t3, t4;
4078
4079 if (post_shift - 1 >= BITS_PER_WORD)
4080 goto fail1;
4081
4082 extra_cost
4083 = (shift_cost[speed][compute_mode][post_shift - 1]
4084 + shift_cost[speed][compute_mode][1]
4085 + 2 * add_cost[speed][compute_mode]);
4086 t1 = expand_mult_highpart (compute_mode, op0, ml,
4087 NULL_RTX, 1,
4088 max_cost - extra_cost);
4089 if (t1 == 0)
4090 goto fail1;
4091 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4092 op0, t1),
4093 NULL_RTX);
4094 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
4095 integer_one_node, NULL_RTX, 1);
4096 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4097 t1, t3),
4098 NULL_RTX);
4099 quotient = expand_shift
4100 (RSHIFT_EXPR, compute_mode, t4,
4101 build_int_cst (NULL_TREE, post_shift - 1),
4102 tquotient, 1);
4103 }
4104 else
4105 {
4106 rtx t1, t2;
4107
4108 if (pre_shift >= BITS_PER_WORD
4109 || post_shift >= BITS_PER_WORD)
4110 goto fail1;
4111
4112 t1 = expand_shift
4113 (RSHIFT_EXPR, compute_mode, op0,
4114 build_int_cst (NULL_TREE, pre_shift),
4115 NULL_RTX, 1);
4116 extra_cost
4117 = (shift_cost[speed][compute_mode][pre_shift]
4118 + shift_cost[speed][compute_mode][post_shift]);
4119 t2 = expand_mult_highpart (compute_mode, t1, ml,
4120 NULL_RTX, 1,
4121 max_cost - extra_cost);
4122 if (t2 == 0)
4123 goto fail1;
4124 quotient = expand_shift
4125 (RSHIFT_EXPR, compute_mode, t2,
4126 build_int_cst (NULL_TREE, post_shift),
4127 tquotient, 1);
4128 }
4129 }
4130 }
4131 else /* Too wide mode to use tricky code */
4132 break;
4133
4134 insn = get_last_insn ();
4135 if (insn != last
4136 && (set = single_set (insn)) != 0
4137 && SET_DEST (set) == quotient)
4138 set_unique_reg_note (insn,
4139 REG_EQUAL,
4140 gen_rtx_UDIV (compute_mode, op0, op1));
4141 }
4142 else /* TRUNC_DIV, signed */
4143 {
4144 unsigned HOST_WIDE_INT ml;
4145 int lgup, post_shift;
4146 rtx mlr;
4147 HOST_WIDE_INT d = INTVAL (op1);
4148 unsigned HOST_WIDE_INT abs_d;
4149
4150 /* Since d might be INT_MIN, we have to cast to
4151 unsigned HOST_WIDE_INT before negating to avoid
4152 undefined signed overflow. */
4153 abs_d = (d >= 0
4154 ? (unsigned HOST_WIDE_INT) d
4155 : - (unsigned HOST_WIDE_INT) d);
4156
4157 /* n rem d = n rem -d */
4158 if (rem_flag && d < 0)
4159 {
4160 d = abs_d;
4161 op1 = gen_int_mode (abs_d, compute_mode);
4162 }
4163
4164 if (d == 1)
4165 quotient = op0;
4166 else if (d == -1)
4167 quotient = expand_unop (compute_mode, neg_optab, op0,
4168 tquotient, 0);
4169 else if (HOST_BITS_PER_WIDE_INT >= size
4170 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4171 {
4172 /* This case is not handled correctly below. */
4173 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4174 compute_mode, 1, 1);
4175 if (quotient == 0)
4176 goto fail1;
4177 }
4178 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4179 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4180 : sdiv_pow2_cheap[speed][compute_mode])
4181 /* We assume that the cheap metric is true if the
4182 optab has an expander for this mode. */
4183 && ((optab_handler ((rem_flag ? smod_optab
4184 : sdiv_optab),
4185 compute_mode)
4186 != CODE_FOR_nothing)
4187 || (optab_handler (sdivmod_optab,
4188 compute_mode)
4189 != CODE_FOR_nothing)))
4190 ;
4191 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4192 {
4193 if (rem_flag)
4194 {
4195 remainder = expand_smod_pow2 (compute_mode, op0, d);
4196 if (remainder)
4197 return gen_lowpart (mode, remainder);
4198 }
4199
4200 if (sdiv_pow2_cheap[speed][compute_mode]
4201 && ((optab_handler (sdiv_optab, compute_mode)
4202 != CODE_FOR_nothing)
4203 || (optab_handler (sdivmod_optab, compute_mode)
4204 != CODE_FOR_nothing)))
4205 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4206 compute_mode, op0,
4207 gen_int_mode (abs_d,
4208 compute_mode),
4209 NULL_RTX, 0);
4210 else
4211 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4212
4213 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4214 negate the quotient. */
4215 if (d < 0)
4216 {
4217 insn = get_last_insn ();
4218 if (insn != last
4219 && (set = single_set (insn)) != 0
4220 && SET_DEST (set) == quotient
4221 && abs_d < ((unsigned HOST_WIDE_INT) 1
4222 << (HOST_BITS_PER_WIDE_INT - 1)))
4223 set_unique_reg_note (insn,
4224 REG_EQUAL,
4225 gen_rtx_DIV (compute_mode,
4226 op0,
4227 GEN_INT
4228 (trunc_int_for_mode
4229 (abs_d,
4230 compute_mode))));
4231
4232 quotient = expand_unop (compute_mode, neg_optab,
4233 quotient, quotient, 0);
4234 }
4235 }
4236 else if (size <= HOST_BITS_PER_WIDE_INT)
4237 {
4238 choose_multiplier (abs_d, size, size - 1,
4239 &mlr, &post_shift, &lgup);
4240 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4241 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4242 {
4243 rtx t1, t2, t3;
4244
4245 if (post_shift >= BITS_PER_WORD
4246 || size - 1 >= BITS_PER_WORD)
4247 goto fail1;
4248
4249 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4250 + shift_cost[speed][compute_mode][size - 1]
4251 + add_cost[speed][compute_mode]);
4252 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4253 NULL_RTX, 0,
4254 max_cost - extra_cost);
4255 if (t1 == 0)
4256 goto fail1;
4257 t2 = expand_shift
4258 (RSHIFT_EXPR, compute_mode, t1,
4259 build_int_cst (NULL_TREE, post_shift),
4260 NULL_RTX, 0);
4261 t3 = expand_shift
4262 (RSHIFT_EXPR, compute_mode, op0,
4263 build_int_cst (NULL_TREE, size - 1),
4264 NULL_RTX, 0);
4265 if (d < 0)
4266 quotient
4267 = force_operand (gen_rtx_MINUS (compute_mode,
4268 t3, t2),
4269 tquotient);
4270 else
4271 quotient
4272 = force_operand (gen_rtx_MINUS (compute_mode,
4273 t2, t3),
4274 tquotient);
4275 }
4276 else
4277 {
4278 rtx t1, t2, t3, t4;
4279
4280 if (post_shift >= BITS_PER_WORD
4281 || size - 1 >= BITS_PER_WORD)
4282 goto fail1;
4283
4284 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4285 mlr = gen_int_mode (ml, compute_mode);
4286 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4287 + shift_cost[speed][compute_mode][size - 1]
4288 + 2 * add_cost[speed][compute_mode]);
4289 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4290 NULL_RTX, 0,
4291 max_cost - extra_cost);
4292 if (t1 == 0)
4293 goto fail1;
4294 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4295 t1, op0),
4296 NULL_RTX);
4297 t3 = expand_shift
4298 (RSHIFT_EXPR, compute_mode, t2,
4299 build_int_cst (NULL_TREE, post_shift),
4300 NULL_RTX, 0);
4301 t4 = expand_shift
4302 (RSHIFT_EXPR, compute_mode, op0,
4303 build_int_cst (NULL_TREE, size - 1),
4304 NULL_RTX, 0);
4305 if (d < 0)
4306 quotient
4307 = force_operand (gen_rtx_MINUS (compute_mode,
4308 t4, t3),
4309 tquotient);
4310 else
4311 quotient
4312 = force_operand (gen_rtx_MINUS (compute_mode,
4313 t3, t4),
4314 tquotient);
4315 }
4316 }
4317 else /* Too wide mode to use tricky code */
4318 break;
4319
4320 insn = get_last_insn ();
4321 if (insn != last
4322 && (set = single_set (insn)) != 0
4323 && SET_DEST (set) == quotient)
4324 set_unique_reg_note (insn,
4325 REG_EQUAL,
4326 gen_rtx_DIV (compute_mode, op0, op1));
4327 }
4328 break;
4329 }
4330 fail1:
4331 delete_insns_since (last);
4332 break;
4333
4334 case FLOOR_DIV_EXPR:
4335 case FLOOR_MOD_EXPR:
4336 /* We will come here only for signed operations. */
4337 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4338 {
4339 unsigned HOST_WIDE_INT mh;
4340 int pre_shift, lgup, post_shift;
4341 HOST_WIDE_INT d = INTVAL (op1);
4342 rtx ml;
4343
4344 if (d > 0)
4345 {
4346 /* We could just as easily deal with negative constants here,
4347 but it does not seem worth the trouble for GCC 2.6. */
4348 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4349 {
4350 pre_shift = floor_log2 (d);
4351 if (rem_flag)
4352 {
4353 remainder = expand_binop (compute_mode, and_optab, op0,
4354 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4355 remainder, 0, OPTAB_LIB_WIDEN);
4356 if (remainder)
4357 return gen_lowpart (mode, remainder);
4358 }
4359 quotient = expand_shift
4360 (RSHIFT_EXPR, compute_mode, op0,
4361 build_int_cst (NULL_TREE, pre_shift),
4362 tquotient, 0);
4363 }
4364 else
4365 {
4366 rtx t1, t2, t3, t4;
4367
4368 mh = choose_multiplier (d, size, size - 1,
4369 &ml, &post_shift, &lgup);
4370 gcc_assert (!mh);
4371
4372 if (post_shift < BITS_PER_WORD
4373 && size - 1 < BITS_PER_WORD)
4374 {
4375 t1 = expand_shift
4376 (RSHIFT_EXPR, compute_mode, op0,
4377 build_int_cst (NULL_TREE, size - 1),
4378 NULL_RTX, 0);
4379 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4380 NULL_RTX, 0, OPTAB_WIDEN);
4381 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4382 + shift_cost[speed][compute_mode][size - 1]
4383 + 2 * add_cost[speed][compute_mode]);
4384 t3 = expand_mult_highpart (compute_mode, t2, ml,
4385 NULL_RTX, 1,
4386 max_cost - extra_cost);
4387 if (t3 != 0)
4388 {
4389 t4 = expand_shift
4390 (RSHIFT_EXPR, compute_mode, t3,
4391 build_int_cst (NULL_TREE, post_shift),
4392 NULL_RTX, 1);
4393 quotient = expand_binop (compute_mode, xor_optab,
4394 t4, t1, tquotient, 0,
4395 OPTAB_WIDEN);
4396 }
4397 }
4398 }
4399 }
4400 else
4401 {
4402 rtx nsign, t1, t2, t3, t4;
4403 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4404 op0, constm1_rtx), NULL_RTX);
4405 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4406 0, OPTAB_WIDEN);
4407 nsign = expand_shift
4408 (RSHIFT_EXPR, compute_mode, t2,
4409 build_int_cst (NULL_TREE, size - 1),
4410 NULL_RTX, 0);
4411 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4412 NULL_RTX);
4413 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4414 NULL_RTX, 0);
4415 if (t4)
4416 {
4417 rtx t5;
4418 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4419 NULL_RTX, 0);
4420 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4421 t4, t5),
4422 tquotient);
4423 }
4424 }
4425 }
4426
4427 if (quotient != 0)
4428 break;
4429 delete_insns_since (last);
4430
4431 /* Try using an instruction that produces both the quotient and
4432 remainder, using truncation. We can easily compensate the quotient
4433 or remainder to get floor rounding, once we have the remainder.
4434 Notice that we also compute the final remainder value here,
4435 and return the result right away. */
4436 if (target == 0 || GET_MODE (target) != compute_mode)
4437 target = gen_reg_rtx (compute_mode);
4438
4439 if (rem_flag)
4440 {
4441 remainder
4442 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4443 quotient = gen_reg_rtx (compute_mode);
4444 }
4445 else
4446 {
4447 quotient
4448 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4449 remainder = gen_reg_rtx (compute_mode);
4450 }
4451
4452 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4453 quotient, remainder, 0))
4454 {
4455 /* This could be computed with a branch-less sequence.
4456 Save that for later. */
4457 rtx tem;
4458 rtx label = gen_label_rtx ();
4459 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4460 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4461 NULL_RTX, 0, OPTAB_WIDEN);
4462 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4463 expand_dec (quotient, const1_rtx);
4464 expand_inc (remainder, op1);
4465 emit_label (label);
4466 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4467 }
4468
4469 /* No luck with division elimination or divmod. Have to do it
4470 by conditionally adjusting op0 *and* the result. */
4471 {
4472 rtx label1, label2, label3, label4, label5;
4473 rtx adjusted_op0;
4474 rtx tem;
4475
4476 quotient = gen_reg_rtx (compute_mode);
4477 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4478 label1 = gen_label_rtx ();
4479 label2 = gen_label_rtx ();
4480 label3 = gen_label_rtx ();
4481 label4 = gen_label_rtx ();
4482 label5 = gen_label_rtx ();
4483 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4484 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4485 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4486 quotient, 0, OPTAB_LIB_WIDEN);
4487 if (tem != quotient)
4488 emit_move_insn (quotient, tem);
4489 emit_jump_insn (gen_jump (label5));
4490 emit_barrier ();
4491 emit_label (label1);
4492 expand_inc (adjusted_op0, const1_rtx);
4493 emit_jump_insn (gen_jump (label4));
4494 emit_barrier ();
4495 emit_label (label2);
4496 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4497 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4498 quotient, 0, OPTAB_LIB_WIDEN);
4499 if (tem != quotient)
4500 emit_move_insn (quotient, tem);
4501 emit_jump_insn (gen_jump (label5));
4502 emit_barrier ();
4503 emit_label (label3);
4504 expand_dec (adjusted_op0, const1_rtx);
4505 emit_label (label4);
4506 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4507 quotient, 0, OPTAB_LIB_WIDEN);
4508 if (tem != quotient)
4509 emit_move_insn (quotient, tem);
4510 expand_dec (quotient, const1_rtx);
4511 emit_label (label5);
4512 }
4513 break;
4514
4515 case CEIL_DIV_EXPR:
4516 case CEIL_MOD_EXPR:
4517 if (unsignedp)
4518 {
4519 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4520 {
4521 rtx t1, t2, t3;
4522 unsigned HOST_WIDE_INT d = INTVAL (op1);
4523 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4524 build_int_cst (NULL_TREE, floor_log2 (d)),
4525 tquotient, 1);
4526 t2 = expand_binop (compute_mode, and_optab, op0,
4527 GEN_INT (d - 1),
4528 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4529 t3 = gen_reg_rtx (compute_mode);
4530 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4531 compute_mode, 1, 1);
4532 if (t3 == 0)
4533 {
4534 rtx lab;
4535 lab = gen_label_rtx ();
4536 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4537 expand_inc (t1, const1_rtx);
4538 emit_label (lab);
4539 quotient = t1;
4540 }
4541 else
4542 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4543 t1, t3),
4544 tquotient);
4545 break;
4546 }
4547
4548 /* Try using an instruction that produces both the quotient and
4549 remainder, using truncation. We can easily compensate the
4550 quotient or remainder to get ceiling rounding, once we have the
4551 remainder. Notice that we also compute the final remainder
4552 value here, and return the result right away. */
4553 if (target == 0 || GET_MODE (target) != compute_mode)
4554 target = gen_reg_rtx (compute_mode);
4555
4556 if (rem_flag)
4557 {
4558 remainder = (REG_P (target)
4559 ? target : gen_reg_rtx (compute_mode));
4560 quotient = gen_reg_rtx (compute_mode);
4561 }
4562 else
4563 {
4564 quotient = (REG_P (target)
4565 ? target : gen_reg_rtx (compute_mode));
4566 remainder = gen_reg_rtx (compute_mode);
4567 }
4568
4569 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4570 remainder, 1))
4571 {
4572 /* This could be computed with a branch-less sequence.
4573 Save that for later. */
4574 rtx label = gen_label_rtx ();
4575 do_cmp_and_jump (remainder, const0_rtx, EQ,
4576 compute_mode, label);
4577 expand_inc (quotient, const1_rtx);
4578 expand_dec (remainder, op1);
4579 emit_label (label);
4580 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4581 }
4582
4583 /* No luck with division elimination or divmod. Have to do it
4584 by conditionally adjusting op0 *and* the result. */
4585 {
4586 rtx label1, label2;
4587 rtx adjusted_op0, tem;
4588
4589 quotient = gen_reg_rtx (compute_mode);
4590 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4591 label1 = gen_label_rtx ();
4592 label2 = gen_label_rtx ();
4593 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4594 compute_mode, label1);
4595 emit_move_insn (quotient, const0_rtx);
4596 emit_jump_insn (gen_jump (label2));
4597 emit_barrier ();
4598 emit_label (label1);
4599 expand_dec (adjusted_op0, const1_rtx);
4600 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4601 quotient, 1, OPTAB_LIB_WIDEN);
4602 if (tem != quotient)
4603 emit_move_insn (quotient, tem);
4604 expand_inc (quotient, const1_rtx);
4605 emit_label (label2);
4606 }
4607 }
4608 else /* signed */
4609 {
4610 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4611 && INTVAL (op1) >= 0)
4612 {
4613 /* This is extremely similar to the code for the unsigned case
4614 above. For 2.7 we should merge these variants, but for
4615 2.6.1 I don't want to touch the code for unsigned since that
4616 get used in C. The signed case will only be used by other
4617 languages (Ada). */
4618
4619 rtx t1, t2, t3;
4620 unsigned HOST_WIDE_INT d = INTVAL (op1);
4621 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4622 build_int_cst (NULL_TREE, floor_log2 (d)),
4623 tquotient, 0);
4624 t2 = expand_binop (compute_mode, and_optab, op0,
4625 GEN_INT (d - 1),
4626 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4627 t3 = gen_reg_rtx (compute_mode);
4628 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4629 compute_mode, 1, 1);
4630 if (t3 == 0)
4631 {
4632 rtx lab;
4633 lab = gen_label_rtx ();
4634 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4635 expand_inc (t1, const1_rtx);
4636 emit_label (lab);
4637 quotient = t1;
4638 }
4639 else
4640 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4641 t1, t3),
4642 tquotient);
4643 break;
4644 }
4645
4646 /* Try using an instruction that produces both the quotient and
4647 remainder, using truncation. We can easily compensate the
4648 quotient or remainder to get ceiling rounding, once we have the
4649 remainder. Notice that we also compute the final remainder
4650 value here, and return the result right away. */
4651 if (target == 0 || GET_MODE (target) != compute_mode)
4652 target = gen_reg_rtx (compute_mode);
4653 if (rem_flag)
4654 {
4655 remainder = (REG_P (target)
4656 ? target : gen_reg_rtx (compute_mode));
4657 quotient = gen_reg_rtx (compute_mode);
4658 }
4659 else
4660 {
4661 quotient = (REG_P (target)
4662 ? target : gen_reg_rtx (compute_mode));
4663 remainder = gen_reg_rtx (compute_mode);
4664 }
4665
4666 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4667 remainder, 0))
4668 {
4669 /* This could be computed with a branch-less sequence.
4670 Save that for later. */
4671 rtx tem;
4672 rtx label = gen_label_rtx ();
4673 do_cmp_and_jump (remainder, const0_rtx, EQ,
4674 compute_mode, label);
4675 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4676 NULL_RTX, 0, OPTAB_WIDEN);
4677 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4678 expand_inc (quotient, const1_rtx);
4679 expand_dec (remainder, op1);
4680 emit_label (label);
4681 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4682 }
4683
4684 /* No luck with division elimination or divmod. Have to do it
4685 by conditionally adjusting op0 *and* the result. */
4686 {
4687 rtx label1, label2, label3, label4, label5;
4688 rtx adjusted_op0;
4689 rtx tem;
4690
4691 quotient = gen_reg_rtx (compute_mode);
4692 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4693 label1 = gen_label_rtx ();
4694 label2 = gen_label_rtx ();
4695 label3 = gen_label_rtx ();
4696 label4 = gen_label_rtx ();
4697 label5 = gen_label_rtx ();
4698 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4699 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4700 compute_mode, label1);
4701 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4702 quotient, 0, OPTAB_LIB_WIDEN);
4703 if (tem != quotient)
4704 emit_move_insn (quotient, tem);
4705 emit_jump_insn (gen_jump (label5));
4706 emit_barrier ();
4707 emit_label (label1);
4708 expand_dec (adjusted_op0, const1_rtx);
4709 emit_jump_insn (gen_jump (label4));
4710 emit_barrier ();
4711 emit_label (label2);
4712 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4713 compute_mode, label3);
4714 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4715 quotient, 0, OPTAB_LIB_WIDEN);
4716 if (tem != quotient)
4717 emit_move_insn (quotient, tem);
4718 emit_jump_insn (gen_jump (label5));
4719 emit_barrier ();
4720 emit_label (label3);
4721 expand_inc (adjusted_op0, const1_rtx);
4722 emit_label (label4);
4723 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4724 quotient, 0, OPTAB_LIB_WIDEN);
4725 if (tem != quotient)
4726 emit_move_insn (quotient, tem);
4727 expand_inc (quotient, const1_rtx);
4728 emit_label (label5);
4729 }
4730 }
4731 break;
4732
4733 case EXACT_DIV_EXPR:
4734 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4735 {
4736 HOST_WIDE_INT d = INTVAL (op1);
4737 unsigned HOST_WIDE_INT ml;
4738 int pre_shift;
4739 rtx t1;
4740
4741 pre_shift = floor_log2 (d & -d);
4742 ml = invert_mod2n (d >> pre_shift, size);
4743 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4744 build_int_cst (NULL_TREE, pre_shift),
4745 NULL_RTX, unsignedp);
4746 quotient = expand_mult (compute_mode, t1,
4747 gen_int_mode (ml, compute_mode),
4748 NULL_RTX, 1);
4749
4750 insn = get_last_insn ();
4751 set_unique_reg_note (insn,
4752 REG_EQUAL,
4753 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4754 compute_mode,
4755 op0, op1));
4756 }
4757 break;
4758
4759 case ROUND_DIV_EXPR:
4760 case ROUND_MOD_EXPR:
4761 if (unsignedp)
4762 {
4763 rtx tem;
4764 rtx label;
4765 label = gen_label_rtx ();
4766 quotient = gen_reg_rtx (compute_mode);
4767 remainder = gen_reg_rtx (compute_mode);
4768 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4769 {
4770 rtx tem;
4771 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4772 quotient, 1, OPTAB_LIB_WIDEN);
4773 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4774 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4775 remainder, 1, OPTAB_LIB_WIDEN);
4776 }
4777 tem = plus_constant (op1, -1);
4778 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4779 integer_one_node, NULL_RTX, 1);
4780 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4781 expand_inc (quotient, const1_rtx);
4782 expand_dec (remainder, op1);
4783 emit_label (label);
4784 }
4785 else
4786 {
4787 rtx abs_rem, abs_op1, tem, mask;
4788 rtx label;
4789 label = gen_label_rtx ();
4790 quotient = gen_reg_rtx (compute_mode);
4791 remainder = gen_reg_rtx (compute_mode);
4792 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4793 {
4794 rtx tem;
4795 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4796 quotient, 0, OPTAB_LIB_WIDEN);
4797 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4798 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4799 remainder, 0, OPTAB_LIB_WIDEN);
4800 }
4801 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4802 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4803 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4804 integer_one_node, NULL_RTX, 1);
4805 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4806 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4807 NULL_RTX, 0, OPTAB_WIDEN);
4808 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4809 build_int_cst (NULL_TREE, size - 1),
4810 NULL_RTX, 0);
4811 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4812 NULL_RTX, 0, OPTAB_WIDEN);
4813 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4814 NULL_RTX, 0, OPTAB_WIDEN);
4815 expand_inc (quotient, tem);
4816 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4817 NULL_RTX, 0, OPTAB_WIDEN);
4818 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4819 NULL_RTX, 0, OPTAB_WIDEN);
4820 expand_dec (remainder, tem);
4821 emit_label (label);
4822 }
4823 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4824
4825 default:
4826 gcc_unreachable ();
4827 }
4828
4829 if (quotient == 0)
4830 {
4831 if (target && GET_MODE (target) != compute_mode)
4832 target = 0;
4833
4834 if (rem_flag)
4835 {
4836 /* Try to produce the remainder without producing the quotient.
4837 If we seem to have a divmod pattern that does not require widening,
4838 don't try widening here. We should really have a WIDEN argument
4839 to expand_twoval_binop, since what we'd really like to do here is
4840 1) try a mod insn in compute_mode
4841 2) try a divmod insn in compute_mode
4842 3) try a div insn in compute_mode and multiply-subtract to get
4843 remainder
4844 4) try the same things with widening allowed. */
4845 remainder
4846 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4847 op0, op1, target,
4848 unsignedp,
4849 ((optab_handler (optab2, compute_mode)
4850 != CODE_FOR_nothing)
4851 ? OPTAB_DIRECT : OPTAB_WIDEN));
4852 if (remainder == 0)
4853 {
4854 /* No luck there. Can we do remainder and divide at once
4855 without a library call? */
4856 remainder = gen_reg_rtx (compute_mode);
4857 if (! expand_twoval_binop ((unsignedp
4858 ? udivmod_optab
4859 : sdivmod_optab),
4860 op0, op1,
4861 NULL_RTX, remainder, unsignedp))
4862 remainder = 0;
4863 }
4864
4865 if (remainder)
4866 return gen_lowpart (mode, remainder);
4867 }
4868
4869 /* Produce the quotient. Try a quotient insn, but not a library call.
4870 If we have a divmod in this mode, use it in preference to widening
4871 the div (for this test we assume it will not fail). Note that optab2
4872 is set to the one of the two optabs that the call below will use. */
4873 quotient
4874 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4875 op0, op1, rem_flag ? NULL_RTX : target,
4876 unsignedp,
4877 ((optab_handler (optab2, compute_mode)
4878 != CODE_FOR_nothing)
4879 ? OPTAB_DIRECT : OPTAB_WIDEN));
4880
4881 if (quotient == 0)
4882 {
4883 /* No luck there. Try a quotient-and-remainder insn,
4884 keeping the quotient alone. */
4885 quotient = gen_reg_rtx (compute_mode);
4886 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4887 op0, op1,
4888 quotient, NULL_RTX, unsignedp))
4889 {
4890 quotient = 0;
4891 if (! rem_flag)
4892 /* Still no luck. If we are not computing the remainder,
4893 use a library call for the quotient. */
4894 quotient = sign_expand_binop (compute_mode,
4895 udiv_optab, sdiv_optab,
4896 op0, op1, target,
4897 unsignedp, OPTAB_LIB_WIDEN);
4898 }
4899 }
4900 }
4901
4902 if (rem_flag)
4903 {
4904 if (target && GET_MODE (target) != compute_mode)
4905 target = 0;
4906
4907 if (quotient == 0)
4908 {
4909 /* No divide instruction either. Use library for remainder. */
4910 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4911 op0, op1, target,
4912 unsignedp, OPTAB_LIB_WIDEN);
4913 /* No remainder function. Try a quotient-and-remainder
4914 function, keeping the remainder. */
4915 if (!remainder)
4916 {
4917 remainder = gen_reg_rtx (compute_mode);
4918 if (!expand_twoval_binop_libfunc
4919 (unsignedp ? udivmod_optab : sdivmod_optab,
4920 op0, op1,
4921 NULL_RTX, remainder,
4922 unsignedp ? UMOD : MOD))
4923 remainder = NULL_RTX;
4924 }
4925 }
4926 else
4927 {
4928 /* We divided. Now finish doing X - Y * (X / Y). */
4929 remainder = expand_mult (compute_mode, quotient, op1,
4930 NULL_RTX, unsignedp);
4931 remainder = expand_binop (compute_mode, sub_optab, op0,
4932 remainder, target, unsignedp,
4933 OPTAB_LIB_WIDEN);
4934 }
4935 }
4936
4937 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4938 }
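/* A typical call, taken from the signed power-of-two case above:

     quotient = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, op0,
                               gen_int_mode (abs_d, compute_mode),
                               NULL_RTX, 0);

   REM_FLAG 0 requests the quotient, CODE selects the rounding fix-ups, and a
   zero TARGET lets the function pick a suitable pseudo.  */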
4939 \f
4940 /* Return a tree node with data type TYPE, describing the value of X.
4941 Usually this is a VAR_DECL, if there is no obvious better choice.
4942 X may be an expression; however, we only support those expressions
4943 generated by loop.c. */
4944
4945 tree
4946 make_tree (tree type, rtx x)
4947 {
4948 tree t;
4949
4950 switch (GET_CODE (x))
4951 {
4952 case CONST_INT:
4953 {
4954 HOST_WIDE_INT hi = 0;
4955
4956 if (INTVAL (x) < 0
4957 && !(TYPE_UNSIGNED (type)
4958 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4959 < HOST_BITS_PER_WIDE_INT)))
4960 hi = -1;
4961
4962 t = build_int_cst_wide (type, INTVAL (x), hi);
4963
4964 return t;
4965 }
4966
4967 case CONST_DOUBLE:
4968 if (GET_MODE (x) == VOIDmode)
4969 t = build_int_cst_wide (type,
4970 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4971 else
4972 {
4973 REAL_VALUE_TYPE d;
4974
4975 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4976 t = build_real (type, d);
4977 }
4978
4979 return t;
4980
4981 case CONST_VECTOR:
4982 {
4983 int units = CONST_VECTOR_NUNITS (x);
4984 tree itype = TREE_TYPE (type);
4985 tree t = NULL_TREE;
4986 int i;
4987
4988
4989 /* Build a tree with vector elements. */
4990 for (i = units - 1; i >= 0; --i)
4991 {
4992 rtx elt = CONST_VECTOR_ELT (x, i);
4993 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4994 }
4995
4996 return build_vector (type, t);
4997 }
4998
4999 case PLUS:
5000 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5001 make_tree (type, XEXP (x, 1)));
5002
5003 case MINUS:
5004 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5005 make_tree (type, XEXP (x, 1)));
5006
5007 case NEG:
5008 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5009
5010 case MULT:
5011 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5012 make_tree (type, XEXP (x, 1)));
5013
5014 case ASHIFT:
5015 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5016 make_tree (type, XEXP (x, 1)));
5017
5018 case LSHIFTRT:
5019 t = unsigned_type_for (type);
5020 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5021 make_tree (t, XEXP (x, 0)),
5022 make_tree (type, XEXP (x, 1))));
5023
5024 case ASHIFTRT:
5025 t = signed_type_for (type);
5026 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5027 make_tree (t, XEXP (x, 0)),
5028 make_tree (type, XEXP (x, 1))));
5029
5030 case DIV:
5031 if (TREE_CODE (type) != REAL_TYPE)
5032 t = signed_type_for (type);
5033 else
5034 t = type;
5035
5036 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5037 make_tree (t, XEXP (x, 0)),
5038 make_tree (t, XEXP (x, 1))));
5039 case UDIV:
5040 t = unsigned_type_for (type);
5041 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5042 make_tree (t, XEXP (x, 0)),
5043 make_tree (t, XEXP (x, 1))));
5044
5045 case SIGN_EXTEND:
5046 case ZERO_EXTEND:
5047 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5048 GET_CODE (x) == ZERO_EXTEND);
5049 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5050
5051 case CONST:
5052 return make_tree (type, XEXP (x, 0));
5053
5054 case SYMBOL_REF:
5055 t = SYMBOL_REF_DECL (x);
5056 if (t)
5057 return fold_convert (type, build_fold_addr_expr (t));
5058 /* else fall through. */
5059
5060 default:
5061 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5062
5063 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5064 address mode to pointer mode. */
5065 if (POINTER_TYPE_P (type))
5066 x = convert_memory_address_addr_space
5067 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5068
5069 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5070 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5071 t->decl_with_rtl.rtl = x;
5072
5073 return t;
5074 }
5075 }
5076 \f
5077 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5078 and returning TARGET.
5079
5080 If TARGET is 0, a pseudo-register or constant is returned. */
5081
5082 rtx
5083 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5084 {
5085 rtx tem = 0;
5086
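/* If both operands are constants (VOIDmode), try to fold the AND at
compile time; otherwise emit an AND instruction.  */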
5087 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5088 tem = simplify_binary_operation (AND, mode, op0, op1);
5089 if (tem == 0)
5090 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5091
5092 if (target == 0)
5093 target = tem;
5094 else if (tem != target)
5095 emit_move_insn (target, tem);
5096 return target;
5097 }
5098
5099 /* Helper function for emit_store_flag. */
5100 static rtx
5101 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5102 enum machine_mode mode, enum machine_mode compare_mode,
5103 int unsignedp, rtx x, rtx y, int normalizep,
5104 enum machine_mode target_mode)
5105 {
5106 rtx op0, last, comparison, subtarget, pattern;
5107 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5108
5109 last = get_last_insn ();
5110 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5111 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5112 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
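/* Give up, deleting anything prepare_operand emitted, if either operand
or the comparison rtx fails the cstore pattern's predicates.  */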
5113 if (!x || !y
5114 || !insn_data[icode].operand[2].predicate
5115 (x, insn_data[icode].operand[2].mode)
5116 || !insn_data[icode].operand[3].predicate
5117 (y, insn_data[icode].operand[3].mode)
5118 || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
5119 {
5120 delete_insns_since (last);
5121 return NULL_RTX;
5122 }
5123
5124 if (target_mode == VOIDmode)
5125 target_mode = result_mode;
5126 if (!target)
5127 target = gen_reg_rtx (target_mode);
5128
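/* Compute the result into a fresh pseudo if we are optimizing (so each
scc insn gets its own pseudo) or if TARGET does not satisfy the
pattern's output predicate.  */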
5129 if (optimize
5130 || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
5131 subtarget = gen_reg_rtx (result_mode);
5132 else
5133 subtarget = target;
5134
5135 pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
5136 if (!pattern)
5137 return NULL_RTX;
5138 emit_insn (pattern);
5139
5140 /* If we are converting to a wider mode, first convert to
5141 TARGET_MODE, then normalize. This produces better combining
5142 opportunities on machines that have a SIGN_EXTRACT when we are
5143 testing a single bit. This mostly benefits the 68k.
5144
5145 If STORE_FLAG_VALUE does not have the sign bit set when
5146 interpreted in MODE, we can do this conversion as unsigned, which
5147 is usually more efficient. */
5148 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5149 {
5150 convert_move (target, subtarget,
5151 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5152 && 0 == (STORE_FLAG_VALUE
5153 & ((HOST_WIDE_INT) 1
5154 << (GET_MODE_BITSIZE (result_mode) -1))));
5155 op0 = target;
5156 result_mode = target_mode;
5157 }
5158 else
5159 op0 = subtarget;
5160
5161 /* If we want to keep subexpressions around, don't reuse our last
5162 target. */
5163 if (optimize)
5164 subtarget = 0;
5165
5166 /* Now normalize to the proper value in MODE. Sometimes we don't
5167 have to do anything. */
5168 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5169 ;
5170 /* STORE_FLAG_VALUE might be the most negative number, so write
5171 the comparison this way to avoid a compile-time warning.  */
5172 else if (- normalizep == STORE_FLAG_VALUE)
5173 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5174
5175 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5176 it hard to use a value of just the sign bit due to ANSI integer
5177 constant typing rules. */
5178 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5179 && (STORE_FLAG_VALUE
5180 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5181 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5182 size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
5183 normalizep == 1);
5184 else
5185 {
5186 gcc_assert (STORE_FLAG_VALUE & 1);
5187
5188 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5189 if (normalizep == -1)
5190 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5191 }
5192
5193 /* If we were converting to a smaller mode, do the conversion now. */
5194 if (target_mode != result_mode)
5195 {
5196 convert_move (target, op0, 0);
5197 return target;
5198 }
5199 else
5200 return op0;
5201 }
5202
5203
5204 /* A subroutine of emit_store_flag that includes only the "tricks" that
5205 do not need a recursive call.  These are kept separate to avoid
5206 infinite loops.  */
5207
5208 static rtx
5209 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5210 enum machine_mode mode, int unsignedp, int normalizep,
5211 enum machine_mode target_mode)
5212 {
5213 rtx subtarget;
5214 enum insn_code icode;
5215 enum machine_mode compare_mode;
5216 enum mode_class mclass;
5217 enum rtx_code scode;
5218 rtx tem;
5219
5220 if (unsignedp)
5221 code = unsigned_condition (code);
5222 scode = swap_condition (code);
5223
5224 /* If one operand is constant, make it the second one. Only do this
5225 if the other operand is not constant as well. */
5226
5227 if (swap_commutative_operands_p (op0, op1))
5228 {
5229 tem = op0;
5230 op0 = op1;
5231 op1 = tem;
5232 code = swap_condition (code);
5233 }
5234
5235 if (mode == VOIDmode)
5236 mode = GET_MODE (op0);
5237
5238 /* For some comparisons with 1 and -1, we can convert this to
5239 comparisons with zero. This will often produce more opportunities for
5240 store-flag insns. */
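/* For example, X < 1 becomes X <= 0, and for the unsigned codes,
X >= 1 becomes X != 0 and X < 1 becomes X == 0.  */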
5241
5242 switch (code)
5243 {
5244 case LT:
5245 if (op1 == const1_rtx)
5246 op1 = const0_rtx, code = LE;
5247 break;
5248 case LE:
5249 if (op1 == constm1_rtx)
5250 op1 = const0_rtx, code = LT;
5251 break;
5252 case GE:
5253 if (op1 == const1_rtx)
5254 op1 = const0_rtx, code = GT;
5255 break;
5256 case GT:
5257 if (op1 == constm1_rtx)
5258 op1 = const0_rtx, code = GE;
5259 break;
5260 case GEU:
5261 if (op1 == const1_rtx)
5262 op1 = const0_rtx, code = NE;
5263 break;
5264 case LTU:
5265 if (op1 == const1_rtx)
5266 op1 = const0_rtx, code = EQ;
5267 break;
5268 default:
5269 break;
5270 }
5271
5272 /* If we are comparing a double-word integer with zero or -1, we can
5273 convert the comparison into one involving a single word. */
5274 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5275 && GET_MODE_CLASS (mode) == MODE_INT
5276 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5277 {
5278 if ((code == EQ || code == NE)
5279 && (op1 == const0_rtx || op1 == constm1_rtx))
5280 {
5281 rtx op00, op01;
5282
5283 /* Do a logical OR or AND of the two words and compare the
5284 result. */
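/* The double-word value is zero iff the inclusive OR of its two words
is zero, and is -1 iff the AND of its two words is -1.  */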
5285 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5286 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5287 tem = expand_binop (word_mode,
5288 op1 == const0_rtx ? ior_optab : and_optab,
5289 op00, op01, NULL_RTX, unsignedp,
5290 OPTAB_DIRECT);
5291
5292 if (tem != 0)
5293 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5294 unsignedp, normalizep);
5295 }
5296 else if ((code == LT || code == GE) && op1 == const0_rtx)
5297 {
5298 rtx op0h;
5299
5300 /* If we are testing the sign bit, we can just test the high word.  */
5301 op0h = simplify_gen_subreg (word_mode, op0, mode,
5302 subreg_highpart_offset (word_mode,
5303 mode));
5304 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5305 unsignedp, normalizep);
5306 }
5307 else
5308 tem = NULL_RTX;
5309
5310 if (tem)
5311 {
5312 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5313 return tem;
5314 if (!target)
5315 target = gen_reg_rtx (target_mode);
5316
5317 convert_move (target, tem,
5318 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5319 & ((HOST_WIDE_INT) 1
5320 << (GET_MODE_BITSIZE (word_mode) -1))));
5321 return target;
5322 }
5323 }
5324
5325 /* If this is A < 0 or A >= 0, we can do this by taking the one's
5326 complement of A (for GE) and shifting the sign bit to the low bit.  */
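/* For instance, for A >= 0 with STORE_FLAG_VALUE == 1, A is complemented
and the (now inverted) sign bit is shifted down to bit 0, yielding 1
exactly when A's sign bit was clear.  */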
5327 if (op1 == const0_rtx && (code == LT || code == GE)
5328 && GET_MODE_CLASS (mode) == MODE_INT
5329 && (normalizep || STORE_FLAG_VALUE == 1
5330 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5331 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5332 == ((unsigned HOST_WIDE_INT) 1
5333 << (GET_MODE_BITSIZE (mode) - 1))))))
5334 {
5335 subtarget = target;
5336
5337 if (!target)
5338 target_mode = mode;
5339
5340 /* If the result is to be wider than OP0, it is best to convert it
5341 first. If it is to be narrower, it is *incorrect* to convert it
5342 first. */
5343 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5344 {
5345 op0 = convert_modes (target_mode, mode, op0, 0);
5346 mode = target_mode;
5347 }
5348
5349 if (target_mode != mode)
5350 subtarget = 0;
5351
5352 if (code == GE)
5353 op0 = expand_unop (mode, one_cmpl_optab, op0,
5354 ((STORE_FLAG_VALUE == 1 || normalizep)
5355 ? 0 : subtarget), 0);
5356
5357 if (STORE_FLAG_VALUE == 1 || normalizep)
5358 /* If we are supposed to produce a 0/1 value, we want to do
5359 a logical shift from the sign bit to the low-order bit; for
5360 a -1/0 value, we do an arithmetic shift. */
5361 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5362 size_int (GET_MODE_BITSIZE (mode) - 1),
5363 subtarget, normalizep != -1);
5364
5365 if (mode != target_mode)
5366 op0 = convert_modes (target_mode, mode, op0, 0);
5367
5368 return op0;
5369 }
5370
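/* Try a cstore pattern in MODE itself and then in each wider mode,
stopping at the first one the target provides; comparisons in MODE_CC
always use the CCmode pattern.  */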
5371 mclass = GET_MODE_CLASS (mode);
5372 for (compare_mode = mode; compare_mode != VOIDmode;
5373 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5374 {
5375 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5376 icode = optab_handler (cstore_optab, optab_mode);
5377 if (icode != CODE_FOR_nothing)
5378 {
5379 do_pending_stack_adjust ();
5380 tem = emit_cstore (target, icode, code, mode, compare_mode,
5381 unsignedp, op0, op1, normalizep, target_mode);
5382 if (tem)
5383 return tem;
5384
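/* For floating-point comparisons, also try the equivalent comparison
with the condition and the operands both swapped.  */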
5385 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5386 {
5387 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5388 unsignedp, op1, op0, normalizep, target_mode);
5389 if (tem)
5390 return tem;
5391 }
5392 break;
5393 }
5394 }
5395
5396 return 0;
5397 }
5398
5399 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5400 and storing in TARGET. Normally return TARGET.
5401 Return 0 if that cannot be done.
5402
5403 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5404 it is VOIDmode, they cannot both be CONST_INT.
5405
5406 UNSIGNEDP is for the case where we have to widen the operands
5407 to perform the operation. It says to use zero-extension.
5408
5409 NORMALIZEP is 1 if we should convert the result to be either zero
5410 or one.  NORMALIZEP is -1 if we should convert the result to be
5411 either zero or -1. If NORMALIZEP is zero, the result will be left
5412 "raw" out of the scc insn. */
5413
5414 rtx
5415 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5416 enum machine_mode mode, int unsignedp, int normalizep)
5417 {
5418 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5419 enum rtx_code rcode;
5420 rtx subtarget;
5421 rtx tem, last, trueval;
5422
5423 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5424 target_mode);
5425 if (tem)
5426 return tem;
5427
5428 /* If we reached here, we can't do this with a scc insn; however, there
5429 are some comparisons that can be done in other ways.  Don't do any
5430 of these cases if branches are very cheap.  */
5431 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5432 return 0;
5433
5434 /* See what we need to return. We can only return a 1, -1, or the
5435 sign bit. */
5436
5437 if (normalizep == 0)
5438 {
5439 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5440 normalizep = STORE_FLAG_VALUE;
5441
5442 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5443 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5444 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5445 ;
5446 else
5447 return 0;
5448 }
5449
5450 last = get_last_insn ();
5451
5452 /* If optimizing, use different pseudo registers for each insn, instead
5453 of reusing the same pseudo. This leads to better CSE, but slows
5454 down the compiler, since there are more pseudos.  */
5455 subtarget = (!optimize
5456 && (target_mode == mode)) ? target : NULL_RTX;
5457 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5458
5459 /* For floating-point comparisons, try the reverse comparison or try
5460 changing the "orderedness" of the comparison. */
5461 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5462 {
5463 enum rtx_code first_code;
5464 bool and_them;
5465
5466 rcode = reverse_condition_maybe_unordered (code);
5467 if (can_compare_p (rcode, mode, ccp_store_flag)
5468 && (code == ORDERED || code == UNORDERED
5469 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5470 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5471 {
5472 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5473 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5474
5475 /* For the reverse comparison, use either an addition or an XOR.  */
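/* For instance, with STORE_FLAG_VALUE == 1 and NORMALIZEP == -1, the
reverse comparison yields 0 or 1, and adding -1 turns that into the
desired -1/0 result for the original comparison.  */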
5476 if (want_add
5477 && rtx_cost (GEN_INT (normalizep), PLUS,
5478 optimize_insn_for_speed_p ()) == 0)
5479 {
5480 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5481 STORE_FLAG_VALUE, target_mode);
5482 if (tem)
5483 return expand_binop (target_mode, add_optab, tem,
5484 GEN_INT (normalizep),
5485 target, 0, OPTAB_WIDEN);
5486 }
5487 else if (!want_add
5488 && rtx_cost (trueval, XOR,
5489 optimize_insn_for_speed_p ()) == 0)
5490 {
5491 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5492 normalizep, target_mode);
5493 if (tem)
5494 return expand_binop (target_mode, xor_optab, tem, trueval,
5495 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5496 }
5497 }
5498
5499 delete_insns_since (last);
5500
5501 /* ORDERED and UNORDERED cannot be split; only the trick above applies to them.  */
5502 if (code == ORDERED || code == UNORDERED)
5503 return 0;
5504
5505 and_them = split_comparison (code, mode, &first_code, &code);
5506
5507 /* If there are no NaNs, the first comparison should always fall through.
5508 Effectively change the comparison to the other one. */
5509 if (!HONOR_NANS (mode))
5510 {
5511 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5512 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5513 target_mode);
5514 }
5515
5516 #ifdef HAVE_conditional_move
5517 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5518 conditional move. */
5519 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5520 normalizep, target_mode);
5521 if (tem == 0)
5522 return 0;
5523
5524 if (and_them)
5525 tem = emit_conditional_move (target, code, op0, op1, mode,
5526 tem, const0_rtx, GET_MODE (tem), 0);
5527 else
5528 tem = emit_conditional_move (target, code, op0, op1, mode,
5529 trueval, tem, GET_MODE (tem), 0);
5530
5531 if (tem == 0)
5532 delete_insns_since (last);
5533 return tem;
5534 #else
5535 return 0;
5536 #endif
5537 }
5538
5539 /* The remaining tricks only apply to integer comparisons. */
5540
5541 if (GET_MODE_CLASS (mode) != MODE_INT)
5542 return 0;
5543
5544 /* If this is an equality comparison of integers, we can try to exclusive-or
5545 (or subtract) the two operands and use a recursive call to try the
5546 comparison with zero. Don't do any of these cases if branches are
5547 very cheap. */
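/* A == B holds exactly when (A ^ B) == 0 (or equally (A - B) == 0),
and likewise for A != B.  */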
5548
5549 if ((code == EQ || code == NE) && op1 != const0_rtx)
5550 {
5551 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5552 OPTAB_WIDEN);
5553
5554 if (tem == 0)
5555 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5556 OPTAB_WIDEN);
5557 if (tem != 0)
5558 tem = emit_store_flag (target, code, tem, const0_rtx,
5559 mode, unsignedp, normalizep);
5560 if (tem != 0)
5561 return tem;
5562
5563 delete_insns_since (last);
5564 }
5565
5566 /* For integer comparisons, try the reverse comparison.  However, for
5567 small X, if we would have to extend it anyway, implementing "X != 0"
5568 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
5569 rcode = reverse_condition (code);
5570 if (can_compare_p (rcode, mode, ccp_store_flag)
5571 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5572 && code == NE
5573 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5574 && op1 == const0_rtx))
5575 {
5576 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5577 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5578
5579 /* Again, for the reverse comparison, use either an addition or an XOR.  */
5580 if (want_add
5581 && rtx_cost (GEN_INT (normalizep), PLUS,
5582 optimize_insn_for_speed_p ()) == 0)
5583 {
5584 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5585 STORE_FLAG_VALUE, target_mode);
5586 if (tem != 0)
5587 tem = expand_binop (target_mode, add_optab, tem,
5588 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5589 }
5590 else if (!want_add
5591 && rtx_cost (trueval, XOR,
5592 optimize_insn_for_speed_p ()) == 0)
5593 {
5594 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5595 normalizep, target_mode);
5596 if (tem != 0)
5597 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5598 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5599 }
5600
5601 if (tem != 0)
5602 return tem;
5603 delete_insns_since (last);
5604 }
5605
5606 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5607 the constant zero.  Reject all other comparisons at this point.  Only
5608 do LE and GT if branches are expensive, since the sequences below for
5609 them are themselves expensive on 2-operand machines.  */
5610
5611 if (op1 != const0_rtx
5612 || (code != EQ && code != NE
5613 && (BRANCH_COST (optimize_insn_for_speed_p (),
5614 false) <= 1 || (code != LE && code != GT))))
5615 return 0;
5616
5617 /* Try to put the result of the comparison in the sign bit. Assume we can't
5618 do the necessary operation below. */
5619
5620 tem = 0;
5621
5622 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5623 the sign bit set. */
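/* For instance, in 8-bit arithmetic: 3 | 2 = 3 (sign clear, so 3 <= 0 is
false), 0 | -1 = -1 (sign set), and for negative A the sign bit of A
itself is already set.  */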
5624
5625 if (code == LE)
5626 {
5627 /* This is destructive, so SUBTARGET can't be OP0. */
5628 if (rtx_equal_p (subtarget, op0))
5629 subtarget = 0;
5630
5631 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5632 OPTAB_WIDEN);
5633 if (tem)
5634 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5635 OPTAB_WIDEN);
5636 }
5637
5638 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5639 number of bits in the mode of OP0, minus one.  */
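/* For instance, in 32-bit arithmetic: A = 5 gives (5 >> 31) - 5 = -5, so
the sign bit is set; A = 0 gives 0; A = -3 gives -1 - (-3) = 2, sign
clear.  */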
5640
5641 if (code == GT)
5642 {
5643 if (rtx_equal_p (subtarget, op0))
5644 subtarget = 0;
5645
5646 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5647 size_int (GET_MODE_BITSIZE (mode) - 1),
5648 subtarget, 0);
5649 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5650 OPTAB_WIDEN);
5651 }
5652
5653 if (code == EQ || code == NE)
5654 {
5655 /* For EQ or NE, one way to do the comparison is to apply an operation
5656 that converts the operand into a positive number if it is nonzero
5657 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5658 for NE we negate. This puts the result in the sign bit. Then we
5659 normalize with a shift, if needed.
5660
5661 Two operations that can do the above actions are ABS and FFS, so try
5662 them. If that doesn't work, and MODE is smaller than a full word,
5663 we can use zero-extension to the wider mode (an unsigned conversion)
5664 as the operation. */
5665
5666 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5667 that is compensated by the subsequent overflow when subtracting
5668 one / negating. */
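/* For instance, with EQ: ABS (-5) - 1 = 4 (sign clear, i.e. "false"),
while ABS (0) - 1 = -1 (sign set, i.e. "true").  */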
5669
5670 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5671 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5672 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5673 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5674 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5675 {
5676 tem = convert_modes (word_mode, mode, op0, 1);
5677 mode = word_mode;
5678 }
5679
5680 if (tem != 0)
5681 {
5682 if (code == EQ)
5683 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5684 0, OPTAB_WIDEN);
5685 else
5686 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5687 }
5688
5689 /* If we couldn't do it that way, for NE we can "or" the two's complement
5690 of the value with itself. For EQ, we take the one's complement of
5691 that "or", which is an extra insn, so we only handle EQ if branches
5692 are expensive. */
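/* (-A) | A has the sign bit set exactly when A is nonzero, including
for A == INT_MIN.  */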
5693
5694 if (tem == 0
5695 && (code == NE
5696 || BRANCH_COST (optimize_insn_for_speed_p (),
5697 false) > 1))
5698 {
5699 if (rtx_equal_p (subtarget, op0))
5700 subtarget = 0;
5701
5702 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5703 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5704 OPTAB_WIDEN);
5705
5706 if (tem && code == EQ)
5707 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5708 }
5709 }
5710
5711 if (tem && normalizep)
5712 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5713 size_int (GET_MODE_BITSIZE (mode) - 1),
5714 subtarget, normalizep == 1);
5715
5716 if (tem)
5717 {
5718 if (!target)
5719 ;
5720 else if (GET_MODE (tem) != target_mode)
5721 {
5722 convert_move (target, tem, 0);
5723 tem = target;
5724 }
5725 else if (!subtarget)
5726 {
5727 emit_move_insn (target, tem);
5728 tem = target;
5729 }
5730 }
5731 else
5732 delete_insns_since (last);
5733
5734 return tem;
5735 }
5736
5737 /* Like emit_store_flag, but always succeeds. */
5738
5739 rtx
5740 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5741 enum machine_mode mode, int unsignedp, int normalizep)
5742 {
5743 rtx tem, label;
5744 rtx trueval, falseval;
5745
5746 /* First see if emit_store_flag can do the job. */
5747 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5748 if (tem != 0)
5749 return tem;
5750
5751 if (!target)
5752 target = gen_reg_rtx (word_mode);
5753
5754 /* If this failed, we have to do this with set/compare/jump/set code.
5755 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5756 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5757 if (code == NE
5758 && GET_MODE_CLASS (mode) == MODE_INT
5759 && REG_P (target)
5760 && op0 == target
5761 && op1 == const0_rtx)
5762 {
5763 label = gen_label_rtx ();
5764 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5765 mode, NULL_RTX, NULL_RTX, label, -1);
5766 emit_move_insn (target, trueval);
5767 emit_label (label);
5768 return target;
5769 }
5770
5771 if (!REG_P (target)
5772 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5773 target = gen_reg_rtx (GET_MODE (target));
5774
5775 /* Jump in the right direction if the target cannot implement CODE
5776 but can jump on its reverse condition. */
5777 falseval = const0_rtx;
5778 if (! can_compare_p (code, mode, ccp_jump)
5779 && (! FLOAT_MODE_P (mode)
5780 || code == ORDERED || code == UNORDERED
5781 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5782 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5783 {
5784 enum rtx_code rcode;
5785 if (FLOAT_MODE_P (mode))
5786 rcode = reverse_condition_maybe_unordered (code);
5787 else
5788 rcode = reverse_condition (code);
5789
5790 /* Canonicalize to UNORDERED for the libcall. */
5791 if (can_compare_p (rcode, mode, ccp_jump)
5792 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5793 {
5794 falseval = trueval;
5795 trueval = const0_rtx;
5796 code = rcode;
5797 }
5798 }
5799
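/* Emit: TARGET = TRUEVAL; if (OP0 CODE OP1) goto LABEL;
TARGET = FALSEVAL; LABEL:.  */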
5800 emit_move_insn (target, trueval);
5801 label = gen_label_rtx ();
5802 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5803 NULL_RTX, label, -1);
5804
5805 emit_move_insn (target, falseval);
5806 emit_label (label);
5807
5808 return target;
5809 }
5810 \f
5811 /* Perform a possibly multi-word comparison and conditional jump to LABEL
5812 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
5813 now a thin wrapper around do_compare_rtx_and_jump.  */
5814
5815 static void
5816 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5817 rtx label)
5818 {
5819 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5820 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5821 NULL_RTX, NULL_RTX, label, -1);
5822 }