gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 2011
6 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
41 #include "expmed.h"
42
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
47
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx);
51 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, rtx);
53 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
54 unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT,
56 unsigned HOST_WIDE_INT, rtx, int, bool);
57 static rtx mask_rtx (enum machine_mode, int, int, int);
58 static rtx lshift_value (enum machine_mode, rtx, int, int);
59 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
60 unsigned HOST_WIDE_INT, int);
61 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
62 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
63 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
64
65 /* Test whether a value is zero or a power of two. */
66 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
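/* For instance, 8 & 7 == 0 and 0 & -1 == 0, while 12 & 11 == 8,
   so only zero and exact powers of two pass the test.  */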
67
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
71
72
73 /* Reduce conditional compilation elsewhere. */
74 #ifndef HAVE_insv
75 #define HAVE_insv 0
76 #define CODE_FOR_insv CODE_FOR_nothing
77 #define gen_insv(a,b,c,d) NULL_RTX
78 #endif
79 #ifndef HAVE_extv
80 #define HAVE_extv 0
81 #define CODE_FOR_extv CODE_FOR_nothing
82 #define gen_extv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extzv
85 #define HAVE_extzv 0
86 #define CODE_FOR_extzv CODE_FOR_nothing
87 #define gen_extzv(a,b,c,d) NULL_RTX
88 #endif
89
90 void
91 init_expmed (void)
92 {
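  /* Scratch RTL templates.  Each rtx_def is followed by enough rtunion
     slots for its operands; the templates are re-moded in the loops below
     so that rtx_cost can be queried for every operation shape without
     generating fresh RTL for each mode and speed combination.  */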
93 struct
94 {
95 struct rtx_def reg; rtunion reg_fld[2];
96 struct rtx_def plus; rtunion plus_fld1;
97 struct rtx_def neg;
98 struct rtx_def mult; rtunion mult_fld1;
99 struct rtx_def sdiv; rtunion sdiv_fld1;
100 struct rtx_def udiv; rtunion udiv_fld1;
101 struct rtx_def zext;
102 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
103 struct rtx_def smod_32; rtunion smod_32_fld1;
104 struct rtx_def wide_mult; rtunion wide_mult_fld1;
105 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
106 struct rtx_def wide_trunc;
107 struct rtx_def shift; rtunion shift_fld1;
108 struct rtx_def shift_mult; rtunion shift_mult_fld1;
109 struct rtx_def shift_add; rtunion shift_add_fld1;
110 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
111 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
112 } all;
113
114 rtx pow2[MAX_BITS_PER_WORD];
115 rtx cint[MAX_BITS_PER_WORD];
116 int m, n;
117 enum machine_mode mode, wider_mode;
118 int speed;
119
120
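  /* Pre-build CONST_INTs for the shift counts (cint) and the matching
     power-of-two multipliers (pow2) used to fill the shift cost tables.  */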
121 for (m = 1; m < MAX_BITS_PER_WORD; m++)
122 {
123 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
124 cint[m] = GEN_INT (m);
125 }
126 memset (&all, 0, sizeof all);
127
128 PUT_CODE (&all.reg, REG);
129 /* Avoid using hard regs in ways which may be unsupported. */
130 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
131
132 PUT_CODE (&all.plus, PLUS);
133 XEXP (&all.plus, 0) = &all.reg;
134 XEXP (&all.plus, 1) = &all.reg;
135
136 PUT_CODE (&all.neg, NEG);
137 XEXP (&all.neg, 0) = &all.reg;
138
139 PUT_CODE (&all.mult, MULT);
140 XEXP (&all.mult, 0) = &all.reg;
141 XEXP (&all.mult, 1) = &all.reg;
142
143 PUT_CODE (&all.sdiv, DIV);
144 XEXP (&all.sdiv, 0) = &all.reg;
145 XEXP (&all.sdiv, 1) = &all.reg;
146
147 PUT_CODE (&all.udiv, UDIV);
148 XEXP (&all.udiv, 0) = &all.reg;
149 XEXP (&all.udiv, 1) = &all.reg;
150
151 PUT_CODE (&all.sdiv_32, DIV);
152 XEXP (&all.sdiv_32, 0) = &all.reg;
153 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
154
155 PUT_CODE (&all.smod_32, MOD);
156 XEXP (&all.smod_32, 0) = &all.reg;
157 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
158
159 PUT_CODE (&all.zext, ZERO_EXTEND);
160 XEXP (&all.zext, 0) = &all.reg;
161
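  /* wide_mult, wide_lshr and wide_trunc together model a widening multiply
     whose high half is taken back in the narrow mode:
     (truncate (lshiftrt (mult (zext r) (zext r)) bitsize)).
     They feed mul_widen_cost and mul_highpart_cost below.  */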
162 PUT_CODE (&all.wide_mult, MULT);
163 XEXP (&all.wide_mult, 0) = &all.zext;
164 XEXP (&all.wide_mult, 1) = &all.zext;
165
166 PUT_CODE (&all.wide_lshr, LSHIFTRT);
167 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
168
169 PUT_CODE (&all.wide_trunc, TRUNCATE);
170 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
171
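  /* shift is r << m; shift_mult is the same shift written as r * 2**m;
     shift_add, shift_sub0 and shift_sub1 are (r * 2**m) + r,
     (r * 2**m) - r and r - (r * 2**m), the shapes whose costs go into
     the shiftadd/shiftsub tables below.  */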
172 PUT_CODE (&all.shift, ASHIFT);
173 XEXP (&all.shift, 0) = &all.reg;
174
175 PUT_CODE (&all.shift_mult, MULT);
176 XEXP (&all.shift_mult, 0) = &all.reg;
177
178 PUT_CODE (&all.shift_add, PLUS);
179 XEXP (&all.shift_add, 0) = &all.shift_mult;
180 XEXP (&all.shift_add, 1) = &all.reg;
181
182 PUT_CODE (&all.shift_sub0, MINUS);
183 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
184 XEXP (&all.shift_sub0, 1) = &all.reg;
185
186 PUT_CODE (&all.shift_sub1, MINUS);
187 XEXP (&all.shift_sub1, 0) = &all.reg;
188 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
189
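  /* Fill every cost table twice: index 0 holds the costs used when
     optimizing for size, index 1 those used when optimizing for speed.
     crtl->maybe_hot_insn_p is set to match for the benefit of the
     target cost hooks.  */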
190 for (speed = 0; speed < 2; speed++)
191 {
192 crtl->maybe_hot_insn_p = speed;
193 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
194
195 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
196 mode != VOIDmode;
197 mode = GET_MODE_WIDER_MODE (mode))
198 {
199 PUT_MODE (&all.reg, mode);
200 PUT_MODE (&all.plus, mode);
201 PUT_MODE (&all.neg, mode);
202 PUT_MODE (&all.mult, mode);
203 PUT_MODE (&all.sdiv, mode);
204 PUT_MODE (&all.udiv, mode);
205 PUT_MODE (&all.sdiv_32, mode);
206 PUT_MODE (&all.smod_32, mode);
207 PUT_MODE (&all.wide_trunc, mode);
208 PUT_MODE (&all.shift, mode);
209 PUT_MODE (&all.shift_mult, mode);
210 PUT_MODE (&all.shift_add, mode);
211 PUT_MODE (&all.shift_sub0, mode);
212 PUT_MODE (&all.shift_sub1, mode);
213
214 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
215 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
216 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
217 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
218 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
219
220 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
221 <= 2 * add_cost[speed][mode]);
222 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
223 <= 4 * add_cost[speed][mode]);
224
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
227 {
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
232
233 mul_widen_cost[speed][wider_mode]
234 = rtx_cost (&all.wide_mult, SET, speed);
235 mul_highpart_cost[speed][mode]
236 = rtx_cost (&all.wide_trunc, SET, speed);
237 }
238
239 shift_cost[speed][mode][0] = 0;
240 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
241 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
242
243 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
244 for (m = 1; m < n; m++)
245 {
246 XEXP (&all.shift, 1) = cint[m];
247 XEXP (&all.shift_mult, 1) = pow2[m];
248
249 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
250 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
251 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
252 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
253 }
254 }
255 }
256 if (alg_hash_used_p)
257 memset (alg_hash, 0, sizeof (alg_hash));
258 else
259 alg_hash_used_p = true;
260 default_rtl_profile ();
261 }
262
263 /* Return an rtx representing minus the value of X.
264 MODE is the intended mode of the result,
265 useful if X is a CONST_INT. */
266
267 rtx
268 negate_rtx (enum machine_mode mode, rtx x)
269 {
270 rtx result = simplify_unary_operation (NEG, mode, x, mode);
271
272 if (result == 0)
273 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
274
275 return result;
276 }
277
278 /* Report on the availability of insv/extv/extzv and the desired mode
279 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
280 is false; else the mode of the specified operand. If OPNO is -1,
281 all the caller cares about is whether the insn is available. */
282 enum machine_mode
283 mode_for_extraction (enum extraction_pattern pattern, int opno)
284 {
285 const struct insn_data_d *data;
286
287 switch (pattern)
288 {
289 case EP_insv:
290 if (HAVE_insv)
291 {
292 data = &insn_data[CODE_FOR_insv];
293 break;
294 }
295 return MAX_MACHINE_MODE;
296
297 case EP_extv:
298 if (HAVE_extv)
299 {
300 data = &insn_data[CODE_FOR_extv];
301 break;
302 }
303 return MAX_MACHINE_MODE;
304
305 case EP_extzv:
306 if (HAVE_extzv)
307 {
308 data = &insn_data[CODE_FOR_extzv];
309 break;
310 }
311 return MAX_MACHINE_MODE;
312
313 default:
314 gcc_unreachable ();
315 }
316
317 if (opno == -1)
318 return VOIDmode;
319
320 /* Everyone who uses this function used to follow it with
321 if (result == VOIDmode) result = word_mode; */
322 if (data->operand[opno].mode == VOIDmode)
323 return word_mode;
324 return data->operand[opno].mode;
325 }
326 \f
327 /* A subroutine of store_bit_field, with the same arguments. Return true
328 if the operation could be implemented.
329
330 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
331 no other way of implementing the operation. If FALLBACK_P is false,
332 return false instead. */
333
334 static bool
335 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
336 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
337 rtx value, bool fallback_p)
338 {
339 unsigned int unit
340 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
341 unsigned HOST_WIDE_INT offset, bitpos;
342 rtx op0 = str_rtx;
343 int byte_offset;
344 rtx orig_value;
345
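  /* The mode the insv pattern wants for the value being inserted
     (operand 3), or MAX_MACHINE_MODE if the target has no insv.  */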
346 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
347
348 while (GET_CODE (op0) == SUBREG)
349 {
350 /* The following line once was done only if WORDS_BIG_ENDIAN,
351 but I think that is a mistake. WORDS_BIG_ENDIAN is
352 meaningful at a much higher level; when structures are copied
353 between memory and regs, the higher-numbered regs
354 always get higher addresses. */
355 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
356 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
357
358 byte_offset = 0;
359
360 /* Paradoxical subregs need special handling on big endian machines. */
361 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
362 {
363 int difference = inner_mode_size - outer_mode_size;
364
365 if (WORDS_BIG_ENDIAN)
366 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
367 if (BYTES_BIG_ENDIAN)
368 byte_offset += difference % UNITS_PER_WORD;
369 }
370 else
371 byte_offset = SUBREG_BYTE (op0);
372
373 bitnum += byte_offset * BITS_PER_UNIT;
374 op0 = SUBREG_REG (op0);
375 }
376
377 /* No action is needed if the target is a register and if the field
378 lies completely outside that register. This can occur if the source
379 code contains an out-of-bounds access to a small array. */
380 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
381 return true;
382
383 /* Use vec_set patterns for inserting parts of vectors whenever
384 available. */
385 if (VECTOR_MODE_P (GET_MODE (op0))
386 && !MEM_P (op0)
387 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
388 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
389 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
390 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
391 {
392 struct expand_operand ops[3];
393 enum machine_mode outermode = GET_MODE (op0);
394 enum machine_mode innermode = GET_MODE_INNER (outermode);
395 enum insn_code icode = optab_handler (vec_set_optab, outermode);
396 int pos = bitnum / GET_MODE_BITSIZE (innermode);
397
398 create_fixed_operand (&ops[0], op0);
399 create_input_operand (&ops[1], value, innermode);
400 create_integer_operand (&ops[2], pos);
401 if (maybe_expand_insn (icode, 3, ops))
402 return true;
403 }
404
405 /* If the target is a register, overwriting the entire object, or storing
406 a full-word or multi-word field can be done with just a SUBREG.
407
408 If the target is memory, storing any naturally aligned field can be
409 done with a simple store. For targets that support fast unaligned
410 memory, any naturally sized, unit aligned field can be done directly. */
411
412 offset = bitnum / unit;
413 bitpos = bitnum % unit;
414 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
415 + (offset * UNITS_PER_WORD);
416
417 if (bitpos == 0
418 && bitsize == GET_MODE_BITSIZE (fieldmode)
419 && (!MEM_P (op0)
420 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
421 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
422 && ((GET_MODE (op0) == fieldmode && byte_offset == 0)
423 || validate_subreg (fieldmode, GET_MODE (op0), op0,
424 byte_offset)))
425 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
426 || (offset * BITS_PER_UNIT % bitsize == 0
427 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
428 {
429 if (MEM_P (op0))
430 op0 = adjust_address (op0, fieldmode, offset);
431 else if (GET_MODE (op0) != fieldmode)
432 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
433 byte_offset);
434 emit_move_insn (op0, value);
435 return true;
436 }
437
438 /* Make sure we are playing with integral modes. Pun with subregs
439 if we aren't. This must come after the entire register case above,
440 since that case is valid for any mode. The following cases are only
441 valid for integral modes. */
442 {
443 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
444 if (imode != GET_MODE (op0))
445 {
446 if (MEM_P (op0))
447 op0 = adjust_address (op0, imode, 0);
448 else
449 {
450 gcc_assert (imode != BLKmode);
451 op0 = gen_lowpart (imode, op0);
452 }
453 }
454 }
455
456 /* We may be accessing data outside the field, which means
457 we can alias adjacent data. */
458 if (MEM_P (op0))
459 {
460 op0 = shallow_copy_rtx (op0);
461 set_mem_alias_set (op0, 0);
462 set_mem_expr (op0, 0);
463 }
464
465 /* If OP0 is a register, BITPOS must count within a word.
466 But as we have it, it counts within whatever size OP0 now has.
467 On a bigendian machine, these are not the same, so convert. */
468 if (BYTES_BIG_ENDIAN
469 && !MEM_P (op0)
470 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
471 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
472
473 /* Storing an lsb-aligned field in a register
474 can be done with a movestrict instruction. */
475
476 if (!MEM_P (op0)
477 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
478 && bitsize == GET_MODE_BITSIZE (fieldmode)
479 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
480 {
481 struct expand_operand ops[2];
482 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
483 rtx arg0 = op0;
484 unsigned HOST_WIDE_INT subreg_off;
485
486 if (GET_CODE (arg0) == SUBREG)
487 {
488 /* Else we've got some float mode source being extracted into
489 a different float mode destination -- this combination of
490 subregs results in Severe Tire Damage. */
491 gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
492 || GET_MODE_CLASS (fieldmode) == MODE_INT
493 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
494 arg0 = SUBREG_REG (arg0);
495 }
496
497 subreg_off = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
498 + (offset * UNITS_PER_WORD);
499 if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
500 {
501 arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
502
503 create_fixed_operand (&ops[0], arg0);
504 /* Shrink the source operand to FIELDMODE. */
505 create_convert_operand_to (&ops[1], value, fieldmode, false);
506 if (maybe_expand_insn (icode, 2, ops))
507 return true;
508 }
509 }
510
511 /* Handle fields bigger than a word. */
512
513 if (bitsize > BITS_PER_WORD)
514 {
515 /* Here we transfer the words of the field
516 in the order least significant first.
517 This is because the most significant word is the one which may
518 be less than full.
519 However, only do that if the value is not BLKmode. */
520
521 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
522 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
523 unsigned int i;
524 rtx last;
525
526 /* This is the mode we must force value to, so that there will be enough
527 subwords to extract. Note that fieldmode will often (always?) be
528 VOIDmode, because that is what store_field uses to indicate that this
529 is a bit field, but passing VOIDmode to operand_subword_force
530 is not allowed. */
531 fieldmode = GET_MODE (value);
532 if (fieldmode == VOIDmode)
533 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
534
535 last = get_last_insn ();
536 for (i = 0; i < nwords; i++)
537 {
538 /* If I is 0, use the low-order word in both field and target;
539 if I is 1, use the next to lowest word; and so on. */
540 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
541 unsigned int bit_offset = (backwards
542 ? MAX ((int) bitsize - ((int) i + 1)
543 * BITS_PER_WORD,
544 0)
545 : (int) i * BITS_PER_WORD);
546 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
547
548 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
549 bitsize - i * BITS_PER_WORD),
550 bitnum + bit_offset, word_mode,
551 value_word, fallback_p))
552 {
553 delete_insns_since (last);
554 return false;
555 }
556 }
557 return true;
558 }
559
560 /* From here on we can assume that the object being stored into is a full
561 word (whatever type that is), since the field itself is shorter than a word. */
562
563 /* OFFSET is the number of words or bytes (UNIT says which)
564 from STR_RTX to the first word or byte containing part of the field. */
565
566 if (!MEM_P (op0))
567 {
568 if (offset != 0
569 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
570 {
571 if (!REG_P (op0))
572 {
573 /* Since this is a destination (lvalue), we can't copy
574 it to a pseudo. We can remove a SUBREG that does not
575 change the size of the operand. Such a SUBREG may
576 have been added above. */
577 gcc_assert (GET_CODE (op0) == SUBREG
578 && (GET_MODE_SIZE (GET_MODE (op0))
579 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
580 op0 = SUBREG_REG (op0);
581 }
582 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
583 op0, (offset * UNITS_PER_WORD));
584 }
585 offset = 0;
586 }
587
588 /* If VALUE has a floating-point or complex mode, access it as an
589 integer of the corresponding size. This can occur on a machine
590 with 64 bit registers that uses SFmode for float. It can also
591 occur for unaligned float or complex fields. */
592 orig_value = value;
593 if (GET_MODE (value) != VOIDmode
594 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
595 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
596 {
597 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
598 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
599 }
600
601 /* Now OFFSET is nonzero only if OP0 is memory
602 and is therefore always measured in bytes. */
603
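  /* Try the insv pattern first, if the target has one wide enough for
     this field and the field does not spill past the end of a register
     destination.  */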
604 if (HAVE_insv
605 && GET_MODE (value) != BLKmode
606 && bitsize > 0
607 && GET_MODE_BITSIZE (op_mode) >= bitsize
608 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
609 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
610 {
611 struct expand_operand ops[4];
612 int xbitpos = bitpos;
613 rtx value1;
614 rtx xop0 = op0;
615 rtx last = get_last_insn ();
616 bool copy_back = false;
617
618 /* Add OFFSET into OP0's address. */
619 if (MEM_P (xop0))
620 xop0 = adjust_address (xop0, byte_mode, offset);
621
622 /* If xop0 is a register, we need it in OP_MODE
623 to make it acceptable to the format of insv. */
624 if (GET_CODE (xop0) == SUBREG)
625 /* We can't just change the mode, because this might clobber op0,
626 and we will need the original value of op0 if insv fails. */
627 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
628 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
629 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
630
631 /* If the destination is a paradoxical subreg such that we need a
632 truncate to the inner mode, perform the insertion on a temporary and
633 truncate the result to the original destination. Note that we can't
634 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
635 X) 0)) is (reg:N X). */
636 if (GET_CODE (xop0) == SUBREG
637 && REG_P (SUBREG_REG (xop0))
638 && (!TRULY_NOOP_TRUNCATION
639 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
640 GET_MODE_BITSIZE (op_mode))))
641 {
642 rtx tem = gen_reg_rtx (op_mode);
643 emit_move_insn (tem, xop0);
644 xop0 = tem;
645 copy_back = true;
646 }
647
648 /* On big-endian machines, we count bits from the most significant.
649 If the bit field insn does not, we must invert. */
650
651 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
652 xbitpos = unit - bitsize - xbitpos;
653
654 /* We have been counting XBITPOS within UNIT.
655 Count instead within the size of the register. */
656 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
657 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
658
659 unit = GET_MODE_BITSIZE (op_mode);
660
661 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
662 value1 = value;
663 if (GET_MODE (value) != op_mode)
664 {
665 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
666 {
667 /* Optimization: Don't bother really extending VALUE
668 if it has all the bits we will actually use. However,
669 if we must narrow it, be sure we do it correctly. */
670
671 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
672 {
673 rtx tmp;
674
675 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
676 if (! tmp)
677 tmp = simplify_gen_subreg (op_mode,
678 force_reg (GET_MODE (value),
679 value1),
680 GET_MODE (value), 0);
681 value1 = tmp;
682 }
683 else
684 value1 = gen_lowpart (op_mode, value1);
685 }
686 else if (CONST_INT_P (value))
687 value1 = gen_int_mode (INTVAL (value), op_mode);
688 else
689 /* Parse phase is supposed to make VALUE's data type
690 match that of the component reference, which is a type
691 at least as wide as the field; so VALUE should have
692 a mode that corresponds to that type. */
693 gcc_assert (CONSTANT_P (value));
694 }
695
696 create_fixed_operand (&ops[0], xop0);
697 create_integer_operand (&ops[1], bitsize);
698 create_integer_operand (&ops[2], xbitpos);
699 create_input_operand (&ops[3], value1, op_mode);
700 if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
701 {
702 if (copy_back)
703 convert_move (op0, xop0, true);
704 return true;
705 }
706 delete_insns_since (last);
707 }
708
709 /* If OP0 is a memory, try copying it to a register and seeing if a
710 cheap register alternative is available. */
711 if (HAVE_insv && MEM_P (op0))
712 {
713 enum machine_mode bestmode;
714
715 /* Get the mode to use for inserting into this field. If OP0 is
716 BLKmode, get the smallest mode consistent with the alignment. If
717 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
718 mode. Otherwise, use the smallest mode containing the field. */
719
720 if (GET_MODE (op0) == BLKmode
721 || (op_mode != MAX_MACHINE_MODE
722 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
723 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
724 (op_mode == MAX_MACHINE_MODE
725 ? VOIDmode : op_mode),
726 MEM_VOLATILE_P (op0));
727 else
728 bestmode = GET_MODE (op0);
729
730 if (bestmode != VOIDmode
731 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
732 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
733 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
734 {
735 rtx last, tempreg, xop0;
736 unsigned HOST_WIDE_INT xoffset, xbitpos;
737
738 last = get_last_insn ();
739
740 /* Adjust address to point to the containing unit of
741 that mode. Compute the offset as a multiple of this unit,
742 counting in bytes. */
743 unit = GET_MODE_BITSIZE (bestmode);
744 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
745 xbitpos = bitnum % unit;
746 xop0 = adjust_address (op0, bestmode, xoffset);
747
748 /* Fetch that unit, store the bitfield in it, then store
749 the unit. */
750 tempreg = copy_to_reg (xop0);
751 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
752 fieldmode, orig_value, false))
753 {
754 emit_move_insn (xop0, tempreg);
755 return true;
756 }
757 delete_insns_since (last);
758 }
759 }
760
761 if (!fallback_p)
762 return false;
763
764 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
765 return true;
766 }
767
768 /* Generate code to store value from rtx VALUE
769 into a bit-field within structure STR_RTX
770 containing BITSIZE bits starting at bit BITNUM.
771 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
772
773 void
774 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
775 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
776 rtx value)
777 {
778 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
779 gcc_unreachable ();
780 }
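/* A hypothetical use, for illustration only: storing the QImode value VAL
   into bits 3..10 of the SImode register REG could be written
       store_bit_field (reg, 8, 3, QImode, val);
   which goes through the insv pattern when the target provides one and
   through the shift-and-mask fallback (store_fixed_bit_field) otherwise.  */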
781 \f
782 /* Use shifts and boolean operations to store VALUE
783 into a bit field of width BITSIZE
784 in the memory location specified by OP0, offset by OFFSET bytes.
785 (OFFSET must be 0 if OP0 is a register.)
786 The field starts at position BITPOS within the byte.
787 (If OP0 is a register, it may be a full word or a narrower mode,
788 but BITPOS still counts within a full word,
789 which is significant on bigendian machines.) */
790
791 static void
792 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
793 unsigned HOST_WIDE_INT bitsize,
794 unsigned HOST_WIDE_INT bitpos, rtx value)
795 {
796 enum machine_mode mode;
797 unsigned int total_bits = BITS_PER_WORD;
798 rtx temp;
799 int all_zero = 0;
800 int all_one = 0;
801
802 /* There is a case not handled here:
803 a structure with a known alignment of just a halfword
804 and a field split across two aligned halfwords within the structure.
805 Or likewise a structure with a known alignment of just a byte
806 and a field split across two bytes.
807 Such cases are not supposed to be able to occur. */
808
809 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
810 {
811 gcc_assert (!offset);
812 /* Special treatment for a bit field split across two registers. */
813 if (bitsize + bitpos > BITS_PER_WORD)
814 {
815 store_split_bit_field (op0, bitsize, bitpos, value);
816 return;
817 }
818 }
819 else
820 {
821 /* Get the proper mode to use for this field. We want a mode that
822 includes the entire field. If such a mode would be larger than
823 a word, we won't be doing the extraction the normal way.
824 We don't want a mode bigger than the destination. */
825
826 mode = GET_MODE (op0);
827 if (GET_MODE_BITSIZE (mode) == 0
828 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
829 mode = word_mode;
830
831 if (MEM_VOLATILE_P (op0)
832 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
833 && flag_strict_volatile_bitfields > 0)
834 mode = GET_MODE (op0);
835 else
836 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
837 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
838
839 if (mode == VOIDmode)
840 {
841 /* The only way this should occur is if the field spans word
842 boundaries. */
843 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
844 value);
845 return;
846 }
847
848 total_bits = GET_MODE_BITSIZE (mode);
849
850 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
851 be in the range 0 to total_bits-1, and put any excess bytes in
852 OFFSET. */
853 if (bitpos >= total_bits)
854 {
855 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
856 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
857 * BITS_PER_UNIT);
858 }
859
860 /* Get ref to an aligned byte, halfword, or word containing the field.
861 Adjust BITPOS to be position within a word,
862 and OFFSET to be the offset of that word.
863 Then alter OP0 to refer to that word. */
864 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
865 offset -= (offset % (total_bits / BITS_PER_UNIT));
866 op0 = adjust_address (op0, mode, offset);
867 }
868
869 mode = GET_MODE (op0);
870
871 /* Now MODE is either some integral mode for a MEM as OP0,
872 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
873 The bit field is contained entirely within OP0.
874 BITPOS is the starting bit number within OP0.
875 (OP0's mode may actually be narrower than MODE.) */
876
877 if (BYTES_BIG_ENDIAN)
878 /* BITPOS is the distance between our msb
879 and that of the containing datum.
880 Convert it to the distance from the lsb. */
881 bitpos = total_bits - bitsize - bitpos;
882
883 /* Now BITPOS is always the distance between our lsb
884 and that of OP0. */
885
886 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
887 we must first convert its mode to MODE. */
888
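  /* For a constant VALUE, also record whether the field's bits are all
     zeros or all ones, so that the masking AND or the IOR below can be
     skipped.  */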
889 if (CONST_INT_P (value))
890 {
891 HOST_WIDE_INT v = INTVAL (value);
892
893 if (bitsize < HOST_BITS_PER_WIDE_INT)
894 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
895
896 if (v == 0)
897 all_zero = 1;
898 else if ((bitsize < HOST_BITS_PER_WIDE_INT
899 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
900 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
901 all_one = 1;
902
903 value = lshift_value (mode, value, bitpos, bitsize);
904 }
905 else
906 {
907 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
908 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
909
910 if (GET_MODE (value) != mode)
911 value = convert_to_mode (mode, value, 1);
912
913 if (must_and)
914 value = expand_binop (mode, and_optab, value,
915 mask_rtx (mode, 0, bitsize, 0),
916 NULL_RTX, 1, OPTAB_LIB_WIDEN);
917 if (bitpos > 0)
918 value = expand_shift (LSHIFT_EXPR, mode, value,
919 bitpos, NULL_RTX, 1);
920 }
921
922 /* Now clear the chosen bits in OP0,
923 except that if VALUE is -1 we need not bother. */
924 /* We keep the intermediates in registers to allow CSE to combine
925 consecutive bitfield assignments. */
926
927 temp = force_reg (mode, op0);
928
929 if (! all_one)
930 {
931 temp = expand_binop (mode, and_optab, temp,
932 mask_rtx (mode, bitpos, bitsize, 1),
933 NULL_RTX, 1, OPTAB_LIB_WIDEN);
934 temp = force_reg (mode, temp);
935 }
936
937 /* Now logical-or VALUE into OP0, unless it is zero. */
938
939 if (! all_zero)
940 {
941 temp = expand_binop (mode, ior_optab, temp, value,
942 NULL_RTX, 1, OPTAB_LIB_WIDEN);
943 temp = force_reg (mode, temp);
944 }
945
946 if (op0 != temp)
947 {
948 op0 = copy_rtx (op0);
949 emit_move_insn (op0, temp);
950 }
951 }
952 \f
953 /* Store a bit field that is split across multiple accessible memory objects.
954
955 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
956 BITSIZE is the field width; BITPOS the position of its first bit
957 (within the word).
958 VALUE is the value to store.
959
960 This does not yet handle fields wider than BITS_PER_WORD. */
961
962 static void
963 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
964 unsigned HOST_WIDE_INT bitpos, rtx value)
965 {
966 unsigned int unit;
967 unsigned int bitsdone = 0;
968
969 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
970 much at a time. */
971 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
972 unit = BITS_PER_WORD;
973 else
974 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
975
976 /* If VALUE is a constant other than a CONST_INT, get it into a register in
977 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
978 that VALUE might be a floating-point constant. */
979 if (CONSTANT_P (value) && !CONST_INT_P (value))
980 {
981 rtx word = gen_lowpart_common (word_mode, value);
982
983 if (word && (value != word))
984 value = word;
985 else
986 value = gen_lowpart_common (word_mode,
987 force_reg (GET_MODE (value) != VOIDmode
988 ? GET_MODE (value)
989 : word_mode, value));
990 }
991
992 while (bitsdone < bitsize)
993 {
994 unsigned HOST_WIDE_INT thissize;
995 rtx part, word;
996 unsigned HOST_WIDE_INT thispos;
997 unsigned HOST_WIDE_INT offset;
998
999 offset = (bitpos + bitsdone) / unit;
1000 thispos = (bitpos + bitsdone) % unit;
1001
1002 /* THISSIZE must not overrun a word boundary. Otherwise,
1003 store_fixed_bit_field will call us again, and we will mutually
1004 recurse forever. */
1005 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1006 thissize = MIN (thissize, unit - thispos);
1007
1008 if (BYTES_BIG_ENDIAN)
1009 {
1010 int total_bits;
1011
1012 /* We must do an endian conversion exactly the same way as it is
1013 done in extract_bit_field, so that the two calls to
1014 extract_fixed_bit_field will have comparable arguments. */
1015 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1016 total_bits = BITS_PER_WORD;
1017 else
1018 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1019
1020 /* Fetch successively less significant portions. */
1021 if (CONST_INT_P (value))
1022 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1023 >> (bitsize - bitsdone - thissize))
1024 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1025 else
1026 /* The args are chosen so that the last part includes the
1027 lsb. Give extract_bit_field the value it needs (with
1028 endianness compensation) to fetch the piece we want. */
1029 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1030 total_bits - bitsize + bitsdone,
1031 NULL_RTX, 1, false);
1032 }
1033 else
1034 {
1035 /* Fetch successively more significant portions. */
1036 if (CONST_INT_P (value))
1037 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1038 >> bitsdone)
1039 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1040 else
1041 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1042 bitsdone, NULL_RTX, 1, false);
1043 }
1044
1045 /* If OP0 is a register, then handle OFFSET here.
1046
1047 When handling multiword bitfields, extract_bit_field may pass
1048 down a word_mode SUBREG of a larger REG for a bitfield that actually
1049 crosses a word boundary. Thus, for a SUBREG, we must find
1050 the current word starting from the base register. */
1051 if (GET_CODE (op0) == SUBREG)
1052 {
1053 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1054 enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
1055 if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
1056 word = word_offset ? const0_rtx : op0;
1057 else
1058 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1059 GET_MODE (SUBREG_REG (op0)));
1060 offset = 0;
1061 }
1062 else if (REG_P (op0))
1063 {
1064 enum machine_mode op0_mode = GET_MODE (op0);
1065 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1066 word = offset ? const0_rtx : op0;
1067 else
1068 word = operand_subword_force (op0, offset, GET_MODE (op0));
1069 offset = 0;
1070 }
1071 else
1072 word = op0;
1073
1074 /* OFFSET is in UNITs, and UNIT is in bits.
1075 store_fixed_bit_field wants offset in bytes. If WORD is const0_rtx,
1076 it is just an out-of-bounds access. Ignore it. */
1077 if (word != const0_rtx)
1078 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1079 thispos, part);
1080 bitsdone += thissize;
1081 }
1082 }
1083 \f
1084 /* A subroutine of extract_bit_field_1 that converts return value X
1085 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1086 to extract_bit_field. */
1087
1088 static rtx
1089 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1090 enum machine_mode tmode, bool unsignedp)
1091 {
1092 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1093 return x;
1094
1095 /* If TMODE is not a scalar integral mode, first convert X to the
1096 integer mode of that size and then access it as a value of
1097 mode TMODE via a SUBREG. */
1098 if (!SCALAR_INT_MODE_P (tmode))
1099 {
1100 enum machine_mode smode;
1101
1102 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1103 x = convert_to_mode (smode, x, unsignedp);
1104 x = force_reg (smode, x);
1105 return gen_lowpart (tmode, x);
1106 }
1107
1108 return convert_to_mode (tmode, x, unsignedp);
1109 }
1110
1111 /* A subroutine of extract_bit_field, with the same arguments.
1112 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1113 if we can find no other means of implementing the operation.
1114 If FALLBACK_P is false, return NULL instead. */
1115
1116 static rtx
1117 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1118 unsigned HOST_WIDE_INT bitnum,
1119 int unsignedp, bool packedp, rtx target,
1120 enum machine_mode mode, enum machine_mode tmode,
1121 bool fallback_p)
1122 {
1123 unsigned int unit
1124 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1125 unsigned HOST_WIDE_INT offset, bitpos;
1126 rtx op0 = str_rtx;
1127 enum machine_mode int_mode;
1128 enum machine_mode ext_mode;
1129 enum machine_mode mode1;
1130 int byte_offset;
1131
1132 if (tmode == VOIDmode)
1133 tmode = mode;
1134
1135 while (GET_CODE (op0) == SUBREG)
1136 {
1137 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1138 op0 = SUBREG_REG (op0);
1139 }
1140
1141 /* If we have an out-of-bounds access to a register, just return an
1142 uninitialized register of the required mode. This can occur if the
1143 source code contains an out-of-bounds access to a small array. */
1144 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1145 return gen_reg_rtx (tmode);
1146
1147 if (REG_P (op0)
1148 && mode == GET_MODE (op0)
1149 && bitnum == 0
1150 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1151 {
1152 /* We're trying to extract a full register from itself. */
1153 return op0;
1154 }
1155
1156 /* See if we can get a better vector mode before extracting. */
1157 if (VECTOR_MODE_P (GET_MODE (op0))
1158 && !MEM_P (op0)
1159 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1160 {
1161 enum machine_mode new_mode;
1162
1163 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1164 new_mode = MIN_MODE_VECTOR_FLOAT;
1165 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1166 new_mode = MIN_MODE_VECTOR_FRACT;
1167 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1168 new_mode = MIN_MODE_VECTOR_UFRACT;
1169 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1170 new_mode = MIN_MODE_VECTOR_ACCUM;
1171 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1172 new_mode = MIN_MODE_VECTOR_UACCUM;
1173 else
1174 new_mode = MIN_MODE_VECTOR_INT;
1175
1176 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1177 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1178 && targetm.vector_mode_supported_p (new_mode))
1179 break;
1180 if (new_mode != VOIDmode)
1181 op0 = gen_lowpart (new_mode, op0);
1182 }
1183
1184 /* Use vec_extract patterns for extracting parts of vectors whenever
1185 available. */
1186 if (VECTOR_MODE_P (GET_MODE (op0))
1187 && !MEM_P (op0)
1188 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1189 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1190 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1191 {
1192 struct expand_operand ops[3];
1193 enum machine_mode outermode = GET_MODE (op0);
1194 enum machine_mode innermode = GET_MODE_INNER (outermode);
1195 enum insn_code icode = optab_handler (vec_extract_optab, outermode);
1196 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1197
1198 create_output_operand (&ops[0], target, innermode);
1199 create_input_operand (&ops[1], op0, outermode);
1200 create_integer_operand (&ops[2], pos);
1201 if (maybe_expand_insn (icode, 3, ops))
1202 {
1203 target = ops[0].value;
1204 if (GET_MODE (target) != mode)
1205 return gen_lowpart (tmode, target);
1206 return target;
1207 }
1208 }
1209
1210 /* Make sure we are playing with integral modes. Pun with subregs
1211 if we aren't. */
1212 {
1213 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1214 if (imode != GET_MODE (op0))
1215 {
1216 if (MEM_P (op0))
1217 op0 = adjust_address (op0, imode, 0);
1218 else if (imode != BLKmode)
1219 {
1220 op0 = gen_lowpart (imode, op0);
1221
1222 /* If we got a SUBREG, force it into a register since we
1223 aren't going to be able to do another SUBREG on it. */
1224 if (GET_CODE (op0) == SUBREG)
1225 op0 = force_reg (imode, op0);
1226 }
1227 else if (REG_P (op0))
1228 {
1229 rtx reg, subreg;
1230 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1231 MODE_INT);
1232 reg = gen_reg_rtx (imode);
1233 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1234 emit_move_insn (subreg, op0);
1235 op0 = reg;
1236 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1237 }
1238 else
1239 {
1240 rtx mem = assign_stack_temp (GET_MODE (op0),
1241 GET_MODE_SIZE (GET_MODE (op0)), 0);
1242 emit_move_insn (mem, op0);
1243 op0 = adjust_address (mem, BLKmode, 0);
1244 }
1245 }
1246 }
1247
1248 /* We may be accessing data outside the field, which means
1249 we can alias adjacent data. */
1250 if (MEM_P (op0))
1251 {
1252 op0 = shallow_copy_rtx (op0);
1253 set_mem_alias_set (op0, 0);
1254 set_mem_expr (op0, 0);
1255 }
1256
1257 /* Extraction of a full-word or multi-word value from a structure
1258 in a register or aligned memory can be done with just a SUBREG.
1259 A subword value in the least significant part of a register
1260 can also be extracted with a SUBREG. For this, we need the
1261 byte offset of the value in op0. */
1262
1263 bitpos = bitnum % unit;
1264 offset = bitnum / unit;
1265 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1266
1267 /* If OP0 is a register, BITPOS must count within a word.
1268 But as we have it, it counts within whatever size OP0 now has.
1269 On a bigendian machine, these are not the same, so convert. */
1270 if (BYTES_BIG_ENDIAN
1271 && !MEM_P (op0)
1272 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1273 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1274
1275 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1276 If that's wrong, the solution is to test for it and set TARGET to 0
1277 if needed. */
1278
1279 /* Only scalar integer modes can be converted via subregs. There is an
1280 additional problem for FP modes here in that they can have a precision
1281 which is different from the size. mode_for_size uses precision, but
1282 we want a mode based on the size, so we must avoid calling it for FP
1283 modes. */
1284 mode1 = (SCALAR_INT_MODE_P (tmode)
1285 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1286 : mode);
1287
1288 /* If the bitfield is volatile, we need to make sure the access
1289 remains on a type-aligned boundary. */
1290 if (GET_CODE (op0) == MEM
1291 && MEM_VOLATILE_P (op0)
1292 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1293 && flag_strict_volatile_bitfields > 0)
1294 goto no_subreg_mode_swap;
1295
1296 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1297 && bitpos % BITS_PER_WORD == 0)
1298 || (mode1 != BLKmode
1299 /* ??? The big endian test here is wrong. This is correct
1300 if the value is in a register, and if mode_for_size is not
1301 the same mode as op0. This causes us to get unnecessarily
1302 inefficient code from the Thumb port when -mbig-endian. */
1303 && (BYTES_BIG_ENDIAN
1304 ? bitpos + bitsize == BITS_PER_WORD
1305 : bitpos == 0)))
1306 && ((!MEM_P (op0)
1307 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1308 GET_MODE_BITSIZE (GET_MODE (op0)))
1309 && GET_MODE_SIZE (mode1) != 0
1310 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1311 || (MEM_P (op0)
1312 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1313 || (offset * BITS_PER_UNIT % bitsize == 0
1314 && MEM_ALIGN (op0) % bitsize == 0)))))
1315 {
1316 if (MEM_P (op0))
1317 op0 = adjust_address (op0, mode1, offset);
1318 else if (mode1 != GET_MODE (op0))
1319 {
1320 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1321 byte_offset);
1322 if (sub == NULL)
1323 goto no_subreg_mode_swap;
1324 op0 = sub;
1325 }
1326 if (mode1 != mode)
1327 return convert_to_mode (tmode, op0, unsignedp);
1328 return op0;
1329 }
1330 no_subreg_mode_swap:
1331
1332 /* Handle fields bigger than a word. */
1333
1334 if (bitsize > BITS_PER_WORD)
1335 {
1336 /* Here we transfer the words of the field
1337 in the order least significant first.
1338 This is because the most significant word is the one which may
1339 be less than full. */
1340
1341 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1342 unsigned int i;
1343
1344 if (target == 0 || !REG_P (target))
1345 target = gen_reg_rtx (mode);
1346
1347 /* Indicate for flow that the entire target reg is being set. */
1348 emit_clobber (target);
1349
1350 for (i = 0; i < nwords; i++)
1351 {
1352 /* If I is 0, use the low-order word in both field and target;
1353 if I is 1, use the next to lowest word; and so on. */
1354 /* Word number in TARGET to use. */
1355 unsigned int wordnum
1356 = (WORDS_BIG_ENDIAN
1357 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1358 : i);
1359 /* Offset from start of field in OP0. */
1360 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1361 ? MAX (0, ((int) bitsize - ((int) i + 1)
1362 * (int) BITS_PER_WORD))
1363 : (int) i * BITS_PER_WORD);
1364 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1365 rtx result_part
1366 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1367 bitsize - i * BITS_PER_WORD),
1368 bitnum + bit_offset, 1, false, target_part, mode,
1369 word_mode);
1370
1371 gcc_assert (target_part);
1372
1373 if (result_part != target_part)
1374 emit_move_insn (target_part, result_part);
1375 }
1376
1377 if (unsignedp)
1378 {
1379 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1380 need to be zero'd out. */
1381 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1382 {
1383 unsigned int i, total_words;
1384
1385 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1386 for (i = nwords; i < total_words; i++)
1387 emit_move_insn
1388 (operand_subword (target,
1389 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1390 1, VOIDmode),
1391 const0_rtx);
1392 }
1393 return target;
1394 }
1395
1396 /* Signed bit field: sign-extend with two arithmetic shifts. */
1397 target = expand_shift (LSHIFT_EXPR, mode, target,
1398 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1399 return expand_shift (RSHIFT_EXPR, mode, target,
1400 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1401 }
1402
1403 /* From here on we know the desired field is smaller than a word. */
1404
1405 /* Check if there is a correspondingly-sized integer field, so we can
1406 safely extract it as one size of integer, if necessary; then
1407 truncate or extend to the size that is wanted; then use SUBREGs or
1408 convert_to_mode to get one of the modes we really wanted. */
1409
1410 int_mode = int_mode_for_mode (tmode);
1411 if (int_mode == BLKmode)
1412 int_mode = int_mode_for_mode (mode);
1413 /* Should probably push op0 out to memory and then do a load. */
1414 gcc_assert (int_mode != BLKmode);
1415
1416 /* OFFSET is the number of words or bytes (UNIT says which)
1417 from STR_RTX to the first word or byte containing part of the field. */
1418 if (!MEM_P (op0))
1419 {
1420 if (offset != 0
1421 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1422 {
1423 if (!REG_P (op0))
1424 op0 = copy_to_reg (op0);
1425 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1426 op0, (offset * UNITS_PER_WORD));
1427 }
1428 offset = 0;
1429 }
1430
1431 /* Now OFFSET is nonzero only for memory operands. */
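  /* The mode in which the ext(z)v pattern delivers its result (operand 0),
     or MAX_MACHINE_MODE if no such pattern is available.  */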
1432 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1433 if (ext_mode != MAX_MACHINE_MODE
1434 && bitsize > 0
1435 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1436 /* If op0 is a register, we need it in EXT_MODE to make it
1437 acceptable to the format of ext(z)v. */
1438 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1439 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1440 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))))
1441 {
1442 struct expand_operand ops[4];
1443 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1444 rtx xop0 = op0;
1445 rtx xtarget = target;
1446 rtx xspec_target = target;
1447 rtx xspec_target_subreg = 0;
1448
1449 /* If op0 is a register, we need it in EXT_MODE to make it
1450 acceptable to the format of ext(z)v. */
1451 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1452 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1453 if (MEM_P (xop0))
1454 /* Get ref to first byte containing part of the field. */
1455 xop0 = adjust_address (xop0, byte_mode, xoffset);
1456
1457 /* On big-endian machines, we count bits from the most significant.
1458 If the bit field insn does not, we must invert. */
1459 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1460 xbitpos = unit - bitsize - xbitpos;
1461
1462 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1463 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1464 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1465
1466 unit = GET_MODE_BITSIZE (ext_mode);
1467
1468 if (xtarget == 0)
1469 xtarget = xspec_target = gen_reg_rtx (tmode);
1470
1471 if (GET_MODE (xtarget) != ext_mode)
1472 {
1473 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1474 between the mode of the extraction (word_mode) and the target
1475 mode. Instead, create a temporary and use convert_move to set
1476 the target. */
1477 if (REG_P (xtarget)
1478 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1479 GET_MODE_BITSIZE (ext_mode)))
1480 {
1481 xtarget = gen_lowpart (ext_mode, xtarget);
1482 if (GET_MODE_SIZE (ext_mode)
1483 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1484 xspec_target_subreg = xtarget;
1485 }
1486 else
1487 xtarget = gen_reg_rtx (ext_mode);
1488 }
1489
1490 create_output_operand (&ops[0], xtarget, ext_mode);
1491 create_fixed_operand (&ops[1], xop0);
1492 create_integer_operand (&ops[2], bitsize);
1493 create_integer_operand (&ops[3], xbitpos);
1494 if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
1495 4, ops))
1496 {
1497 xtarget = ops[0].value;
1498 if (xtarget == xspec_target)
1499 return xtarget;
1500 if (xtarget == xspec_target_subreg)
1501 return xspec_target;
1502 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1503 }
1504 }
1505
1506 /* If OP0 is a memory, try copying it to a register and seeing if a
1507 cheap register alternative is available. */
1508 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1509 {
1510 enum machine_mode bestmode;
1511
1512 /* Get the mode to use for inserting into this field. If
1513 OP0 is BLKmode, get the smallest mode consistent with the
1514 alignment. If OP0 is a non-BLKmode object that is no
1515 wider than EXT_MODE, use its mode. Otherwise, use the
1516 smallest mode containing the field. */
1517
1518 if (GET_MODE (op0) == BLKmode
1519 || (ext_mode != MAX_MACHINE_MODE
1520 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1521 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1522 (ext_mode == MAX_MACHINE_MODE
1523 ? VOIDmode : ext_mode),
1524 MEM_VOLATILE_P (op0));
1525 else
1526 bestmode = GET_MODE (op0);
1527
1528 if (bestmode != VOIDmode
1529 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1530 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1531 {
1532 unsigned HOST_WIDE_INT xoffset, xbitpos;
1533
1534 /* Compute the offset as a multiple of this unit,
1535 counting in bytes. */
1536 unit = GET_MODE_BITSIZE (bestmode);
1537 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1538 xbitpos = bitnum % unit;
1539
1540 /* Make sure the register is big enough for the whole field. */
1541 if (xoffset * BITS_PER_UNIT + unit
1542 >= offset * BITS_PER_UNIT + bitsize)
1543 {
1544 rtx last, result, xop0;
1545
1546 last = get_last_insn ();
1547
1548 /* Fetch it to a register in that size. */
1549 xop0 = adjust_address (op0, bestmode, xoffset);
1550 xop0 = force_reg (bestmode, xop0);
1551 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1552 unsignedp, packedp, target,
1553 mode, tmode, false);
1554 if (result)
1555 return result;
1556
1557 delete_insns_since (last);
1558 }
1559 }
1560 }
1561
1562 if (!fallback_p)
1563 return NULL;
1564
1565 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1566 bitpos, target, unsignedp, packedp);
1567 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1568 }
1569
1570 /* Generate code to extract a byte-field from STR_RTX
1571 containing BITSIZE bits, starting at BITNUM,
1572 and put it in TARGET if possible (if TARGET is nonzero).
1573 Regardless of TARGET, we return the rtx for where the value is placed.
1574
1575 STR_RTX is the structure containing the byte (a REG or MEM).
1576 UNSIGNEDP is nonzero if this is an unsigned bit field.
1577 PACKEDP is nonzero if the field has the packed attribute.
1578 MODE is the natural mode of the field value once extracted.
1579 TMODE is the mode the caller would like the value to have;
1580 but the value may be returned with type MODE instead.
1581
1582 If a TARGET is specified and we can store in it at no extra cost,
1583 we do so, and return TARGET.
1584 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1585 if they are equally easy. */
1586
1587 rtx
1588 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1589 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1590 rtx target, enum machine_mode mode, enum machine_mode tmode)
1591 {
1592 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1593 target, mode, tmode, true);
1594 }
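/* A hypothetical use, for illustration only: fetching bits 3..10 of the
   SImode register REG as an unsigned value could be written
       extract_bit_field (reg, 8, 3, 1, false, NULL_RTX, SImode, SImode);
   which returns the field zero-extended to SImode, via ext(z)v when
   available and the shift-and-mask fallback otherwise.  */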
1595 \f
1596 /* Extract a bit field using shifts and boolean operations.
1597 Returns an rtx to represent the value.
1598 OP0 addresses a register (word) or memory (byte).
1599 BITPOS says which bit within the word or byte the bit field starts in.
1600 OFFSET says how many bytes farther the bit field starts;
1601 it is 0 if OP0 is a register.
1602 BITSIZE says how many bits long the bit field is.
1603 (If OP0 is a register, it may be narrower than a full word,
1604 but BITPOS still counts within a full word,
1605 which is significant on bigendian machines.)
1606
1607 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1608 PACKEDP is true if the field has the packed attribute.
1609
1610 If TARGET is nonzero, attempts to store the value there
1611 and return TARGET, but this is not guaranteed.
1612 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1613
1614 static rtx
1615 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1616 unsigned HOST_WIDE_INT offset,
1617 unsigned HOST_WIDE_INT bitsize,
1618 unsigned HOST_WIDE_INT bitpos, rtx target,
1619 int unsignedp, bool packedp)
1620 {
1621 unsigned int total_bits = BITS_PER_WORD;
1622 enum machine_mode mode;
1623
1624 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1625 {
1626 /* Special treatment for a bit field split across two registers. */
1627 if (bitsize + bitpos > BITS_PER_WORD)
1628 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1629 }
1630 else
1631 {
1632 /* Get the proper mode to use for this field. We want a mode that
1633 includes the entire field. If such a mode would be larger than
1634 a word, we won't be doing the extraction the normal way. */
1635
1636 if (MEM_VOLATILE_P (op0)
1637 && flag_strict_volatile_bitfields > 0)
1638 {
1639 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1640 mode = GET_MODE (op0);
1641 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1642 mode = GET_MODE (target);
1643 else
1644 mode = tmode;
1645 }
1646 else
1647 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1648 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1649
1650 if (mode == VOIDmode)
1651 /* The only way this should occur is if the field spans word
1652 boundaries. */
1653 return extract_split_bit_field (op0, bitsize,
1654 bitpos + offset * BITS_PER_UNIT,
1655 unsignedp);
1656
1657 total_bits = GET_MODE_BITSIZE (mode);
1658
1659 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1660 be in the range 0 to total_bits-1, and put any excess bytes in
1661 OFFSET. */
1662 if (bitpos >= total_bits)
1663 {
1664 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1665 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1666 * BITS_PER_UNIT);
1667 }
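	  /* For instance (illustrative numbers only): with TOTAL_BITS == 32
	     and BITPOS == 70, the adjustment above adds (70 / 32) * 4 == 8
	     bytes to OFFSET and subtracts 64 from BITPOS, leaving
	     BITPOS == 6, which is within the chosen mode.  */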
1668
1669 /* If we're accessing a volatile MEM, we can't do the next
1670 alignment step if it results in a multi-word access where we
1671 otherwise wouldn't have one. So, check for that case
1672 here. */
1673 if (MEM_P (op0)
1674 && MEM_VOLATILE_P (op0)
1675 && flag_strict_volatile_bitfields > 0
1676 && bitpos + bitsize <= total_bits
1677 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1678 {
1679 if (STRICT_ALIGNMENT)
1680 {
1681 static bool informed_about_misalignment = false;
1682 bool warned;
1683
1684 if (packedp)
1685 {
1686 if (bitsize == total_bits)
1687 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1688 "multiple accesses to volatile structure member"
1689 " because of packed attribute");
1690 else
1691 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1692 "multiple accesses to volatile structure bitfield"
1693 " because of packed attribute");
1694
1695 return extract_split_bit_field (op0, bitsize,
1696 bitpos + offset * BITS_PER_UNIT,
1697 unsignedp);
1698 }
1699
1700 if (bitsize == total_bits)
1701 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1702 "mis-aligned access used for structure member");
1703 else
1704 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1705 "mis-aligned access used for structure bitfield");
1706
1707 if (! informed_about_misalignment && warned)
1708 {
1709 informed_about_misalignment = true;
1710 inform (input_location,
1711 "when a volatile object spans multiple type-sized locations,"
1712 " the compiler must choose between using a single mis-aligned access to"
1713 " preserve the volatility, or using multiple aligned accesses to avoid"
1714 " runtime faults; this code may fail at runtime if the hardware does"
1715 " not allow this access");
1716 }
1717 }
1718 }
1719 else
1720 {
1721
1722 /* Get ref to an aligned byte, halfword, or word containing the field.
1723 Adjust BITPOS to be position within a word,
1724 and OFFSET to be the offset of that word.
1725 Then alter OP0 to refer to that word. */
1726 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1727 offset -= (offset % (total_bits / BITS_PER_UNIT));
1728 }
1729
1730 op0 = adjust_address (op0, mode, offset);
1731 }
1732
1733 mode = GET_MODE (op0);
1734
1735 if (BYTES_BIG_ENDIAN)
1736 /* BITPOS is the distance between our msb and that of OP0.
1737 Convert it to the distance from the lsb. */
1738 bitpos = total_bits - bitsize - bitpos;
1739
1740 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1741 We have reduced the big-endian case to the little-endian case. */
1742
1743 if (unsignedp)
1744 {
1745 if (bitpos)
1746 {
1747 /* If the field does not already start at the lsb,
1748 shift it so it does. */
1749 /* Maybe propagate the target for the shift. */
1750 /* But not if we will return it--could confuse integrate.c. */
1751 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1752 if (tmode != mode) subtarget = 0;
1753 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitpos, subtarget, 1);
1754 }
1755 /* Convert the value to the desired mode. */
1756 if (mode != tmode)
1757 op0 = convert_to_mode (tmode, op0, 1);
1758
1759 /* Unless the msb of the field used to be the msb when we shifted,
1760 mask out the upper bits. */
1761
1762 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1763 return expand_binop (GET_MODE (op0), and_optab, op0,
1764 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1765 target, 1, OPTAB_LIB_WIDEN);
1766 return op0;
1767 }
1768
1769 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1770 then arithmetic-shift its lsb to the lsb of the word. */
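  /* A rough example with assumed sizes: for a 5-bit field at BITPOS 3
     extracted in a 32-bit mode, this is a left shift by 32 - (5 + 3) == 24
     followed by an arithmetic right shift by 32 - 5 == 27, which replicates
     the field's sign bit through the upper bits of the result.  */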
1771 op0 = force_reg (mode, op0);
1772 if (mode != tmode)
1773 target = 0;
1774
1775 /* Find the narrowest integer mode that contains the field. */
1776
1777 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1778 mode = GET_MODE_WIDER_MODE (mode))
1779 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1780 {
1781 op0 = convert_to_mode (mode, op0, 0);
1782 break;
1783 }
1784
1785 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1786 {
1787 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitpos);
1788 /* Maybe propagate the target for the shift. */
1789 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1790 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1791 }
1792
1793 return expand_shift (RSHIFT_EXPR, mode, op0,
1794 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
1795 }
1796 \f
1797 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1798 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1799 complement of that if COMPLEMENT. The mask is truncated if
1800 necessary to the width of mode MODE. The mask is zero-extended if
1801 BITSIZE+BITPOS is too small for MODE. */
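/* A small worked example (values chosen for illustration): in QImode with
   BITSIZE == 5 and BITPOS == 3 the mask is 0xf8; with COMPLEMENT nonzero it
   is 0x07 after truncation to the 8-bit mode.  */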
1802
1803 static rtx
1804 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1805 {
1806 double_int mask;
1807
1808 mask = double_int_mask (bitsize);
1809 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1810
1811 if (complement)
1812 mask = double_int_not (mask);
1813
1814 return immed_double_int_const (mask, mode);
1815 }
1816
1817 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1818 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
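/* For example (numbers chosen for illustration): VALUE == 0x2f with
   BITSIZE == 4 truncates to 0xf, and BITPOS == 4 then yields 0xf0.  */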
1819
1820 static rtx
1821 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1822 {
1823 double_int val;
1824
1825 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1826 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1827
1828 return immed_double_int_const (val, mode);
1829 }
1830 \f
1831 /* Extract a bit field that is split across two words
1832 and return an RTX for the result.
1833
1834 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1835 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1836 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
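/* An illustrative case, assuming 32-bit words: a 12-bit field starting at
   BITPOS 28 is assembled from two pieces, the low 4 bits from the first
   word and the remaining 8 bits from the second; the pieces are combined
   with IOR and, for a signed field, sign-extended by the two shifts at the
   end of this function.  */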
1837
1838 static rtx
1839 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1840 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1841 {
1842 unsigned int unit;
1843 unsigned int bitsdone = 0;
1844 rtx result = NULL_RTX;
1845 int first = 1;
1846
1847 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1848 much at a time. */
1849 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1850 unit = BITS_PER_WORD;
1851 else
1852 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1853
1854 while (bitsdone < bitsize)
1855 {
1856 unsigned HOST_WIDE_INT thissize;
1857 rtx part, word;
1858 unsigned HOST_WIDE_INT thispos;
1859 unsigned HOST_WIDE_INT offset;
1860
1861 offset = (bitpos + bitsdone) / unit;
1862 thispos = (bitpos + bitsdone) % unit;
1863
1864 /* THISSIZE must not overrun a word boundary. Otherwise,
1865 extract_fixed_bit_field will call us again, and we will mutually
1866 recurse forever. */
1867 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1868 thissize = MIN (thissize, unit - thispos);
1869
1870 /* If OP0 is a register, then handle OFFSET here.
1871
1872 When handling multiword bitfields, extract_bit_field may pass
1873 down a word_mode SUBREG of a larger REG for a bitfield that actually
1874 crosses a word boundary. Thus, for a SUBREG, we must find
1875 the current word starting from the base register. */
1876 if (GET_CODE (op0) == SUBREG)
1877 {
1878 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1879 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1880 GET_MODE (SUBREG_REG (op0)));
1881 offset = 0;
1882 }
1883 else if (REG_P (op0))
1884 {
1885 word = operand_subword_force (op0, offset, GET_MODE (op0));
1886 offset = 0;
1887 }
1888 else
1889 word = op0;
1890
1891 /* Extract the parts in bit-counting order,
1892 whose meaning is determined by BYTES_PER_UNIT.
1893 OFFSET is in UNITs, and UNIT is in bits.
1894 extract_fixed_bit_field wants offset in bytes. */
1895 part = extract_fixed_bit_field (word_mode, word,
1896 offset * unit / BITS_PER_UNIT,
1897 thissize, thispos, 0, 1, false);
1898 bitsdone += thissize;
1899
1900 /* Shift this part into place for the result. */
1901 if (BYTES_BIG_ENDIAN)
1902 {
1903 if (bitsize != bitsdone)
1904 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1905 bitsize - bitsdone, 0, 1);
1906 }
1907 else
1908 {
1909 if (bitsdone != thissize)
1910 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1911 bitsdone - thissize, 0, 1);
1912 }
1913
1914 if (first)
1915 result = part;
1916 else
1917 /* Combine the parts with bitwise or. This works
1918 because we extracted each part as an unsigned bit field. */
1919 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1920 OPTAB_LIB_WIDEN);
1921
1922 first = 0;
1923 }
1924
1925 /* Unsigned bit field: we are done. */
1926 if (unsignedp)
1927 return result;
1928 /* Signed bit field: sign-extend with two arithmetic shifts. */
1929 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1930 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1931 return expand_shift (RSHIFT_EXPR, word_mode, result,
1932 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1933 }
1934 \f
1935 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1936 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1937 MODE, fill the upper bits with zeros. Fail if the layout of either
1938 mode is unknown (as for CC modes) or if the extraction would involve
1939 unprofitable mode punning. Return the value on success, otherwise
1940 return null.
1941
1942 This is different from gen_lowpart* in these respects:
1943
1944 - the returned value must always be considered an rvalue
1945
1946 - when MODE is wider than SRC_MODE, the extraction involves
1947 a zero extension
1948
1949 - when MODE is smaller than SRC_MODE, the extraction involves
1950 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1951
1952 In other words, this routine performs a computation, whereas the
1953 gen_lowpart* routines are conceptually lvalue or rvalue subreg
1954 operations. */
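/* For example (modes assumed for illustration): asking for SImode from a
   DImode SRC returns the low 32 bits as an rvalue, while asking for DImode
   from an SImode SRC zero-extends; if SRC_MODE is, say, DFmode, the value
   is first punned to the corresponding integer mode.  Either direction may
   fail and return NULL_RTX when the modes cannot be tied.  */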
1955
1956 rtx
1957 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
1958 {
1959 enum machine_mode int_mode, src_int_mode;
1960
1961 if (mode == src_mode)
1962 return src;
1963
1964 if (CONSTANT_P (src))
1965 {
1966 /* simplify_gen_subreg can't be used here, as if simplify_subreg
1967 fails, it will happily create (subreg (symbol_ref)) or similar
1968 invalid SUBREGs. */
1969 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
1970 rtx ret = simplify_subreg (mode, src, src_mode, byte);
1971 if (ret)
1972 return ret;
1973
1974 if (GET_MODE (src) == VOIDmode
1975 || !validate_subreg (mode, src_mode, src, byte))
1976 return NULL_RTX;
1977
1978 src = force_reg (GET_MODE (src), src);
1979 return gen_rtx_SUBREG (mode, src, byte);
1980 }
1981
1982 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
1983 return NULL_RTX;
1984
1985 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
1986 && MODES_TIEABLE_P (mode, src_mode))
1987 {
1988 rtx x = gen_lowpart_common (mode, src);
1989 if (x)
1990 return x;
1991 }
1992
1993 src_int_mode = int_mode_for_mode (src_mode);
1994 int_mode = int_mode_for_mode (mode);
1995 if (src_int_mode == BLKmode || int_mode == BLKmode)
1996 return NULL_RTX;
1997
1998 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
1999 return NULL_RTX;
2000 if (!MODES_TIEABLE_P (int_mode, mode))
2001 return NULL_RTX;
2002
2003 src = gen_lowpart (src_int_mode, src);
2004 src = convert_modes (int_mode, src_int_mode, src, true);
2005 src = gen_lowpart (mode, src);
2006 return src;
2007 }
2008 \f
2009 /* Add INC into TARGET. */
2010
2011 void
2012 expand_inc (rtx target, rtx inc)
2013 {
2014 rtx value = expand_binop (GET_MODE (target), add_optab,
2015 target, inc,
2016 target, 0, OPTAB_LIB_WIDEN);
2017 if (value != target)
2018 emit_move_insn (target, value);
2019 }
2020
2021 /* Subtract DEC from TARGET. */
2022
2023 void
2024 expand_dec (rtx target, rtx dec)
2025 {
2026 rtx value = expand_binop (GET_MODE (target), sub_optab,
2027 target, dec,
2028 target, 0, OPTAB_LIB_WIDEN);
2029 if (value != target)
2030 emit_move_insn (target, value);
2031 }
2032 \f
2033 /* Output a shift instruction for expression code CODE,
2034 with SHIFTED being the rtx for the value to shift,
2035 and AMOUNT the rtx for the amount to shift by.
2036 Store the result in the rtx TARGET, if that is convenient.
2037 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2038 Return the rtx for where the value is. */
2039
2040 static rtx
2041 expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
2042 rtx amount, rtx target, int unsignedp)
2043 {
2044 rtx op1, temp = 0;
2045 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2046 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2047 optab lshift_optab = ashl_optab;
2048 optab rshift_arith_optab = ashr_optab;
2049 optab rshift_uns_optab = lshr_optab;
2050 optab lrotate_optab = rotl_optab;
2051 optab rrotate_optab = rotr_optab;
2052 enum machine_mode op1_mode;
2053 int attempt;
2054 bool speed = optimize_insn_for_speed_p ();
2055
2056 op1 = amount;
2057 op1_mode = GET_MODE (op1);
2058
2059 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2060 shift amount is a vector, use the vector/vector shift patterns. */
2061 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2062 {
2063 lshift_optab = vashl_optab;
2064 rshift_arith_optab = vashr_optab;
2065 rshift_uns_optab = vlshr_optab;
2066 lrotate_optab = vrotl_optab;
2067 rrotate_optab = vrotr_optab;
2068 }
2069
2070 /* We previously detected shift counts computed by NEGATE_EXPR
2071 and shifted in the other direction, but that does not work
2072 on all machines. */
2073
2074 if (SHIFT_COUNT_TRUNCATED)
2075 {
2076 if (CONST_INT_P (op1)
2077 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2078 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2079 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2080 % GET_MODE_BITSIZE (mode));
2081 else if (GET_CODE (op1) == SUBREG
2082 && subreg_lowpart_p (op1)
2083 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2084 op1 = SUBREG_REG (op1);
2085 }
2086
2087 if (op1 == const0_rtx)
2088 return shifted;
2089
2090 /* Check whether it is cheaper to implement a left shift by a constant
2091 bit count as a sequence of additions. */
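  /* For instance (costs assumed): x << 2 can be emitted as two
     self-additions, x + x giving 2*x and then 2*x + 2*x giving 4*x, which
     is what the loop below does one add_optab at a time.  */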
2092 if (code == LSHIFT_EXPR
2093 && CONST_INT_P (op1)
2094 && INTVAL (op1) > 0
2095 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2096 && INTVAL (op1) < MAX_BITS_PER_WORD
2097 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2098 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2099 {
2100 int i;
2101 for (i = 0; i < INTVAL (op1); i++)
2102 {
2103 temp = force_reg (mode, shifted);
2104 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2105 unsignedp, OPTAB_LIB_WIDEN);
2106 }
2107 return shifted;
2108 }
2109
2110 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2111 {
2112 enum optab_methods methods;
2113
2114 if (attempt == 0)
2115 methods = OPTAB_DIRECT;
2116 else if (attempt == 1)
2117 methods = OPTAB_WIDEN;
2118 else
2119 methods = OPTAB_LIB_WIDEN;
2120
2121 if (rotate)
2122 {
2123 /* Widening does not work for rotation. */
2124 if (methods == OPTAB_WIDEN)
2125 continue;
2126 else if (methods == OPTAB_LIB_WIDEN)
2127 {
2128 /* If we have been unable to open-code this by a rotation,
2129 do it as the IOR of two shifts. I.e., to rotate A
2130 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2131 where C is the bitsize of A.
2132
2133 It is theoretically possible that the target machine might
2134 not be able to perform either shift and hence we would
2135 be making two libcalls rather than just the one for the
2136 shift (similarly if IOR could not be done). We will allow
2137 this extremely unlikely lossage to avoid complicating the
2138 code below. */
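	      /* For example, with an assumed C == 32 and N == 8, a left
		 rotate becomes (A << 8) | ((unsigned) A >> 24); the same
		 identity, mirrored, handles right rotation.  */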
2139
2140 rtx subtarget = target == shifted ? 0 : target;
2141 rtx new_amount, other_amount;
2142 rtx temp1;
2143
2144 new_amount = op1;
2145 if (CONST_INT_P (op1))
2146 other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
2147 - INTVAL (op1));
2148 else
2149 other_amount
2150 = simplify_gen_binary (MINUS, GET_MODE (op1),
2151 GEN_INT (GET_MODE_BITSIZE (mode)),
2152 op1);
2153
2154 shifted = force_reg (mode, shifted);
2155
2156 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2157 mode, shifted, new_amount, 0, 1);
2158 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2159 mode, shifted, other_amount,
2160 subtarget, 1);
2161 return expand_binop (mode, ior_optab, temp, temp1, target,
2162 unsignedp, methods);
2163 }
2164
2165 temp = expand_binop (mode,
2166 left ? lrotate_optab : rrotate_optab,
2167 shifted, op1, target, unsignedp, methods);
2168 }
2169 else if (unsignedp)
2170 temp = expand_binop (mode,
2171 left ? lshift_optab : rshift_uns_optab,
2172 shifted, op1, target, unsignedp, methods);
2173
2174 /* Do arithmetic shifts.
2175 Also, if we are going to widen the operand, we can just as well
2176 use an arithmetic right-shift instead of a logical one. */
2177 if (temp == 0 && ! rotate
2178 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2179 {
2180 enum optab_methods methods1 = methods;
2181
2182 /* If trying to widen a log shift to an arithmetic shift,
2183 don't accept an arithmetic shift of the same size. */
2184 if (unsignedp)
2185 methods1 = OPTAB_MUST_WIDEN;
2186
2187 /* Arithmetic shift */
2188
2189 temp = expand_binop (mode,
2190 left ? lshift_optab : rshift_arith_optab,
2191 shifted, op1, target, unsignedp, methods1);
2192 }
2193
2194 /* We used to try extzv here for logical right shifts, but that was
2195 only useful for one machine, the VAX, and caused poor code
2196 generation there for lshrdi3, so the code was deleted and a
2197 define_expand for lshrsi3 was added to vax.md. */
2198 }
2199
2200 gcc_assert (temp);
2201 return temp;
2202 }
2203
2204 /* Output a shift instruction for expression code CODE,
2205 with SHIFTED being the rtx for the value to shift,
2206 and AMOUNT the amount to shift by.
2207 Store the result in the rtx TARGET, if that is convenient.
2208 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2209 Return the rtx for where the value is. */
2210
2211 rtx
2212 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2213 int amount, rtx target, int unsignedp)
2214 {
2215 return expand_shift_1 (code, mode,
2216 shifted, GEN_INT (amount), target, unsignedp);
2217 }
2218
2219 /* Output a shift instruction for expression code CODE,
2220 with SHIFTED being the rtx for the value to shift,
2221 and AMOUNT the tree for the amount to shift by.
2222 Store the result in the rtx TARGET, if that is convenient.
2223 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2224 Return the rtx for where the value is. */
2225
2226 rtx
2227 expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2228 tree amount, rtx target, int unsignedp)
2229 {
2230 return expand_shift_1 (code, mode,
2231 shifted, expand_normal (amount), target, unsignedp);
2232 }
2233
2234 \f
2235 /* Indicates the type of fixup needed after a constant multiplication.
2236 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2237 the result should be negated, and ADD_VARIANT means that the
2238 multiplicand should be added to the result. */
2239 enum mult_variant {basic_variant, negate_variant, add_variant};
2240
2241 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2242 const struct mult_cost *, enum machine_mode mode);
2243 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2244 struct algorithm *, enum mult_variant *, int);
2245 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2246 const struct algorithm *, enum mult_variant);
2247 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2248 int, rtx *, int *, int *);
2249 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2250 static rtx extract_high_half (enum machine_mode, rtx);
2251 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2252 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2253 int, int);
2254 /* Compute and return the best algorithm for multiplying by T.
2255 The algorithm must cost less than COST_LIMIT.
2256 If retval.cost >= COST_LIMIT, no algorithm was found and all
2257 other fields of the returned struct are undefined.
2258 MODE is the machine mode of the multiplication. */
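/* As a hedged illustration (the exact choice depends on the target's cost
   tables): for T == 10 the search below can settle on "multiply by 5, then
   shift", i.e. x*10 == ((x << 2) + x) << 1, recording one alg_add_t2_m step
   and one alg_shift step on top of alg_m.  */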
2259
2260 static void
2261 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2262 const struct mult_cost *cost_limit, enum machine_mode mode)
2263 {
2264 int m;
2265 struct algorithm *alg_in, *best_alg;
2266 struct mult_cost best_cost;
2267 struct mult_cost new_limit;
2268 int op_cost, op_latency;
2269 unsigned HOST_WIDE_INT orig_t = t;
2270 unsigned HOST_WIDE_INT q;
2271 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2272 int hash_index;
2273 bool cache_hit = false;
2274 enum alg_code cache_alg = alg_zero;
2275 bool speed = optimize_insn_for_speed_p ();
2276
2277 /* Indicate that no algorithm is yet found. If no algorithm
2278 is found, this value will be returned and indicate failure. */
2279 alg_out->cost.cost = cost_limit->cost + 1;
2280 alg_out->cost.latency = cost_limit->latency + 1;
2281
2282 if (cost_limit->cost < 0
2283 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2284 return;
2285
2286 /* Restrict the bits of "t" to the multiplication's mode. */
2287 t &= GET_MODE_MASK (mode);
2288
2289 /* t == 1 can be done in zero cost. */
2290 if (t == 1)
2291 {
2292 alg_out->ops = 1;
2293 alg_out->cost.cost = 0;
2294 alg_out->cost.latency = 0;
2295 alg_out->op[0] = alg_m;
2296 return;
2297 }
2298
2299 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2300 fail now. */
2301 if (t == 0)
2302 {
2303 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2304 return;
2305 else
2306 {
2307 alg_out->ops = 1;
2308 alg_out->cost.cost = zero_cost[speed];
2309 alg_out->cost.latency = zero_cost[speed];
2310 alg_out->op[0] = alg_zero;
2311 return;
2312 }
2313 }
2314
2315 /* We'll be needing a couple extra algorithm structures now. */
2316
2317 alg_in = XALLOCA (struct algorithm);
2318 best_alg = XALLOCA (struct algorithm);
2319 best_cost = *cost_limit;
2320
2321 /* Compute the hash index. */
2322 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2323
2324 /* See if we already know what to do for T. */
2325 if (alg_hash[hash_index].t == t
2326 && alg_hash[hash_index].mode == mode
2328 && alg_hash[hash_index].speed == speed
2329 && alg_hash[hash_index].alg != alg_unknown)
2330 {
2331 cache_alg = alg_hash[hash_index].alg;
2332
2333 if (cache_alg == alg_impossible)
2334 {
2335 /* The cache tells us that it's impossible to synthesize
2336 multiplication by T within alg_hash[hash_index].cost. */
2337 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2338 /* COST_LIMIT is at least as restrictive as the one
2339 recorded in the hash table, in which case we have no
2340 hope of synthesizing a multiplication. Just
2341 return. */
2342 return;
2343
2344 /* If we get here, COST_LIMIT is less restrictive than the
2345 one recorded in the hash table, so we may be able to
2346 synthesize a multiplication. Proceed as if we didn't
2347 have the cache entry. */
2348 }
2349 else
2350 {
2351 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2352 /* The cached algorithm shows that this multiplication
2353 requires more cost than COST_LIMIT. Just return. This
2354 way, we don't clobber this cache entry with
2355 alg_impossible but retain useful information. */
2356 return;
2357
2358 cache_hit = true;
2359
2360 switch (cache_alg)
2361 {
2362 case alg_shift:
2363 goto do_alg_shift;
2364
2365 case alg_add_t_m2:
2366 case alg_sub_t_m2:
2367 goto do_alg_addsub_t_m2;
2368
2369 case alg_add_factor:
2370 case alg_sub_factor:
2371 goto do_alg_addsub_factor;
2372
2373 case alg_add_t2_m:
2374 goto do_alg_add_t2_m;
2375
2376 case alg_sub_t2_m:
2377 goto do_alg_sub_t2_m;
2378
2379 default:
2380 gcc_unreachable ();
2381 }
2382 }
2383 }
2384
2385 /* If we have a group of zero bits at the low-order part of T, try
2386 multiplying by the remaining bits and then doing a shift. */
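      /* E.g. (illustrative): T == 24 has three low zero bits, so the code
	 below recurses on Q == 3 and accounts for a final left shift by 3,
	 i.e. x*24 == (x*3) << 3.  */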
2387
2388 if ((t & 1) == 0)
2389 {
2390 do_alg_shift:
2391 m = floor_log2 (t & -t); /* m = number of low zero bits */
2392 if (m < maxm)
2393 {
2394 q = t >> m;
2395 /* The function expand_shift will choose between a shift and
2396 a sequence of additions, so the observed cost is given as
2397 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2398 op_cost = m * add_cost[speed][mode];
2399 if (shift_cost[speed][mode][m] < op_cost)
2400 op_cost = shift_cost[speed][mode][m];
2401 new_limit.cost = best_cost.cost - op_cost;
2402 new_limit.latency = best_cost.latency - op_cost;
2403 synth_mult (alg_in, q, &new_limit, mode);
2404
2405 alg_in->cost.cost += op_cost;
2406 alg_in->cost.latency += op_cost;
2407 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2408 {
2409 struct algorithm *x;
2410 best_cost = alg_in->cost;
2411 x = alg_in, alg_in = best_alg, best_alg = x;
2412 best_alg->log[best_alg->ops] = m;
2413 best_alg->op[best_alg->ops] = alg_shift;
2414 }
2415
2416 /* See if treating ORIG_T as a signed number yields a better
2417 sequence. Try this sequence only for a negative ORIG_T
2418 as it would be useless for a non-negative ORIG_T. */
2419 if ((HOST_WIDE_INT) orig_t < 0)
2420 {
2421 /* Shift ORIG_T as follows because a right shift of a
2422 negative-valued signed type is implementation
2423 defined. */
2424 q = ~(~orig_t >> m);
2425 /* The function expand_shift will choose between a shift
2426 and a sequence of additions, so the observed cost is
2427 given as MIN (m * add_cost[speed][mode],
2428 shift_cost[speed][mode][m]). */
2429 op_cost = m * add_cost[speed][mode];
2430 if (shift_cost[speed][mode][m] < op_cost)
2431 op_cost = shift_cost[speed][mode][m];
2432 new_limit.cost = best_cost.cost - op_cost;
2433 new_limit.latency = best_cost.latency - op_cost;
2434 synth_mult (alg_in, q, &new_limit, mode);
2435
2436 alg_in->cost.cost += op_cost;
2437 alg_in->cost.latency += op_cost;
2438 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2439 {
2440 struct algorithm *x;
2441 best_cost = alg_in->cost;
2442 x = alg_in, alg_in = best_alg, best_alg = x;
2443 best_alg->log[best_alg->ops] = m;
2444 best_alg->op[best_alg->ops] = alg_shift;
2445 }
2446 }
2447 }
2448 if (cache_hit)
2449 goto done;
2450 }
2451
2452 /* If we have an odd number, add or subtract one. */
2453 if ((t & 1) != 0)
2454 {
2455 unsigned HOST_WIDE_INT w;
2456
2457 do_alg_addsub_t_m2:
2458 for (w = 1; (w & t) != 0; w <<= 1)
2459 ;
2460 /* If T was -1, then W will be zero after the loop. This is another
2461 case where T ends with ...111. Multiplying by (T + 1) and then
2462 subtracting 1 produces slightly better code and results in algorithm
2463 selection much faster than treating it like the ...0111 case
2464 below. */
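      /* For instance (illustrative values): T == 7 ends in ...111 and is
	 handled as multiply-by-8 then subtract, x*7 == x*8 - x, while
	 T == 5 takes the other branch and becomes x*4 + x.  */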
2465 if (w == 0
2466 || (w > 2
2467 /* Reject the case where t is 3.
2468 Thus we prefer addition in that case. */
2469 && t != 3))
2470 {
2471 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2472
2473 op_cost = add_cost[speed][mode];
2474 new_limit.cost = best_cost.cost - op_cost;
2475 new_limit.latency = best_cost.latency - op_cost;
2476 synth_mult (alg_in, t + 1, &new_limit, mode);
2477
2478 alg_in->cost.cost += op_cost;
2479 alg_in->cost.latency += op_cost;
2480 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2481 {
2482 struct algorithm *x;
2483 best_cost = alg_in->cost;
2484 x = alg_in, alg_in = best_alg, best_alg = x;
2485 best_alg->log[best_alg->ops] = 0;
2486 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2487 }
2488 }
2489 else
2490 {
2491 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2492
2493 op_cost = add_cost[speed][mode];
2494 new_limit.cost = best_cost.cost - op_cost;
2495 new_limit.latency = best_cost.latency - op_cost;
2496 synth_mult (alg_in, t - 1, &new_limit, mode);
2497
2498 alg_in->cost.cost += op_cost;
2499 alg_in->cost.latency += op_cost;
2500 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2501 {
2502 struct algorithm *x;
2503 best_cost = alg_in->cost;
2504 x = alg_in, alg_in = best_alg, best_alg = x;
2505 best_alg->log[best_alg->ops] = 0;
2506 best_alg->op[best_alg->ops] = alg_add_t_m2;
2507 }
2508 }
2509
2510 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2511 quickly with a - a * n for some appropriate constant n. */
2512 m = exact_log2 (-orig_t + 1);
2513 if (m >= 0 && m < maxm)
2514 {
2515 op_cost = shiftsub1_cost[speed][mode][m];
2516 new_limit.cost = best_cost.cost - op_cost;
2517 new_limit.latency = best_cost.latency - op_cost;
2518 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2519
2520 alg_in->cost.cost += op_cost;
2521 alg_in->cost.latency += op_cost;
2522 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2523 {
2524 struct algorithm *x;
2525 best_cost = alg_in->cost;
2526 x = alg_in, alg_in = best_alg, best_alg = x;
2527 best_alg->log[best_alg->ops] = m;
2528 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2529 }
2530 }
2531
2532 if (cache_hit)
2533 goto done;
2534 }
2535
2536 /* Look for factors of t of the form
2537 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2538 If we find such a factor, we can multiply by t using an algorithm that
2539 multiplies by q, shift the result by m and add/subtract it to itself.
2540
2541 We search for large factors first and loop down, even if large factors
2542 are less probable than small; if we find a large factor we will find a
2543 good sequence quickly, and therefore be able to prune (by decreasing
2544 COST_LIMIT) the search. */
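  /* A sketch with assumed costs: T == 45 factors as 3 * (16 - 1), so the
     loop below can recurse on T/D == 3 and record an alg_sub_factor step
     with M == 4, i.e. x*45 == ((x*3) << 4) - x*3.  */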
2545
2546 do_alg_addsub_factor:
2547 for (m = floor_log2 (t - 1); m >= 2; m--)
2548 {
2549 unsigned HOST_WIDE_INT d;
2550
2551 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2552 if (t % d == 0 && t > d && m < maxm
2553 && (!cache_hit || cache_alg == alg_add_factor))
2554 {
2555 /* If the target has a cheap shift-and-add instruction use
2556 that in preference to a shift insn followed by an add insn.
2557 Assume that the shift-and-add is "atomic" with a latency
2558 equal to its cost, otherwise assume that on superscalar
2559 hardware the shift may be executed concurrently with the
2560 earlier steps in the algorithm. */
2561 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2562 if (shiftadd_cost[speed][mode][m] < op_cost)
2563 {
2564 op_cost = shiftadd_cost[speed][mode][m];
2565 op_latency = op_cost;
2566 }
2567 else
2568 op_latency = add_cost[speed][mode];
2569
2570 new_limit.cost = best_cost.cost - op_cost;
2571 new_limit.latency = best_cost.latency - op_latency;
2572 synth_mult (alg_in, t / d, &new_limit, mode);
2573
2574 alg_in->cost.cost += op_cost;
2575 alg_in->cost.latency += op_latency;
2576 if (alg_in->cost.latency < op_cost)
2577 alg_in->cost.latency = op_cost;
2578 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2579 {
2580 struct algorithm *x;
2581 best_cost = alg_in->cost;
2582 x = alg_in, alg_in = best_alg, best_alg = x;
2583 best_alg->log[best_alg->ops] = m;
2584 best_alg->op[best_alg->ops] = alg_add_factor;
2585 }
2586 /* Other factors will have been taken care of in the recursion. */
2587 break;
2588 }
2589
2590 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2591 if (t % d == 0 && t > d && m < maxm
2592 && (!cache_hit || cache_alg == alg_sub_factor))
2593 {
2594 /* If the target has a cheap shift-and-subtract insn use
2595 that in preference to a shift insn followed by a sub insn.
2596 Assume that the shift-and-sub is "atomic" with a latency
2597 equal to its cost, otherwise assume that on superscalar
2598 hardware the shift may be executed concurrently with the
2599 earlier steps in the algorithm. */
2600 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2601 if (shiftsub0_cost[speed][mode][m] < op_cost)
2602 {
2603 op_cost = shiftsub0_cost[speed][mode][m];
2604 op_latency = op_cost;
2605 }
2606 else
2607 op_latency = add_cost[speed][mode];
2608
2609 new_limit.cost = best_cost.cost - op_cost;
2610 new_limit.latency = best_cost.latency - op_latency;
2611 synth_mult (alg_in, t / d, &new_limit, mode);
2612
2613 alg_in->cost.cost += op_cost;
2614 alg_in->cost.latency += op_latency;
2615 if (alg_in->cost.latency < op_cost)
2616 alg_in->cost.latency = op_cost;
2617 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2618 {
2619 struct algorithm *x;
2620 best_cost = alg_in->cost;
2621 x = alg_in, alg_in = best_alg, best_alg = x;
2622 best_alg->log[best_alg->ops] = m;
2623 best_alg->op[best_alg->ops] = alg_sub_factor;
2624 }
2625 break;
2626 }
2627 }
2628 if (cache_hit)
2629 goto done;
2630
2631 /* Try shift-and-add (load effective address) instructions,
2632 i.e. do a*3, a*5, a*9. */
2633 if ((t & 1) != 0)
2634 {
2635 do_alg_add_t2_m:
2636 q = t - 1;
2637 q = q & -q;
2638 m = exact_log2 (q);
2639 if (m >= 0 && m < maxm)
2640 {
2641 op_cost = shiftadd_cost[speed][mode][m];
2642 new_limit.cost = best_cost.cost - op_cost;
2643 new_limit.latency = best_cost.latency - op_cost;
2644 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2645
2646 alg_in->cost.cost += op_cost;
2647 alg_in->cost.latency += op_cost;
2648 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2649 {
2650 struct algorithm *x;
2651 best_cost = alg_in->cost;
2652 x = alg_in, alg_in = best_alg, best_alg = x;
2653 best_alg->log[best_alg->ops] = m;
2654 best_alg->op[best_alg->ops] = alg_add_t2_m;
2655 }
2656 }
2657 if (cache_hit)
2658 goto done;
2659
2660 do_alg_sub_t2_m:
2661 q = t + 1;
2662 q = q & -q;
2663 m = exact_log2 (q);
2664 if (m >= 0 && m < maxm)
2665 {
2666 op_cost = shiftsub0_cost[speed][mode][m];
2667 new_limit.cost = best_cost.cost - op_cost;
2668 new_limit.latency = best_cost.latency - op_cost;
2669 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2670
2671 alg_in->cost.cost += op_cost;
2672 alg_in->cost.latency += op_cost;
2673 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2674 {
2675 struct algorithm *x;
2676 best_cost = alg_in->cost;
2677 x = alg_in, alg_in = best_alg, best_alg = x;
2678 best_alg->log[best_alg->ops] = m;
2679 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2680 }
2681 }
2682 if (cache_hit)
2683 goto done;
2684 }
2685
2686 done:
2687 /* If best_cost has not decreased, we have not found any algorithm. */
2688 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2689 {
2690 /* We failed to find an algorithm. Record alg_impossible for
2691 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2692 we are asked to find an algorithm for T within the same or
2693 lower COST_LIMIT, we can immediately return to the
2694 caller. */
2695 alg_hash[hash_index].t = t;
2696 alg_hash[hash_index].mode = mode;
2697 alg_hash[hash_index].speed = speed;
2698 alg_hash[hash_index].alg = alg_impossible;
2699 alg_hash[hash_index].cost = *cost_limit;
2700 return;
2701 }
2702
2703 /* Cache the result. */
2704 if (!cache_hit)
2705 {
2706 alg_hash[hash_index].t = t;
2707 alg_hash[hash_index].mode = mode;
2708 alg_hash[hash_index].speed = speed;
2709 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2710 alg_hash[hash_index].cost.cost = best_cost.cost;
2711 alg_hash[hash_index].cost.latency = best_cost.latency;
2712 }
2713
2714 /* If we are getting too long a sequence for `struct algorithm'
2715 to record, make this search fail. */
2716 if (best_alg->ops == MAX_BITS_PER_WORD)
2717 return;
2718
2719 /* Copy the algorithm from temporary space to the space at alg_out.
2720 We avoid using structure assignment because the majority of
2721 best_alg is normally undefined, and this is a critical function. */
2722 alg_out->ops = best_alg->ops + 1;
2723 alg_out->cost = best_cost;
2724 memcpy (alg_out->op, best_alg->op,
2725 alg_out->ops * sizeof *alg_out->op);
2726 memcpy (alg_out->log, best_alg->log,
2727 alg_out->ops * sizeof *alg_out->log);
2728 }
2729 \f
2730 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2731 Try three variations:
2732
2733 - a shift/add sequence based on VAL itself
2734 - a shift/add sequence based on -VAL, followed by a negation
2735 - a shift/add sequence based on VAL - 1, followed by an addition.
2736
2737 Return true if the cheapest of these cost less than MULT_COST,
2738 describing the algorithm in *ALG and final fixup in *VARIANT. */
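/* For instance (purely illustrative; the winner depends on the target's
   cost tables): for VAL == -3 the negate variant can win, synthesizing
   x*3 == (x << 1) + x and then negating, while for VAL == 6 the add
   variant may be chosen, computing x*5 + x from the cheaper sequence for
   VAL - 1.  */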
2739
2740 static bool
2741 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2742 struct algorithm *alg, enum mult_variant *variant,
2743 int mult_cost)
2744 {
2745 struct algorithm alg2;
2746 struct mult_cost limit;
2747 int op_cost;
2748 bool speed = optimize_insn_for_speed_p ();
2749
2750 /* Fail quickly for impossible bounds. */
2751 if (mult_cost < 0)
2752 return false;
2753
2754 /* Ensure that mult_cost provides a reasonable upper bound.
2755 Any constant multiplication can be performed with fewer
2756 than 2 * bits additions. */
2757 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2758 if (mult_cost > op_cost)
2759 mult_cost = op_cost;
2760
2761 *variant = basic_variant;
2762 limit.cost = mult_cost;
2763 limit.latency = mult_cost;
2764 synth_mult (alg, val, &limit, mode);
2765
2766 /* This works only if the inverted value actually fits in an
2767 `unsigned int' */
2768 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2769 {
2770 op_cost = neg_cost[speed][mode];
2771 if (MULT_COST_LESS (&alg->cost, mult_cost))
2772 {
2773 limit.cost = alg->cost.cost - op_cost;
2774 limit.latency = alg->cost.latency - op_cost;
2775 }
2776 else
2777 {
2778 limit.cost = mult_cost - op_cost;
2779 limit.latency = mult_cost - op_cost;
2780 }
2781
2782 synth_mult (&alg2, -val, &limit, mode);
2783 alg2.cost.cost += op_cost;
2784 alg2.cost.latency += op_cost;
2785 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2786 *alg = alg2, *variant = negate_variant;
2787 }
2788
2789 /* This proves very useful for division-by-constant. */
2790 op_cost = add_cost[speed][mode];
2791 if (MULT_COST_LESS (&alg->cost, mult_cost))
2792 {
2793 limit.cost = alg->cost.cost - op_cost;
2794 limit.latency = alg->cost.latency - op_cost;
2795 }
2796 else
2797 {
2798 limit.cost = mult_cost - op_cost;
2799 limit.latency = mult_cost - op_cost;
2800 }
2801
2802 synth_mult (&alg2, val - 1, &limit, mode);
2803 alg2.cost.cost += op_cost;
2804 alg2.cost.latency += op_cost;
2805 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2806 *alg = alg2, *variant = add_variant;
2807
2808 return MULT_COST_LESS (&alg->cost, mult_cost);
2809 }
2810
2811 /* A subroutine of expand_mult, used for constant multiplications.
2812 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2813 convenient. Use the shift/add sequence described by ALG and apply
2814 the final fixup specified by VARIANT. */
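/* Sketch of one possible run (the algorithm itself is assumed): for
   VAL == 10 with ALG == { alg_m; alg_add_t2_m, log 2; alg_shift, log 1 }
   the loop below computes accum = op0, then accum = (accum << 2) + op0
   == 5*op0, then accum = accum << 1 == 10*op0, and VAL_SO_FAR tracks
   1, 5, 10 so the final assertion holds.  */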
2815
2816 static rtx
2817 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2818 rtx target, const struct algorithm *alg,
2819 enum mult_variant variant)
2820 {
2821 HOST_WIDE_INT val_so_far;
2822 rtx insn, accum, tem;
2823 int opno;
2824 enum machine_mode nmode;
2825
2826 /* Avoid referencing memory over and over and invalid sharing
2827 on SUBREGs. */
2828 op0 = force_reg (mode, op0);
2829
2830 /* ACCUM starts out either as OP0 or as a zero, depending on
2831 the first operation. */
2832
2833 if (alg->op[0] == alg_zero)
2834 {
2835 accum = copy_to_mode_reg (mode, const0_rtx);
2836 val_so_far = 0;
2837 }
2838 else if (alg->op[0] == alg_m)
2839 {
2840 accum = copy_to_mode_reg (mode, op0);
2841 val_so_far = 1;
2842 }
2843 else
2844 gcc_unreachable ();
2845
2846 for (opno = 1; opno < alg->ops; opno++)
2847 {
2848 int log = alg->log[opno];
2849 rtx shift_subtarget = optimize ? 0 : accum;
2850 rtx add_target
2851 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2852 && !optimize)
2853 ? target : 0;
2854 rtx accum_target = optimize ? 0 : accum;
2855
2856 switch (alg->op[opno])
2857 {
2858 case alg_shift:
2859 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2860 /* REG_EQUAL note will be attached to the following insn. */
2861 emit_move_insn (accum, tem);
2862 val_so_far <<= log;
2863 break;
2864
2865 case alg_add_t_m2:
2866 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2867 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2868 add_target ? add_target : accum_target);
2869 val_so_far += (HOST_WIDE_INT) 1 << log;
2870 break;
2871
2872 case alg_sub_t_m2:
2873 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2874 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2875 add_target ? add_target : accum_target);
2876 val_so_far -= (HOST_WIDE_INT) 1 << log;
2877 break;
2878
2879 case alg_add_t2_m:
2880 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2881 log, shift_subtarget, 0);
2882 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2883 add_target ? add_target : accum_target);
2884 val_so_far = (val_so_far << log) + 1;
2885 break;
2886
2887 case alg_sub_t2_m:
2888 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2889 log, shift_subtarget, 0);
2890 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2891 add_target ? add_target : accum_target);
2892 val_so_far = (val_so_far << log) - 1;
2893 break;
2894
2895 case alg_add_factor:
2896 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2897 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2898 add_target ? add_target : accum_target);
2899 val_so_far += val_so_far << log;
2900 break;
2901
2902 case alg_sub_factor:
2903 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2904 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2905 (add_target
2906 ? add_target : (optimize ? 0 : tem)));
2907 val_so_far = (val_so_far << log) - val_so_far;
2908 break;
2909
2910 default:
2911 gcc_unreachable ();
2912 }
2913
2914 /* Write a REG_EQUAL note on the last insn so that we can cse
2915 multiplication sequences. Note that if ACCUM is a SUBREG,
2916 we've set the inner register and must properly indicate
2917 that. */
2918
2919 tem = op0, nmode = mode;
2920 if (GET_CODE (accum) == SUBREG)
2921 {
2922 nmode = GET_MODE (SUBREG_REG (accum));
2923 tem = gen_lowpart (nmode, op0);
2924 }
2925
2926 insn = get_last_insn ();
2927 set_unique_reg_note (insn, REG_EQUAL,
2928 gen_rtx_MULT (nmode, tem,
2929 GEN_INT (val_so_far)));
2930 }
2931
2932 if (variant == negate_variant)
2933 {
2934 val_so_far = -val_so_far;
2935 accum = expand_unop (mode, neg_optab, accum, target, 0);
2936 }
2937 else if (variant == add_variant)
2938 {
2939 val_so_far = val_so_far + 1;
2940 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2941 }
2942
2943 /* Compare only the bits of val and val_so_far that are significant
2944 in the result mode, to avoid sign-/zero-extension confusion. */
2945 val &= GET_MODE_MASK (mode);
2946 val_so_far &= GET_MODE_MASK (mode);
2947 gcc_assert (val == val_so_far);
2948
2949 return accum;
2950 }
2951
2952 /* Perform a multiplication and return an rtx for the result.
2953 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2954 TARGET is a suggestion for where to store the result (an rtx).
2955
2956 We check specially for a constant integer as OP1.
2957 If you want this check for OP0 as well, then before calling
2958 you should swap the two operands if OP0 would be constant. */
2959
2960 rtx
2961 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2962 int unsignedp)
2963 {
2964 enum mult_variant variant;
2965 struct algorithm algorithm;
2966 int max_cost;
2967 bool speed = optimize_insn_for_speed_p ();
2968
2969 /* Handling const0_rtx here allows us to use zero as a rogue value for
2970 coeff below. */
2971 if (op1 == const0_rtx)
2972 return const0_rtx;
2973 if (op1 == const1_rtx)
2974 return op0;
2975 if (op1 == constm1_rtx)
2976 return expand_unop (mode,
2977 GET_MODE_CLASS (mode) == MODE_INT
2978 && !unsignedp && flag_trapv
2979 ? negv_optab : neg_optab,
2980 op0, target, 0);
2981
2982 /* These are the operations that are potentially turned into a sequence
2983 of shifts and additions. */
2984 if (SCALAR_INT_MODE_P (mode)
2985 && (unsignedp || !flag_trapv))
2986 {
2987 HOST_WIDE_INT coeff = 0;
2988 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
2989
2990 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2991 less than or equal in size to `unsigned int' this doesn't matter.
2992 If the mode is larger than `unsigned int', then synth_mult works
2993 only if the constant value exactly fits in an `unsigned int' without
2994 any truncation. This means that multiplying by negative values does
2995 not work; results are off by 2^32 on a 32 bit machine. */
2996
2997 if (CONST_INT_P (op1))
2998 {
2999 /* Attempt to handle multiplication of DImode values by negative
3000 coefficients, by performing the multiplication by a positive
3001 multiplier and then inverting the result. */
3002 if (INTVAL (op1) < 0
3003 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3004 {
3005 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3006 result is interpreted as an unsigned coefficient.
3007 Exclude cost of op0 from max_cost to match the cost
3008 calculation of the synth_mult. */
3009 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3010 - neg_cost[speed][mode];
3011 if (max_cost > 0
3012 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3013 &variant, max_cost))
3014 {
3015 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3016 NULL_RTX, &algorithm,
3017 variant);
3018 return expand_unop (mode, neg_optab, temp, target, 0);
3019 }
3020 }
3021 else coeff = INTVAL (op1);
3022 }
3023 else if (GET_CODE (op1) == CONST_DOUBLE)
3024 {
3025 /* If we are multiplying in DImode, it may still be a win
3026 to try to work with shifts and adds. */
3027 if (CONST_DOUBLE_HIGH (op1) == 0
3028 && CONST_DOUBLE_LOW (op1) > 0)
3029 coeff = CONST_DOUBLE_LOW (op1);
3030 else if (CONST_DOUBLE_LOW (op1) == 0
3031 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3032 {
3033 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3034 + HOST_BITS_PER_WIDE_INT;
3035 return expand_shift (LSHIFT_EXPR, mode, op0,
3036 shift, target, unsignedp);
3037 }
3038 }
3039
3040 /* We used to test optimize here, on the grounds that it's better to
3041 produce a smaller program when -O is not used. But this causes
3042 such a terrible slowdown sometimes that it seems better to always
3043 use synth_mult. */
3044 if (coeff != 0)
3045 {
3046 /* Special case powers of two. */
3047 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3048 return expand_shift (LSHIFT_EXPR, mode, op0,
3049 floor_log2 (coeff), target, unsignedp);
3050
3051 /* Exclude cost of op0 from max_cost to match the cost
3052 calculation of the synth_mult. */
3053 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3054 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3055 max_cost))
3056 return expand_mult_const (mode, op0, coeff, target,
3057 &algorithm, variant);
3058 }
3059 }
3060
3061 if (GET_CODE (op0) == CONST_DOUBLE)
3062 {
3063 rtx temp = op0;
3064 op0 = op1;
3065 op1 = temp;
3066 }
3067
3068 /* Expand x*2.0 as x+x. */
3069 if (GET_CODE (op1) == CONST_DOUBLE
3070 && SCALAR_FLOAT_MODE_P (mode))
3071 {
3072 REAL_VALUE_TYPE d;
3073 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3074
3075 if (REAL_VALUES_EQUAL (d, dconst2))
3076 {
3077 op0 = force_reg (GET_MODE (op0), op0);
3078 return expand_binop (mode, add_optab, op0, op0,
3079 target, unsignedp, OPTAB_LIB_WIDEN);
3080 }
3081 }
3082
3083 /* This used to use umul_optab if unsigned, but for non-widening multiply
3084 there is no difference between signed and unsigned. */
3085 op0 = expand_binop (mode,
3086 ! unsignedp
3087 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3088 ? smulv_optab : smul_optab,
3089 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3090 gcc_assert (op0);
3091 return op0;
3092 }
3093
3094 /* Perform a widening multiplication and return an rtx for the result.
3095 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3096 TARGET is a suggestion for where to store the result (an rtx).
3097 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3098 or smul_widen_optab.
3099
3100 We check specially for a constant integer as OP1, comparing the
3101 cost of a widening multiply against the cost of a sequence of shifts
3102 and adds. */
3103
3104 rtx
3105 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3106 int unsignedp, optab this_optab)
3107 {
3108 bool speed = optimize_insn_for_speed_p ();
3109 rtx cop1;
3110
3111 if (CONST_INT_P (op1)
3112 && GET_MODE (op0) != VOIDmode
3113 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3114 this_optab == umul_widen_optab))
3115 && CONST_INT_P (cop1)
3116 && (INTVAL (cop1) >= 0
3117 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3118 {
3119 HOST_WIDE_INT coeff = INTVAL (cop1);
3120 int max_cost;
3121 enum mult_variant variant;
3122 struct algorithm algorithm;
3123
3124 /* Special case powers of two. */
3125 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3126 {
3127 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3128 return expand_shift (LSHIFT_EXPR, mode, op0,
3129 floor_log2 (coeff), target, unsignedp);
3130 }
3131
3132 /* Exclude cost of op0 from max_cost to match the cost
3133 calculation of the synth_mult. */
3134 max_cost = mul_widen_cost[speed][mode];
3135 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3136 max_cost))
3137 {
3138 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3139 return expand_mult_const (mode, op0, coeff, target,
3140 &algorithm, variant);
3141 }
3142 }
3143 return expand_binop (mode, this_optab, op0, op1, target,
3144 unsignedp, OPTAB_LIB_WIDEN);
3145 }
3146 \f
3147 /* Return the smallest n such that 2**n >= X. */
3148
3149 int
3150 ceil_log2 (unsigned HOST_WIDE_INT x)
3151 {
3152 return floor_log2 (x - 1) + 1;
3153 }
3154
3155 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3156 replace division by D, and put the least significant N bits of the result
3157 in *MULTIPLIER_PTR and return the most significant bit.
3158
3159 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3160 needed precision is in PRECISION (should be <= N).
3161
3162 PRECISION should be as small as possible so this function can choose
3163 multiplier more freely.
3164
3165 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3166 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3167
3168 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3169 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
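/* A worked example with small, assumed sizes (N == PRECISION == 8, D == 5):
   lgup == 3, mlow == 2^11 / 5 == 409 and mhigh == (2^11 + 2^3) / 5 == 411;
   one reduction step leaves a multiplier of 205 with a post-shift of 2, so
   floor (x / 5) == ((x * 205) >> 8) >> 2 for all 8-bit x, and the function
   returns 0 because the multiplier fits in 8 bits.  */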
3170
3171 static
3172 unsigned HOST_WIDE_INT
3173 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3174 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3175 {
3176 HOST_WIDE_INT mhigh_hi, mlow_hi;
3177 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3178 int lgup, post_shift;
3179 int pow, pow2;
3180 unsigned HOST_WIDE_INT nl, dummy1;
3181 HOST_WIDE_INT nh, dummy2;
3182
3183 /* lgup = ceil(log2(divisor)); */
3184 lgup = ceil_log2 (d);
3185
3186 gcc_assert (lgup <= n);
3187
3188 pow = n + lgup;
3189 pow2 = n + lgup - precision;
3190
3191 /* We could handle this with some effort, but this case is much
3192 better handled directly with a scc insn, so rely on caller using
3193 that. */
3194 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3195
3196 /* mlow = 2^(N + lgup)/d */
3197 if (pow >= HOST_BITS_PER_WIDE_INT)
3198 {
3199 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3200 nl = 0;
3201 }
3202 else
3203 {
3204 nh = 0;
3205 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3206 }
3207 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3208 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3209
3210 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3211 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3212 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3213 else
3214 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3215 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3216 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3217
3218 gcc_assert (!mhigh_hi || nh - d < d);
3219 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3220 /* Assert that mlow < mhigh. */
3221 gcc_assert (mlow_hi < mhigh_hi
3222 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3223
3224 /* If precision == N, then mlow, mhigh exceed 2^N
3225 (but they do not exceed 2^(N+1)). */
3226
3227 /* Reduce to lowest terms. */
3228 for (post_shift = lgup; post_shift > 0; post_shift--)
3229 {
3230 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3231 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3232 if (ml_lo >= mh_lo)
3233 break;
3234
3235 mlow_hi = 0;
3236 mlow_lo = ml_lo;
3237 mhigh_hi = 0;
3238 mhigh_lo = mh_lo;
3239 }
3240
3241 *post_shift_ptr = post_shift;
3242 *lgup_ptr = lgup;
3243 if (n < HOST_BITS_PER_WIDE_INT)
3244 {
3245 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3246 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3247 return mhigh_lo >= mask;
3248 }
3249 else
3250 {
3251 *multiplier_ptr = GEN_INT (mhigh_lo);
3252 return mhigh_hi;
3253 }
3254 }
3255
3256 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3257 congruent to 1 (mod 2**N). */
3258
3259 static unsigned HOST_WIDE_INT
3260 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3261 {
3262 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3263
3264 /* The algorithm notes that the choice y = x satisfies
3265 x*y == 1 mod 2^3, since x is assumed odd.
3266 Each iteration doubles the number of bits of significance in y. */
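  /* A worked instance (values assumed): for x == 7 and n == 8, y starts at
     7, which is already correct mod 2^3 (7*7 == 49 == 1 mod 8); iterating
     y = y*(2 - x*y) & 0xff yields 183 and then leaves it fixed, and indeed
     7 * 183 == 1281 == 1 + 5*256.  */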
3267
3268 unsigned HOST_WIDE_INT mask;
3269 unsigned HOST_WIDE_INT y = x;
3270 int nbit = 3;
3271
3272 mask = (n == HOST_BITS_PER_WIDE_INT
3273 ? ~(unsigned HOST_WIDE_INT) 0
3274 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3275
3276 while (nbit < n)
3277 {
3278 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3279 nbit *= 2;
3280 }
3281 return y;
3282 }
3283
3284 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3285 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3286 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3287 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3288 become signed.
3289
3290 The result is put in TARGET if that is convenient.
3291
3292 MODE is the mode of operation. */
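/* Informally, and working modulo the width of MODE: the unsigned high part
   exceeds the signed high part by OP1 when OP0 is negative plus OP0 when
   OP1 is negative.  The arithmetic right shifts by the mode width minus 1
   below build exactly those two conditional addends, which are then added
   (when converting a signed product to unsigned) or subtracted (the other
   way round).  */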
3293
3294 rtx
3295 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3296 rtx op1, rtx target, int unsignedp)
3297 {
3298 rtx tem;
3299 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3300
3301 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3302 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3303 tem = expand_and (mode, tem, op1, NULL_RTX);
3304 adj_operand
3305 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3306 adj_operand);
3307
3308 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3309 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3310 tem = expand_and (mode, tem, op0, NULL_RTX);
3311 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3312 target);
3313
3314 return target;
3315 }
3316
3317 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3318
3319 static rtx
3320 extract_high_half (enum machine_mode mode, rtx op)
3321 {
3322 enum machine_mode wider_mode;
3323
3324 if (mode == word_mode)
3325 return gen_highpart (mode, op);
3326
3327 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3328
3329 wider_mode = GET_MODE_WIDER_MODE (mode);
3330 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3331 GET_MODE_BITSIZE (mode), 0, 1);
3332 return convert_modes (mode, wider_mode, op, 0);
3333 }
3334
3335 /* Like expand_mult_highpart, but only consider using a multiplication
3336 optab. OP1 is an rtx for the constant operand. */
3337
3338 static rtx
3339 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3340 rtx target, int unsignedp, int max_cost)
3341 {
3342 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3343 enum machine_mode wider_mode;
3344 optab moptab;
3345 rtx tem;
3346 int size;
3347 bool speed = optimize_insn_for_speed_p ();
3348
3349 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3350
3351 wider_mode = GET_MODE_WIDER_MODE (mode);
3352 size = GET_MODE_BITSIZE (mode);
3353
3354 /* Firstly, try using a multiplication insn that only generates the needed
3355 high part of the product, with the signedness given by UNSIGNEDP. */
3356 if (mul_highpart_cost[speed][mode] < max_cost)
3357 {
3358 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3359 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3360 unsignedp, OPTAB_DIRECT);
3361 if (tem)
3362 return tem;
3363 }
3364
3365 /* Secondly, same as above, but use the sign flavor opposite to UNSIGNEDP.
3366 Need to adjust the result after the multiplication. */
3367 if (size - 1 < BITS_PER_WORD
3368 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3369 + 4 * add_cost[speed][mode] < max_cost))
3370 {
3371 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3372 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3373 unsignedp, OPTAB_DIRECT);
3374 if (tem)
3375 /* We used the wrong signedness. Adjust the result. */
3376 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3377 tem, unsignedp);
3378 }
3379
3380 /* Try widening multiplication. */
3381 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3382 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3383 && mul_widen_cost[speed][wider_mode] < max_cost)
3384 {
3385 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3386 unsignedp, OPTAB_WIDEN);
3387 if (tem)
3388 return extract_high_half (mode, tem);
3389 }
3390
3391 /* Try widening the mode and perform a non-widening multiplication. */
3392 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3393 && size - 1 < BITS_PER_WORD
3394 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3395 {
3396 rtx insns, wop0, wop1;
3397
3398 /* We need to widen the operands, for example to ensure the
3399 constant multiplier is correctly sign or zero extended.
3400 Use a sequence to clean up any instructions emitted by
3401 the conversions if things don't work out. */
3402 start_sequence ();
3403 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3404 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3405 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3406 unsignedp, OPTAB_WIDEN);
3407 insns = get_insns ();
3408 end_sequence ();
3409
3410 if (tem)
3411 {
3412 emit_insn (insns);
3413 return extract_high_half (mode, tem);
3414 }
3415 }
3416
3417 /* Try widening multiplication of opposite signedness, and adjust. */
3418 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3419 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3420 && size - 1 < BITS_PER_WORD
3421 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3422 + 4 * add_cost[speed][mode] < max_cost))
3423 {
3424 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3425 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3426 if (tem != 0)
3427 {
3428 tem = extract_high_half (mode, tem);
3429 /* We used the wrong signedness. Adjust the result. */
3430 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3431 target, unsignedp);
3432 }
3433 }
3434
3435 return 0;
3436 }
3437
3438 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3439 putting the high half of the result in TARGET if that is convenient,
3440 and returning where the result is. If the operation cannot be performed,
3441 0 is returned.
3442
3443 MODE is the mode of operation and result.
3444
3445 UNSIGNEDP nonzero means unsigned multiply.
3446
3447 MAX_COST is the total allowed cost for the expanded RTL. */
3448
3449 static rtx
3450 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3451 rtx target, int unsignedp, int max_cost)
3452 {
3453 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3454 unsigned HOST_WIDE_INT cnst1;
3455 int extra_cost;
3456 bool sign_adjust = false;
3457 enum mult_variant variant;
3458 struct algorithm alg;
3459 rtx tem;
3460 bool speed = optimize_insn_for_speed_p ();
3461
3462 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3463 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3464 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3465
3466 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3467
3468 /* We can't optimize modes wider than BITS_PER_WORD.
3469 ??? We might be able to perform double-word arithmetic if
3470 mode == word_mode; however, all the cost calculations in
3471 synth_mult etc. assume single-word operations. */
3472 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3473 return expand_mult_highpart_optab (mode, op0, op1, target,
3474 unsignedp, max_cost);
3475
3476 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3477
3478 /* Check whether we try to multiply by a negative constant. */
3479 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3480 {
3481 sign_adjust = true;
3482 extra_cost += add_cost[speed][mode];
3483 }
3484
3485 /* See whether shift/add multiplication is cheap enough. */
3486 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3487 max_cost - extra_cost))
3488 {
3489 /* See whether the specialized multiplication optabs are
3490 cheaper than the shift/add version. */
3491 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3492 alg.cost.cost + extra_cost);
3493 if (tem)
3494 return tem;
3495
3496 tem = convert_to_mode (wider_mode, op0, unsignedp);
3497 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3498 tem = extract_high_half (mode, tem);
3499
3500 /* Adjust result for signedness. */
3501 if (sign_adjust)
3502 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3503
3504 return tem;
3505 }
3506 return expand_mult_highpart_optab (mode, op0, op1, target,
3507 unsignedp, max_cost);
3508 }
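
/* Illustrative note on the sign_adjust step above: CNST1 is the low
   GET_MODE_BITSIZE (mode) bits of OP1 treated as unsigned.  When the real
   signed constant is negative it equals CNST1 - 2**N, and since

     highpart (op0 * (CNST1 - 2**N)) == highpart (op0 * CNST1) - op0

   modulo 2**N, a single subtraction of OP0 is enough to turn the result of
   the unsigned shift/add expansion into the signed high part.  */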
3509
3510
3511 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3512
3513 static rtx
3514 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3515 {
3516 unsigned HOST_WIDE_INT masklow, maskhigh;
3517 rtx result, temp, shift, label;
3518 int logd;
3519
3520 logd = floor_log2 (d);
3521 result = gen_reg_rtx (mode);
3522
3523 /* Avoid conditional branches when they're expensive. */
3524 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3525 && optimize_insn_for_speed_p ())
3526 {
3527 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3528 mode, 0, -1);
3529 if (signmask)
3530 {
3531 signmask = force_reg (mode, signmask);
3532 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3533 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3534
3535 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3536 which instruction sequence to use. If logical right shifts
3537 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3538 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3539
3540 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3541 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3542 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3543 {
3544 temp = expand_binop (mode, xor_optab, op0, signmask,
3545 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3546 temp = expand_binop (mode, sub_optab, temp, signmask,
3547 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3548 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3549 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3550 temp = expand_binop (mode, xor_optab, temp, signmask,
3551 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3552 temp = expand_binop (mode, sub_optab, temp, signmask,
3553 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3554 }
3555 else
3556 {
3557 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3558 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3559 signmask = force_reg (mode, signmask);
3560
3561 temp = expand_binop (mode, add_optab, op0, signmask,
3562 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3563 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3564 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3565 temp = expand_binop (mode, sub_optab, temp, signmask,
3566 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3567 }
3568 return temp;
3569 }
3570 }
3571
3572 /* Mask contains the mode's signbit and the significant bits of the
3573 modulus. By including the signbit in the operation, many targets
3574 can avoid an explicit compare operation in the following comparison
3575 against zero. */
3576
3577 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3578 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3579 {
3580 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3581 maskhigh = -1;
3582 }
3583 else
3584 maskhigh = (HOST_WIDE_INT) -1
3585 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3586
3587 temp = expand_binop (mode, and_optab, op0,
3588 immed_double_const (masklow, maskhigh, mode),
3589 result, 1, OPTAB_LIB_WIDEN);
3590 if (temp != result)
3591 emit_move_insn (result, temp);
3592
3593 label = gen_label_rtx ();
3594 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3595
3596 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3597 0, OPTAB_LIB_WIDEN);
3598 masklow = (HOST_WIDE_INT) -1 << logd;
3599 maskhigh = -1;
3600 temp = expand_binop (mode, ior_optab, temp,
3601 immed_double_const (masklow, maskhigh, mode),
3602 result, 1, OPTAB_LIB_WIDEN);
3603 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3604 0, OPTAB_LIB_WIDEN);
3605 if (temp != result)
3606 emit_move_insn (result, temp);
3607 emit_label (label);
3608 return result;
3609 }
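
/* Illustrative example for the branch-free path above, with a 32-bit mode
   and d = 8 (logd = 3): for op0 = -9 the store-flag/shift variant computes
   signmask = 7 and then ((-9 + 7) & 7) - 7 = 6 - 7 = -1, which is -9 % 8
   under truncating (C-style) semantics.  For nonnegative op0 the mask is 0
   and the AND alone gives the result.  */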
3610
3611 /* Expand signed division of OP0 by a power of two D in mode MODE.
3612 This routine is only called for positive values of D. */
3613
3614 static rtx
3615 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3616 {
3617 rtx temp, label;
3618 int logd;
3619
3620 logd = floor_log2 (d);
3621
3622 if (d == 2
3623 && BRANCH_COST (optimize_insn_for_speed_p (),
3624 false) >= 1)
3625 {
3626 temp = gen_reg_rtx (mode);
3627 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3628 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3629 0, OPTAB_LIB_WIDEN);
3630 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3631 }
3632
3633 #ifdef HAVE_conditional_move
3634 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3635 >= 2)
3636 {
3637 rtx temp2;
3638
3639 /* ??? emit_conditional_move forces a stack adjustment via
3640 compare_from_rtx so, if the sequence is discarded, it will
3641 be lost. Do it now instead. */
3642 do_pending_stack_adjust ();
3643
3644 start_sequence ();
3645 temp2 = copy_to_mode_reg (mode, op0);
3646 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3647 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3648 temp = force_reg (mode, temp);
3649
3650 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3651 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3652 mode, temp, temp2, mode, 0);
3653 if (temp2)
3654 {
3655 rtx seq = get_insns ();
3656 end_sequence ();
3657 emit_insn (seq);
3658 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3659 }
3660 end_sequence ();
3661 }
3662 #endif
3663
3664 if (BRANCH_COST (optimize_insn_for_speed_p (),
3665 false) >= 2)
3666 {
3667 int ushift = GET_MODE_BITSIZE (mode) - logd;
3668
3669 temp = gen_reg_rtx (mode);
3670 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3671 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3672 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3673 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3674 else
3675 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3676 ushift, NULL_RTX, 1);
3677 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3678 0, OPTAB_LIB_WIDEN);
3679 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3680 }
3681
3682 label = gen_label_rtx ();
3683 temp = copy_to_mode_reg (mode, op0);
3684 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3685 expand_inc (temp, GEN_INT (d - 1));
3686 emit_label (label);
3687 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3688 }
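
/* Illustrative note: the store-flag based paths above implement

     op0 / 2**logd  ==  (op0 + (op0 < 0 ? 2**logd - 1 : 0)) >> logd

   with an arithmetic final shift.  E.g. for a 32-bit mode, op0 = -7 and
   d = 4: (-7 + 3) >> 2 = -4 >> 2 = -1, matching C's truncating division,
   whereas a plain arithmetic shift of -7 would give -2.  */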
3689 \f
3690 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3691 if that is convenient, and returning where the result is.
3692 You may request either the quotient or the remainder as the result;
3693 specify REM_FLAG nonzero to get the remainder.
3694
3695 CODE is the expression code for which kind of division this is;
3696 it controls how rounding is done. MODE is the machine mode to use.
3697 UNSIGNEDP nonzero means do unsigned division. */
3698
3699 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3700 and then correct it by or'ing in missing high bits
3701 if result of ANDI is nonzero.
3702 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3703 This could optimize to a bfexts instruction.
3704 But C doesn't use these operations, so their optimizations are
3705 left for later. */
3706 /* ??? For modulo, we don't actually need the highpart of the first product,
3707 the low part will do nicely. And for small divisors, the second multiply
3708 can also be a low-part only multiply or even be completely left out.
3709 E.g. to calculate the remainder of a division by 3 with a 32 bit
3710 multiply, multiply with 0x55555556 and extract the upper two bits;
3711 the result is exact for inputs up to 0x1fffffff.
3712 The input range can be reduced by using cross-sum rules.
3713 For odd divisors >= 3, the following table gives right shift counts
3714 so that if a number is shifted by an integer multiple of the given
3715 amount, the remainder stays the same:
3716 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3717 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3718 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3719 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3720 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3721
3722 Cross-sum rules for even numbers can be derived by leaving as many bits
3723 to the right alone as the divisor has zeros to the right.
3724 E.g. if x is an unsigned 32 bit number:
3725 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3726 */
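
/* A quick sanity check of the divide-by-3 example above, reading "the
   upper two bits" as bits 30 and 31 of the low 32-bit product: for x = 11
   the low 32 bits of x * 0x55555556 are 0xAAAAAAB2, and shifting that
   right by 30 gives 2, which is indeed 11 mod 3.  */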
3727
3728 rtx
3729 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3730 rtx op0, rtx op1, rtx target, int unsignedp)
3731 {
3732 enum machine_mode compute_mode;
3733 rtx tquotient;
3734 rtx quotient = 0, remainder = 0;
3735 rtx last;
3736 int size;
3737 rtx insn, set;
3738 optab optab1, optab2;
3739 int op1_is_constant, op1_is_pow2 = 0;
3740 int max_cost, extra_cost;
3741 static HOST_WIDE_INT last_div_const = 0;
3742 static HOST_WIDE_INT ext_op1;
3743 bool speed = optimize_insn_for_speed_p ();
3744
3745 op1_is_constant = CONST_INT_P (op1);
3746 if (op1_is_constant)
3747 {
3748 ext_op1 = INTVAL (op1);
3749 if (unsignedp)
3750 ext_op1 &= GET_MODE_MASK (mode);
3751 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3752 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3753 }
3754
3755 /*
3756 This is the structure of expand_divmod:
3757
3758 First comes code to fix up the operands so we can perform the operations
3759 correctly and efficiently.
3760
3761 Second comes a switch statement with code specific for each rounding mode.
3762 For some special operands this code emits all RTL for the desired
3763 operation; for other cases, it generates only a quotient and stores it in
3764 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3765 to indicate that it has not done anything.
3766
3767 Last comes code that finishes the operation. If QUOTIENT is set and
3768 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3769 QUOTIENT is not set, it is computed using trunc rounding.
3770
3771 We try to generate special code for division and remainder when OP1 is a
3772 constant. If |OP1| = 2**n we can use shifts and some other fast
3773 operations. For other values of OP1, we compute a carefully selected
3774 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3775 by m.
3776
3777 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3778 half of the product. Different strategies for generating the product are
3779 implemented in expand_mult_highpart.
3780
3781 If what we actually want is the remainder, we generate that by another
3782 by-constant multiplication and a subtraction. */
3783
3784 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3785 code below will malfunction if we are, so check here and handle
3786 the special case if so. */
3787 if (op1 == const1_rtx)
3788 return rem_flag ? const0_rtx : op0;
3789
3790 /* When dividing by -1, we could get an overflow.
3791 negv_optab can handle overflows. */
3792 if (! unsignedp && op1 == constm1_rtx)
3793 {
3794 if (rem_flag)
3795 return const0_rtx;
3796 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3797 ? negv_optab : neg_optab, op0, target, 0);
3798 }
3799
3800 if (target
3801 /* Don't use the function value register as a target
3802 since we have to read it as well as write it,
3803 and function-inlining gets confused by this. */
3804 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3805 /* Don't clobber an operand while doing a multi-step calculation. */
3806 || ((rem_flag || op1_is_constant)
3807 && (reg_mentioned_p (target, op0)
3808 || (MEM_P (op0) && MEM_P (target))))
3809 || reg_mentioned_p (target, op1)
3810 || (MEM_P (op1) && MEM_P (target))))
3811 target = 0;
3812
3813 /* Get the mode in which to perform this computation. Normally it will
3814 be MODE, but sometimes we can't do the desired operation in MODE.
3815 If so, pick a wider mode in which we can do the operation. Convert
3816 to that mode at the start to avoid repeated conversions.
3817
3818 First see what operations we need. These depend on the expression
3819 we are evaluating. (We assume that divxx3 insns exist under the
3820 same conditions as modxx3 insns do, and that these insns don't normally
3821 fail. If these assumptions are not correct, we may generate less
3822 efficient code in some cases.)
3823
3824 Then see if we find a mode in which we can open-code that operation
3825 (either a division, modulus, or shift). Finally, check for the smallest
3826 mode for which we can do the operation with a library call. */
3827
3828 /* We might want to refine this now that we have division-by-constant
3829 optimization. Since expand_mult_highpart tries so many variants, it is
3830 not straightforward to generalize this. Maybe we should make an array
3831 of possible modes in init_expmed? Save this for GCC 2.7. */
3832
3833 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3834 ? (unsignedp ? lshr_optab : ashr_optab)
3835 : (unsignedp ? udiv_optab : sdiv_optab));
3836 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3837 ? optab1
3838 : (unsignedp ? udivmod_optab : sdivmod_optab));
3839
3840 for (compute_mode = mode; compute_mode != VOIDmode;
3841 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3842 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3843 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3844 break;
3845
3846 if (compute_mode == VOIDmode)
3847 for (compute_mode = mode; compute_mode != VOIDmode;
3848 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3849 if (optab_libfunc (optab1, compute_mode)
3850 || optab_libfunc (optab2, compute_mode))
3851 break;
3852
3853 /* If we still couldn't find a mode, use MODE, but expand_binop will
3854 probably die. */
3855 if (compute_mode == VOIDmode)
3856 compute_mode = mode;
3857
3858 if (target && GET_MODE (target) == compute_mode)
3859 tquotient = target;
3860 else
3861 tquotient = gen_reg_rtx (compute_mode);
3862
3863 size = GET_MODE_BITSIZE (compute_mode);
3864 #if 0
3865 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3866 (mode), and thereby get better code when OP1 is a constant. Do that
3867 later. It will require going over all usages of SIZE below. */
3868 size = GET_MODE_BITSIZE (mode);
3869 #endif
3870
3871 /* Only deduct something for a REM if the last divide done was
3872 for a different constant. Then set the constant of the last
3873 divide. */
3874 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3875 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3876 && INTVAL (op1) == last_div_const))
3877 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3878
3879 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3880
3881 /* Now convert to the best mode to use. */
3882 if (compute_mode != mode)
3883 {
3884 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3885 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3886
3887 /* convert_modes may have placed op1 into a register, so we
3888 must recompute the following. */
3889 op1_is_constant = CONST_INT_P (op1);
3890 op1_is_pow2 = (op1_is_constant
3891 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3892 || (! unsignedp
3893 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3894 }
3895
3896 /* If one of the operands is a volatile MEM, copy it into a register. */
3897
3898 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3899 op0 = force_reg (compute_mode, op0);
3900 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3901 op1 = force_reg (compute_mode, op1);
3902
3903 /* If we need the remainder or if OP1 is constant, we need to
3904 put OP0 in a register in case it has any queued subexpressions. */
3905 if (rem_flag || op1_is_constant)
3906 op0 = force_reg (compute_mode, op0);
3907
3908 last = get_last_insn ();
3909
3910 /* Promote floor rounding to trunc rounding for unsigned operations. */
3911 if (unsignedp)
3912 {
3913 if (code == FLOOR_DIV_EXPR)
3914 code = TRUNC_DIV_EXPR;
3915 if (code == FLOOR_MOD_EXPR)
3916 code = TRUNC_MOD_EXPR;
3917 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3918 code = TRUNC_DIV_EXPR;
3919 }
3920
3921 if (op1 != const0_rtx)
3922 switch (code)
3923 {
3924 case TRUNC_MOD_EXPR:
3925 case TRUNC_DIV_EXPR:
3926 if (op1_is_constant)
3927 {
3928 if (unsignedp)
3929 {
3930 unsigned HOST_WIDE_INT mh;
3931 int pre_shift, post_shift;
3932 int dummy;
3933 rtx ml;
3934 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3935 & GET_MODE_MASK (compute_mode));
3936
3937 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3938 {
3939 pre_shift = floor_log2 (d);
3940 if (rem_flag)
3941 {
3942 remainder
3943 = expand_binop (compute_mode, and_optab, op0,
3944 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3945 remainder, 1,
3946 OPTAB_LIB_WIDEN);
3947 if (remainder)
3948 return gen_lowpart (mode, remainder);
3949 }
3950 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3951 pre_shift, tquotient, 1);
3952 }
3953 else if (size <= HOST_BITS_PER_WIDE_INT)
3954 {
3955 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3956 {
3957 /* Most significant bit of divisor is set; emit an scc
3958 insn. */
3959 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
3960 compute_mode, 1, 1);
3961 }
3962 else
3963 {
3964 /* Find a suitable multiplier and right shift count
3965 instead of multiplying with D. */
3966
3967 mh = choose_multiplier (d, size, size,
3968 &ml, &post_shift, &dummy);
3969
3970 /* If the suggested multiplier is more than SIZE bits,
3971 we can do better for even divisors, using an
3972 initial right shift. */
3973 if (mh != 0 && (d & 1) == 0)
3974 {
3975 pre_shift = floor_log2 (d & -d);
3976 mh = choose_multiplier (d >> pre_shift, size,
3977 size - pre_shift,
3978 &ml, &post_shift, &dummy);
3979 gcc_assert (!mh);
3980 }
3981 else
3982 pre_shift = 0;
3983
3984 if (mh != 0)
3985 {
3986 rtx t1, t2, t3, t4;
3987
3988 if (post_shift - 1 >= BITS_PER_WORD)
3989 goto fail1;
3990
3991 extra_cost
3992 = (shift_cost[speed][compute_mode][post_shift - 1]
3993 + shift_cost[speed][compute_mode][1]
3994 + 2 * add_cost[speed][compute_mode]);
3995 t1 = expand_mult_highpart (compute_mode, op0, ml,
3996 NULL_RTX, 1,
3997 max_cost - extra_cost);
3998 if (t1 == 0)
3999 goto fail1;
4000 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4001 op0, t1),
4002 NULL_RTX);
4003 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
4004 t2, 1, NULL_RTX, 1);
4005 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4006 t1, t3),
4007 NULL_RTX);
4008 quotient = expand_shift
4009 (RSHIFT_EXPR, compute_mode, t4,
4010 post_shift - 1, tquotient, 1);
4011 }
4012 else
4013 {
4014 rtx t1, t2;
4015
4016 if (pre_shift >= BITS_PER_WORD
4017 || post_shift >= BITS_PER_WORD)
4018 goto fail1;
4019
4020 t1 = expand_shift
4021 (RSHIFT_EXPR, compute_mode, op0,
4022 pre_shift, NULL_RTX, 1);
4023 extra_cost
4024 = (shift_cost[speed][compute_mode][pre_shift]
4025 + shift_cost[speed][compute_mode][post_shift]);
4026 t2 = expand_mult_highpart (compute_mode, t1, ml,
4027 NULL_RTX, 1,
4028 max_cost - extra_cost);
4029 if (t2 == 0)
4030 goto fail1;
4031 quotient = expand_shift
4032 (RSHIFT_EXPR, compute_mode, t2,
4033 post_shift, tquotient, 1);
4034 }
4035 }
4036 }
4037 else /* Too wide mode to use tricky code */
4038 break;
4039
4040 insn = get_last_insn ();
4041 if (insn != last
4042 && (set = single_set (insn)) != 0
4043 && SET_DEST (set) == quotient)
4044 set_unique_reg_note (insn,
4045 REG_EQUAL,
4046 gen_rtx_UDIV (compute_mode, op0, op1));
4047 }
4048 else /* TRUNC_DIV, signed */
4049 {
4050 unsigned HOST_WIDE_INT ml;
4051 int lgup, post_shift;
4052 rtx mlr;
4053 HOST_WIDE_INT d = INTVAL (op1);
4054 unsigned HOST_WIDE_INT abs_d;
4055
4056 /* Since d might be INT_MIN, we have to cast to
4057 unsigned HOST_WIDE_INT before negating to avoid
4058 undefined signed overflow. */
4059 abs_d = (d >= 0
4060 ? (unsigned HOST_WIDE_INT) d
4061 : - (unsigned HOST_WIDE_INT) d);
4062
4063 /* n rem d = n rem -d */
4064 if (rem_flag && d < 0)
4065 {
4066 d = abs_d;
4067 op1 = gen_int_mode (abs_d, compute_mode);
4068 }
4069
4070 if (d == 1)
4071 quotient = op0;
4072 else if (d == -1)
4073 quotient = expand_unop (compute_mode, neg_optab, op0,
4074 tquotient, 0);
4075 else if (HOST_BITS_PER_WIDE_INT >= size
4076 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4077 {
4078 /* This case is not handled correctly below. */
4079 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4080 compute_mode, 1, 1);
4081 if (quotient == 0)
4082 goto fail1;
4083 }
4084 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4085 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4086 : sdiv_pow2_cheap[speed][compute_mode])
4087 /* We assume that the cheap metric is true if the
4088 optab has an expander for this mode. */
4089 && ((optab_handler ((rem_flag ? smod_optab
4090 : sdiv_optab),
4091 compute_mode)
4092 != CODE_FOR_nothing)
4093 || (optab_handler (sdivmod_optab,
4094 compute_mode)
4095 != CODE_FOR_nothing)))
4096 ;
4097 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4098 {
4099 if (rem_flag)
4100 {
4101 remainder = expand_smod_pow2 (compute_mode, op0, d);
4102 if (remainder)
4103 return gen_lowpart (mode, remainder);
4104 }
4105
4106 if (sdiv_pow2_cheap[speed][compute_mode]
4107 && ((optab_handler (sdiv_optab, compute_mode)
4108 != CODE_FOR_nothing)
4109 || (optab_handler (sdivmod_optab, compute_mode)
4110 != CODE_FOR_nothing)))
4111 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4112 compute_mode, op0,
4113 gen_int_mode (abs_d,
4114 compute_mode),
4115 NULL_RTX, 0);
4116 else
4117 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4118
4119 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4120 negate the quotient. */
4121 if (d < 0)
4122 {
4123 insn = get_last_insn ();
4124 if (insn != last
4125 && (set = single_set (insn)) != 0
4126 && SET_DEST (set) == quotient
4127 && abs_d < ((unsigned HOST_WIDE_INT) 1
4128 << (HOST_BITS_PER_WIDE_INT - 1)))
4129 set_unique_reg_note (insn,
4130 REG_EQUAL,
4131 gen_rtx_DIV (compute_mode,
4132 op0,
4133 GEN_INT
4134 (trunc_int_for_mode
4135 (abs_d,
4136 compute_mode))));
4137
4138 quotient = expand_unop (compute_mode, neg_optab,
4139 quotient, quotient, 0);
4140 }
4141 }
4142 else if (size <= HOST_BITS_PER_WIDE_INT)
4143 {
4144 choose_multiplier (abs_d, size, size - 1,
4145 &mlr, &post_shift, &lgup);
4146 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4147 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4148 {
4149 rtx t1, t2, t3;
4150
4151 if (post_shift >= BITS_PER_WORD
4152 || size - 1 >= BITS_PER_WORD)
4153 goto fail1;
4154
4155 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4156 + shift_cost[speed][compute_mode][size - 1]
4157 + add_cost[speed][compute_mode]);
4158 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4159 NULL_RTX, 0,
4160 max_cost - extra_cost);
4161 if (t1 == 0)
4162 goto fail1;
4163 t2 = expand_shift
4164 (RSHIFT_EXPR, compute_mode, t1,
4165 post_shift, NULL_RTX, 0);
4166 t3 = expand_shift
4167 (RSHIFT_EXPR, compute_mode, op0,
4168 size - 1, NULL_RTX, 0);
4169 if (d < 0)
4170 quotient
4171 = force_operand (gen_rtx_MINUS (compute_mode,
4172 t3, t2),
4173 tquotient);
4174 else
4175 quotient
4176 = force_operand (gen_rtx_MINUS (compute_mode,
4177 t2, t3),
4178 tquotient);
4179 }
4180 else
4181 {
4182 rtx t1, t2, t3, t4;
4183
4184 if (post_shift >= BITS_PER_WORD
4185 || size - 1 >= BITS_PER_WORD)
4186 goto fail1;
4187
4188 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4189 mlr = gen_int_mode (ml, compute_mode);
4190 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4191 + shift_cost[speed][compute_mode][size - 1]
4192 + 2 * add_cost[speed][compute_mode]);
4193 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4194 NULL_RTX, 0,
4195 max_cost - extra_cost);
4196 if (t1 == 0)
4197 goto fail1;
4198 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4199 t1, op0),
4200 NULL_RTX);
4201 t3 = expand_shift
4202 (RSHIFT_EXPR, compute_mode, t2,
4203 post_shift, NULL_RTX, 0);
4204 t4 = expand_shift
4205 (RSHIFT_EXPR, compute_mode, op0,
4206 size - 1, NULL_RTX, 0);
4207 if (d < 0)
4208 quotient
4209 = force_operand (gen_rtx_MINUS (compute_mode,
4210 t4, t3),
4211 tquotient);
4212 else
4213 quotient
4214 = force_operand (gen_rtx_MINUS (compute_mode,
4215 t3, t4),
4216 tquotient);
4217 }
4218 }
4219 else /* Too wide mode to use tricky code */
4220 break;
4221
4222 insn = get_last_insn ();
4223 if (insn != last
4224 && (set = single_set (insn)) != 0
4225 && SET_DEST (set) == quotient)
4226 set_unique_reg_note (insn,
4227 REG_EQUAL,
4228 gen_rtx_DIV (compute_mode, op0, op1));
4229 }
4230 break;
4231 }
4232 fail1:
4233 delete_insns_since (last);
4234 break;
4235
4236 case FLOOR_DIV_EXPR:
4237 case FLOOR_MOD_EXPR:
4238 /* We will come here only for signed operations. */
4239 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4240 {
4241 unsigned HOST_WIDE_INT mh;
4242 int pre_shift, lgup, post_shift;
4243 HOST_WIDE_INT d = INTVAL (op1);
4244 rtx ml;
4245
4246 if (d > 0)
4247 {
4248 /* We could just as easily deal with negative constants here,
4249 but it does not seem worth the trouble for GCC 2.6. */
4250 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4251 {
4252 pre_shift = floor_log2 (d);
4253 if (rem_flag)
4254 {
4255 remainder = expand_binop (compute_mode, and_optab, op0,
4256 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4257 remainder, 0, OPTAB_LIB_WIDEN);
4258 if (remainder)
4259 return gen_lowpart (mode, remainder);
4260 }
4261 quotient = expand_shift
4262 (RSHIFT_EXPR, compute_mode, op0,
4263 pre_shift, tquotient, 0);
4264 }
4265 else
4266 {
4267 rtx t1, t2, t3, t4;
4268
4269 mh = choose_multiplier (d, size, size - 1,
4270 &ml, &post_shift, &lgup);
4271 gcc_assert (!mh);
4272
4273 if (post_shift < BITS_PER_WORD
4274 && size - 1 < BITS_PER_WORD)
4275 {
4276 t1 = expand_shift
4277 (RSHIFT_EXPR, compute_mode, op0,
4278 size - 1, NULL_RTX, 0);
4279 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4280 NULL_RTX, 0, OPTAB_WIDEN);
4281 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4282 + shift_cost[speed][compute_mode][size - 1]
4283 + 2 * add_cost[speed][compute_mode]);
4284 t3 = expand_mult_highpart (compute_mode, t2, ml,
4285 NULL_RTX, 1,
4286 max_cost - extra_cost);
4287 if (t3 != 0)
4288 {
4289 t4 = expand_shift
4290 (RSHIFT_EXPR, compute_mode, t3,
4291 post_shift, NULL_RTX, 1);
4292 quotient = expand_binop (compute_mode, xor_optab,
4293 t4, t1, tquotient, 0,
4294 OPTAB_WIDEN);
4295 }
4296 }
4297 }
4298 }
4299 else
4300 {
4301 rtx nsign, t1, t2, t3, t4;
4302 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4303 op0, constm1_rtx), NULL_RTX);
4304 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4305 0, OPTAB_WIDEN);
4306 nsign = expand_shift
4307 (RSHIFT_EXPR, compute_mode, t2,
4308 size - 1, NULL_RTX, 0);
4309 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4310 NULL_RTX);
4311 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4312 NULL_RTX, 0);
4313 if (t4)
4314 {
4315 rtx t5;
4316 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4317 NULL_RTX, 0);
4318 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4319 t4, t5),
4320 tquotient);
4321 }
4322 }
4323 }
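
/* Illustrative trace of the negative-constant-divisor path above: with
   nsign = -1 when op0 <= 0 and 0 otherwise, the quotient is computed as
   trunc ((op0 - 1 - nsign) / op1) + ~nsign.  For a 32-bit mode, op0 = 7
   and op1 = -2 this gives trunc (6 / -2) + (-1) = -4, i.e. floor (7 / -2);
   for op0 = -7 it gives trunc (-7 / -2) = 3, i.e. floor (3.5).  */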
4324
4325 if (quotient != 0)
4326 break;
4327 delete_insns_since (last);
4328
4329 /* Try using an instruction that produces both the quotient and
4330 remainder, using truncation. We can easily compensate the quotient
4331 or remainder to get floor rounding, once we have the remainder.
4332 Notice that we compute also the final remainder value here,
4333 and return the result right away. */
4334 if (target == 0 || GET_MODE (target) != compute_mode)
4335 target = gen_reg_rtx (compute_mode);
4336
4337 if (rem_flag)
4338 {
4339 remainder
4340 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4341 quotient = gen_reg_rtx (compute_mode);
4342 }
4343 else
4344 {
4345 quotient
4346 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4347 remainder = gen_reg_rtx (compute_mode);
4348 }
4349
4350 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4351 quotient, remainder, 0))
4352 {
4353 /* This could be computed with a branch-less sequence.
4354 Save that for later. */
4355 rtx tem;
4356 rtx label = gen_label_rtx ();
4357 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4358 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4359 NULL_RTX, 0, OPTAB_WIDEN);
4360 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4361 expand_dec (quotient, const1_rtx);
4362 expand_inc (remainder, op1);
4363 emit_label (label);
4364 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4365 }
4366
4367 /* No luck with division elimination or divmod. Have to do it
4368 by conditionally adjusting op0 *and* the result. */
4369 {
4370 rtx label1, label2, label3, label4, label5;
4371 rtx adjusted_op0;
4372 rtx tem;
4373
4374 quotient = gen_reg_rtx (compute_mode);
4375 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4376 label1 = gen_label_rtx ();
4377 label2 = gen_label_rtx ();
4378 label3 = gen_label_rtx ();
4379 label4 = gen_label_rtx ();
4380 label5 = gen_label_rtx ();
4381 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4382 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4383 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4384 quotient, 0, OPTAB_LIB_WIDEN);
4385 if (tem != quotient)
4386 emit_move_insn (quotient, tem);
4387 emit_jump_insn (gen_jump (label5));
4388 emit_barrier ();
4389 emit_label (label1);
4390 expand_inc (adjusted_op0, const1_rtx);
4391 emit_jump_insn (gen_jump (label4));
4392 emit_barrier ();
4393 emit_label (label2);
4394 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4395 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4396 quotient, 0, OPTAB_LIB_WIDEN);
4397 if (tem != quotient)
4398 emit_move_insn (quotient, tem);
4399 emit_jump_insn (gen_jump (label5));
4400 emit_barrier ();
4401 emit_label (label3);
4402 expand_dec (adjusted_op0, const1_rtx);
4403 emit_label (label4);
4404 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4405 quotient, 0, OPTAB_LIB_WIDEN);
4406 if (tem != quotient)
4407 emit_move_insn (quotient, tem);
4408 expand_dec (quotient, const1_rtx);
4409 emit_label (label5);
4410 }
4411 break;
4412
4413 case CEIL_DIV_EXPR:
4414 case CEIL_MOD_EXPR:
4415 if (unsignedp)
4416 {
4417 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4418 {
4419 rtx t1, t2, t3;
4420 unsigned HOST_WIDE_INT d = INTVAL (op1);
4421 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4422 floor_log2 (d), tquotient, 1);
4423 t2 = expand_binop (compute_mode, and_optab, op0,
4424 GEN_INT (d - 1),
4425 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4426 t3 = gen_reg_rtx (compute_mode);
4427 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4428 compute_mode, 1, 1);
4429 if (t3 == 0)
4430 {
4431 rtx lab;
4432 lab = gen_label_rtx ();
4433 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4434 expand_inc (t1, const1_rtx);
4435 emit_label (lab);
4436 quotient = t1;
4437 }
4438 else
4439 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4440 t1, t3),
4441 tquotient);
4442 break;
4443 }
4444
4445 /* Try using an instruction that produces both the quotient and
4446 remainder, using truncation. We can easily compensate the
4447 quotient or remainder to get ceiling rounding, once we have the
4448 remainder. Notice that we compute also the final remainder
4449 value here, and return the result right away. */
4450 if (target == 0 || GET_MODE (target) != compute_mode)
4451 target = gen_reg_rtx (compute_mode);
4452
4453 if (rem_flag)
4454 {
4455 remainder = (REG_P (target)
4456 ? target : gen_reg_rtx (compute_mode));
4457 quotient = gen_reg_rtx (compute_mode);
4458 }
4459 else
4460 {
4461 quotient = (REG_P (target)
4462 ? target : gen_reg_rtx (compute_mode));
4463 remainder = gen_reg_rtx (compute_mode);
4464 }
4465
4466 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4467 remainder, 1))
4468 {
4469 /* This could be computed with a branch-less sequence.
4470 Save that for later. */
4471 rtx label = gen_label_rtx ();
4472 do_cmp_and_jump (remainder, const0_rtx, EQ,
4473 compute_mode, label);
4474 expand_inc (quotient, const1_rtx);
4475 expand_dec (remainder, op1);
4476 emit_label (label);
4477 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4478 }
4479
4480 /* No luck with division elimination or divmod. Have to do it
4481 by conditionally adjusting op0 *and* the result. */
4482 {
4483 rtx label1, label2;
4484 rtx adjusted_op0, tem;
4485
4486 quotient = gen_reg_rtx (compute_mode);
4487 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4488 label1 = gen_label_rtx ();
4489 label2 = gen_label_rtx ();
4490 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4491 compute_mode, label1);
4492 emit_move_insn (quotient, const0_rtx);
4493 emit_jump_insn (gen_jump (label2));
4494 emit_barrier ();
4495 emit_label (label1);
4496 expand_dec (adjusted_op0, const1_rtx);
4497 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4498 quotient, 1, OPTAB_LIB_WIDEN);
4499 if (tem != quotient)
4500 emit_move_insn (quotient, tem);
4501 expand_inc (quotient, const1_rtx);
4502 emit_label (label2);
4503 }
4504 }
4505 else /* signed */
4506 {
4507 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4508 && INTVAL (op1) >= 0)
4509 {
4510 /* This is extremely similar to the code for the unsigned case
4511 above. For 2.7 we should merge these variants, but for
4512 2.6.1 I don't want to touch the code for unsigned since that
4513 gets used in C. The signed case will only be used by other
4514 languages (Ada). */
4515
4516 rtx t1, t2, t3;
4517 unsigned HOST_WIDE_INT d = INTVAL (op1);
4518 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4519 floor_log2 (d), tquotient, 0);
4520 t2 = expand_binop (compute_mode, and_optab, op0,
4521 GEN_INT (d - 1),
4522 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4523 t3 = gen_reg_rtx (compute_mode);
4524 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4525 compute_mode, 1, 1);
4526 if (t3 == 0)
4527 {
4528 rtx lab;
4529 lab = gen_label_rtx ();
4530 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4531 expand_inc (t1, const1_rtx);
4532 emit_label (lab);
4533 quotient = t1;
4534 }
4535 else
4536 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4537 t1, t3),
4538 tquotient);
4539 break;
4540 }
4541
4542 /* Try using an instruction that produces both the quotient and
4543 remainder, using truncation. We can easily compensate the
4544 quotient or remainder to get ceiling rounding, once we have the
4545 remainder. Notice that we compute also the final remainder
4546 value here, and return the result right away. */
4547 if (target == 0 || GET_MODE (target) != compute_mode)
4548 target = gen_reg_rtx (compute_mode);
4549 if (rem_flag)
4550 {
4551 remainder = (REG_P (target)
4552 ? target : gen_reg_rtx (compute_mode));
4553 quotient = gen_reg_rtx (compute_mode);
4554 }
4555 else
4556 {
4557 quotient = (REG_P (target)
4558 ? target : gen_reg_rtx (compute_mode));
4559 remainder = gen_reg_rtx (compute_mode);
4560 }
4561
4562 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4563 remainder, 0))
4564 {
4565 /* This could be computed with a branch-less sequence.
4566 Save that for later. */
4567 rtx tem;
4568 rtx label = gen_label_rtx ();
4569 do_cmp_and_jump (remainder, const0_rtx, EQ,
4570 compute_mode, label);
4571 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4572 NULL_RTX, 0, OPTAB_WIDEN);
4573 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4574 expand_inc (quotient, const1_rtx);
4575 expand_dec (remainder, op1);
4576 emit_label (label);
4577 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4578 }
4579
4580 /* No luck with division elimination or divmod. Have to do it
4581 by conditionally adjusting op0 *and* the result. */
4582 {
4583 rtx label1, label2, label3, label4, label5;
4584 rtx adjusted_op0;
4585 rtx tem;
4586
4587 quotient = gen_reg_rtx (compute_mode);
4588 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4589 label1 = gen_label_rtx ();
4590 label2 = gen_label_rtx ();
4591 label3 = gen_label_rtx ();
4592 label4 = gen_label_rtx ();
4593 label5 = gen_label_rtx ();
4594 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4595 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4596 compute_mode, label1);
4597 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4598 quotient, 0, OPTAB_LIB_WIDEN);
4599 if (tem != quotient)
4600 emit_move_insn (quotient, tem);
4601 emit_jump_insn (gen_jump (label5));
4602 emit_barrier ();
4603 emit_label (label1);
4604 expand_dec (adjusted_op0, const1_rtx);
4605 emit_jump_insn (gen_jump (label4));
4606 emit_barrier ();
4607 emit_label (label2);
4608 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4609 compute_mode, label3);
4610 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4611 quotient, 0, OPTAB_LIB_WIDEN);
4612 if (tem != quotient)
4613 emit_move_insn (quotient, tem);
4614 emit_jump_insn (gen_jump (label5));
4615 emit_barrier ();
4616 emit_label (label3);
4617 expand_inc (adjusted_op0, const1_rtx);
4618 emit_label (label4);
4619 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4620 quotient, 0, OPTAB_LIB_WIDEN);
4621 if (tem != quotient)
4622 emit_move_insn (quotient, tem);
4623 expand_inc (quotient, const1_rtx);
4624 emit_label (label5);
4625 }
4626 }
4627 break;
4628
4629 case EXACT_DIV_EXPR:
4630 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4631 {
4632 HOST_WIDE_INT d = INTVAL (op1);
4633 unsigned HOST_WIDE_INT ml;
4634 int pre_shift;
4635 rtx t1;
4636
4637 pre_shift = floor_log2 (d & -d);
4638 ml = invert_mod2n (d >> pre_shift, size);
4639 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4640 pre_shift, NULL_RTX, unsignedp);
4641 quotient = expand_mult (compute_mode, t1,
4642 gen_int_mode (ml, compute_mode),
4643 NULL_RTX, 1);
4644
4645 insn = get_last_insn ();
4646 set_unique_reg_note (insn,
4647 REG_EQUAL,
4648 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4649 compute_mode,
4650 op0, op1));
4651 }
4652 break;
4653
4654 case ROUND_DIV_EXPR:
4655 case ROUND_MOD_EXPR:
4656 if (unsignedp)
4657 {
4658 rtx tem;
4659 rtx label;
4660 label = gen_label_rtx ();
4661 quotient = gen_reg_rtx (compute_mode);
4662 remainder = gen_reg_rtx (compute_mode);
4663 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4664 {
4665 rtx tem;
4666 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4667 quotient, 1, OPTAB_LIB_WIDEN);
4668 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4669 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4670 remainder, 1, OPTAB_LIB_WIDEN);
4671 }
4672 tem = plus_constant (op1, -1);
4673 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4674 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4675 expand_inc (quotient, const1_rtx);
4676 expand_dec (remainder, op1);
4677 emit_label (label);
4678 }
4679 else
4680 {
4681 rtx abs_rem, abs_op1, tem, mask;
4682 rtx label;
4683 label = gen_label_rtx ();
4684 quotient = gen_reg_rtx (compute_mode);
4685 remainder = gen_reg_rtx (compute_mode);
4686 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4687 {
4688 rtx tem;
4689 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4690 quotient, 0, OPTAB_LIB_WIDEN);
4691 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4692 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4693 remainder, 0, OPTAB_LIB_WIDEN);
4694 }
4695 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4696 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4697 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4698 1, NULL_RTX, 1);
4699 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4700 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4701 NULL_RTX, 0, OPTAB_WIDEN);
4702 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4703 size - 1, NULL_RTX, 0);
4704 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4705 NULL_RTX, 0, OPTAB_WIDEN);
4706 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4707 NULL_RTX, 0, OPTAB_WIDEN);
4708 expand_inc (quotient, tem);
4709 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4710 NULL_RTX, 0, OPTAB_WIDEN);
4711 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4712 NULL_RTX, 0, OPTAB_WIDEN);
4713 expand_dec (remainder, tem);
4714 emit_label (label);
4715 }
4716 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4717
4718 default:
4719 gcc_unreachable ();
4720 }
4721
4722 if (quotient == 0)
4723 {
4724 if (target && GET_MODE (target) != compute_mode)
4725 target = 0;
4726
4727 if (rem_flag)
4728 {
4729 /* Try to produce the remainder without producing the quotient.
4730 If we seem to have a divmod pattern that does not require widening,
4731 don't try widening here. We should really have a WIDEN argument
4732 to expand_twoval_binop, since what we'd really like to do here is
4733 1) try a mod insn in compute_mode
4734 2) try a divmod insn in compute_mode
4735 3) try a div insn in compute_mode and multiply-subtract to get
4736 remainder
4737 4) try the same things with widening allowed. */
4738 remainder
4739 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4740 op0, op1, target,
4741 unsignedp,
4742 ((optab_handler (optab2, compute_mode)
4743 != CODE_FOR_nothing)
4744 ? OPTAB_DIRECT : OPTAB_WIDEN));
4745 if (remainder == 0)
4746 {
4747 /* No luck there. Can we do remainder and divide at once
4748 without a library call? */
4749 remainder = gen_reg_rtx (compute_mode);
4750 if (! expand_twoval_binop ((unsignedp
4751 ? udivmod_optab
4752 : sdivmod_optab),
4753 op0, op1,
4754 NULL_RTX, remainder, unsignedp))
4755 remainder = 0;
4756 }
4757
4758 if (remainder)
4759 return gen_lowpart (mode, remainder);
4760 }
4761
4762 /* Produce the quotient. Try a quotient insn, but not a library call.
4763 If we have a divmod in this mode, use it in preference to widening
4764 the div (for this test we assume it will not fail). Note that optab2
4765 is set to the one of the two optabs that the call below will use. */
4766 quotient
4767 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4768 op0, op1, rem_flag ? NULL_RTX : target,
4769 unsignedp,
4770 ((optab_handler (optab2, compute_mode)
4771 != CODE_FOR_nothing)
4772 ? OPTAB_DIRECT : OPTAB_WIDEN));
4773
4774 if (quotient == 0)
4775 {
4776 /* No luck there. Try a quotient-and-remainder insn,
4777 keeping the quotient alone. */
4778 quotient = gen_reg_rtx (compute_mode);
4779 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4780 op0, op1,
4781 quotient, NULL_RTX, unsignedp))
4782 {
4783 quotient = 0;
4784 if (! rem_flag)
4785 /* Still no luck. If we are not computing the remainder,
4786 use a library call for the quotient. */
4787 quotient = sign_expand_binop (compute_mode,
4788 udiv_optab, sdiv_optab,
4789 op0, op1, target,
4790 unsignedp, OPTAB_LIB_WIDEN);
4791 }
4792 }
4793 }
4794
4795 if (rem_flag)
4796 {
4797 if (target && GET_MODE (target) != compute_mode)
4798 target = 0;
4799
4800 if (quotient == 0)
4801 {
4802 /* No divide instruction either. Use library for remainder. */
4803 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4804 op0, op1, target,
4805 unsignedp, OPTAB_LIB_WIDEN);
4806 /* No remainder function. Try a quotient-and-remainder
4807 function, keeping the remainder. */
4808 if (!remainder)
4809 {
4810 remainder = gen_reg_rtx (compute_mode);
4811 if (!expand_twoval_binop_libfunc
4812 (unsignedp ? udivmod_optab : sdivmod_optab,
4813 op0, op1,
4814 NULL_RTX, remainder,
4815 unsignedp ? UMOD : MOD))
4816 remainder = NULL_RTX;
4817 }
4818 }
4819 else
4820 {
4821 /* We divided. Now finish doing X - Y * (X / Y). */
4822 remainder = expand_mult (compute_mode, quotient, op1,
4823 NULL_RTX, unsignedp);
4824 remainder = expand_binop (compute_mode, sub_optab, op0,
4825 remainder, target, unsignedp,
4826 OPTAB_LIB_WIDEN);
4827 }
4828 }
4829
4830 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4831 }
4832 \f
4833 /* Return a tree node with data type TYPE, describing the value of X.
4834 Usually this is a VAR_DECL, if there is no obvious better choice.
4835 X may be an expression; however, we only support those expressions
4836 generated by loop.c. */
4837
4838 tree
4839 make_tree (tree type, rtx x)
4840 {
4841 tree t;
4842
4843 switch (GET_CODE (x))
4844 {
4845 case CONST_INT:
4846 {
4847 HOST_WIDE_INT hi = 0;
4848
4849 if (INTVAL (x) < 0
4850 && !(TYPE_UNSIGNED (type)
4851 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4852 < HOST_BITS_PER_WIDE_INT)))
4853 hi = -1;
4854
4855 t = build_int_cst_wide (type, INTVAL (x), hi);
4856
4857 return t;
4858 }
4859
4860 case CONST_DOUBLE:
4861 if (GET_MODE (x) == VOIDmode)
4862 t = build_int_cst_wide (type,
4863 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4864 else
4865 {
4866 REAL_VALUE_TYPE d;
4867
4868 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4869 t = build_real (type, d);
4870 }
4871
4872 return t;
4873
4874 case CONST_VECTOR:
4875 {
4876 int units = CONST_VECTOR_NUNITS (x);
4877 tree itype = TREE_TYPE (type);
4878 tree t = NULL_TREE;
4879 int i;
4880
4881
4882 /* Build a tree with vector elements. */
4883 for (i = units - 1; i >= 0; --i)
4884 {
4885 rtx elt = CONST_VECTOR_ELT (x, i);
4886 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4887 }
4888
4889 return build_vector (type, t);
4890 }
4891
4892 case PLUS:
4893 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4894 make_tree (type, XEXP (x, 1)));
4895
4896 case MINUS:
4897 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4898 make_tree (type, XEXP (x, 1)));
4899
4900 case NEG:
4901 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4902
4903 case MULT:
4904 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4905 make_tree (type, XEXP (x, 1)));
4906
4907 case ASHIFT:
4908 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4909 make_tree (type, XEXP (x, 1)));
4910
4911 case LSHIFTRT:
4912 t = unsigned_type_for (type);
4913 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4914 make_tree (t, XEXP (x, 0)),
4915 make_tree (type, XEXP (x, 1))));
4916
4917 case ASHIFTRT:
4918 t = signed_type_for (type);
4919 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4920 make_tree (t, XEXP (x, 0)),
4921 make_tree (type, XEXP (x, 1))));
4922
4923 case DIV:
4924 if (TREE_CODE (type) != REAL_TYPE)
4925 t = signed_type_for (type);
4926 else
4927 t = type;
4928
4929 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4930 make_tree (t, XEXP (x, 0)),
4931 make_tree (t, XEXP (x, 1))));
4932 case UDIV:
4933 t = unsigned_type_for (type);
4934 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4935 make_tree (t, XEXP (x, 0)),
4936 make_tree (t, XEXP (x, 1))));
4937
4938 case SIGN_EXTEND:
4939 case ZERO_EXTEND:
4940 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4941 GET_CODE (x) == ZERO_EXTEND);
4942 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4943
4944 case CONST:
4945 return make_tree (type, XEXP (x, 0));
4946
4947 case SYMBOL_REF:
4948 t = SYMBOL_REF_DECL (x);
4949 if (t)
4950 return fold_convert (type, build_fold_addr_expr (t));
4951 /* else fall through. */
4952
4953 default:
4954 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
4955
4956 /* If TYPE is a POINTER_TYPE, we might need to convert X from
4957 address mode to pointer mode. */
4958 if (POINTER_TYPE_P (type))
4959 x = convert_memory_address_addr_space
4960 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
4961
4962 /* Note that we do *not* use SET_DECL_RTL here, because we do not
4963 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
4964 t->decl_with_rtl.rtl = x;
4965
4966 return t;
4967 }
4968 }
4969 \f
4970 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4971 and returning TARGET.
4972
4973 If TARGET is 0, a pseudo-register or constant is returned. */
4974
4975 rtx
4976 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4977 {
4978 rtx tem = 0;
4979
4980 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4981 tem = simplify_binary_operation (AND, mode, op0, op1);
4982 if (tem == 0)
4983 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4984
4985 if (target == 0)
4986 target = tem;
4987 else if (tem != target)
4988 emit_move_insn (target, tem);
4989 return target;
4990 }
4991
4992 /* Helper function for emit_store_flag. */
4993 static rtx
4994 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
4995 enum machine_mode mode, enum machine_mode compare_mode,
4996 int unsignedp, rtx x, rtx y, int normalizep,
4997 enum machine_mode target_mode)
4998 {
4999 struct expand_operand ops[4];
5000 rtx op0, last, comparison, subtarget;
5001 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5002
5003 last = get_last_insn ();
5004 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5005 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5006 if (!x || !y)
5007 {
5008 delete_insns_since (last);
5009 return NULL_RTX;
5010 }
5011
5012 if (target_mode == VOIDmode)
5013 target_mode = result_mode;
5014 if (!target)
5015 target = gen_reg_rtx (target_mode);
5016
5017 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5018
5019 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5020 create_fixed_operand (&ops[1], comparison);
5021 create_fixed_operand (&ops[2], x);
5022 create_fixed_operand (&ops[3], y);
5023 if (!maybe_expand_insn (icode, 4, ops))
5024 {
5025 delete_insns_since (last);
5026 return NULL_RTX;
5027 }
5028 subtarget = ops[0].value;
5029
5030 /* If we are converting to a wider mode, first convert to
5031 TARGET_MODE, then normalize. This produces better combining
5032 opportunities on machines that have a SIGN_EXTRACT when we are
5033 testing a single bit. This mostly benefits the 68k.
5034
5035 If STORE_FLAG_VALUE does not have the sign bit set when
5036 interpreted in MODE, we can do this conversion as unsigned, which
5037 is usually more efficient. */
5038 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5039 {
5040 convert_move (target, subtarget,
5041 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5042 && 0 == (STORE_FLAG_VALUE
5043 & ((HOST_WIDE_INT) 1
5044 << (GET_MODE_BITSIZE (result_mode) -1))));
5045 op0 = target;
5046 result_mode = target_mode;
5047 }
5048 else
5049 op0 = subtarget;
5050
5051 /* If we want to keep subexpressions around, don't reuse our last
5052 target. */
5053 if (optimize)
5054 subtarget = 0;
5055
5056 /* Now normalize to the proper value in MODE. Sometimes we don't
5057 have to do anything. */
5058 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5059 ;
5060 /* STORE_FLAG_VALUE might be the most negative number, so write
5061 the comparison this way to avoid a compile-time warning. */
5062 else if (- normalizep == STORE_FLAG_VALUE)
5063 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5064
5065 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5066 it hard to use a value of just the sign bit due to ANSI integer
5067 constant typing rules. */
5068 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5069 && (STORE_FLAG_VALUE
5070 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5071 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5072 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5073 normalizep == 1);
5074 else
5075 {
5076 gcc_assert (STORE_FLAG_VALUE & 1);
5077
5078 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5079 if (normalizep == -1)
5080 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5081 }
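/* For instance, when STORE_FLAG_VALUE consists of just the sign bit (as on
machines whose store-flag insn produces the sign bit) and NORMALIZEP is 1
or -1, the shift branch above reduces the raw result to the requested
form. As a rough C-level sketch for a 32-bit RESULT_MODE (the actual
expansion emits RTL, not C):
normalized = (unsigned) raw >> 31;   for NORMALIZEP == 1  (0 or 1)
normalized = raw >> 31;              for NORMALIZEP == -1 (0 or -1) */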
5082
5083 /* If we were converting to a smaller mode, do the conversion now. */
5084 if (target_mode != result_mode)
5085 {
5086 convert_move (target, op0, 0);
5087 return target;
5088 }
5089 else
5090 return op0;
5091 }
5092
5093
5094 /* A subroutine of emit_store_flag only including "tricks" that do not
5095 need a recursive call. These are kept separate to avoid infinite
5096 loops. */
5097
5098 static rtx
5099 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5100 enum machine_mode mode, int unsignedp, int normalizep,
5101 enum machine_mode target_mode)
5102 {
5103 rtx subtarget;
5104 enum insn_code icode;
5105 enum machine_mode compare_mode;
5106 enum mode_class mclass;
5107 enum rtx_code scode;
5108 rtx tem;
5109
5110 if (unsignedp)
5111 code = unsigned_condition (code);
5112 scode = swap_condition (code);
5113
5114 /* If one operand is constant, make it the second one. Only do this
5115 if the other operand is not constant as well. */
5116
5117 if (swap_commutative_operands_p (op0, op1))
5118 {
5119 tem = op0;
5120 op0 = op1;
5121 op1 = tem;
5122 code = swap_condition (code);
5123 }
5124
5125 if (mode == VOIDmode)
5126 mode = GET_MODE (op0);
5127
5128 /* For some comparisons with 1 and -1, we can convert this to
5129 comparisons with zero. This will often produce more opportunities for
5130 store-flag insns. */
5131
5132 switch (code)
5133 {
5134 case LT:
5135 if (op1 == const1_rtx)
5136 op1 = const0_rtx, code = LE;
5137 break;
5138 case LE:
5139 if (op1 == constm1_rtx)
5140 op1 = const0_rtx, code = LT;
5141 break;
5142 case GE:
5143 if (op1 == const1_rtx)
5144 op1 = const0_rtx, code = GT;
5145 break;
5146 case GT:
5147 if (op1 == constm1_rtx)
5148 op1 = const0_rtx, code = GE;
5149 break;
5150 case GEU:
5151 if (op1 == const1_rtx)
5152 op1 = const0_rtx, code = NE;
5153 break;
5154 case LTU:
5155 if (op1 == const1_rtx)
5156 op1 = const0_rtx, code = EQ;
5157 break;
5158 default:
5159 break;
5160 }
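/* As a C-level sketch of the rewrites above (the expander only changes the
rtx_code and the constant):
x < 1     becomes   x <= 0     (LT  -> LE)
x <= -1   becomes   x < 0      (LE  -> LT)
x >= 1    becomes   x > 0      (GE  -> GT)
x > -1    becomes   x >= 0     (GT  -> GE)
x >= 1u   becomes   x != 0     (GEU -> NE)
x < 1u    becomes   x == 0     (LTU -> EQ) */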
5161
5162 /* If we are comparing a double-word integer with zero or -1, we can
5163 convert the comparison into one involving a single word. */
5164 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5165 && GET_MODE_CLASS (mode) == MODE_INT
5166 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5167 {
5168 if ((code == EQ || code == NE)
5169 && (op1 == const0_rtx || op1 == constm1_rtx))
5170 {
5171 rtx op00, op01;
5172
5173 /* Do a logical OR or AND of the two words and compare the
5174 result. */
5175 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5176 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5177 tem = expand_binop (word_mode,
5178 op1 == const0_rtx ? ior_optab : and_optab,
5179 op00, op01, NULL_RTX, unsignedp,
5180 OPTAB_DIRECT);
5181
5182 if (tem != 0)
5183 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5184 unsignedp, normalizep);
5185 }
5186 else if ((code == LT || code == GE) && op1 == const0_rtx)
5187 {
5188 rtx op0h;
5189
5190 /* If testing the sign bit, we can just test the high word. */
5191 op0h = simplify_gen_subreg (word_mode, op0, mode,
5192 subreg_highpart_offset (word_mode,
5193 mode));
5194 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5195 unsignedp, normalizep);
5196 }
5197 else
5198 tem = NULL_RTX;
5199
5200 if (tem)
5201 {
5202 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5203 return tem;
5204 if (!target)
5205 target = gen_reg_rtx (target_mode);
5206
5207 convert_move (target, tem,
5208 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5209 & ((HOST_WIDE_INT) 1
5210 << (GET_MODE_BITSIZE (word_mode) -1))));
5211 return target;
5212 }
5213 }
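/* On a target with 32-bit words, for example, a DImode comparison collapses
to word_mode roughly as follows (C sketch; LO and HI stand for the two
word-sized halves):
x == 0    ->   (lo | hi) == 0
x == -1   ->   (lo & hi) == -1
x < 0     ->   (signed) hi < 0
so a single word_mode store-flag insn suffices. */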
5214
5215 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5216 complement of A (for GE) and shifting the sign bit to the low bit. */
5217 if (op1 == const0_rtx && (code == LT || code == GE)
5218 && GET_MODE_CLASS (mode) == MODE_INT
5219 && (normalizep || STORE_FLAG_VALUE == 1
5220 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5221 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5222 == ((unsigned HOST_WIDE_INT) 1
5223 << (GET_MODE_BITSIZE (mode) - 1))))))
5224 {
5225 subtarget = target;
5226
5227 if (!target)
5228 target_mode = mode;
5229
5230 /* If the result is to be wider than OP0, it is best to convert it
5231 first. If it is to be narrower, it is *incorrect* to convert it
5232 first. */
5233 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5234 {
5235 op0 = convert_modes (target_mode, mode, op0, 0);
5236 mode = target_mode;
5237 }
5238
5239 if (target_mode != mode)
5240 subtarget = 0;
5241
5242 if (code == GE)
5243 op0 = expand_unop (mode, one_cmpl_optab, op0,
5244 ((STORE_FLAG_VALUE == 1 || normalizep)
5245 ? 0 : subtarget), 0);
5246
5247 if (STORE_FLAG_VALUE == 1 || normalizep)
5248 /* If we are supposed to produce a 0/1 value, we want to do
5249 a logical shift from the sign bit to the low-order bit; for
5250 a -1/0 value, we do an arithmetic shift. */
5251 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5252 GET_MODE_BITSIZE (mode) - 1,
5253 subtarget, normalizep != -1);
5254
5255 if (mode != target_mode)
5256 op0 = convert_modes (target_mode, mode, op0, 0);
5257
5258 return op0;
5259 }
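/* For a 32-bit MODE the above amounts to (C-level sketch; the
arithmetic/logical distinction is explicit in the RTL shift):
x < 0,  0/1  result:   (unsigned) x >> 31
x < 0,  0/-1 result:   x >> 31
x >= 0, 0/1  result:   (unsigned) ~x >> 31
i.e. the (possibly complemented) sign bit is itself the answer. */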
5260
5261 mclass = GET_MODE_CLASS (mode);
5262 for (compare_mode = mode; compare_mode != VOIDmode;
5263 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5264 {
5265 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5266 icode = optab_handler (cstore_optab, optab_mode);
5267 if (icode != CODE_FOR_nothing)
5268 {
5269 do_pending_stack_adjust ();
5270 tem = emit_cstore (target, icode, code, mode, compare_mode,
5271 unsignedp, op0, op1, normalizep, target_mode);
5272 if (tem)
5273 return tem;
5274
5275 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5276 {
5277 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5278 unsignedp, op1, op0, normalizep, target_mode);
5279 if (tem)
5280 return tem;
5281 }
5282 break;
5283 }
5284 }
5285
5286 return 0;
5287 }
5288
5289 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5290 and storing in TARGET. Normally return TARGET.
5291 Return 0 if that cannot be done.
5292
5293 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5294 it is VOIDmode, they cannot both be CONST_INT.
5295
5296 UNSIGNEDP is for the case where we have to widen the operands
5297 to perform the operation. It says to use zero-extension.
5298
5299 NORMALIZEP is 1 if we should convert the result to be either zero
5300 or one. NORMALIZEP is -1 if we should convert the result to be
5301 either zero or -1. If NORMALIZEP is zero, the result will be left
5302 "raw" out of the scc insn. */
5303
5304 rtx
5305 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5306 enum machine_mode mode, int unsignedp, int normalizep)
5307 {
5308 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5309 enum rtx_code rcode;
5310 rtx subtarget;
5311 rtx tem, last, trueval;
5312
5313 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5314 target_mode);
5315 if (tem)
5316 return tem;
5317
5318 /* If we reached here, we can't do this with a scc insn; however, there
5319 are some comparisons that can be done in other ways. Don't do any
5320 of these cases if branches are very cheap. */
5321 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5322 return 0;
5323
5324 /* See what we need to return. We can only return a 1, -1, or the
5325 sign bit. */
5326
5327 if (normalizep == 0)
5328 {
5329 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5330 normalizep = STORE_FLAG_VALUE;
5331
5332 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5333 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5334 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5335 ;
5336 else
5337 return 0;
5338 }
5339
5340 last = get_last_insn ();
5341
5342 /* If optimizing, use different pseudo registers for each insn, instead
5343 of reusing the same pseudo. This leads to better CSE, but slows
5344 down the compiler, since there are more pseudos. */
5345 subtarget = (!optimize
5346 && (target_mode == mode)) ? target : NULL_RTX;
5347 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5348
5349 /* For floating-point comparisons, try the reverse comparison or try
5350 changing the "orderedness" of the comparison. */
5351 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5352 {
5353 enum rtx_code first_code;
5354 bool and_them;
5355
5356 rcode = reverse_condition_maybe_unordered (code);
5357 if (can_compare_p (rcode, mode, ccp_store_flag)
5358 && (code == ORDERED || code == UNORDERED
5359 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5360 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5361 {
5362 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5363 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5364
5365 /* For the reverse comparison, use either an addition or a XOR. */
5366 if (want_add
5367 && rtx_cost (GEN_INT (normalizep), PLUS,
5368 optimize_insn_for_speed_p ()) == 0)
5369 {
5370 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5371 STORE_FLAG_VALUE, target_mode);
5372 if (tem)
5373 return expand_binop (target_mode, add_optab, tem,
5374 GEN_INT (normalizep),
5375 target, 0, OPTAB_WIDEN);
5376 }
5377 else if (!want_add
5378 && rtx_cost (trueval, XOR,
5379 optimize_insn_for_speed_p ()) == 0)
5380 {
5381 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5382 normalizep, target_mode);
5383 if (tem)
5384 return expand_binop (target_mode, xor_optab, tem, trueval,
5385 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5386 }
5387 }
5388
5389 delete_insns_since (last);
5390
5391 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5392 if (code == ORDERED || code == UNORDERED)
5393 return 0;
5394
5395 and_them = split_comparison (code, mode, &first_code, &code);
5396
5397 /* If there are no NaNs, the first comparison should always fall through.
5398 Effectively change the comparison to the other one. */
5399 if (!HONOR_NANS (mode))
5400 {
5401 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5402 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5403 target_mode);
5404 }
5405
5406 #ifdef HAVE_conditional_move
5407 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5408 conditional move. */
5409 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5410 normalizep, target_mode);
5411 if (tem == 0)
5412 return 0;
5413
5414 if (and_them)
5415 tem = emit_conditional_move (target, code, op0, op1, mode,
5416 tem, const0_rtx, GET_MODE (tem), 0);
5417 else
5418 tem = emit_conditional_move (target, code, op0, op1, mode,
5419 trueval, tem, GET_MODE (tem), 0);
5420
5421 if (tem == 0)
5422 delete_insns_since (last);
5423 return tem;
5424 #else
5425 return 0;
5426 #endif
5427 }
5428
5429 /* The remaining tricks only apply to integer comparisons. */
5430
5431 if (GET_MODE_CLASS (mode) != MODE_INT)
5432 return 0;
5433
5434 /* If this is an equality comparison of integers, we can try to exclusive-or
5435 (or subtract) the two operands and use a recursive call to try the
5436 comparison with zero. Don't do any of these cases if branches are
5437 very cheap. */
5438
5439 if ((code == EQ || code == NE) && op1 != const0_rtx)
5440 {
5441 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5442 OPTAB_WIDEN);
5443
5444 if (tem == 0)
5445 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5446 OPTAB_WIDEN);
5447 if (tem != 0)
5448 tem = emit_store_flag (target, code, tem, const0_rtx,
5449 mode, unsignedp, normalizep);
5450 if (tem != 0)
5451 return tem;
5452
5453 delete_insns_since (last);
5454 }
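/* That is, a == b is retried as (a ^ b) == 0, falling back to
(a - b) == 0 (C-level sketch), which lets the recursive call use the
cheaper comparisons against the constant zero. */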
5455
5456 /* For integer comparisons, try the reverse comparison. However, for
5457 a small X, and if we would have to extend it anyway, implementing "X != 0"
5458 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5459 rcode = reverse_condition (code);
5460 if (can_compare_p (rcode, mode, ccp_store_flag)
5461 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5462 && code == NE
5463 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5464 && op1 == const0_rtx))
5465 {
5466 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5467 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5468
5469 /* Again, for the reverse comparison, use either an addition or a XOR. */
5470 if (want_add
5471 && rtx_cost (GEN_INT (normalizep), PLUS,
5472 optimize_insn_for_speed_p ()) == 0)
5473 {
5474 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5475 STORE_FLAG_VALUE, target_mode);
5476 if (tem != 0)
5477 tem = expand_binop (target_mode, add_optab, tem,
5478 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5479 }
5480 else if (!want_add
5481 && rtx_cost (trueval, XOR,
5482 optimize_insn_for_speed_p ()) == 0)
5483 {
5484 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5485 normalizep, target_mode);
5486 if (tem != 0)
5487 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5488 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5489 }
5490
5491 if (tem != 0)
5492 return tem;
5493 delete_insns_since (last);
5494 }
5495
5496 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5497 the constant zero. Reject all other comparisons at this point. Only
5498 do LE and GT if branches are expensive, since the sequences for them
5499 are expensive on 2-operand machines. */
5500
5501 if (op1 != const0_rtx
5502 || (code != EQ && code != NE
5503 && (BRANCH_COST (optimize_insn_for_speed_p (),
5504 false) <= 1 || (code != LE && code != GT))))
5505 return 0;
5506
5507 /* Try to put the result of the comparison in the sign bit. Assume we can't
5508 do the necessary operation below. */
5509
5510 tem = 0;
5511
5512 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5513 the sign bit set. */
5514
5515 if (code == LE)
5516 {
5517 /* This is destructive, so SUBTARGET can't be OP0. */
5518 if (rtx_equal_p (subtarget, op0))
5519 subtarget = 0;
5520
5521 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5522 OPTAB_WIDEN);
5523 if (tem)
5524 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5525 OPTAB_WIDEN);
5526 }
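/* Concretely, for a 32-bit A (C sketch): a <= 0 iff the sign bit of
(a | (a - 1)) is set -- if a == 0 then a - 1 is all ones, if a < 0 its own
sign bit is already set, and if a > 0 neither term is negative. The
normalizing shift further down extracts that sign bit. */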
5527
5528 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5529 number of bits in the mode of OP0, minus one. */
5530
5531 if (code == GT)
5532 {
5533 if (rtx_equal_p (subtarget, op0))
5534 subtarget = 0;
5535
5536 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5537 GET_MODE_BITSIZE (mode) - 1,
5538 subtarget, 0);
5539 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5540 OPTAB_WIDEN);
5541 }
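/* Concretely, for a 32-bit A (C sketch): a > 0 iff the sign bit of
(a >> 31) - a is set -- the arithmetic shift yields 0 for a >= 0 and -1
for a < 0, so the difference is -a (negative) exactly when a > 0, and
-1 - a (non-negative) when a < 0. */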
5542
5543 if (code == EQ || code == NE)
5544 {
5545 /* For EQ or NE, one way to do the comparison is to apply an operation
5546 that converts the operand into a positive number if it is nonzero
5547 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5548 for NE we negate. This puts the result in the sign bit. Then we
5549 normalize with a shift, if needed.
5550
5551 Two operations that can do the above actions are ABS and FFS, so try
5552 them. If that doesn't work, and MODE is smaller than a full word,
5553 we can use zero-extension to the wider mode (an unsigned conversion)
5554 as the operation. */
5555
5556 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5557 that is compensated by the subsequent overflow when subtracting
5558 one / negating. */
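/* As a 32-bit C sketch of the above: abs (a), like ffs (a), is positive
exactly when a != 0 (modulo the INT_MIN wraparound just mentioned), so the
sign bit of (abs (a) - 1) answers a == 0 and the sign bit of -abs (a)
answers a != 0; the final shift below moves that bit into place. */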
5559
5560 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5561 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5562 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5563 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5564 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5565 {
5566 tem = convert_modes (word_mode, mode, op0, 1);
5567 mode = word_mode;
5568 }
5569
5570 if (tem != 0)
5571 {
5572 if (code == EQ)
5573 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5574 0, OPTAB_WIDEN);
5575 else
5576 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5577 }
5578
5579 /* If we couldn't do it that way, for NE we can "or" the two's complement
5580 of the value with itself. For EQ, we take the one's complement of
5581 that "or", which is an extra insn, so we only handle EQ if branches
5582 are expensive. */
5583
5584 if (tem == 0
5585 && (code == NE
5586 || BRANCH_COST (optimize_insn_for_speed_p (),
5587 false) > 1))
5588 {
5589 if (rtx_equal_p (subtarget, op0))
5590 subtarget = 0;
5591
5592 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5593 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5594 OPTAB_WIDEN);
5595
5596 if (tem && code == EQ)
5597 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5598 }
5599 }
5600
5601 if (tem && normalizep)
5602 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5603 GET_MODE_BITSIZE (mode) - 1,
5604 subtarget, normalizep == 1);
5605
5606 if (tem)
5607 {
5608 if (!target)
5609 ;
5610 else if (GET_MODE (tem) != target_mode)
5611 {
5612 convert_move (target, tem, 0);
5613 tem = target;
5614 }
5615 else if (!subtarget)
5616 {
5617 emit_move_insn (target, tem);
5618 tem = target;
5619 }
5620 }
5621 else
5622 delete_insns_since (last);
5623
5624 return tem;
5625 }
5626
5627 /* Like emit_store_flag, but always succeeds. */
5628
5629 rtx
5630 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5631 enum machine_mode mode, int unsignedp, int normalizep)
5632 {
5633 rtx tem, label;
5634 rtx trueval, falseval;
5635
5636 /* First see if emit_store_flag can do the job. */
5637 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5638 if (tem != 0)
5639 return tem;
5640
5641 if (!target)
5642 target = gen_reg_rtx (word_mode);
5643
5644 /* If this failed, we have to do this with set/compare/jump/set code.
5645 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5646 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5647 if (code == NE
5648 && GET_MODE_CLASS (mode) == MODE_INT
5649 && REG_P (target)
5650 && op0 == target
5651 && op1 == const0_rtx)
5652 {
5653 label = gen_label_rtx ();
5654 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5655 mode, NULL_RTX, NULL_RTX, label, -1);
5656 emit_move_insn (target, trueval);
5657 emit_label (label);
5658 return target;
5659 }
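/* I.e. when TARGET and OP0 are the same register, "target = (target != 0)"
(with TRUEVAL as the nonzero value) needs only a conditional skip,
roughly:  if (target != 0) target = trueval;  (C sketch). */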
5660
5661 if (!REG_P (target)
5662 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5663 target = gen_reg_rtx (GET_MODE (target));
5664
5665 /* Jump in the right direction if the target cannot implement CODE
5666 but can jump on its reverse condition. */
5667 falseval = const0_rtx;
5668 if (! can_compare_p (code, mode, ccp_jump)
5669 && (! FLOAT_MODE_P (mode)
5670 || code == ORDERED || code == UNORDERED
5671 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5672 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5673 {
5674 enum rtx_code rcode;
5675 if (FLOAT_MODE_P (mode))
5676 rcode = reverse_condition_maybe_unordered (code);
5677 else
5678 rcode = reverse_condition (code);
5679
5680 /* Canonicalize to UNORDERED for the libcall. */
5681 if (can_compare_p (rcode, mode, ccp_jump)
5682 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5683 {
5684 falseval = trueval;
5685 trueval = const0_rtx;
5686 code = rcode;
5687 }
5688 }
5689
5690 emit_move_insn (target, trueval);
5691 label = gen_label_rtx ();
5692 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5693 NULL_RTX, label, -1);
5694
5695 emit_move_insn (target, falseval);
5696 emit_label (label);
5697
5698 return target;
5699 }
5700 \f
5701 /* Perform possibly multi-word comparison and conditional jump to LABEL
5702 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5703 now a thin wrapper around do_compare_rtx_and_jump. */
5704
5705 static void
5706 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5707 rtx label)
5708 {
5709 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5710 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5711 NULL_RTX, NULL_RTX, label, -1);
5712 }