1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
31 #include "insn-flags.h"
32 #include "output.h"
33 #include "insn-attr.h"
34 #include "flags.h"
35 #include "recog.h"
36 #include "reload.h"
37 #include "diagnostic-core.h"
38 #include "toplev.h"
39 #include "obstack.h"
40 #include "tree.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "except.h"
44 #include "function.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "tm_p.h"
49 #include "langhooks.h"
50 #include "gimple.h"
51 #include "df.h"
52
53 /* Prototypes */
54
55 /* Used by m32c_pushm_popm. */
56 typedef enum
57 {
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61 } Push_Pop_Type;
62
63 static bool m32c_function_needs_enter (void);
64 static tree interrupt_handler (tree *, tree, tree, int, bool *);
65 static tree function_vector_handler (tree *, tree, tree, int, bool *);
66 static int interrupt_p (tree node);
67 static int bank_switch_p (tree node);
68 static int fast_interrupt_p (tree node);
70 static bool m32c_asm_integer (rtx, unsigned int, int);
71 static int m32c_comp_type_attributes (const_tree, const_tree);
72 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
73 static struct machine_function *m32c_init_machine_status (void);
74 static void m32c_insert_attributes (tree, tree *);
75 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
76 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
77 static rtx m32c_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
78 const_tree, bool);
79 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
80 const_tree, bool);
81 static void m32c_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
82 const_tree, bool);
83 static bool m32c_promote_prototypes (const_tree);
84 static int m32c_pushm_popm (Push_Pop_Type);
85 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
86 static rtx m32c_struct_value_rtx (tree, int);
87 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
88 static int need_to_save (int);
89 static rtx m32c_function_value (const_tree, const_tree, bool);
90 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
91
92 /* Returns true if an address is specified, else false. */
93 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
94
95 int current_function_special_page_vector (rtx);
96
97 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
98
99 #define streq(a,b) (strcmp ((a), (b)) == 0)
100
101 /* Internal support routines */
102
103 /* Debugging statements are tagged with DEBUG0 only so that they can
104 be easily enabled individually, by replacing the '0' with '1' as
105 needed. */
106 #define DEBUG0 0
107 #define DEBUG1 1
108
109 #if DEBUG0
110 /* This is needed by some of the commented-out debug statements
111 below. */
112 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
113 #endif
114 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
115
116 /* These are all to support encode_pattern(). */
117 static char pattern[30], *patternp;
118 static GTY(()) rtx patternr[30];
119 #define RTX_IS(x) (streq (pattern, x))
120
121 /* Some macros to simplify the logic throughout this file. */
122 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
123 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
124
125 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
126 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
127
128 static int
129 far_addr_space_p (rtx x)
130 {
131 if (GET_CODE (x) != MEM)
132 return 0;
133 #if DEBUG0
134 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
135 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
136 #endif
137 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
138 }
139
140 /* We do most RTX matching by converting the RTX into a string, and
141 using string compares. This vastly simplifies the logic in many of
142 the functions in this file.
143
144 On exit, pattern[] has the encoded string (use RTX_IS("...") to
145 compare it) and patternr[] has pointers to the nodes in the RTX
146 corresponding to each character in the encoded string. The latter
147 is mostly used by print_operand().
148
149 Unrecognized patterns have '?' in them; this shows up when the
150 assembler complains about syntax errors.
151 */
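/* For illustration only (a sketch, not an exhaustive list of
   encodings): the address (plus (reg a0) (const_int 4)) encodes as
   "+ri", a memory reference (mem (plus (reg a0) (const_int 4)))
   encodes as "m+ri", and (mem (symbol_ref "x")) encodes as "ms".  */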
152
153 static void
154 encode_pattern_1 (rtx x)
155 {
156 int i;
157
158 if (patternp == pattern + sizeof (pattern) - 2)
159 {
160 patternp[-1] = '?';
161 return;
162 }
163
164 patternr[patternp - pattern] = x;
165
166 switch (GET_CODE (x))
167 {
168 case REG:
169 *patternp++ = 'r';
170 break;
171 case SUBREG:
172 if (GET_MODE_SIZE (GET_MODE (x)) !=
173 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
174 *patternp++ = 'S';
175 encode_pattern_1 (XEXP (x, 0));
176 break;
177 case MEM:
178 *patternp++ = 'm';
179 case CONST:
180 encode_pattern_1 (XEXP (x, 0));
181 break;
182 case SIGN_EXTEND:
183 *patternp++ = '^';
184 *patternp++ = 'S';
185 encode_pattern_1 (XEXP (x, 0));
186 break;
187 case ZERO_EXTEND:
188 *patternp++ = '^';
189 *patternp++ = 'Z';
190 encode_pattern_1 (XEXP (x, 0));
191 break;
192 case PLUS:
193 *patternp++ = '+';
194 encode_pattern_1 (XEXP (x, 0));
195 encode_pattern_1 (XEXP (x, 1));
196 break;
197 case PRE_DEC:
198 *patternp++ = '>';
199 encode_pattern_1 (XEXP (x, 0));
200 break;
201 case POST_INC:
202 *patternp++ = '<';
203 encode_pattern_1 (XEXP (x, 0));
204 break;
205 case LO_SUM:
206 *patternp++ = 'L';
207 encode_pattern_1 (XEXP (x, 0));
208 encode_pattern_1 (XEXP (x, 1));
209 break;
210 case HIGH:
211 *patternp++ = 'H';
212 encode_pattern_1 (XEXP (x, 0));
213 break;
214 case SYMBOL_REF:
215 *patternp++ = 's';
216 break;
217 case LABEL_REF:
218 *patternp++ = 'l';
219 break;
220 case CODE_LABEL:
221 *patternp++ = 'c';
222 break;
223 case CONST_INT:
224 case CONST_DOUBLE:
225 *patternp++ = 'i';
226 break;
227 case UNSPEC:
228 *patternp++ = 'u';
229 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
230 for (i = 0; i < XVECLEN (x, 0); i++)
231 encode_pattern_1 (XVECEXP (x, 0, i));
232 break;
233 case USE:
234 *patternp++ = 'U';
235 break;
236 case PARALLEL:
237 *patternp++ = '|';
238 for (i = 0; i < XVECLEN (x, 0); i++)
239 encode_pattern_1 (XVECEXP (x, 0, i));
240 break;
241 case EXPR_LIST:
242 *patternp++ = 'E';
243 encode_pattern_1 (XEXP (x, 0));
244 if (XEXP (x, 1))
245 encode_pattern_1 (XEXP (x, 1));
246 break;
247 default:
248 *patternp++ = '?';
249 #if DEBUG0
250 fprintf (stderr, "can't encode pattern %s\n",
251 GET_RTX_NAME (GET_CODE (x)));
252 debug_rtx (x);
253 gcc_unreachable ();
254 #endif
255 break;
256 }
257 }
258
259 static void
260 encode_pattern (rtx x)
261 {
262 patternp = pattern;
263 encode_pattern_1 (x);
264 *patternp = 0;
265 }
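/* A minimal usage sketch, mirroring how the predicates later in this
   file consume the encoding:

     encode_pattern (addr);
     if (RTX_IS ("+ri"))
       ...   with patternr[1] being the base REG and patternr[2] the
             CONST_INT displacement (see m32c_legitimate_address_p).  */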
266
267 /* Since register names indicate the mode they're used in, we need a
268 way to determine which name to refer to the register with. Called
269 by print_operand(). */
270
271 static const char *
272 reg_name_with_mode (int regno, enum machine_mode mode)
273 {
274 int mlen = GET_MODE_SIZE (mode);
275 if (regno == R0_REGNO && mlen == 1)
276 return "r0l";
277 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
278 return "r2r0";
279 if (regno == R0_REGNO && mlen == 6)
280 return "r2r1r0";
281 if (regno == R0_REGNO && mlen == 8)
282 return "r3r1r2r0";
283 if (regno == R1_REGNO && mlen == 1)
284 return "r1l";
285 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
286 return "r3r1";
287 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
288 return "a1a0";
289 return reg_names[regno];
290 }
291
292 /* How many bytes a register uses on stack when it's pushed. We need
293 to know this because the push opcode needs to explicitly indicate
294 the size of the register, even though the name of the register
295 already tells it that. Used by m32c_output_reg_{push,pop}, which
296 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
297
298 static int
299 reg_push_size (int regno)
300 {
301 switch (regno)
302 {
303 case R0_REGNO:
304 case R1_REGNO:
305 return 2;
306 case R2_REGNO:
307 case R3_REGNO:
308 case FLG_REGNO:
309 return 2;
310 case A0_REGNO:
311 case A1_REGNO:
312 case SB_REGNO:
313 case FB_REGNO:
314 case SP_REGNO:
315 if (TARGET_A16)
316 return 2;
317 else
318 return 3;
319 default:
320 gcc_unreachable ();
321 }
322 }
323
324 static int *class_sizes = 0;
325
326 /* Given two register classes, find the largest intersection between
327 them. If there is no intersection, return RETURNED_IF_EMPTY
328 instead. */
329 static int
330 reduce_class (int original_class, int limiting_class, int returned_if_empty)
331 {
332 int cc = class_contents[original_class][0];
333 int i, best = NO_REGS;
334 int best_size = 0;
335
336 if (original_class == limiting_class)
337 return original_class;
338
339 if (!class_sizes)
340 {
341 int r;
342 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
343 for (i = 0; i < LIM_REG_CLASSES; i++)
344 {
345 class_sizes[i] = 0;
346 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
347 if (class_contents[i][0] & (1 << r))
348 class_sizes[i]++;
349 }
350 }
351
352 cc &= class_contents[limiting_class][0];
353 for (i = 0; i < LIM_REG_CLASSES; i++)
354 {
355 int ic = class_contents[i][0];
356
357 if ((~cc & ic) == 0)
358 if (best_size < class_sizes[i])
359 {
360 best = i;
361 best_size = class_sizes[i];
362 }
363
364 }
365 if (best == NO_REGS)
366 return returned_if_empty;
367 return best;
368 }
369
370 /* Used by m32c_register_move_cost to determine if a move is
371 impossibly expensive. */
372 static bool
373 class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
374 {
375 /* Cache the results: 0=untested 1=no 2=yes */
376 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
377
378 if (results[(int) rclass][mode] == 0)
379 {
380 int r;
381 results[rclass][mode] = 1;
382 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
383 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
384 && HARD_REGNO_MODE_OK (r, mode))
385 {
386 results[rclass][mode] = 2;
387 break;
388 }
389 }
390
391 #if DEBUG0
392 fprintf (stderr, "class %s can hold %s? %s\n",
393 class_names[(int) rclass], mode_name[mode],
394 (results[rclass][mode] == 2) ? "yes" : "no");
395 #endif
396 return results[(int) rclass][mode] == 2;
397 }
398
399 /* Run-time Target Specification. */
400
401 /* Memregs are memory locations that gcc treats like general
402 registers, as there are a limited number of true registers and the
403 m32c families can use memory in most places that registers can be
404 used.
405
406 However, since memory accesses are more expensive than registers,
407 we allow the user to limit the number of memregs available, in
408 order to try to persuade gcc to try harder to use real registers.
409
410 Memregs are provided by m32c-lib1.S.
411 */
412
413 int target_memregs = 16;
414 static bool target_memregs_set = FALSE;
415 int ok_to_change_target_memregs = TRUE;
416
417 #undef TARGET_HANDLE_OPTION
418 #define TARGET_HANDLE_OPTION m32c_handle_option
419 static bool
420 m32c_handle_option (size_t code,
421 const char *arg ATTRIBUTE_UNUSED,
422 int value ATTRIBUTE_UNUSED)
423 {
424 if (code == OPT_memregs_)
425 {
426 target_memregs_set = TRUE;
427 target_memregs = atoi (arg);
428 }
429 return TRUE;
430 }
431
432 /* Implements TARGET_OPTION_OVERRIDE. */
433
434 #undef TARGET_OPTION_OVERRIDE
435 #define TARGET_OPTION_OVERRIDE m32c_option_override
436
437 static void
438 m32c_option_override (void)
439 {
440 /* We limit memregs to 0..16, and provide a default. */
441 if (target_memregs_set)
442 {
443 if (target_memregs < 0 || target_memregs > 16)
444 error ("invalid target memregs value '%d'", target_memregs);
445 }
446 else
447 target_memregs = 16;
448
449 if (TARGET_A24)
450 flag_ivopts = 0;
451
452 /* This target defaults to strict volatile bitfields. */
453 if (flag_strict_volatile_bitfields < 0)
454 flag_strict_volatile_bitfields = 1;
455
456 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
457 This is always worse than an absolute call. */
458 if (TARGET_A16)
459 flag_no_function_cse = 1;
460 }
461
462 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
463 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
464
465 static void
466 m32c_override_options_after_change (void)
467 {
468 if (TARGET_A16)
469 flag_no_function_cse = 1;
470 }
471
472 /* Defining data structures for per-function information */
473
474 /* The usual; we set up our machine_function data. */
475 static struct machine_function *
476 m32c_init_machine_status (void)
477 {
478 return ggc_alloc_cleared_machine_function ();
479 }
480
481 /* Implements INIT_EXPANDERS. We just set up to call the above
482 function. */
483 void
484 m32c_init_expanders (void)
485 {
486 init_machine_status = m32c_init_machine_status;
487 }
488
489 /* Storage Layout */
490
491 /* Register Basics */
492
493 /* Basic Characteristics of Registers */
494
495 /* Whether a mode fits in a register is complex enough to warrant a
496 table. */
497 static struct
498 {
499 char qi_regs;
500 char hi_regs;
501 char pi_regs;
502 char si_regs;
503 char di_regs;
504 } nregs_table[FIRST_PSEUDO_REGISTER] =
505 {
506 { 1, 1, 2, 2, 4 }, /* r0 */
507 { 0, 1, 0, 0, 0 }, /* r2 */
508 { 1, 1, 2, 2, 0 }, /* r1 */
509 { 0, 1, 0, 0, 0 }, /* r3 */
510 { 0, 1, 1, 0, 0 }, /* a0 */
511 { 0, 1, 1, 0, 0 }, /* a1 */
512 { 0, 1, 1, 0, 0 }, /* sb */
513 { 0, 1, 1, 0, 0 }, /* fb */
514 { 0, 1, 1, 0, 0 }, /* sp */
515 { 1, 1, 1, 0, 0 }, /* pc */
516 { 0, 0, 0, 0, 0 }, /* fl */
517 { 1, 1, 1, 0, 0 }, /* ap */
518 { 1, 1, 2, 2, 4 }, /* mem0 */
519 { 1, 1, 2, 2, 4 }, /* mem1 */
520 { 1, 1, 2, 2, 4 }, /* mem2 */
521 { 1, 1, 2, 2, 4 }, /* mem3 */
522 { 1, 1, 2, 2, 4 }, /* mem4 */
523 { 1, 1, 2, 2, 0 }, /* mem5 */
524 { 1, 1, 2, 2, 0 }, /* mem6 */
525 { 1, 1, 0, 0, 0 }, /* mem7 */
526 };
527
528 /* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
529 available memregs, and select which registers need to be preserved
530 across calls based on the chip family. */
531
532 void
533 m32c_conditional_register_usage (void)
534 {
535 int i;
536
537 if (0 <= target_memregs && target_memregs <= 16)
538 {
539 /* The command line option is bytes, but our "registers" are
540 16-bit words. */
541 for (i = (target_memregs+1)/2; i < 8; i++)
542 {
543 fixed_regs[MEM0_REGNO + i] = 1;
544 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
545 }
546 }
547
548 /* M32CM and M32C preserve more registers across function calls. */
549 if (TARGET_A24)
550 {
551 call_used_regs[R1_REGNO] = 0;
552 call_used_regs[R2_REGNO] = 0;
553 call_used_regs[R3_REGNO] = 0;
554 call_used_regs[A0_REGNO] = 0;
555 call_used_regs[A1_REGNO] = 0;
556 }
557 }
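/* A sketch of the mapping implied above: -memregs=6 rounds up to three
   16-bit memreg words, so mem0..mem2 remain allocatable while
   mem3..mem7 become fixed.  */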
558
559 /* How Values Fit in Registers */
560
561 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
562 different registers are different sizes from each other, *and* may
563 be different sizes in different chip families. */
564 static int
565 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
566 {
567 if (regno == FLG_REGNO && mode == CCmode)
568 return 1;
569 if (regno >= FIRST_PSEUDO_REGISTER)
570 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
571
572 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
573 return (GET_MODE_SIZE (mode) + 1) / 2;
574
575 if (GET_MODE_SIZE (mode) <= 1)
576 return nregs_table[regno].qi_regs;
577 if (GET_MODE_SIZE (mode) <= 2)
578 return nregs_table[regno].hi_regs;
579 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
580 return 2;
581 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
582 return nregs_table[regno].pi_regs;
583 if (GET_MODE_SIZE (mode) <= 4)
584 return nregs_table[regno].si_regs;
585 if (GET_MODE_SIZE (mode) <= 8)
586 return nregs_table[regno].di_regs;
587 return 0;
588 }
589
590 int
591 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
592 {
593 int rv = m32c_hard_regno_nregs_1 (regno, mode);
594 return rv ? rv : 1;
595 }
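/* For instance, per nregs_table above, SImode in r0 occupies two hard
   registers (the r2r0 pair), while SImode in a0 is only supported on
   TARGET_A16 chips, where it spans the a1a0 pair.  */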
596
597 /* Implements HARD_REGNO_MODE_OK. The above function does the work
598 already; just test its return value. */
599 int
600 m32c_hard_regno_ok (int regno, enum machine_mode mode)
601 {
602 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
603 }
604
605 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
606 registers are all different sizes. However, since most modes are
607 bigger than our registers anyway, it's easier to implement this
608 function that way, leaving QImode as the only unique case. */
609 int
610 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
611 {
612 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
613 return 1;
614
615 #if 0
616 if (m1 == QImode || m2 == QImode)
617 return 0;
618 #endif
619
620 return 1;
621 }
622
623 /* Register Classes */
624
625 /* Implements REGNO_REG_CLASS. */
626 enum reg_class
627 m32c_regno_reg_class (int regno)
628 {
629 switch (regno)
630 {
631 case R0_REGNO:
632 return R0_REGS;
633 case R1_REGNO:
634 return R1_REGS;
635 case R2_REGNO:
636 return R2_REGS;
637 case R3_REGNO:
638 return R3_REGS;
639 case A0_REGNO:
640 case A1_REGNO:
641 return A_REGS;
642 case SB_REGNO:
643 return SB_REGS;
644 case FB_REGNO:
645 return FB_REGS;
646 case SP_REGNO:
647 return SP_REGS;
648 case FLG_REGNO:
649 return FLG_REGS;
650 default:
651 if (IS_MEM_REGNO (regno))
652 return MEM_REGS;
653 return ALL_REGS;
654 }
655 }
656
657 /* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
658 for certain chip families. */
659 int
660 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
661 {
662 if (memcmp (s, "Rsp", 3) == 0)
663 return SP_REGS;
664 if (memcmp (s, "Rfb", 3) == 0)
665 return FB_REGS;
666 if (memcmp (s, "Rsb", 3) == 0)
667 return SB_REGS;
668 if (memcmp (s, "Rcr", 3) == 0)
669 return TARGET_A16 ? CR_REGS : NO_REGS;
670 if (memcmp (s, "Rcl", 3) == 0)
671 return TARGET_A24 ? CR_REGS : NO_REGS;
672 if (memcmp (s, "R0w", 3) == 0)
673 return R0_REGS;
674 if (memcmp (s, "R1w", 3) == 0)
675 return R1_REGS;
676 if (memcmp (s, "R2w", 3) == 0)
677 return R2_REGS;
678 if (memcmp (s, "R3w", 3) == 0)
679 return R3_REGS;
680 if (memcmp (s, "R02", 3) == 0)
681 return R02_REGS;
682 if (memcmp (s, "R13", 3) == 0)
683 return R13_REGS;
684 if (memcmp (s, "R03", 3) == 0)
685 return R03_REGS;
686 if (memcmp (s, "Rdi", 3) == 0)
687 return DI_REGS;
688 if (memcmp (s, "Rhl", 3) == 0)
689 return HL_REGS;
690 if (memcmp (s, "R23", 3) == 0)
691 return R23_REGS;
692 if (memcmp (s, "Ra0", 3) == 0)
693 return A0_REGS;
694 if (memcmp (s, "Ra1", 3) == 0)
695 return A1_REGS;
696 if (memcmp (s, "Raa", 3) == 0)
697 return A_REGS;
698 if (memcmp (s, "Raw", 3) == 0)
699 return TARGET_A16 ? A_REGS : NO_REGS;
700 if (memcmp (s, "Ral", 3) == 0)
701 return TARGET_A24 ? A_REGS : NO_REGS;
702 if (memcmp (s, "Rqi", 3) == 0)
703 return QI_REGS;
704 if (memcmp (s, "Rad", 3) == 0)
705 return AD_REGS;
706 if (memcmp (s, "Rsi", 3) == 0)
707 return SI_REGS;
708 if (memcmp (s, "Rhi", 3) == 0)
709 return HI_REGS;
710 if (memcmp (s, "Rhc", 3) == 0)
711 return HC_REGS;
712 if (memcmp (s, "Rra", 3) == 0)
713 return RA_REGS;
714 if (memcmp (s, "Rfl", 3) == 0)
715 return FLG_REGS;
716 if (memcmp (s, "Rmm", 3) == 0)
717 {
718 if (fixed_regs[MEM0_REGNO])
719 return NO_REGS;
720 return MEM_REGS;
721 }
722
723 /* PSImode registers - i.e. whatever can hold a pointer. */
724 if (memcmp (s, "Rpi", 3) == 0)
725 {
726 if (TARGET_A16)
727 return HI_REGS;
728 else
729 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
730 }
731
732 /* We handle this one as an EXTRA_CONSTRAINT. */
733 if (memcmp (s, "Rpa", 3) == 0)
734 return NO_REGS;
735
736 if (*s == 'R')
737 {
738 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
739 gcc_unreachable();
740 }
741
742 return NO_REGS;
743 }
744
745 /* Implements REGNO_OK_FOR_BASE_P. */
746 int
747 m32c_regno_ok_for_base_p (int regno)
748 {
749 if (regno == A0_REGNO
750 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
751 return 1;
752 return 0;
753 }
754
755 #define DEBUG_RELOAD 0
756
757 /* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
758 registers of the appropriate size. */
759 int
760 m32c_preferred_reload_class (rtx x, int rclass)
761 {
762 int newclass = rclass;
763
764 #if DEBUG_RELOAD
765 fprintf (stderr, "\npreferred_reload_class for %s is ",
766 class_names[rclass]);
767 #endif
768 if (rclass == NO_REGS)
769 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
770
771 if (reg_classes_intersect_p (rclass, CR_REGS))
772 {
773 switch (GET_MODE (x))
774 {
775 case QImode:
776 newclass = HL_REGS;
777 break;
778 default:
779 /* newclass = HI_REGS; */
780 break;
781 }
782 }
783
784 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
785 newclass = SI_REGS;
786 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
787 && ~class_contents[rclass][0] & 0x000f)
788 newclass = DI_REGS;
789
790 rclass = reduce_class (rclass, newclass, rclass);
791
792 if (GET_MODE (x) == QImode)
793 rclass = reduce_class (rclass, HL_REGS, rclass);
794
795 #if DEBUG_RELOAD
796 fprintf (stderr, "%s\n", class_names[rclass]);
797 debug_rtx (x);
798
799 if (GET_CODE (x) == MEM
800 && GET_CODE (XEXP (x, 0)) == PLUS
801 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
802 fprintf (stderr, "Glorm!\n");
803 #endif
804 return rclass;
805 }
806
807 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
808 int
809 m32c_preferred_output_reload_class (rtx x, int rclass)
810 {
811 return m32c_preferred_reload_class (x, rclass);
812 }
813
814 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
815 address registers for reloads since they're needed for address
816 reloads. */
817 int
818 m32c_limit_reload_class (enum machine_mode mode, int rclass)
819 {
820 #if DEBUG_RELOAD
821 fprintf (stderr, "limit_reload_class for %s: %s ->",
822 mode_name[mode], class_names[rclass]);
823 #endif
824
825 if (mode == QImode)
826 rclass = reduce_class (rclass, HL_REGS, rclass);
827 else if (mode == HImode)
828 rclass = reduce_class (rclass, HI_REGS, rclass);
829 else if (mode == SImode)
830 rclass = reduce_class (rclass, SI_REGS, rclass);
831
832 if (rclass != A_REGS)
833 rclass = reduce_class (rclass, DI_REGS, rclass);
834
835 #if DEBUG_RELOAD
836 fprintf (stderr, " %s\n", class_names[rclass]);
837 #endif
838 return rclass;
839 }
840
841 /* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded
842 into r0 or r1, as those are the only real QImode registers. CR regs get
843 reloaded through appropriately sized general or address
844 registers. */
845 int
846 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
847 {
848 int cc = class_contents[rclass][0];
849 #if DEBUG0
850 fprintf (stderr, "\nsecondary reload class %s %s\n",
851 class_names[rclass], mode_name[mode]);
852 debug_rtx (x);
853 #endif
854 if (mode == QImode
855 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
856 return QI_REGS;
857 if (reg_classes_intersect_p (rclass, CR_REGS)
858 && GET_CODE (x) == REG
859 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
860 return TARGET_A16 ? HI_REGS : A_REGS;
861 return NO_REGS;
862 }
863
864 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
865 reloads. */
866
867 #undef TARGET_CLASS_LIKELY_SPILLED_P
868 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
869
870 static bool
871 m32c_class_likely_spilled_p (reg_class_t regclass)
872 {
873 if (regclass == A_REGS)
874 return true;
875
876 return (reg_class_size[(int) regclass] == 1);
877 }
878
879 /* Implements CLASS_MAX_NREGS. We calculate this according to its
880 documented meaning, to avoid potential inconsistencies with actual
881 class definitions. */
882 int
883 m32c_class_max_nregs (int regclass, enum machine_mode mode)
884 {
885 int rn, max = 0;
886
887 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
888 if (class_contents[regclass][0] & (1 << rn))
889 {
890 int n = m32c_hard_regno_nregs (rn, mode);
891 if (max < n)
892 max = n;
893 }
894 return max;
895 }
896
897 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
898 QI (r0l, r1l) because the chip doesn't support QI ops on other
899 registers (well, it does on a0/a1 but if we let gcc do that, reload
900 suffers). Otherwise, we allow changes to larger modes. */
901 int
902 m32c_cannot_change_mode_class (enum machine_mode from,
903 enum machine_mode to, int rclass)
904 {
905 int rn;
906 #if DEBUG0
907 fprintf (stderr, "cannot change from %s to %s in %s\n",
908 mode_name[from], mode_name[to], class_names[rclass]);
909 #endif
910
911 /* If the larger mode isn't allowed in any of these registers, we
912 can't allow the change. */
913 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
914 if (class_contents[rclass][0] & (1 << rn))
915 if (! m32c_hard_regno_ok (rn, to))
916 return 1;
917
918 if (to == QImode)
919 return (class_contents[rclass][0] & 0x1ffa);
920
921 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
922 && GET_MODE_SIZE (from) > 1)
923 return 0;
924 if (GET_MODE_SIZE (from) > 2) /* all other regs */
925 return 0;
926
927 return 1;
928 }
929
930 /* Helpers for the rest of the file. */
931 /* TRUE if the rtx is a REG rtx for the given register. */
932 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
933 && REGNO (rtx) == regno)
934 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
935 base register in address calculations (hence the "strict"
936 argument). */
937 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
938 && (REGNO (rtx) == AP_REGNO \
939 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
940
941 /* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
942 constraints start with 'I', with the next two characters indicating
943 the type and size of the range allowed. */
944 int
945 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
946 char c ATTRIBUTE_UNUSED, const char *str)
947 {
948 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
949 [sun] bits [SUN] bytes, p=pointer size
950 I[-0-9][0-9] matches that number */
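/* Worked examples (these simply restate the checks below):
     "Is3"  signed 3-bit immediate           -> -8 .. 7
     "IU2"  unsigned 2-byte immediate        -> 0 .. 65535
     "Ilb"  a single set bit in the low byte (exact_log2 in 0..7)
     "Imb"  a single clear bit in the low byte.  */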
951 if (memcmp (str, "Is3", 3) == 0)
952 {
953 return (-8 <= value && value <= 7);
954 }
955 if (memcmp (str, "IS1", 3) == 0)
956 {
957 return (-128 <= value && value <= 127);
958 }
959 if (memcmp (str, "IS2", 3) == 0)
960 {
961 return (-32768 <= value && value <= 32767);
962 }
963 if (memcmp (str, "IU2", 3) == 0)
964 {
965 return (0 <= value && value <= 65535);
966 }
967 if (memcmp (str, "IU3", 3) == 0)
968 {
969 return (0 <= value && value <= 0x00ffffff);
970 }
971 if (memcmp (str, "In4", 3) == 0)
972 {
973 return (-8 <= value && value && value <= 8);
974 }
975 if (memcmp (str, "In5", 3) == 0)
976 {
977 return (-16 <= value && value && value <= 16);
978 }
979 if (memcmp (str, "In6", 3) == 0)
980 {
981 return (-32 <= value && value && value <= 32);
982 }
983 if (memcmp (str, "IM2", 3) == 0)
984 {
985 return (-65536 <= value && value && value <= -1);
986 }
987 if (memcmp (str, "Ilb", 3) == 0)
988 {
989 int b = exact_log2 (value);
990 return (b >= 0 && b <= 7);
991 }
992 if (memcmp (str, "Imb", 3) == 0)
993 {
994 int b = exact_log2 ((value ^ 0xff) & 0xff);
995 return (b >= 0 && b <= 7);
996 }
997 if (memcmp (str, "ImB", 3) == 0)
998 {
999 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
1000 return (b >= 0 && b <= 7);
1001 }
1002 if (memcmp (str, "Ilw", 3) == 0)
1003 {
1004 int b = exact_log2 (value);
1005 return (b >= 0 && b <= 15);
1006 }
1007 if (memcmp (str, "Imw", 3) == 0)
1008 {
1009 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
1010 return (b >= 0 && b <= 15);
1011 }
1012 if (memcmp (str, "I00", 3) == 0)
1013 {
1014 return (value == 0);
1015 }
1016 return 0;
1017 }
1018
1019 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
1020
1021 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
1022 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
1023 call return values. */
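/* A quick key to the 'S' constraints handled below, derived from the
   checks themselves (a reading aid, not a separate spec): Sd = generic
   src/dest address, Sa = a0/a1 indirect (with optional offset), Si =
   constant or symbolic address, Ss/Sf/Sb = sp/fb/sb relative, Sp =
   absolute 0..0x1fff (bit-addressable I/O), S1 = r1h, SF = far-space
   memory.  */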
1024 int
1025 m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
1026 {
1027 encode_pattern (value);
1028
1029 if (far_addr_space_p (value))
1030 {
1031 if (memcmp (str, "SF", 2) == 0)
1032 {
1033 return ( (RTX_IS ("mr")
1034 && A0_OR_PSEUDO (patternr[1])
1035 && GET_MODE (patternr[1]) == SImode)
1036 || (RTX_IS ("m+^Sri")
1037 && A0_OR_PSEUDO (patternr[4])
1038 && GET_MODE (patternr[4]) == HImode)
1039 || (RTX_IS ("m+^Srs")
1040 && A0_OR_PSEUDO (patternr[4])
1041 && GET_MODE (patternr[4]) == HImode)
1042 || (RTX_IS ("m+^S+ris")
1043 && A0_OR_PSEUDO (patternr[5])
1044 && GET_MODE (patternr[5]) == HImode)
1045 || RTX_IS ("ms")
1046 );
1047 }
1048 return 0;
1049 }
1050
1051 if (memcmp (str, "Sd", 2) == 0)
1052 {
1053 /* This is the common "src/dest" address */
1054 rtx r;
1055 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
1056 return 1;
1057 if (RTX_IS ("ms") || RTX_IS ("m+si"))
1058 return 1;
1059 if (RTX_IS ("m++rii"))
1060 {
1061 if (REGNO (patternr[3]) == FB_REGNO
1062 && INTVAL (patternr[4]) == 0)
1063 return 1;
1064 }
1065 if (RTX_IS ("mr"))
1066 r = patternr[1];
1067 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1068 r = patternr[2];
1069 else
1070 return 0;
1071 if (REGNO (r) == SP_REGNO)
1072 return 0;
1073 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1074 }
1075 else if (memcmp (str, "Sa", 2) == 0)
1076 {
1077 rtx r;
1078 if (RTX_IS ("mr"))
1079 r = patternr[1];
1080 else if (RTX_IS ("m+ri"))
1081 r = patternr[2];
1082 else
1083 return 0;
1084 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1085 }
1086 else if (memcmp (str, "Si", 2) == 0)
1087 {
1088 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1089 }
1090 else if (memcmp (str, "Ss", 2) == 0)
1091 {
1092 return ((RTX_IS ("mr")
1093 && (IS_REG (patternr[1], SP_REGNO)))
1094 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1095 }
1096 else if (memcmp (str, "Sf", 2) == 0)
1097 {
1098 return ((RTX_IS ("mr")
1099 && (IS_REG (patternr[1], FB_REGNO)))
1100 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1101 }
1102 else if (memcmp (str, "Sb", 2) == 0)
1103 {
1104 return ((RTX_IS ("mr")
1105 && (IS_REG (patternr[1], SB_REGNO)))
1106 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1107 }
1108 else if (memcmp (str, "Sp", 2) == 0)
1109 {
1110 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1111 return (RTX_IS ("mi")
1112 && !(INTVAL (patternr[1]) & ~0x1fff));
1113 }
1114 else if (memcmp (str, "S1", 2) == 0)
1115 {
1116 return r1h_operand (value, QImode);
1117 }
1118 else if (memcmp (str, "SF", 2) == 0)
1119 {
1120 return 0;
1121 }
1122
1123 gcc_assert (str[0] != 'S');
1124
1125 if (memcmp (str, "Rpa", 2) == 0)
1126 return GET_CODE (value) == PARALLEL;
1127
1128 return 0;
1129 }
1130
1131 /* This is for when we're debugging the above. */
1132 int
1133 m32c_extra_constraint_p (rtx value, char c, const char *str)
1134 {
1135 int rv = m32c_extra_constraint_p2 (value, c, str);
1136 #if DEBUG0
1137 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1138 rv);
1139 debug_rtx (value);
1140 #endif
1141 return rv;
1142 }
1143
1144 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1145 starting with 'S'. */
1146 int
1147 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1148 {
1149 return c == 'S';
1150 }
1151
1152 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1153 but don't currently define any. */
1154 int
1155 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1156 {
1157 return c == 'A';
1158 }
1159
1160 /* STACK AND CALLING */
1161
1162 /* Frame Layout */
1163
1164 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1165 (yes, THREE bytes) onto the stack for the return address, but we
1166 don't support pointers bigger than 16 bits on those chips. This
1167 will likely wreak havoc with exception unwinding. FIXME. */
1168 rtx
1169 m32c_return_addr_rtx (int count)
1170 {
1171 enum machine_mode mode;
1172 int offset;
1173 rtx ra_mem;
1174
1175 if (count)
1176 return NULL_RTX;
1177 /* we want 2[$fb] */
1178
1179 if (TARGET_A24)
1180 {
1181 /* It's four bytes */
1182 mode = PSImode;
1183 offset = 4;
1184 }
1185 else
1186 {
1187 /* FIXME: it's really 3 bytes */
1188 mode = HImode;
1189 offset = 2;
1190 }
1191
1192 ra_mem =
1193 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1194 return copy_to_mode_reg (mode, ra_mem);
1195 }
1196
1197 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1198 rtx
1199 m32c_incoming_return_addr_rtx (void)
1200 {
1201 /* we want [sp] */
1202 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1203 }
1204
1205 /* Exception Handling Support */
1206
1207 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1208 pointers. */
1209 int
1210 m32c_eh_return_data_regno (int n)
1211 {
1212 switch (n)
1213 {
1214 case 0:
1215 return A0_REGNO;
1216 case 1:
1217 if (TARGET_A16)
1218 return R3_REGNO;
1219 else
1220 return R1_REGNO;
1221 default:
1222 return INVALID_REGNUM;
1223 }
1224 }
1225
1226 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1227 m32c_emit_eh_epilogue. */
1228 rtx
1229 m32c_eh_return_stackadj_rtx (void)
1230 {
1231 if (!cfun->machine->eh_stack_adjust)
1232 {
1233 rtx sa;
1234
1235 sa = gen_rtx_REG (Pmode, R0_REGNO);
1236 cfun->machine->eh_stack_adjust = sa;
1237 }
1238 return cfun->machine->eh_stack_adjust;
1239 }
1240
1241 /* Registers That Address the Stack Frame */
1242
1243 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1244 the original spec called for dwarf numbers to vary with register
1245 width as well, for example, r0l, r0, and r2r0 would each have
1246 different dwarf numbers. GCC doesn't support this, and we don't do
1247 it, and gdb seems to like it this way anyway. */
1248 unsigned int
1249 m32c_dwarf_frame_regnum (int n)
1250 {
1251 switch (n)
1252 {
1253 case R0_REGNO:
1254 return 5;
1255 case R1_REGNO:
1256 return 6;
1257 case R2_REGNO:
1258 return 7;
1259 case R3_REGNO:
1260 return 8;
1261 case A0_REGNO:
1262 return 9;
1263 case A1_REGNO:
1264 return 10;
1265 case FB_REGNO:
1266 return 11;
1267 case SB_REGNO:
1268 return 19;
1269
1270 case SP_REGNO:
1271 return 12;
1272 case PC_REGNO:
1273 return 13;
1274 default:
1275 return DWARF_FRAME_REGISTERS + 1;
1276 }
1277 }
1278
1279 /* The frame looks like this:
1280
1281 ap -> +------------------------------
1282 | Return address (3 or 4 bytes)
1283 | Saved FB (2 or 4 bytes)
1284 fb -> +------------------------------
1285 | local vars
1286 | register saves fb
1287 | through r0 as needed
1288 sp -> +------------------------------
1289 */
1290
1291 /* We use this to wrap all emitted insns in the prologue. */
1292 static rtx
1293 F (rtx x)
1294 {
1295 RTX_FRAME_RELATED_P (x) = 1;
1296 return x;
1297 }
1298
1299 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1300 how much the stack pointer moves for each, for each cpu family. */
1301 static struct
1302 {
1303 int reg1;
1304 int bit;
1305 int a16_bytes;
1306 int a24_bytes;
1307 } pushm_info[] =
1308 {
1309 /* These are in reverse push (nearest-to-sp) order. */
1310 { R0_REGNO, 0x80, 2, 2 },
1311 { R1_REGNO, 0x40, 2, 2 },
1312 { R2_REGNO, 0x20, 2, 2 },
1313 { R3_REGNO, 0x10, 2, 2 },
1314 { A0_REGNO, 0x08, 2, 4 },
1315 { A1_REGNO, 0x04, 2, 4 },
1316 { SB_REGNO, 0x02, 2, 4 },
1317 { FB_REGNO, 0x01, 2, 4 }
1318 };
1319
1320 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
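/* For example, a save set of r0, r3 and a0 corresponds to a pushm/popm
   mask of 0x80 | 0x10 | 0x08 == 0x98, moving the stack pointer by
   2+2+2 = 6 bytes on A16 or 2+2+4 = 8 bytes on A24, per the table
   above.  */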
1321
1322 /* Returns TRUE if we need to save/restore the given register. We
1323 save everything for exception handlers, so that any register can be
1324 unwound. For interrupt handlers, we save everything if the handler
1325 calls something else (because we don't know what *that* function
1326 might do), but try to be a bit smarter if the handler is a leaf
1327 function. We always save $a0, though, because we use that in the
1328 epilogue to copy $fb to $sp. */
1329 static int
1330 need_to_save (int regno)
1331 {
1332 if (fixed_regs[regno])
1333 return 0;
1334 if (crtl->calls_eh_return)
1335 return 1;
1336 if (regno == FP_REGNO)
1337 return 0;
1338 if (cfun->machine->is_interrupt
1339 && (!cfun->machine->is_leaf
1340 || (regno == A0_REGNO
1341 && m32c_function_needs_enter ())
1342 ))
1343 return 1;
1344 if (df_regs_ever_live_p (regno)
1345 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1346 return 1;
1347 return 0;
1348 }
1349
1350 /* This function contains all the intelligence about saving and
1351 restoring registers. It always figures out the register save set.
1352 When called with PP_justcount, it merely returns the size of the
1353 save set (for eliminating the frame pointer, for example). When
1354 called with PP_pushm or PP_popm, it emits the appropriate
1355 instructions for saving (pushm) or restoring (popm) the
1356 registers. */
1357 static int
1358 m32c_pushm_popm (Push_Pop_Type ppt)
1359 {
1360 int reg_mask = 0;
1361 int byte_count = 0, bytes;
1362 int i;
1363 rtx dwarf_set[PUSHM_N];
1364 int n_dwarfs = 0;
1365 int nosave_mask = 0;
1366
1367 if (crtl->return_rtx
1368 && GET_CODE (crtl->return_rtx) == PARALLEL
1369 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1370 {
1371 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1372 rtx rv = XEXP (exp, 0);
1373 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1374
1375 if (rv_bytes > 2)
1376 nosave_mask |= 0x20; /* PSI, SI */
1377 else
1378 nosave_mask |= 0xf0; /* DF */
1379 if (rv_bytes > 4)
1380 nosave_mask |= 0x50; /* DI */
1381 }
1382
1383 for (i = 0; i < (int) PUSHM_N; i++)
1384 {
1385 /* Skip if neither register needs saving. */
1386 if (!need_to_save (pushm_info[i].reg1))
1387 continue;
1388
1389 if (pushm_info[i].bit & nosave_mask)
1390 continue;
1391
1392 reg_mask |= pushm_info[i].bit;
1393 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1394
1395 if (ppt == PP_pushm)
1396 {
1397 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1398 rtx addr;
1399
1400 /* Always use stack_pointer_rtx instead of calling
1401 gen_rtx_REG ourselves. Code elsewhere in GCC assumes
1402 that there is a single rtx representing the stack pointer,
1403 namely stack_pointer_rtx, and uses == to recognize it. */
1404 addr = stack_pointer_rtx;
1405
1406 if (byte_count != 0)
1407 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1408
1409 dwarf_set[n_dwarfs++] =
1410 gen_rtx_SET (VOIDmode,
1411 gen_rtx_MEM (mode, addr),
1412 gen_rtx_REG (mode, pushm_info[i].reg1));
1413 F (dwarf_set[n_dwarfs - 1]);
1414
1415 }
1416 byte_count += bytes;
1417 }
1418
1419 if (cfun->machine->is_interrupt)
1420 {
1421 cfun->machine->intr_pushm = reg_mask & 0xfe;
1422 reg_mask = 0;
1423 byte_count = 0;
1424 }
1425
1426 if (cfun->machine->is_interrupt)
1427 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1428 if (need_to_save (i))
1429 {
1430 byte_count += 2;
1431 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1432 }
1433
1434 if (ppt == PP_pushm && byte_count)
1435 {
1436 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1437 rtx pushm;
1438
1439 if (reg_mask)
1440 {
1441 XVECEXP (note, 0, 0)
1442 = gen_rtx_SET (VOIDmode,
1443 stack_pointer_rtx,
1444 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1445 stack_pointer_rtx,
1446 GEN_INT (-byte_count)));
1447 F (XVECEXP (note, 0, 0));
1448
1449 for (i = 0; i < n_dwarfs; i++)
1450 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1451
1452 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1453
1454 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1455 REG_NOTES (pushm));
1456 }
1457
1458 if (cfun->machine->is_interrupt)
1459 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1460 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1461 {
1462 if (TARGET_A16)
1463 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1464 else
1465 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1466 F (pushm);
1467 }
1468 }
1469 if (ppt == PP_popm && byte_count)
1470 {
1471 if (cfun->machine->is_interrupt)
1472 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1473 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1474 {
1475 if (TARGET_A16)
1476 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1477 else
1478 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1479 }
1480 if (reg_mask)
1481 emit_insn (gen_popm (GEN_INT (reg_mask)));
1482 }
1483
1484 return byte_count;
1485 }
1486
1487 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1488 diagrams our call frame. */
1489 int
1490 m32c_initial_elimination_offset (int from, int to)
1491 {
1492 int ofs = 0;
1493
1494 if (from == AP_REGNO)
1495 {
1496 if (TARGET_A16)
1497 ofs += 5;
1498 else
1499 ofs += 8;
1500 }
1501
1502 if (to == SP_REGNO)
1503 {
1504 ofs += m32c_pushm_popm (PP_justcount);
1505 ofs += get_frame_size ();
1506 }
1507
1508 /* Account for push rounding. */
1509 if (TARGET_A24)
1510 ofs = (ofs + 1) & ~1;
1511 #if DEBUG0
1512 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1513 to, ofs);
1514 #endif
1515 return ofs;
1516 }
1517
1518 /* Passing Function Arguments on the Stack */
1519
1520 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1521 M32C has word stacks. */
1522 int
1523 m32c_push_rounding (int n)
1524 {
1525 if (TARGET_R8C || TARGET_M16C)
1526 return n;
1527 return (n + 1) & ~1;
1528 }
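/* E.g. m32c_push_rounding (3) is 3 on the byte-stack chips (R8C/M16C)
   but rounds up to 4 on the word-stack M32C.  */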
1529
1530 /* Passing Arguments in Registers */
1531
1532 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1533 registers, partly on stack. If our function returns a struct, a
1534 pointer to a buffer for it is at the top of the stack (last thing
1535 pushed). The first few real arguments may be in registers as
1536 follows:
1537
1538 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1539 arg2 in r2 if it's HI (else pushed on stack)
1540 rest on stack
1541 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1542 rest on stack
1543
1544 Structs are not passed in registers, even if they fit. Only
1545 integer and pointer types are passed in registers.
1546
1547 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1548 r2 if it fits. */
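/* A hypothetical example, assuming 16-bit int as on this port: for
   f (int a, int b, long c), R8C/M16C passes a in r1, b in r2 and c on
   the stack, while M32C passes a in r0 and both b and c on the
   stack.  */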
1549 #undef TARGET_FUNCTION_ARG
1550 #define TARGET_FUNCTION_ARG m32c_function_arg
1551 static rtx
1552 m32c_function_arg (CUMULATIVE_ARGS * ca,
1553 enum machine_mode mode, const_tree type, bool named)
1554 {
1555 /* Can return a reg, parallel, or 0 for stack */
1556 rtx rv = NULL_RTX;
1557 #if DEBUG0
1558 fprintf (stderr, "func_arg %d (%s, %d)\n",
1559 ca->parm_num, mode_name[mode], named);
1560 debug_tree (type);
1561 #endif
1562
1563 if (mode == VOIDmode)
1564 return GEN_INT (0);
1565
1566 if (ca->force_mem || !named)
1567 {
1568 #if DEBUG0
1569 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1570 named);
1571 #endif
1572 return NULL_RTX;
1573 }
1574
1575 if (type && ! INTEGRAL_TYPE_P (type) && ! POINTER_TYPE_P (type))
1576 return NULL_RTX;
1577
1578 if (type && AGGREGATE_TYPE_P (type))
1579 return NULL_RTX;
1580
1581 switch (ca->parm_num)
1582 {
1583 case 1:
1584 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1585 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1586 break;
1587
1588 case 2:
1589 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1590 rv = gen_rtx_REG (mode, R2_REGNO);
1591 break;
1592 }
1593
1594 #if DEBUG0
1595 debug_rtx (rv);
1596 #endif
1597 return rv;
1598 }
1599
1600 #undef TARGET_PASS_BY_REFERENCE
1601 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1602 static bool
1603 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1604 enum machine_mode mode ATTRIBUTE_UNUSED,
1605 const_tree type ATTRIBUTE_UNUSED,
1606 bool named ATTRIBUTE_UNUSED)
1607 {
1608 return 0;
1609 }
1610
1611 /* Implements INIT_CUMULATIVE_ARGS. */
1612 void
1613 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1614 tree fntype,
1615 rtx libname ATTRIBUTE_UNUSED,
1616 tree fndecl,
1617 int n_named_args ATTRIBUTE_UNUSED)
1618 {
1619 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1620 ca->force_mem = 1;
1621 else
1622 ca->force_mem = 0;
1623 ca->parm_num = 1;
1624 }
1625
1626 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1627 functions returning structures, so we always reset that. Otherwise,
1628 we only need to know the sequence number of the argument to know what
1629 to do with it. */
1630 #undef TARGET_FUNCTION_ARG_ADVANCE
1631 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1632 static void
1633 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1634 enum machine_mode mode ATTRIBUTE_UNUSED,
1635 const_tree type ATTRIBUTE_UNUSED,
1636 bool named ATTRIBUTE_UNUSED)
1637 {
1638 if (ca->force_mem)
1639 ca->force_mem = 0;
1640 else
1641 ca->parm_num++;
1642 }
1643
1644 /* Implements FUNCTION_ARG_REGNO_P. */
1645 int
1646 m32c_function_arg_regno_p (int r)
1647 {
1648 if (TARGET_A24)
1649 return (r == R0_REGNO);
1650 return (r == R1_REGNO || r == R2_REGNO);
1651 }
1652
1653 /* HImode and PSImode are the two "native" modes as far as GCC is
1654 concerned, but the chips also support a 32-bit mode which is used
1655 for some opcodes in R8C/M16C and for reset vectors and such. */
1656 #undef TARGET_VALID_POINTER_MODE
1657 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1658 static bool
1659 m32c_valid_pointer_mode (enum machine_mode mode)
1660 {
1661 if (mode == HImode
1662 || mode == PSImode
1663 || mode == SImode
1664 )
1665 return 1;
1666 return 0;
1667 }
1668
1669 /* How Scalar Function Values Are Returned */
1670
1671 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1672 combination of registers starting there (r2r0 for longs, r3r1r2r0
1673 for long long, r3r2r1r0 for doubles), except that that ABI
1674 currently doesn't work because it ends up using all available
1675 general registers and gcc often can't compile it. So, instead, we
1676 return anything bigger than 16 bits in "mem0" (effectively, a
1677 memory location). */
1678
1679 #undef TARGET_LIBCALL_VALUE
1680 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1681
1682 static rtx
1683 m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1684 {
1685 /* return reg or parallel */
1686 #if 0
1687 /* FIXME: GCC has difficulty returning large values in registers,
1688 because that ties up most of the general registers and gives the
1689 register allocator little to work with. Until we can resolve
1690 this, large values are returned in memory. */
1691 if (mode == DFmode)
1692 {
1693 rtx rv;
1694
1695 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1696 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1697 gen_rtx_REG (HImode,
1698 R0_REGNO),
1699 GEN_INT (0));
1700 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1701 gen_rtx_REG (HImode,
1702 R1_REGNO),
1703 GEN_INT (2));
1704 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1705 gen_rtx_REG (HImode,
1706 R2_REGNO),
1707 GEN_INT (4));
1708 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1709 gen_rtx_REG (HImode,
1710 R3_REGNO),
1711 GEN_INT (6));
1712 return rv;
1713 }
1714
1715 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1716 {
1717 rtx rv;
1718
1719 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1720 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1721 gen_rtx_REG (mode,
1722 R0_REGNO),
1723 GEN_INT (0));
1724 return rv;
1725 }
1726 #endif
1727
1728 if (GET_MODE_SIZE (mode) > 2)
1729 return gen_rtx_REG (mode, MEM0_REGNO);
1730 return gen_rtx_REG (mode, R0_REGNO);
1731 }
1732
1733 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1734 conventions. */
1735
1736 #undef TARGET_FUNCTION_VALUE
1737 #define TARGET_FUNCTION_VALUE m32c_function_value
1738
1739 static rtx
1740 m32c_function_value (const_tree valtype,
1741 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1742 bool outgoing ATTRIBUTE_UNUSED)
1743 {
1744 /* return reg or parallel */
1745 const enum machine_mode mode = TYPE_MODE (valtype);
1746 return m32c_libcall_value (mode, NULL_RTX);
1747 }
1748
1749 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1750
1751 #undef TARGET_FUNCTION_VALUE_REGNO_P
1752 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1753
1754 static bool
1755 m32c_function_value_regno_p (const unsigned int regno)
1756 {
1757 return (regno == R0_REGNO || regno == MEM0_REGNO);
1758 }
1759
1760 /* How Large Values Are Returned */
1761
1762 /* We return structures by pushing the address on the stack, even if
1763 we use registers for the first few "real" arguments. */
1764 #undef TARGET_STRUCT_VALUE_RTX
1765 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1766 static rtx
1767 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1768 int incoming ATTRIBUTE_UNUSED)
1769 {
1770 return 0;
1771 }
1772
1773 /* Function Entry and Exit */
1774
1775 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1776 int
1777 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1778 {
1779 if (cfun->machine->is_interrupt)
1780 return 1;
1781 return 0;
1782 }
1783
1784 /* Implementing the Varargs Macros */
1785
1786 #undef TARGET_STRICT_ARGUMENT_NAMING
1787 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1788 static bool
1789 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1790 {
1791 return 1;
1792 }
1793
1794 /* Trampolines for Nested Functions */
1795
1796 /*
1797 m16c:
1798 1 0000 75C43412 mov.w #0x1234,a0
1799 2 0004 FC000000 jmp.a label
1800
1801 m32c:
1802 1 0000 BC563412 mov.l:s #0x123456,a0
1803 2 0004 CC000000 jmp.a label
1804 */
1805
1806 /* Implements TRAMPOLINE_SIZE. */
1807 int
1808 m32c_trampoline_size (void)
1809 {
1810 /* Allocate extra space so we can avoid the messy shifts when we
1811 initialize the trampoline; we just write past the end of the
1812 opcode. */
1813 return TARGET_A16 ? 8 : 10;
1814 }
1815
1816 /* Implements TRAMPOLINE_ALIGNMENT. */
1817 int
1818 m32c_trampoline_alignment (void)
1819 {
1820 return 2;
1821 }
1822
1823 /* Implements TARGET_TRAMPOLINE_INIT. */
1824
1825 #undef TARGET_TRAMPOLINE_INIT
1826 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1827 static void
1828 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1829 {
1830 rtx function = XEXP (DECL_RTL (fndecl), 0);
1831
1832 #define A0(m,i) adjust_address (m_tramp, m, i)
1833 if (TARGET_A16)
1834 {
1835 /* Note: we subtract a "word" because the moves want signed
1836 constants, not unsigned constants. */
1837 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1838 emit_move_insn (A0 (HImode, 2), chainval);
1839 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1840 /* We use 16-bit addresses here, but store the zero to turn it
1841 into a 24-bit offset. */
1842 emit_move_insn (A0 (HImode, 5), function);
1843 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1844 }
1845 else
1846 {
1847 /* Note that the PSI moves actually write 4 bytes. Make sure we
1848 write stuff out in the right order, and leave room for the
1849 extra byte at the end. */
1850 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1851 emit_move_insn (A0 (PSImode, 1), chainval);
1852 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1853 emit_move_insn (A0 (PSImode, 5), function);
1854 }
1855 #undef A0
1856 }
1857
1858 /* Implicit Calls to Library Routines */
1859
1860 #undef TARGET_INIT_LIBFUNCS
1861 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1862 static void
1863 m32c_init_libfuncs (void)
1864 {
1865 /* We do this because the M32C has an HImode operand, but the
1866 M16C has an 8-bit operand. Since gcc looks at the match data
1867 and not the expanded rtl, we have to reset the optab so that
1868 the right modes are found. */
1869 if (TARGET_A24)
1870 {
1871 set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
1872 set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
1873 set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
1874 }
1875 }
1876
1877 /* Addressing Modes */
1878
1879 /* The r8c/m32c family supports a wide range of non-orthogonal
1880 addressing modes, including the ability to double-indirect on *some*
1881 of them. Not all insns support all modes, either, but we rely on
1882 predicates and constraints to deal with that. */
1883 #undef TARGET_LEGITIMATE_ADDRESS_P
1884 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1885 bool
1886 m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1887 {
1888 int mode_adjust;
1889 if (CONSTANT_P (x))
1890 return 1;
1891
1892 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1893 return 0;
1894 if (TARGET_A24 && GET_MODE (x) != PSImode)
1895 return 0;
1896
1897 /* Wide references to memory will be split after reload, so we must
1898 ensure that all parts of such splits remain legitimate
1899 addresses. */
1900 mode_adjust = GET_MODE_SIZE (mode) - 1;
1901
1902 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1903 if (GET_CODE (x) == PRE_DEC
1904 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1905 {
1906 return (GET_CODE (XEXP (x, 0)) == REG
1907 && REGNO (XEXP (x, 0)) == SP_REGNO);
1908 }
1909
1910 #if 0
1911 /* This is the double indirection detection, but it currently
1912 doesn't work as cleanly as this code implies, so until we've had
1913 a chance to debug it, leave it disabled. */
1914 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1915 {
1916 #if DEBUG_DOUBLE
1917 fprintf (stderr, "double indirect\n");
1918 #endif
1919 x = XEXP (x, 0);
1920 }
1921 #endif
1922
1923 encode_pattern (x);
1924 if (RTX_IS ("r"))
1925 {
1926 /* Most indexable registers can be used without displacements,
1927 although some of them will be emitted with an explicit zero
1928 to please the assembler. */
1929 switch (REGNO (patternr[0]))
1930 {
1931 case A1_REGNO:
1932 case SB_REGNO:
1933 case FB_REGNO:
1934 case SP_REGNO:
1935 if (TARGET_A16 && GET_MODE (x) == SImode)
1936 return 0;
1937 case A0_REGNO:
1938 return 1;
1939
1940 default:
1941 if (IS_PSEUDO (patternr[0], strict))
1942 return 1;
1943 return 0;
1944 }
1945 }
1946
1947 if (TARGET_A16 && GET_MODE (x) == SImode)
1948 return 0;
1949
1950 if (RTX_IS ("+ri"))
1951 {
1952 /* This is more interesting, because different base registers
1953 allow for different displacements - both range and signedness
1954 - and it differs from chip series to chip series too. */
1955 int rn = REGNO (patternr[1]);
1956 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1957 switch (rn)
1958 {
1959 case A0_REGNO:
1960 case A1_REGNO:
1961 case SB_REGNO:
1962 /* The syntax only allows positive offsets, but when the
1963 offsets span the entire memory range, we can simulate
1964 negative offsets by wrapping. */
1965 if (TARGET_A16)
1966 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1967 if (rn == SB_REGNO)
1968 return (offs >= 0 && offs <= 65535 - mode_adjust);
1969 /* A0 or A1 */
1970 return (offs >= -16777216 && offs <= 16777215);
1971
1972 case FB_REGNO:
1973 if (TARGET_A16)
1974 return (offs >= -128 && offs <= 127 - mode_adjust);
1975 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1976
1977 case SP_REGNO:
1978 return (offs >= -128 && offs <= 127 - mode_adjust);
1979
1980 default:
1981 if (IS_PSEUDO (patternr[1], strict))
1982 return 1;
1983 return 0;
1984 }
1985 }
1986 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1987 {
1988 rtx reg = patternr[1];
1989
1990 /* We don't know where the symbol is, so only allow base
1991 registers which support displacements spanning the whole
1992 address range. */
1993 switch (REGNO (reg))
1994 {
1995 case A0_REGNO:
1996 case A1_REGNO:
1997 /* $sb needs a secondary reload, but since it's involved in
1998 memory address reloads too, we don't deal with it very
1999 well. */
2000 /* case SB_REGNO: */
2001 return 1;
2002 default:
2003 if (IS_PSEUDO (reg, strict))
2004 return 1;
2005 return 0;
2006 }
2007 }
2008 return 0;
2009 }
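
/* For illustration only (these mirror the ranges coded above and are
   not exhaustive): with TARGET_A16 and a HImode access
   (mode_adjust == 1),
       (reg:HI A0)                             is accepted ("r" case),
       (plus:HI (reg:HI FB) (const_int -4))    is accepted (FB allows -128..126),
       (plus:HI (reg:HI SP) (const_int 1000))  is rejected (SP allows -128..126).  */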
2010
2011 /* Implements REG_OK_FOR_BASE_P. */
2012 int
2013 m32c_reg_ok_for_base_p (rtx x, int strict)
2014 {
2015 if (GET_CODE (x) != REG)
2016 return 0;
2017 switch (REGNO (x))
2018 {
2019 case A0_REGNO:
2020 case A1_REGNO:
2021 case SB_REGNO:
2022 case FB_REGNO:
2023 case SP_REGNO:
2024 return 1;
2025 default:
2026 if (IS_PSEUDO (x, strict))
2027 return 1;
2028 return 0;
2029 }
2030 }
2031
2032 /* We have three choices for choosing fb->aN offsets. If we choose -128,
2033 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
2034 like this:
2035 EB 4B FF mova -128[$fb],$a0
2036 D8 0C FF FF mov.w:Q #0,-1[$a0]
2037
2038    Alternatively, we subtract the frame size and hopefully use 8-bit aN
2039 displacements:
2040 7B F4 stc $fb,$a0
2041 77 54 00 01 sub #256,$a0
2042 D8 08 01 mov.w:Q #0,1[$a0]
2043
2044 If we don't offset (i.e. offset by zero), we end up with:
2045 7B F4 stc $fb,$a0
2046 D8 0C 00 FF mov.w:Q #0,-256[$a0]
2047
2048 We have to subtract *something* so that we have a PLUS rtx to mark
2049 that we've done this reload. The -128 offset will never result in
2050 an 8-bit aN offset, and the payoff for the second case is five
2051 loads *if* those loads are within 256 bytes of the other end of the
2052 frame, so the third case seems best. Note that we subtract the
2053 zero, but detect that in the addhi3 pattern. */
2054
2055 #define BIG_FB_ADJ 0
2056
2057 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
2058 worry about is frame base offsets, as $fb has a limited
2059 displacement range. We deal with this by attempting to reload $fb
2060 itself into an address register; that seems to result in the best
2061 code. */
2062 #undef TARGET_LEGITIMIZE_ADDRESS
2063 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
2064 static rtx
2065 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2066 enum machine_mode mode)
2067 {
2068 #if DEBUG0
2069 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
2070 debug_rtx (x);
2071 fprintf (stderr, "\n");
2072 #endif
2073
2074 if (GET_CODE (x) == PLUS
2075 && GET_CODE (XEXP (x, 0)) == REG
2076 && REGNO (XEXP (x, 0)) == FB_REGNO
2077 && GET_CODE (XEXP (x, 1)) == CONST_INT
2078 && (INTVAL (XEXP (x, 1)) < -128
2079 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
2080 {
2081 /* reload FB to A_REGS */
2082 rtx temp = gen_reg_rtx (Pmode);
2083 x = copy_rtx (x);
2084 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
2085 XEXP (x, 0) = temp;
2086 }
2087
2088 return x;
2089 }
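
/* For example, with a QImode access the address
       (plus (reg FB) (const_int 300))
   is outside $fb's displacement range, so the code above turns it into
       (set (reg TEMP) (reg FB))
       (plus (reg TEMP) (const_int 300))
   where TEMP is a fresh pseudo that the register allocator can then
   place in an address register.  */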
2090
2091 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
2092 int
2093 m32c_legitimize_reload_address (rtx * x,
2094 enum machine_mode mode,
2095 int opnum,
2096 int type, int ind_levels ATTRIBUTE_UNUSED)
2097 {
2098 #if DEBUG0
2099 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
2100 mode_name[mode]);
2101 debug_rtx (*x);
2102 #endif
2103
2104 /* At one point, this function tried to get $fb copied to an address
2105 register, which in theory would maximize sharing, but gcc was
2106 *also* still trying to reload the whole address, and we'd run out
2107 of address registers. So we let gcc do the naive (but safe)
2108 reload instead, when the above function doesn't handle it for
2109 us.
2110
2111 The code below is a second attempt at the above. */
2112
2113 if (GET_CODE (*x) == PLUS
2114 && GET_CODE (XEXP (*x, 0)) == REG
2115 && REGNO (XEXP (*x, 0)) == FB_REGNO
2116 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2117 && (INTVAL (XEXP (*x, 1)) < -128
2118 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2119 {
2120 rtx sum;
2121 int offset = INTVAL (XEXP (*x, 1));
2122 int adjustment = -BIG_FB_ADJ;
2123
2124 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2125 GEN_INT (adjustment));
2126 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2127 if (type == RELOAD_OTHER)
2128 type = RELOAD_FOR_OTHER_ADDRESS;
2129 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2130 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2131 type);
2132 return 1;
2133 }
2134
2135 if (GET_CODE (*x) == PLUS
2136 && GET_CODE (XEXP (*x, 0)) == PLUS
2137 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2138 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2139 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2140 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2141 )
2142 {
2143 if (type == RELOAD_OTHER)
2144 type = RELOAD_FOR_OTHER_ADDRESS;
2145 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2146 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2147 type);
2148 return 1;
2149 }
2150
2151 return 0;
2152 }
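
/* For example, with BIG_FB_ADJ == 0 the first case above rewrites
       (plus (reg FB) (const_int 300))
   as
       (plus (plus (reg FB) (const_int 0)) (const_int 300))
   and pushes a reload of the inner PLUS into A_REGS; the addhi3
   pattern is expected to notice and drop the zero, as described in
   the BIG_FB_ADJ comment above.  */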
2153
2154 /* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2155 so we can allow anything. */
2156 int
2157 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2158 {
2159 return 1;
2160 }
2161
2162
2163 /* Return the appropriate mode for a named address pointer. */
2164 #undef TARGET_ADDR_SPACE_POINTER_MODE
2165 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2166 static enum machine_mode
2167 m32c_addr_space_pointer_mode (addr_space_t addrspace)
2168 {
2169 switch (addrspace)
2170 {
2171 case ADDR_SPACE_GENERIC:
2172 return TARGET_A24 ? PSImode : HImode;
2173 case ADDR_SPACE_FAR:
2174 return SImode;
2175 default:
2176 gcc_unreachable ();
2177 }
2178 }
2179
2180 /* Return the appropriate mode for a named address address. */
2181 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
2182 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2183 static enum machine_mode
2184 m32c_addr_space_address_mode (addr_space_t addrspace)
2185 {
2186 switch (addrspace)
2187 {
2188 case ADDR_SPACE_GENERIC:
2189 return TARGET_A24 ? PSImode : HImode;
2190 case ADDR_SPACE_FAR:
2191 return SImode;
2192 default:
2193 gcc_unreachable ();
2194 }
2195 }
2196
2197 /* Like m32c_legitimate_address_p, except with named addresses. */
2198 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
2199 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
2200 m32c_addr_space_legitimate_address_p
2201 static bool
2202 m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
2203 bool strict, addr_space_t as)
2204 {
2205 if (as == ADDR_SPACE_FAR)
2206 {
2207 if (TARGET_A24)
2208 return 0;
2209 encode_pattern (x);
2210 if (RTX_IS ("r"))
2211 {
2212 if (GET_MODE (x) != SImode)
2213 return 0;
2214 switch (REGNO (patternr[0]))
2215 {
2216 case A0_REGNO:
2217 return 1;
2218
2219 default:
2220 if (IS_PSEUDO (patternr[0], strict))
2221 return 1;
2222 return 0;
2223 }
2224 }
2225 if (RTX_IS ("+^Sri"))
2226 {
2227 int rn = REGNO (patternr[3]);
2228 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2229 if (GET_MODE (patternr[3]) != HImode)
2230 return 0;
2231 switch (rn)
2232 {
2233 case A0_REGNO:
2234 return (offs >= 0 && offs <= 0xfffff);
2235
2236 default:
2237 if (IS_PSEUDO (patternr[3], strict))
2238 return 1;
2239 return 0;
2240 }
2241 }
2242 if (RTX_IS ("+^Srs"))
2243 {
2244 int rn = REGNO (patternr[3]);
2245 if (GET_MODE (patternr[3]) != HImode)
2246 return 0;
2247 switch (rn)
2248 {
2249 case A0_REGNO:
2250 return 1;
2251
2252 default:
2253 if (IS_PSEUDO (patternr[3], strict))
2254 return 1;
2255 return 0;
2256 }
2257 }
2258 if (RTX_IS ("+^S+ris"))
2259 {
2260 int rn = REGNO (patternr[4]);
2261 if (GET_MODE (patternr[4]) != HImode)
2262 return 0;
2263 switch (rn)
2264 {
2265 case A0_REGNO:
2266 return 1;
2267
2268 default:
2269 if (IS_PSEUDO (patternr[4], strict))
2270 return 1;
2271 return 0;
2272 }
2273 }
2274 if (RTX_IS ("s"))
2275 {
2276 return 1;
2277 }
2278 return 0;
2279 }
2280
2281 else if (as != ADDR_SPACE_GENERIC)
2282 gcc_unreachable ();
2283
2284 return m32c_legitimate_address_p (mode, x, strict);
2285 }
2286
2287 /* Like m32c_legitimize_address, except with named address support. */
2288 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2289 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2290 static rtx
2291 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2292 addr_space_t as)
2293 {
2294 if (as != ADDR_SPACE_GENERIC)
2295 {
2296 #if DEBUG0
2297 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2298 debug_rtx (x);
2299 fprintf (stderr, "\n");
2300 #endif
2301
2302 if (GET_CODE (x) != REG)
2303 {
2304 x = force_reg (SImode, x);
2305 }
2306 return x;
2307 }
2308
2309 return m32c_legitimize_address (x, oldx, mode);
2310 }
2311
2312 /* Determine if one named address space is a subset of another. */
2313 #undef TARGET_ADDR_SPACE_SUBSET_P
2314 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2315 static bool
2316 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2317 {
2318 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2319 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2320
2321 if (subset == superset)
2322 return true;
2323
2324 else
2325 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2326 }
2327
2328 #undef TARGET_ADDR_SPACE_CONVERT
2329 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2330 /* Convert from one address space to another. */
2331 static rtx
2332 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2333 {
2334 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2335 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2336 rtx result;
2337
2338 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2339 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2340
2341 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2342 {
2343 /* This is unpredictable, as we're truncating off usable address
2344 bits. */
2345
2346 result = gen_reg_rtx (HImode);
2347 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2348 return result;
2349 }
2350 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2351 {
2352 /* This always works. */
2353 result = gen_reg_rtx (SImode);
2354 emit_insn (gen_zero_extendhisi2 (result, op));
2355 return result;
2356 }
2357 else
2358 gcc_unreachable ();
2359 }
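
/* For illustration (assuming the usual __far address space keyword
   maps to ADDR_SPACE_FAR):
       char __far *f = ...;
       char *n = (char *) f;              -- far to generic: the 32-bit far
                                             pointer is truncated to HImode,
                                             possibly losing address bits
       char __far *g = (char __far *) n;  -- generic to far: zero-extended
                                             to SImode, always safe
   matching the two branches above.  */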
2360
2361 /* Condition Code Status */
2362
2363 #undef TARGET_FIXED_CONDITION_CODE_REGS
2364 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2365 static bool
2366 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2367 {
2368 *p1 = FLG_REGNO;
2369 *p2 = INVALID_REGNUM;
2370 return true;
2371 }
2372
2373 /* Describing Relative Costs of Operations */
2374
2375 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2376 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2377 no opcodes to do that). We also discourage use of mem* registers
2378 since they're really memory. */
2379
2380 #undef TARGET_REGISTER_MOVE_COST
2381 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2382
2383 static int
2384 m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2385 reg_class_t to)
2386 {
2387 int cost = COSTS_N_INSNS (3);
2388 HARD_REG_SET cc;
2389
2390 /* FIXME: pick real values, but not 2 for now. */
2391 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2392 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2393
2394 if (mode == QImode
2395 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2396 {
2397 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2398 cost = COSTS_N_INSNS (1000);
2399 else
2400 cost = COSTS_N_INSNS (80);
2401 }
2402
2403 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2404 cost = COSTS_N_INSNS (1000);
2405
2406 if (reg_classes_intersect_p (from, CR_REGS))
2407 cost += COSTS_N_INSNS (5);
2408
2409 if (reg_classes_intersect_p (to, CR_REGS))
2410 cost += COSTS_N_INSNS (5);
2411
2412 if (from == MEM_REGS || to == MEM_REGS)
2413 cost += COSTS_N_INSNS (50);
2414 else if (reg_classes_intersect_p (from, MEM_REGS)
2415 || reg_classes_intersect_p (to, MEM_REGS))
2416 cost += COSTS_N_INSNS (10);
2417
2418 #if DEBUG0
2419 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2420 mode_name[mode], class_names[(int) from], class_names[(int) to],
2421 cost);
2422 #endif
2423 return cost;
2424 }
2425
2426 /* Implements TARGET_MEMORY_MOVE_COST. */
2427
2428 #undef TARGET_MEMORY_MOVE_COST
2429 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2430
2431 static int
2432 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2433 reg_class_t rclass ATTRIBUTE_UNUSED,
2434 bool in ATTRIBUTE_UNUSED)
2435 {
2436 /* FIXME: pick real values. */
2437 return COSTS_N_INSNS (10);
2438 }
2439
2440 /* Here we try to describe when we use multiple opcodes for one RTX so
2441 that gcc knows when to use them. */
2442 #undef TARGET_RTX_COSTS
2443 #define TARGET_RTX_COSTS m32c_rtx_costs
2444 static bool
2445 m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2446 bool speed ATTRIBUTE_UNUSED)
2447 {
2448 switch (code)
2449 {
2450 case REG:
2451 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2452 *total += COSTS_N_INSNS (500);
2453 else
2454 *total += COSTS_N_INSNS (1);
2455 return true;
2456
2457 case ASHIFT:
2458 case LSHIFTRT:
2459 case ASHIFTRT:
2460 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2461 {
2462 /* mov.b r1l, r1h */
2463 *total += COSTS_N_INSNS (1);
2464 return true;
2465 }
2466 if (INTVAL (XEXP (x, 1)) > 8
2467 || INTVAL (XEXP (x, 1)) < -8)
2468 {
2469 /* mov.b #N, r1l */
2470 /* mov.b r1l, r1h */
2471 *total += COSTS_N_INSNS (2);
2472 return true;
2473 }
2474 return true;
2475
2476 case LE:
2477 case LEU:
2478 case LT:
2479 case LTU:
2480 case GT:
2481 case GTU:
2482 case GE:
2483 case GEU:
2484 case NE:
2485 case EQ:
2486 if (outer_code == SET)
2487 {
2488 *total += COSTS_N_INSNS (2);
2489 return true;
2490 }
2491 break;
2492
2493 case ZERO_EXTRACT:
2494 {
2495 rtx dest = XEXP (x, 0);
2496 rtx addr = XEXP (dest, 0);
2497 switch (GET_CODE (addr))
2498 {
2499 case CONST_INT:
2500 *total += COSTS_N_INSNS (1);
2501 break;
2502 case SYMBOL_REF:
2503 *total += COSTS_N_INSNS (3);
2504 break;
2505 default:
2506 *total += COSTS_N_INSNS (2);
2507 break;
2508 }
2509 return true;
2510 }
2511 break;
2512
2513 default:
2514 /* Reasonable default. */
2515 if (TARGET_A16 && GET_MODE(x) == SImode)
2516 *total += COSTS_N_INSNS (2);
2517 break;
2518 }
2519 return false;
2520 }
2521
2522 #undef TARGET_ADDRESS_COST
2523 #define TARGET_ADDRESS_COST m32c_address_cost
2524 static int
2525 m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2526 {
2527 int i;
2528 /* fprintf(stderr, "\naddress_cost\n");
2529 debug_rtx(addr);*/
2530 switch (GET_CODE (addr))
2531 {
2532 case CONST_INT:
2533 i = INTVAL (addr);
2534 if (i == 0)
2535 return COSTS_N_INSNS(1);
2536 if (0 < i && i <= 255)
2537 return COSTS_N_INSNS(2);
2538 if (0 < i && i <= 65535)
2539 return COSTS_N_INSNS(3);
2540 return COSTS_N_INSNS(4);
2541 case SYMBOL_REF:
2542 return COSTS_N_INSNS(4);
2543 case REG:
2544 return COSTS_N_INSNS(1);
2545 case PLUS:
2546 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2547 {
2548 i = INTVAL (XEXP (addr, 1));
2549 if (i == 0)
2550 return COSTS_N_INSNS(1);
2551 if (0 < i && i <= 255)
2552 return COSTS_N_INSNS(2);
2553 if (0 < i && i <= 65535)
2554 return COSTS_N_INSNS(3);
2555 }
2556 return COSTS_N_INSNS(4);
2557 default:
2558 return 0;
2559 }
2560 }
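
/* For example, given the cases above, (reg A0) costs COSTS_N_INSNS (1),
   (plus (reg A0) (const_int 10)) costs COSTS_N_INSNS (2),
   (plus (reg FB) (const_int 1000)) costs COSTS_N_INSNS (3), and a bare
   SYMBOL_REF costs COSTS_N_INSNS (4), roughly tracking the number of
   displacement bytes each form needs.  */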
2561
2562 /* Defining the Output Assembler Language */
2563
2564 /* The Overall Framework of an Assembler File */
2565
2566 #undef TARGET_HAVE_NAMED_SECTIONS
2567 #define TARGET_HAVE_NAMED_SECTIONS true
2568
2569 /* Output of Data */
2570
2571 /* We may have 24-bit sizes, 24 bits being the native address size.
2572    Currently unused, but provided for completeness. */
2573 #undef TARGET_ASM_INTEGER
2574 #define TARGET_ASM_INTEGER m32c_asm_integer
2575 static bool
2576 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2577 {
2578 switch (size)
2579 {
2580 case 3:
2581 fprintf (asm_out_file, "\t.3byte\t");
2582 output_addr_const (asm_out_file, x);
2583 fputc ('\n', asm_out_file);
2584 return true;
2585 case 4:
2586 if (GET_CODE (x) == SYMBOL_REF)
2587 {
2588 fprintf (asm_out_file, "\t.long\t");
2589 output_addr_const (asm_out_file, x);
2590 fputc ('\n', asm_out_file);
2591 return true;
2592 }
2593 break;
2594 }
2595 return default_assemble_integer (x, size, aligned_p);
2596 }
2597
2598 /* Output of Assembler Instructions */
2599
2600 /* We use a lookup table because the addressing modes are non-orthogonal. */
2601
2602 static struct
2603 {
2604 char code;
2605 char const *pattern;
2606 char const *format;
2607 }
2608 const conversions[] = {
2609 { 0, "r", "0" },
2610
2611 { 0, "mr", "z[1]" },
2612 { 0, "m+ri", "3[2]" },
2613 { 0, "m+rs", "3[2]" },
2614 { 0, "m+^Zrs", "5[4]" },
2615 { 0, "m+^Zri", "5[4]" },
2616 { 0, "m+^Z+ris", "7+6[5]" },
2617 { 0, "m+^Srs", "5[4]" },
2618 { 0, "m+^Sri", "5[4]" },
2619 { 0, "m+^S+ris", "7+6[5]" },
2620 { 0, "m+r+si", "4+5[2]" },
2621 { 0, "ms", "1" },
2622 { 0, "mi", "1" },
2623 { 0, "m+si", "2+3" },
2624
2625 { 0, "mmr", "[z[2]]" },
2626 { 0, "mm+ri", "[4[3]]" },
2627 { 0, "mm+rs", "[4[3]]" },
2628 { 0, "mm+r+si", "[5+6[3]]" },
2629 { 0, "mms", "[[2]]" },
2630 { 0, "mmi", "[[2]]" },
2631 { 0, "mm+si", "[4[3]]" },
2632
2633 { 0, "i", "#0" },
2634 { 0, "s", "#0" },
2635 { 0, "+si", "#1+2" },
2636 { 0, "l", "#0" },
2637
2638 { 'l', "l", "0" },
2639 { 'd', "i", "0" },
2640 { 'd', "s", "0" },
2641 { 'd', "+si", "1+2" },
2642 { 'D', "i", "0" },
2643 { 'D', "s", "0" },
2644 { 'D', "+si", "1+2" },
2645 { 'x', "i", "#0" },
2646 { 'X', "i", "#0" },
2647 { 'm', "i", "#0" },
2648 { 'b', "i", "#0" },
2649 { 'B', "i", "0" },
2650 { 'p', "i", "0" },
2651
2652 { 0, 0, 0 }
2653 };
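
/* How to read the table above, by example: for a (mem (plus (reg)
   (const_int))) operand the encoded pattern is "m+ri" and the format
   "3[2]" prints patternr[3] (the constant) followed by patternr[2]
   (the register) in brackets, e.g. "-4[fb]".  For "mr" the format
   "z[1]" prints an explicit zero displacement first when the base is
   $sb, $fb or $sp, e.g. "0[fb]".  Digits index patternr[], 'z' is the
   optional zero, a backslash quotes the next character, and anything
   else is emitted literally (see m32c_print_operand below).  */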
2654
2655 /* This is in order according to the bitfield that pushm/popm use. */
2656 static char const *pushm_regs[] = {
2657 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2658 };
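
/* For example, the 'p' conversion in m32c_print_operand below walks the
   bits of a constant from bit 7 down to bit 0 and prints the matching
   names from this table, so a mask of 0xC0 (bits 7 and 6) comes out as
   "r0,r1" and a mask of 0x03 (bits 1 and 0) as "sb,fb".  */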
2659
2660 /* Implements PRINT_OPERAND. */
2661 void
2662 m32c_print_operand (FILE * file, rtx x, int code)
2663 {
2664 int i, j, b;
2665 const char *comma;
2666 HOST_WIDE_INT ival;
2667 int unsigned_const = 0;
2668 int force_sign;
2669
2670 /* Multiplies; constants are converted to sign-extended format but
2671 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2672 need. */
2673 if (code == 'u')
2674 {
2675 unsigned_const = 2;
2676 code = 0;
2677 }
2678 if (code == 'U')
2679 {
2680 unsigned_const = 1;
2681 code = 0;
2682 }
2683 /* This one is only for debugging; you can put it in a pattern to
2684 force this error. */
2685 if (code == '!')
2686 {
2687 fprintf (stderr, "dj: unreviewed pattern:");
2688 if (current_output_insn)
2689 debug_rtx (current_output_insn);
2690 gcc_unreachable ();
2691 }
2692 /* PSImode operations are either .w or .l depending on the target. */
2693 if (code == '&')
2694 {
2695 if (TARGET_A16)
2696 fprintf (file, "w");
2697 else
2698 fprintf (file, "l");
2699 return;
2700 }
2701 /* Inverted conditionals. */
2702 if (code == 'C')
2703 {
2704 switch (GET_CODE (x))
2705 {
2706 case LE:
2707 fputs ("gt", file);
2708 break;
2709 case LEU:
2710 fputs ("gtu", file);
2711 break;
2712 case LT:
2713 fputs ("ge", file);
2714 break;
2715 case LTU:
2716 fputs ("geu", file);
2717 break;
2718 case GT:
2719 fputs ("le", file);
2720 break;
2721 case GTU:
2722 fputs ("leu", file);
2723 break;
2724 case GE:
2725 fputs ("lt", file);
2726 break;
2727 case GEU:
2728 fputs ("ltu", file);
2729 break;
2730 case NE:
2731 fputs ("eq", file);
2732 break;
2733 case EQ:
2734 fputs ("ne", file);
2735 break;
2736 default:
2737 gcc_unreachable ();
2738 }
2739 return;
2740 }
2741 /* Regular conditionals. */
2742 if (code == 'c')
2743 {
2744 switch (GET_CODE (x))
2745 {
2746 case LE:
2747 fputs ("le", file);
2748 break;
2749 case LEU:
2750 fputs ("leu", file);
2751 break;
2752 case LT:
2753 fputs ("lt", file);
2754 break;
2755 case LTU:
2756 fputs ("ltu", file);
2757 break;
2758 case GT:
2759 fputs ("gt", file);
2760 break;
2761 case GTU:
2762 fputs ("gtu", file);
2763 break;
2764 case GE:
2765 fputs ("ge", file);
2766 break;
2767 case GEU:
2768 fputs ("geu", file);
2769 break;
2770 case NE:
2771 fputs ("ne", file);
2772 break;
2773 case EQ:
2774 fputs ("eq", file);
2775 break;
2776 default:
2777 gcc_unreachable ();
2778 }
2779 return;
2780 }
2781 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2782 operand. */
2783 if (code == 'h' && GET_MODE (x) == SImode)
2784 {
2785 x = m32c_subreg (HImode, x, SImode, 0);
2786 code = 0;
2787 }
2788 if (code == 'H' && GET_MODE (x) == SImode)
2789 {
2790 x = m32c_subreg (HImode, x, SImode, 2);
2791 code = 0;
2792 }
2793 if (code == 'h' && GET_MODE (x) == HImode)
2794 {
2795 x = m32c_subreg (QImode, x, HImode, 0);
2796 code = 0;
2797 }
2798 if (code == 'H' && GET_MODE (x) == HImode)
2799 {
2800 /* We can't actually represent this as an rtx. Do it here. */
2801 if (GET_CODE (x) == REG)
2802 {
2803 switch (REGNO (x))
2804 {
2805 case R0_REGNO:
2806 fputs ("r0h", file);
2807 return;
2808 case R1_REGNO:
2809 fputs ("r1h", file);
2810 return;
2811 default:
2812 gcc_unreachable();
2813 }
2814 }
2815 /* This should be a MEM. */
2816 x = m32c_subreg (QImode, x, HImode, 1);
2817 code = 0;
2818 }
2819 /* This is for BMcond, which always wants word register names. */
2820 if (code == 'h' && GET_MODE (x) == QImode)
2821 {
2822 if (GET_CODE (x) == REG)
2823 x = gen_rtx_REG (HImode, REGNO (x));
2824 code = 0;
2825 }
2826 /* 'x' and 'X' need to be ignored for non-immediates. */
2827 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2828 code = 0;
2829
2830 encode_pattern (x);
2831 force_sign = 0;
2832 for (i = 0; conversions[i].pattern; i++)
2833 if (conversions[i].code == code
2834 && streq (conversions[i].pattern, pattern))
2835 {
2836 for (j = 0; conversions[i].format[j]; j++)
2837 /* backslash quotes the next character in the output pattern. */
2838 if (conversions[i].format[j] == '\\')
2839 {
2840 fputc (conversions[i].format[j + 1], file);
2841 j++;
2842 }
2843 /* Digits in the output pattern indicate that the
2844 corresponding RTX is to be output at that point. */
2845 else if (ISDIGIT (conversions[i].format[j]))
2846 {
2847 rtx r = patternr[conversions[i].format[j] - '0'];
2848 switch (GET_CODE (r))
2849 {
2850 case REG:
2851 fprintf (file, "%s",
2852 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2853 break;
2854 case CONST_INT:
2855 switch (code)
2856 {
2857 case 'b':
2858 case 'B':
2859 {
2860 int v = INTVAL (r);
2861 int i = (int) exact_log2 (v);
2862 if (i == -1)
2863 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2864 if (i == -1)
2865 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2866 /* Bit position. */
2867 fprintf (file, "%d", i);
2868 }
2869 break;
2870 case 'x':
2871 /* Unsigned byte. */
2872 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2873 INTVAL (r) & 0xff);
2874 break;
2875 case 'X':
2876 /* Unsigned word. */
2877 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2878 INTVAL (r) & 0xffff);
2879 break;
2880 case 'p':
2881 /* pushm and popm encode a register set into a single byte. */
2882 comma = "";
2883 for (b = 7; b >= 0; b--)
2884 if (INTVAL (r) & (1 << b))
2885 {
2886 fprintf (file, "%s%s", comma, pushm_regs[b]);
2887 comma = ",";
2888 }
2889 break;
2890 case 'm':
2891 /* "Minus". Output -X */
2892 ival = (-INTVAL (r) & 0xffff);
2893 if (ival & 0x8000)
2894 ival = ival - 0x10000;
2895 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2896 break;
2897 default:
2898 ival = INTVAL (r);
2899 if (conversions[i].format[j + 1] == '[' && ival < 0)
2900 {
2901 /* We can simulate negative displacements by
2902 taking advantage of address space
2903 wrapping when the offset can span the
2904 entire address range. */
2905 rtx base =
2906 patternr[conversions[i].format[j + 2] - '0'];
2907 if (GET_CODE (base) == REG)
2908 switch (REGNO (base))
2909 {
2910 case A0_REGNO:
2911 case A1_REGNO:
2912 if (TARGET_A24)
2913 ival = 0x1000000 + ival;
2914 else
2915 ival = 0x10000 + ival;
2916 break;
2917 case SB_REGNO:
2918 if (TARGET_A16)
2919 ival = 0x10000 + ival;
2920 break;
2921 }
2922 }
2923 else if (code == 'd' && ival < 0 && j == 0)
2924 /* The "mova" opcode is used to do addition by
2925 computing displacements, but again, we need
2926 displacements to be unsigned *if* they're
2927 the only component of the displacement
2928 (i.e. no "symbol-4" type displacement). */
2929 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2930
2931 if (conversions[i].format[j] == '0')
2932 {
2933 /* More conversions to unsigned. */
2934 if (unsigned_const == 2)
2935 ival &= 0xffff;
2936 if (unsigned_const == 1)
2937 ival &= 0xff;
2938 }
2939 if (streq (conversions[i].pattern, "mi")
2940 || streq (conversions[i].pattern, "mmi"))
2941 {
2942 /* Integers used as addresses are unsigned. */
2943 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2944 }
2945 if (force_sign && ival >= 0)
2946 fputc ('+', file);
2947 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2948 break;
2949 }
2950 break;
2951 case CONST_DOUBLE:
2952 /* We don't have const_double constants. If it
2953 happens, make it obvious. */
2954 fprintf (file, "[const_double 0x%lx]",
2955 (unsigned long) CONST_DOUBLE_HIGH (r));
2956 break;
2957 case SYMBOL_REF:
2958 assemble_name (file, XSTR (r, 0));
2959 break;
2960 case LABEL_REF:
2961 output_asm_label (r);
2962 break;
2963 default:
2964 fprintf (stderr, "don't know how to print this operand:");
2965 debug_rtx (r);
2966 gcc_unreachable ();
2967 }
2968 }
2969 else
2970 {
2971 if (conversions[i].format[j] == 'z')
2972 {
2973 /* Some addressing modes *must* have a displacement,
2974 so insert a zero here if needed. */
2975 int k;
2976 for (k = j + 1; conversions[i].format[k]; k++)
2977 if (ISDIGIT (conversions[i].format[k]))
2978 {
2979 rtx reg = patternr[conversions[i].format[k] - '0'];
2980 if (GET_CODE (reg) == REG
2981 && (REGNO (reg) == SB_REGNO
2982 || REGNO (reg) == FB_REGNO
2983 || REGNO (reg) == SP_REGNO))
2984 fputc ('0', file);
2985 }
2986 continue;
2987 }
2988 /* Signed displacements off symbols need to have signs
2989 blended cleanly. */
2990 if (conversions[i].format[j] == '+'
2991 && (!code || code == 'D' || code == 'd')
2992 && ISDIGIT (conversions[i].format[j + 1])
2993 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2994 == CONST_INT))
2995 {
2996 force_sign = 1;
2997 continue;
2998 }
2999 fputc (conversions[i].format[j], file);
3000 }
3001 break;
3002 }
3003 if (!conversions[i].pattern)
3004 {
3005 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
3006 pattern);
3007 debug_rtx (x);
3008 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
3009 }
3010
3011 return;
3012 }
3013
3014 /* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
3015 above for descriptions of what these do. */
3016 int
3017 m32c_print_operand_punct_valid_p (int c)
3018 {
3019 if (c == '&' || c == '!')
3020 return 1;
3021 return 0;
3022 }
3023
3024 /* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
3025 void
3026 m32c_print_operand_address (FILE * stream, rtx address)
3027 {
3028 if (GET_CODE (address) == MEM)
3029 address = XEXP (address, 0);
3030 else
3031 /* cf: gcc.dg/asm-4.c. */
3032 gcc_assert (GET_CODE (address) == REG);
3033
3034 m32c_print_operand (stream, address, 0);
3035 }
3036
3037 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
3038 differently than general registers. */
3039 void
3040 m32c_output_reg_push (FILE * s, int regno)
3041 {
3042 if (regno == FLG_REGNO)
3043 fprintf (s, "\tpushc\tflg\n");
3044 else
3045 fprintf (s, "\tpush.%c\t%s\n",
3046 " bwll"[reg_push_size (regno)], reg_names[regno]);
3047 }
3048
3049 /* Likewise for ASM_OUTPUT_REG_POP. */
3050 void
3051 m32c_output_reg_pop (FILE * s, int regno)
3052 {
3053 if (regno == FLG_REGNO)
3054 fprintf (s, "\tpopc\tflg\n");
3055 else
3056 fprintf (s, "\tpop.%c\t%s\n",
3057 " bwll"[reg_push_size (regno)], reg_names[regno]);
3058 }
3059
3060 /* Defining target-specific uses of `__attribute__' */
3061
3062 /* Used to simplify the logic below. Find the attributes wherever
3063 they may be. */
3064 #define M32C_ATTRIBUTES(decl) \
3065 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
3066 : DECL_ATTRIBUTES (decl) \
3067 ? (DECL_ATTRIBUTES (decl)) \
3068 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
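
/* Typical uses of the attributes handled below look like this (the
   function names are made up for illustration; only the attribute
   names and the 18..255 vector range come from this file):

       void __attribute__((interrupt)) timer_isr (void);
       void __attribute__((bank_switch)) dma_isr (void);
       void __attribute__((fast_interrupt)) tick_isr (void);
       void __attribute__((function_vector(24))) svc_entry (void);  */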
3069
3070 /* Returns TRUE if the given tree has the "interrupt" attribute. */
3071 static int
3072 interrupt_p (tree node ATTRIBUTE_UNUSED)
3073 {
3074 tree list = M32C_ATTRIBUTES (node);
3075 while (list)
3076 {
3077 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3078 return 1;
3079 list = TREE_CHAIN (list);
3080 }
3081 return fast_interrupt_p (node);
3082 }
3083
3084 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
3085 static int
3086 bank_switch_p (tree node ATTRIBUTE_UNUSED)
3087 {
3088 tree list = M32C_ATTRIBUTES (node);
3089 while (list)
3090 {
3091 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3092 return 1;
3093 list = TREE_CHAIN (list);
3094 }
3095 return 0;
3096 }
3097
3098 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
3099 static int
3100 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3101 {
3102 tree list = M32C_ATTRIBUTES (node);
3103 while (list)
3104 {
3105 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3106 return 1;
3107 list = TREE_CHAIN (list);
3108 }
3109 return 0;
3110 }
3111
3112 static tree
3113 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
3114 tree name ATTRIBUTE_UNUSED,
3115 tree args ATTRIBUTE_UNUSED,
3116 int flags ATTRIBUTE_UNUSED,
3117 bool * no_add_attrs ATTRIBUTE_UNUSED)
3118 {
3119 return NULL_TREE;
3120 }
3121
3122 /* Returns TRUE if given tree has the "function_vector" attribute. */
3123 int
3124 m32c_special_page_vector_p (tree func)
3125 {
3126 tree list;
3127
3128 if (TREE_CODE (func) != FUNCTION_DECL)
3129 return 0;
3130
3131 list = M32C_ATTRIBUTES (func);
3132 while (list)
3133 {
3134 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3135 return 1;
3136 list = TREE_CHAIN (list);
3137 }
3138 return 0;
3139 }
3140
3141 static tree
3142 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
3143 tree name ATTRIBUTE_UNUSED,
3144 tree args ATTRIBUTE_UNUSED,
3145 int flags ATTRIBUTE_UNUSED,
3146 bool * no_add_attrs ATTRIBUTE_UNUSED)
3147 {
3148 if (TARGET_R8C)
3149 {
3150       /* The attribute is not supported for the R8C target.  */
3151       warning (OPT_Wattributes,
3152                "%qE attribute is not supported for the R8C target",
3153 name);
3154 *no_add_attrs = true;
3155 }
3156 else if (TREE_CODE (*node) != FUNCTION_DECL)
3157 {
3158 /* The attribute must be applied to functions only. */
3159 warning (OPT_Wattributes,
3160 "%qE attribute applies only to functions",
3161 name);
3162 *no_add_attrs = true;
3163 }
3164 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
3165 {
3166 /* The argument must be a constant integer. */
3167 warning (OPT_Wattributes,
3168 "%qE attribute argument not an integer constant",
3169 name);
3170 *no_add_attrs = true;
3171 }
3172 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
3173 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
3174 {
3175       /* The argument value must be between 18 and 255.  */
3176       warning (OPT_Wattributes,
3177                "%qE attribute argument should be between 18 and 255",
3178 name);
3179 *no_add_attrs = true;
3180 }
3181 return NULL_TREE;
3182 }
3183
3184 /* If the function has the 'function_vector' attribute, return its
3185    function vector number; otherwise return zero. */
3186 int
3187 current_function_special_page_vector (rtx x)
3188 {
3189 int num;
3190
3191 if ((GET_CODE(x) == SYMBOL_REF)
3192 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3193 {
3194 tree list;
3195 tree t = SYMBOL_REF_DECL (x);
3196
3197 if (TREE_CODE (t) != FUNCTION_DECL)
3198 return 0;
3199
3200 list = M32C_ATTRIBUTES (t);
3201 while (list)
3202 {
3203 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3204 {
3205 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3206 return num;
3207 }
3208
3209 list = TREE_CHAIN (list);
3210 }
3211
3212 return 0;
3213 }
3214 else
3215 return 0;
3216 }
3217
3218 #undef TARGET_ATTRIBUTE_TABLE
3219 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3220 static const struct attribute_spec m32c_attribute_table[] = {
3221 {"interrupt", 0, 0, false, false, false, interrupt_handler},
3222 {"bank_switch", 0, 0, false, false, false, interrupt_handler},
3223 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler},
3224 {"function_vector", 1, 1, true, false, false, function_vector_handler},
3225 {0, 0, 0, 0, 0, 0, 0}
3226 };
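
/* The columns above are those of struct attribute_spec: attribute name,
   minimum and maximum number of arguments, whether a decl is required,
   whether a type is required, whether a function type is required, and
   the handler.  So "function_vector" takes exactly one argument and
   must be attached to a declaration, while the interrupt-style
   attributes take none.  */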
3227
3228 #undef TARGET_COMP_TYPE_ATTRIBUTES
3229 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3230 static int
3231 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3232 const_tree type2 ATTRIBUTE_UNUSED)
3233 {
3234 /* 0=incompatible 1=compatible 2=warning */
3235 return 1;
3236 }
3237
3238 #undef TARGET_INSERT_ATTRIBUTES
3239 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3240 static void
3241 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3242 tree * attr_ptr ATTRIBUTE_UNUSED)
3243 {
3244 unsigned addr;
3245 /* See if we need to make #pragma address variables volatile. */
3246
3247 if (TREE_CODE (node) == VAR_DECL)
3248 {
3249       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3250 if (m32c_get_pragma_address (name, &addr))
3251 {
3252 TREE_THIS_VOLATILE (node) = true;
3253 }
3254 }
3255 }
3256
3257
3258 struct GTY(()) pragma_entry {
3259 const char *varname;
3260 unsigned address;
3261 };
3262 typedef struct pragma_entry pragma_entry;
3263
3264 /* Hash table of pragma info. */
3265 static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3266
3267 static int
3268 pragma_entry_eq (const void *p1, const void *p2)
3269 {
3270 const pragma_entry *old = (const pragma_entry *) p1;
3271 const char *new_name = (const char *) p2;
3272
3273 return strcmp (old->varname, new_name) == 0;
3274 }
3275
3276 static hashval_t
3277 pragma_entry_hash (const void *p)
3278 {
3279 const pragma_entry *old = (const pragma_entry *) p;
3280 return htab_hash_string (old->varname);
3281 }
3282
3283 void
3284 m32c_note_pragma_address (const char *varname, unsigned address)
3285 {
3286 pragma_entry **slot;
3287
3288 if (!pragma_htab)
3289 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3290 pragma_entry_eq, NULL);
3291
3292 slot = (pragma_entry **)
3293 htab_find_slot_with_hash (pragma_htab, varname,
3294 htab_hash_string (varname), INSERT);
3295
3296 if (!*slot)
3297 {
3298 *slot = ggc_alloc_pragma_entry ();
3299 (*slot)->varname = ggc_strdup (varname);
3300 }
3301 (*slot)->address = address;
3302 }
3303
3304 static bool
3305 m32c_get_pragma_address (const char *varname, unsigned *address)
3306 {
3307 pragma_entry **slot;
3308
3309 if (!pragma_htab)
3310 return false;
3311
3312 slot = (pragma_entry **)
3313 htab_find_slot_with_hash (pragma_htab, varname,
3314 htab_hash_string (varname), NO_INSERT);
3315 if (slot && *slot)
3316 {
3317 *address = (*slot)->address;
3318 return true;
3319 }
3320 return false;
3321 }
3322
3323 void
3324 m32c_output_aligned_common (FILE *stream, tree decl, const char *name,
3325 int size, int align, int global)
3326 {
3327 unsigned address;
3328
3329 if (m32c_get_pragma_address (name, &address))
3330 {
3331 /* We never output these as global. */
3332 assemble_name (stream, name);
3333 fprintf (stream, " = 0x%04x\n", address);
3334 return;
3335 }
3336 if (!global)
3337 {
3338 fprintf (stream, "\t.local\t");
3339 assemble_name (stream, name);
3340 fprintf (stream, "\n");
3341 }
3342 fprintf (stream, "\t.comm\t");
3343 assemble_name (stream, name);
3344 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3345 }
3346
3347 /* Predicates */
3348
3349 /* This is a list of legal subregs of hard regs. */
3350 static const struct {
3351 unsigned char outer_mode_size;
3352 unsigned char inner_mode_size;
3353 unsigned char byte_mask;
3354 unsigned char legal_when;
3355 unsigned int regno;
3356 } legal_subregs[] = {
3357 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3358 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3359 {1, 2, 0x01, 1, A0_REGNO},
3360 {1, 2, 0x01, 1, A1_REGNO},
3361
3362 {1, 4, 0x01, 1, A0_REGNO},
3363 {1, 4, 0x01, 1, A1_REGNO},
3364
3365 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3366 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3367 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3368 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3369 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3370
3371 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
3372 };
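
/* Reading the table above by example: (subreg:QI (reg:HI R0) 1) is r0h;
   it matches the first entry (outer size 1, inner size 2, byte mask
   0x03, legal_when 1) and so is always allowed.  (subreg:HI
   (reg:SI A0) 2) only matches the legal_when == 16 entry for A0, so it
   is allowed on TARGET_A16 parts but rejected on TARGET_A24 parts.  */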
3373
3374 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3375 support. We also bail on MEMs with illegal addresses. */
3376 bool
3377 m32c_illegal_subreg_p (rtx op)
3378 {
3379 int offset;
3380 unsigned int i;
3381 int src_mode, dest_mode;
3382
3383 if (GET_CODE (op) == MEM
3384 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3385 {
3386 return true;
3387 }
3388
3389 if (GET_CODE (op) != SUBREG)
3390 return false;
3391
3392 dest_mode = GET_MODE (op);
3393 offset = SUBREG_BYTE (op);
3394 op = SUBREG_REG (op);
3395 src_mode = GET_MODE (op);
3396
3397 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3398 return false;
3399 if (GET_CODE (op) != REG)
3400 return false;
3401 if (REGNO (op) >= MEM0_REGNO)
3402 return false;
3403
3404 offset = (1 << offset);
3405
3406 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3407 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3408 && legal_subregs[i].regno == REGNO (op)
3409 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3410 && legal_subregs[i].byte_mask & offset)
3411 {
3412 switch (legal_subregs[i].legal_when)
3413 {
3414 case 1:
3415 return false;
3416 case 16:
3417 if (TARGET_A16)
3418 return false;
3419 break;
3420 case 24:
3421 if (TARGET_A24)
3422 return false;
3423 break;
3424 }
3425 }
3426 return true;
3427 }
3428
3429 /* Returns TRUE if we support a move between the first two operands.
3430 At the moment, we just want to discourage mem to mem moves until
3431 after reload, because reload has a hard time with our limited
3432 number of address registers, and we can get into a situation where
3433 we need three of them when we only have two. */
3434 bool
3435 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3436 {
3437 rtx op0 = operands[0];
3438 rtx op1 = operands[1];
3439
3440 if (TARGET_A24)
3441 return true;
3442
3443 #define DEBUG_MOV_OK 0
3444 #if DEBUG_MOV_OK
3445 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3446 debug_rtx (op0);
3447 debug_rtx (op1);
3448 #endif
3449
3450 if (GET_CODE (op0) == SUBREG)
3451 op0 = XEXP (op0, 0);
3452 if (GET_CODE (op1) == SUBREG)
3453 op1 = XEXP (op1, 0);
3454
3455 if (GET_CODE (op0) == MEM
3456 && GET_CODE (op1) == MEM
3457 && ! reload_completed)
3458 {
3459 #if DEBUG_MOV_OK
3460 fprintf (stderr, " - no, mem to mem\n");
3461 #endif
3462 return false;
3463 }
3464
3465 #if DEBUG_MOV_OK
3466 fprintf (stderr, " - ok\n");
3467 #endif
3468 return true;
3469 }
3470
3471 /* Returns TRUE if two consecutive HImode mov instructions, generated
3472    for moving an immediate value into a four-byte variable in memory,
3473    can be combined into a single SImode mov instruction. */
3474 bool
3475 m32c_immd_dbl_mov (rtx * operands,
3476 enum machine_mode mode ATTRIBUTE_UNUSED)
3477 {
3478 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3479 const char *str1;
3480 const char *str2;
3481
3482 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3483 && MEM_SCALAR_P (operands[0])
3484 && !MEM_IN_STRUCT_P (operands[0])
3485 && GET_CODE (XEXP (operands[2], 0)) == CONST
3486 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3487 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3488 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3489 && MEM_SCALAR_P (operands[2])
3490 && !MEM_IN_STRUCT_P (operands[2]))
3491 flag = 1;
3492
3493 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3494 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3495 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3496 && MEM_SCALAR_P (operands[0])
3497 && !MEM_IN_STRUCT_P (operands[0])
3498 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
3499 && GET_CODE (XEXP (operands[2], 0)) == CONST
3500 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3501 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3502 && MEM_SCALAR_P (operands[2])
3503 && !MEM_IN_STRUCT_P (operands[2]))
3504 flag = 2;
3505
3506 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3507 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3508 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3509 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3510 && MEM_SCALAR_P (operands[0])
3511 && !MEM_IN_STRUCT_P (operands[0])
3512 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
3513 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3514 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3515 && MEM_SCALAR_P (operands[2])
3516 && !MEM_IN_STRUCT_P (operands[2]))
3517 flag = 3;
3518
3519 else
3520 return false;
3521
3522 switch (flag)
3523 {
3524 case 1:
3525 str1 = XSTR (XEXP (operands[0], 0), 0);
3526 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3527 if (strcmp (str1, str2) == 0)
3528 okflag = 1;
3529 else
3530 okflag = 0;
3531 break;
3532 case 2:
3533 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3534 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3535 if (strcmp(str1,str2) == 0)
3536 okflag = 1;
3537 else
3538 okflag = 0;
3539 break;
3540 case 3:
3541 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3542 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
3543 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3544 if (((offset2-offset1) == 2) && offsetsign != 0)
3545 okflag = 1;
3546 else
3547 okflag = 0;
3548 break;
3549 default:
3550 okflag = 0;
3551 }
3552
3553 if (okflag == 1)
3554 {
3555 HOST_WIDE_INT val;
3556 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3557
3558 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
3559 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3560
3561 return true;
3562 }
3563
3564 return false;
3565 }
3566
3567 /* Expanders */
3568
3569 /* Subregs are non-orthogonal for us, because our registers are all
3570 different sizes. */
3571 static rtx
3572 m32c_subreg (enum machine_mode outer,
3573 rtx x, enum machine_mode inner, int byte)
3574 {
3575 int r, nr = -1;
3576
3577   /* When converting a MEM to a different mode of the same size, we
3578      just rewrite it.  */
3579 if (GET_CODE (x) == SUBREG
3580 && SUBREG_BYTE (x) == 0
3581 && GET_CODE (SUBREG_REG (x)) == MEM
3582 && (GET_MODE_SIZE (GET_MODE (x))
3583 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3584 {
3585 rtx oldx = x;
3586 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3587 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3588 }
3589
3590 /* Push/pop get done as smaller push/pops. */
3591 if (GET_CODE (x) == MEM
3592 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3593 || GET_CODE (XEXP (x, 0)) == POST_INC))
3594 return gen_rtx_MEM (outer, XEXP (x, 0));
3595 if (GET_CODE (x) == SUBREG
3596 && GET_CODE (XEXP (x, 0)) == MEM
3597 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3598 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3599 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3600
3601 if (GET_CODE (x) != REG)
3602 {
3603 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3604 if (GET_CODE (r) == SUBREG
3605 && GET_CODE (x) == MEM
3606 && MEM_VOLATILE_P (x))
3607 {
3608 /* Volatile MEMs don't get simplified, but we need them to
3609 be. We are little endian, so the subreg byte is the
3610 offset. */
3611 r = adjust_address (x, outer, byte);
3612 }
3613 return r;
3614 }
3615
3616 r = REGNO (x);
3617 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3618 return simplify_gen_subreg (outer, x, inner, byte);
3619
3620 if (IS_MEM_REGNO (r))
3621 return simplify_gen_subreg (outer, x, inner, byte);
3622
3623 /* This is where the complexities of our register layout are
3624 described. */
3625 if (byte == 0)
3626 nr = r;
3627 else if (outer == HImode)
3628 {
3629 if (r == R0_REGNO && byte == 2)
3630 nr = R2_REGNO;
3631 else if (r == R0_REGNO && byte == 4)
3632 nr = R1_REGNO;
3633 else if (r == R0_REGNO && byte == 6)
3634 nr = R3_REGNO;
3635 else if (r == R1_REGNO && byte == 2)
3636 nr = R3_REGNO;
3637 else if (r == A0_REGNO && byte == 2)
3638 nr = A1_REGNO;
3639 }
3640 else if (outer == SImode)
3641 {
3642 if (r == R0_REGNO && byte == 0)
3643 nr = R0_REGNO;
3644 else if (r == R0_REGNO && byte == 4)
3645 nr = R1_REGNO;
3646 }
3647 if (nr == -1)
3648 {
3649 fprintf (stderr, "m32c_subreg %s %s %d\n",
3650 mode_name[outer], mode_name[inner], byte);
3651 debug_rtx (x);
3652 gcc_unreachable ();
3653 }
3654 return gen_rtx_REG (outer, nr);
3655 }
3656
3657 /* Used to emit move instructions. We split some moves,
3658 and avoid mem-mem moves. */
3659 int
3660 m32c_prepare_move (rtx * operands, enum machine_mode mode)
3661 {
3662 if (far_addr_space_p (operands[0])
3663 && CONSTANT_P (operands[1]))
3664 {
3665 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3666 }
3667 if (TARGET_A16 && mode == PSImode)
3668 return m32c_split_move (operands, mode, 1);
3669 if ((GET_CODE (operands[0]) == MEM)
3670 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3671 {
3672 rtx pmv = XEXP (operands[0], 0);
3673 rtx dest_reg = XEXP (pmv, 0);
3674 rtx dest_mod = XEXP (pmv, 1);
3675
3676 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3677 operands[0] = gen_rtx_MEM (mode, dest_reg);
3678 }
3679 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3680 operands[1] = copy_to_mode_reg (mode, operands[1]);
3681 return 0;
3682 }
3683
3684 #define DEBUG_SPLIT 0
3685
3686 /* Returns TRUE if the given PSImode move should be split.  We split
3687    all r8c/m16c moves, since those chips don't support them directly,
3688    and POP.L, as we can only *push* SImode. */
3689 int
3690 m32c_split_psi_p (rtx * operands)
3691 {
3692 #if DEBUG_SPLIT
3693 fprintf (stderr, "\nm32c_split_psi_p\n");
3694 debug_rtx (operands[0]);
3695 debug_rtx (operands[1]);
3696 #endif
3697 if (TARGET_A16)
3698 {
3699 #if DEBUG_SPLIT
3700 fprintf (stderr, "yes, A16\n");
3701 #endif
3702 return 1;
3703 }
3704 if (GET_CODE (operands[1]) == MEM
3705 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3706 {
3707 #if DEBUG_SPLIT
3708 fprintf (stderr, "yes, pop.l\n");
3709 #endif
3710 return 1;
3711 }
3712 #if DEBUG_SPLIT
3713 fprintf (stderr, "no, default\n");
3714 #endif
3715 return 0;
3716 }
3717
3718 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3719 (define_expand), 1 if it is not optional (define_insn_and_split),
3720 and 3 for define_split (alternate api). */
3721 int
3722 m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3723 {
3724 rtx s[4], d[4];
3725 int parts, si, di, rev = 0;
3726 int rv = 0, opi = 2;
3727 enum machine_mode submode = HImode;
3728 rtx *ops, local_ops[10];
3729
3730 /* define_split modifies the existing operands, but the other two
3731 emit new insns. OPS is where we store the operand pairs, which
3732 we emit later. */
3733 if (split_all == 3)
3734 ops = operands;
3735 else
3736 ops = local_ops;
3737
3738   /* DImode splits into SImode halves; everything else into HImode. */
3739 if (mode == DImode)
3740 submode = SImode;
3741
3742 /* Before splitting mem-mem moves, force one operand into a
3743 register. */
3744 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3745 {
3746 #if DEBUG0
3747 fprintf (stderr, "force_reg...\n");
3748 debug_rtx (operands[1]);
3749 #endif
3750 operands[1] = force_reg (mode, operands[1]);
3751 #if DEBUG0
3752 debug_rtx (operands[1]);
3753 #endif
3754 }
3755
3756 parts = 2;
3757
3758 #if DEBUG_SPLIT
3759 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3760 split_all);
3761 debug_rtx (operands[0]);
3762 debug_rtx (operands[1]);
3763 #endif
3764
3765 /* Note that split_all is not used to select the api after this
3766 point, so it's safe to set it to 3 even with define_insn. */
3767 /* None of the chips can move SI operands to sp-relative addresses,
3768 so we always split those. */
3769 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3770 split_all = 3;
3771
3772 if (TARGET_A16
3773 && (far_addr_space_p (operands[0])
3774 || far_addr_space_p (operands[1])))
3775 split_all |= 1;
3776
3777 /* We don't need to split these. */
3778 if (TARGET_A24
3779 && split_all != 3
3780 && (mode == SImode || mode == PSImode)
3781 && !(GET_CODE (operands[1]) == MEM
3782 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3783 return 0;
3784
3785 /* First, enumerate the subregs we'll be dealing with. */
3786 for (si = 0; si < parts; si++)
3787 {
3788 d[si] =
3789 m32c_subreg (submode, operands[0], mode,
3790 si * GET_MODE_SIZE (submode));
3791 s[si] =
3792 m32c_subreg (submode, operands[1], mode,
3793 si * GET_MODE_SIZE (submode));
3794 }
3795
3796 /* Split pushes by emitting a sequence of smaller pushes. */
3797 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3798 {
3799 for (si = parts - 1; si >= 0; si--)
3800 {
3801 ops[opi++] = gen_rtx_MEM (submode,
3802 gen_rtx_PRE_DEC (Pmode,
3803 gen_rtx_REG (Pmode,
3804 SP_REGNO)));
3805 ops[opi++] = s[si];
3806 }
3807
3808 rv = 1;
3809 }
3810 /* Likewise for pops. */
3811 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3812 {
3813 for (di = 0; di < parts; di++)
3814 {
3815 ops[opi++] = d[di];
3816 ops[opi++] = gen_rtx_MEM (submode,
3817 gen_rtx_POST_INC (Pmode,
3818 gen_rtx_REG (Pmode,
3819 SP_REGNO)));
3820 }
3821 rv = 1;
3822 }
3823 else if (split_all)
3824 {
3825 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3826 for (di = 0; di < parts - 1; di++)
3827 for (si = di + 1; si < parts; si++)
3828 if (reg_mentioned_p (d[di], s[si]))
3829 rev = 1;
3830
3831 if (rev)
3832 for (si = 0; si < parts; si++)
3833 {
3834 ops[opi++] = d[si];
3835 ops[opi++] = s[si];
3836 }
3837 else
3838 for (si = parts - 1; si >= 0; si--)
3839 {
3840 ops[opi++] = d[si];
3841 ops[opi++] = s[si];
3842 }
3843 rv = 1;
3844 }
3845 /* Now emit any moves we may have accumulated. */
3846 if (rv && split_all != 3)
3847 {
3848 int i;
3849 for (i = 2; i < opi; i += 2)
3850 emit_move_insn (ops[i], ops[i + 1]);
3851 }
3852 return rv;
3853 }
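
/* For example, on TARGET_A16 an SImode register-to-memory move reaches
   this point and is emitted as two HImode moves, one per 16-bit half
   (an SImode value based in r0 occupies r2:r0, per m32c_subreg above).
   SImode pushes and pops are likewise emitted as pairs of HImode
   pushes/pops on the stack pointer.  */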
3854
3855 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3856 the like. For the R8C they expect one of the addresses to be in
3857 R1L:An so we need to arrange for that. Otherwise, it's just a
3858 matter of picking out the operands we want and emitting the right
3859 pattern for them. All these expanders, which correspond to
3860 patterns in blkmov.md, must return nonzero if they expand the insn,
3861 or zero if they should FAIL. */
3862
3863 /* This is a memset() opcode. All operands are implied, so we need to
3864 arrange for them to be in the right registers. The opcode wants
3865 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3866 the count (HI), and $2 the value (QI). */
3867 int
3868 m32c_expand_setmemhi(rtx *operands)
3869 {
3870 rtx desta, count, val;
3871 rtx desto, counto;
3872
3873 desta = XEXP (operands[0], 0);
3874 count = operands[1];
3875 val = operands[2];
3876
3877 desto = gen_reg_rtx (Pmode);
3878 counto = gen_reg_rtx (HImode);
3879
3880 if (GET_CODE (desta) != REG
3881 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3882 desta = copy_to_mode_reg (Pmode, desta);
3883
3884 /* This looks like an arbitrary restriction, but this is by far the
3885 most common case. For counts 8..14 this actually results in
3886 smaller code with no speed penalty because the half-sized
3887 constant can be loaded with a shorter opcode. */
3888 if (GET_CODE (count) == CONST_INT
3889 && GET_CODE (val) == CONST_INT
3890 && ! (INTVAL (count) & 1)
3891 && (INTVAL (count) > 1)
3892 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3893 {
3894 unsigned v = INTVAL (val) & 0xff;
3895 v = v | (v << 8);
3896 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3897 val = copy_to_mode_reg (HImode, GEN_INT (v));
3898 if (TARGET_A16)
3899 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3900 else
3901 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3902 return 1;
3903 }
3904
3905 /* This is the generalized memset() case. */
3906 if (GET_CODE (val) != REG
3907 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3908 val = copy_to_mode_reg (QImode, val);
3909
3910 if (GET_CODE (count) != REG
3911 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3912 count = copy_to_mode_reg (HImode, count);
3913
3914 if (TARGET_A16)
3915 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3916 else
3917 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3918
3919 return 1;
3920 }
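
/* For example, memset (buf, 1, 10) arrives here with VAL 1 and COUNT 10;
   the fast path above doubles the value to 0x0101, halves the count to 5
   and emits the word-sized opcode.  memset (buf, 0x55, 9) fails the
   checks (odd count, value outside -8..7) and falls through to the
   byte-sized opcode.  */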
3921
3922 /* This is a memcpy() opcode. All operands are implied, so we need to
3923 arrange for them to be in the right registers. The opcode wants
3924 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3925 is the source (MEM:BLK), and $2 the count (HI). */
3926 int
3927 m32c_expand_movmemhi(rtx *operands)
3928 {
3929 rtx desta, srca, count;
3930 rtx desto, srco, counto;
3931
3932 desta = XEXP (operands[0], 0);
3933 srca = XEXP (operands[1], 0);
3934 count = operands[2];
3935
3936 desto = gen_reg_rtx (Pmode);
3937 srco = gen_reg_rtx (Pmode);
3938 counto = gen_reg_rtx (HImode);
3939
3940 if (GET_CODE (desta) != REG
3941 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3942 desta = copy_to_mode_reg (Pmode, desta);
3943
3944 if (GET_CODE (srca) != REG
3945 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3946 srca = copy_to_mode_reg (Pmode, srca);
3947
3948 /* Similar to setmem, but we don't need to check the value. */
3949 if (GET_CODE (count) == CONST_INT
3950 && ! (INTVAL (count) & 1)
3951 && (INTVAL (count) > 1))
3952 {
3953 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3954 if (TARGET_A16)
3955 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3956 else
3957 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3958 return 1;
3959 }
3960
3961   /* This is the generalized memcpy() case. */
3962 if (GET_CODE (count) != REG
3963 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3964 count = copy_to_mode_reg (HImode, count);
3965
3966 if (TARGET_A16)
3967 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3968 else
3969 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3970
3971 return 1;
3972 }
3973
3974 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3975 the copy, which should point to the NUL at the end of the string,
3976 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3977 Since our opcode leaves the destination pointing *after* the NUL,
3978 we must emit an adjustment. */
3979 int
3980 m32c_expand_movstr(rtx *operands)
3981 {
3982 rtx desta, srca;
3983 rtx desto, srco;
3984
3985 desta = XEXP (operands[1], 0);
3986 srca = XEXP (operands[2], 0);
3987
3988 desto = gen_reg_rtx (Pmode);
3989 srco = gen_reg_rtx (Pmode);
3990
3991 if (GET_CODE (desta) != REG
3992 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3993 desta = copy_to_mode_reg (Pmode, desta);
3994
3995 if (GET_CODE (srca) != REG
3996 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3997 srca = copy_to_mode_reg (Pmode, srca);
3998
3999 emit_insn (gen_movstr_op (desto, srco, desta, srca));
4000 /* desto ends up being a1, which allows this type of add through MOVA. */
4001 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
4002
4003 return 1;
4004 }
4005
4006 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
4007 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
4008 $2 is the other (MEM:BLK). We must do the comparison, and then
4009 convert the flags to a signed integer result. */
4010 int
4011 m32c_expand_cmpstr(rtx *operands)
4012 {
4013 rtx src1a, src2a;
4014
4015 src1a = XEXP (operands[1], 0);
4016 src2a = XEXP (operands[2], 0);
4017
4018 if (GET_CODE (src1a) != REG
4019 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
4020 src1a = copy_to_mode_reg (Pmode, src1a);
4021
4022 if (GET_CODE (src2a) != REG
4023 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
4024 src2a = copy_to_mode_reg (Pmode, src2a);
4025
4026 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
4027 emit_insn (gen_cond_to_int (operands[0]));
4028
4029 return 1;
4030 }
4031
4032
4033 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
4034
4035 static shift_gen_func
4036 shift_gen_func_for (int mode, int code)
4037 {
4038 #define GFF(m,c,f) if (mode == m && code == c) return f
4039 GFF(QImode, ASHIFT, gen_ashlqi3_i);
4040 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
4041 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
4042 GFF(HImode, ASHIFT, gen_ashlhi3_i);
4043 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
4044 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
4045 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
4046 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
4047 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
4048 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
4049 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
4050 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
4051 #undef GFF
4052 gcc_unreachable ();
4053 }
4054
4055 /* The m32c only has one shift, but it takes a signed count. GCC
4056 doesn't want this, so we fake it by negating any shift count when
4057 we're pretending to shift the other way. Also, the shift count is
4058 limited to -8..8. It's slightly better to use two shifts for 9..15
4059 than to load the count into r1h, so we do that too. */
4060 int
4061 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
4062 {
4063 enum machine_mode mode = GET_MODE (operands[0]);
4064 shift_gen_func func = shift_gen_func_for (mode, shift_code);
4065 rtx temp;
4066
4067 if (GET_CODE (operands[2]) == CONST_INT)
4068 {
4069 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
4070 int count = INTVAL (operands[2]) * scale;
4071
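/* Split a constant shift that exceeds the hardware limit into a
   series of maximal shifts followed by a shift of the remainder.  */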
4072 while (count > maxc)
4073 {
4074 temp = gen_reg_rtx (mode);
4075 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
4076 operands[1] = temp;
4077 count -= maxc;
4078 }
4079 while (count < -maxc)
4080 {
4081 temp = gen_reg_rtx (mode);
4082 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
4083 operands[1] = temp;
4084 count += maxc;
4085 }
4086 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
4087 return 1;
4088 }
4089
4090 temp = gen_reg_rtx (QImode);
4091 if (scale < 0)
4092 /* The pattern has a NEG that corresponds to this. */
4093 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
4094 else if (TARGET_A16 && mode == SImode)
4095 /* Copy the count: the code below may modify it, and we don't
4096 want to clobber the original value. */
4097 emit_move_insn (temp, operands[2]);
4098 else
4099 /* We'll only use it for the shift, no point emitting a move. */
4100 temp = operands[2];
4101
4102 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
4103 {
4104 /* The m16c has a limit of -16..16 for SI shifts, even when the
4105 shift count is in a register. Since there are so many targets
4106 of these shifts, it's better to expand the RTL here than to
4107 call a helper function.
4108
4109 The resulting code looks something like this:
4110
4111 cmp.b r1h,-16
4112 jge.b 1f
4113 shl.l -16,dest
4114 add.b r1h,16
4115 1f: cmp.b r1h,16
4116 jle.b 1f
4117 shl.l 16,dest
4118 sub.b r1h,16
4119 1f: shl.l r1h,dest
4120
4121 We take advantage of the fact that "negative" shifts are
4122 undefined to skip one of the comparisons. */
4123
4124 rtx count;
4125 rtx label, lref, insn, tempvar;
4126
4127 emit_move_insn (operands[0], operands[1]);
4128
4129 count = temp;
4130 label = gen_label_rtx ();
4131 lref = gen_rtx_LABEL_REF (VOIDmode, label);
4132 LABEL_NUSES (label) ++;
4133
4134 tempvar = gen_reg_rtx (mode);
4135
4136 if (shift_code == ASHIFT)
4137 {
4138 /* This is a left shift. We only need to check positive counts. */
4139 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
4140 count, GEN_INT (16), label));
4141 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
4142 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
4143 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
4144 emit_label_after (label, insn);
4145 }
4146 else
4147 {
4148 /* This is a right shift. We only need to check negative counts. */
4149 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
4150 count, GEN_INT (-16), label));
4151 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
4152 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
4153 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
4154 emit_label_after (label, insn);
4155 }
4156 operands[1] = operands[0];
4157 emit_insn (func (operands[0], operands[0], count));
4158 return 1;
4159 }
4160
4161 operands[2] = temp;
4162 return 0;
4163 }
4164
4165 /* The m32c has a limited range of operations that work on PSImode
4166 values; we have to expand to SI, do the math, and truncate back to
4167 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
4168 those cases. */
4169 void
4170 m32c_expand_neg_mulpsi3 (rtx * operands)
4171 {
4172 /* operands: a = b * i */
4173 rtx temp1; /* b as SI */
4174 rtx scale; /* i as SI */
4175 rtx temp2; /* a*b as SI */
4176
4177 temp1 = gen_reg_rtx (SImode);
4178 temp2 = gen_reg_rtx (SImode);
4179 if (GET_CODE (operands[2]) != CONST_INT)
4180 {
4181 scale = gen_reg_rtx (SImode);
4182 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
4183 }
4184 else
4185 scale = copy_to_mode_reg (SImode, operands[2]);
4186
4187 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
4188 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
4189 emit_insn (gen_truncsipsi2 (operands[0], temp2));
4190 }
4191
4192 /* Pattern Output Functions */
4193
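/* Expand a conditional move.  We only handle EQ/NE comparisons that
   select between two constants; returns nonzero if we can't handle
   the operands (so the caller should fail the expansion), zero if
   the move was emitted.  */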
4194 int
4195 m32c_expand_movcc (rtx *operands)
4196 {
4197 rtx rel = operands[1];
4198 rtx cmp;
4199
4200 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
4201 return 1;
4202 if (GET_CODE (operands[2]) != CONST_INT
4203 || GET_CODE (operands[3]) != CONST_INT)
4204 return 1;
4205 if (GET_CODE (rel) == NE)
4206 {
4207 rtx tmp = operands[2];
4208 operands[2] = operands[3];
4209 operands[3] = tmp;
4210 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
4211 }
4212
4213 emit_move_insn (operands[0],
4214 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4215 rel,
4216 operands[2],
4217 operands[3]));
4218 return 0;
4219 }
4220
4221 /* Used for the "insv" pattern. Returns nonzero if we fail (so the generic expander is used), zero when done. */
4222 int
4223 m32c_expand_insv (rtx *operands)
4224 {
4225 rtx op0, src0, p;
4226 int mask;
4227
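/* We only handle single-bit fields.  */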
4228 if (INTVAL (operands[1]) != 1)
4229 return 1;
4230
4231 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
4232 if (GET_CODE (operands[3]) != CONST_INT)
4233 return 1;
4234 if (INTVAL (operands[3]) != 0
4235 && INTVAL (operands[3]) != 1
4236 && INTVAL (operands[3]) != -1)
4237 return 1;
4238
4239 mask = 1 << INTVAL (operands[2]);
4240
4241 op0 = operands[0];
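/* If op0 is a lowpart SUBREG of a QImode or HImode register, operate
   on the inner register directly.  */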
4242 if (GET_CODE (op0) == SUBREG
4243 && SUBREG_BYTE (op0) == 0)
4244 {
4245 rtx sub = SUBREG_REG (op0);
4246 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
4247 op0 = sub;
4248 }
4249
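/* Read the original value through op0 itself if we can't create a
   pseudo or if op0 is volatile memory; otherwise work on a copy in a
   fresh pseudo.  */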
4250 if (!can_create_pseudo_p ()
4251 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
4252 src0 = op0;
4253 else
4254 {
4255 src0 = gen_reg_rtx (GET_MODE (op0));
4256 emit_move_insn (src0, op0);
4257 }
4258
4259 if (GET_MODE (op0) == HImode
4260 && INTVAL (operands[2]) >= 8
4261 && GET_CODE (op0) == MEM)
4262 {
4263 /* We are little endian. */
4264 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
4265 MEM_COPY_ATTRIBUTES (new_mem, op0);
4266 mask >>= 8;
4267 }
4268
4269 /* First, we generate a mask with the correct polarity. If we are
4270 storing a zero, we want an AND mask, so invert it. */
4271 if (INTVAL (operands[3]) == 0)
4272 {
4273 /* Storing a zero, use an AND mask */
4274 if (GET_MODE (op0) == HImode)
4275 mask ^= 0xffff;
4276 else
4277 mask ^= 0xff;
4278 }
4279 /* Now we need to properly sign-extend the mask in case we need to
4280 fall back to an AND or OR opcode. */
4281 if (GET_MODE (op0) == HImode)
4282 {
4283 if (mask & 0x8000)
4284 mask -= 0x10000;
4285 }
4286 else
4287 {
4288 if (mask & 0x80)
4289 mask -= 0x100;
4290 }
4291
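/* Pick the insn: +4 selects IOR (setting the bit) vs. AND (clearing
   it), +2 selects HImode vs. QImode, +1 selects the A24 vs. A16
   variant.  */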
4292 switch ( (INTVAL (operands[3]) ? 4 : 0)
4293 + ((GET_MODE (op0) == HImode) ? 2 : 0)
4294 + (TARGET_A24 ? 1 : 0))
4295 {
4296 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4297 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4298 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4299 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4300 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4301 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4302 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4303 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
4304 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
4305 }
4306
4307 emit_insn (p);
4308 return 0;
4309 }
4310
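/* Return the assembler template for an scc (store condition) pattern.
   When the result lands in r0l we can use STZX directly; otherwise
   store the condition into bit 0 of the destination with BMcond and
   mask off the remaining bits.  */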
4311 const char *
4312 m32c_scc_pattern (rtx *operands, RTX_CODE code)
4313 {
4314 static char buf[30];
4315 if (GET_CODE (operands[0]) == REG
4316 && REGNO (operands[0]) == R0_REGNO)
4317 {
4318 if (code == EQ)
4319 return "stzx\t#1,#0,r0l";
4320 if (code == NE)
4321 return "stzx\t#0,#1,r0l";
4322 }
4323 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4324 return buf;
4325 }
4326
4327 /* Encode symbol attributes of a SYMBOL_REF into its
4328 SYMBOL_REF_FLAGS. */
4329 static void
4330 m32c_encode_section_info (tree decl, rtx rtl, int first)
4331 {
4332 int extra_flags = 0;
4333
4334 default_encode_section_info (decl, rtl, first);
4335 if (TREE_CODE (decl) == FUNCTION_DECL
4336 && m32c_special_page_vector_p (decl))
4337
4338 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4339
4340 if (extra_flags)
4341 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4342 }
4343
4344 /* Returns TRUE if the current function is a leaf, and thus we can
4345 determine which registers an interrupt function really needs to
4346 save. The logic below is mostly about selecting the insn sequence
4347 that makes up the function body, rather than whatever sequence
4348 happens to be open while the current insn is being emitted. */
4349 static int
4350 m32c_leaf_function_p (void)
4351 {
4352 rtx saved_first, saved_last;
4353 struct sequence_stack *seq;
4354 int rv;
4355
4356 saved_first = crtl->emit.x_first_insn;
4357 saved_last = crtl->emit.x_last_insn;
4358 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
4359 ;
4360 if (seq)
4361 {
4362 crtl->emit.x_first_insn = seq->first;
4363 crtl->emit.x_last_insn = seq->last;
4364 }
4365
4366 rv = leaf_function_p ();
4367
4368 crtl->emit.x_first_insn = saved_first;
4369 crtl->emit.x_last_insn = saved_last;
4370 return rv;
4371 }
4372
4373 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4374 opcodes. If the function doesn't need the frame base or stack
4375 pointer, it can use the simpler RTS opcode. */
4376 static bool
4377 m32c_function_needs_enter (void)
4378 {
4379 rtx insn;
4380 struct sequence_stack *seq;
4381 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4382 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4383
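/* If nested sequences have been saved, start from the outermost one
   so that we scan the insns of the whole function.  */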
4384 insn = get_insns ();
4385 for (seq = crtl->emit.sequence_stack;
4386 seq;
4387 insn = seq->first, seq = seq->next);
4388
4389 while (insn)
4390 {
4391 if (reg_mentioned_p (sp, insn))
4392 return true;
4393 if (reg_mentioned_p (fb, insn))
4394 return true;
4395 insn = NEXT_INSN (insn);
4396 }
4397 return false;
4398 }
4399
4400 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4401 frame-related. Return PAR.
4402
4403 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4404 PARALLEL rtx other than the first if they do not have the
4405 FRAME_RELATED flag set on them. So this function is handy for
4406 marking up 'enter' instructions. */
4407 static rtx
4408 m32c_all_frame_related (rtx par)
4409 {
4410 int len = XVECLEN (par, 0);
4411 int i;
4412
4413 for (i = 0; i < len; i++)
4414 F (XVECEXP (par, 0, i));
4415
4416 return par;
4417 }
4418
4419 /* Emits the prologue. See the frame layout comment earlier in this
4420 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4421 that we manually update sp. */
4422 void
4423 m32c_emit_prologue (void)
4424 {
4425 int frame_size, extra_frame_size = 0, reg_save_size;
4426 int complex_prologue = 0;
4427
4428 cfun->machine->is_leaf = m32c_leaf_function_p ();
4429 if (interrupt_p (cfun->decl))
4430 {
4431 cfun->machine->is_interrupt = 1;
4432 complex_prologue = 1;
4433 }
4434 else if (bank_switch_p (cfun->decl))
4435 warning (OPT_Wattributes,
4436 "%<bank_switch%> has no effect on non-interrupt functions");
4437
4438 reg_save_size = m32c_pushm_popm (PP_justcount);
4439
4440 if (interrupt_p (cfun->decl))
4441 {
4442 if (bank_switch_p (cfun->decl))
4443 emit_insn (gen_fset_b ());
4444 else if (cfun->machine->intr_pushm)
4445 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4446 }
4447
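/* ENTER only needs to reserve the part of the frame not covered by
   the registers saved with PUSHM, so subtract the register save size
   from the FB-to-SP elimination offset.  */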
4448 frame_size =
4449 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4450 if (frame_size == 0
4451 && !m32c_function_needs_enter ())
4452 cfun->machine->use_rts = 1;
4453
4454 if (frame_size > 254)
4455 {
4456 extra_frame_size = frame_size - 254;
4457 frame_size = 254;
4458 }
4459 if (cfun->machine->use_rts == 0)
4460 F (emit_insn (m32c_all_frame_related
4461 (TARGET_A16
4462 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4463 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4464
4465 if (extra_frame_size)
4466 {
4467 complex_prologue = 1;
4468 if (TARGET_A16)
4469 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4470 gen_rtx_REG (HImode, SP_REGNO),
4471 GEN_INT (-extra_frame_size))));
4472 else
4473 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4474 gen_rtx_REG (PSImode, SP_REGNO),
4475 GEN_INT (-extra_frame_size))));
4476 }
4477
4478 complex_prologue += m32c_pushm_popm (PP_pushm);
4479
4480 /* This just emits a comment into the .s file for debugging. */
4481 if (complex_prologue)
4482 emit_insn (gen_prologue_end ());
4483 }
4484
4485 /* Likewise, for the epilogue. The only exception is that, for
4486 interrupts, we must manually unwind the frame as the REIT opcode
4487 doesn't do that. */
4488 void
4489 m32c_emit_epilogue (void)
4490 {
4491 /* This just emits a comment into the .s file for debugging. */
4492 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4493 emit_insn (gen_epilogue_start ());
4494
4495 m32c_pushm_popm (PP_popm);
4496
4497 if (cfun->machine->is_interrupt)
4498 {
4499 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4500
4501 /* REIT clears B flag and restores $fp for us, but we still
4502 have to fix up the stack. USE_RTS just means we didn't
4503 emit ENTER. */
4504 if (!cfun->machine->use_rts)
4505 {
4506 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4507 gen_rtx_REG (spmode, FP_REGNO));
4508 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4509 gen_rtx_REG (spmode, A0_REGNO));
4510 /* We can't just add this to the POPM because it would be in
4511 the wrong order, and wouldn't fix the stack if we're bank
4512 switching. */
4513 if (TARGET_A16)
4514 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4515 else
4516 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4517 }
4518 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4519 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4520
4521 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4522 generated only for M32C/M32CM targets (generate the REIT
4523 instruction otherwise). */
4524 if (fast_interrupt_p (cfun->decl))
4525 {
4526 /* Check if the fast_interrupt attribute is set for M32C or M32CM. */
4527 if (TARGET_A24)
4528 {
4529 emit_jump_insn (gen_epilogue_freit ());
4530 }
4531 /* If the fast_interrupt attribute is set for an R8C or M16C
4532 target, ignore the attribute and generate a REIT
4533 instruction instead. */
4534 else
4535 {
4536 warning (OPT_Wattributes,
4537 "%<fast_interrupt%> attribute directive ignored");
4538 emit_jump_insn (gen_epilogue_reit_16 ());
4539 }
4540 }
4541 else if (TARGET_A16)
4542 emit_jump_insn (gen_epilogue_reit_16 ());
4543 else
4544 emit_jump_insn (gen_epilogue_reit_24 ());
4545 }
4546 else if (cfun->machine->use_rts)
4547 emit_jump_insn (gen_epilogue_rts ());
4548 else if (TARGET_A16)
4549 emit_jump_insn (gen_epilogue_exitd_16 ());
4550 else
4551 emit_jump_insn (gen_epilogue_exitd_24 ());
4552 emit_barrier ();
4553 }
4554
4555 void
4556 m32c_emit_eh_epilogue (rtx ret_addr)
4557 {
4558 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4559 return to. We have to fudge the stack, pop everything, pop SP
4560 (fudged), and return (fudged). This is actually easier to do in
4561 assembler, so punt to libgcc. */
4562 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4563 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4564 emit_barrier ();
4565 }
4566
4567 /* Indicate which flags must be properly set for a given conditional. */
4568 static int
4569 flags_needed_for_conditional (rtx cond)
4570 {
4571 switch (GET_CODE (cond))
4572 {
4573 case LE:
4574 case GT:
4575 return FLAGS_OSZ;
4576 case LEU:
4577 case GTU:
4578 return FLAGS_ZC;
4579 case LT:
4580 case GE:
4581 return FLAGS_OS;
4582 case LTU:
4583 case GEU:
4584 return FLAGS_C;
4585 case EQ:
4586 case NE:
4587 return FLAGS_Z;
4588 default:
4589 return FLAGS_N;
4590 }
4591 }
4592
4593 #define DEBUG_CMP 0
4594
4595 /* Returns true if a compare insn is redundant because it would only
4596 set flags that are already set correctly. */
4597 static bool
4598 m32c_compare_redundant (rtx cmp, rtx *operands)
4599 {
4600 int flags_needed;
4601 int pflags;
4602 rtx prev, pp, next;
4603 rtx op0, op1, op2;
4604 #if DEBUG_CMP
4605 int prev_icode, i;
4606 #endif
4607
4608 op0 = operands[0];
4609 op1 = operands[1];
4610 op2 = operands[2];
4611
4612 #if DEBUG_CMP
4613 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4614 debug_rtx(cmp);
4615 for (i=0; i<2; i++)
4616 {
4617 fprintf(stderr, "operands[%d] = ", i);
4618 debug_rtx(operands[i]);
4619 }
4620 #endif
4621
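/* Find the insn that consumes the flags: either a conditional branch
   (a SET of the pc from an IF_THEN_ELSE) or a plain SET whose source
   may be a condition (e.g. an scc).  */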
4622 next = next_nonnote_insn (cmp);
4623 if (!next || !INSN_P (next))
4624 {
4625 #if DEBUG_CMP
4626 fprintf(stderr, "compare not followed by insn\n");
4627 debug_rtx(next);
4628 #endif
4629 return false;
4630 }
4631 if (GET_CODE (PATTERN (next)) == SET
4632 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4633 {
4634 next = XEXP (XEXP (PATTERN (next), 1), 0);
4635 }
4636 else if (GET_CODE (PATTERN (next)) == SET)
4637 {
4638 /* If this is a conditional, flags_needed will be something
4639 other than FLAGS_N, which we test below. */
4640 next = XEXP (PATTERN (next), 1);
4641 }
4642 else
4643 {
4644 #if DEBUG_CMP
4645 fprintf(stderr, "compare not followed by conditional\n");
4646 debug_rtx(next);
4647 #endif
4648 return false;
4649 }
4650 #if DEBUG_CMP
4651 fprintf(stderr, "conditional is: ");
4652 debug_rtx(next);
4653 #endif
4654
4655 flags_needed = flags_needed_for_conditional (next);
4656 if (flags_needed == FLAGS_N)
4657 {
4658 #if DEBUG_CMP
4659 fprintf(stderr, "compare not followed by conditional\n");
4660 debug_rtx(next);
4661 #endif
4662 return false;
4663 }
4664
4665 /* Compare doesn't set overflow and carry the same way that
4666 arithmetic instructions do, so we can't replace those. */
4667 if (flags_needed & FLAGS_OC)
4668 return false;
4669
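/* Walk backwards from the compare to the nearest flag-setting insn,
   giving up if anything in between could invalidate the comparison.  */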
4670 prev = cmp;
4671 do {
4672 prev = prev_nonnote_insn (prev);
4673 if (!prev)
4674 {
4675 #if DEBUG_CMP
4676 fprintf(stderr, "No previous insn.\n");
4677 #endif
4678 return false;
4679 }
4680 if (!INSN_P (prev))
4681 {
4682 #if DEBUG_CMP
4683 fprintf(stderr, "Previous insn is a non-insn.\n");
4684 #endif
4685 return false;
4686 }
4687 pp = PATTERN (prev);
4688 if (GET_CODE (pp) != SET)
4689 {
4690 #if DEBUG_CMP
4691 fprintf(stderr, "Previous insn is not a SET.\n");
4692 #endif
4693 return false;
4694 }
4695 pflags = get_attr_flags (prev);
4696
4697 /* Looking up attributes of previous insns corrupts the recog
4698 tables, so CMP must be re-recognized. */
4699 INSN_UID (cmp) = -1;
4700 recog (PATTERN (cmp), cmp, 0);
4701
4702 if (pflags == FLAGS_N
4703 && reg_mentioned_p (op0, pp))
4704 {
4705 #if DEBUG_CMP
4706 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4707 debug_rtx(prev);
4708 #endif
4709 return false;
4710 }
4711
4712 /* Check for comparisons against memory - between volatiles and
4713 aliases, we just can't risk this one. */
4714 if (GET_CODE (operands[0]) == MEM
4715 || GET_CODE (operands[1]) == MEM)
4716 {
4717 #if DEBUG_CMP
4718 fprintf(stderr, "comparisons with memory:\n");
4719 debug_rtx(prev);
4720 #endif
4721 return false;
4722 }
4723
4724 /* Check for PREV changing a register that's used to compute a
4725 value in CMP, even if it doesn't otherwise change flags. */
4726 if (GET_CODE (operands[0]) == REG
4727 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4728 {
4729 #if DEBUG_CMP
4730 fprintf(stderr, "sub-value affected, op0:\n");
4731 debug_rtx(prev);
4732 #endif
4733 return false;
4734 }
4735 if (GET_CODE (operands[1]) == REG
4736 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4737 {
4738 #if DEBUG_CMP
4739 fprintf(stderr, "sub-value affected, op1:\n");
4740 debug_rtx(prev);
4741 #endif
4742 return false;
4743 }
4744
4745 } while (pflags == FLAGS_N);
4746 #if DEBUG_CMP
4747 fprintf(stderr, "previous flag-setting insn:\n");
4748 debug_rtx(prev);
4749 debug_rtx(pp);
4750 #endif
4751
4752 if (GET_CODE (pp) == SET
4753 && GET_CODE (XEXP (pp, 0)) == REG
4754 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4755 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4756 {
4757 /* Adjacent cbranches must have the same operands to be
4758 redundant. */
4759 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4760 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4761 #if DEBUG_CMP
4762 fprintf(stderr, "adjacent cbranches\n");
4763 debug_rtx(pop0);
4764 debug_rtx(pop1);
4765 #endif
4766 if (rtx_equal_p (op0, pop0)
4767 && rtx_equal_p (op1, pop1))
4768 return true;
4769 #if DEBUG_CMP
4770 fprintf(stderr, "prev cmp not same\n");
4771 #endif
4772 return false;
4773 }
4774
4775 /* Else the previous insn must be a SET, with either the source or
4776 dest equal to operands[0], and operands[1] must be zero. */
4777
4778 if (!rtx_equal_p (op1, const0_rtx))
4779 {
4780 #if DEBUG_CMP
4781 fprintf(stderr, "operands[1] not const0_rtx\n");
4782 #endif
4783 return false;
4784 }
4785 if (GET_CODE (pp) != SET)
4786 {
4787 #if DEBUG_CMP
4788 fprintf (stderr, "pp not set\n");
4789 #endif
4790 return false;
4791 }
4792 if (!rtx_equal_p (op0, SET_SRC (pp))
4793 && !rtx_equal_p (op0, SET_DEST (pp)))
4794 {
4795 #if DEBUG_CMP
4796 fprintf(stderr, "operands[0] not found in set\n");
4797 #endif
4798 return false;
4799 }
4800
4801 #if DEBUG_CMP
4802 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4803 #endif
4804 if ((pflags & flags_needed) == flags_needed)
4805 return true;
4806
4807 return false;
4808 }
4809
4810 /* Return the pattern for a compare. This will be commented out if
4811 the compare is redundant, else a normal pattern is returned. Thus,
4812 the assembler output says where the compare would have been. */
4813 char *
4814 m32c_output_compare (rtx insn, rtx *operands)
4815 {
4816 static char templ[] = ";cmp.b\t%1,%0";
4817 /* ^ 5 */
4818
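/* Patch in the size suffix: .b, .w or .l according to the mode of
   operand 0.  */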
4819 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4820 if (m32c_compare_redundant (insn, operands))
4821 {
4822 #if DEBUG_CMP
4823 fprintf(stderr, "cbranch: cmp not needed\n");
4824 #endif
4825 return templ;
4826 }
4827
4828 #if DEBUG_CMP
4829 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4830 #endif
4831 return templ + 1;
4832 }
4833
4834 #undef TARGET_ENCODE_SECTION_INFO
4835 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4836
4837 /* If the frame pointer isn't used we detect that manually, but the
4838 stack pointer doesn't have addressing modes as flexible as the
4839 frame pointer's, so we always tell GCC a frame pointer is required. */
4840
4841 #undef TARGET_FRAME_POINTER_REQUIRED
4842 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4843
4844 /* The Global `targetm' Variable. */
4845
4846 struct gcc_target targetm = TARGET_INITIALIZER;
4847
4848 #include "gt-m32c.h"