1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2003, 2004, 2005, 2006
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "recog.h"
38 #include "toplev.h"
39 #include "expr.h"
40 #include "reload.h"
41 #include "tm_p.h"
42 #include "target.h"
43 #include "target-def.h"
44 #include "debug.h"
45 #include "flags.h"
46
47 enum reg_class regno_reg_class[] =
48 {
49 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
50 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
51 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
52 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
53 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
54 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
55 ADDR_REGS
56 };
57
58
59 /* The ASM_DOT macro allows easy string pasting to handle the differences
60 between MOTOROLA and MIT syntaxes in asm_fprintf(), which doesn't
61 support the %. option. */
62 #if MOTOROLA
63 # define ASM_DOT "."
64 # define ASM_DOTW ".w"
65 # define ASM_DOTL ".l"
66 #else
67 # define ASM_DOT ""
68 # define ASM_DOTW ""
69 # define ASM_DOTL ""
70 #endif
71
72
73 /* The minimum number of integer registers that we want to save with the
74 movem instruction. Using two movel instructions instead of a single
75 moveml is about 15% faster for the 68020 and 68030 at no expense in
76 code size. */
77 #define MIN_MOVEM_REGS 3
78
79 /* The minimum number of floating point registers that we want to save
80 with the fmovem instruction. */
81 #define MIN_FMOVEM_REGS 1
82
83 /* Structure describing stack frame layout. */
84 struct m68k_frame
85 {
86 /* Stack pointer to frame pointer offset. */
87 HOST_WIDE_INT offset;
88
89 /* Offset of FPU registers. */
90 HOST_WIDE_INT foffset;
91
92 /* Frame size in bytes (rounded up). */
93 HOST_WIDE_INT size;
94
95 /* Data and address registers. */
96 int reg_no;
97 unsigned int reg_mask;
98
99 /* FPU registers. */
100 int fpu_no;
101 unsigned int fpu_mask;
102
103 /* Offsets relative to ARG_POINTER. */
104 HOST_WIDE_INT frame_pointer_offset;
105 HOST_WIDE_INT stack_pointer_offset;
106
107 /* Function which the above information refers to. */
108 int funcdef_no;
109 };
110
111 /* Current frame information calculated by m68k_compute_frame_layout(). */
112 static struct m68k_frame current_frame;
113
114 /* Structure describing an m68k address.
115
116 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
117 with null fields evaluating to 0. Here:
118
119 - BASE satisfies m68k_legitimate_base_reg_p
120 - INDEX satisfies m68k_legitimate_index_reg_p
121 - OFFSET satisfies m68k_legitimate_constant_address_p
122
123 INDEX is either HImode or SImode. The other fields are SImode.
124
125 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
126 the address is (BASE)+. */
127 struct m68k_address {
128 enum rtx_code code;
129 rtx base;
130 rtx index;
131 rtx offset;
132 int scale;
133 };
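/* An illustrative decomposition (not part of the original source): the
   68020 address (8,%a0,%d1.l*4) yields code = UNKNOWN, base = %a0,
   index = %d1, scale = 4 and offset = 8, while -(%sp) yields
   code = PRE_DEC and base = %sp. */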
134
135 static bool m68k_handle_option (size_t, const char *, int);
136 static rtx find_addr_reg (rtx);
137 static const char *singlemove_string (rtx *);
138 #ifdef M68K_TARGET_COFF
139 static void m68k_coff_asm_named_section (const char *, unsigned int, tree);
140 #endif /* M68K_TARGET_COFF */
141 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
142 HOST_WIDE_INT, tree);
143 static rtx m68k_struct_value_rtx (tree, int);
144 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
145 tree args, int flags,
146 bool *no_add_attrs);
147 static void m68k_compute_frame_layout (void);
148 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
149 static bool m68k_ok_for_sibcall_p (tree, tree);
150 static bool m68k_rtx_costs (rtx, int, int, int *);
151 \f
152
153 /* Specify the identification number of the library being built. */
154 const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
155
156 /* Nonzero if the last compare/test insn had FP operands. The
157 sCC expanders peek at this to determine what to do for the
158 68060, which has no fsCC instructions. */
159 int m68k_last_compare_had_fp_operands;
160 \f
161 /* Initialize the GCC target structure. */
162
163 #if INT_OP_GROUP == INT_OP_DOT_WORD
164 #undef TARGET_ASM_ALIGNED_HI_OP
165 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
166 #endif
167
168 #if INT_OP_GROUP == INT_OP_NO_DOT
169 #undef TARGET_ASM_BYTE_OP
170 #define TARGET_ASM_BYTE_OP "\tbyte\t"
171 #undef TARGET_ASM_ALIGNED_HI_OP
172 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
173 #undef TARGET_ASM_ALIGNED_SI_OP
174 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
175 #endif
176
177 #if INT_OP_GROUP == INT_OP_DC
178 #undef TARGET_ASM_BYTE_OP
179 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
180 #undef TARGET_ASM_ALIGNED_HI_OP
181 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
182 #undef TARGET_ASM_ALIGNED_SI_OP
183 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
184 #endif
185
186 #undef TARGET_ASM_UNALIGNED_HI_OP
187 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
188 #undef TARGET_ASM_UNALIGNED_SI_OP
189 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
190
191 #undef TARGET_ASM_OUTPUT_MI_THUNK
192 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
193 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
194 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
195
196 #undef TARGET_ASM_FILE_START_APP_OFF
197 #define TARGET_ASM_FILE_START_APP_OFF true
198
199 #undef TARGET_DEFAULT_TARGET_FLAGS
200 #define TARGET_DEFAULT_TARGET_FLAGS MASK_STRICT_ALIGNMENT
201 #undef TARGET_HANDLE_OPTION
202 #define TARGET_HANDLE_OPTION m68k_handle_option
203
204 #undef TARGET_RTX_COSTS
205 #define TARGET_RTX_COSTS m68k_rtx_costs
206
207 #undef TARGET_ATTRIBUTE_TABLE
208 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
209
210 #undef TARGET_PROMOTE_PROTOTYPES
211 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
212
213 #undef TARGET_STRUCT_VALUE_RTX
214 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
215
216 #undef TARGET_CANNOT_FORCE_CONST_MEM
217 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
218
219 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
220 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
221
222 static const struct attribute_spec m68k_attribute_table[] =
223 {
224 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
225 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
226 { NULL, 0, 0, false, false, false, NULL }
227 };
228
229 struct gcc_target targetm = TARGET_INITIALIZER;
230 \f
231 /* Base flags for 68k ISAs. */
232 #define FL_FOR_isa_00 FL_ISA_68000
233 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
234 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
235 generated 68881 code for 68020 and 68030 targets unless explicitly told
236 not to. */
237 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
238 | FL_BITFIELD | FL_68881)
239 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
240 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
241
242 /* Base flags for ColdFire ISAs. */
243 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
244 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
245 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
246 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
247 #define FL_FOR_isa_c (FL_FOR_isa_b | FL_ISA_C | FL_CF_USP)
248
249 enum m68k_isa
250 {
251 /* Traditional 68000 instruction sets. */
252 isa_00,
253 isa_10,
254 isa_20,
255 isa_40,
256 isa_cpu32,
257 /* ColdFire instruction set variants. */
258 isa_a,
259 isa_aplus,
260 isa_b,
261 isa_c,
262 isa_max
263 };
264
265 /* Information about one of the -march, -mcpu or -mtune arguments. */
266 struct m68k_target_selection
267 {
268 /* The argument being described. */
269 const char *name;
270
271 /* For -mcpu, this is the device selected by the option.
272 For -mtune and -march, it is a representative device
273 for the microarchitecture or ISA respectively. */
274 enum target_device device;
275
276 /* The M68K_DEVICE fields associated with DEVICE. See the comment
277 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
278 const char *family;
279 enum uarch_type microarch;
280 enum m68k_isa isa;
281 unsigned long flags;
282 };
283
284 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
285 static const struct m68k_target_selection all_devices[] =
286 {
287 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
288 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
289 #include "m68k-devices.def"
290 #undef M68K_DEVICE
291 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
292 };
293
294 /* A list of all ISAs, mapping each one to a representative device.
295 Used for -march selection. */
296 static const struct m68k_target_selection all_isas[] =
297 {
298 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
299 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
300 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
301 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
302 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
303 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
304 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
305 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
306 | FL_CF_HWDIV) },
307 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
308 | FL_CF_HWDIV) },
309 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
310 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
311 | FL_CF_FPU
312 | FL_CF_EMAC) },
313 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
314 };
315
316 /* A list of all microarchitectures, mapping each one to a representative
317 device. Used for -mtune selection. */
318 static const struct m68k_target_selection all_microarchs[] =
319 {
320 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
321 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
322 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
323 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
324 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
325 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
326 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
327 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
328 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
329 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
330 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
331 | FL_CF_HWDIV) },
332 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
333 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
334 | FL_CF_USP
335 | FL_CF_EMAC
336 | FL_CF_FPU) },
337 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
338 };
339 \f
340 /* The entries associated with the -mcpu, -march and -mtune settings,
341 or null for options that have not been used. */
342 const struct m68k_target_selection *m68k_cpu_entry;
343 const struct m68k_target_selection *m68k_arch_entry;
344 const struct m68k_target_selection *m68k_tune_entry;
345
346 /* Which CPU we are generating code for. */
347 enum target_device m68k_cpu;
348
349 /* Which microarchitecture to tune for. */
350 enum uarch_type m68k_tune;
351
352 /* Which FPU to use. */
353 enum fpu_type m68k_fpu;
354
355 /* The set of FL_* flags that apply to the target processor. */
356 unsigned int m68k_cpu_flags;
357
358 /* Asm templates for calling or jumping to an arbitrary symbolic address,
359 or NULL if such calls or jumps are not supported. The address is held
360 in operand 0. */
361 const char *m68k_symbolic_call;
362 const char *m68k_symbolic_jump;
363 \f
364 /* See whether TABLE has an entry with name NAME. Return true and
365 store the entry in *ENTRY if so, otherwise return false and
366 leave *ENTRY alone. */
367
368 static bool
369 m68k_find_selection (const struct m68k_target_selection **entry,
370 const struct m68k_target_selection *table,
371 const char *name)
372 {
373 size_t i;
374
375 for (i = 0; table[i].name; i++)
376 if (strcmp (table[i].name, name) == 0)
377 {
378 *entry = table + i;
379 return true;
380 }
381 return false;
382 }
383
384 /* Implement TARGET_HANDLE_OPTION. */
385
386 static bool
387 m68k_handle_option (size_t code, const char *arg, int value)
388 {
389 switch (code)
390 {
391 case OPT_march_:
392 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
393
394 case OPT_mcpu_:
395 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
396
397 case OPT_mtune_:
398 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
399
400 case OPT_m5200:
401 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
402
403 case OPT_m5206e:
404 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
405
406 case OPT_m528x:
407 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
408
409 case OPT_m5307:
410 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
411
412 case OPT_m5407:
413 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
414
415 case OPT_mcfv4e:
416 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
417
418 case OPT_m68000:
419 case OPT_mc68000:
420 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
421
422 case OPT_m68010:
423 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
424
425 case OPT_m68020:
426 case OPT_mc68020:
427 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
428
429 case OPT_m68020_40:
430 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
431 "68020-40")
432 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
433
434 case OPT_m68020_60:
435 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
436 "68020-60")
437 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
438
439 case OPT_m68030:
440 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
441
442 case OPT_m68040:
443 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
444
445 case OPT_m68060:
446 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
447
448 case OPT_m68302:
449 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
450
451 case OPT_m68332:
452 case OPT_mcpu32:
453 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
454
455 case OPT_mshared_library_id_:
456 if (value > MAX_LIBRARY_ID)
457 error ("-mshared-library-id=%s is not between 0 and %d",
458 arg, MAX_LIBRARY_ID);
459 else
460 asprintf ((char **) &m68k_library_id_string, "%d", (value * -4) - 4);
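/* The option replaces the default m68k_library_id_string symbol with a
   literal a5 offset: e.g. -mshared-library-id=3 stores the string
   "-16" ((3 * -4) - 4). */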
461 return true;
462
463 default:
464 return true;
465 }
466 }
467
468 /* Sometimes certain combinations of command options do not make
469 sense on a particular target machine. You can define a macro
470 `OVERRIDE_OPTIONS' to take account of this. This macro, if
471 defined, is executed once just after all the command options have
472 been parsed.
473
474 Don't use this macro to turn on various extra optimizations for
475 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
476
477 void
478 override_options (void)
479 {
480 const struct m68k_target_selection *entry;
481 unsigned long target_mask;
482
483 /* User can choose:
484
485 -mcpu=
486 -march=
487 -mtune=
488
489 -march=ARCH should generate code that runs any processor
490 implementing architecture ARCH. -mcpu=CPU should override -march
491 and should generate code that runs on processor CPU, making free
492 use of any instructions that CPU understands. -mtune=UARCH applies
493 on top of -mcpu or -march and optimizes the code for UARCH. It does
494 not change the target architecture. */
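/* For example (illustrative only), "-march=isaa -mtune=cfv4" generates
   ISA_A code tuned for a V4 pipeline, whereas adding -mcpu would take
   precedence over the -march selection, warning if the two conflict. */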
495 if (m68k_cpu_entry)
496 {
497 /* Complain if the -march setting is for a different microarchitecture,
498 or includes flags that the -mcpu setting doesn't. */
499 if (m68k_arch_entry
500 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
501 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
502 warning (0, "-mcpu=%s conflicts with -march=%s",
503 m68k_cpu_entry->name, m68k_arch_entry->name);
504
505 entry = m68k_cpu_entry;
506 }
507 else
508 entry = m68k_arch_entry;
509
510 if (!entry)
511 entry = all_devices + TARGET_CPU_DEFAULT;
512
513 m68k_cpu_flags = entry->flags;
514
515 /* Use the architecture setting to derive default values for
516 certain flags. */
517 target_mask = 0;
518 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
519 target_mask |= MASK_BITFIELD;
520 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
521 target_mask |= MASK_CF_HWDIV;
522 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
523 target_mask |= MASK_HARD_FLOAT;
524 target_flags |= target_mask & ~target_flags_explicit;
525
526 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
527 m68k_cpu = entry->device;
528 if (m68k_tune_entry)
529 m68k_tune = m68k_tune_entry->microarch;
530 #ifdef M68K_DEFAULT_TUNE
531 else if (!m68k_cpu_entry && !m68k_arch_entry)
532 m68k_tune = M68K_DEFAULT_TUNE;
533 #endif
534 else
535 m68k_tune = entry->microarch;
536
537 /* Set the type of FPU. */
538 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
539 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
540 : FPUTYPE_68881);
541
542 if (TARGET_COLDFIRE_FPU)
543 {
544 REAL_MODE_FORMAT (SFmode) = &coldfire_single_format;
545 REAL_MODE_FORMAT (DFmode) = &coldfire_double_format;
546 }
547
548 /* Sanity check to ensure that msep-data and mid-shared-library are not
549 * both specified together. Doing so simply doesn't make sense.
550 */
551 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
552 error ("cannot specify both -msep-data and -mid-shared-library");
553
554 /* If we're generating code for a separate A5 relative data segment,
555 * we've got to enable -fPIC as well. This might be relaxable to
556 * -fpic but it hasn't been tested properly.
557 */
558 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
559 flag_pic = 2;
560
561 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
562 error if the target does not support them. */
563 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
564 error ("-mpcrel -fPIC is not currently supported on selected cpu");
565
566 /* ??? A historic way of turning on pic, or is this intended to
567 be an embedded thing that doesn't have the same name binding
568 significance that it does on hosted ELF systems? */
569 if (TARGET_PCREL && flag_pic == 0)
570 flag_pic = 1;
571
572 if (!flag_pic)
573 {
574 #if MOTOROLA && !defined (USE_GAS)
575 m68k_symbolic_call = "jsr %a0";
576 m68k_symbolic_jump = "jmp %a0";
577 #else
578 m68k_symbolic_call = "jbsr %a0";
579 m68k_symbolic_jump = "jra %a0";
580 #endif
581 }
582 else if (TARGET_ID_SHARED_LIBRARY)
583 /* All addresses must be loaded from the GOT. */
584 ;
585 else if (TARGET_68020 || TARGET_ISAB)
586 {
587 if (TARGET_PCREL)
588 {
589 m68k_symbolic_call = "bsr.l %c0";
590 m68k_symbolic_jump = "bra.l %c0";
591 }
592 else
593 {
594 #if defined(USE_GAS)
595 m68k_symbolic_call = "bsr.l %p0";
596 m68k_symbolic_jump = "bra.l %p0";
597 #else
598 m68k_symbolic_call = "bsr %p0";
599 m68k_symbolic_jump = "bra %p0";
600 #endif
601 }
602 /* Turn off function cse if we are doing PIC. We always want
603 function call to be done as `bsr foo@PLTPC'. */
604 /* ??? It's traditional to do this for -mpcrel too, but it isn't
605 clear how intentional that is. */
606 flag_no_function_cse = 1;
607 }
608
609 SUBTARGET_OVERRIDE_OPTIONS;
610 }
611
612 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
613 given argument and NAME is the argument passed to -mcpu. Return NULL
614 if -mcpu was not passed. */
615
616 const char *
617 m68k_cpp_cpu_ident (const char *prefix)
618 {
619 if (!m68k_cpu_entry)
620 return NULL;
621 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
622 }
623
624 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
625 given argument and NAME is the name of the representative device for
626 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
627
628 const char *
629 m68k_cpp_cpu_family (const char *prefix)
630 {
631 if (!m68k_cpu_entry)
632 return NULL;
633 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
634 }
635 \f
636 /* Return nonzero if FUNC is an interrupt function as specified by the
637 "interrupt_handler" attribute. */
638 bool
639 m68k_interrupt_function_p (tree func)
640 {
641 tree a;
642
643 if (TREE_CODE (func) != FUNCTION_DECL)
644 return false;
645
646 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
647 return (a != NULL_TREE);
648 }
649
650 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
651 struct attribute_spec.handler. */
652 static tree
653 m68k_handle_fndecl_attribute (tree *node, tree name,
654 tree args ATTRIBUTE_UNUSED,
655 int flags ATTRIBUTE_UNUSED,
656 bool *no_add_attrs)
657 {
658 if (TREE_CODE (*node) != FUNCTION_DECL)
659 {
660 warning (OPT_Wattributes, "%qs attribute only applies to functions",
661 IDENTIFIER_POINTER (name));
662 *no_add_attrs = true;
663 }
664
665 return NULL_TREE;
666 }
667
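/* Fill in current_frame for the current function: the rounded frame size
   and the masks and counts of call-saved integer and FP registers that
   the prologue must store. */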
668 static void
669 m68k_compute_frame_layout (void)
670 {
671 int regno, saved;
672 unsigned int mask;
673 bool interrupt_handler = m68k_interrupt_function_p (current_function_decl);
674
675 /* Only compute the frame once per function.
676 Don't cache information until reload has been completed. */
677 if (current_frame.funcdef_no == current_function_funcdef_no
678 && reload_completed)
679 return;
680
681 current_frame.size = (get_frame_size () + 3) & -4;
682
683 mask = saved = 0;
684 for (regno = 0; regno < 16; regno++)
685 if (m68k_save_reg (regno, interrupt_handler))
686 {
687 mask |= 1 << (regno - D0_REG);
688 saved++;
689 }
690 current_frame.offset = saved * 4;
691 current_frame.reg_no = saved;
692 current_frame.reg_mask = mask;
693
694 current_frame.foffset = 0;
695 mask = saved = 0;
696 if (TARGET_HARD_FLOAT)
697 {
698 for (regno = 16; regno < 24; regno++)
699 if (m68k_save_reg (regno, interrupt_handler))
700 {
701 mask |= 1 << (regno - FP0_REG);
702 saved++;
703 }
704 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
705 current_frame.offset += current_frame.foffset;
706 }
707 current_frame.fpu_no = saved;
708 current_frame.fpu_mask = mask;
709
710 /* Remember what function this frame refers to. */
711 current_frame.funcdef_no = current_function_funcdef_no;
712 }
713
714 HOST_WIDE_INT
715 m68k_initial_elimination_offset (int from, int to)
716 {
717 int argptr_offset;
718 /* The arg pointer points 8 bytes before the start of the arguments,
719 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
720 frame pointer in most frames. */
721 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
722 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
723 return argptr_offset;
724
725 m68k_compute_frame_layout ();
726
727 gcc_assert (to == STACK_POINTER_REGNUM);
728 switch (from)
729 {
730 case ARG_POINTER_REGNUM:
731 return current_frame.offset + current_frame.size - argptr_offset;
732 case FRAME_POINTER_REGNUM:
733 return current_frame.offset + current_frame.size;
734 default:
735 gcc_unreachable ();
736 }
737 }
738
739 /* Refer to the array `regs_ever_live' to determine which registers
740 to save; `regs_ever_live[I]' is nonzero if register number I
741 is ever used in the function. This function is responsible for
742 knowing which registers should not be saved even if used.
743 Return true if we need to save REGNO. */
744
745 static bool
746 m68k_save_reg (unsigned int regno, bool interrupt_handler)
747 {
748 if (flag_pic && regno == PIC_REG)
749 {
750 /* A function that receives a nonlocal goto must save all call-saved
751 registers. */
752 if (current_function_has_nonlocal_label)
753 return true;
754 if (current_function_uses_pic_offset_table)
755 return true;
756 /* Reload may introduce constant pool references into a function
757 that thitherto didn't need a PIC register. Note that the test
758 above will not catch that case because we will only set
759 current_function_uses_pic_offset_table when emitting
760 the address reloads. */
761 if (current_function_uses_const_pool)
762 return true;
763 }
764
765 if (current_function_calls_eh_return)
766 {
767 unsigned int i;
768 for (i = 0; ; i++)
769 {
770 unsigned int test = EH_RETURN_DATA_REGNO (i);
771 if (test == INVALID_REGNUM)
772 break;
773 if (test == regno)
774 return true;
775 }
776 }
777
778 /* Fixed regs we never touch. */
779 if (fixed_regs[regno])
780 return false;
781
782 /* The frame pointer (if it is such) is handled specially. */
783 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
784 return false;
785
786 /* Interrupt handlers must also save call_used_regs
787 if they are live or when calling nested functions. */
788 if (interrupt_handler)
789 {
790 if (regs_ever_live[regno])
791 return true;
792
793 if (!current_function_is_leaf && call_used_regs[regno])
794 return true;
795 }
796
797 /* Never need to save registers that aren't touched. */
798 if (!regs_ever_live[regno])
799 return false;
800
801 /* Otherwise save everything that isn't call-clobbered. */
802 return !call_used_regs[regno];
803 }
804
805 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
806 the lowest memory address. COUNT is the number of registers to be
807 moved, with register REGNO + I being moved if bit I of MASK is set.
808 STORE_P specifies the direction of the move and ADJUST_STACK_P says
809 whether or not this is pre-decrement (if STORE_P) or post-increment
810 (if !STORE_P) operation. */
811
812 static rtx
813 m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
814 unsigned int count, unsigned int regno,
815 unsigned int mask, bool store_p, bool adjust_stack_p)
816 {
817 int i;
818 rtx body, addr, src, operands[2];
819 enum machine_mode mode;
820
821 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
822 mode = reg_raw_mode[regno];
823 i = 0;
824
825 if (adjust_stack_p)
826 {
827 src = plus_constant (base, (count
828 * GET_MODE_SIZE (mode)
829 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
830 XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
831 }
832
833 for (; mask != 0; mask >>= 1, regno++)
834 if (mask & 1)
835 {
836 addr = plus_constant (base, offset);
837 operands[!store_p] = gen_frame_mem (mode, addr);
838 operands[store_p] = gen_rtx_REG (mode, regno);
839 XVECEXP (body, 0, i++)
840 = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
841 offset += GET_MODE_SIZE (mode);
842 }
843 gcc_assert (i == XVECLEN (body, 0));
844
845 return emit_insn (body);
846 }
847
848 /* Make INSN a frame-related instruction. */
849
850 static void
851 m68k_set_frame_related (rtx insn)
852 {
853 rtx body;
854 int i;
855
856 RTX_FRAME_RELATED_P (insn) = 1;
857 body = PATTERN (insn);
858 if (GET_CODE (body) == PARALLEL)
859 for (i = 0; i < XVECLEN (body, 0); i++)
860 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
861 }
862
863 /* Emit RTL for the "prologue" define_expand. */
864
865 void
866 m68k_expand_prologue (void)
867 {
868 HOST_WIDE_INT fsize_with_regs;
869 rtx limit, src, dest, insn;
870
871 m68k_compute_frame_layout ();
872
873 /* If the stack limit is a symbol, we can check it here,
874 before actually allocating the space. */
875 if (current_function_limit_stack
876 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
877 {
878 limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
879 if (!LEGITIMATE_CONSTANT_P (limit))
880 {
881 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
882 limit = gen_rtx_REG (Pmode, D0_REG);
883 }
884 emit_insn (gen_cmpsi (stack_pointer_rtx, limit));
885 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
886 cc0_rtx, const0_rtx),
887 const1_rtx));
888 }
889
890 fsize_with_regs = current_frame.size;
891 if (TARGET_COLDFIRE)
892 {
893 /* ColdFire's move multiple instructions do not allow pre-decrement
894 addressing. Add the size of movem saves to the initial stack
895 allocation instead. */
896 if (current_frame.reg_no >= MIN_MOVEM_REGS)
897 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
898 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
899 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
900 }
901
902 if (frame_pointer_needed)
903 {
904 if (fsize_with_regs == 0 && TUNE_68040)
905 {
906 /* On the 68040, two separate moves are faster than link.w 0. */
907 dest = gen_frame_mem (Pmode,
908 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
909 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
910 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
911 stack_pointer_rtx));
912 }
913 else if (fsize_with_regs < 0x8000 || TARGET_68020)
914 m68k_set_frame_related
915 (emit_insn (gen_link (frame_pointer_rtx,
916 GEN_INT (-4 - fsize_with_regs))));
917 else
918 {
919 m68k_set_frame_related
920 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
921 m68k_set_frame_related
922 (emit_insn (gen_addsi3 (stack_pointer_rtx,
923 stack_pointer_rtx,
924 GEN_INT (-fsize_with_regs))));
925 }
926 }
927 else if (fsize_with_regs != 0)
928 m68k_set_frame_related
929 (emit_insn (gen_addsi3 (stack_pointer_rtx,
930 stack_pointer_rtx,
931 GEN_INT (-fsize_with_regs))));
932
933 if (current_frame.fpu_mask)
934 {
935 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
936 if (TARGET_68881)
937 m68k_set_frame_related
938 (m68k_emit_movem (stack_pointer_rtx,
939 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
940 current_frame.fpu_no, FP0_REG,
941 current_frame.fpu_mask, true, true));
942 else
943 {
944 int offset;
945
946 /* If we're using moveml to save the integer registers,
947 the stack pointer will point to the bottom of the moveml
948 save area. Find the stack offset of the first FP register. */
949 if (current_frame.reg_no < MIN_MOVEM_REGS)
950 offset = 0;
951 else
952 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
953 m68k_set_frame_related
954 (m68k_emit_movem (stack_pointer_rtx, offset,
955 current_frame.fpu_no, FP0_REG,
956 current_frame.fpu_mask, true, false));
957 }
958 }
959
960 /* If the stack limit is not a symbol, check it here.
961 This has the disadvantage that it may be too late... */
962 if (current_function_limit_stack)
963 {
964 if (REG_P (stack_limit_rtx))
965 {
966 emit_insn (gen_cmpsi (stack_pointer_rtx, stack_limit_rtx));
967 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
968 cc0_rtx, const0_rtx),
969 const1_rtx));
970 }
971 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
972 warning (0, "stack limit expression is not supported");
973 }
974
975 if (current_frame.reg_no < MIN_MOVEM_REGS)
976 {
977 /* Store each register separately in the same order moveml does. */
978 int i;
979
980 for (i = 16; i-- > 0; )
981 if (current_frame.reg_mask & (1 << i))
982 {
983 src = gen_rtx_REG (SImode, D0_REG + i);
984 dest = gen_frame_mem (SImode,
985 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
986 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
987 }
988 }
989 else
990 {
991 if (TARGET_COLDFIRE)
992 /* The required register save space has already been allocated.
993 The first register should be stored at (%sp). */
994 m68k_set_frame_related
995 (m68k_emit_movem (stack_pointer_rtx, 0,
996 current_frame.reg_no, D0_REG,
997 current_frame.reg_mask, true, false));
998 else
999 m68k_set_frame_related
1000 (m68k_emit_movem (stack_pointer_rtx,
1001 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1002 current_frame.reg_no, D0_REG,
1003 current_frame.reg_mask, true, true));
1004 }
1005
1006 if (flag_pic
1007 && !TARGET_SEP_DATA
1008 && current_function_uses_pic_offset_table)
1009 {
1010 insn = emit_insn (gen_load_got (pic_offset_table_rtx));
1011 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
1012 const0_rtx,
1013 REG_NOTES (insn));
1014 }
1015 }
1016 \f
1017 /* Return true if a simple (return) instruction is sufficient for this
1018 function (i.e. if no epilogue is needed). */
1019
1020 bool
1021 m68k_use_return_insn (void)
1022 {
1023 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1024 return false;
1025
1026 m68k_compute_frame_layout ();
1027 return current_frame.offset == 0;
1028 }
1029
1030 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1031 SIBCALL_P says which.
1032
1033 The function epilogue should not depend on the current stack pointer!
1034 It should use the frame pointer only, if there is a frame pointer.
1035 This is mandatory because of alloca; we also take advantage of it to
1036 omit stack adjustments before returning. */
1037
1038 void
1039 m68k_expand_epilogue (bool sibcall_p)
1040 {
1041 HOST_WIDE_INT fsize, fsize_with_regs;
1042 bool big, restore_from_sp;
1043
1044 m68k_compute_frame_layout ();
1045
1046 fsize = current_frame.size;
1047 big = false;
1048 restore_from_sp = false;
1049
1050 /* FIXME: current_function_is_leaf below is too strong.
1051 What we really need to know is whether there could be a pending
1052 stack adjustment needed at that point. */
1053 restore_from_sp = (!frame_pointer_needed
1054 || (!current_function_calls_alloca
1055 && current_function_is_leaf));
1056
1057 /* fsize_with_regs is the size we need to adjust the sp when
1058 popping the frame. */
1059 fsize_with_regs = fsize;
1060 if (TARGET_COLDFIRE && restore_from_sp)
1061 {
1062 /* ColdFire's move multiple instructions do not allow post-increment
1063 addressing. Add the size of movem loads to the final deallocation
1064 instead. */
1065 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1066 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1067 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1068 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
1069 }
1070
1071 if (current_frame.offset + fsize >= 0x8000
1072 && !restore_from_sp
1073 && (current_frame.reg_mask || current_frame.fpu_mask))
1074 {
1075 if (TARGET_COLDFIRE
1076 && (current_frame.reg_no >= MIN_MOVEM_REGS
1077 || current_frame.fpu_no >= MIN_FMOVEM_REGS))
1078 {
1079 /* ColdFire's move multiple instructions do not support the
1080 (d8,Ax,Xi) addressing mode, so we might as well use a normal
1081 stack-based restore. */
1082 emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
1083 GEN_INT (-(current_frame.offset + fsize)));
1084 emit_insn (gen_addsi3 (stack_pointer_rtx,
1085 gen_rtx_REG (Pmode, A1_REG),
1086 frame_pointer_rtx));
1087 restore_from_sp = true;
1088 }
1089 else
1090 {
1091 emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
1092 fsize = 0;
1093 big = true;
1094 }
1095 }
1096
1097 if (current_frame.reg_no < MIN_MOVEM_REGS)
1098 {
1099 /* Restore each register separately in the same order moveml does. */
1100 int i;
1101 HOST_WIDE_INT offset;
1102
1103 offset = current_frame.offset + fsize;
1104 for (i = 0; i < 16; i++)
1105 if (current_frame.reg_mask & (1 << i))
1106 {
1107 rtx addr;
1108
1109 if (big)
1110 {
1111 /* Generate the address -OFFSET(%fp,%a1.l). */
1112 addr = gen_rtx_REG (Pmode, A1_REG);
1113 addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
1114 addr = plus_constant (addr, -offset);
1115 }
1116 else if (restore_from_sp)
1117 addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
1118 else
1119 addr = plus_constant (frame_pointer_rtx, -offset);
1120 emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
1121 gen_frame_mem (SImode, addr));
1122 offset -= GET_MODE_SIZE (SImode);
1123 }
1124 }
1125 else if (current_frame.reg_mask)
1126 {
1127 if (big)
1128 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1129 gen_rtx_REG (Pmode, A1_REG),
1130 frame_pointer_rtx),
1131 -(current_frame.offset + fsize),
1132 current_frame.reg_no, D0_REG,
1133 current_frame.reg_mask, false, false);
1134 else if (restore_from_sp)
1135 m68k_emit_movem (stack_pointer_rtx, 0,
1136 current_frame.reg_no, D0_REG,
1137 current_frame.reg_mask, false,
1138 !TARGET_COLDFIRE);
1139 else
1140 m68k_emit_movem (frame_pointer_rtx,
1141 -(current_frame.offset + fsize),
1142 current_frame.reg_no, D0_REG,
1143 current_frame.reg_mask, false, false);
1144 }
1145
1146 if (current_frame.fpu_no > 0)
1147 {
1148 if (big)
1149 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1150 gen_rtx_REG (Pmode, A1_REG),
1151 frame_pointer_rtx),
1152 -(current_frame.foffset + fsize),
1153 current_frame.fpu_no, FP0_REG,
1154 current_frame.fpu_mask, false, false);
1155 else if (restore_from_sp)
1156 {
1157 if (TARGET_COLDFIRE)
1158 {
1159 int offset;
1160
1161 /* If we used moveml to restore the integer registers, the
1162 stack pointer will still point to the bottom of the moveml
1163 save area. Find the stack offset of the first FP
1164 register. */
1165 if (current_frame.reg_no < MIN_MOVEM_REGS)
1166 offset = 0;
1167 else
1168 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1169 m68k_emit_movem (stack_pointer_rtx, offset,
1170 current_frame.fpu_no, FP0_REG,
1171 current_frame.fpu_mask, false, false);
1172 }
1173 else
1174 m68k_emit_movem (stack_pointer_rtx, 0,
1175 current_frame.fpu_no, FP0_REG,
1176 current_frame.fpu_mask, false, true);
1177 }
1178 else
1179 m68k_emit_movem (frame_pointer_rtx,
1180 -(current_frame.foffset + fsize),
1181 current_frame.fpu_no, FP0_REG,
1182 current_frame.fpu_mask, false, false);
1183 }
1184
1185 if (frame_pointer_needed)
1186 emit_insn (gen_unlink (frame_pointer_rtx));
1187 else if (fsize_with_regs)
1188 emit_insn (gen_addsi3 (stack_pointer_rtx,
1189 stack_pointer_rtx,
1190 GEN_INT (fsize_with_regs)));
1191
1192 if (current_function_calls_eh_return)
1193 emit_insn (gen_addsi3 (stack_pointer_rtx,
1194 stack_pointer_rtx,
1195 EH_RETURN_STACKADJ_RTX));
1196
1197 if (!sibcall_p)
1198 emit_insn (gen_rtx_RETURN (VOIDmode));
1199 }
1200 \f
1201 /* Return true if X is a valid comparison operator for the dbcc
1202 instruction.
1203
1204 Note it rejects floating point comparison operators.
1205 (In the future we could use Fdbcc).
1206
1207 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1208
1209 int
1210 valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1211 {
1212 switch (GET_CODE (x))
1213 {
1214 case EQ: case NE: case GTU: case LTU:
1215 case GEU: case LEU:
1216 return 1;
1217
1218 /* Reject some when CC_NO_OVERFLOW is set. This may be overly
1219 conservative. */
1220 case GT: case LT: case GE: case LE:
1221 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1222 default:
1223 return 0;
1224 }
1225 }
1226
1227 /* Return nonzero if flags are currently in the 68881 flag register. */
1228 int
1229 flags_in_68881 (void)
1230 {
1231 /* We could add support for these in the future. */
1232 return cc_status.flags & CC_IN_68881;
1233 }
1234
1235 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. We cannot use sibcalls
1236 for nested functions because we use the static chain register for
1237 indirect calls. */
1238
1239 static bool
1240 m68k_ok_for_sibcall_p (tree decl ATTRIBUTE_UNUSED, tree exp)
1241 {
1242 return TREE_OPERAND (exp, 2) == NULL;
1243 }
1244
1245 /* Convert X to a legitimate function call memory reference and return the
1246 result. */
1247
1248 rtx
1249 m68k_legitimize_call_address (rtx x)
1250 {
1251 gcc_assert (MEM_P (x));
1252 if (call_operand (XEXP (x, 0), VOIDmode))
1253 return x;
1254 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1255 }
1256
1257 /* Likewise for sibling calls. */
1258
1259 rtx
1260 m68k_legitimize_sibcall_address (rtx x)
1261 {
1262 gcc_assert (MEM_P (x));
1263 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1264 return x;
1265
1266 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1267 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1268 }
1269
1270 /* Output a dbCC; jCC sequence. Note we do not handle the
1271 floating point version of this sequence (Fdbcc). We also
1272 do not handle alternative conditions when CC_NO_OVERFLOW is
1273 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1274 kick those out before we get here. */
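/* For the EQ case, for example, the emitted sequence (Motorola syntax) is

       dbeq %0,%l1
       jbeq %l2

   dbeq keeps branching back to %l1 (decrementing the counter in %0) while
   the condition is false and the counter has not run out; once the
   condition holds, dbeq falls through and jbeq transfers to %l2. */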
1275
1276 void
1277 output_dbcc_and_branch (rtx *operands)
1278 {
1279 switch (GET_CODE (operands[3]))
1280 {
1281 case EQ:
1282 output_asm_insn (MOTOROLA
1283 ? "dbeq %0,%l1\n\tjbeq %l2"
1284 : "dbeq %0,%l1\n\tjeq %l2",
1285 operands);
1286 break;
1287
1288 case NE:
1289 output_asm_insn (MOTOROLA
1290 ? "dbne %0,%l1\n\tjbne %l2"
1291 : "dbne %0,%l1\n\tjne %l2",
1292 operands);
1293 break;
1294
1295 case GT:
1296 output_asm_insn (MOTOROLA
1297 ? "dbgt %0,%l1\n\tjbgt %l2"
1298 : "dbgt %0,%l1\n\tjgt %l2",
1299 operands);
1300 break;
1301
1302 case GTU:
1303 output_asm_insn (MOTOROLA
1304 ? "dbhi %0,%l1\n\tjbhi %l2"
1305 : "dbhi %0,%l1\n\tjhi %l2",
1306 operands);
1307 break;
1308
1309 case LT:
1310 output_asm_insn (MOTOROLA
1311 ? "dblt %0,%l1\n\tjblt %l2"
1312 : "dblt %0,%l1\n\tjlt %l2",
1313 operands);
1314 break;
1315
1316 case LTU:
1317 output_asm_insn (MOTOROLA
1318 ? "dbcs %0,%l1\n\tjbcs %l2"
1319 : "dbcs %0,%l1\n\tjcs %l2",
1320 operands);
1321 break;
1322
1323 case GE:
1324 output_asm_insn (MOTOROLA
1325 ? "dbge %0,%l1\n\tjbge %l2"
1326 : "dbge %0,%l1\n\tjge %l2",
1327 operands);
1328 break;
1329
1330 case GEU:
1331 output_asm_insn (MOTOROLA
1332 ? "dbcc %0,%l1\n\tjbcc %l2"
1333 : "dbcc %0,%l1\n\tjcc %l2",
1334 operands);
1335 break;
1336
1337 case LE:
1338 output_asm_insn (MOTOROLA
1339 ? "dble %0,%l1\n\tjble %l2"
1340 : "dble %0,%l1\n\tjle %l2",
1341 operands);
1342 break;
1343
1344 case LEU:
1345 output_asm_insn (MOTOROLA
1346 ? "dbls %0,%l1\n\tjbls %l2"
1347 : "dbls %0,%l1\n\tjls %l2",
1348 operands);
1349 break;
1350
1351 default:
1352 gcc_unreachable ();
1353 }
1354
1355 /* If the decrement is to be done in SImode, then we have
1356 to compensate for the fact that dbcc decrements in HImode. */
1357 switch (GET_MODE (operands[0]))
1358 {
1359 case SImode:
1360 output_asm_insn (MOTOROLA
1361 ? "clr%.w %0\n\tsubq%.l #1,%0\n\tjbpl %l1"
1362 : "clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1",
1363 operands);
1364 break;
1365
1366 case HImode:
1367 break;
1368
1369 default:
1370 gcc_unreachable ();
1371 }
1372 }
1373
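/* Output assembler code to set byte DEST according to comparison OP of the
   DImode value OPERAND1 with OPERAND2, comparing the two SImode halves in
   turn. All code is emitted via output_asm_insn, so the returned template
   is empty. */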
1374 const char *
1375 output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
1376 {
1377 rtx loperands[7];
1378 enum rtx_code op_code = GET_CODE (op);
1379
1380 /* This does not produce a useful cc. */
1381 CC_STATUS_INIT;
1382
1383 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1384 below. Swap the operands and change the op if these requirements
1385 are not fulfilled. */
1386 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1387 {
1388 rtx tmp = operand1;
1389
1390 operand1 = operand2;
1391 operand2 = tmp;
1392 op_code = swap_condition (op_code);
1393 }
1394 loperands[0] = operand1;
1395 if (GET_CODE (operand1) == REG)
1396 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
1397 else
1398 loperands[1] = adjust_address (operand1, SImode, 4);
1399 if (operand2 != const0_rtx)
1400 {
1401 loperands[2] = operand2;
1402 if (GET_CODE (operand2) == REG)
1403 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
1404 else
1405 loperands[3] = adjust_address (operand2, SImode, 4);
1406 }
1407 loperands[4] = gen_label_rtx ();
1408 if (operand2 != const0_rtx)
1409 {
1410 output_asm_insn (MOTOROLA
1411 ? "cmp%.l %2,%0\n\tjbne %l4\n\tcmp%.l %3,%1"
1412 : "cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1",
1413 loperands);
1414 }
1415 else
1416 {
1417 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
1418 output_asm_insn ("tst%.l %0", loperands);
1419 else
1420 output_asm_insn ("cmp%.w #0,%0", loperands);
1421
1422 output_asm_insn (MOTOROLA ? "jbne %l4" : "jne %l4", loperands);
1423
1424 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
1425 output_asm_insn ("tst%.l %1", loperands);
1426 else
1427 output_asm_insn ("cmp%.w #0,%1", loperands);
1428 }
1429
1430 loperands[5] = dest;
1431
1432 switch (op_code)
1433 {
1434 case EQ:
1435 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1436 CODE_LABEL_NUMBER (loperands[4]));
1437 output_asm_insn ("seq %5", loperands);
1438 break;
1439
1440 case NE:
1441 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1442 CODE_LABEL_NUMBER (loperands[4]));
1443 output_asm_insn ("sne %5", loperands);
1444 break;
1445
1446 case GT:
1447 loperands[6] = gen_label_rtx ();
1448 output_asm_insn (MOTOROLA ? "shi %5\n\tjbra %l6" : "shi %5\n\tjra %l6",
1449 loperands);
1450 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1451 CODE_LABEL_NUMBER (loperands[4]));
1452 output_asm_insn ("sgt %5", loperands);
1453 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1454 CODE_LABEL_NUMBER (loperands[6]));
1455 break;
1456
1457 case GTU:
1458 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1459 CODE_LABEL_NUMBER (loperands[4]));
1460 output_asm_insn ("shi %5", loperands);
1461 break;
1462
1463 case LT:
1464 loperands[6] = gen_label_rtx ();
1465 output_asm_insn (MOTOROLA ? "scs %5\n\tjbra %l6" : "scs %5\n\tjra %l6",
1466 loperands);
1467 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1468 CODE_LABEL_NUMBER (loperands[4]));
1469 output_asm_insn ("slt %5", loperands);
1470 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1471 CODE_LABEL_NUMBER (loperands[6]));
1472 break;
1473
1474 case LTU:
1475 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1476 CODE_LABEL_NUMBER (loperands[4]));
1477 output_asm_insn ("scs %5", loperands);
1478 break;
1479
1480 case GE:
1481 loperands[6] = gen_label_rtx ();
1482 output_asm_insn (MOTOROLA ? "scc %5\n\tjbra %l6" : "scc %5\n\tjra %l6",
1483 loperands);
1484 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1485 CODE_LABEL_NUMBER (loperands[4]));
1486 output_asm_insn ("sge %5", loperands);
1487 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1488 CODE_LABEL_NUMBER (loperands[6]));
1489 break;
1490
1491 case GEU:
1492 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1493 CODE_LABEL_NUMBER (loperands[4]));
1494 output_asm_insn ("scc %5", loperands);
1495 break;
1496
1497 case LE:
1498 loperands[6] = gen_label_rtx ();
1499 output_asm_insn (MOTOROLA ? "sls %5\n\tjbra %l6" : "sls %5\n\tjra %l6",
1500 loperands);
1501 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1502 CODE_LABEL_NUMBER (loperands[4]));
1503 output_asm_insn ("sle %5", loperands);
1504 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1505 CODE_LABEL_NUMBER (loperands[6]));
1506 break;
1507
1508 case LEU:
1509 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1510 CODE_LABEL_NUMBER (loperands[4]));
1511 output_asm_insn ("sls %5", loperands);
1512 break;
1513
1514 default:
1515 gcc_unreachable ();
1516 }
1517 return "";
1518 }
1519
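/* Output a test of bit COUNTOP of operand DATAOP for INSN, setting
   cc_status accordingly. SIGNPOS is the bit number of the sign bit in the
   mode being tested; when the tested bit allows it, emit a cheaper tst or
   move-to-ccr sequence instead of btst. */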
1520 const char *
1521 output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
1522 {
1523 operands[0] = countop;
1524 operands[1] = dataop;
1525
1526 if (GET_CODE (countop) == CONST_INT)
1527 {
1528 register int count = INTVAL (countop);
1529 /* If COUNT is bigger than the size of the storage unit in use,
1530 advance to the containing unit of the same size. */
1531 if (count > signpos)
1532 {
1533 int offset = (count & ~signpos) / 8;
1534 count = count & signpos;
1535 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1536 }
1537 if (count == signpos)
1538 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1539 else
1540 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1541
1542 /* These three statements used to use next_insns_test_no...
1543 but it appears that this should do the same job. */
1544 if (count == 31
1545 && next_insn_tests_no_inequality (insn))
1546 return "tst%.l %1";
1547 if (count == 15
1548 && next_insn_tests_no_inequality (insn))
1549 return "tst%.w %1";
1550 if (count == 7
1551 && next_insn_tests_no_inequality (insn))
1552 return "tst%.b %1";
1553 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1554 On some m68k variants unfortunately that's slower than btst.
1555 On 68000 and higher, that should also work for all HImode operands. */
1556 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1557 {
1558 if (count == 3 && DATA_REG_P (operands[1])
1559 && next_insn_tests_no_inequality (insn))
1560 {
1561 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1562 return "move%.w %1,%%ccr";
1563 }
1564 if (count == 2 && DATA_REG_P (operands[1])
1565 && next_insn_tests_no_inequality (insn))
1566 {
1567 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1568 return "move%.w %1,%%ccr";
1569 }
1570 /* count == 1 followed by bvc/bvs and
1571 count == 0 followed by bcc/bcs are also possible, but need
1572 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1573 }
1574
1575 cc_status.flags = CC_NOT_NEGATIVE;
1576 }
1577 return "btst %0,%1";
1578 }
1579 \f
1580 /* Return true if X is a legitimate base register. STRICT_P says
1581 whether we need strict checking. */
1582
1583 bool
1584 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1585 {
1586 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1587 if (!strict_p && GET_CODE (x) == SUBREG)
1588 x = SUBREG_REG (x);
1589
1590 return (REG_P (x)
1591 && (strict_p
1592 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1593 : !DATA_REGNO_P (REGNO (x)) && !FP_REGNO_P (REGNO (x))));
1594 }
1595
1596 /* Return true if X is a legitimate index register. STRICT_P says
1597 whether we need strict checking. */
1598
1599 bool
1600 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1601 {
1602 if (!strict_p && GET_CODE (x) == SUBREG)
1603 x = SUBREG_REG (x);
1604
1605 return (REG_P (x)
1606 && (strict_p
1607 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1608 : !FP_REGNO_P (REGNO (x))));
1609 }
1610
1611 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1612 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1613 ADDRESS if so. STRICT_P says whether we need strict checking. */
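/* For example, the index expression (mult:SI (sign_extend:SI (reg:HI %d1))
   (const_int 4)) yields index = %d1 and scale = 4, i.e. the %d1.w*4 form. */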
1614
1615 static bool
1616 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1617 {
1618 int scale;
1619
1620 /* Check for a scale factor. */
1621 scale = 1;
1622 if ((TARGET_68020 || TARGET_COLDFIRE)
1623 && GET_CODE (x) == MULT
1624 && GET_CODE (XEXP (x, 1)) == CONST_INT
1625 && (INTVAL (XEXP (x, 1)) == 2
1626 || INTVAL (XEXP (x, 1)) == 4
1627 || (INTVAL (XEXP (x, 1)) == 8
1628 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1629 {
1630 scale = INTVAL (XEXP (x, 1));
1631 x = XEXP (x, 0);
1632 }
1633
1634 /* Check for a word extension. */
1635 if (!TARGET_COLDFIRE
1636 && GET_CODE (x) == SIGN_EXTEND
1637 && GET_MODE (XEXP (x, 0)) == HImode)
1638 x = XEXP (x, 0);
1639
1640 if (m68k_legitimate_index_reg_p (x, strict_p))
1641 {
1642 address->scale = scale;
1643 address->index = x;
1644 return true;
1645 }
1646
1647 return false;
1648 }
1649
1650 /* Return true if X is an illegitimate symbolic constant. */
1651
1652 bool
1653 m68k_illegitimate_symbolic_constant_p (rtx x)
1654 {
1655 rtx base, offset;
1656
1657 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1658 {
1659 split_const (x, &base, &offset);
1660 if (GET_CODE (base) == SYMBOL_REF
1661 && !offset_within_block_p (base, INTVAL (offset)))
1662 return true;
1663 }
1664 return false;
1665 }
1666
1667 /* Return true if X is a legitimate constant address that can reach
1668 bytes in the range [X, X + REACH). STRICT_P says whether we need
1669 strict checking. */
1670
1671 static bool
1672 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1673 {
1674 rtx base, offset;
1675
1676 if (!CONSTANT_ADDRESS_P (x))
1677 return false;
1678
1679 if (flag_pic
1680 && !(strict_p && TARGET_PCREL)
1681 && symbolic_operand (x, VOIDmode))
1682 return false;
1683
1684 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1685 {
1686 split_const (x, &base, &offset);
1687 if (GET_CODE (base) == SYMBOL_REF
1688 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1689 return false;
1690 }
1691
1692 return true;
1693 }
1694
1695 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1696 labels will become jump tables. */
1697
1698 static bool
1699 m68k_jump_table_ref_p (rtx x)
1700 {
1701 if (GET_CODE (x) != LABEL_REF)
1702 return false;
1703
1704 x = XEXP (x, 0);
1705 if (!NEXT_INSN (x) && !PREV_INSN (x))
1706 return true;
1707
1708 x = next_nonnote_insn (x);
1709 return x && JUMP_TABLE_DATA_P (x);
1710 }
1711
1712 /* Return true if X is a legitimate address for values of mode MODE.
1713 STRICT_P says whether strict checking is needed. If the address
1714 is valid, describe its components in *ADDRESS. */
1715
1716 static bool
1717 m68k_decompose_address (enum machine_mode mode, rtx x,
1718 bool strict_p, struct m68k_address *address)
1719 {
1720 unsigned int reach;
1721
1722 memset (address, 0, sizeof (*address));
1723
1724 if (mode == BLKmode)
1725 reach = 1;
1726 else
1727 reach = GET_MODE_SIZE (mode);
1728
1729 /* Check for (An) (mode 2). */
1730 if (m68k_legitimate_base_reg_p (x, strict_p))
1731 {
1732 address->base = x;
1733 return true;
1734 }
1735
1736 /* Check for -(An) and (An)+ (modes 3 and 4). */
1737 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1738 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1739 {
1740 address->code = GET_CODE (x);
1741 address->base = XEXP (x, 0);
1742 return true;
1743 }
1744
1745 /* Check for (d16,An) (mode 5). */
1746 if (GET_CODE (x) == PLUS
1747 && GET_CODE (XEXP (x, 1)) == CONST_INT
1748 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1749 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1750 {
1751 address->base = XEXP (x, 0);
1752 address->offset = XEXP (x, 1);
1753 return true;
1754 }
1755
1756 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1757 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1758 addresses. */
1759 if (flag_pic
1760 && GET_CODE (x) == PLUS
1761 && XEXP (x, 0) == pic_offset_table_rtx
1762 && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
1763 || GET_CODE (XEXP (x, 1)) == LABEL_REF))
1764 {
1765 address->base = XEXP (x, 0);
1766 address->offset = XEXP (x, 1);
1767 return true;
1768 }
1769
1770 /* The ColdFire FPU only accepts addressing modes 2-5. */
1771 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1772 return false;
1773
1774 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
1775 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
1776 All these modes are variations of mode 7. */
1777 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
1778 {
1779 address->offset = x;
1780 return true;
1781 }
1782
1783 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
1784 tablejumps.
1785
1786 ??? do_tablejump creates these addresses before placing the target
1787 label, so we have to assume that unplaced labels are jump table
1788 references. It seems unlikely that we would ever generate indexed
1789 accesses to unplaced labels in other cases. */
1790 if (GET_CODE (x) == PLUS
1791 && m68k_jump_table_ref_p (XEXP (x, 1))
1792 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
1793 {
1794 address->offset = XEXP (x, 1);
1795 return true;
1796 }
1797
1798 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
1799 (bd,An,Xn.SIZE*SCALE) addresses. */
1800
1801 if (TARGET_68020)
1802 {
1803 /* Check for a nonzero base displacement. */
1804 if (GET_CODE (x) == PLUS
1805 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
1806 {
1807 address->offset = XEXP (x, 1);
1808 x = XEXP (x, 0);
1809 }
1810
1811 /* Check for a suppressed index register. */
1812 if (m68k_legitimate_base_reg_p (x, strict_p))
1813 {
1814 address->base = x;
1815 return true;
1816 }
1817
1818 /* Check for a suppressed base register. Do not allow this case
1819 for non-symbolic offsets as it effectively gives gcc freedom
1820 to treat data registers as base registers, which can generate
1821 worse code. */
1822 if (address->offset
1823 && symbolic_operand (address->offset, VOIDmode)
1824 && m68k_decompose_index (x, strict_p, address))
1825 return true;
1826 }
1827 else
1828 {
1829 /* Check for a nonzero base displacement. */
1830 if (GET_CODE (x) == PLUS
1831 && GET_CODE (XEXP (x, 1)) == CONST_INT
1832 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
1833 {
1834 address->offset = XEXP (x, 1);
1835 x = XEXP (x, 0);
1836 }
1837 }
1838
1839 /* We now expect the sum of a base and an index. */
1840 if (GET_CODE (x) == PLUS)
1841 {
1842 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
1843 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
1844 {
1845 address->base = XEXP (x, 0);
1846 return true;
1847 }
1848
1849 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
1850 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
1851 {
1852 address->base = XEXP (x, 1);
1853 return true;
1854 }
1855 }
1856 return false;
1857 }
1858
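/* A few hand-worked examples of how m68k_decompose_address fills in
   *ADDRESS (illustrative only; the register choices are arbitrary):

     (reg:SI %a0)                          base = %a0             (An)
     (pre_dec:SI (reg:SI %sp))             code = PRE_DEC,
                                           base = %sp             -(An)
     (plus:SI (reg:SI %a0) (const_int 8))  base = %a0, offset = 8 (d16,An)
     (plus:SI (reg:SI %a0)
              (mult:SI (reg:SI %d1)
                       (const_int 4)))     base = %a0, index = %d1,
                                           scale = 4 on a 68020; whether a
                                           scaled index is accepted is
                                           decided by m68k_decompose_index.  */
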
1859 /* Return true if X is a legitimate address for values of mode MODE.
1860 STRICT_P says whether strict checking is needed. */
1861
1862 bool
1863 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1864 {
1865 struct m68k_address address;
1866
1867 return m68k_decompose_address (mode, x, strict_p, &address);
1868 }
1869
1870 /* Return true if X is a memory, describing its address in ADDRESS if so.
1871 Apply strict checking if called during or after reload. */
1872
1873 static bool
1874 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
1875 {
1876 return (MEM_P (x)
1877 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
1878 reload_in_progress || reload_completed,
1879 address));
1880 }
1881
1882 /* Return true if X matches the 'Q' constraint. It must be a memory
1883 with a base address and no constant offset or index. */
1884
1885 bool
1886 m68k_matches_q_p (rtx x)
1887 {
1888 struct m68k_address address;
1889
1890 return (m68k_legitimate_mem_p (x, &address)
1891 && address.code == UNKNOWN
1892 && address.base
1893 && !address.offset
1894 && !address.index);
1895 }
1896
1897 /* Return true if X matches the 'U' constraint. It must be a base address
1898 with a constant offset and no index. */
1899
1900 bool
1901 m68k_matches_u_p (rtx x)
1902 {
1903 struct m68k_address address;
1904
1905 return (m68k_legitimate_mem_p (x, &address)
1906 && address.code == UNKNOWN
1907 && address.base
1908 && address.offset
1909 && !address.index);
1910 }
1911
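/* Illustrative memory operands for the two constraints above (the
   register choices are arbitrary):

     'Q'  (mem:SI (reg:SI %a0))                           plain (An)
     'U'  (mem:SI (plus:SI (reg:SI %a0) (const_int 8)))   (d16,An)

   An autoincrement access such as (mem:SI (post_inc:SI (reg:SI %a0)))
   matches neither constraint, because ADDRESS.code is then POST_INC
   rather than UNKNOWN.  */
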
1912 /* Legitimize PIC addresses. If the address is already
1913 position-independent, we return ORIG. Newly generated
1914 position-independent addresses go to REG. If we need more
1915 than one register, we lose.
1916
1917 An address is legitimized by making an indirect reference
1918 through the Global Offset Table with the name of the symbol
1919 used as an offset.
1920
1921 The assembler and linker are responsible for placing the
1922 address of the symbol in the GOT. The function prologue
1923 is responsible for initializing a5 to the starting address
1924 of the GOT.
1925
1926 The assembler is also responsible for translating a symbol name
1927 into a constant displacement from the start of the GOT.
1928
1929 A quick example may make things a little clearer:
1930
1931 When not generating PIC code, to store the value 12345 into _foo
1932 we would generate the following code:
1933
1934 movel #12345, _foo
1935
1936 When generating PIC code, two transformations are made. First, the compiler
1937 loads the address of _foo into a register, so the first transformation produces:
1938
1939 lea _foo, a0
1940 movel #12345, a0@
1941
1942 The code in movsi will intercept the lea instruction and call this
1943 routine which will transform the instructions into:
1944
1945 movel a5@(_foo:w), a0
1946 movel #12345, a0@
1947
1948
1949 That (in a nutshell) is how *all* symbol and label references are
1950 handled. */
1951
1952 rtx
1953 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
1954 rtx reg)
1955 {
1956 rtx pic_ref = orig;
1957
1958 /* First handle a simple SYMBOL_REF or LABEL_REF */
1959 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
1960 {
1961 gcc_assert (reg);
1962
1963 pic_ref = gen_rtx_MEM (Pmode,
1964 gen_rtx_PLUS (Pmode,
1965 pic_offset_table_rtx, orig));
1966 current_function_uses_pic_offset_table = 1;
1967 MEM_READONLY_P (pic_ref) = 1;
1968 emit_move_insn (reg, pic_ref);
1969 return reg;
1970 }
1971 else if (GET_CODE (orig) == CONST)
1972 {
1973 rtx base;
1974
1975 /* Make sure this has not already been legitimized. */
1976 if (GET_CODE (XEXP (orig, 0)) == PLUS
1977 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
1978 return orig;
1979
1980 gcc_assert (reg);
1981
1982 /* legitimize both operands of the PLUS */
1983 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
1984
1985 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
1986 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
1987 base == reg ? 0 : reg);
1988
1989 if (GET_CODE (orig) == CONST_INT)
1990 return plus_constant (base, INTVAL (orig));
1991 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
1992 /* Likewise, should we set special REG_NOTEs here? */
1993 }
1994 return pic_ref;
1995 }
1996
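/* A worked example of the CONST case above (a sketch; the exact RTL a
   front end produces may differ).  With %a5 as the PIC register,
   legitimizing

     (const (plus (symbol_ref "foo") (const_int 4)))

   first legitimizes the symbol_ref, loading its address from the GOT:

     (set (reg REG) (mem (plus (reg %a5) (symbol_ref "foo"))))

   and then, because the remaining operand is a CONST_INT, simply returns
   plus_constant (REG, 4), i.e. (plus (reg REG) (const_int 4)).  */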
1997 \f
1998 typedef enum { MOVL, SWAP, NEGW, NOTW, NOTB, MOVQ, MVS, MVZ } CONST_METHOD;
1999
2000 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2001
2002 /* Return the type of move that should be used for integer I. */
2003
2004 static CONST_METHOD
2005 const_method (HOST_WIDE_INT i)
2006 {
2007 unsigned u;
2008
2009 if (USE_MOVQ (i))
2010 return MOVQ;
2011
2012 /* The ColdFire doesn't have byte or word operations. */
2013 /* FIXME: This may not be useful for the m68060 either. */
2014 if (!TARGET_COLDFIRE)
2015 {
2016 /* If -256 < N < 256 but N is not in range for a moveq,
2017 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2018 if (USE_MOVQ (i ^ 0xff))
2019 return NOTB;
2020 /* Likewise, try with not.w */
2021 if (USE_MOVQ (i ^ 0xffff))
2022 return NOTW;
2023 /* This is the only value where neg.w is useful */
2024 if (i == -65408)
2025 return NEGW;
2026 }
2027
2028 /* Try also with swap. */
2029 u = i;
2030 if (USE_MOVQ ((u >> 16) | (u << 16)))
2031 return SWAP;
2032
2033 if (TARGET_ISAB)
2034 {
2035 /* Try using MVZ/MVS with an immediate value to load constants. */
2036 if (i >= 0 && i <= 65535)
2037 return MVZ;
2038 if (i >= -32768 && i <= 32767)
2039 return MVS;
2040 }
2041
2042 /* Otherwise, use move.l */
2043 return MOVL;
2044 }
2045
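/* Hand-worked examples of the classification above, for a 68020 target
   (so the NOTB/NOTW/NEGW alternatives are available); Motorola syntax,
   %d0 chosen arbitrarily:

     100         MOVQ   moveq #100,%d0
     255         NOTB   moveq #0,%d0;    not.b %d0
     65535       NOTW   moveq #0,%d0;    not.w %d0
     -65408      NEGW   moveq #-128,%d0; neg.w %d0
     0x10000     SWAP   moveq #1,%d0;    swap %d0
     0x12345678  MOVL   move.l #0x12345678,%d0  */
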
2046 /* Return the cost of moving constant I into a data register. */
2047
2048 static int
2049 const_int_cost (HOST_WIDE_INT i)
2050 {
2051 switch (const_method (i))
2052 {
2053 case MOVQ:
2054 /* Constants between -128 and 127 are cheap due to moveq. */
2055 return 0;
2056 case MVZ:
2057 case MVS:
2058 case NOTB:
2059 case NOTW:
2060 case NEGW:
2061 case SWAP:
2062 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2063 return 1;
2064 case MOVL:
2065 return 2;
2066 default:
2067 gcc_unreachable ();
2068 }
2069 }
2070
2071 static bool
2072 m68k_rtx_costs (rtx x, int code, int outer_code, int *total)
2073 {
2074 switch (code)
2075 {
2076 case CONST_INT:
2077 /* Constant zero is super cheap due to clr instruction. */
2078 if (x == const0_rtx)
2079 *total = 0;
2080 else
2081 *total = const_int_cost (INTVAL (x));
2082 return true;
2083
2084 case CONST:
2085 case LABEL_REF:
2086 case SYMBOL_REF:
2087 *total = 3;
2088 return true;
2089
2090 case CONST_DOUBLE:
2091 /* Make 0.0 cheaper than other floating constants to
2092 encourage creating tstsf and tstdf insns. */
2093 if (outer_code == COMPARE
2094 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2095 *total = 4;
2096 else
2097 *total = 5;
2098 return true;
2099
2100 /* These are vaguely right for a 68020. */
2101 /* The costs for long multiply have been adjusted to work properly
2102 in synth_mult on the 68020, relative to an average of the time
2103 for add and the time for shift, taking away a little more because
2104 sometimes move insns are needed. */
2105 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2106 terms. */
2107 #define MULL_COST \
2108 (TUNE_68060 ? 2 \
2109 : TUNE_68040 ? 5 \
2110 : TUNE_CFV2 ? 10 \
2111 : TARGET_COLDFIRE ? 3 : 13)
2112
2113 #define MULW_COST \
2114 (TUNE_68060 ? 2 \
2115 : TUNE_68040 ? 3 \
2116 : TUNE_68000_10 || TUNE_CFV2 ? 5 \
2117 : TARGET_COLDFIRE ? 2 : 8)
2118
2119 #define DIVW_COST \
2120 (TARGET_CF_HWDIV ? 11 \
2121 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2122
2123 case PLUS:
2124 /* An lea costs about three times as much as a simple add. */
2125 if (GET_MODE (x) == SImode
2126 && GET_CODE (XEXP (x, 1)) == REG
2127 && GET_CODE (XEXP (x, 0)) == MULT
2128 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2129 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2130 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2131 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2132 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2133 {
2134 /* lea an@(dx:l:i),am */
2135 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2136 return true;
2137 }
2138 return false;
2139
2140 case ASHIFT:
2141 case ASHIFTRT:
2142 case LSHIFTRT:
2143 if (TUNE_68060)
2144 {
2145 *total = COSTS_N_INSNS(1);
2146 return true;
2147 }
2148 if (TUNE_68000_10)
2149 {
2150 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2151 {
2152 if (INTVAL (XEXP (x, 1)) < 16)
2153 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2154 else
2155 /* We're using clrw + swap for these cases. */
2156 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2157 }
2158 else
2159 *total = COSTS_N_INSNS (10); /* Worst case. */
2160 return true;
2161 }
2162 /* A shift by exactly 16 takes an extra instruction (clr.w + swap). */
2163 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2164 && (INTVAL (XEXP (x, 1)) == 16))
2165 {
2166 *total = COSTS_N_INSNS (2); /* clrw;swap */
2167 return true;
2168 }
2169 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2170 && !(INTVAL (XEXP (x, 1)) > 0
2171 && INTVAL (XEXP (x, 1)) <= 8))
2172 {
2173 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2174 return true;
2175 }
2176 return false;
2177
2178 case MULT:
2179 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2180 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2181 && GET_MODE (x) == SImode)
2182 *total = COSTS_N_INSNS (MULW_COST);
2183 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2184 *total = COSTS_N_INSNS (MULW_COST);
2185 else
2186 *total = COSTS_N_INSNS (MULL_COST);
2187 return true;
2188
2189 case DIV:
2190 case UDIV:
2191 case MOD:
2192 case UMOD:
2193 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2194 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2195 else if (TARGET_CF_HWDIV)
2196 *total = COSTS_N_INSNS (18);
2197 else
2198 *total = COSTS_N_INSNS (43); /* div.l */
2199 return true;
2200
2201 default:
2202 return false;
2203 }
2204 }
2205
2206 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2207 OPERANDS[0]. */
2208
2209 static const char *
2210 output_move_const_into_data_reg (rtx *operands)
2211 {
2212 HOST_WIDE_INT i;
2213
2214 i = INTVAL (operands[1]);
2215 switch (const_method (i))
2216 {
2217 case MVZ:
2218 return "mvzw %1,%0";
2219 case MVS:
2220 return "mvsw %1,%0";
2221 case MOVQ:
2222 return "moveq %1,%0";
2223 case NOTB:
2224 CC_STATUS_INIT;
2225 operands[1] = GEN_INT (i ^ 0xff);
2226 return "moveq %1,%0\n\tnot%.b %0";
2227 case NOTW:
2228 CC_STATUS_INIT;
2229 operands[1] = GEN_INT (i ^ 0xffff);
2230 return "moveq %1,%0\n\tnot%.w %0";
2231 case NEGW:
2232 CC_STATUS_INIT;
2233 return "moveq #-128,%0\n\tneg%.w %0";
2234 case SWAP:
2235 {
2236 unsigned u = i;
2237
2238 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2239 return "moveq %1,%0\n\tswap %0";
2240 }
2241 case MOVL:
2242 return "move%.l %1,%0";
2243 default:
2244 gcc_unreachable ();
2245 }
2246 }
2247
2248 /* Return true if I can be handled by ISA B's mov3q instruction. */
2249
2250 bool
2251 valid_mov3q_const (HOST_WIDE_INT i)
2252 {
2253 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2254 }
2255
2256 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
2257 which may be a data register, an address register or a memory reference. */
2258
2259 static const char *
2260 output_move_simode_const (rtx *operands)
2261 {
2262 rtx dest;
2263 HOST_WIDE_INT src;
2264
2265 dest = operands[0];
2266 src = INTVAL (operands[1]);
2267 if (src == 0
2268 && (DATA_REG_P (dest) || MEM_P (dest))
2269 /* clr insns on 68000 read before writing. */
2270 && ((TARGET_68010 || TARGET_COLDFIRE)
2271 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2272 return "clr%.l %0";
2273 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
2274 return "mov3q%.l %1,%0";
2275 else if (src == 0 && ADDRESS_REG_P (dest))
2276 return "sub%.l %0,%0";
2277 else if (DATA_REG_P (dest))
2278 return output_move_const_into_data_reg (operands);
2279 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
2280 {
2281 if (valid_mov3q_const (src))
2282 return "mov3q%.l %1,%0";
2283 return "move%.w %1,%0";
2284 }
2285 else if (MEM_P (dest)
2286 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
2287 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2288 && IN_RANGE (src, -0x8000, 0x7fff))
2289 {
2290 if (valid_mov3q_const (src))
2291 return "mov3q%.l %1,%-";
2292 return "pea %a1";
2293 }
2294 return "move%.l %1,%0";
2295 }
2296
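/* Some example expansions of the routine above (Motorola syntax; the
   destination is arbitrary):

     0    into a data register (or memory)   clr.l %d0
     0    into an address register           sub.l %a0,%a0
     5    into a register, ISA B only        mov3q.l #5,%d0
     100  into a data register               moveq #100,%d0
     100  into an address register           move.w #100,%a0  */
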
2297 const char *
2298 output_move_simode (rtx *operands)
2299 {
2300 if (GET_CODE (operands[1]) == CONST_INT)
2301 return output_move_simode_const (operands);
2302 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2303 || GET_CODE (operands[1]) == CONST)
2304 && push_operand (operands[0], SImode))
2305 return "pea %a1";
2306 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2307 || GET_CODE (operands[1]) == CONST)
2308 && ADDRESS_REG_P (operands[0]))
2309 return "lea %a1,%0";
2310 return "move%.l %1,%0";
2311 }
2312
2313 const char *
2314 output_move_himode (rtx *operands)
2315 {
2316 if (GET_CODE (operands[1]) == CONST_INT)
2317 {
2318 if (operands[1] == const0_rtx
2319 && (DATA_REG_P (operands[0])
2320 || GET_CODE (operands[0]) == MEM)
2321 /* clr insns on 68000 read before writing. */
2322 && ((TARGET_68010 || TARGET_COLDFIRE)
2323 || !(GET_CODE (operands[0]) == MEM
2324 && MEM_VOLATILE_P (operands[0]))))
2325 return "clr%.w %0";
2326 else if (operands[1] == const0_rtx
2327 && ADDRESS_REG_P (operands[0]))
2328 return "sub%.l %0,%0";
2329 else if (DATA_REG_P (operands[0])
2330 && INTVAL (operands[1]) < 128
2331 && INTVAL (operands[1]) >= -128)
2332 return "moveq %1,%0";
2333 else if (INTVAL (operands[1]) < 0x8000
2334 && INTVAL (operands[1]) >= -0x8000)
2335 return "move%.w %1,%0";
2336 }
2337 else if (CONSTANT_P (operands[1]))
2338 return "move%.l %1,%0";
2339 /* Recognize the insn before a tablejump, one that refers
2340 to a table of offsets. Such an insn will need to refer
2341 to a label on the insn. So output one. Use the label-number
2342 of the table of offsets to generate this label. This code,
2343 and similar code below, assumes that there will be at most one
2344 reference to each table. */
2345 if (GET_CODE (operands[1]) == MEM
2346 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2347 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == LABEL_REF
2348 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) != PLUS)
2349 {
2350 rtx labelref = XEXP (XEXP (operands[1], 0), 1);
2351 if (MOTOROLA)
2352 asm_fprintf (asm_out_file, "\t.set %LLI%d,.+2\n",
2353 CODE_LABEL_NUMBER (XEXP (labelref, 0)));
2354 else
2355 (*targetm.asm_out.internal_label) (asm_out_file, "LI",
2356 CODE_LABEL_NUMBER (XEXP (labelref, 0)));
2357 }
2358 return "move%.w %1,%0";
2359 }
2360
2361 const char *
2362 output_move_qimode (rtx *operands)
2363 {
2364 /* 68k family always modifies the stack pointer by at least 2, even for
2365 byte pushes. The 5200 (ColdFire) does not do this. */
2366
2367 /* This case is generated by pushqi1 pattern now. */
2368 gcc_assert (!(GET_CODE (operands[0]) == MEM
2369 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
2370 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
2371 && ! ADDRESS_REG_P (operands[1])
2372 && ! TARGET_COLDFIRE));
2373
2374 /* clr and st insns on 68000 read before writing. */
2375 if (!ADDRESS_REG_P (operands[0])
2376 && ((TARGET_68010 || TARGET_COLDFIRE)
2377 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2378 {
2379 if (operands[1] == const0_rtx)
2380 return "clr%.b %0";
2381 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
2382 && GET_CODE (operands[1]) == CONST_INT
2383 && (INTVAL (operands[1]) & 255) == 255)
2384 {
2385 CC_STATUS_INIT;
2386 return "st %0";
2387 }
2388 }
2389 if (GET_CODE (operands[1]) == CONST_INT
2390 && DATA_REG_P (operands[0])
2391 && INTVAL (operands[1]) < 128
2392 && INTVAL (operands[1]) >= -128)
2393 return "moveq %1,%0";
2394 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
2395 return "sub%.l %0,%0";
2396 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
2397 return "move%.l %1,%0";
2398 /* 68k family (including the 5200 ColdFire) does not support byte moves to
2399 or from address registers. */
2400 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
2401 return "move%.w %1,%0";
2402 return "move%.b %1,%0";
2403 }
2404
2405 const char *
2406 output_move_stricthi (rtx *operands)
2407 {
2408 if (operands[1] == const0_rtx
2409 /* clr insns on 68000 read before writing. */
2410 && ((TARGET_68010 || TARGET_COLDFIRE)
2411 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2412 return "clr%.w %0";
2413 return "move%.w %1,%0";
2414 }
2415
2416 const char *
2417 output_move_strictqi (rtx *operands)
2418 {
2419 if (operands[1] == const0_rtx
2420 /* clr insns on 68000 read before writing. */
2421 && ((TARGET_68010 || TARGET_COLDFIRE)
2422 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2423 return "clr%.b %0";
2424 return "move%.b %1,%0";
2425 }
2426
2427 /* Return the best assembler insn template
2428 for moving operands[1] into operands[0] as a fullword. */
2429
2430 static const char *
2431 singlemove_string (rtx *operands)
2432 {
2433 if (GET_CODE (operands[1]) == CONST_INT)
2434 return output_move_simode_const (operands);
2435 return "move%.l %1,%0";
2436 }
2437
2438
2439 /* Output assembler code to perform a doubleword move insn
2440 with operands OPERANDS. */
2441
2442 const char *
2443 output_move_double (rtx *operands)
2444 {
2445 enum
2446 {
2447 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
2448 } optype0, optype1;
2449 rtx latehalf[2];
2450 rtx middlehalf[2];
2451 rtx xops[2];
2452 rtx addreg0 = 0, addreg1 = 0;
2453 int dest_overlapped_low = 0;
2454 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
2455
2456 middlehalf[0] = 0;
2457 middlehalf[1] = 0;
2458
2459 /* First classify both operands. */
2460
2461 if (REG_P (operands[0]))
2462 optype0 = REGOP;
2463 else if (offsettable_memref_p (operands[0]))
2464 optype0 = OFFSOP;
2465 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
2466 optype0 = POPOP;
2467 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
2468 optype0 = PUSHOP;
2469 else if (GET_CODE (operands[0]) == MEM)
2470 optype0 = MEMOP;
2471 else
2472 optype0 = RNDOP;
2473
2474 if (REG_P (operands[1]))
2475 optype1 = REGOP;
2476 else if (CONSTANT_P (operands[1]))
2477 optype1 = CNSTOP;
2478 else if (offsettable_memref_p (operands[1]))
2479 optype1 = OFFSOP;
2480 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
2481 optype1 = POPOP;
2482 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
2483 optype1 = PUSHOP;
2484 else if (GET_CODE (operands[1]) == MEM)
2485 optype1 = MEMOP;
2486 else
2487 optype1 = RNDOP;
2488
2489 /* Check for the cases that the operand constraints are not supposed
2490 to allow to happen. Generating code for these cases is
2491 painful. */
2492 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
2493
2494 /* If one operand is decrementing and one is incrementing
2495 decrement the former register explicitly
2496 and change that operand into ordinary indexing. */
2497
2498 if (optype0 == PUSHOP && optype1 == POPOP)
2499 {
2500 operands[0] = XEXP (XEXP (operands[0], 0), 0);
2501 if (size == 12)
2502 output_asm_insn ("sub%.l #12,%0", operands);
2503 else
2504 output_asm_insn ("subq%.l #8,%0", operands);
2505 if (GET_MODE (operands[1]) == XFmode)
2506 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
2507 else if (GET_MODE (operands[0]) == DFmode)
2508 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
2509 else
2510 operands[0] = gen_rtx_MEM (DImode, operands[0]);
2511 optype0 = OFFSOP;
2512 }
2513 if (optype0 == POPOP && optype1 == PUSHOP)
2514 {
2515 operands[1] = XEXP (XEXP (operands[1], 0), 0);
2516 if (size == 12)
2517 output_asm_insn ("sub%.l #12,%1", operands);
2518 else
2519 output_asm_insn ("subq%.l #8,%1", operands);
2520 if (GET_MODE (operands[1]) == XFmode)
2521 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
2522 else if (GET_MODE (operands[1]) == DFmode)
2523 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
2524 else
2525 operands[1] = gen_rtx_MEM (DImode, operands[1]);
2526 optype1 = OFFSOP;
2527 }
2528
2529 /* If an operand is an unoffsettable memory ref, find a register
2530 we can increment temporarily to make it refer to the second word. */
2531
2532 if (optype0 == MEMOP)
2533 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2534
2535 if (optype1 == MEMOP)
2536 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2537
2538 /* Ok, we can do one word at a time.
2539 Normally we do the low-numbered word first,
2540 but if either operand is autodecrementing then we
2541 do the high-numbered word first.
2542
2543 In either case, set up in LATEHALF the operands to use
2544 for the high-numbered word and in some cases alter the
2545 operands in OPERANDS to be suitable for the low-numbered word. */
2546
2547 if (size == 12)
2548 {
2549 if (optype0 == REGOP)
2550 {
2551 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
2552 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2553 }
2554 else if (optype0 == OFFSOP)
2555 {
2556 middlehalf[0] = adjust_address (operands[0], SImode, 4);
2557 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2558 }
2559 else
2560 {
2561 middlehalf[0] = operands[0];
2562 latehalf[0] = operands[0];
2563 }
2564
2565 if (optype1 == REGOP)
2566 {
2567 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
2568 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2569 }
2570 else if (optype1 == OFFSOP)
2571 {
2572 middlehalf[1] = adjust_address (operands[1], SImode, 4);
2573 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2574 }
2575 else if (optype1 == CNSTOP)
2576 {
2577 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2578 {
2579 REAL_VALUE_TYPE r;
2580 long l[3];
2581
2582 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
2583 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
2584 operands[1] = GEN_INT (l[0]);
2585 middlehalf[1] = GEN_INT (l[1]);
2586 latehalf[1] = GEN_INT (l[2]);
2587 }
2588 else
2589 {
2590 /* No non-CONST_DOUBLE constant should ever appear
2591 here. */
2592 gcc_assert (!CONSTANT_P (operands[1]));
2593 }
2594 }
2595 else
2596 {
2597 middlehalf[1] = operands[1];
2598 latehalf[1] = operands[1];
2599 }
2600 }
2601 else
2602 /* size is not 12: */
2603 {
2604 if (optype0 == REGOP)
2605 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2606 else if (optype0 == OFFSOP)
2607 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2608 else
2609 latehalf[0] = operands[0];
2610
2611 if (optype1 == REGOP)
2612 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2613 else if (optype1 == OFFSOP)
2614 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2615 else if (optype1 == CNSTOP)
2616 split_double (operands[1], &operands[1], &latehalf[1]);
2617 else
2618 latehalf[1] = operands[1];
2619 }
2620
2621 /* If insn is effectively movd N(sp),-(sp) then we will do the
2622 high word first. We should use the adjusted operand 1 (which is N+4(sp))
2623 for the low word as well, to compensate for the first decrement of sp. */
2624 if (optype0 == PUSHOP
2625 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
2626 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
2627 operands[1] = middlehalf[1] = latehalf[1];
2628
2629 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
2630 if the upper part of reg N does not appear in the MEM, arrange to
2631 emit the move late-half first. Otherwise, compute the MEM address
2632 into the upper part of N and use that as a pointer to the memory
2633 operand. */
2634 if (optype0 == REGOP
2635 && (optype1 == OFFSOP || optype1 == MEMOP))
2636 {
2637 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
2638
2639 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
2640 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
2641 {
2642 /* If both halves of dest are used in the src memory address,
2643 compute the address into latehalf of dest.
2644 Note that this can't happen if the dest is two data regs. */
2645 compadr:
2646 xops[0] = latehalf[0];
2647 xops[1] = XEXP (operands[1], 0);
2648 output_asm_insn ("lea %a1,%0", xops);
2649 if (GET_MODE (operands[1]) == XFmode )
2650 {
2651 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
2652 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
2653 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
2654 }
2655 else
2656 {
2657 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
2658 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
2659 }
2660 }
2661 else if (size == 12
2662 && reg_overlap_mentioned_p (middlehalf[0],
2663 XEXP (operands[1], 0)))
2664 {
2665 /* Check for two regs used by both source and dest.
2666 Note that this can't happen if the dest is all data regs.
2667 It can happen if the dest is d6, d7, a0.
2668 But in that case, latehalf is an addr reg, so
2669 the code at compadr does ok. */
2670
2671 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
2672 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
2673 goto compadr;
2674
2675 /* JRV says this can't happen: */
2676 gcc_assert (!addreg0 && !addreg1);
2677
2678 /* Only the middle reg conflicts; simply put it last. */
2679 output_asm_insn (singlemove_string (operands), operands);
2680 output_asm_insn (singlemove_string (latehalf), latehalf);
2681 output_asm_insn (singlemove_string (middlehalf), middlehalf);
2682 return "";
2683 }
2684 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
2685 /* If the low half of dest is mentioned in the source memory
2686 address, then arrange to emit the late half of the move first. */
2687 dest_overlapped_low = 1;
2688 }
2689
2690 /* If one or both operands are autodecrementing,
2691 do the two words, high-numbered first. */
2692
2693 /* Likewise, if the first move would clobber the source of the second one,
2694 do them in the other order. This happens only for registers;
2695 such overlap can't happen in memory unless the user explicitly
2696 sets it up, and that is an undefined circumstance. */
2697
2698 if (optype0 == PUSHOP || optype1 == PUSHOP
2699 || (optype0 == REGOP && optype1 == REGOP
2700 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
2701 || REGNO (operands[0]) == REGNO (latehalf[1])))
2702 || dest_overlapped_low)
2703 {
2704 /* Make any unoffsettable addresses point at high-numbered word. */
2705 if (addreg0)
2706 {
2707 if (size == 12)
2708 output_asm_insn ("addq%.l #8,%0", &addreg0);
2709 else
2710 output_asm_insn ("addq%.l #4,%0", &addreg0);
2711 }
2712 if (addreg1)
2713 {
2714 if (size == 12)
2715 output_asm_insn ("addq%.l #8,%0", &addreg1);
2716 else
2717 output_asm_insn ("addq%.l #4,%0", &addreg1);
2718 }
2719
2720 /* Do that word. */
2721 output_asm_insn (singlemove_string (latehalf), latehalf);
2722
2723 /* Undo the adds we just did. */
2724 if (addreg0)
2725 output_asm_insn ("subq%.l #4,%0", &addreg0);
2726 if (addreg1)
2727 output_asm_insn ("subq%.l #4,%0", &addreg1);
2728
2729 if (size == 12)
2730 {
2731 output_asm_insn (singlemove_string (middlehalf), middlehalf);
2732 if (addreg0)
2733 output_asm_insn ("subq%.l #4,%0", &addreg0);
2734 if (addreg1)
2735 output_asm_insn ("subq%.l #4,%0", &addreg1);
2736 }
2737
2738 /* Do low-numbered word. */
2739 return singlemove_string (operands);
2740 }
2741
2742 /* Normal case: do the two words, low-numbered first. */
2743
2744 output_asm_insn (singlemove_string (operands), operands);
2745
2746 /* Do the middle one of the three words for long double */
2747 if (size == 12)
2748 {
2749 if (addreg0)
2750 output_asm_insn ("addq%.l #4,%0", &addreg0);
2751 if (addreg1)
2752 output_asm_insn ("addq%.l #4,%0", &addreg1);
2753
2754 output_asm_insn (singlemove_string (middlehalf), middlehalf);
2755 }
2756
2757 /* Make any unoffsettable addresses point at high-numbered word. */
2758 if (addreg0)
2759 output_asm_insn ("addq%.l #4,%0", &addreg0);
2760 if (addreg1)
2761 output_asm_insn ("addq%.l #4,%0", &addreg1);
2762
2763 /* Do that word. */
2764 output_asm_insn (singlemove_string (latehalf), latehalf);
2765
2766 /* Undo the adds we just did. */
2767 if (addreg0)
2768 {
2769 if (size == 12)
2770 output_asm_insn ("subq%.l #8,%0", &addreg0);
2771 else
2772 output_asm_insn ("subq%.l #4,%0", &addreg0);
2773 }
2774 if (addreg1)
2775 {
2776 if (size == 12)
2777 output_asm_insn ("subq%.l #8,%0", &addreg1);
2778 else
2779 output_asm_insn ("subq%.l #4,%0", &addreg1);
2780 }
2781
2782 return "";
2783 }
2784
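/* A worked example of the routine above.  For the DImode move

     (set (reg:DI %d0) (mem:DI (plus:SI (reg:SI %a0) (const_int 8))))

   operand 0 is REGOP and operand 1 is OFFSOP, neither half of %d0/%d1
   appears in the source address, and the size is 8, so the normal path
   emits

     move.l 8(%a0),%d0
     move.l 12(%a0),%d1

   (Motorola syntax; the register choices are arbitrary.)  */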
2785
2786 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
2787 new rtx with the correct mode. */
2788
2789 static rtx
2790 force_mode (enum machine_mode mode, rtx orig)
2791 {
2792 if (mode == GET_MODE (orig))
2793 return orig;
2794
2795 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
2796 abort ();
2797
2798 return gen_rtx_REG (mode, REGNO (orig));
2799 }
2800
2801 static int
2802 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2803 {
2804 return reg_renumber && FP_REG_P (op);
2805 }
2806
2807 /* Emit insns to move operands[1] into operands[0].
2808
2809 Return 1 if we have written out everything that needs to be done to
2810 do the move. Otherwise, return 0 and the caller will emit the move
2811 normally.
2812
2813 Note SCRATCH_REG may not be in the proper mode depending on how it
2814 will be used. This routine is responsible for creating a new copy
2815 of SCRATCH_REG in the proper mode. */
2816
2817 int
2818 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
2819 {
2820 register rtx operand0 = operands[0];
2821 register rtx operand1 = operands[1];
2822 register rtx tem;
2823
2824 if (scratch_reg
2825 && reload_in_progress && GET_CODE (operand0) == REG
2826 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2827 operand0 = reg_equiv_mem[REGNO (operand0)];
2828 else if (scratch_reg
2829 && reload_in_progress && GET_CODE (operand0) == SUBREG
2830 && GET_CODE (SUBREG_REG (operand0)) == REG
2831 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
2832 {
2833 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
2834 the code which tracks sets/uses for delete_output_reload. */
2835 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
2836 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
2837 SUBREG_BYTE (operand0));
2838 operand0 = alter_subreg (&temp);
2839 }
2840
2841 if (scratch_reg
2842 && reload_in_progress && GET_CODE (operand1) == REG
2843 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
2844 operand1 = reg_equiv_mem[REGNO (operand1)];
2845 else if (scratch_reg
2846 && reload_in_progress && GET_CODE (operand1) == SUBREG
2847 && GET_CODE (SUBREG_REG (operand1)) == REG
2848 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
2849 {
2850 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
2851 the code which tracks sets/uses for delete_output_reload. */
2852 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
2853 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
2854 SUBREG_BYTE (operand1));
2855 operand1 = alter_subreg (&temp);
2856 }
2857
2858 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
2859 && ((tem = find_replacement (&XEXP (operand0, 0)))
2860 != XEXP (operand0, 0)))
2861 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
2862 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
2863 && ((tem = find_replacement (&XEXP (operand1, 0)))
2864 != XEXP (operand1, 0)))
2865 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
2866
2867 /* Handle secondary reloads for loads/stores of FP registers where
2868 the address is symbolic by using the scratch register */
2869 if (fp_reg_operand (operand0, mode)
2870 && ((GET_CODE (operand1) == MEM
2871 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
2872 || ((GET_CODE (operand1) == SUBREG
2873 && GET_CODE (XEXP (operand1, 0)) == MEM
2874 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
2875 && scratch_reg)
2876 {
2877 if (GET_CODE (operand1) == SUBREG)
2878 operand1 = XEXP (operand1, 0);
2879
2880 /* SCRATCH_REG will hold an address. We want
2881 it in SImode regardless of what mode it was originally given
2882 to us. */
2883 scratch_reg = force_mode (SImode, scratch_reg);
2884
2885 /* D might not fit in 14 bits either; for such cases load D into
2886 scratch reg. */
2887 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
2888 {
2889 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
2890 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
2891 Pmode,
2892 XEXP (XEXP (operand1, 0), 0),
2893 scratch_reg));
2894 }
2895 else
2896 emit_move_insn (scratch_reg, XEXP (operand1, 0));
2897 emit_insn (gen_rtx_SET (VOIDmode, operand0,
2898 gen_rtx_MEM (mode, scratch_reg)));
2899 return 1;
2900 }
2901 else if (fp_reg_operand (operand1, mode)
2902 && ((GET_CODE (operand0) == MEM
2903 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
2904 || ((GET_CODE (operand0) == SUBREG)
2905 && GET_CODE (XEXP (operand0, 0)) == MEM
2906 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
2907 && scratch_reg)
2908 {
2909 if (GET_CODE (operand0) == SUBREG)
2910 operand0 = XEXP (operand0, 0);
2911
2912 /* SCRATCH_REG will hold an address and maybe the actual data. We want
2913 it in SImode regardless of what mode it was originally given
2914 to us. */
2915 scratch_reg = force_mode (SImode, scratch_reg);
2916
2917 /* D might not fit in 14 bits either; for such cases load D into
2918 scratch reg. */
2919 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
2920 {
2921 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
2922 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
2923 0)),
2924 Pmode,
2925 XEXP (XEXP (operand0, 0),
2926 0),
2927 scratch_reg));
2928 }
2929 else
2930 emit_move_insn (scratch_reg, XEXP (operand0, 0));
2931 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
2932 operand1));
2933 return 1;
2934 }
2935 /* Handle secondary reloads for loads of FP registers from constant
2936 expressions by forcing the constant into memory.
2937
2938 use scratch_reg to hold the address of the memory location.
2939
2940 The proper fix is to change PREFERRED_RELOAD_CLASS to return
2941 NO_REGS when presented with a const_int and a register class
2942 containing only FP registers. Doing so unfortunately creates
2943 more problems than it solves. Fix this for 2.5. */
2944 else if (fp_reg_operand (operand0, mode)
2945 && CONSTANT_P (operand1)
2946 && scratch_reg)
2947 {
2948 rtx xoperands[2];
2949
2950 /* SCRATCH_REG will hold an address and maybe the actual data. We want
2951 it in SImode regardless of what mode it was originally given
2952 to us. */
2953 scratch_reg = force_mode (SImode, scratch_reg);
2954
2955 /* Force the constant into memory and put the address of the
2956 memory location into scratch_reg. */
2957 xoperands[0] = scratch_reg;
2958 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
2959 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
2960
2961 /* Now load the destination register. */
2962 emit_insn (gen_rtx_SET (mode, operand0,
2963 gen_rtx_MEM (mode, scratch_reg)));
2964 return 1;
2965 }
2966
2967 /* Now have insn-emit do whatever it normally does. */
2968 return 0;
2969 }
2970
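/* A sketch of the first secondary-reload case above: moving
   (mem:DF (symbol_ref "x")) into %fp0 with %a1 as SCRATCH_REG would emit

     (set (reg:SI %a1) (symbol_ref "x"))
     (set (reg:DF %fp0) (mem:DF (reg:SI %a1)))

   and return 1 so that the caller emits nothing further.  (Illustrative
   only; whether a symbolic address actually triggers this path depends
   on memory_address_p for the target in question.)  */
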
2971 /* Split one or more DImode RTL references into pairs of SImode
2972 references. The RTL can be REG, offsettable MEM, integer constant, or
2973 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
2974 split and "num" is its length. lo_half and hi_half are output arrays
2975 that parallel "operands". */
2976
2977 void
2978 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
2979 {
2980 while (num--)
2981 {
2982 rtx op = operands[num];
2983
2984 /* simplify_subreg refuses to split volatile memory addresses,
2985 but we still have to handle it. */
2986 if (GET_CODE (op) == MEM)
2987 {
2988 lo_half[num] = adjust_address (op, SImode, 4);
2989 hi_half[num] = adjust_address (op, SImode, 0);
2990 }
2991 else
2992 {
2993 lo_half[num] = simplify_gen_subreg (SImode, op,
2994 GET_MODE (op) == VOIDmode
2995 ? DImode : GET_MODE (op), 4);
2996 hi_half[num] = simplify_gen_subreg (SImode, op,
2997 GET_MODE (op) == VOIDmode
2998 ? DImode : GET_MODE (op), 0);
2999 }
3000 }
3001 }
3002
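/* Examples of the splitting above, for a register and a memory operand
   (m68k is big-endian, so the word at the lower address is the high
   half):

     (reg:DI %d0)            hi_half = (reg:SI %d0)
                             lo_half = (reg:SI %d1)
     (mem:DI (reg:SI %a0))   hi_half = (mem:SI (reg:SI %a0))
                             lo_half = (mem:SI (plus:SI (reg:SI %a0)
                                                        (const_int 4)))  */
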
3003 /* Split X into a base and a constant offset, storing them in *BASE
3004 and *OFFSET respectively. */
3005
3006 static void
3007 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3008 {
3009 *offset = 0;
3010 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3011 {
3012 *offset += INTVAL (XEXP (x, 1));
3013 x = XEXP (x, 0);
3014 }
3015 *base = x;
3016 }
3017
3018 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3019 instruction. STORE_P says whether the move is a load or store.
3020
3021 If the instruction uses post-increment or pre-decrement addressing,
3022 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3023 adjustment. This adjustment will be made by the first element of
3024 PARALLEL, with the loads or stores starting at element 1. If the
3025 instruction does not use post-increment or pre-decrement addressing,
3026 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3027 start at element 0. */
3028
3029 bool
3030 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3031 HOST_WIDE_INT automod_offset, bool store_p)
3032 {
3033 rtx base, mem_base, set, mem, reg, last_reg;
3034 HOST_WIDE_INT offset, mem_offset;
3035 int i, first, len;
3036 enum reg_class rclass;
3037
3038 len = XVECLEN (pattern, 0);
3039 first = (automod_base != NULL);
3040
3041 if (automod_base)
3042 {
3043 /* Stores must be pre-decrement and loads must be post-increment. */
3044 if (store_p != (automod_offset < 0))
3045 return false;
3046
3047 /* Work out the base and offset for lowest memory location. */
3048 base = automod_base;
3049 offset = (automod_offset < 0 ? automod_offset : 0);
3050 }
3051 else
3052 {
3053 /* Allow any valid base and offset in the first access. */
3054 base = NULL;
3055 offset = 0;
3056 }
3057
3058 last_reg = NULL;
3059 rclass = NO_REGS;
3060 for (i = first; i < len; i++)
3061 {
3062 /* We need a plain SET. */
3063 set = XVECEXP (pattern, 0, i);
3064 if (GET_CODE (set) != SET)
3065 return false;
3066
3067 /* Check that we have a memory location... */
3068 mem = XEXP (set, !store_p);
3069 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3070 return false;
3071
3072 /* ...with the right address. */
3073 if (base == NULL)
3074 {
3075 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3076 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3077 There are no mode restrictions for 680x0 besides the
3078 automodification rules enforced above. */
3079 if (TARGET_COLDFIRE
3080 && !m68k_legitimate_base_reg_p (base, reload_completed))
3081 return false;
3082 }
3083 else
3084 {
3085 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3086 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3087 return false;
3088 }
3089
3090 /* Check that we have a register of the required mode and class. */
3091 reg = XEXP (set, store_p);
3092 if (!REG_P (reg)
3093 || !HARD_REGISTER_P (reg)
3094 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3095 return false;
3096
3097 if (last_reg)
3098 {
3099 /* The register must belong to RCLASS and have a higher number
3100 than the register in the previous SET. */
3101 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3102 || REGNO (last_reg) >= REGNO (reg))
3103 return false;
3104 }
3105 else
3106 {
3107 /* Work out which register class we need. */
3108 if (INT_REGNO_P (REGNO (reg)))
3109 rclass = GENERAL_REGS;
3110 else if (FP_REGNO_P (REGNO (reg)))
3111 rclass = FP_REGS;
3112 else
3113 return false;
3114 }
3115
3116 last_reg = reg;
3117 offset += GET_MODE_SIZE (GET_MODE (reg));
3118 }
3119
3120 /* If we have an automodification, check whether the final offset is OK. */
3121 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3122 return false;
3123
3124 /* Reject unprofitable cases. */
3125 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3126 return false;
3127
3128 return true;
3129 }
3130
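/* As an example, a PARALLEL of the following shape (a sketch, with the
   usual RTL detail abbreviated) satisfies the check above for a store of
   %d2-%d4 to consecutive stack slots:

     (parallel
       [(set (mem:SI (reg:SI %sp))                         (reg:SI %d2))
        (set (mem:SI (plus:SI (reg:SI %sp) (const_int 4))) (reg:SI %d3))
        (set (mem:SI (plus:SI (reg:SI %sp) (const_int 8))) (reg:SI %d4))])

   with STORE_P true and AUTOMOD_BASE null: the registers are hard
   GENERAL_REGS in increasing order, the offsets increase by the mode
   size, and three registers meets MIN_MOVEM_REGS.  */
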
3131 /* Return the assembly code template for a movem or fmovem instruction
3132 whose pattern is given by PATTERN. Store the template's operands
3133 in OPERANDS.
3134
3135 If the instruction uses post-increment or pre-decrement addressing,
3136 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3137 is true if this is a store instruction. */
3138
3139 const char *
3140 m68k_output_movem (rtx *operands, rtx pattern,
3141 HOST_WIDE_INT automod_offset, bool store_p)
3142 {
3143 unsigned int mask;
3144 int i, first;
3145
3146 gcc_assert (GET_CODE (pattern) == PARALLEL);
3147 mask = 0;
3148 first = (automod_offset != 0);
3149 for (i = first; i < XVECLEN (pattern, 0); i++)
3150 {
3151 /* When using movem with pre-decrement addressing, register X + D0_REG
3152 is controlled by bit 15 - X. For all other addressing modes,
3153 register X + D0_REG is controlled by bit X. Confusingly, the
3154 register mask for fmovem is in the opposite order to that for
3155 movem. */
3156 unsigned int regno;
3157
3158 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3159 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3160 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3161 if (automod_offset < 0)
3162 {
3163 if (FP_REGNO_P (regno))
3164 mask |= 1 << (regno - FP0_REG);
3165 else
3166 mask |= 1 << (15 - (regno - D0_REG));
3167 }
3168 else
3169 {
3170 if (FP_REGNO_P (regno))
3171 mask |= 1 << (7 - (regno - FP0_REG));
3172 else
3173 mask |= 1 << (regno - D0_REG);
3174 }
3175 }
3176 CC_STATUS_INIT;
3177
3178 if (automod_offset == 0)
3179 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
3180 else if (automod_offset < 0)
3181 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3182 else
3183 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3184 operands[1] = GEN_INT (mask);
3185 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
3186 {
3187 if (store_p)
3188 return MOTOROLA ? "fmovm %1,%a0" : "fmovem %1,%a0";
3189 else
3190 return MOTOROLA ? "fmovm %a0,%1" : "fmovem %a0,%1";
3191 }
3192 else
3193 {
3194 if (store_p)
3195 return MOTOROLA ? "movm.l %1,%a0" : "moveml %1,%a0";
3196 else
3197 return MOTOROLA ? "movm.l %a0,%1" : "moveml %a0,%1";
3198 }
3199 }
3200
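/* Continuing the %d2-%d4 store example above: without automodification
   the loop sets bit (REGNO - D0_REG) for each register, giving a mask of
   0x1c (bits 2, 3 and 4), so the output is something like

     movm.l #28,(%sp)     (Motorola syntax)
     moveml #28,sp@       (MIT syntax)

   With pre-decrement addressing the same registers would instead set
   bits 15 - 2, 15 - 3 and 15 - 4, giving a mask of 0x3800.  */
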
3201 /* Return a REG that occurs in ADDR with coefficient 1.
3202 ADDR can be effectively incremented by incrementing REG. */
3203
3204 static rtx
3205 find_addr_reg (rtx addr)
3206 {
3207 while (GET_CODE (addr) == PLUS)
3208 {
3209 if (GET_CODE (XEXP (addr, 0)) == REG)
3210 addr = XEXP (addr, 0);
3211 else if (GET_CODE (XEXP (addr, 1)) == REG)
3212 addr = XEXP (addr, 1);
3213 else if (CONSTANT_P (XEXP (addr, 0)))
3214 addr = XEXP (addr, 1);
3215 else if (CONSTANT_P (XEXP (addr, 1)))
3216 addr = XEXP (addr, 0);
3217 else
3218 gcc_unreachable ();
3219 }
3220 gcc_assert (GET_CODE (addr) == REG);
3221 return addr;
3222 }
3223
3224 /* Output assembler code to perform a 32-bit 3-operand add. */
3225
3226 const char *
3227 output_addsi3 (rtx *operands)
3228 {
3229 if (! operands_match_p (operands[0], operands[1]))
3230 {
3231 if (!ADDRESS_REG_P (operands[1]))
3232 {
3233 rtx tmp = operands[1];
3234
3235 operands[1] = operands[2];
3236 operands[2] = tmp;
3237 }
3238
3239 /* These insns can result from reloads to access
3240 stack slots over 64k from the frame pointer. */
3241 if (GET_CODE (operands[2]) == CONST_INT
3242 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
3243 return "move%.l %2,%0\n\tadd%.l %1,%0";
3244 if (GET_CODE (operands[2]) == REG)
3245 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
3246 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
3247 }
3248 if (GET_CODE (operands[2]) == CONST_INT)
3249 {
3250 if (INTVAL (operands[2]) > 0
3251 && INTVAL (operands[2]) <= 8)
3252 return "addq%.l %2,%0";
3253 if (INTVAL (operands[2]) < 0
3254 && INTVAL (operands[2]) >= -8)
3255 {
3256 operands[2] = GEN_INT (- INTVAL (operands[2]));
3257 return "subq%.l %2,%0";
3258 }
3259 /* On the CPU32 it is faster to use two addql instructions to
3260 add a small integer (8 < N <= 16) to a register.
3261 Likewise for subql. */
3262 if (TUNE_CPU32 && REG_P (operands[0]))
3263 {
3264 if (INTVAL (operands[2]) > 8
3265 && INTVAL (operands[2]) <= 16)
3266 {
3267 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
3268 return "addq%.l #8,%0\n\taddq%.l %2,%0";
3269 }
3270 if (INTVAL (operands[2]) < -8
3271 && INTVAL (operands[2]) >= -16)
3272 {
3273 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
3274 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
3275 }
3276 }
3277 if (ADDRESS_REG_P (operands[0])
3278 && INTVAL (operands[2]) >= -0x8000
3279 && INTVAL (operands[2]) < 0x8000)
3280 {
3281 if (TUNE_68040)
3282 return "add%.w %2,%0";
3283 else
3284 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
3285 }
3286 }
3287 return "add%.l %2,%0";
3288 }
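
/* For example, with TUNE_CPU32, a matching destination register and a
   constant addend of 12, the code above returns the two-instruction
   sequence

     addq.l #8,%d0
     addq.l #4,%d0

   which, as noted in the comment above, is faster on that core than a
   single add.l (Motorola syntax; the register choice is arbitrary).  */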
3289 \f
3290 /* Store in cc_status the expressions that the condition codes will
3291 describe after execution of an instruction whose pattern is EXP.
3292 Do not alter them if the instruction would not alter the cc's. */
3293
3294 /* On the 68000, all the insns to store in an address register fail to
3295 set the cc's. However, in some cases these instructions can make it
3296 possibly invalid to use the saved cc's. In those cases we clear out
3297 some or all of the saved cc's so they won't be used. */
3298
3299 void
3300 notice_update_cc (rtx exp, rtx insn)
3301 {
3302 if (GET_CODE (exp) == SET)
3303 {
3304 if (GET_CODE (SET_SRC (exp)) == CALL)
3305 CC_STATUS_INIT;
3306 else if (ADDRESS_REG_P (SET_DEST (exp)))
3307 {
3308 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
3309 cc_status.value1 = 0;
3310 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
3311 cc_status.value2 = 0;
3312 }
3313 /* fmoves to memory or data registers do not set the condition
3314 codes. Normal moves _do_ set the condition codes, but not in
3315 a way that is appropriate for comparison with 0, because -0.0
3316 would be treated as a negative nonzero number. Note that it
3317 isn't appropriate to conditionalize this restriction on
3318 HONOR_SIGNED_ZEROS because that macro merely indicates whether
3319 we care about the difference between -0.0 and +0.0. */
3320 else if (!FP_REG_P (SET_DEST (exp))
3321 && SET_DEST (exp) != cc0_rtx
3322 && (FP_REG_P (SET_SRC (exp))
3323 || GET_CODE (SET_SRC (exp)) == FIX
3324 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
3325 CC_STATUS_INIT;
3326 /* A pair of move insns doesn't produce a useful overall cc. */
3327 else if (!FP_REG_P (SET_DEST (exp))
3328 && !FP_REG_P (SET_SRC (exp))
3329 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
3330 && (GET_CODE (SET_SRC (exp)) == REG
3331 || GET_CODE (SET_SRC (exp)) == MEM
3332 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
3333 CC_STATUS_INIT;
3334 else if (SET_DEST (exp) != pc_rtx)
3335 {
3336 cc_status.flags = 0;
3337 cc_status.value1 = SET_DEST (exp);
3338 cc_status.value2 = SET_SRC (exp);
3339 }
3340 }
3341 else if (GET_CODE (exp) == PARALLEL
3342 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
3343 {
3344 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
3345 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
3346
3347 if (ADDRESS_REG_P (dest))
3348 CC_STATUS_INIT;
3349 else if (dest != pc_rtx)
3350 {
3351 cc_status.flags = 0;
3352 cc_status.value1 = dest;
3353 cc_status.value2 = src;
3354 }
3355 }
3356 else
3357 CC_STATUS_INIT;
3358 if (cc_status.value2 != 0
3359 && ADDRESS_REG_P (cc_status.value2)
3360 && GET_MODE (cc_status.value2) == QImode)
3361 CC_STATUS_INIT;
3362 if (cc_status.value2 != 0)
3363 switch (GET_CODE (cc_status.value2))
3364 {
3365 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
3366 case ROTATE: case ROTATERT:
3367 /* These instructions always clear the overflow bit, and set
3368 the carry to the bit shifted out. */
3369 /* ??? We don't currently have a way to signal carry not valid,
3370 nor do we check for it in the branch insns. */
3371 CC_STATUS_INIT;
3372 break;
3373
3374 case PLUS: case MINUS: case MULT:
3375 case DIV: case UDIV: case MOD: case UMOD: case NEG:
3376 if (GET_MODE (cc_status.value2) != VOIDmode)
3377 cc_status.flags |= CC_NO_OVERFLOW;
3378 break;
3379 case ZERO_EXTEND:
3380 /* (SET r1 (ZERO_EXTEND r2)) on this machine
3381 ends with a move insn moving r2 in r2's mode.
3382 Thus, the cc's are set for r2.
3383 This can set N bit spuriously. */
3384 cc_status.flags |= CC_NOT_NEGATIVE;
3385
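/* Fall through.  */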
3386 default:
3387 break;
3388 }
3389 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
3390 && cc_status.value2
3391 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
3392 cc_status.value2 = 0;
3393 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
3394 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
3395 cc_status.flags = CC_IN_68881;
3396 }
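
/* For example, after (set (reg:SI %d0) (reg:SI %d1)) the code above
   records cc_status.value1 = %d0 and cc_status.value2 = %d1, so a later
   comparison of %d0 (or %d1) against zero can often reuse the recorded
   condition codes instead of emitting a tst.l.  A move into an address
   register, by contrast, merely invalidates whatever saved values it
   clobbers, since such moves do not set the cc's at all.  */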
3397 \f
3398 const char *
3399 output_move_const_double (rtx *operands)
3400 {
3401 int code = standard_68881_constant_p (operands[1]);
3402
3403 if (code != 0)
3404 {
3405 static char buf[40];
3406
3407 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
3408 return buf;
3409 }
3410 return "fmove%.d %1,%0";
3411 }
3412
3413 const char *
3414 output_move_const_single (rtx *operands)
3415 {
3416 int code = standard_68881_constant_p (operands[1]);
3417
3418 if (code != 0)
3419 {
3420 static char buf[40];
3421
3422 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
3423 return buf;
3424 }
3425 return "fmove%.s %f1,%0";
3426 }
3427
3428 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
3429 from the "fmovecr" instruction.
3430 The value, anded with 0xff, gives the code to use in fmovecr
3431 to get the desired constant. */
3432
3433 /* This code has been fixed for cross-compilation. */
3434
3435 static int inited_68881_table = 0;
3436
3437 static const char *const strings_68881[7] = {
3438 "0.0",
3439 "1.0",
3440 "10.0",
3441 "100.0",
3442 "10000.0",
3443 "1e8",
3444 "1e16"
3445 };
3446
3447 static const int codes_68881[7] = {
3448 0x0f,
3449 0x32,
3450 0x33,
3451 0x34,
3452 0x35,
3453 0x36,
3454 0x37
3455 };
3456
3457 REAL_VALUE_TYPE values_68881[7];
3458
3459 /* Set up values_68881 array by converting the decimal values
3460 strings_68881 to binary. */
3461
3462 void
3463 init_68881_table (void)
3464 {
3465 int i;
3466 REAL_VALUE_TYPE r;
3467 enum machine_mode mode;
3468
3469 mode = SFmode;
3470 for (i = 0; i < 7; i++)
3471 {
3472 if (i == 6)
3473 mode = DFmode;
3474 r = REAL_VALUE_ATOF (strings_68881[i], mode);
3475 values_68881[i] = r;
3476 }
3477 inited_68881_table = 1;
3478 }
3479
3480 int
3481 standard_68881_constant_p (rtx x)
3482 {
3483 REAL_VALUE_TYPE r;
3484 int i;
3485
3486 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
3487 used at all on those chips. */
3488 if (TUNE_68040_60)
3489 return 0;
3490
3491 if (! inited_68881_table)
3492 init_68881_table ();
3493
3494 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3495
3496 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
3497 is rejected. */
3498 for (i = 0; i < 6; i++)
3499 {
3500 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
3501 return (codes_68881[i]);
3502 }
3503
3504 if (GET_MODE (x) == SFmode)
3505 return 0;
3506
3507 if (REAL_VALUES_EQUAL (r, values_68881[6]))
3508 return (codes_68881[6]);
3509
3510 /* larger powers of ten in the constants ram are not used
3511 because they are not equal to a `double' C constant. */
3512 return 0;
3513 }
3514
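/* For example, the constant 1.0 matches values_68881[1], so the routine
   above returns 0x32 and output_move_const_single emits

     fmovecr #0x32,%fp0

   (assuming the destination is %fp0 and we are not tuning for the
   68040/68060, for which the routine deliberately returns 0).  */
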
3515 /* If X is a floating-point constant, return the logarithm of X base 2,
3516 or 0 if X is not a power of 2. */
3517
3518 int
3519 floating_exact_log2 (rtx x)
3520 {
3521 REAL_VALUE_TYPE r, r1;
3522 int exp;
3523
3524 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3525
3526 if (REAL_VALUES_LESS (r, dconst1))
3527 return 0;
3528
3529 exp = real_exponent (&r);
3530 real_2expN (&r1, exp);
3531 if (REAL_VALUES_EQUAL (r1, r))
3532 return exp;
3533
3534 return 0;
3535 }
3536 \f
3537 /* A C compound statement to output to stdio stream STREAM the
3538 assembler syntax for an instruction operand X. X is an RTL
3539 expression.
3540
3541 CODE is a value that can be used to specify one of several ways
3542 of printing the operand. It is used when identical operands
3543 must be printed differently depending on the context. CODE
3544 comes from the `%' specification that was used to request
3545 printing of the operand. If the specification was just `%DIGIT'
3546 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
3547 is the ASCII code for LTR.
3548
3549 If X is a register, this macro should print the register's name.
3550 The names can be found in an array `reg_names' whose type is
3551 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
3552
3553 When the machine description has a specification `%PUNCT' (a `%'
3554 followed by a punctuation character), this macro is called with
3555 a null pointer for X and the punctuation character for CODE.
3556
3557 The m68k specific codes are:
3558
3559 '.' for dot needed in Motorola-style opcode names.
3560 '-' for an operand pushing on the stack:
3561 sp@-, -(sp) or -(%sp) depending on the style of syntax.
3562 '+' for an operand pushing on the stack:
3563 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
3564 '@' for a reference to the top word on the stack:
3565 sp@, (sp) or (%sp) depending on the style of syntax.
3566 '#' for an immediate operand prefix (# in MIT and Motorola syntax
3567 but & in SGS syntax).
3568 '!' for the cc register (used in an `and to cc' insn).
3569 '$' for the letter `s' in an op code, but only on the 68040.
3570 '&' for the letter `d' in an op code, but only on the 68040.
3571 '/' for register prefix needed by longlong.h.
3572 '?' for m68k_library_id_string
3573
3574 'b' for byte insn (no effect, on the Sun; this is for the ISI).
3575 'd' to force memory addressing to be absolute, not relative.
3576 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
3577 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
3578 or print pair of registers as rx:ry.
3579 'p' print an address with @PLTPC attached, but only if the operand
3580 is not locally-bound. */
3581
3582 void
3583 print_operand (FILE *file, rtx op, int letter)
3584 {
3585 if (letter == '.')
3586 {
3587 if (MOTOROLA)
3588 fprintf (file, ".");
3589 }
3590 else if (letter == '#')
3591 asm_fprintf (file, "%I");
3592 else if (letter == '-')
3593 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
3594 else if (letter == '+')
3595 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
3596 else if (letter == '@')
3597 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
3598 else if (letter == '!')
3599 asm_fprintf (file, "%Rfpcr");
3600 else if (letter == '$')
3601 {
3602 if (TARGET_68040)
3603 fprintf (file, "s");
3604 }
3605 else if (letter == '&')
3606 {
3607 if (TARGET_68040)
3608 fprintf (file, "d");
3609 }
3610 else if (letter == '/')
3611 asm_fprintf (file, "%R");
3612 else if (letter == '?')
3613 asm_fprintf (file, m68k_library_id_string);
3614 else if (letter == 'p')
3615 {
3616 output_addr_const (file, op);
3617 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
3618 fprintf (file, "@PLTPC");
3619 }
3620 else if (GET_CODE (op) == REG)
3621 {
3622 if (letter == 'R')
3623 /* Print out the second register name of a register pair.
3624 I.e., R (6) => 7. */
3625 fputs (M68K_REGNAME(REGNO (op) + 1), file);
3626 else
3627 fputs (M68K_REGNAME(REGNO (op)), file);
3628 }
3629 else if (GET_CODE (op) == MEM)
3630 {
3631 output_address (XEXP (op, 0));
3632 if (letter == 'd' && ! TARGET_68020
3633 && CONSTANT_ADDRESS_P (XEXP (op, 0))
3634 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
3635 && INTVAL (XEXP (op, 0)) < 0x8000
3636 && INTVAL (XEXP (op, 0)) >= -0x8000))
3637 fprintf (file, MOTOROLA ? ".l" : ":l");
3638 }
3639 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
3640 {
3641 REAL_VALUE_TYPE r;
3642 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3643 ASM_OUTPUT_FLOAT_OPERAND (letter, file, r);
3644 }
3645 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
3646 {
3647 REAL_VALUE_TYPE r;
3648 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3649 ASM_OUTPUT_LONG_DOUBLE_OPERAND (file, r);
3650 }
3651 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
3652 {
3653 REAL_VALUE_TYPE r;
3654 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3655 ASM_OUTPUT_DOUBLE_OPERAND (file, r);
3656 }
3657 else
3658 {
3659 /* Use `print_operand_address' instead of `output_addr_const'
3660 to ensure that we print relevant PIC stuff. */
3661 asm_fprintf (file, "%I");
3662 if (TARGET_PCREL
3663 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
3664 print_operand_address (file, op);
3665 else
3666 output_addr_const (file, op);
3667 }
3668 }
3669
3670 \f
3671 /* A C compound statement to output to stdio stream STREAM the
3672 assembler syntax for an instruction operand that is a memory
3673 reference whose address is ADDR. ADDR is an RTL expression.
3674
3675 Note that this contains a kludge that knows that the only reason
3676 we have an address (plus (label_ref...) (reg...)) when not generating
3677 PIC code is in the insn before a tablejump, and we know that m68k.md
3678 generates a label LInnn: on such an insn.
3679
3680 It is possible for PIC to generate a (plus (label_ref...) (reg...))
3681 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
3682
3683 Some SGS assemblers have a bug such that "Lnnn-LInnn-2.b(pc,d0.l*2)"
3684 fails to assemble. Luckily "Lnnn(pc,d0.l*2)" produces the results
3685 we want. This difference can be accommodated by using an assembler
3686 define such as "LDnnn" to expand to either "Lnnn-LInnn-2.b", "Lnnn", or
3687 any other string, as necessary; this is accomplished via the
3688 ASM_OUTPUT_CASE_END macro (see m68k/sgs.h for an example). Versions
3689 without the bug need no such definition. Some assemblers refuse all
3690 of the above solutions; the workaround is then to emit "K(pc,d0.l*2)",
3691 with K being a small constant known to give the right behavior.
3692
3693 They also do not like things like "pea 1.w", so we simply leave off
3694 the .w on small constants.
3695
3696 This routine is responsible for distinguishing between -fpic and -fPIC
3697 style relocations in an address. When generating -fpic code the
3698 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
3699 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
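/* As a concrete (illustrative) example: for the address
   (plus:SI (reg:SI %a0) (const_int 8)) the code below prints "8(%a0)"
   under MOTOROLA syntax and "a0@(8)" under MIT syntax, assuming the
   usual register prefixes. */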
3700
3701 void
3702 print_operand_address (FILE *file, rtx addr)
3703 {
3704 struct m68k_address address;
3705
3706 if (!m68k_decompose_address (QImode, addr, true, &address))
3707 gcc_unreachable ();
3708
3709 if (address.code == PRE_DEC)
3710 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
3711 M68K_REGNAME (REGNO (address.base)));
3712 else if (address.code == POST_INC)
3713 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
3714 M68K_REGNAME (REGNO (address.base)));
3715 else if (!address.base && !address.index)
3716 {
3717 /* A constant address. */
3718 gcc_assert (address.offset == addr);
3719 if (GET_CODE (addr) == CONST_INT)
3720 {
3721 /* (xxx).w or (xxx).l. */
3722 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
3723 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
3724 else
3725 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
3726 }
3727 else if (TARGET_PCREL)
3728 {
3729 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
3730 fputc ('(', file);
3731 output_addr_const (file, addr);
3732 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
3733 }
3734 else
3735 {
3736 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
3737 name ends in `.<letter>', as the last 2 characters can be
3738 mistaken as a size suffix. Put the name in parentheses. */
3739 if (GET_CODE (addr) == SYMBOL_REF
3740 && strlen (XSTR (addr, 0)) > 2
3741 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
3742 {
3743 putc ('(', file);
3744 output_addr_const (file, addr);
3745 putc (')', file);
3746 }
3747 else
3748 output_addr_const (file, addr);
3749 }
3750 }
3751 else
3752 {
3753 int labelno;
3754
3755 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
3756 label being accessed, otherwise it is -1. */
3757 labelno = (address.offset
3758 && !address.base
3759 && GET_CODE (address.offset) == LABEL_REF
3760 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
3761 : -1);
3762 if (MOTOROLA)
3763 {
3764 /* Print the "offset(base" component. */
3765 if (labelno >= 0)
3766 asm_fprintf (file, "%LL%d-%LLI%d.b(%Rpc,", labelno, labelno);
3767 else
3768 {
3769 if (address.offset)
3770 {
3771 output_addr_const (file, address.offset);
3772 if (flag_pic && address.base == pic_offset_table_rtx)
3773 {
3774 fprintf (file, "@GOT");
3775 if (flag_pic == 1 && TARGET_68020)
3776 fprintf (file, ".w");
3777 }
3778 }
3779 putc ('(', file);
3780 if (address.base)
3781 fputs (M68K_REGNAME (REGNO (address.base)), file);
3782 }
3783 /* Print the ",index" component, if any. */
3784 if (address.index)
3785 {
3786 if (address.base)
3787 putc (',', file);
3788 fprintf (file, "%s.%c",
3789 M68K_REGNAME (REGNO (address.index)),
3790 GET_MODE (address.index) == HImode ? 'w' : 'l');
3791 if (address.scale != 1)
3792 fprintf (file, "*%d", address.scale);
3793 }
3794 putc (')', file);
3795 }
3796 else /* !MOTOROLA */
3797 {
3798 if (!address.offset && !address.index)
3799 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
3800 else
3801 {
3802 /* Print the "base@(offset" component. */
3803 if (labelno >= 0)
3804 asm_fprintf (file, "%Rpc@(%LL%d-%LLI%d-2:b", labelno, labelno);
3805 else
3806 {
3807 if (address.base)
3808 fputs (M68K_REGNAME (REGNO (address.base)), file);
3809 fprintf (file, "@(");
3810 if (address.offset)
3811 {
3812 output_addr_const (file, address.offset);
3813 if (address.base == pic_offset_table_rtx && TARGET_68020)
3814 switch (flag_pic)
3815 {
3816 case 1:
3817 fprintf (file, ":w"); break;
3818 case 2:
3819 fprintf (file, ":l"); break;
3820 default:
3821 break;
3822 }
3823 }
3824 }
3825 /* Print the ",index" component, if any. */
3826 if (address.index)
3827 {
3828 fprintf (file, ",%s:%c",
3829 M68K_REGNAME (REGNO (address.index)),
3830 GET_MODE (address.index) == HImode ? 'w' : 'l');
3831 if (address.scale != 1)
3832 fprintf (file, ":%d", address.scale);
3833 }
3834 putc (')', file);
3835 }
3836 }
3837 }
3838 }
3839 \f
3840 /* Check for cases where a clr insn can be omitted from code using
3841 strict_low_part sets. For example, the second clrl here is not needed:
3842 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
3843
3844 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
3845 insn we are checking for redundancy. TARGET is the register set by the
3846 clear insn. */
3847
3848 bool
3849 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
3850 rtx target)
3851 {
3852 rtx p;
3853
3854 p = prev_nonnote_insn (first_insn);
3855
3856 while (p)
3857 {
3858 /* If it isn't an insn, then give up. */
3859 if (GET_CODE (p) != INSN)
3860 return false;
3861
3862 if (reg_set_p (target, p))
3863 {
3864 rtx set = single_set (p);
3865 rtx dest;
3866
3867 /* If it isn't an easy to recognize insn, then give up. */
3868 if (! set)
3869 return false;
3870
3871 dest = SET_DEST (set);
3872
3873 /* If this sets the entire target register to zero, then our
3874 first_insn is redundant. */
3875 if (rtx_equal_p (dest, target)
3876 && SET_SRC (set) == const0_rtx)
3877 return true;
3878 else if (GET_CODE (dest) == STRICT_LOW_PART
3879 && GET_CODE (XEXP (dest, 0)) == REG
3880 && REGNO (XEXP (dest, 0)) == REGNO (target)
3881 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
3882 <= GET_MODE_SIZE (mode)))
3883 /* This is a strict low part set which modifies less than
3884 we are using, so it is safe. */
3885 ;
3886 else
3887 return false;
3888 }
3889
3890 p = prev_nonnote_insn (p);
3891 }
3892
3893 return false;
3894 }
3895
3896 /* Operand predicates for implementing asymmetric pc-relative addressing
3897 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
3898 when used as a source operand, but not as a destination operand.
3899
3900 We model this by restricting the meaning of the basic predicates
3901 (general_operand, memory_operand, etc) to forbid the use of this
3902 addressing mode, and then define the following predicates that permit
3903 this addressing mode. These predicates can then be used for the
3904 source operands of the appropriate instructions.
3905
3906 n.b. While it is theoretically possible to change all machine patterns
3907 to use this addressing mode where permitted by the architecture,
3908 it has only been implemented for "common" cases: SImode, HImode, and
3909 QImode operands, and only for the principal operations that would
3910 require this addressing mode: data movement and simple integer operations.
3911
3912 In parallel with these new predicates, two new constraint letters
3913 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
3914 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
3915 In the pcrel case 's' is only valid in combination with 'a' registers.
3916 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
3917 of how these constraints are used.
3918
3919 The use of these predicates is strictly optional, though patterns that
3920 don't use them will cause an extra reload register to be allocated where one
3921 was not necessary:
3922
3923 lea (abc:w,%pc),%a0 ; need to reload address
3924 moveq &1,%d1 ; since write to pc-relative space
3925 movel %d1,%a0@ ; is not allowed
3926 ...
3927 lea (abc:w,%pc),%a1 ; no need to reload address here
3928 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
3929
3930 For more info, consult tiemann@cygnus.com.
3931
3932
3933 All of the ugliness with predicates and constraints is due to the
3934 simple fact that the m68k does not allow a pc-relative addressing
3935 mode as a destination. gcc does not distinguish between source and
3936 destination addresses. Hence, if we claim that pc-relative address
3937 modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we
3938 end up with invalid code. To get around this problem, we left
3939 pc-relative modes as invalid addresses, and then added special
3940 predicates and constraints to accept them.
3941
3942 A cleaner way to handle this is to modify gcc to distinguish
3943 between source and destination addresses. We can then say that
3944 pc-relative is a valid source address but not a valid destination
3945 address, and hopefully avoid a lot of the predicate and constraint
3946 hackery. Unfortunately, this would be a pretty big change. It would
3947 be a useful change for a number of ports, but there aren't any current
3948 plans to undertake this.
3949
3950 ***************************************************************************/
3951
3952
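/* Output the assembler template for an SImode AND of OPERANDS. Where
   possible the operation is narrowed: a mask whose upper 16 bits are all
   ones is handled with a word-sized "and" (or "clr" when its low word is
   zero), and a mask with a single zero bit becomes a "bclr". */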
3953 const char *
3954 output_andsi3 (rtx *operands)
3955 {
3956 int logval;
3957 if (GET_CODE (operands[2]) == CONST_INT
3958 && (INTVAL (operands[2]) | 0xffff) == -1
3959 && (DATA_REG_P (operands[0])
3960 || offsettable_memref_p (operands[0]))
3961 && !TARGET_COLDFIRE)
3962 {
3963 if (GET_CODE (operands[0]) != REG)
3964 operands[0] = adjust_address (operands[0], HImode, 2);
3965 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
3966 /* Do not delete a following tstl %0 insn; that would be incorrect. */
3967 CC_STATUS_INIT;
3968 if (operands[2] == const0_rtx)
3969 return "clr%.w %0";
3970 return "and%.w %2,%0";
3971 }
3972 if (GET_CODE (operands[2]) == CONST_INT
3973 && (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
3974 && (DATA_REG_P (operands[0])
3975 || offsettable_memref_p (operands[0])))
3976 {
3977 if (DATA_REG_P (operands[0]))
3978 operands[1] = GEN_INT (logval);
3979 else
3980 {
3981 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
3982 operands[1] = GEN_INT (logval % 8);
3983 }
3984 /* This does not set condition codes in a standard way. */
3985 CC_STATUS_INIT;
3986 return "bclr %1,%0";
3987 }
3988 return "and%.l %2,%0";
3989 }
3990
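/* Output the assembler template for an SImode inclusive OR of OPERANDS:
   a constant that fits in 16 bits is handled with a word-sized "or" (or a
   "mov" when it is 0xffff), and a constant with a single set bit becomes
   a "bset". */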
3991 const char *
3992 output_iorsi3 (rtx *operands)
3993 {
3994 register int logval;
3995 if (GET_CODE (operands[2]) == CONST_INT
3996 && INTVAL (operands[2]) >> 16 == 0
3997 && (DATA_REG_P (operands[0])
3998 || offsettable_memref_p (operands[0]))
3999 && !TARGET_COLDFIRE)
4000 {
4001 if (GET_CODE (operands[0]) != REG)
4002 operands[0] = adjust_address (operands[0], HImode, 2);
4003 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4004 CC_STATUS_INIT;
4005 if (INTVAL (operands[2]) == 0xffff)
4006 return "mov%.w %2,%0";
4007 return "or%.w %2,%0";
4008 }
4009 if (GET_CODE (operands[2]) == CONST_INT
4010 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4011 && (DATA_REG_P (operands[0])
4012 || offsettable_memref_p (operands[0])))
4013 {
4014 if (DATA_REG_P (operands[0]))
4015 operands[1] = GEN_INT (logval);
4016 else
4017 {
4018 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4019 operands[1] = GEN_INT (logval % 8);
4020 }
4021 CC_STATUS_INIT;
4022 return "bset %1,%0";
4023 }
4024 return "or%.l %2,%0";
4025 }
4026
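/* Output the assembler template for an SImode exclusive OR of OPERANDS:
   a constant that fits in 16 bits is handled with a word-sized "eor" (or
   a "not" when it is 0xffff), and a constant with a single set bit
   becomes a "bchg". */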
4027 const char *
4028 output_xorsi3 (rtx *operands)
4029 {
4030 register int logval;
4031 if (GET_CODE (operands[2]) == CONST_INT
4032 && INTVAL (operands[2]) >> 16 == 0
4033 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4034 && !TARGET_COLDFIRE)
4035 {
4036 if (! DATA_REG_P (operands[0]))
4037 operands[0] = adjust_address (operands[0], HImode, 2);
4038 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4039 CC_STATUS_INIT;
4040 if (INTVAL (operands[2]) == 0xffff)
4041 return "not%.w %0";
4042 return "eor%.w %2,%0";
4043 }
4044 if (GET_CODE (operands[2]) == CONST_INT
4045 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4046 && (DATA_REG_P (operands[0])
4047 || offsettable_memref_p (operands[0])))
4048 {
4049 if (DATA_REG_P (operands[0]))
4050 operands[1] = GEN_INT (logval);
4051 else
4052 {
4053 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4054 operands[1] = GEN_INT (logval % 8);
4055 }
4056 CC_STATUS_INIT;
4057 return "bchg %1,%0";
4058 }
4059 return "eor%.l %2,%0";
4060 }
4061
4062 /* Return the instruction that should be used for a call to address X,
4063 which is known to be in operand 0. */
4064
4065 const char *
4066 output_call (rtx x)
4067 {
4068 if (symbolic_operand (x, VOIDmode))
4069 return m68k_symbolic_call;
4070 else
4071 return "jsr %a0";
4072 }
4073
4074 /* Likewise sibling calls. */
4075
4076 const char *
4077 output_sibcall (rtx x)
4078 {
4079 if (symbolic_operand (x, VOIDmode))
4080 return m68k_symbolic_jump;
4081 else
4082 return "jmp %a0";
4083 }
4084
4085 #ifdef M68K_TARGET_COFF
4086
4087 /* Output assembly to switch to section NAME with attribute FLAGS. */
4088
4089 static void
4090 m68k_coff_asm_named_section (const char *name, unsigned int flags,
4091 tree decl ATTRIBUTE_UNUSED)
4092 {
4093 char flagchar;
4094
4095 if (flags & SECTION_WRITE)
4096 flagchar = 'd';
4097 else
4098 flagchar = 'x';
4099
4100 fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, flagchar);
4101 }
4102
4103 #endif /* M68K_TARGET_COFF */
4104
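/* Worker function for TARGET_ASM_OUTPUT_MI_THUNK. Generate and output a
   thunk that adds DELTA (and, when VCALL_OFFSET is nonzero,
   *(*this + VCALL_OFFSET)) to the incoming `this' pointer stored at
   4(%sp), then tail-calls FUNCTION. */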
4105 static void
4106 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
4107 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
4108 tree function)
4109 {
4110 rtx this_slot, offset, addr, mem, insn;
4111
4112 /* Pretend to be a post-reload pass while generating rtl. */
4113 no_new_pseudos = 1;
4114 reload_completed = 1;
4115 allocate_reg_info (FIRST_PSEUDO_REGISTER, true, true);
4116
4117 /* The "this" pointer is stored at 4(%sp). */
4118 this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
4119
4120 /* Add DELTA to THIS. */
4121 if (delta != 0)
4122 {
4123 /* Make the offset a legitimate operand for memory addition. */
4124 offset = GEN_INT (delta);
4125 if ((delta < -8 || delta > 8)
4126 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
4127 {
4128 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
4129 offset = gen_rtx_REG (Pmode, D0_REG);
4130 }
4131 emit_insn (gen_add3_insn (copy_rtx (this_slot),
4132 copy_rtx (this_slot), offset));
4133 }
4134
4135 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
4136 if (vcall_offset != 0)
4137 {
4138 /* Set the static chain register to *THIS. */
4139 emit_move_insn (static_chain_rtx, this_slot);
4140 emit_move_insn (static_chain_rtx, gen_rtx_MEM (Pmode, static_chain_rtx));
4141
4142 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
4143 addr = plus_constant (static_chain_rtx, vcall_offset);
4144 if (!m68k_legitimate_address_p (Pmode, addr, true))
4145 {
4146 emit_insn (gen_rtx_SET (VOIDmode, static_chain_rtx, addr));
4147 addr = static_chain_rtx;
4148 }
4149
4150 /* Load the offset into %d0 and add it to THIS. */
4151 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
4152 gen_rtx_MEM (Pmode, addr));
4153 emit_insn (gen_add3_insn (copy_rtx (this_slot),
4154 copy_rtx (this_slot),
4155 gen_rtx_REG (Pmode, D0_REG)));
4156 }
4157
4158 /* Jump to the target function. Use a sibcall if direct jumps are
4159 allowed, otherwise load the address into a register first. */
4160 mem = DECL_RTL (function);
4161 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
4162 {
4163 gcc_assert (flag_pic);
4164
4165 if (!TARGET_SEP_DATA)
4166 {
4167 /* Use the static chain register as a temporary (call-clobbered)
4168 GOT pointer for this function. We can use the static chain
4169 register because it isn't live on entry to the thunk. */
4170 REGNO (pic_offset_table_rtx) = STATIC_CHAIN_REGNUM;
4171 emit_insn (gen_load_got (pic_offset_table_rtx));
4172 }
4173 legitimize_pic_address (XEXP (mem, 0), Pmode, static_chain_rtx);
4174 mem = replace_equiv_address (mem, static_chain_rtx);
4175 }
4176 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
4177 SIBLING_CALL_P (insn) = 1;
4178
4179 /* Run just enough of rest_of_compilation. */
4180 insn = get_insns ();
4181 split_all_insns_noflow ();
4182 final_start_function (insn, file, 1);
4183 final (insn, file, 1);
4184 final_end_function ();
4185
4186 /* Clean up the vars set above. */
4187 reload_completed = 0;
4188 no_new_pseudos = 0;
4189
4190 /* Restore the original PIC register. */
4191 if (flag_pic)
4192 REGNO (pic_offset_table_rtx) = PIC_REG;
4193 }
4194
4195 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
4196
4197 static rtx
4198 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
4199 int incoming ATTRIBUTE_UNUSED)
4200 {
4201 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
4202 }
4203
4204 /* Return nonzero if register old_reg can be renamed to register new_reg. */
4205 int
4206 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
4207 unsigned int new_reg)
4208 {
4209
4210 /* Interrupt functions can only use registers that have already been
4211 saved by the prologue, even if they would normally be
4212 call-clobbered. */
4213
4214 if (m68k_interrupt_function_p (current_function_decl)
4215 && !regs_ever_live[new_reg])
4216 return 0;
4217
4218 return 1;
4219 }
4220
4221 /* Value is true if hard register REGNO can hold a value of machine-mode
4222 MODE. On the 68000, we let the CPU registers hold any mode, but
4223 restrict the 68881 registers to floating-point modes. */
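/* For instance, a DImode value occupies two consecutive registers, so
   under the checks below it may start in d0-d6 but not in d7, where it
   would spill over into the address registers. */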
4224
4225 bool
4226 m68k_regno_mode_ok (int regno, enum machine_mode mode)
4227 {
4228 if (DATA_REGNO_P (regno))
4229 {
4230 /* Data registers can hold values of any mode, including aggregates, provided they fit within the data registers. */
4231 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
4232 return true;
4233 }
4234 else if (ADDRESS_REGNO_P (regno))
4235 {
4236 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
4237 return true;
4238 }
4239 else if (FP_REGNO_P (regno))
4240 {
4241 /* FPU registers can hold a floating-point or complex floating-point
4242 value of long double size or smaller. */
4243 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
4244 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4245 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
4246 return true;
4247 }
4248 return false;
4249 }
4250
4251 /* Implement SECONDARY_RELOAD_CLASS. */
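/* For example (illustrative): reloading a QImode memory operand into an
   address register returns DATA_REGS below, since the 68k has no byte
   moves into address registers and the value must therefore go through a
   data register. */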
4252
4253 enum reg_class
4254 m68k_secondary_reload_class (enum reg_class rclass,
4255 enum machine_mode mode, rtx x)
4256 {
4257 int regno;
4258
4259 regno = true_regnum (x);
4260
4261 /* If one operand of a movqi is an address register, the other
4262 operand must be a general register or constant. Other types
4263 of operand must be reloaded through a data register. */
4264 if (GET_MODE_SIZE (mode) == 1
4265 && reg_classes_intersect_p (rclass, ADDR_REGS)
4266 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
4267 return DATA_REGS;
4268
4269 /* PC-relative addresses must be loaded into an address register first. */
4270 if (TARGET_PCREL
4271 && !reg_class_subset_p (rclass, ADDR_REGS)
4272 && symbolic_operand (x, VOIDmode))
4273 return ADDR_REGS;
4274
4275 return NO_REGS;
4276 }
4277
4278 /* Implement PREFERRED_RELOAD_CLASS. */
4279
4280 enum reg_class
4281 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
4282 {
4283 enum reg_class secondary_class;
4284
4285 /* If RCLASS might need a secondary reload, try restricting it to
4286 a class that doesn't. */
4287 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
4288 if (secondary_class != NO_REGS
4289 && reg_class_subset_p (secondary_class, rclass))
4290 return secondary_class;
4291
4292 /* Prefer to use moveq for in-range constants. */
4293 if (GET_CODE (x) == CONST_INT
4294 && reg_class_subset_p (DATA_REGS, rclass)
4295 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
4296 return DATA_REGS;
4297
4298 /* ??? Do we really need this now? */
4299 if (GET_CODE (x) == CONST_DOUBLE
4300 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4301 {
4302 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
4303 return FP_REGS;
4304
4305 return NO_REGS;
4306 }
4307
4308 return rclass;
4309 }
4310
4311 /* Return floating point values in a 68881 register. This makes 68881 code
4312 a little bit faster. It also makes -msoft-float code incompatible with
4313 hard-float code, so people have to be careful not to mix the two.
4314 For ColdFire it was decided the ABI incompatibility is undesirable.
4315 If there is need for a hard-float ABI it is probably worth doing it
4316 properly and also passing function arguments in FP registers. */
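/* For example, with 68881 code a DFmode libcall value comes back in fp0;
   with soft float it comes back in d0/d1 (a DFmode register starting at
   d0). */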
4317 rtx
4318 m68k_libcall_value (enum machine_mode mode)
4319 {
4320 switch (mode) {
4321 case SFmode:
4322 case DFmode:
4323 case XFmode:
4324 if (TARGET_68881)
4325 return gen_rtx_REG (mode, FP0_REG);
4326 break;
4327 default:
4328 break;
4329 }
4330 return gen_rtx_REG (mode, D0_REG);
4331 }
4332
4333 rtx
4334 m68k_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4335 {
4336 enum machine_mode mode;
4337
4338 mode = TYPE_MODE (valtype);
4339 switch (mode) {
4340 case SFmode:
4341 case DFmode:
4342 case XFmode:
4343 if (TARGET_68881)
4344 return gen_rtx_REG (mode, FP0_REG);
4345 break;
4346 default:
4347 break;
4348 }
4349
4350 /* If the function returns a pointer, push that into %a0. */
4351 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
4352 /* For compatibility with the large body of existing code which
4353 does not always properly declare external functions returning
4354 pointer types, the m68k/SVR4 convention is to copy the value
4355 returned for pointer functions from a0 to d0 in the function
4356 epilogue, so that callers that have neglected to properly
4357 declare the callee can still find the correct return value in
4358 d0. */
4359 return gen_rtx_PARALLEL
4360 (mode,
4361 gen_rtvec (2,
4362 gen_rtx_EXPR_LIST (VOIDmode,
4363 gen_rtx_REG (mode, A0_REG),
4364 const0_rtx),
4365 gen_rtx_EXPR_LIST (VOIDmode,
4366 gen_rtx_REG (mode, D0_REG),
4367 const0_rtx)));
4368 else if (POINTER_TYPE_P (valtype))
4369 return gen_rtx_REG (mode, A0_REG);
4370 else
4371 return gen_rtx_REG (mode, D0_REG);
4372 }