/* Definitions of target machine for GNU compiler for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_SH_H
#define GCC_SH_H

#include "config/vxworks-dummy.h"

#define TARGET_VERSION \
  fputs (" (Hitachi SH)", stderr);

/* Unfortunately, insn-attrtab.c doesn't include insn-codes.h.  We can't
   include it here, because bconfig.h is also included by gencodes.c .  */
/* ??? No longer true.  */
extern int code_for_indirect_jump_scratch;

#define TARGET_CPU_CPP_BUILTINS() \
do { \
  builtin_define ("__sh__"); \
  builtin_assert ("cpu=sh"); \
  builtin_assert ("machine=sh"); \
  switch ((int) sh_cpu) \
    { \
    case PROCESSOR_SH1: \
      builtin_define ("__sh1__"); \
      break; \
    case PROCESSOR_SH2: \
      builtin_define ("__sh2__"); \
      break; \
    case PROCESSOR_SH2E: \
      builtin_define ("__SH2E__"); \
      break; \
    case PROCESSOR_SH2A: \
      builtin_define ("__SH2A__"); \
      builtin_define (TARGET_SH2A_DOUBLE \
                      ? (TARGET_FPU_SINGLE ? "__SH2A_SINGLE__" : "__SH2A_DOUBLE__") \
                      : TARGET_FPU_ANY ? "__SH2A_SINGLE_ONLY__" \
                      : "__SH2A_NOFPU__"); \
      break; \
    case PROCESSOR_SH3: \
      builtin_define ("__sh3__"); \
      builtin_define ("__SH3__"); \
      if (TARGET_HARD_SH4) \
        builtin_define ("__SH4_NOFPU__"); \
      break; \
    case PROCESSOR_SH3E: \
      builtin_define (TARGET_HARD_SH4 ? "__SH4_SINGLE_ONLY__" : "__SH3E__"); \
      break; \
    case PROCESSOR_SH4: \
      builtin_define (TARGET_FPU_SINGLE ? "__SH4_SINGLE__" : "__SH4__"); \
      break; \
    case PROCESSOR_SH4A: \
      builtin_define ("__SH4A__"); \
      builtin_define (TARGET_SH4 \
                      ? (TARGET_FPU_SINGLE ? "__SH4_SINGLE__" : "__SH4__") \
                      : TARGET_FPU_ANY ? "__SH4_SINGLE_ONLY__" \
                      : "__SH4_NOFPU__"); \
      break; \
    case PROCESSOR_SH5: \
      { \
        builtin_define_with_value ("__SH5__", \
                                   TARGET_SHMEDIA64 ? "64" : "32", 0); \
        builtin_define_with_value ("__SHMEDIA__", \
                                   TARGET_SHMEDIA ? "1" : "0", 0); \
        if (! TARGET_FPU_DOUBLE) \
          builtin_define ("__SH4_NOFPU__"); \
      } \
    } \
  if (TARGET_FPU_ANY) \
    builtin_define ("__SH_FPU_ANY__"); \
  if (TARGET_FPU_DOUBLE) \
    builtin_define ("__SH_FPU_DOUBLE__"); \
  if (TARGET_HITACHI) \
    builtin_define ("__HITACHI__"); \
  builtin_define (TARGET_LITTLE_ENDIAN \
                  ? "__LITTLE_ENDIAN__" : "__BIG_ENDIAN__"); \
} while (0)
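
/* For illustration only (an assumed invocation, not an exhaustive list):
   compiling with -m4 on a default big-endian configuration goes through
   the PROCESSOR_SH4 case above with no single-precision-only flag set, so
   the preprocessor sees __sh__, __SH4__, __SH_FPU_ANY__, __SH_FPU_DOUBLE__
   and __BIG_ENDIAN__, besides the cpu=sh and machine=sh assertions.  */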

/* We can not debug without a frame pointer.  */
/* #define CAN_DEBUG_WITHOUT_FP */

#define CONDITIONAL_REGISTER_USAGE do \
{ \
  int regno; \
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++) \
    if (! VALID_REGISTER_P (regno)) \
      fixed_regs[regno] = call_used_regs[regno] = 1; \
  /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs.  */ \
  if (TARGET_SH5) \
    { \
      call_used_regs[FIRST_GENERAL_REG + 8] \
        = call_used_regs[FIRST_GENERAL_REG + 9] = 1; \
      call_really_used_regs[FIRST_GENERAL_REG + 8] \
        = call_really_used_regs[FIRST_GENERAL_REG + 9] = 1; \
    } \
  if (TARGET_SHMEDIA) \
    { \
      regno_reg_class[FIRST_GENERAL_REG] = GENERAL_REGS; \
      CLEAR_HARD_REG_SET (reg_class_contents[FP0_REGS]); \
      regno_reg_class[FIRST_FP_REG] = FP_REGS; \
    } \
  if (flag_pic) \
    { \
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
    } \
  /* Renesas saves and restores mac registers on call.  */ \
  if (TARGET_HITACHI && ! TARGET_NOMACSAVE) \
    { \
      call_really_used_regs[MACH_REG] = 0; \
      call_really_used_regs[MACL_REG] = 0; \
    } \
  for (regno = FIRST_FP_REG + (TARGET_LITTLE_ENDIAN != 0); \
       regno <= LAST_FP_REG; regno += 2) \
    SET_HARD_REG_BIT (reg_class_contents[DF_HI_REGS], regno); \
  if (TARGET_SHMEDIA) \
    { \
      for (regno = FIRST_TARGET_REG; regno <= LAST_TARGET_REG; regno ++) \
        if (! fixed_regs[regno] && call_really_used_regs[regno]) \
          SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno); \
    } \
  else \
    for (regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++) \
      if (! fixed_regs[regno] && call_really_used_regs[regno]) \
        SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno); \
} while (0)
\f
/* Nonzero if this is an ELF target - compile time only */
#define TARGET_ELF 0

/* Nonzero if we should generate code using type 2E insns.  */
#define TARGET_SH2E (TARGET_SH2 && TARGET_SH_E)

/* Nonzero if we should generate code using type 2A insns.  */
#define TARGET_SH2A TARGET_HARD_SH2A
/* Nonzero if we should generate code using type 2A SF insns.  */
#define TARGET_SH2A_SINGLE (TARGET_SH2A && TARGET_SH2E)
/* Nonzero if we should generate code using type 2A DF insns.  */
#define TARGET_SH2A_DOUBLE (TARGET_HARD_SH2A_DOUBLE && TARGET_SH2A)

/* Nonzero if we should generate code using type 3E insns.  */
#define TARGET_SH3E (TARGET_SH3 && TARGET_SH_E)

/* Nonzero if the cache line size is 32.  */
#define TARGET_CACHE32 (TARGET_HARD_SH4 || TARGET_SH5)

/* Nonzero if we schedule for a superscalar implementation.  */
#define TARGET_SUPERSCALAR TARGET_HARD_SH4

/* Nonzero if the target has separate instruction and data caches.  */
#define TARGET_HARVARD (TARGET_HARD_SH4 || TARGET_SH5)

/* Nonzero if a double-precision FPU is available.  */
#define TARGET_FPU_DOUBLE \
  ((target_flags & MASK_SH4) != 0 || TARGET_SH2A_DOUBLE)

/* Nonzero if an FPU is available.  */
#define TARGET_FPU_ANY (TARGET_SH2E || TARGET_FPU_DOUBLE)

/* Nonzero if we should generate code using type 4 insns.  */
#undef TARGET_SH4
#define TARGET_SH4 ((target_flags & MASK_SH4) != 0 && TARGET_SH1)

/* Nonzero if we're generating code for the common subset of
   instructions present on both SH4a and SH4al-dsp.  */
#define TARGET_SH4A_ARCH TARGET_SH4A

/* Nonzero if we're generating code for SH4a, unless the use of the
   FPU is disabled (which makes it compatible with SH4al-dsp).  */
#define TARGET_SH4A_FP (TARGET_SH4A_ARCH && TARGET_FPU_ANY)

/* Nonzero if we should generate code using the SHcompact instruction
   set and 32-bit ABI.  */
#define TARGET_SHCOMPACT (TARGET_SH5 && TARGET_SH1)

/* Nonzero if we should generate code using the SHmedia instruction
   set and ABI.  */
#define TARGET_SHMEDIA (TARGET_SH5 && ! TARGET_SH1)

/* Nonzero if we should generate code using the SHmedia ISA and 32-bit
   ABI.  */
#define TARGET_SHMEDIA32 (TARGET_SH5 && ! TARGET_SH1 && TARGET_SH_E)

/* Nonzero if we should generate code using the SHmedia ISA and 64-bit
   ABI.  */
#define TARGET_SHMEDIA64 (TARGET_SH5 && ! TARGET_SH1 && ! TARGET_SH_E)

/* Nonzero if we should generate code using SHmedia FPU instructions.  */
#define TARGET_SHMEDIA_FPU (TARGET_SHMEDIA && TARGET_FPU_DOUBLE)

/* This is not used by the SH2E calling convention  */
#define TARGET_VARARGS_PRETEND_ARGS(FUN_DECL) \
  (TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5 \
   && ! (TARGET_HITACHI || sh_attr_renesas_p (FUN_DECL)))

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT SELECT_SH1
#define SUPPORT_SH1 1
#define SUPPORT_SH2E 1
#define SUPPORT_SH4 1
#define SUPPORT_SH4_SINGLE 1
#define SUPPORT_SH2A 1
#define SUPPORT_SH2A_SINGLE 1
#endif

#define TARGET_DIVIDE_INV \
  (sh_div_strategy == SH_DIV_INV || sh_div_strategy == SH_DIV_INV_MINLAT \
   || sh_div_strategy == SH_DIV_INV20U || sh_div_strategy == SH_DIV_INV20L \
   || sh_div_strategy == SH_DIV_INV_CALL \
   || sh_div_strategy == SH_DIV_INV_CALL2 || sh_div_strategy == SH_DIV_INV_FP)
#define TARGET_DIVIDE_FP (sh_div_strategy == SH_DIV_FP)
#define TARGET_DIVIDE_INV_FP (sh_div_strategy == SH_DIV_INV_FP)
#define TARGET_DIVIDE_CALL2 (sh_div_strategy == SH_DIV_CALL2)
#define TARGET_DIVIDE_INV_MINLAT (sh_div_strategy == SH_DIV_INV_MINLAT)
#define TARGET_DIVIDE_INV20U (sh_div_strategy == SH_DIV_INV20U)
#define TARGET_DIVIDE_INV20L (sh_div_strategy == SH_DIV_INV20L)
#define TARGET_DIVIDE_INV_CALL (sh_div_strategy == SH_DIV_INV_CALL)
#define TARGET_DIVIDE_INV_CALL2 (sh_div_strategy == SH_DIV_INV_CALL2)
#define TARGET_DIVIDE_CALL_DIV1 (sh_div_strategy == SH_DIV_CALL_DIV1)
#define TARGET_DIVIDE_CALL_FP (sh_div_strategy == SH_DIV_CALL_FP)
#define TARGET_DIVIDE_CALL_TABLE (sh_div_strategy == SH_DIV_CALL_TABLE)

#define SELECT_SH1 (MASK_SH1)
#define SELECT_SH2 (MASK_SH2 | SELECT_SH1)
#define SELECT_SH2E (MASK_SH_E | MASK_SH2 | MASK_SH1 \
                     | MASK_FPU_SINGLE)
#define SELECT_SH2A (MASK_SH_E | MASK_HARD_SH2A \
                     | MASK_HARD_SH2A_DOUBLE \
                     | MASK_SH2 | MASK_SH1)
#define SELECT_SH2A_NOFPU (MASK_HARD_SH2A | MASK_SH2 | MASK_SH1)
#define SELECT_SH2A_SINGLE_ONLY (MASK_SH_E | MASK_HARD_SH2A | MASK_SH2 \
                                 | MASK_SH1 | MASK_FPU_SINGLE)
#define SELECT_SH2A_SINGLE (MASK_SH_E | MASK_HARD_SH2A \
                            | MASK_FPU_SINGLE | MASK_HARD_SH2A_DOUBLE \
                            | MASK_SH2 | MASK_SH1)
#define SELECT_SH3 (MASK_SH3 | SELECT_SH2)
#define SELECT_SH3E (MASK_SH_E | MASK_FPU_SINGLE | SELECT_SH3)
#define SELECT_SH4_NOFPU (MASK_HARD_SH4 | SELECT_SH3)
#define SELECT_SH4_SINGLE_ONLY (MASK_HARD_SH4 | SELECT_SH3E)
#define SELECT_SH4 (MASK_SH4 | MASK_SH_E | MASK_HARD_SH4 \
                    | SELECT_SH3)
#define SELECT_SH4_SINGLE (MASK_FPU_SINGLE | SELECT_SH4)
#define SELECT_SH4A_NOFPU (MASK_SH4A | SELECT_SH4_NOFPU)
#define SELECT_SH4A_SINGLE_ONLY (MASK_SH4A | SELECT_SH4_SINGLE_ONLY)
#define SELECT_SH4A (MASK_SH4A | SELECT_SH4)
#define SELECT_SH4A_SINGLE (MASK_SH4A | SELECT_SH4_SINGLE)
#define SELECT_SH5_64MEDIA (MASK_SH5 | MASK_SH4)
#define SELECT_SH5_64MEDIA_NOFPU (MASK_SH5)
#define SELECT_SH5_32MEDIA (MASK_SH5 | MASK_SH4 | MASK_SH_E)
#define SELECT_SH5_32MEDIA_NOFPU (MASK_SH5 | MASK_SH_E)
#define SELECT_SH5_COMPACT (MASK_SH5 | MASK_SH4 | SELECT_SH3E)
#define SELECT_SH5_COMPACT_NOFPU (MASK_SH5 | SELECT_SH3)
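
/* Illustrative note: the SELECT_* values above compose; e.g. SELECT_SH4
   expands to MASK_SH4 | MASK_SH_E | MASK_HARD_SH4 | MASK_SH3 | MASK_SH2
   | MASK_SH1, so selecting SH4 also enables every earlier ISA subset.  */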

#if SUPPORT_SH1
#define SUPPORT_SH2 1
#endif
#if SUPPORT_SH2
#define SUPPORT_SH3 1
#define SUPPORT_SH2A_NOFPU 1
#endif
#if SUPPORT_SH3
#define SUPPORT_SH4_NOFPU 1
#endif
#if SUPPORT_SH4_NOFPU
#define SUPPORT_SH4A_NOFPU 1
#define SUPPORT_SH4AL 1
#endif

#if SUPPORT_SH2E
#define SUPPORT_SH3E 1
#define SUPPORT_SH2A_SINGLE_ONLY 1
#endif
#if SUPPORT_SH3E
#define SUPPORT_SH4_SINGLE_ONLY 1
#endif
#if SUPPORT_SH4_SINGLE_ONLY
#define SUPPORT_SH4A_SINGLE_ONLY 1
#endif

#if SUPPORT_SH4
#define SUPPORT_SH4A 1
#endif

#if SUPPORT_SH4_SINGLE
#define SUPPORT_SH4A_SINGLE 1
#endif

#if SUPPORT_SH5_COMPACT
#define SUPPORT_SH5_32MEDIA 1
#endif

#if SUPPORT_SH5_COMPACT_NOFPU
#define SUPPORT_SH5_32MEDIA_NOFPU 1
#endif

#define SUPPORT_ANY_SH5_32MEDIA \
  (SUPPORT_SH5_32MEDIA || SUPPORT_SH5_32MEDIA_NOFPU)
#define SUPPORT_ANY_SH5_64MEDIA \
  (SUPPORT_SH5_64MEDIA || SUPPORT_SH5_64MEDIA_NOFPU)
#define SUPPORT_ANY_SH5 \
  (SUPPORT_ANY_SH5_32MEDIA || SUPPORT_ANY_SH5_64MEDIA)

/* Reset all target-selection flags.  */
#define MASK_ARCH (MASK_SH1 | MASK_SH2 | MASK_SH3 | MASK_SH_E | MASK_SH4 \
                   | MASK_HARD_SH2A | MASK_HARD_SH2A_DOUBLE | MASK_SH4A \
                   | MASK_HARD_SH4 | MASK_FPU_SINGLE | MASK_SH5)

/* This defaults us to big-endian.  */
#ifndef TARGET_ENDIAN_DEFAULT
#define TARGET_ENDIAN_DEFAULT 0
#endif

#ifndef TARGET_OPT_DEFAULT
#define TARGET_OPT_DEFAULT MASK_ADJUST_UNROLL
#endif

#define TARGET_DEFAULT \
  (TARGET_CPU_DEFAULT | TARGET_ENDIAN_DEFAULT | TARGET_OPT_DEFAULT)

#ifndef SH_MULTILIB_CPU_DEFAULT
#define SH_MULTILIB_CPU_DEFAULT "m1"
#endif

#if TARGET_ENDIAN_DEFAULT
#define MULTILIB_DEFAULTS { "ml", SH_MULTILIB_CPU_DEFAULT }
#else
#define MULTILIB_DEFAULTS { "mb", SH_MULTILIB_CPU_DEFAULT }
#endif

#define CPP_SPEC " %(subtarget_cpp_spec) "

#ifndef SUBTARGET_CPP_SPEC
#define SUBTARGET_CPP_SPEC ""
#endif

#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

#define EXTRA_SPECS \
  { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
  { "link_emul_prefix", LINK_EMUL_PREFIX }, \
  { "link_default_cpu_emul", LINK_DEFAULT_CPU_EMUL }, \
  { "subtarget_link_emul_suffix", SUBTARGET_LINK_EMUL_SUFFIX }, \
  { "subtarget_link_spec", SUBTARGET_LINK_SPEC }, \
  { "subtarget_asm_endian_spec", SUBTARGET_ASM_ENDIAN_SPEC }, \
  { "subtarget_asm_relax_spec", SUBTARGET_ASM_RELAX_SPEC }, \
  { "subtarget_asm_isa_spec", SUBTARGET_ASM_ISA_SPEC }, \
  { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \
  SUBTARGET_EXTRA_SPECS

#if TARGET_CPU_DEFAULT & MASK_HARD_SH4
#define SUBTARGET_ASM_RELAX_SPEC "%{!m1:%{!m2:%{!m3*:%{!m5*:-isa=sh4-up}}}}"
#else
#define SUBTARGET_ASM_RELAX_SPEC "%{m4*:-isa=sh4-up}"
#endif

#define SH_ASM_SPEC \
 "%(subtarget_asm_endian_spec) %{mrelax:-relax %(subtarget_asm_relax_spec)}\
%(subtarget_asm_isa_spec) %(subtarget_asm_spec)\
%{m2a:--isa=sh2a} \
%{m2a-single:--isa=sh2a} \
%{m2a-single-only:--isa=sh2a} \
%{m2a-nofpu:--isa=sh2a-nofpu} \
%{m5-compact*:--isa=SHcompact} \
%{m5-32media*:--isa=SHmedia --abi=32} \
%{m5-64media*:--isa=SHmedia --abi=64} \
%{m4al:-dsp} %{mcut2-workaround:-cut2-workaround}"

#define ASM_SPEC SH_ASM_SPEC

#ifndef SUBTARGET_ASM_ENDIAN_SPEC
#if TARGET_ENDIAN_DEFAULT == MASK_LITTLE_ENDIAN
#define SUBTARGET_ASM_ENDIAN_SPEC "%{mb:-big} %{!mb:-little}"
#else
#define SUBTARGET_ASM_ENDIAN_SPEC "%{ml:-little} %{!ml:-big}"
#endif
#endif

#if STRICT_NOFPU == 1
/* Strict nofpu means that the compiler should tell the assembler
   to reject FPU instructions, e.g. from ASM inserts.  */
#if TARGET_CPU_DEFAULT & MASK_HARD_SH4 && !(TARGET_CPU_DEFAULT & MASK_SH_E)
#define SUBTARGET_ASM_ISA_SPEC "%{!m1:%{!m2:%{!m3*:%{m4-nofpu|!m4*:%{!m5:-isa=sh4-nofpu}}}}}"
#else
/* If there were an -isa option for sh5-nofpu then it would also go here.  */
#define SUBTARGET_ASM_ISA_SPEC \
  "%{m4-nofpu:-isa=sh4-nofpu} " ASM_ISA_DEFAULT_SPEC
#endif
#else /* ! STRICT_NOFPU */
#define SUBTARGET_ASM_ISA_SPEC ASM_ISA_DEFAULT_SPEC
#endif

#ifndef SUBTARGET_ASM_SPEC
#define SUBTARGET_ASM_SPEC ""
#endif

#if TARGET_ENDIAN_DEFAULT == MASK_LITTLE_ENDIAN
#define LINK_EMUL_PREFIX "sh%{!mb:l}"
#else
#define LINK_EMUL_PREFIX "sh%{ml:l}"
#endif

#if TARGET_CPU_DEFAULT & MASK_SH5
#if TARGET_CPU_DEFAULT & MASK_SH_E
#define LINK_DEFAULT_CPU_EMUL "32"
#if TARGET_CPU_DEFAULT & MASK_SH1
#define ASM_ISA_SPEC_DEFAULT "--isa=SHcompact"
#else
#define ASM_ISA_SPEC_DEFAULT "--isa=SHmedia --abi=32"
#endif /* MASK_SH1 */
#else /* !MASK_SH_E */
#define LINK_DEFAULT_CPU_EMUL "64"
#define ASM_ISA_SPEC_DEFAULT "--isa=SHmedia --abi=64"
#endif /* MASK_SH_E */
#define ASM_ISA_DEFAULT_SPEC \
  " %{!m1:%{!m2*:%{!m3*:%{!m4*:%{!m5*:" ASM_ISA_SPEC_DEFAULT "}}}}}"
#else /* !MASK_SH5 */
#define LINK_DEFAULT_CPU_EMUL ""
#define ASM_ISA_DEFAULT_SPEC ""
#endif /* MASK_SH5 */

#define SUBTARGET_LINK_EMUL_SUFFIX ""
#define SUBTARGET_LINK_SPEC ""

/* svr4.h redefines LINK_SPEC inappropriately, so go via SH_LINK_SPEC,
   so that we can undo the damage without code replication.  */
#define LINK_SPEC SH_LINK_SPEC

#define SH_LINK_SPEC "\
-m %(link_emul_prefix)\
%{m5-compact*|m5-32media*:32}\
%{m5-64media*:64}\
%{!m1:%{!m2:%{!m3*:%{!m4*:%{!m5*:%(link_default_cpu_emul)}}}}}\
%(subtarget_link_emul_suffix) \
%{mrelax:-relax} %(subtarget_link_spec)"

#ifndef SH_DIV_STR_FOR_SIZE
#define SH_DIV_STR_FOR_SIZE "call"
#endif

#define DRIVER_SELF_SPECS "%{m2a:%{ml:%eSH2a does not support little-endian}}"
#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \
do { \
  if (LEVEL) \
    { \
      flag_omit_frame_pointer = 2; \
      if (! SIZE) \
        sh_div_str = "inv:minlat"; \
    } \
  if (SIZE) \
    { \
      target_flags |= MASK_SMALLCODE; \
      sh_div_str = SH_DIV_STR_FOR_SIZE ; \
    } \
  else \
    { \
      TARGET_CBRANCHDI4 = 1; \
      TARGET_EXPAND_CBRANCHDI4 = 1; \
    } \
  /* We can't meaningfully test TARGET_SHMEDIA here, because -m options \
     haven't been parsed yet, hence we'd read only the default. \
     sh_target_reg_class will return NO_REGS if this is not SHMEDIA, so \
     it's OK to always set flag_branch_target_load_optimize.  */ \
  if (LEVEL > 1) \
    { \
      flag_branch_target_load_optimize = 1; \
      if (! (SIZE)) \
        target_flags |= MASK_SAVE_ALL_TARGET_REGS; \
    } \
  /* Likewise, we can't meaningfully test TARGET_SH2E / TARGET_IEEE \
     here, so leave it to OVERRIDE_OPTIONS to set \
     flag_finite_math_only.  We set it to 2 here so we know if the user \
     explicitly requested this to be on or off.  */ \
  flag_finite_math_only = 2; \
  /* If flag_schedule_insns is 1, we set it to 2 here so we know if \
     the user explicitly requested this to be on or off.  */ \
  if (flag_schedule_insns > 0) \
    flag_schedule_insns = 2; \
\
  set_param_value ("simultaneous-prefetches", 2); \
} while (0)

#define ASSEMBLER_DIALECT assembler_dialect

extern int assembler_dialect;

enum sh_divide_strategy_e {
  /* SH5 strategies.  */
  SH_DIV_CALL,
  SH_DIV_CALL2,
  SH_DIV_FP, /* We could do this also for SH4.  */
  SH_DIV_INV,
  SH_DIV_INV_MINLAT,
  SH_DIV_INV20U,
  SH_DIV_INV20L,
  SH_DIV_INV_CALL,
  SH_DIV_INV_CALL2,
  SH_DIV_INV_FP,
  /* SH1 .. SH4 strategies.  Because of the small number of registers
     available, the compiler uses knowledge of the actual set of registers
     being clobbered by the different functions called.  */
  SH_DIV_CALL_DIV1, /* No FPU, medium size, highest latency.  */
  SH_DIV_CALL_FP, /* FPU needed, small size, high latency.  */
  SH_DIV_CALL_TABLE, /* No FPU, large size, medium latency.  */
  SH_DIV_INTRINSIC
};

extern enum sh_divide_strategy_e sh_div_strategy;

#ifndef SH_DIV_STRATEGY_DEFAULT
#define SH_DIV_STRATEGY_DEFAULT SH_DIV_CALL
#endif

#define SUBTARGET_OVERRIDE_OPTIONS (void) 0

extern const char *sh_fixed_range_str;

#define OVERRIDE_OPTIONS \
do { \
  int regno; \
\
  SUBTARGET_OVERRIDE_OPTIONS; \
  if (flag_finite_math_only == 2) \
    flag_finite_math_only \
      = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE; \
  if (TARGET_SH2E && !flag_finite_math_only) \
    target_flags |= MASK_IEEE; \
  sh_cpu = CPU_SH1; \
  assembler_dialect = 0; \
  if (TARGET_SH2) \
    sh_cpu = CPU_SH2; \
  if (TARGET_SH2E) \
    sh_cpu = CPU_SH2E; \
  if (TARGET_SH2A) \
    { \
      sh_cpu = CPU_SH2A; \
      if (TARGET_SH2A_DOUBLE) \
        target_flags |= MASK_FMOVD; \
    } \
  if (TARGET_SH3) \
    sh_cpu = CPU_SH3; \
  if (TARGET_SH3E) \
    sh_cpu = CPU_SH3E; \
  if (TARGET_SH4) \
    { \
      assembler_dialect = 1; \
      sh_cpu = CPU_SH4; \
    } \
  if (TARGET_SH4A_ARCH) \
    { \
      assembler_dialect = 1; \
      sh_cpu = CPU_SH4A; \
    } \
  if (TARGET_SH5) \
    { \
      sh_cpu = CPU_SH5; \
      target_flags |= MASK_ALIGN_DOUBLE; \
      if (TARGET_SHMEDIA_FPU) \
        target_flags |= MASK_FMOVD; \
      if (TARGET_SHMEDIA) \
        { \
          /* There are no delay slots on SHmedia.  */ \
          flag_delayed_branch = 0; \
          /* Relaxation isn't yet supported for SHmedia */ \
          target_flags &= ~MASK_RELAX; \
          /* After reload, if-conversion does little good but can cause \
             ICEs: \
             - find_if_block doesn't do anything for SH because we don't \
               have conditional execution patterns.  (We use conditional \
               move patterns, which are handled differently, and only \
               before reload). \
             - find_cond_trap doesn't do anything for the SH because we \
               don't have conditional traps. \
             - find_if_case_1 uses redirect_edge_and_branch_force in \
               the only path that does an optimization, and this causes \
               an ICE when branch targets are in registers. \
             - find_if_case_2 doesn't do anything for the SHmedia after \
               reload except when it can redirect a tablejump - and \
               that's rather rare.  */ \
          flag_if_conversion2 = 0; \
          if (! strcmp (sh_div_str, "call")) \
            sh_div_strategy = SH_DIV_CALL; \
          else if (! strcmp (sh_div_str, "call2")) \
            sh_div_strategy = SH_DIV_CALL2; \
          if (! strcmp (sh_div_str, "fp") && TARGET_FPU_ANY) \
            sh_div_strategy = SH_DIV_FP; \
          else if (! strcmp (sh_div_str, "inv")) \
            sh_div_strategy = SH_DIV_INV; \
          else if (! strcmp (sh_div_str, "inv:minlat")) \
            sh_div_strategy = SH_DIV_INV_MINLAT; \
          else if (! strcmp (sh_div_str, "inv20u")) \
            sh_div_strategy = SH_DIV_INV20U; \
          else if (! strcmp (sh_div_str, "inv20l")) \
            sh_div_strategy = SH_DIV_INV20L; \
          else if (! strcmp (sh_div_str, "inv:call2")) \
            sh_div_strategy = SH_DIV_INV_CALL2; \
          else if (! strcmp (sh_div_str, "inv:call")) \
            sh_div_strategy = SH_DIV_INV_CALL; \
          else if (! strcmp (sh_div_str, "inv:fp")) \
            { \
              if (TARGET_FPU_ANY) \
                sh_div_strategy = SH_DIV_INV_FP; \
              else \
                sh_div_strategy = SH_DIV_INV; \
            } \
          TARGET_CBRANCHDI4 = 0; \
          /* Assembler CFI isn't yet fully supported for SHmedia.  */ \
          flag_dwarf2_cfi_asm = 0; \
        } \
    } \
  else \
    { \
      /* Only the sh64-elf assembler fully supports .quad properly.  */ \
      targetm.asm_out.aligned_op.di = NULL; \
      targetm.asm_out.unaligned_op.di = NULL; \
    } \
  if (TARGET_SH1) \
    { \
      if (! strcmp (sh_div_str, "call-div1")) \
        sh_div_strategy = SH_DIV_CALL_DIV1; \
      else if (! strcmp (sh_div_str, "call-fp") \
               && (TARGET_FPU_DOUBLE \
                   || (TARGET_HARD_SH4 && TARGET_SH2E) \
                   || (TARGET_SHCOMPACT && TARGET_FPU_ANY))) \
        sh_div_strategy = SH_DIV_CALL_FP; \
      else if (! strcmp (sh_div_str, "call-table") && TARGET_SH2) \
        sh_div_strategy = SH_DIV_CALL_TABLE; \
      else \
        /* Pick one that makes most sense for the target in general. \
           It is not much good to use different functions depending \
           on -Os, since then we'll end up with two different functions \
           when some of the code is compiled for size, and some for \
           speed.  */ \
\
        /* SH4 tends to emphasize speed.  */ \
        if (TARGET_HARD_SH4) \
          sh_div_strategy = SH_DIV_CALL_TABLE; \
        /* These have their own way of doing things.  */ \
        else if (TARGET_SH2A) \
          sh_div_strategy = SH_DIV_INTRINSIC; \
        /* ??? Should we use the integer SHmedia function instead?  */ \
        else if (TARGET_SHCOMPACT && TARGET_FPU_ANY) \
          sh_div_strategy = SH_DIV_CALL_FP; \
        /* SH1 .. SH3 cores often go into small-footprint systems, so \
           default to the smallest implementation available.  */ \
        else if (TARGET_SH2) /* ??? EXPERIMENTAL */ \
          sh_div_strategy = SH_DIV_CALL_TABLE; \
        else \
          sh_div_strategy = SH_DIV_CALL_DIV1; \
    } \
  if (!TARGET_SH1) \
    TARGET_PRETEND_CMOVE = 0; \
  if (sh_divsi3_libfunc[0]) \
    ; /* User supplied - leave it alone.  */ \
  else if (TARGET_DIVIDE_CALL_FP) \
    sh_divsi3_libfunc = "__sdivsi3_i4"; \
  else if (TARGET_DIVIDE_CALL_TABLE) \
    sh_divsi3_libfunc = "__sdivsi3_i4i"; \
  else if (TARGET_SH5) \
    sh_divsi3_libfunc = "__sdivsi3_1"; \
  else \
    sh_divsi3_libfunc = "__sdivsi3"; \
  if (sh_branch_cost == -1) \
    sh_branch_cost \
      = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1; \
\
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
    if (! VALID_REGISTER_P (regno)) \
      sh_register_names[regno][0] = '\0'; \
\
  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++) \
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno))) \
      sh_additional_register_names[regno][0] = '\0'; \
\
  if (flag_omit_frame_pointer == 2) \
    { \
      /* The debugging information is sufficient, \
         but gdb doesn't implement this yet */ \
      if (0) \
        flag_omit_frame_pointer \
          = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG); \
      else \
        flag_omit_frame_pointer = 0; \
    } \
\
  if ((flag_pic && ! TARGET_PREFERGOT) \
      || (TARGET_SHMEDIA && !TARGET_PT_FIXED)) \
    flag_no_function_cse = 1; \
\
  if (SMALL_REGISTER_CLASSES) \
    { \
      /* Never run scheduling before reload, since that can \
         break global alloc, and generates slower code anyway due \
         to the pressure on R0.  */ \
      /* Enable sched1 for SH4; ready queue will be reordered by \
         the target hooks when pressure is high.  We can not do this for \
         PIC, SH3 and lower as they give spill failures for R0.  */ \
      if (!TARGET_HARD_SH4 || flag_pic) \
        flag_schedule_insns = 0; \
      /* ??? Current exception handling places basic block boundaries \
         after call_insns.  This causes high pressure on R0 and gives \
         spill failures for R0 in reload.  See PR 22553 and the thread \
         on gcc-patches \
         <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */ \
      else if (flag_exceptions) \
        { \
          if (flag_schedule_insns == 1) \
            warning (0, "ignoring -fschedule-insns because of exception handling bug"); \
          flag_schedule_insns = 0; \
        } \
    } \
\
  if (align_loops == 0) \
    align_loops = 1 << (TARGET_SH5 ? 3 : 2); \
  if (align_jumps == 0) \
    align_jumps = 1 << CACHE_LOG; \
  else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2)) \
    align_jumps = TARGET_SHMEDIA ? 4 : 2; \
\
  /* Allocation boundary (in *bytes*) for the code of a function. \
     SH1: 32 bit alignment is faster, because instructions are always \
     fetched as a pair from a longword boundary. \
     SH2 .. SH5 : align to cache line start.  */ \
  if (align_functions == 0) \
    align_functions \
      = TARGET_SMALLCODE ? FUNCTION_BOUNDARY/8 : (1 << CACHE_LOG); \
  /* The linker relaxation code breaks when a function contains \
     alignments that are larger than that at the start of a \
     compilation unit.  */ \
  if (TARGET_RELAX) \
    { \
      int min_align \
        = align_loops > align_jumps ? align_loops : align_jumps; \
\
      /* Also take possible .long constants / mova tables into account.  */ \
      if (min_align < 4) \
        min_align = 4; \
      if (align_functions < min_align) \
        align_functions = min_align; \
    } \
\
  if (sh_fixed_range_str) \
    sh_fix_range (sh_fixed_range_str); \
} while (0)
\f
/* Target machine storage layout.  */

/* Define this if most significant bit is lowest numbered
   in instructions that operate on numbered bit-fields.  */

#define BITS_BIG_ENDIAN 0

/* Define this if most significant byte of a word is the lowest numbered.  */
#define BYTES_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this if most significant word of a multiword number is the lowest
   numbered.  */
#define WORDS_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this to set the endianness to use in libgcc2.c, which can
   not depend on target_flags.  */
#if defined(__LITTLE_ENDIAN__)
#define LIBGCC2_WORDS_BIG_ENDIAN 0
#else
#define LIBGCC2_WORDS_BIG_ENDIAN 1
#endif

#define MAX_BITS_PER_WORD 64

/* Width in bits of an `int'.  We want just 32-bits, even if words are
   longer.  */
#define INT_TYPE_SIZE 32

/* Width in bits of a `long'.  */
#define LONG_TYPE_SIZE (TARGET_SHMEDIA64 ? 64 : 32)

/* Width in bits of a `long long'.  */
#define LONG_LONG_TYPE_SIZE 64

/* Width in bits of a `long double'.  */
#define LONG_DOUBLE_TYPE_SIZE 64

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_SHMEDIA ? 8 : 4)
#define MIN_UNITS_PER_WORD 4

/* Scaling factor for Dwarf data offsets for CFI information.
   The dwarf2out.c default would use -UNITS_PER_WORD, which is -8 for
   SHmedia; however, since we do partial register saves for the registers
   visible to SHcompact, and for target registers for SHMEDIA32, we have
   to allow saves that are only 4-byte aligned.  */
#define DWARF_CIE_DATA_ALIGNMENT -4

/* Width in bits of a pointer.
   See also the macro `Pmode' defined below.  */
#define POINTER_SIZE (TARGET_SHMEDIA64 ? 64 : 32)

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY (TARGET_SH5 ? 64 : 32)

/* Boundary (in *bits*) on which stack pointer should be aligned.  */
#define STACK_BOUNDARY BIGGEST_ALIGNMENT

/* The log (base 2) of the cache line size, in bytes.  Processors prior to
   SH2 have no actual cache, but they fetch code in chunks of 4 bytes.
   The SH2/3 have 16 byte cache lines, and the SH4 has a 32 byte cache line */
#define CACHE_LOG (TARGET_CACHE32 ? 5 : TARGET_SH2 ? 4 : 2)

/* ABI given & required minimum allocation boundary (in *bits*) for the
   code of a function.  */
#define FUNCTION_BOUNDARY (16 << TARGET_SHMEDIA)
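
/* For illustration: with the definitions above, CACHE_LOG evaluates to 5
   (32-byte lines) for SH4/SH5, 4 (16 bytes) for SH2/SH3 and 2 (the 4-byte
   fetch chunk) for SH1, while FUNCTION_BOUNDARY is 16 bits on the 16-bit
   ISAs and 32 bits (16 << 1) under SHmedia.  */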

/* On SH5, the lowest bit is used to indicate SHmedia functions, so
   the vbit must go into the delta field of
   pointers-to-member-functions.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION \
  (TARGET_SH5 ? ptrmemfunc_vbit_in_delta : ptrmemfunc_vbit_in_pfn)

/* Alignment of field after `int : 0' in a structure.  */
#define EMPTY_FIELD_BOUNDARY 32

/* No data type wants to be aligned rounder than this.  */
#define BIGGEST_ALIGNMENT (TARGET_ALIGN_DOUBLE ? 64 : 32)

/* The best alignment to use in cases where we have a choice.  */
#define FASTEST_ALIGNMENT (TARGET_SH5 ? 64 : 32)

/* Make strings word-aligned so strcpy from constants will be faster.  */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
  ((TREE_CODE (EXP) == STRING_CST \
    && (ALIGN) < FASTEST_ALIGNMENT) \
   ? FASTEST_ALIGNMENT : (ALIGN))

/* get_mode_alignment assumes complex values are always held in multiple
   registers, but that is not the case on the SH; CQImode and CHImode are
   held in a single integer register.  SH5 also holds CSImode and SCmode
   values in integer registers.  This is relevant for argument passing on
   SHcompact as we use a stack temp in order to pass CSImode by reference.  */
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
  ((GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_INT \
    || GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_FLOAT) \
   ? (unsigned) MIN (BIGGEST_ALIGNMENT, GET_MODE_BITSIZE (TYPE_MODE (TYPE))) \
   : (unsigned) DATA_ALIGNMENT(TYPE, ALIGN))

/* Make arrays of chars word-aligned for the same reasons.  */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
  (TREE_CODE (TYPE) == ARRAY_TYPE \
   && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
   && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
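
/* For illustration: a string literal or char array that would otherwise be
   byte-aligned is promoted by the two macros above to FASTEST_ALIGNMENT,
   i.e. 32-bit alignment on SH1-SH4 and 64-bit alignment on SH5, so word
   copies out of it stay aligned.  */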

/* Number of bits which any structure or union's size must be a
   multiple of.  Each structure or union's size is rounded up to a
   multiple of this.  */
#define STRUCTURE_SIZE_BOUNDARY (TARGET_PADSTRUCT ? 32 : 8)

/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  */
#define STRICT_ALIGNMENT 1

/* If LABEL_AFTER_BARRIER demands an alignment, return its base 2 logarithm.  */
#define LABEL_ALIGN_AFTER_BARRIER(LABEL_AFTER_BARRIER) \
  barrier_align (LABEL_AFTER_BARRIER)

#define LOOP_ALIGN(A_LABEL) \
  ((! optimize || TARGET_HARD_SH4 || TARGET_SMALLCODE) \
   ? 0 : sh_loop_align (A_LABEL))

#define LABEL_ALIGN(A_LABEL) \
( \
  (PREV_INSN (A_LABEL) \
   && GET_CODE (PREV_INSN (A_LABEL)) == INSN \
   && GET_CODE (PATTERN (PREV_INSN (A_LABEL))) == UNSPEC_VOLATILE \
   && XINT (PATTERN (PREV_INSN (A_LABEL)), 1) == UNSPECV_ALIGN) \
  /* explicit alignment insn in constant tables.  */ \
  ? INTVAL (XVECEXP (PATTERN (PREV_INSN (A_LABEL)), 0, 0)) \
  : 0)

/* Jump tables must be 32 bit aligned, no matter the size of the element.  */
#define ADDR_VEC_ALIGN(ADDR_VEC) 2

/* The base two logarithm of the known minimum alignment of an insn length.  */
#define INSN_LENGTH_ALIGNMENT(A_INSN) \
  (GET_CODE (A_INSN) == INSN \
   ? 1 << TARGET_SHMEDIA \
   : GET_CODE (A_INSN) == JUMP_INSN || GET_CODE (A_INSN) == CALL_INSN \
   ? 1 << TARGET_SHMEDIA \
   : CACHE_LOG)
\f
/* Standard register usage.  */

/* Register allocation for the Renesas calling convention:

     r0          arg return
     r1..r3      scratch
     r4..r7      args in
     r8..r13     call saved
     r14         frame pointer/call saved
     r15         stack pointer
     ap          arg pointer (doesn't really exist, always eliminated)
     pr          subroutine return address
     t           t bit
     mach        multiply/accumulate result, high part
     macl        multiply/accumulate result, low part.
     fpul        fp/int communication register
     rap         return address pointer register
     fr0         fp arg return
     fr1..fr3    scratch floating point registers
     fr4..fr11   fp args in
     fr12..fr15  call saved floating point registers  */

#define MAX_REGISTER_NAME_LENGTH 5
extern char sh_register_names[][MAX_REGISTER_NAME_LENGTH + 1];

#define SH_REGISTER_NAMES_INITIALIZER \
{ \
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
  "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
  "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", \
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", \
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", \
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", \
  "fr0", "fr1", "fr2", "fr3", "fr4", "fr5", "fr6", "fr7", \
  "fr8", "fr9", "fr10", "fr11", "fr12", "fr13", "fr14", "fr15", \
  "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", "fr22", "fr23", \
  "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", "fr30", "fr31", \
  "fr32", "fr33", "fr34", "fr35", "fr36", "fr37", "fr38", "fr39", \
  "fr40", "fr41", "fr42", "fr43", "fr44", "fr45", "fr46", "fr47", \
  "fr48", "fr49", "fr50", "fr51", "fr52", "fr53", "fr54", "fr55", \
  "fr56", "fr57", "fr58", "fr59", "fr60", "fr61", "fr62", "fr63", \
  "tr0", "tr1", "tr2", "tr3", "tr4", "tr5", "tr6", "tr7", \
  "xd0", "xd2", "xd4", "xd6", "xd8", "xd10", "xd12", "xd14", \
  "gbr", "ap", "pr", "t", "mach", "macl", "fpul", "fpscr", \
  "rap", "sfp" \
}

#define REGNAMES_ARR_INDEX_1(index) \
  (sh_register_names[index])
#define REGNAMES_ARR_INDEX_2(index) \
  REGNAMES_ARR_INDEX_1 ((index)), REGNAMES_ARR_INDEX_1 ((index)+1)
#define REGNAMES_ARR_INDEX_4(index) \
  REGNAMES_ARR_INDEX_2 ((index)), REGNAMES_ARR_INDEX_2 ((index)+2)
#define REGNAMES_ARR_INDEX_8(index) \
  REGNAMES_ARR_INDEX_4 ((index)), REGNAMES_ARR_INDEX_4 ((index)+4)
#define REGNAMES_ARR_INDEX_16(index) \
  REGNAMES_ARR_INDEX_8 ((index)), REGNAMES_ARR_INDEX_8 ((index)+8)
#define REGNAMES_ARR_INDEX_32(index) \
  REGNAMES_ARR_INDEX_16 ((index)), REGNAMES_ARR_INDEX_16 ((index)+16)
#define REGNAMES_ARR_INDEX_64(index) \
  REGNAMES_ARR_INDEX_32 ((index)), REGNAMES_ARR_INDEX_32 ((index)+32)

#define REGISTER_NAMES \
{ \
  REGNAMES_ARR_INDEX_64 (0), \
  REGNAMES_ARR_INDEX_64 (64), \
  REGNAMES_ARR_INDEX_8 (128), \
  REGNAMES_ARR_INDEX_8 (136), \
  REGNAMES_ARR_INDEX_8 (144), \
  REGNAMES_ARR_INDEX_2 (152) \
}

#define ADDREGNAMES_SIZE 32
#define MAX_ADDITIONAL_REGISTER_NAME_LENGTH 4
extern char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1];

#define SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER \
{ \
  "dr0", "dr2", "dr4", "dr6", "dr8", "dr10", "dr12", "dr14", \
  "dr16", "dr18", "dr20", "dr22", "dr24", "dr26", "dr28", "dr30", \
  "dr32", "dr34", "dr36", "dr38", "dr40", "dr42", "dr44", "dr46", \
  "dr48", "dr50", "dr52", "dr54", "dr56", "dr58", "dr60", "dr62" \
}

#define ADDREGNAMES_REGNO(index) \
  ((index < 32) ? (FIRST_FP_REG + (index) * 2) \
   : (-1))

#define ADDREGNAMES_ARR_INDEX_1(index) \
  { (sh_additional_register_names[index]), ADDREGNAMES_REGNO (index) }
#define ADDREGNAMES_ARR_INDEX_2(index) \
  ADDREGNAMES_ARR_INDEX_1 ((index)), ADDREGNAMES_ARR_INDEX_1 ((index)+1)
#define ADDREGNAMES_ARR_INDEX_4(index) \
  ADDREGNAMES_ARR_INDEX_2 ((index)), ADDREGNAMES_ARR_INDEX_2 ((index)+2)
#define ADDREGNAMES_ARR_INDEX_8(index) \
  ADDREGNAMES_ARR_INDEX_4 ((index)), ADDREGNAMES_ARR_INDEX_4 ((index)+4)
#define ADDREGNAMES_ARR_INDEX_16(index) \
  ADDREGNAMES_ARR_INDEX_8 ((index)), ADDREGNAMES_ARR_INDEX_8 ((index)+8)
#define ADDREGNAMES_ARR_INDEX_32(index) \
  ADDREGNAMES_ARR_INDEX_16 ((index)), ADDREGNAMES_ARR_INDEX_16 ((index)+16)

#define ADDITIONAL_REGISTER_NAMES \
{ \
  ADDREGNAMES_ARR_INDEX_32 (0) \
}

/* Number of actual hardware registers.
   The hardware registers are assigned numbers for the compiler
   from 0 to just below FIRST_PSEUDO_REGISTER.
   All registers that the compiler knows about must be given numbers,
   even those that are not normally considered general registers.  */

/* There are many other relevant definitions in sh.md's md_constants.  */

#define FIRST_GENERAL_REG R0_REG
#define LAST_GENERAL_REG (FIRST_GENERAL_REG + (TARGET_SHMEDIA ? 63 : 15))
#define FIRST_FP_REG DR0_REG
#define LAST_FP_REG (FIRST_FP_REG + \
                     (TARGET_SHMEDIA_FPU ? 63 : TARGET_SH2E ? 15 : -1))
#define FIRST_XD_REG XD0_REG
#define LAST_XD_REG (FIRST_XD_REG + ((TARGET_SH4 && TARGET_FMOVD) ? 7 : -1))
#define FIRST_TARGET_REG TR0_REG
#define LAST_TARGET_REG (FIRST_TARGET_REG + (TARGET_SHMEDIA ? 7 : -1))

/* Registers that can be accessed through bank0 or bank1 depending on sr.md.  */

#define FIRST_BANKED_REG R0_REG
#define LAST_BANKED_REG R7_REG

#define BANKED_REGISTER_P(REGNO) \
  IN_RANGE ((REGNO), \
            (unsigned HOST_WIDE_INT) FIRST_BANKED_REG, \
            (unsigned HOST_WIDE_INT) LAST_BANKED_REG)

#define GENERAL_REGISTER_P(REGNO) \
  IN_RANGE ((REGNO), \
            (unsigned HOST_WIDE_INT) FIRST_GENERAL_REG, \
            (unsigned HOST_WIDE_INT) LAST_GENERAL_REG)

#define GENERAL_OR_AP_REGISTER_P(REGNO) \
  (GENERAL_REGISTER_P (REGNO) || ((REGNO) == AP_REG) \
   || ((REGNO) == FRAME_POINTER_REGNUM))

#define FP_REGISTER_P(REGNO) \
  ((int) (REGNO) >= FIRST_FP_REG && (int) (REGNO) <= LAST_FP_REG)

#define XD_REGISTER_P(REGNO) \
  ((int) (REGNO) >= FIRST_XD_REG && (int) (REGNO) <= LAST_XD_REG)

#define FP_OR_XD_REGISTER_P(REGNO) \
  (FP_REGISTER_P (REGNO) || XD_REGISTER_P (REGNO))

#define FP_ANY_REGISTER_P(REGNO) \
  (FP_REGISTER_P (REGNO) || XD_REGISTER_P (REGNO) || (REGNO) == FPUL_REG)

#define SPECIAL_REGISTER_P(REGNO) \
  ((REGNO) == GBR_REG || (REGNO) == T_REG \
   || (REGNO) == MACH_REG || (REGNO) == MACL_REG)

#define TARGET_REGISTER_P(REGNO) \
  ((int) (REGNO) >= FIRST_TARGET_REG && (int) (REGNO) <= LAST_TARGET_REG)

#define SHMEDIA_REGISTER_P(REGNO) \
  (GENERAL_REGISTER_P (REGNO) || FP_REGISTER_P (REGNO) \
   || TARGET_REGISTER_P (REGNO))

/* This is to be used in CONDITIONAL_REGISTER_USAGE, to mark registers
   that should be fixed.  */
#define VALID_REGISTER_P(REGNO) \
  (SHMEDIA_REGISTER_P (REGNO) || XD_REGISTER_P (REGNO) \
   || (REGNO) == AP_REG || (REGNO) == RAP_REG \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || (TARGET_SH1 && (SPECIAL_REGISTER_P (REGNO) || (REGNO) == PR_REG)) \
   || (TARGET_SH2E && (REGNO) == FPUL_REG))

/* The mode that should be generally used to store a register by
   itself in the stack, or to load it back.  */
#define REGISTER_NATURAL_MODE(REGNO) \
  (FP_REGISTER_P (REGNO) ? SFmode \
   : XD_REGISTER_P (REGNO) ? DFmode \
   : TARGET_SHMEDIA && ! HARD_REGNO_CALL_PART_CLOBBERED ((REGNO), DImode) \
   ? DImode \
   : SImode)

#define FIRST_PSEUDO_REGISTER 154

/* Don't count soft frame pointer.  */
#define DWARF_FRAME_REGISTERS (FIRST_PSEUDO_REGISTER - 1)

/* 1 for registers that have pervasive standard uses
   and are not available for the register allocator.

   Mach register is fixed because it's only 10 bits wide for SH1.
   It is 32 bits wide for SH2.  */

#define FIXED_REGISTERS \
{ \
  /* Regular registers.  */ \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 1, \
  /* r16 is reserved, r18 is the former pr.  */ \
  1, 0, 0, 0, 0, 0, 0, 0, \
  /* r24 is reserved for the OS; r25, for the assembler or linker.  */ \
  /* r26 is a global variable data pointer; r27 is for constants.  */ \
  1, 1, 1, 1, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 1, \
  /* FP registers.  */ \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  /* Branch target registers.  */ \
  0, 0, 0, 0, 0, 0, 0, 0, \
  /* XD registers.  */ \
  0, 0, 0, 0, 0, 0, 0, 0, \
  /*"gbr", "ap", "pr", "t", "mach", "macl", "fpul", "fpscr", */ \
  1, 1, 1, 1, 1, 1, 0, 1, \
  /*"rap", "sfp" */ \
  1, 1, \
}

/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.  */

#define CALL_USED_REGISTERS \
{ \
  /* Regular registers.  */ \
  1, 1, 1, 1, 1, 1, 1, 1, \
  /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs. \
     Only the lower 32bits of R10-R14 are guaranteed to be preserved \
     across SH5 function calls.  */ \
  0, 0, 0, 0, 0, 0, 0, 1, \
  1, 1, 1, 1, 1, 1, 1, 1, \
  1, 1, 1, 1, 0, 0, 0, 0, \
  0, 0, 0, 0, 1, 1, 1, 1, \
  1, 1, 1, 1, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 1, 1, 1, 1, \
  /* FP registers.  */ \
  1, 1, 1, 1, 1, 1, 1, 1, \
  1, 1, 1, 1, 0, 0, 0, 0, \
  1, 1, 1, 1, 1, 1, 1, 1, \
  1, 1, 1, 1, 1, 1, 1, 1, \
  1, 1, 1, 1, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  0, 0, 0, 0, 0, 0, 0, 0, \
  /* Branch target registers.  */ \
  1, 1, 1, 1, 1, 0, 0, 0, \
  /* XD registers.  */ \
  1, 1, 1, 1, 1, 1, 0, 0, \
  /*"gbr", "ap", "pr", "t", "mach", "macl", "fpul", "fpscr", */ \
  1, 1, 1, 1, 1, 1, 1, 1, \
  /*"rap", "sfp" */ \
  1, 1, \
}

/* CONDITIONAL_REGISTER_USAGE might want to make a register call-used, yet
   fixed, like PIC_OFFSET_TABLE_REGNUM.  */
#define CALL_REALLY_USED_REGISTERS CALL_USED_REGISTERS

/* Only the lower 32-bits of R10-R14 are guaranteed to be preserved
   across SHcompact function calls.  We can't tell whether a called
   function is SHmedia or SHcompact, so we assume it may be when
   compiling SHmedia code with the 32-bit ABI, since that's the only
   ABI that can be linked with SHcompact code.  */
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) \
  (TARGET_SHMEDIA32 \
   && GET_MODE_SIZE (MODE) > 4 \
   && (((REGNO) >= FIRST_GENERAL_REG + 10 \
        && (REGNO) <= FIRST_GENERAL_REG + 15) \
       || TARGET_REGISTER_P (REGNO) \
       || (REGNO) == PR_MEDIA_REG))

/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   On the SH all but the XD regs are UNITS_PER_WORD bits wide.  */

#define HARD_REGNO_NREGS(REGNO, MODE) \
  (XD_REGISTER_P (REGNO) \
   ? ((GET_MODE_SIZE (MODE) + (2*UNITS_PER_WORD - 1)) / (2*UNITS_PER_WORD)) \
   : (TARGET_SHMEDIA && FP_REGISTER_P (REGNO)) \
   ? ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD/2 - 1) / (UNITS_PER_WORD/2)) \
   : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
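
/* A worked example (illustrative only): DFmode is 8 bytes, so it needs one
   XD register on SH4 (2*UNITS_PER_WORD == 8), two general or FP registers
   on SH1-SH4 (UNITS_PER_WORD == 4), and under SHmedia two FP registers
   (counted in 4-byte halves) but only one 8-byte general register.  */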

/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.  */

#define HARD_REGNO_MODE_OK(REGNO, MODE) \
  sh_hard_regno_mode_ok ((REGNO), (MODE))

/* Value is 1 if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be 0 for correct output.
   That's the case for xd registers: we don't hold SFmode values in
   them, so we can't tie an SFmode pseudo with one in another
   floating-point mode.  */

#define MODES_TIEABLE_P(MODE1, MODE2) \
  ((MODE1) == (MODE2) \
   || (TARGET_SHMEDIA \
       && GET_MODE_SIZE (MODE1) == GET_MODE_SIZE (MODE2) \
       && INTEGRAL_MODE_P (MODE1) && INTEGRAL_MODE_P (MODE2)) \
   || (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2) \
       && (TARGET_SHMEDIA ? ((GET_MODE_SIZE (MODE1) <= 4) \
                             && (GET_MODE_SIZE (MODE2) <= 4)) \
                          : ((MODE1) != SFmode && (MODE2) != SFmode))))

/* A C expression that is nonzero if hard register NEW_REG can be
   considered for use as a rename register for register OLD_REG.  */

#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
  sh_hard_regno_rename_ok (OLD_REG, NEW_REG)

/* Specify the registers used for certain standard purposes.
   The values of these macros are register numbers.  */

/* Define this if the program counter is overloaded on a register.  */
/* #define PC_REGNUM 15*/

/* Register to use for pushing function arguments.  */
#define STACK_POINTER_REGNUM SP_REG

/* Base register for access to local variables of the function.  */
#define HARD_FRAME_POINTER_REGNUM FP_REG

/* Base register for access to local variables of the function.  */
#define FRAME_POINTER_REGNUM 153

/* Fake register that holds the address on the stack of the
   current function's return address.  */
#define RETURN_ADDRESS_POINTER_REGNUM RAP_REG

/* Register to hold the addressing base for position independent
   code access to data items.  */
#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? PIC_REG : INVALID_REGNUM)

#define GOT_SYMBOL_NAME "*_GLOBAL_OFFSET_TABLE_"

/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may be accessed
   via the stack pointer) in functions that seem suitable.  */

#define FRAME_POINTER_REQUIRED 0

/* Definitions for register eliminations.

   We have three registers that can be eliminated on the SH.  First, the
   frame pointer register can often be eliminated in favor of the stack
   pointer register.  Secondly, the argument pointer register can always be
   eliminated; it is replaced with either the stack or frame pointer.
   Third, there is the return address pointer, which can also be replaced
   with either the stack or the frame pointer.  */

/* This is an array of structures.  Each structure initializes one pair
   of eliminable registers.  The "from" register number is given first,
   followed by "to".  Eliminations of the same "from" register are listed
   in order of preference.  */

/* If you add any registers here that are not actually hard registers,
   and that have any alternative of elimination that doesn't always
   apply, you need to amend calc_live_regs to exclude it, because
   reload spills all eliminable registers where it sees an
   can_eliminate == 0 entry, thus making them 'live' .
   If you add any hard registers that can be eliminated in different
   ways, you have to patch reload to spill them only when all alternatives
   of elimination fail.  */

#define ELIMINABLE_REGS \
{{ HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
 { RETURN_ADDRESS_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { RETURN_ADDRESS_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
 { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},}

/* Given FROM and TO register numbers, say whether this elimination
   is allowed.  */
#define CAN_ELIMINATE(FROM, TO) \
  (!((FROM) == HARD_FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED))

/* Define the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  OFFSET = initial_elimination_offset ((FROM), (TO))

/* Base register for access to arguments of the function.  */
#define ARG_POINTER_REGNUM AP_REG

/* Register in which the static-chain is passed to a function.  */
#define STATIC_CHAIN_REGNUM (TARGET_SH5 ? 1 : 3)

/* Don't default to pcc-struct-return, because we have already specified
   exactly how to return structures in the TARGET_RETURN_IN_MEMORY
   target hook.  */

#define DEFAULT_PCC_STRUCT_RETURN 0

#define SHMEDIA_REGS_STACK_ADJUST() \
  (TARGET_SHCOMPACT && crtl->saves_all_registers \
   ? (8 * (/* r28-r35 */ 8 + /* r44-r59 */ 16 + /* tr5-tr7 */ 3) \
      + (TARGET_FPU_ANY ? 4 * (/* fr36 - fr63 */ 28) : 0)) \
   : 0)
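
/* For illustration: when the adjustment above applies, it reserves
   8 * (8 + 16 + 3) = 216 bytes for the general and target registers, plus
   4 * 28 = 112 bytes for fr36-fr63 when an FPU is present, 328 bytes in
   total.  */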

\f
/* Define the classes of registers for register constraints in the
   machine description.  Also define ranges of constants.

   One of the classes must always be named ALL_REGS and include all hard regs.
   If there is more than one class, another class must be named NO_REGS
   and contain no registers.

   The name GENERAL_REGS must be the name of a class (or an alias for
   another name such as ALL_REGS).  This is the class of registers
   that is allowed by "g" or "r" in a register constraint.
   Also, registers outside this class are allocated only when
   instructions express preferences for them.

   The classes must be numbered in nondecreasing order; that is,
   a larger-numbered class must never be contained completely
   in a smaller-numbered class.

   For any two classes, it is very desirable that there be another
   class that represents their union.  */

/* The SH has two sorts of general registers, R0 and the rest.  R0 can
   be used as the destination of some of the arithmetic ops.  There are
   also some special purpose registers; the T bit register, the
   Procedure Return Register and the Multiply Accumulate Registers.  */
/* Place GENERAL_REGS after FPUL_REGS so that it will be preferred by
   reg_class_subunion.  We don't want to have an actual union class
   of these, because it would only be used when both classes are calculated
   to give the same cost, but there is only one FPUL register.
   Besides, regclass fails to notice the different REGISTER_MOVE_COSTS
   applying to the actual instruction alternative considered.  E.g., the
   y/r alternative of movsi_ie is considered to have no more cost than
   the r/r alternative, which is patently untrue.  */

enum reg_class
{
  NO_REGS,
  R0_REGS,
  PR_REGS,
  T_REGS,
  MAC_REGS,
  FPUL_REGS,
  SIBCALL_REGS,
  GENERAL_REGS,
  FP0_REGS,
  FP_REGS,
  DF_HI_REGS,
  DF_REGS,
  FPSCR_REGS,
  GENERAL_FP_REGS,
  GENERAL_DF_REGS,
  TARGET_REGS,
  ALL_REGS,
  LIM_REG_CLASSES
};

#define N_REG_CLASSES (int) LIM_REG_CLASSES

/* Give names of register classes as strings for dump file.  */
#define REG_CLASS_NAMES \
{ \
  "NO_REGS", \
  "R0_REGS", \
  "PR_REGS", \
  "T_REGS", \
  "MAC_REGS", \
  "FPUL_REGS", \
  "SIBCALL_REGS", \
  "GENERAL_REGS", \
  "FP0_REGS", \
  "FP_REGS", \
  "DF_HI_REGS", \
  "DF_REGS", \
  "FPSCR_REGS", \
  "GENERAL_FP_REGS", \
  "GENERAL_DF_REGS", \
  "TARGET_REGS", \
  "ALL_REGS", \
}

/* Define which registers fit in which classes.
   This is an initializer for a vector of HARD_REG_SET
   of length N_REG_CLASSES.  */

#define REG_CLASS_CONTENTS \
{ \
  /* NO_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, \
  /* R0_REGS: */ \
  { 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, \
  /* PR_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00040000 }, \
  /* T_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00080000 }, \
  /* MAC_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00300000 }, \
  /* FPUL_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00400000 }, \
  /* SIBCALL_REGS: Initialized in CONDITIONAL_REGISTER_USAGE.  */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, \
  /* GENERAL_REGS: */ \
  { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x03020000 }, \
  /* FP0_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000 }, \
  /* FP_REGS: */ \
  { 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000 }, \
  /* DF_HI_REGS: Initialized in CONDITIONAL_REGISTER_USAGE.  */ \
  { 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ff00 }, \
  /* DF_REGS: */ \
  { 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ff00 }, \
  /* FPSCR_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, \
  /* GENERAL_FP_REGS: */ \
  { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x03020000 }, \
  /* GENERAL_DF_REGS: */ \
  { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x0302ff00 }, \
  /* TARGET_REGS: */ \
  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000ff }, \
  /* ALL_REGS: */ \
  { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x03ffffff }, \
}

/* The same information, inverted:
   Return the class number of the smallest class containing
   reg number REGNO.  This could be a conditional expression
   or could index an array.  */

extern enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) regno_reg_class[(REGNO)]

/* The following macro defines cover classes for the Integrated Register
   Allocator.  Cover classes are a set of non-intersecting register
   classes covering all hard registers used for register allocation
   purposes.  Any move between two registers of a cover class should be
   cheaper than a load or store of the registers.  The macro value is an
   array of register classes with LIM_REG_CLASSES used as the end
   marker.  */

#define IRA_COVER_CLASSES \
{ \
  GENERAL_REGS, FP_REGS, PR_REGS, T_REGS, MAC_REGS, TARGET_REGS, \
  FPUL_REGS, LIM_REG_CLASSES \
}

/* When defined, the compiler allows registers explicitly used in the
   rtl to be used as spill registers but prevents the compiler from
   extending the lifetime of these registers.  */

#define SMALL_REGISTER_CLASSES (! TARGET_SHMEDIA)

/* The order in which registers should be allocated.  */
1483 /* Sometimes FP0_REGS becomes the preferred class of a floating point pseudo,
1484 and GENERAL_FP_REGS the alternate class. Since FP0 is likely to be
1485    spilled or otherwise used, we had better have the FP_REGS allocated first.  */
1486 #define REG_ALLOC_ORDER \
1487 {/* Caller-saved FPRs */ \
1488 65, 66, 67, 68, 69, 70, 71, 64, \
1489 72, 73, 74, 75, 80, 81, 82, 83, \
1490 84, 85, 86, 87, 88, 89, 90, 91, \
1491 92, 93, 94, 95, 96, 97, 98, 99, \
1492 /* Callee-saved FPRs */ \
1493 76, 77, 78, 79,100,101,102,103, \
1494 104,105,106,107,108,109,110,111, \
1495 112,113,114,115,116,117,118,119, \
1496 120,121,122,123,124,125,126,127, \
1497 136,137,138,139,140,141,142,143, \
1498 /* FPSCR */ 151, \
1499 /* Caller-saved GPRs (except 8/9 on SH1-4) */ \
1500 1, 2, 3, 7, 6, 5, 4, 0, \
1501 8, 9, 17, 19, 20, 21, 22, 23, \
1502 36, 37, 38, 39, 40, 41, 42, 43, \
1503 60, 61, 62, \
1504    /* SH1-4 callee-saved GPRs / SH5 partially-saved GPRs */ \
1505 10, 11, 12, 13, 14, 18, \
1506 /* SH5 callee-saved GPRs */ \
1507 28, 29, 30, 31, 32, 33, 34, 35, \
1508 44, 45, 46, 47, 48, 49, 50, 51, \
1509 52, 53, 54, 55, 56, 57, 58, 59, \
1510 /* FPUL */ 150, \
1511 /* SH5 branch target registers */ \
1512 128,129,130,131,132,133,134,135, \
1513 /* Fixed registers */ \
1514 15, 16, 24, 25, 26, 27, 63,144, \
1515 145,146,147,148,149,152,153 }
1516
1517 /* The class value for index registers, and the one for base regs. */
1518 #define INDEX_REG_CLASS \
1519 (!ALLOW_INDEXED_ADDRESS ? NO_REGS : TARGET_SHMEDIA ? GENERAL_REGS : R0_REGS)
1520 #define BASE_REG_CLASS GENERAL_REGS
1521 \f
1522 /* Defines for sh.md and constraints.md. */
1523
1524 #define CONST_OK_FOR_I06(VALUE) (((HOST_WIDE_INT)(VALUE)) >= -32 \
1525 && ((HOST_WIDE_INT)(VALUE)) <= 31)
1526 #define CONST_OK_FOR_I08(VALUE) (((HOST_WIDE_INT)(VALUE))>= -128 \
1527 && ((HOST_WIDE_INT)(VALUE)) <= 127)
1528 #define CONST_OK_FOR_I10(VALUE) (((HOST_WIDE_INT)(VALUE)) >= -512 \
1529 && ((HOST_WIDE_INT)(VALUE)) <= 511)
1530 #define CONST_OK_FOR_I16(VALUE) (((HOST_WIDE_INT)(VALUE)) >= -32768 \
1531 && ((HOST_WIDE_INT)(VALUE)) <= 32767)
1532
1533 #define CONST_OK_FOR_J16(VALUE) \
1534 ((HOST_BITS_PER_WIDE_INT >= 64 && (VALUE) == (HOST_WIDE_INT) 0xffffffff) \
1535 || (HOST_BITS_PER_WIDE_INT >= 64 && (VALUE) == (HOST_WIDE_INT) -1 << 32))
1536
1537 #define CONST_OK_FOR_K08(VALUE) (((HOST_WIDE_INT)(VALUE))>= 0 \
1538 && ((HOST_WIDE_INT)(VALUE)) <= 255)
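
/* For illustration: CONST_OK_FOR_I08 accepts the signed byte range, so
   -128 and 127 pass while 128 does not; CONST_OK_FOR_K08 accepts the
   unsigned byte range, so 255 passes but -1 does not.  */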
1539
1540 /* Given an rtx X being reloaded into a reg required to be
1541 in class CLASS, return the class of reg to actually use.
1542 In general this is just CLASS; but on some machines
1543 in some cases it is preferable to use a more restrictive class. */
1544
1545 #define PREFERRED_RELOAD_CLASS(X, CLASS) \
1546 ((CLASS) == NO_REGS && TARGET_SHMEDIA \
1547 && (GET_CODE (X) == CONST_DOUBLE \
1548 || GET_CODE (X) == SYMBOL_REF \
1549 || PIC_ADDR_P (X)) \
1550 ? GENERAL_REGS \
1551    : (CLASS))
1552
1553 #if 0
1554 #define SECONDARY_INOUT_RELOAD_CLASS(CLASS,MODE,X,ELSE) \
1555 ((((REGCLASS_HAS_FP_REG (CLASS) \
1556 && (GET_CODE (X) == REG \
1557 && (GENERAL_OR_AP_REGISTER_P (REGNO (X)) \
1558 || (FP_REGISTER_P (REGNO (X)) && (MODE) == SImode \
1559 && TARGET_FMOVD)))) \
1560 || (REGCLASS_HAS_GENERAL_REG (CLASS) \
1561 && GET_CODE (X) == REG \
1562 && FP_REGISTER_P (REGNO (X)))) \
1563 && ! TARGET_SHMEDIA \
1564 && ((MODE) == SFmode || (MODE) == SImode)) \
1565 ? FPUL_REGS \
1566 : (((CLASS) == FPUL_REGS \
1567 || (REGCLASS_HAS_FP_REG (CLASS) \
1568 && ! TARGET_SHMEDIA && MODE == SImode)) \
1569 && (GET_CODE (X) == MEM \
1570 || (GET_CODE (X) == REG \
1571 && (REGNO (X) >= FIRST_PSEUDO_REGISTER \
1572 || REGNO (X) == T_REG \
1573 || system_reg_operand (X, VOIDmode))))) \
1574 ? GENERAL_REGS \
1575 : (((CLASS) == TARGET_REGS \
1576 || (TARGET_SHMEDIA && (CLASS) == SIBCALL_REGS)) \
1577 && !satisfies_constraint_Csy (X) \
1578 && (GET_CODE (X) != REG || ! GENERAL_REGISTER_P (REGNO (X)))) \
1579 ? GENERAL_REGS \
1580 : (((CLASS) == MAC_REGS || (CLASS) == PR_REGS) \
1581 && GET_CODE (X) == REG && ! GENERAL_REGISTER_P (REGNO (X)) \
1582 && (CLASS) != REGNO_REG_CLASS (REGNO (X))) \
1583 ? GENERAL_REGS \
1584 : ((CLASS) != GENERAL_REGS && GET_CODE (X) == REG \
1585 && TARGET_REGISTER_P (REGNO (X))) \
1586 ? GENERAL_REGS : (ELSE))
1587
1588 #define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
1589 SECONDARY_INOUT_RELOAD_CLASS(CLASS,MODE,X,NO_REGS)
1590
1591 #define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
1592 ((REGCLASS_HAS_FP_REG (CLASS) \
1593 && ! TARGET_SHMEDIA \
1594 && immediate_operand ((X), (MODE)) \
1595 && ! ((fp_zero_operand (X) || fp_one_operand (X)) \
1596 && (MODE) == SFmode && fldi_ok ())) \
1597 ? R0_REGS \
1598 : ((CLASS) == FPUL_REGS \
1599 && ((GET_CODE (X) == REG \
1600 && (REGNO (X) == MACL_REG || REGNO (X) == MACH_REG \
1601 || REGNO (X) == T_REG)) \
1602 || GET_CODE (X) == PLUS)) \
1603 ? GENERAL_REGS \
1604 : (CLASS) == FPUL_REGS && immediate_operand ((X), (MODE)) \
1605 ? (satisfies_constraint_I08 (X) \
1606 ? GENERAL_REGS \
1607 : R0_REGS) \
1608 : ((CLASS) == FPSCR_REGS \
1609 && ((GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
1610 || (GET_CODE (X) == MEM && GET_CODE (XEXP ((X), 0)) == PLUS)))\
1611 ? GENERAL_REGS \
1612 : (REGCLASS_HAS_FP_REG (CLASS) \
1613 && TARGET_SHMEDIA \
1614 && immediate_operand ((X), (MODE)) \
1615 && (X) != CONST0_RTX (GET_MODE (X)) \
1616 && GET_MODE (X) != V4SFmode) \
1617 ? GENERAL_REGS \
1618 : (((MODE) == QImode || (MODE) == HImode) \
1619 && TARGET_SHMEDIA && inqhi_operand ((X), (MODE))) \
1620 ? GENERAL_REGS \
1621 : (TARGET_SHMEDIA && (CLASS) == GENERAL_REGS \
1622 && (GET_CODE (X) == LABEL_REF || PIC_ADDR_P (X))) \
1623 ? TARGET_REGS \
1624 : SECONDARY_INOUT_RELOAD_CLASS((CLASS),(MODE),(X), NO_REGS))
1625 #endif
1626
1627 /* Return the maximum number of consecutive registers
1628 needed to represent mode MODE in a register of class CLASS.
1629
1630 If TARGET_SHMEDIA, we need two FP registers per word.
1631 Otherwise we will need at most one register per word. */
1632 #define CLASS_MAX_NREGS(CLASS, MODE) \
1633 (TARGET_SHMEDIA \
1634 && TEST_HARD_REG_BIT (reg_class_contents[CLASS], FIRST_FP_REG) \
1635 ? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD/2 - 1) / (UNITS_PER_WORD/2) \
1636 : (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1637
1638 /* If defined, gives a class of registers that cannot be used as the
1639 operand of a SUBREG that changes the mode of the object illegally. */
1640 /* ??? We need to renumber the internal numbers for the frnn registers
1641 when in little endian in order to allow mode size changes. */
1642
1643 #define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
1644 sh_cannot_change_mode_class (FROM, TO, CLASS)
1645 \f
1646 /* Stack layout; function entry, exit and calling. */
1647
1648 /* Define the number of registers that can hold parameters.
1649 These macros are used only in other macro definitions below. */
1650
1651 #define NPARM_REGS(MODE) \
1652 (TARGET_FPU_ANY && (MODE) == SFmode \
1653 ? (TARGET_SH5 ? 12 : 8) \
1654 : (TARGET_SH4 || TARGET_SH2A_DOUBLE) && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
1655 || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) \
1656 ? (TARGET_SH5 ? 12 : 8) \
1657 : (TARGET_SH5 ? 8 : 4))
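
/* For example, with an FPU on a non-SH5 target, SFmode arguments may use
   up to 8 registers and SImode arguments 4; on SH5 the corresponding
   counts are 12 and 8.  */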
1658
1659 #define FIRST_PARM_REG (FIRST_GENERAL_REG + (TARGET_SH5 ? 2 : 4))
1660 #define FIRST_RET_REG (FIRST_GENERAL_REG + (TARGET_SH5 ? 2 : 0))
1661
1662 #define FIRST_FP_PARM_REG (FIRST_FP_REG + (TARGET_SH5 ? 0 : 4))
1663 #define FIRST_FP_RET_REG FIRST_FP_REG
1664
1665 /* Define this if pushing a word on the stack
1666 makes the stack pointer a smaller address. */
1667 #define STACK_GROWS_DOWNWARD
1668
1669 /* Define this macro to nonzero if the addresses of local variable slots
1670 are at negative offsets from the frame pointer. */
1671 #define FRAME_GROWS_DOWNWARD 1
1672
1673 /* Offset from the frame pointer to the first local variable slot to
1674 be allocated. */
1675 #define STARTING_FRAME_OFFSET 0
1676
1677 /* If we generate an insn to push BYTES bytes,
1678 this says how many the stack pointer really advances by. */
1679 /* Don't define PUSH_ROUNDING, since the hardware doesn't do this.
1680 When PUSH_ROUNDING is not defined, PARM_BOUNDARY will cause gcc to
1681 do correct alignment. */
1682 #if 0
1683 #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
1684 #endif
1685
1686 /* Offset of first parameter from the argument pointer register value. */
1687 #define FIRST_PARM_OFFSET(FNDECL) 0
1688
1689 /* Value is the number of bytes of arguments automatically
1690 popped when returning from a subroutine call.
1691 FUNDECL is the declaration node of the function (as a tree),
1692 FUNTYPE is the data type of the function (as a tree),
1693 or for a library call it is an identifier node for the subroutine name.
1694 SIZE is the number of bytes of arguments passed on the stack.
1695
1696 On the SH, the caller does not pop any of its arguments that were passed
1697 on the stack. */
1698 #define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
1699
1700 /* Value is the number of bytes of arguments automatically popped when
1701 calling a subroutine.
1702 CUM is the accumulated argument list.
1703
1704 On SHcompact, the call trampoline pops arguments off the stack. */
1705 #define CALL_POPS_ARGS(CUM) (TARGET_SHCOMPACT ? (CUM).stack_regs * 8 : 0)
1706
1707 /* Some subroutine macros specific to this machine. */
1708
1709 #define BASE_RETURN_VALUE_REG(MODE) \
1710 ((TARGET_FPU_ANY && ((MODE) == SFmode)) \
1711 ? FIRST_FP_RET_REG \
1712 : TARGET_FPU_ANY && (MODE) == SCmode \
1713 ? FIRST_FP_RET_REG \
1714 : (TARGET_FPU_DOUBLE \
1715 && ((MODE) == DFmode || (MODE) == SFmode \
1716 || (MODE) == DCmode || (MODE) == SCmode )) \
1717 ? FIRST_FP_RET_REG \
1718 : FIRST_RET_REG)
1719
1720 #define BASE_ARG_REG(MODE) \
1721 ((TARGET_SH2E && ((MODE) == SFmode)) \
1722 ? FIRST_FP_PARM_REG \
1723 : (TARGET_SH4 || TARGET_SH2A_DOUBLE) && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
1724 || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)\
1725 ? FIRST_FP_PARM_REG \
1726 : FIRST_PARM_REG)
1727
1728 /* Define how to find the value returned by a function.
1729 VALTYPE is the data type of the value (as a tree).
1730 If the precise function being called is known, FUNC is its FUNCTION_DECL;
1731 otherwise, FUNC is 0.
1732 For the SH, this is like LIBCALL_VALUE, except that we must change the
1733 mode like PROMOTE_MODE does.
1734 ??? PROMOTE_MODE is ignored for non-scalar types. The set of types
1735 tested here has to be kept in sync with the one in explow.c:promote_mode. */
1736
1737 #define FUNCTION_VALUE(VALTYPE, FUNC) \
1738 gen_rtx_REG ( \
1739 ((GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_INT \
1740 && GET_MODE_SIZE (TYPE_MODE (VALTYPE)) < 4 \
1741 && (TREE_CODE (VALTYPE) == INTEGER_TYPE \
1742 || TREE_CODE (VALTYPE) == ENUMERAL_TYPE \
1743 || TREE_CODE (VALTYPE) == BOOLEAN_TYPE \
1744 || TREE_CODE (VALTYPE) == REAL_TYPE \
1745 || TREE_CODE (VALTYPE) == OFFSET_TYPE)) \
1746 && sh_promote_prototypes (FUNC) \
1747 ? (TARGET_SHMEDIA64 ? DImode : SImode) : TYPE_MODE (VALTYPE)), \
1748 BASE_RETURN_VALUE_REG (TYPE_MODE (VALTYPE)))
1749
1750 /* Define how to find the value returned by a library function
1751 assuming the value has mode MODE. */
1752 #define LIBCALL_VALUE(MODE) \
1753 gen_rtx_REG ((MODE), BASE_RETURN_VALUE_REG (MODE));
1754
1755 /* 1 if N is a possible register number for a function value. */
1756 #define FUNCTION_VALUE_REGNO_P(REGNO) \
1757 ((REGNO) == FIRST_RET_REG || (TARGET_SH2E && (REGNO) == FIRST_FP_RET_REG) \
1758 || (TARGET_SHMEDIA_FPU && (REGNO) == FIRST_FP_RET_REG))
1759
1760 /* 1 if N is a possible register number for function argument passing. */
1761 /* ??? There are some callers that pass REGNO as int, and others that pass
1762 it as unsigned. We get warnings unless we do casts everywhere. */
1763 #define FUNCTION_ARG_REGNO_P(REGNO) \
1764 (((unsigned) (REGNO) >= (unsigned) FIRST_PARM_REG \
1765 && (unsigned) (REGNO) < (unsigned) (FIRST_PARM_REG + NPARM_REGS (SImode)))\
1766 || (TARGET_FPU_ANY \
1767 && (unsigned) (REGNO) >= (unsigned) FIRST_FP_PARM_REG \
1768 && (unsigned) (REGNO) < (unsigned) (FIRST_FP_PARM_REG \
1769 + NPARM_REGS (SFmode))))
1770 \f
1771 /* Define a data type for recording info about an argument list
1772 during the scan of that argument list. This data type should
1773 hold all necessary information about the function itself
1774 and about the args processed so far, enough to enable macros
1775 such as FUNCTION_ARG to determine where the next arg should go.
1776
1777 On SH, this is a single integer, which is a number of words
1778 of arguments scanned so far (including the invisible argument,
1779 if any, which holds the structure-value-address).
1780    Thus NPARM_REGS or more means all following args should go on the stack.  */
1781
1782 enum sh_arg_class { SH_ARG_INT = 0, SH_ARG_FLOAT = 1 };
1783 struct sh_args {
1784 int arg_count[2];
1785 int force_mem;
1786 /* Nonzero if a prototype is available for the function. */
1787 int prototype_p;
1788   /* The number of an odd floating-point register that should be used
1789 for the next argument of type float. */
1790 int free_single_fp_reg;
1791 /* Whether we're processing an outgoing function call. */
1792 int outgoing;
1793 /* The number of general-purpose registers that should have been
1794 used to pass partial arguments, that are passed totally on the
1795 stack. On SHcompact, a call trampoline will pop them off the
1796 stack before calling the actual function, and, if the called
1797 function is implemented in SHcompact mode, the incoming arguments
1798 decoder will push such arguments back onto the stack. For
1799 incoming arguments, STACK_REGS also takes into account other
1800 arguments passed by reference, that the decoder will also push
1801 onto the stack. */
1802 int stack_regs;
1803 /* The number of general-purpose registers that should have been
1804 used to pass arguments, if the arguments didn't have to be passed
1805 by reference. */
1806 int byref_regs;
1807 /* Set as by shcompact_byref if the current argument is to be passed
1808 by reference. */
1809 int byref;
1810
1811 /* call_cookie is a bitmask used by call expanders, as well as
1812 function prologue and epilogues, to allow SHcompact to comply
1813 with the SH5 32-bit ABI, that requires 64-bit registers to be
1814 used even though only the lower 32-bit half is visible in
1815 SHcompact mode. The strategy is to call SHmedia trampolines.
1816
1817 The alternatives for each of the argument-passing registers are
1818 (a) leave it unchanged; (b) pop it off the stack; (c) load its
1819 contents from the address in it; (d) add 8 to it, storing the
1820 result in the next register, then (c); (e) copy it from some
1821      floating-point register.
1822
1823 Regarding copies from floating-point registers, r2 may only be
1824      copied from dr0.  r3 may be copied from dr0 or dr2.  r4 may be
1825      copied from dr0, dr2 or dr4.  r5 may be copied from dr0, dr2,
1826 dr4 or dr6. r6 may be copied from dr0, dr2, dr4, dr6 or dr8.
1827      r7 through r9 may be copied from dr0, dr2, dr4, dr6, dr8 or
1828 dr10.
1829
1830 The bit mask is structured as follows:
1831
1832 - 1 bit to tell whether to set up a return trampoline.
1833
1834      - 3 bits to count the number of consecutive registers to pop off the
1835 stack.
1836
1837 - 4 bits for each of r9, r8, r7 and r6.
1838
1839 - 3 bits for each of r5, r4, r3 and r2.
1840
1841 - 3 bits set to 0 (the most significant ones)
1842
1843 3 2 1 0
1844 1098 7654 3210 9876 5432 1098 7654 3210
1845 FLPF LPFL PFLP FFLP FFLP FFLP FFLP SSST
1846 2223 3344 4555 6666 7777 8888 9999 SSS-
1847
1848 - If F is set, the register must be copied from an FP register,
1849 whose number is encoded in the remaining bits.
1850
1851 - Else, if L is set, the register must be loaded from the address
1852 contained in it. If the P bit is *not* set, the address of the
1853 following dword should be computed first, and stored in the
1854 following register.
1855
1856 - Else, if P is set, the register alone should be popped off the
1857 stack.
1858
1859 - After all this processing, the number of registers represented
1860 in SSS will be popped off the stack. This is an optimization
1861 for pushing/popping consecutive registers, typically used for
1862 varargs and large arguments partially passed in registers.
1863
1864 - If T is set, a return trampoline will be set up for 64-bit
1865 return values to be split into 2 32-bit registers. */
1866 long call_cookie;
1867
1868 /* This is set to nonzero when the call in question must use the Renesas ABI,
1869 even without the -mrenesas option. */
1870 int renesas_abi;
1871 };
1872
1873 #define CALL_COOKIE_RET_TRAMP_SHIFT 0
1874 #define CALL_COOKIE_RET_TRAMP(VAL) ((VAL) << CALL_COOKIE_RET_TRAMP_SHIFT)
1875 #define CALL_COOKIE_STACKSEQ_SHIFT 1
1876 #define CALL_COOKIE_STACKSEQ(VAL) ((VAL) << CALL_COOKIE_STACKSEQ_SHIFT)
1877 #define CALL_COOKIE_STACKSEQ_GET(COOKIE) \
1878 (((COOKIE) >> CALL_COOKIE_STACKSEQ_SHIFT) & 7)
1879 #define CALL_COOKIE_INT_REG_SHIFT(REG) \
1880 (4 * (7 - (REG)) + (((REG) <= 2) ? ((REG) - 2) : 1) + 3)
1881 #define CALL_COOKIE_INT_REG(REG, VAL) \
1882 ((VAL) << CALL_COOKIE_INT_REG_SHIFT (REG))
1883 #define CALL_COOKIE_INT_REG_GET(COOKIE, REG) \
1884 (((COOKIE) >> CALL_COOKIE_INT_REG_SHIFT (REG)) & ((REG) < 4 ? 7 : 15))
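
/* A small worked example of the encoding above (illustrative only):
   CALL_COOKIE_RET_TRAMP (1) is 1 and sets the T bit;
   CALL_COOKIE_STACKSEQ (3) is 3 << 1 == 6, and CALL_COOKIE_STACKSEQ_GET (6)
   recovers 3, i.e. three consecutive registers to pop off the stack;
   CALL_COOKIE_INT_REG_SHIFT (0) evaluates to 4*7 + (0 - 2) + 3 == 29, so
   the first 3-bit per-register field occupies bits 31..29, matching the
   r2 column of the diagram in struct sh_args above.  */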
1885
1886 #define CUMULATIVE_ARGS struct sh_args
1887
1888 #define GET_SH_ARG_CLASS(MODE) \
1889 ((TARGET_FPU_ANY && (MODE) == SFmode) \
1890 ? SH_ARG_FLOAT \
1891 /* There's no mention of complex float types in the SH5 ABI, so we
1892 should presumably handle them as aggregate types. */ \
1893 : TARGET_SH5 && GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \
1894 ? SH_ARG_INT \
1895 : TARGET_FPU_DOUBLE && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
1896 || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) \
1897 ? SH_ARG_FLOAT : SH_ARG_INT)
1898
1899 #define ROUND_ADVANCE(SIZE) \
1900 (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1901
1902 /* Round a register number up to a proper boundary for an arg of mode
1903 MODE.
1904
1905 The SH doesn't care about double alignment, so we only
1906    round doubles to even regs when explicitly asked to.  */
1907
1908 #define ROUND_REG(CUM, MODE) \
1909 (((TARGET_ALIGN_DOUBLE \
1910 || ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && ((MODE) == DFmode || (MODE) == DCmode) \
1911 && (CUM).arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (MODE)))\
1912 && GET_MODE_UNIT_SIZE ((MODE)) > UNITS_PER_WORD) \
1913 ? ((CUM).arg_count[(int) GET_SH_ARG_CLASS (MODE)] \
1914 + ((CUM).arg_count[(int) GET_SH_ARG_CLASS (MODE)] & 1)) \
1915 : (CUM).arg_count[(int) GET_SH_ARG_CLASS (MODE)])
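
/* E.g. (a sketch, assuming UNITS_PER_WORD is 4 here): on SH4 with
   (MODE) == DFmode and arg_count[SH_ARG_FLOAT] == 3, the condition above
   holds, so the count is rounded up to 4 and the double starts in an
   even-numbered register pair; with an even count, or for SFmode, the
   count is returned unchanged.  */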
1916
1917 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1918 for a call to a function whose data type is FNTYPE.
1919 For a library call, FNTYPE is 0.
1920
1921 On SH, the offset always starts at 0: the first parm reg is always
1922 the same reg for a given argument class.
1923
1924 For TARGET_HITACHI, the structure value pointer is passed in memory. */
1925
1926 #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
1927 sh_init_cumulative_args (& (CUM), (FNTYPE), (LIBNAME), (FNDECL), (N_NAMED_ARGS), VOIDmode)
1928
1929 #define INIT_CUMULATIVE_LIBCALL_ARGS(CUM, MODE, LIBNAME) \
1930 sh_init_cumulative_args (& (CUM), NULL_TREE, (LIBNAME), NULL_TREE, 0, (MODE))
1931
1932 #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
1933 sh_function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED))
1934 #define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
1935 sh_function_arg (&(CUM), (MODE), (TYPE), (NAMED))
1936
1937 /* Return nonzero if an arg of mode MODE will be passed in a reg.
1938 This macro is only used in this file. */
1939
1940 #define PASS_IN_REG_P(CUM, MODE, TYPE) \
1941 (((TYPE) == 0 \
1942 || (! TREE_ADDRESSABLE ((tree)(TYPE)) \
1943 && (! (TARGET_HITACHI || (CUM).renesas_abi) \
1944 || ! (AGGREGATE_TYPE_P (TYPE) \
1945 || (!TARGET_FPU_ANY \
1946 && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
1947 && GET_MODE_SIZE (MODE) > GET_MODE_SIZE (SFmode))))))) \
1948 && ! (CUM).force_mem \
1949 && (TARGET_SH2E \
1950 ? ((MODE) == BLKmode \
1951 ? (((CUM).arg_count[(int) SH_ARG_INT] * UNITS_PER_WORD \
1952 + int_size_in_bytes (TYPE)) \
1953 <= NPARM_REGS (SImode) * UNITS_PER_WORD) \
1954 : ((ROUND_REG((CUM), (MODE)) \
1955 + HARD_REGNO_NREGS (BASE_ARG_REG (MODE), (MODE))) \
1956 <= NPARM_REGS (MODE))) \
1957 : ROUND_REG ((CUM), (MODE)) < NPARM_REGS (MODE)))
1958
1959 /* By accident we got stuck with passing SCmode on SH4 little endian
1960 in two registers that are nominally successive - which is different from
1961 two single SFmode values, where we take endianness translation into
1962 account. That does not work at all if an odd number of registers is
1963    already in use, so that got fixed.  But library functions are still more
1964 likely to use complex numbers without mixing them with SFmode arguments
1965 (which in C would have to be structures), so for the sake of ABI
1966 compatibility the way SCmode values are passed when an even number of
1967 FP registers is in use remains different from a pair of SFmode values for
1968 now.
1969 I.e.:
1970 foo (double); a: fr5,fr4
1971 foo (float a, float b); a: fr5 b: fr4
1972 foo (__complex float a); a.real fr4 a.imag: fr5 - for consistency,
1973 this should be the other way round...
1974 foo (float a, __complex float b); a: fr5 b.real: fr4 b.imag: fr7 */
1975 #define FUNCTION_ARG_SCmode_WART 1
1976
1977 /* If an argument of size 5, 6 or 7 bytes is to be passed in a 64-bit
1978 register in SHcompact mode, it must be padded in the most
1979 significant end. This means that passing it by reference wouldn't
1980 pad properly on a big-endian machine. In this particular case, we
1981 pass this argument on the stack, in a way that the call trampoline
1982 will load its value into the appropriate register. */
1983 #define SHCOMPACT_FORCE_ON_STACK(MODE,TYPE) \
1984 ((MODE) == BLKmode \
1985 && TARGET_SHCOMPACT \
1986 && ! TARGET_LITTLE_ENDIAN \
1987 && int_size_in_bytes (TYPE) > 4 \
1988 && int_size_in_bytes (TYPE) < 8)
1989
1990 /* Minimum alignment for an argument to be passed by callee-copy
1991 reference. We need such arguments to be aligned to 8 byte
1992 boundaries, because they'll be loaded using quad loads. */
1993 #define SH_MIN_ALIGN_FOR_CALLEE_COPY (8 * BITS_PER_UNIT)
1994
1995 /* The SH5 ABI requires floating-point arguments to be passed to
1996 functions without a prototype in both an FP register and a regular
1997 register or the stack. When passing the argument in both FP and
1998 general-purpose registers, list the FP register first. */
1999 #define SH5_PROTOTYPELESS_FLOAT_ARG(CUM,MODE) \
2000 (gen_rtx_PARALLEL \
2001 ((MODE), \
2002 gen_rtvec (2, \
2003 gen_rtx_EXPR_LIST \
2004 (VOIDmode, \
2005 ((CUM).arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode) \
2006 ? gen_rtx_REG ((MODE), FIRST_FP_PARM_REG \
2007 + (CUM).arg_count[(int) SH_ARG_FLOAT]) \
2008 : NULL_RTX), \
2009 const0_rtx), \
2010 gen_rtx_EXPR_LIST \
2011 (VOIDmode, \
2012 ((CUM).arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode) \
2013 ? gen_rtx_REG ((MODE), FIRST_PARM_REG \
2014 + (CUM).arg_count[(int) SH_ARG_INT]) \
2015 : gen_rtx_REG ((MODE), FIRST_FP_PARM_REG \
2016 + (CUM).arg_count[(int) SH_ARG_FLOAT])), \
2017 const0_rtx))))
2018
2019 /* The SH5 ABI requires regular registers or stack slots to be
2020 reserved for floating-point arguments. Registers are taken care of
2021 in FUNCTION_ARG_ADVANCE, but stack slots must be reserved here.
2022 Unfortunately, there's no way to just reserve a stack slot, so
2023 we'll end up needlessly storing a copy of the argument in the
2024 stack. For incoming arguments, however, the PARALLEL will be
2025 optimized to the register-only form, and the value in the stack
2026 slot won't be used at all. */
2027 #define SH5_PROTOTYPED_FLOAT_ARG(CUM,MODE,REG) \
2028 ((CUM).arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode) \
2029 ? gen_rtx_REG ((MODE), (REG)) \
2030 : gen_rtx_PARALLEL ((MODE), \
2031 gen_rtvec (2, \
2032 gen_rtx_EXPR_LIST \
2033 (VOIDmode, NULL_RTX, \
2034 const0_rtx), \
2035 gen_rtx_EXPR_LIST \
2036 (VOIDmode, gen_rtx_REG ((MODE), \
2037 (REG)), \
2038 const0_rtx))))
2039
2040 #define SH5_WOULD_BE_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
2041 (TARGET_SH5 \
2042 && ((MODE) == BLKmode || (MODE) == TImode || (MODE) == CDImode \
2043 || (MODE) == DCmode) \
2044 && ((CUM).arg_count[(int) SH_ARG_INT] \
2045 + (((MODE) == BLKmode ? int_size_in_bytes (TYPE) \
2046 : GET_MODE_SIZE (MODE)) \
2047 + 7) / 8) > NPARM_REGS (SImode))
2048
2049 /* Perform any actions needed for a function that is receiving a
2050 variable number of arguments. */
2051
2052 /* Call the function profiler with a given profile label.
2053 We use two .aligns, so as to make sure that both the .long is aligned
2054 on a 4 byte boundary, and that the .long is a fixed distance (2 bytes)
2055 from the trapa instruction. */
2056
2057 #define FUNCTION_PROFILER(STREAM,LABELNO) \
2058 { \
2059 if (TARGET_SHMEDIA) \
2060 { \
2061 fprintf((STREAM), "\tmovi\t33,r0\n"); \
2062 fprintf((STREAM), "\ttrapa\tr0\n"); \
2063 asm_fprintf((STREAM), "\t.long\t%LLP%d\n", (LABELNO)); \
2064 } \
2065 else \
2066 { \
2067 fprintf((STREAM), "\t.align\t2\n"); \
2068 fprintf((STREAM), "\ttrapa\t#33\n"); \
2069 fprintf((STREAM), "\t.align\t2\n"); \
2070 asm_fprintf((STREAM), "\t.long\t%LLP%d\n", (LABELNO)); \
2071 } \
2072 }
2073
2074 /* Define this macro if the code for function profiling should come
2075 before the function prologue. Normally, the profiling code comes
2076 after. */
2077
2078 #define PROFILE_BEFORE_PROLOGUE
2079
2080 /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
2081 the stack pointer does not matter. The value is tested only in
2082 functions that have frame pointers.
2083 No definition is equivalent to always zero. */
2084
2085 #define EXIT_IGNORE_STACK 1
2086
2087 /*
2088 On the SH, the trampoline looks like
2089 2 0002 D202 mov.l l2,r2
2090 1 0000 D301 mov.l l1,r3
2091 3 0004 422B jmp @r2
2092 4 0006 0009 nop
2093 5 0008 00000000 l1: .long area
2094 6 000c 00000000 l2: .long function */
2095
2096 /* Length in units of the trampoline for entering a nested function. */
2097 #define TRAMPOLINE_SIZE (TARGET_SHMEDIA64 ? 40 : TARGET_SH5 ? 24 : 16)
2098
2099 /* Alignment required for a trampoline, in bits.  */
2100 #define TRAMPOLINE_ALIGNMENT \
2101 ((CACHE_LOG < 3 || (TARGET_SMALLCODE && ! TARGET_HARVARD)) ? 32 \
2102 : TARGET_SHMEDIA ? 256 : 64)
2103
2104 /* Emit RTL insns to initialize the variable parts of a trampoline.
2105 FNADDR is an RTX for the address of the function's pure code.
2106 CXT is an RTX for the static chain value for the function. */
2107
2108 #define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
2109 sh_initialize_trampoline ((TRAMP), (FNADDR), (CXT))
2110
2111 /* On SH5, trampolines are SHmedia code, so add 1 to the address. */
2112
2113 #define TRAMPOLINE_ADJUST_ADDRESS(TRAMP) do \
2114 { \
2115 if (TARGET_SHMEDIA) \
2116 (TRAMP) = expand_simple_binop (Pmode, PLUS, (TRAMP), const1_rtx, \
2117 gen_reg_rtx (Pmode), 0, \
2118 OPTAB_LIB_WIDEN); \
2119 } while (0)
2120
2121 /* A C expression whose value is RTL representing the value of the return
2122 address for the frame COUNT steps up from the current frame.
2123 FRAMEADDR is already the frame pointer of the COUNT frame, so we
2124 can ignore COUNT. */
2125
2126 #define RETURN_ADDR_RTX(COUNT, FRAME) \
2127 (((COUNT) == 0) ? sh_get_pr_initial_val () : (rtx) 0)
2128
2129 /* A C expression whose value is RTL representing the location of the
2130 incoming return address at the beginning of any function, before the
2131 prologue. This RTL is either a REG, indicating that the return
2132 value is saved in REG, or a MEM representing a location in
2133 the stack. */
2134 #define INCOMING_RETURN_ADDR_RTX \
2135 gen_rtx_REG (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
2136 \f
2137 /* Addressing modes, and classification of registers for them. */
2138 #define HAVE_POST_INCREMENT TARGET_SH1
2139 #define HAVE_PRE_DECREMENT TARGET_SH1
2140
2141 #define USE_LOAD_POST_INCREMENT(mode) ((mode == SImode || mode == DImode) \
2142 ? 0 : TARGET_SH1)
2143 #define USE_LOAD_PRE_DECREMENT(mode) 0
2144 #define USE_STORE_POST_INCREMENT(mode) 0
2145 #define USE_STORE_PRE_DECREMENT(mode) ((mode == SImode || mode == DImode) \
2146 ? 0 : TARGET_SH1)
2147
2148 #define MOVE_BY_PIECES_P(SIZE, ALIGN) \
2149 (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1) \
2150 < (TARGET_SMALLCODE ? 2 : ((ALIGN >= 32) ? 16 : 2)))
2151
2152 #define STORE_BY_PIECES_P(SIZE, ALIGN) \
2153 (move_by_pieces_ninsns (SIZE, ALIGN, STORE_MAX_PIECES + 1) \
2154 < (TARGET_SMALLCODE ? 2 : ((ALIGN >= 32) ? 16 : 2)))
2155
2156 #define SET_BY_PIECES_P(SIZE, ALIGN) STORE_BY_PIECES_P(SIZE, ALIGN)
2157
2158 /* Macros to check register numbers against specific register classes. */
2159
2160 /* These assume that REGNO is a hard or pseudo reg number.
2161 They give nonzero only if REGNO is a hard reg of the suitable class
2162 or a pseudo reg currently allocated to a suitable hard reg.
2163 Since they use reg_renumber, they are safe only once reg_renumber
2164 has been allocated, which happens in local-alloc.c. */
2165
2166 #define REGNO_OK_FOR_BASE_P(REGNO) \
2167 (GENERAL_OR_AP_REGISTER_P (REGNO) \
2168 || GENERAL_OR_AP_REGISTER_P (reg_renumber[(REGNO)]))
2169 #define REGNO_OK_FOR_INDEX_P(REGNO) \
2170 (TARGET_SHMEDIA \
2171 ? (GENERAL_REGISTER_P (REGNO) \
2172 || GENERAL_REGISTER_P ((unsigned) reg_renumber[(REGNO)])) \
2173 : (REGNO) == R0_REG || (unsigned) reg_renumber[(REGNO)] == R0_REG)
2174
2175 /* Maximum number of registers that can appear in a valid memory
2176 address. */
2177
2178 #define MAX_REGS_PER_ADDRESS 2
2179
2180 /* Recognize any constant value that is a valid address. */
2181
2182 #define CONSTANT_ADDRESS_P(X) (GET_CODE (X) == LABEL_REF)
2183
2184 /* Nonzero if the constant value X is a legitimate general operand. */
2185 /* can_store_by_pieces constructs VOIDmode CONST_DOUBLEs. */
2186
2187 #define LEGITIMATE_CONSTANT_P(X) \
2188 (TARGET_SHMEDIA \
2189 ? ((GET_MODE (X) != DFmode \
2190 && GET_MODE_CLASS (GET_MODE (X)) != MODE_VECTOR_FLOAT) \
2191 || (X) == CONST0_RTX (GET_MODE (X)) \
2192 || ! TARGET_SHMEDIA_FPU \
2193 || TARGET_SHMEDIA64) \
2194 : (GET_CODE (X) != CONST_DOUBLE \
2195 || GET_MODE (X) == DFmode || GET_MODE (X) == SFmode \
2196 || GET_MODE (X) == DImode || GET_MODE (X) == VOIDmode))
2197
2198 /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
2199 and check its validity for a certain class.
2200 We have two alternate definitions for each of them.
2201 The usual definition accepts all pseudo regs; the other rejects
2202 them unless they have been allocated suitable hard regs.
2203 The symbol REG_OK_STRICT causes the latter definition to be used. */
2204
2205 #ifndef REG_OK_STRICT
2206
2207 /* Nonzero if X is a hard reg that can be used as a base reg
2208 or if it is a pseudo reg. */
2209 #define REG_OK_FOR_BASE_P(X) \
2210 (GENERAL_OR_AP_REGISTER_P (REGNO (X)) || REGNO (X) >= FIRST_PSEUDO_REGISTER)
2211
2212 /* Nonzero if X is a hard reg that can be used as an index
2213 or if it is a pseudo reg. */
2214 #define REG_OK_FOR_INDEX_P(X) \
2215 ((TARGET_SHMEDIA ? GENERAL_REGISTER_P (REGNO (X)) \
2216 : REGNO (X) == R0_REG) || REGNO (X) >= FIRST_PSEUDO_REGISTER)
2217
2218 /* Nonzero if X/OFFSET is a hard reg that can be used as an index
2219 or if X is a pseudo reg. */
2220 #define SUBREG_OK_FOR_INDEX_P(X, OFFSET) \
2221 ((TARGET_SHMEDIA ? GENERAL_REGISTER_P (REGNO (X)) \
2222 : REGNO (X) == R0_REG && OFFSET == 0) || REGNO (X) >= FIRST_PSEUDO_REGISTER)
2223
2224 #else
2225
2226 /* Nonzero if X is a hard reg that can be used as a base reg. */
2227 #define REG_OK_FOR_BASE_P(X) \
2228 REGNO_OK_FOR_BASE_P (REGNO (X))
2229
2230 /* Nonzero if X is a hard reg that can be used as an index. */
2231 #define REG_OK_FOR_INDEX_P(X) \
2232 REGNO_OK_FOR_INDEX_P (REGNO (X))
2233
2234 /* Nonzero if X/OFFSET is a hard reg that can be used as an index. */
2235 #define SUBREG_OK_FOR_INDEX_P(X, OFFSET) \
2236 (REGNO_OK_FOR_INDEX_P (REGNO (X)) && (OFFSET) == 0)
2237
2238 #endif
2239
2240 /* Macros for extra constraints. */
2241
2242 #define IS_PC_RELATIVE_LOAD_ADDR_P(OP) \
2243 ((GET_CODE ((OP)) == LABEL_REF) \
2244 || (GET_CODE ((OP)) == CONST \
2245 && GET_CODE (XEXP ((OP), 0)) == PLUS \
2246 && GET_CODE (XEXP (XEXP ((OP), 0), 0)) == LABEL_REF \
2247 && GET_CODE (XEXP (XEXP ((OP), 0), 1)) == CONST_INT))
2248
2249 #define IS_NON_EXPLICIT_CONSTANT_P(OP) \
2250 (CONSTANT_P (OP) \
2251 && GET_CODE (OP) != CONST_INT \
2252 && GET_CODE (OP) != CONST_DOUBLE \
2253 && (!flag_pic \
2254 || (LEGITIMATE_PIC_OPERAND_P (OP) \
2255 && !PIC_ADDR_P (OP) \
2256 && GET_CODE (OP) != LABEL_REF)))
2257
2258 /* Check whether OP is a datalabel unspec. */
2259 #define DATALABEL_REF_NO_CONST_P(OP) \
2260 (GET_CODE (OP) == UNSPEC \
2261 && XINT ((OP), 1) == UNSPEC_DATALABEL \
2262 && XVECLEN ((OP), 0) == 1 \
2263 && GET_CODE (XVECEXP ((OP), 0, 0)) == LABEL_REF)
2264
2265 #define GOT_ENTRY_P(OP) \
2266 (GET_CODE (OP) == CONST && GET_CODE (XEXP ((OP), 0)) == UNSPEC \
2267 && XINT (XEXP ((OP), 0), 1) == UNSPEC_GOT)
2268
2269 #define GOTPLT_ENTRY_P(OP) \
2270 (GET_CODE (OP) == CONST && GET_CODE (XEXP ((OP), 0)) == UNSPEC \
2271 && XINT (XEXP ((OP), 0), 1) == UNSPEC_GOTPLT)
2272
2273 #define UNSPEC_GOTOFF_P(OP) \
2274 (GET_CODE (OP) == UNSPEC && XINT ((OP), 1) == UNSPEC_GOTOFF)
2275
2276 #define GOTOFF_P(OP) \
2277 (GET_CODE (OP) == CONST \
2278 && (UNSPEC_GOTOFF_P (XEXP ((OP), 0)) \
2279 || (GET_CODE (XEXP ((OP), 0)) == PLUS \
2280 && UNSPEC_GOTOFF_P (XEXP (XEXP ((OP), 0), 0)) \
2281 && GET_CODE (XEXP (XEXP ((OP), 0), 1)) == CONST_INT)))
2282
2283 #define PIC_ADDR_P(OP) \
2284 (GET_CODE (OP) == CONST && GET_CODE (XEXP ((OP), 0)) == UNSPEC \
2285 && XINT (XEXP ((OP), 0), 1) == UNSPEC_PIC)
2286
2287 #define PCREL_SYMOFF_P(OP) \
2288 (GET_CODE (OP) == CONST \
2289 && GET_CODE (XEXP ((OP), 0)) == UNSPEC \
2290 && XINT (XEXP ((OP), 0), 1) == UNSPEC_PCREL_SYMOFF)
2291
2292 #define NON_PIC_REFERENCE_P(OP) \
2293 (GET_CODE (OP) == LABEL_REF || GET_CODE (OP) == SYMBOL_REF \
2294 || (GET_CODE (OP) == CONST \
2295 && (GET_CODE (XEXP ((OP), 0)) == LABEL_REF \
2296 || GET_CODE (XEXP ((OP), 0)) == SYMBOL_REF \
2297 || DATALABEL_REF_NO_CONST_P (XEXP ((OP), 0)))) \
2298 || (GET_CODE (OP) == CONST && GET_CODE (XEXP ((OP), 0)) == PLUS \
2299 && (GET_CODE (XEXP (XEXP ((OP), 0), 0)) == SYMBOL_REF \
2300 || GET_CODE (XEXP (XEXP ((OP), 0), 0)) == LABEL_REF \
2301 || DATALABEL_REF_NO_CONST_P (XEXP (XEXP ((OP), 0), 0))) \
2302 && GET_CODE (XEXP (XEXP ((OP), 0), 1)) == CONST_INT))
2303
2304 #define PIC_REFERENCE_P(OP) \
2305 (GOT_ENTRY_P (OP) || GOTPLT_ENTRY_P (OP) \
2306 || GOTOFF_P (OP) || PIC_ADDR_P (OP))
2307
2308 #define MOVI_SHORI_BASE_OPERAND_P(OP) \
2309 (flag_pic \
2310 ? (GOT_ENTRY_P (OP) || GOTPLT_ENTRY_P (OP) || GOTOFF_P (OP) \
2311 || PCREL_SYMOFF_P (OP)) \
2312 : NON_PIC_REFERENCE_P (OP))
2313 \f
2314 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
2315 that is a valid memory address for an instruction.
2316 The MODE argument is the machine mode for the MEM expression
2317 that wants to use this address. */
2318
2319 #define MODE_DISP_OK_4(X,MODE) \
2320 (GET_MODE_SIZE (MODE) == 4 && (unsigned) INTVAL (X) < 64 \
2321 && ! (INTVAL (X) & 3) && ! (TARGET_SH2E && (MODE) == SFmode))
2322
2323 #define MODE_DISP_OK_8(X,MODE) \
2324 ((GET_MODE_SIZE(MODE)==8) && ((unsigned)INTVAL(X)<60) \
2325 && ! (INTVAL(X) & 3) && ! (TARGET_SH4 && (MODE) == DFmode))
2326
2327 #undef MODE_DISP_OK_4
2328 #define MODE_DISP_OK_4(X,MODE) \
2329 ((GET_MODE_SIZE (MODE) == 4 && (unsigned) INTVAL (X) < 64 \
2330 && ! (INTVAL (X) & 3) && ! (TARGET_SH2E && (MODE) == SFmode)) \
2331 || ((GET_MODE_SIZE(MODE)==4) && ((unsigned)INTVAL(X)<16383) \
2332 && ! (INTVAL(X) & 3) && TARGET_SH2A))
2333
2334 #undef MODE_DISP_OK_8
2335 #define MODE_DISP_OK_8(X,MODE) \
2336 (((GET_MODE_SIZE(MODE)==8) && ((unsigned)INTVAL(X)<60) \
2337 && ! (INTVAL(X) & 3) && ! ((TARGET_SH4 || TARGET_SH2A) && (MODE) == DFmode)) \
2338 || ((GET_MODE_SIZE(MODE)==8) && ((unsigned)INTVAL(X)<8192) \
2339 && ! (INTVAL(X) & (TARGET_SH2A_DOUBLE ? 7 : 3)) && (TARGET_SH2A && (MODE) == DFmode)))
2340
2341 #define BASE_REGISTER_RTX_P(X) \
2342 ((GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
2343 || (GET_CODE (X) == SUBREG \
2344 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE ((X))), \
2345 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (X)))) \
2346 && GET_CODE (SUBREG_REG (X)) == REG \
2347 && REG_OK_FOR_BASE_P (SUBREG_REG (X))))
2348
2349 /* Since this must be r0, which is a single register class, we must check
2350 SUBREGs more carefully, to be sure that we don't accept one that extends
2351 outside the class. */
2352 #define INDEX_REGISTER_RTX_P(X) \
2353 ((GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) \
2354 || (GET_CODE (X) == SUBREG \
2355 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE ((X))), \
2356 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (X)))) \
2357 && GET_CODE (SUBREG_REG (X)) == REG \
2358 && SUBREG_OK_FOR_INDEX_P (SUBREG_REG (X), SUBREG_BYTE (X))))
2359
2360 /* Jump to LABEL if X is a valid address RTX. This must also take
2361 REG_OK_STRICT into account when deciding about valid registers, but it uses
2362 the above macros so we are in luck.
2363
2364 Allow REG
2365 REG+disp
2366 REG+r0
2367 REG++
2368 --REG */
2369
2370 /* ??? The SH2e does not have the REG+disp addressing mode when loading values
2371 into the FRx registers. We implement this by setting the maximum offset
2372 to zero when the value is SFmode. This also restricts loading of SFmode
2373 values into the integer registers, but that can't be helped. */
2374
2375 /* The SH allows a displacement in a QI or HI mode address, but only when the
2376 other operand is R0. GCC doesn't handle this very well, so we forgo
2377 all of that.
2378
2379 A legitimate index for a QI or HI is 0, SI can be any number 0..63,
2380 DI can be any number 0..60. */
2381
2382 #define GO_IF_LEGITIMATE_INDEX(MODE, OP, LABEL) \
2383 do { \
2384 if (GET_CODE (OP) == CONST_INT) \
2385 { \
2386 if (TARGET_SHMEDIA) \
2387 { \
2388 int MODE_SIZE; \
2389           /* Check if this is the address of an unaligned load / store.  */\
2390 if ((MODE) == VOIDmode) \
2391 { \
2392 if (CONST_OK_FOR_I06 (INTVAL (OP))) \
2393 goto LABEL; \
2394 break; \
2395 } \
2396 MODE_SIZE = GET_MODE_SIZE (MODE); \
2397 if (! (INTVAL (OP) & (MODE_SIZE - 1)) \
2398 && INTVAL (OP) >= -512 * MODE_SIZE \
2399 && INTVAL (OP) < 512 * MODE_SIZE) \
2400 goto LABEL; \
2401 else \
2402 break; \
2403 } \
2404 if (TARGET_SH2A) \
2405 { \
2406 if (GET_MODE_SIZE (MODE) == 1 \
2407 && (unsigned) INTVAL (OP) < 4096) \
2408 goto LABEL; \
2409 } \
2410 if (MODE_DISP_OK_4 ((OP), (MODE))) goto LABEL; \
2411 if (MODE_DISP_OK_8 ((OP), (MODE))) goto LABEL; \
2412 } \
2413 } while(0)
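
/* Rough examples of what the index check above accepts in the non-SH2A,
   non-SHmedia case: for SImode, displacements 0, 4, ... 60 are OK while
   64 (out of range) and 6 (misaligned) are not; for an 8-byte mode the
   displacement must additionally stay below 60.  */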
2414
2415 #define ALLOW_INDEXED_ADDRESS \
2416 ((!TARGET_SHMEDIA32 && !TARGET_SHCOMPACT) || TARGET_ALLOW_INDEXED_ADDRESS)
2417
2418 #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
2419 { \
2420 if (BASE_REGISTER_RTX_P (X)) \
2421 goto LABEL; \
2422 else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
2423 && ! TARGET_SHMEDIA \
2424 && BASE_REGISTER_RTX_P (XEXP ((X), 0))) \
2425 goto LABEL; \
2426 else if (GET_CODE (X) == PLUS \
2427 && ((MODE) != PSImode || reload_completed)) \
2428 { \
2429 rtx xop0 = XEXP ((X), 0); \
2430 rtx xop1 = XEXP ((X), 1); \
2431 if (GET_MODE_SIZE (MODE) <= 8 && BASE_REGISTER_RTX_P (xop0)) \
2432 GO_IF_LEGITIMATE_INDEX ((MODE), xop1, LABEL); \
2433 if ((ALLOW_INDEXED_ADDRESS || GET_MODE (X) == DImode \
2434 || ((xop0 == stack_pointer_rtx \
2435 || xop0 == hard_frame_pointer_rtx) \
2436 && REG_P (xop1) && REGNO (xop1) == R0_REG) \
2437 || ((xop1 == stack_pointer_rtx \
2438 || xop1 == hard_frame_pointer_rtx) \
2439 && REG_P (xop0) && REGNO (xop0) == R0_REG)) \
2440 && ((!TARGET_SHMEDIA && GET_MODE_SIZE (MODE) <= 4) \
2441 || (TARGET_SHMEDIA && GET_MODE_SIZE (MODE) <= 8) \
2442 || ((TARGET_SH4 || TARGET_SH2A_DOUBLE) \
2443 && TARGET_FMOVD && MODE == DFmode))) \
2444 { \
2445 if (BASE_REGISTER_RTX_P (xop1) && INDEX_REGISTER_RTX_P (xop0))\
2446 goto LABEL; \
2447 if (INDEX_REGISTER_RTX_P (xop1) && BASE_REGISTER_RTX_P (xop0))\
2448 goto LABEL; \
2449 } \
2450 } \
2451 }
2452 \f
2453 /* Try machine-dependent ways of modifying an illegitimate address
2454 to be legitimate. If we find one, return the new, valid address.
2455 This macro is used in only one place: `memory_address' in explow.c.
2456
2457 OLDX is the address as it was before break_out_memory_refs was called.
2458 In some cases it is useful to look at this to decide what needs to be done.
2459
2460 MODE and WIN are passed so that this macro can use
2461 GO_IF_LEGITIMATE_ADDRESS.
2462
2463 It is always safe for this macro to do nothing. It exists to recognize
2464 opportunities to optimize the output.
2465
2466 For the SH, if X is almost suitable for indexing, but the offset is
2467 out of range, convert it into a normal form so that cse has a chance
2468 of reducing the number of address registers used. */
2469
2470 #define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
2471 { \
2472 if (flag_pic) \
2473 (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
2474 if (GET_CODE (X) == PLUS \
2475 && (GET_MODE_SIZE (MODE) == 4 \
2476 || GET_MODE_SIZE (MODE) == 8) \
2477 && GET_CODE (XEXP ((X), 1)) == CONST_INT \
2478 && BASE_REGISTER_RTX_P (XEXP ((X), 0)) \
2479 && ! TARGET_SHMEDIA \
2480 && ! ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && (MODE) == DFmode) \
2481 && ! (TARGET_SH2E && (MODE) == SFmode)) \
2482 { \
2483 rtx index_rtx = XEXP ((X), 1); \
2484 HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base; \
2485 rtx sum; \
2486 \
2487 GO_IF_LEGITIMATE_INDEX ((MODE), index_rtx, WIN); \
2488 /* On rare occasions, we might get an unaligned pointer \
2489 that is indexed in a way to give an aligned address. \
2490 Therefore, keep the lower two bits in offset_base. */ \
2491 /* Instead of offset_base 128..131 use 124..127, so that \
2492 simple add suffices. */ \
2493 if (offset > 127) \
2494 { \
2495 offset_base = ((offset + 4) & ~60) - 4; \
2496 } \
2497 else \
2498 offset_base = offset & ~60; \
2499 /* Sometimes the normal form does not suit DImode. We \
2500 could avoid that by using smaller ranges, but that \
2501 would give less optimized code when SImode is \
2502 prevalent. */ \
2503 if (GET_MODE_SIZE (MODE) + offset - offset_base <= 64) \
2504 { \
2505 sum = expand_binop (Pmode, add_optab, XEXP ((X), 0), \
2506 GEN_INT (offset_base), NULL_RTX, 0, \
2507 OPTAB_LIB_WIDEN); \
2508 \
2509 (X) = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base)); \
2510 goto WIN; \
2511 } \
2512 } \
2513 }
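
/* Worked example of the normalization above (illustrative): with
   (MODE) == SImode and X == (plus (reg) (const_int 68)), 68 is outside
   the 0..63 displacement range, so offset_base becomes 68 & ~60 == 64;
   the base is advanced by 64 and X is rewritten as
   (plus (new reg) (const_int 4)), whose displacement is legitimate.  */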
2514
2515 /* A C compound statement that attempts to replace X, which is an address
2516 that needs reloading, with a valid memory address for an operand of
2517 mode MODE. WIN is a C statement label elsewhere in the code.
2518
2519 Like for LEGITIMIZE_ADDRESS, for the SH we try to get a normal form
2520 of the address. That will allow inheritance of the address reloads. */
2521
2522 #define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
2523 { \
2524 if (GET_CODE (X) == PLUS \
2525 && (GET_MODE_SIZE (MODE) == 4 || GET_MODE_SIZE (MODE) == 8) \
2526 && GET_CODE (XEXP (X, 1)) == CONST_INT \
2527 && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
2528 && ! TARGET_SHMEDIA \
2529 && ! (TARGET_SH4 && (MODE) == DFmode) \
2530 && ! ((MODE) == PSImode && (TYPE) == RELOAD_FOR_INPUT_ADDRESS) \
2531 && (ALLOW_INDEXED_ADDRESS \
2532 || XEXP ((X), 0) == stack_pointer_rtx \
2533 || XEXP ((X), 0) == hard_frame_pointer_rtx)) \
2534 { \
2535 rtx index_rtx = XEXP (X, 1); \
2536 HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base; \
2537 rtx sum; \
2538 \
2539 if (TARGET_SH2A && (MODE) == DFmode && (offset & 0x7)) \
2540 { \
2541 push_reload (X, NULL_RTX, &X, NULL, \
2542 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, (OPNUM), \
2543 (TYPE)); \
2544 goto WIN; \
2545 } \
2546 if (TARGET_SH2E && MODE == SFmode) \
2547 { \
2548 X = copy_rtx (X); \
2549 push_reload (X, NULL_RTX, &X, NULL, \
2550 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, (OPNUM), \
2551 (TYPE)); \
2552 goto WIN; \
2553 } \
2554 /* Instead of offset_base 128..131 use 124..127, so that \
2555 simple add suffices. */ \
2556 if (offset > 127) \
2557 { \
2558 offset_base = ((offset + 4) & ~60) - 4; \
2559 } \
2560 else \
2561 offset_base = offset & ~60; \
2562 /* Sometimes the normal form does not suit DImode. We \
2563 could avoid that by using smaller ranges, but that \
2564 would give less optimized code when SImode is \
2565 prevalent. */ \
2566 if (GET_MODE_SIZE (MODE) + offset - offset_base <= 64) \
2567 { \
2568 sum = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
2569 GEN_INT (offset_base)); \
2570 X = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));\
2571 push_reload (sum, NULL_RTX, &XEXP (X, 0), NULL, \
2572 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, (OPNUM), \
2573 (TYPE)); \
2574 goto WIN; \
2575 } \
2576 } \
2577 /* We must re-recognize what we created before. */ \
2578 else if (GET_CODE (X) == PLUS \
2579 && (GET_MODE_SIZE (MODE) == 4 || GET_MODE_SIZE (MODE) == 8) \
2580 && GET_CODE (XEXP (X, 0)) == PLUS \
2581 && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
2582 && BASE_REGISTER_RTX_P (XEXP (XEXP (X, 0), 0)) \
2583 && GET_CODE (XEXP (X, 1)) == CONST_INT \
2584 && ! TARGET_SHMEDIA \
2585 && ! (TARGET_SH2E && MODE == SFmode)) \
2586 { \
2587 /* Because this address is so complex, we know it must have \
2588 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus, \
2589 it is already unshared, and needs no further unsharing. */ \
2590 push_reload (XEXP ((X), 0), NULL_RTX, &XEXP ((X), 0), NULL, \
2591 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, (OPNUM), (TYPE));\
2592 goto WIN; \
2593 } \
2594 }
2595 \f
2596 /* Specify the machine mode that this machine uses
2597 for the index in the tablejump instruction. */
2598 #define CASE_VECTOR_MODE ((! optimize || TARGET_BIGTABLE) ? SImode : HImode)
2599
2600 #define CASE_VECTOR_SHORTEN_MODE(MIN_OFFSET, MAX_OFFSET, BODY) \
2601 ((MIN_OFFSET) >= 0 && (MAX_OFFSET) <= 127 \
2602 ? (ADDR_DIFF_VEC_FLAGS (BODY).offset_unsigned = 0, QImode) \
2603 : (MIN_OFFSET) >= 0 && (MAX_OFFSET) <= 255 \
2604 ? (ADDR_DIFF_VEC_FLAGS (BODY).offset_unsigned = 1, QImode) \
2605 : (MIN_OFFSET) >= -32768 && (MAX_OFFSET) <= 32767 ? HImode \
2606 : SImode)
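
/* For example, a dispatch table whose offsets span 0..200 gets unsigned
   QImode entries, one spanning -100..300 gets HImode entries, and anything
   wider falls back to SImode.  */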
2607
2608 /* Define as C expression which evaluates to nonzero if the tablejump
2609 instruction expects the table to contain offsets from the address of the
2610 table.
2611 Do not define this if the table should contain absolute addresses. */
2612 #define CASE_VECTOR_PC_RELATIVE 1
2613
2614 /* Define it here, so that it doesn't get bumped to 64-bits on SHmedia. */
2615 #define FLOAT_TYPE_SIZE 32
2616
2617 /* Since the SH2e has only `float' support, it is desirable to make all
2618 floating point types equivalent to `float'. */
2619 #define DOUBLE_TYPE_SIZE ((TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH2A_DOUBLE) ? 32 : 64)
2620
2621 #if defined(__SH2E__) || defined(__SH3E__) || defined( __SH4_SINGLE_ONLY__)
2622 #define LIBGCC2_DOUBLE_TYPE_SIZE 32
2623 #else
2624 #define LIBGCC2_DOUBLE_TYPE_SIZE 64
2625 #endif
2626
2627 /* 'char' is signed by default. */
2628 #define DEFAULT_SIGNED_CHAR 1
2629
2630 /* The type of size_t is unsigned int (long unsigned int for SH5).  */
2631 #define SIZE_TYPE (TARGET_SH5 ? "long unsigned int" : "unsigned int")
2632
2633 #undef PTRDIFF_TYPE
2634 #define PTRDIFF_TYPE (TARGET_SH5 ? "long int" : "int")
2635
2636 #define WCHAR_TYPE "short unsigned int"
2637 #define WCHAR_TYPE_SIZE 16
2638
2639 #define SH_ELF_WCHAR_TYPE "long int"
2640
2641 /* Max number of bytes we can move from memory to memory
2642 in one reasonably fast instruction. */
2643 #define MOVE_MAX (TARGET_SHMEDIA ? 8 : 4)
2644
2645 /* Maximum value possibly taken by MOVE_MAX. Must be defined whenever
2646 MOVE_MAX is not a compile-time constant. */
2647 #define MAX_MOVE_MAX 8
2648
2649 /* Max number of bytes we want move_by_pieces to be able to copy
2650 efficiently. */
2651 #define MOVE_MAX_PIECES (TARGET_SH4 || TARGET_SHMEDIA ? 8 : 4)
2652
2653 /* Define if operations between registers always perform the operation
2654 on the full register even if a narrower mode is specified. */
2655 #define WORD_REGISTER_OPERATIONS
2656
2657 /* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
2658 will either zero-extend or sign-extend. The value of this macro should
2659 be the code that says which one of the two operations is implicitly
2660 done, UNKNOWN if none. */
2661 /* For SHmedia, we can truncate to QImode more easily using zero extension.  */
2662 /* FP registers can load SImode values, but don't implicitly sign-extend
2663 them to DImode. */
2664 #define LOAD_EXTEND_OP(MODE) \
2665 (((MODE) == QImode && TARGET_SHMEDIA) ? ZERO_EXTEND \
2666 : (MODE) != SImode ? SIGN_EXTEND : UNKNOWN)
2667
2668 /* Define if loading short immediate values into registers sign extends. */
2669 #define SHORT_IMMEDIATES_SIGN_EXTEND
2670
2671 /* Nonzero if access to memory by bytes is no faster than for words. */
2672 #define SLOW_BYTE_ACCESS 1
2673
2674 /* Immediate shift counts are truncated by the output routines (or was it
2675 the assembler?). Shift counts in a register are truncated by SH. Note
2676 that the native compiler puts too large (> 32) immediate shift counts
2677 into a register and shifts by the register, letting the SH decide what
2678 to do instead of doing that itself. */
2679 /* ??? The library routines in lib1funcs.asm truncate the shift count.
2680 However, the SH3 has hardware shifts that do not truncate exactly as gcc
2681 expects - the sign bit is significant - so it appears that we need to
2682 leave this zero for correct SH3 code. */
2683 #define SHIFT_COUNT_TRUNCATED (! TARGET_SH3 && ! TARGET_SH2A)
2684
2685 /* All integers have the same format so truncation is easy. */
2686 /* But SHmedia must sign-extend DImode when truncating to SImode. */
2687 #define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) \
2688 (!TARGET_SHMEDIA || (INPREC) < 64 || (OUTPREC) >= 64)
2689
2690 /* Define this if addresses of constant functions
2691 shouldn't be put through pseudo regs where they can be cse'd.
2692 Desirable on machines where ordinary constants are expensive
2693 but a CALL with constant address is cheap. */
2694 /*#define NO_FUNCTION_CSE 1*/
2695
2696 /* The machine modes of pointers and functions. */
2697 #define Pmode (TARGET_SHMEDIA64 ? DImode : SImode)
2698 #define FUNCTION_MODE Pmode
2699
2700 /* The multiply insn on the SH1 and the divide insns on the SH1 and SH2
2701 are actually function calls with some special constraints on arguments
2702 and register usage.
2703
2704 These macros tell reorg that the references to arguments and
2705 register clobbers for insns of type sfunc do not appear to happen
2706 until after the millicode call. This allows reorg to put insns
2707 which set the argument registers into the delay slot of the millicode
2708 call -- thus they act more like traditional CALL_INSNs.
2709
2710 get_attr_is_sfunc will try to recognize the given insn, so make sure to
2711 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
2712 in particular. */
2713
2714 #define INSN_SETS_ARE_DELAYED(X) \
2715 ((GET_CODE (X) == INSN \
2716 && GET_CODE (PATTERN (X)) != SEQUENCE \
2717 && GET_CODE (PATTERN (X)) != USE \
2718 && GET_CODE (PATTERN (X)) != CLOBBER \
2719 && get_attr_is_sfunc (X)))
2720
2721 #define INSN_REFERENCES_ARE_DELAYED(X) \
2722 ((GET_CODE (X) == INSN \
2723 && GET_CODE (PATTERN (X)) != SEQUENCE \
2724 && GET_CODE (PATTERN (X)) != USE \
2725 && GET_CODE (PATTERN (X)) != CLOBBER \
2726 && get_attr_is_sfunc (X)))
2727
2728 \f
2729 /* Position Independent Code. */
2730
2731 /* We can't directly access anything that contains a symbol,
2732 nor can we indirect via the constant pool. */
2733 #define LEGITIMATE_PIC_OPERAND_P(X) \
2734 ((! nonpic_symbol_mentioned_p (X) \
2735 && (GET_CODE (X) != SYMBOL_REF \
2736 || ! CONSTANT_POOL_ADDRESS_P (X) \
2737 || ! nonpic_symbol_mentioned_p (get_pool_constant (X)))) \
2738 || (TARGET_SHMEDIA && GET_CODE (X) == LABEL_REF))
2739
2740 #define SYMBOLIC_CONST_P(X) \
2741 ((GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == LABEL_REF) \
2742 && nonpic_symbol_mentioned_p (X))
2743 \f
2744 /* Compute extra cost of moving data between one register class
2745 and another. */
2746
2747 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
2748 uses this information. Hence, the general register <-> floating point
2749 register information here is not used for SFmode. */
2750
2751 #define REGCLASS_HAS_GENERAL_REG(CLASS) \
2752 ((CLASS) == GENERAL_REGS || (CLASS) == R0_REGS \
2753 || (! TARGET_SHMEDIA && (CLASS) == SIBCALL_REGS))
2754
2755 #define REGCLASS_HAS_FP_REG(CLASS) \
2756 ((CLASS) == FP0_REGS || (CLASS) == FP_REGS \
2757 || (CLASS) == DF_REGS || (CLASS) == DF_HI_REGS)
2758
2759 #define REGISTER_MOVE_COST(MODE, SRCCLASS, DSTCLASS) \
2760 sh_register_move_cost ((MODE), (SRCCLASS), (DSTCLASS))
2761
2762 /* ??? Perhaps make MEMORY_MOVE_COST depend on a compiler option?  This
2763 would be so that people with slow memory systems could generate
2764 different code that does fewer memory accesses. */
2765
2766 /* A C expression for the cost of a branch instruction. A value of 1
2767 is the default; other values are interpreted relative to that.
2768 The SH1 does not have delay slots, hence we get a pipeline stall
2769 at every branch. The SH4 is superscalar, so the single delay slot
2770 is not sufficient to keep both pipelines filled. */
2771 #define BRANCH_COST(speed_p, predictable_p) \
2772 (TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1)
2773 \f
2774 /* Assembler output control. */
2775
2776 /* A C string constant describing how to begin a comment in the target
2777 assembler language. The compiler assumes that the comment will end at
2778 the end of the line. */
2779 #define ASM_COMMENT_START "!"
2780
2781 #define ASM_APP_ON ""
2782 #define ASM_APP_OFF ""
2783 #define FILE_ASM_OP "\t.file\n"
2784 #define SET_ASM_OP "\t.set\t"
2785
2786 /* How to change between sections. */
2787
2788 #define TEXT_SECTION_ASM_OP (TARGET_SHMEDIA32 ? "\t.section\t.text..SHmedia32,\"ax\"" : "\t.text")
2789 #define DATA_SECTION_ASM_OP "\t.data"
2790
2791 #if defined CRT_BEGIN || defined CRT_END
2792 /* Arrange for TEXT_SECTION_ASM_OP to be a compile-time constant. */
2793 # undef TEXT_SECTION_ASM_OP
2794 # if __SHMEDIA__ == 1 && __SH5__ == 32
2795 # define TEXT_SECTION_ASM_OP "\t.section\t.text..SHmedia32,\"ax\""
2796 # else
2797 # define TEXT_SECTION_ASM_OP "\t.text"
2798 # endif
2799 #endif
2800
2801
2802 /* If defined, a C expression whose value is a string containing the
2803 assembler operation to identify the following data as
2804 uninitialized global data. If not defined, and neither
2805 `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
2806 uninitialized global data will be output in the data section if
2807 `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
2808 used. */
2809 #ifndef BSS_SECTION_ASM_OP
2810 #define BSS_SECTION_ASM_OP "\t.section\t.bss"
2811 #endif
2812
2813 /* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
2814 separate, explicit argument. If you define this macro, it is used
2815 in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
2816 handling the required alignment of the variable. The alignment is
2817 specified as the number of bits.
2818
2819 Try to use function `asm_output_aligned_bss' defined in file
2820 `varasm.c' when defining this macro. */
2821 #ifndef ASM_OUTPUT_ALIGNED_BSS
2822 #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
2823 asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
2824 #endif
2825
2826 /* Define this so that jump tables go in the same section as the current
2827    function, which could be text or a user-defined section.  */
2828 #define JUMP_TABLES_IN_TEXT_SECTION 1
2829
2830 #undef DO_GLOBAL_CTORS_BODY
2831 #define DO_GLOBAL_CTORS_BODY \
2832 { \
2833 typedef void (*pfunc) (void); \
2834 extern pfunc __ctors[]; \
2835 extern pfunc __ctors_end[]; \
2836 pfunc *p; \
2837 for (p = __ctors_end; p > __ctors; ) \
2838 { \
2839 (*--p)(); \
2840 } \
2841 }
2842
2843 #undef DO_GLOBAL_DTORS_BODY
2844 #define DO_GLOBAL_DTORS_BODY \
2845 { \
2846 typedef void (*pfunc) (void); \
2847 extern pfunc __dtors[]; \
2848 extern pfunc __dtors_end[]; \
2849 pfunc *p; \
2850 for (p = __dtors; p < __dtors_end; p++) \
2851 { \
2852 (*p)(); \
2853 } \
2854 }
2855
2856 #define ASM_OUTPUT_REG_PUSH(file, v) \
2857 { \
2858 if (TARGET_SHMEDIA) \
2859 { \
2860 fprintf ((file), "\taddi.l\tr15,-8,r15\n"); \
2861 fprintf ((file), "\tst.q\tr15,0,r%d\n", (v)); \
2862 } \
2863 else \
2864 fprintf ((file), "\tmov.l\tr%d,@-r15\n", (v)); \
2865 }
2866
2867 #define ASM_OUTPUT_REG_POP(file, v) \
2868 { \
2869 if (TARGET_SHMEDIA) \
2870 { \
2871 fprintf ((file), "\tld.q\tr15,0,r%d\n", (v)); \
2872 fprintf ((file), "\taddi.l\tr15,8,r15\n"); \
2873 } \
2874 else \
2875 fprintf ((file), "\tmov.l\t@r15+,r%d\n", (v)); \
2876 }
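/* Example output (straight from the format strings above), for register r4:
   SHmedia pushes with "addi.l r15,-8,r15" + "st.q r15,0,r4" and pops with
   "ld.q r15,0,r4" + "addi.l r15,8,r15"; other SH targets use
   "mov.l r4,@-r15" and "mov.l @r15+,r4".  */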
2877
2878 /* DBX register number for a given compiler register number. */
2879 /* GDB has FPUL at 23 and FP0 at 25, so we must add one to all FP registers
2880 to match GDB. */
2881 /* svr4.h undefines this macro, yet we really want to use the same numbers
2882 for coff as for elf, so we go via another macro: SH_DBX_REGISTER_NUMBER. */
2883 /* expand_builtin_init_dwarf_reg_sizes uses this to test if a
2884 register exists, so we should return -1 for invalid register numbers. */
2885 #define DBX_REGISTER_NUMBER(REGNO) SH_DBX_REGISTER_NUMBER (REGNO)
2886
2887 /* SHcompact PR_REG used to use the encoding 241, and SHcompact FP registers
2888 used to use the encodings 245..260, but that doesn't make sense:
2889 PR_REG and PR_MEDIA_REG are actually the same register, and likewise
2890 the FP registers stay the same when switching between compact and media
2891 mode. Hence, we also need to use the same DWARF frame columns.
2892 Likewise, we need to support unwind information for SHmedia registers
2893 even in compact code. */
2894 #define SH_DBX_REGISTER_NUMBER(REGNO) \
2895 (IN_RANGE ((REGNO), \
2896 (unsigned HOST_WIDE_INT) FIRST_GENERAL_REG, \
2897 FIRST_GENERAL_REG + (TARGET_SH5 ? 63U :15U)) \
2898 ? ((unsigned) (REGNO) - FIRST_GENERAL_REG) \
2899 : ((int) (REGNO) >= FIRST_FP_REG \
2900 && ((int) (REGNO) \
2901 <= (FIRST_FP_REG + \
2902 ((TARGET_SH5 && TARGET_FPU_ANY) ? 63 : TARGET_SH2E ? 15 : -1)))) \
2903 ? ((unsigned) (REGNO) - FIRST_FP_REG \
2904 + (TARGET_SH5 ? 77 : 25)) \
2905 : XD_REGISTER_P (REGNO) \
2906 ? ((unsigned) (REGNO) - FIRST_XD_REG + (TARGET_SH5 ? 289 : 87)) \
2907 : TARGET_REGISTER_P (REGNO) \
2908 ? ((unsigned) (REGNO) - FIRST_TARGET_REG + 68) \
2909 : (REGNO) == PR_REG \
2910 ? (TARGET_SH5 ? 18 : 17) \
2911 : (REGNO) == PR_MEDIA_REG \
2912 ? (TARGET_SH5 ? 18 : (unsigned) -1) \
2913 : (REGNO) == GBR_REG \
2914 ? (TARGET_SH5 ? 238 : 18) \
2915 : (REGNO) == MACH_REG \
2916 ? (TARGET_SH5 ? 239 : 20) \
2917 : (REGNO) == MACL_REG \
2918 ? (TARGET_SH5 ? 240 : 21) \
2919 : (REGNO) == T_REG \
2920 ? (TARGET_SH5 ? 242 : 22) \
2921 : (REGNO) == FPUL_REG \
2922 ? (TARGET_SH5 ? 244 : 23) \
2923 : (REGNO) == FPSCR_REG \
2924 ? (TARGET_SH5 ? 243 : 24) \
2925 : (unsigned) -1)
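/* A few sample mappings implied by the expression above: on SH4, r0..r15
   map to 0..15, fr0 maps to 25 (hence the "add one" note above), PR to 17,
   T to 22, FPUL to 23 and FPSCR to 24; on SH5, PR/PR_MEDIA map to 18 and
   the FP registers start at column 77.  Anything else yields (unsigned) -1.  */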
2926
2927 /* This is how to output a reference to a symbol_ref. On SH5,
2928 references to non-code symbols must be preceded by `datalabel'. */
2929 #define ASM_OUTPUT_SYMBOL_REF(FILE,SYM) \
2930 do \
2931 { \
2932 if (TARGET_SH5 && !SYMBOL_REF_FUNCTION_P (SYM)) \
2933 fputs ("datalabel ", (FILE)); \
2934 assemble_name ((FILE), XSTR ((SYM), 0)); \
2935 } \
2936 while (0)
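/* E.g. on SH5 a reference to a hypothetical data symbol foo is printed as
   "datalabel foo"; function symbols, and all symbols on non-SH5 targets,
   are printed as plain "foo".  */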
2937
2938 /* This is how to output an assembler line
2939 that says to advance the location counter
2940 to a multiple of 2**LOG bytes. */
2941
2942 #define ASM_OUTPUT_ALIGN(FILE,LOG) \
2943 if ((LOG) != 0) \
2944 fprintf ((FILE), "\t.align %d\n", (LOG))
2945
2946 /* Globalizing directive for a label. */
2947 #define GLOBAL_ASM_OP "\t.global\t"
2948
2949 /* #define ASM_OUTPUT_CASE_END(STREAM,NUM,TABLE) */
2950
2951 /* Output a relative address table. */
2952
2953 #define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
2954 switch (GET_MODE (BODY)) \
2955 { \
2956 case SImode: \
2957 if (TARGET_SH5) \
2958 { \
2959 asm_fprintf ((STREAM), "\t.long\t%LL%d-datalabel %LL%d\n", \
2960 (VALUE), (REL)); \
2961 break; \
2962 } \
2963 asm_fprintf ((STREAM), "\t.long\t%LL%d-%LL%d\n", (VALUE),(REL)); \
2964 break; \
2965 case HImode: \
2966 if (TARGET_SH5) \
2967 { \
2968 asm_fprintf ((STREAM), "\t.word\t%LL%d-datalabel %LL%d\n", \
2969 (VALUE), (REL)); \
2970 break; \
2971 } \
2972 asm_fprintf ((STREAM), "\t.word\t%LL%d-%LL%d\n", (VALUE),(REL)); \
2973 break; \
2974 case QImode: \
2975 if (TARGET_SH5) \
2976 { \
2977 asm_fprintf ((STREAM), "\t.byte\t%LL%d-datalabel %LL%d\n", \
2978 (VALUE), (REL)); \
2979 break; \
2980 } \
2981 asm_fprintf ((STREAM), "\t.byte\t%LL%d-%LL%d\n", (VALUE),(REL)); \
2982 break; \
2983 default: \
2984 break; \
2985 }
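/* Sample output (assuming the usual ".L" local label prefix): an HImode
   entry for labels 5 and 3 comes out as "\t.word\t.L5-.L3", or as
   "\t.word\t.L5-datalabel .L3" on SH5; SImode and QImode entries use
   .long and .byte respectively.  */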
2986
2987 /* Output an absolute table element. */
2988
2989 #define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
2990 if (! optimize || TARGET_BIGTABLE) \
2991 asm_fprintf ((STREAM), "\t.long\t%LL%d\n", (VALUE)); \
2992 else \
2993 asm_fprintf ((STREAM), "\t.word\t%LL%d\n", (VALUE));
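/* E.g. (again assuming a ".L" prefix) label 42 is emitted as "\t.long\t.L42"
   when not optimizing or with -mbigtable, and as "\t.word\t.L42" otherwise.  */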
2994
2995 \f
2996 /* A C statement to be executed just prior to the output of
2997 assembler code for INSN, to modify the extracted operands so
2998 they will be output differently.
2999
3000 Here the argument OPVEC is the vector containing the operands
3001 extracted from INSN, and NOPERANDS is the number of elements of
3002 the vector which contain meaningful data for this insn.
3003 The contents of this vector are what will be used to convert the insn
3004 template into assembler code, so you can change the assembler output
3005 by changing the contents of the vector. */
3006
3007 #define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
3008 final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
3009
3010 /* Print operand X (an rtx) in assembler syntax to file FILE.
3011 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
3012 For `%' followed by punctuation, CODE is the punctuation and X is null. */
3013
3014 #define PRINT_OPERAND(STREAM, X, CODE) print_operand ((STREAM), (X), (CODE))
3015
3016 /* Print a memory address as an operand to reference that memory location. */
3017
3018 #define PRINT_OPERAND_ADDRESS(STREAM,X) print_operand_address ((STREAM), (X))
3019
3020 #define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
3021 ((CHAR) == '.' || (CHAR) == '#' || (CHAR) == '@' || (CHAR) == ',' \
3022 || (CHAR) == '$' || (CHAR) == '\'' || (CHAR) == '>')
3023
3024 /* Recognize machine-specific patterns that may appear within
3025 constants. Used for PIC-specific UNSPECs. */
3026 #define OUTPUT_ADDR_CONST_EXTRA(STREAM, X, FAIL) \
3027 do \
3028 if (GET_CODE (X) == UNSPEC) \
3029 { \
3030 switch (XINT ((X), 1)) \
3031 { \
3032 case UNSPEC_DATALABEL: \
3033 fputs ("datalabel ", (STREAM)); \
3034 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3035 break; \
3036 case UNSPEC_PIC: \
3037 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */ \
3038 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3039 break; \
3040 case UNSPEC_GOT: \
3041 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3042 fputs ("@GOT", (STREAM)); \
3043 break; \
3044 case UNSPEC_GOTOFF: \
3045 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3046 fputs ("@GOTOFF", (STREAM)); \
3047 break; \
3048 case UNSPEC_PLT: \
3049 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3050 fputs ("@PLT", (STREAM)); \
3051 break; \
3052 case UNSPEC_GOTPLT: \
3053 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3054 fputs ("@GOTPLT", (STREAM)); \
3055 break; \
3056 case UNSPEC_DTPOFF: \
3057 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3058 fputs ("@DTPOFF", (STREAM)); \
3059 break; \
3060 case UNSPEC_GOTTPOFF: \
3061 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3062 fputs ("@GOTTPOFF", (STREAM)); \
3063 break; \
3064 case UNSPEC_TPOFF: \
3065 output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
3066 fputs ("@TPOFF", (STREAM)); \
3067 break; \
3068 case UNSPEC_CALLER: \
3069 { \
3070 char name[32]; \
3071 /* LPCS stands for Label for PIC Call Site. */ \
3072 ASM_GENERATE_INTERNAL_LABEL \
3073 (name, "LPCS", INTVAL (XVECEXP ((X), 0, 0))); \
3074 assemble_name ((STREAM), name); \
3075 } \
3076 break; \
3077 case UNSPEC_EXTRACT_S16: \
3078 case UNSPEC_EXTRACT_U16: \
3079 { \
3080 rtx val, shift; \
3081 \
3082 val = XVECEXP (X, 0, 0); \
3083 shift = XVECEXP (X, 0, 1); \
3084 fputc ('(', STREAM); \
3085 if (shift != const0_rtx) \
3086 fputc ('(', STREAM); \
3087 if (GET_CODE (val) == CONST \
3088 || GET_RTX_CLASS (GET_CODE (val)) != RTX_OBJ) \
3089 { \
3090 fputc ('(', STREAM); \
3091 output_addr_const (STREAM, val); \
3092 fputc (')', STREAM); \
3093 } \
3094 else \
3095 output_addr_const (STREAM, val); \
3096 if (shift != const0_rtx) \
3097 { \
3098 fputs (" >> ", STREAM); \
3099 output_addr_const (STREAM, shift); \
3100 fputc (')', STREAM); \
3101 } \
3102 fputs (" & 65535)", STREAM); \
3103 } \
3104 break; \
3105 case UNSPEC_SYMOFF: \
3106 output_addr_const (STREAM, XVECEXP (X, 0, 0)); \
3107 fputc ('-', STREAM); \
3108 if (GET_CODE (XVECEXP (X, 0, 1)) == CONST) \
3109 { \
3110 fputc ('(', STREAM); \
3111 output_addr_const (STREAM, XVECEXP (X, 0, 1)); \
3112 fputc (')', STREAM); \
3113 } \
3114 else \
3115 output_addr_const (STREAM, XVECEXP (X, 0, 1)); \
3116 break; \
3117 case UNSPEC_PCREL_SYMOFF: \
3118 output_addr_const (STREAM, XVECEXP (X, 0, 0)); \
3119 fputs ("-(", STREAM); \
3120 output_addr_const (STREAM, XVECEXP (X, 0, 1)); \
3121 fputs ("-.)", STREAM); \
3122 break; \
3123 default: \
3124 goto FAIL; \
3125 } \
3126 break; \
3127 } \
3128 else \
3129 goto FAIL; \
3130 while (0)
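/* For example (per the cases above): a GOT reference to a hypothetical
   symbol foo prints as "foo@GOT", a GOTOFF reference as "foo@GOTOFF", and
   an UNSPEC_PCREL_SYMOFF of foo and bar prints as "foo-(bar-.)".  */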
3131
3132 \f
3133 extern struct rtx_def *sh_compare_op0;
3134 extern struct rtx_def *sh_compare_op1;
3135
3136 /* Which processor to schedule for. The elements of the enumeration must
3137 match exactly the cpu attribute in the sh.md file. */
3138
3139 enum processor_type {
3140 PROCESSOR_SH1,
3141 PROCESSOR_SH2,
3142 PROCESSOR_SH2E,
3143 PROCESSOR_SH2A,
3144 PROCESSOR_SH3,
3145 PROCESSOR_SH3E,
3146 PROCESSOR_SH4,
3147 PROCESSOR_SH4A,
3148 PROCESSOR_SH5
3149 };
3150
3151 #define sh_cpu_attr ((enum attr_cpu)sh_cpu)
3152 extern enum processor_type sh_cpu;
3153
3154 extern int optimize; /* needed for gen_casesi. */
3155
3156 enum mdep_reorg_phase_e
3157 {
3158 SH_BEFORE_MDEP_REORG,
3159 SH_INSERT_USES_LABELS,
3160 SH_SHORTEN_BRANCHES0,
3161 SH_FIXUP_PCLOAD,
3162 SH_SHORTEN_BRANCHES1,
3163 SH_AFTER_MDEP_REORG
3164 };
3165
3166 extern enum mdep_reorg_phase_e mdep_reorg_phase;
3167
3168 /* Handle the Renesas compiler's pragmas. */
3169 #define REGISTER_TARGET_PRAGMAS() do { \
3170 c_register_pragma (0, "interrupt", sh_pr_interrupt); \
3171 c_register_pragma (0, "trapa", sh_pr_trapa); \
3172 c_register_pragma (0, "nosave_low_regs", sh_pr_nosave_low_regs); \
3173 } while (0)
3174
3175 extern tree sh_deferred_function_attributes;
3176 extern tree *sh_deferred_function_attributes_tail;
3177
3178 /* Set when processing a function with the interrupt attribute. */
3179
3180 extern int current_function_interrupt;
3181
3182 \f
3183 /* Instructions with unfilled delay slots take up an
3184 extra two bytes for the nop in the delay slot.
3185 sh-dsp parallel processing insns are four bytes long. */
3186
3187 #define ADJUST_INSN_LENGTH(X, LENGTH) \
3188 (LENGTH) += sh_insn_length_adjustment (X);
3189 \f
3190 /* Define this macro if it is advisable to hold scalars in registers
3191 in a wider mode than that declared by the program. In such cases,
3192 the value is constrained to be within the bounds of the declared
3193 type, but kept valid in the wider mode. The signedness of the
3194 extension may differ from that of the type.
3195
3196 Leaving UNSIGNEDP unchanged gives better code than always setting it
3197 to 0, even though we have only signed char and short load
3198 instructions.
3199 #define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
3200 if (GET_MODE_CLASS (MODE) == MODE_INT \
3201 && GET_MODE_SIZE (MODE) < 4/* ! UNITS_PER_WORD */)\
3202 (UNSIGNEDP) = ((MODE) == SImode ? 0 : (UNSIGNEDP)), \
3203 (MODE) = (TARGET_SH1 ? SImode \
3204 : TARGET_SHMEDIA32 ? SImode : DImode);
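/* E.g. a char or short scalar kept in a register is widened to SImode on
   SH1..SH4 and on 32-bit SHmedia, and to DImode on 64-bit SHmedia, with its
   original signedness preserved as described above.  */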
3205
3206 #define MAX_FIXED_MODE_SIZE (TARGET_SH5 ? 128 : 64)
3207
3208 #define SIDI_OFF (TARGET_LITTLE_ENDIAN ? 0 : 4)
3209
3210 /* ??? Define ACCUMULATE_OUTGOING_ARGS? This is more efficient than pushing
3211 and popping arguments. However, we do have push/pop instructions, and
3212 rather limited offsets (4 bits) in load/store instructions, so it isn't
3213 clear if this would give better code. If implemented, we should check
3214 for compatibility problems.
3215
3216 #define SH_DYNAMIC_SHIFT_COST \
3217 (TARGET_HARD_SH4 ? 1 : TARGET_SH3 ? (TARGET_SMALLCODE ? 1 : 2) : 20)
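/* I.e. a dynamic shift costs 1 on the hardware SH4, 1 or 2 on SH3
   (depending on whether we optimize for size), and a prohibitive 20 on
   everything else.  */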
3218
3219
3220 #define NUM_MODES_FOR_MODE_SWITCHING { FP_MODE_NONE }
3221
3222 #define OPTIMIZE_MODE_SWITCHING(ENTITY) (TARGET_SH4 || TARGET_SH2A_DOUBLE)
3223
3224 #define ACTUAL_NORMAL_MODE(ENTITY) \
3225 (TARGET_FPU_SINGLE ? FP_MODE_SINGLE : FP_MODE_DOUBLE)
3226
3227 #define NORMAL_MODE(ENTITY) \
3228 (sh_cfun_interrupt_handler_p () \
3229 ? (TARGET_FMOVD ? FP_MODE_DOUBLE : FP_MODE_NONE) \
3230 : ACTUAL_NORMAL_MODE (ENTITY))
3231
3232 #define MODE_ENTRY(ENTITY) NORMAL_MODE (ENTITY)
3233
3234 #define MODE_EXIT(ENTITY) \
3235 (sh_cfun_attr_renesas_p () ? FP_MODE_NONE : NORMAL_MODE (ENTITY))
3236
3237 #define EPILOGUE_USES(REGNO) ((TARGET_SH2E || TARGET_SH4) \
3238 && (REGNO) == FPSCR_REG)
3239
3240 #define MODE_NEEDED(ENTITY, INSN) \
3241 (recog_memoized (INSN) >= 0 \
3242 ? get_attr_fp_mode (INSN) \
3243 : FP_MODE_NONE)
3244
3245 #define MODE_AFTER(MODE, INSN) \
3246 (TARGET_HITACHI \
3247 && recog_memoized (INSN) >= 0 \
3248 && get_attr_fp_set (INSN) != FP_SET_NONE \
3249 ? (int) get_attr_fp_set (INSN) \
3250 : (MODE))
3251
3252 #define MODE_PRIORITY_TO_MODE(ENTITY, N) \
3253 ((TARGET_FPU_SINGLE != 0) ^ (N) ? FP_MODE_SINGLE : FP_MODE_DOUBLE)
3254
3255 #define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
3256 fpscr_set_from_mem ((MODE), (HARD_REGS_LIVE))
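/* An editor's summary of the hooks above (hedged, not from the original
   comments): when OPTIMIZE_MODE_SWITCHING is true, the mode-switching pass
   asks MODE_NEEDED for each insn's required FPSCR precision (taken from the
   insn's fp_mode attribute) and, wherever a change is required, calls
   EMIT_MODE_SET, i.e. fpscr_set_from_mem, to emit the FPSCR update.  */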
3257
3258 #define MD_CAN_REDIRECT_BRANCH(INSN, SEQ) \
3259 sh_can_redirect_branch ((INSN), (SEQ))
3260
3261 #define DWARF_FRAME_RETURN_COLUMN \
3262 (TARGET_SH5 ? DWARF_FRAME_REGNUM (PR_MEDIA_REG) : DWARF_FRAME_REGNUM (PR_REG))
3263
3264 #define EH_RETURN_DATA_REGNO(N) \
3265 ((N) < 4 ? (N) + (TARGET_SH5 ? 2U : 4U) : INVALID_REGNUM)
3266
3267 #define EH_RETURN_STACKADJ_REGNO STATIC_CHAIN_REGNUM
3268 #define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, EH_RETURN_STACKADJ_REGNO)
3269
3270 /* We have to distinguish between code and data, so that we apply
3271 datalabel where and only where appropriate. Use sdataN for data. */
3272 #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
3273 ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
3274 | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
3275 | ((CODE) ? 0 : (TARGET_SHMEDIA64 ? DW_EH_PE_sdata8 : DW_EH_PE_sdata4)))
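/* E.g. with -fPIC a pointer to a global is encoded as
   DW_EH_PE_indirect | DW_EH_PE_pcrel; data (non-code) pointers additionally
   get sdata4, or sdata8 on 64-bit SHmedia, per the expression above.  */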
3276
3277 /* Handle special EH pointer encodings. Absolute, pc-relative, and
3278 indirect are handled automatically. */
3279 #define ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX(FILE, ENCODING, SIZE, ADDR, DONE) \
3280 do { \
3281 if (((ENCODING) & 0xf) != DW_EH_PE_sdata4 \
3282 && ((ENCODING) & 0xf) != DW_EH_PE_sdata8) \
3283 { \
3284 gcc_assert (GET_CODE (ADDR) == SYMBOL_REF); \
3285 SYMBOL_REF_FLAGS (ADDR) |= SYMBOL_FLAG_FUNCTION; \
3286 if (0) goto DONE; \
3287 } \
3288 } while (0)
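/* Reading of the above (an editor's note): for anything other than the
   sdata4/sdata8 encodings the address must be a SYMBOL_REF, and it is
   flagged as a function so that ASM_OUTPUT_SYMBOL_REF will not prepend
   `datalabel' to it; the "if (0) goto DONE" presumably just keeps the DONE
   label referenced.  */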
3289
3290 #if (defined CRT_BEGIN || defined CRT_END) && ! __SHMEDIA__
3291 /* The SH constant pool breaks the devices crtstuff.c uses to control the
3292 section in which code resides, so we have to write this as asm code. */
3293 #define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
3294 asm (SECTION_OP "\n\
3295 mov.l 1f,r1\n\
3296 mova 2f,r0\n\
3297 braf r1\n\
3298 lds r0,pr\n\
3299 0: .p2align 2\n\
3300 1: .long " USER_LABEL_PREFIX #FUNC " - 0b\n\
3301 2:\n" TEXT_SECTION_ASM_OP);
3302 #endif /* (defined CRT_BEGIN || defined CRT_END) && ! __SHMEDIA__ */
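/* How the sequence above works (an editor's gloss): "mov.l 1f,r1" loads the
   PC-relative offset of FUNC stored at the literal 1:, "mova 2f,r0" puts the
   address of the return point (label 2) in r0, and "braf r1" branches by
   that offset, with the "lds r0,pr" in its delay slot installing the return
   address.  */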
3303
3304 /* FIXME: middle-end support for highpart optimizations is missing. */
3305 #define high_life_started reload_in_progress
3306
3307 #endif /* ! GCC_SH_H */