/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch name,  tune  arch */
  { (char *)0, "default", 1, 1 },
  { (char *)0, "-mcpu=", 1, 1 },
  { (char *)0, "-mtune=", 1, 0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
691 { "niagara2", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 2
                      : (sparc_cpu == PROCESSOR_ULTRASPARC3
                         ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_ULTRASPARC3
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 64 : 32));
}
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
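
/* Illustrative note (added commentary, not in the original sources):
   these are exactly the six comparisons against zero that the V9
   branch-on-register-contents instructions (brz, brlez, brlz, brnz,
   brgz, brgez) and the matching movr conditional moves encode;
   unsigned comparisons have no such encoding, hence their absence.  */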

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
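
/* Worked example for the three predicates above (illustrative
   commentary, not in the original sources): 1.0f has bit pattern
   0x3f800000, whose low 10 bits are clear, so it fails
   SPARC_SIMM13_P but passes SPARC_SETHI_P: fp_sethi_p accepts it
   and a single sethi loads it.  A pattern in [-4096, 4095] passes
   SPARC_SIMM13_P: fp_mov_p accepts it and a single mov loads it.
   Anything else, e.g. 1.1f = 0x3f8ccccd, needs the two-insn
   sethi/or sequence and is accepted by fp_high_losum_p.  */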

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
        {
          addend = XEXP (XEXP (sym, 0), 1);
          sym = XEXP (XEXP (sym, 0), 0);
        }

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
        {
          sym = gen_rtx_PLUS (mode, sym, addend);
          sym = force_operand (sym, operands[0]);
        }
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
         gap can be different from the object-file gap.  We therefore can't
         assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
         are absolutely sure that X is in the same segment as the GOT.
         Unfortunately, the flexibility of linker scripts means that we
         can't be sure of that in general, so assume that _G_O_T_-relative
         accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                mode,
                                                (reload_in_progress ?
                                                 operands[0] :
                                                 NULL_RTX));
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here; the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
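
/* Worked example (illustrative commentary, not in the original
   sources): for op1 = 0x12345678 the CONST_INT path above emits the
   equivalent of

	sethi	%hi(0x12345678), %temp	! %temp = 0x12345400
	or	%temp, 0x278, %op0	! %op0  = 0x12345678

   i.e. a SET of the constant with its low 10 bits masked off,
   followed by an IOR of those low bits (0x678 & 0x3ff = 0x278).  */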

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi   %hi(symbol), %temp1
                        add     %temp1, EMBMEDANY_BASE_REG, %temp2
                        or      %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi   %uhi(symbol), %temp1
                        sethi   %hi(symbol), %temp2
                        or      %temp1, %ulo(symbol), %temp3
                        sllx    %temp3, 32, %temp4
                        or      %temp4, %temp2, %temp5
                        or      %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not allowed to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
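
/* Example of the helpers above (illustrative commentary, not in the
   original sources): gen_safe_HIGH64 (reg, 0x12345678) yields
   (set reg (const_int 0x12345400)), the constant with its low 10
   bits masked off; that rtx matches a plain movdi pattern yet still
   assembles to a single sethi.  */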

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}
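
/* Illustrative quick1 sequences (added commentary, not in the
   original sources):

     0x0000000012345678 (is_neg == 0):
	sethi	%hi(0x12345678), %temp
	or	%temp, 0x278, %op0

     -0x12345678 = 0xffffffffedcba988 (is_neg != 0):
	sethi	%hi(0x12345677), %temp	! %hi(~low_bits)
	xor	%temp, -0x278, %op0	! -0x400 | (low_bits & 0x3ff)

   In the negated case the XOR fills in the low 10 bits and flips
   bits 63..32 in one go, yielding the sign-extended value.  */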

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}
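
/* Illustrative quick2 sequence (added commentary, not in the
   original sources): for high_bits = 0x12345678, low_immediate =
   0x2ab and shift_count = 32 this emits

	sethi	%hi(0x12345678), %temp
	or	%temp, 0x278, %op0
	sllx	%op0, 32, %op0
	or	%op0, 0x2ab, %op0	! %op0 = 0x12345678000002ab  */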

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
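
/* Illustrative longway shape (added commentary, not in the original
   sources), outside of reload, for a constant with no exploitable
   structure such as 0x123456789abcdef0:

	sethi	%hi(0x12345678), %temp
	or	%temp, 0x278, %sub_temp
	sllx	%sub_temp, 32, %temp4
	sethi	%hi(0x9abcdef0), %temp2
	or	%temp2, 0x2f0, %temp3
	add	%temp4, %temp3, %op0

   Within reload no fresh pseudos are available, so the else branch
   instead folds the low word into op0 in 12/12/8-bit slices with
   alternating sllx/or, skipping slices that are zero.  */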

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
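
/* Worked example (added commentary, not in the original sources):
   for the constant 0x00000000007f8000 (high_bits = 0, low_bits =
   0x007f8000) the scans above find lowest_bit_set = 15,
   highest_bit_set = 22 and all_bits_between_are_set = 1: a single
   contiguous run of 8 set bits.  */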

static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
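
/* Summary of the cases above (added commentary, not in the original
   sources): a constant is 2-insn material when (a) its upper word is
   0 or 0xffffffff, so a sethi/or or sethi/xor pair on the low word
   suffices, (b) its set bits form one contiguous run touching bit 63
   or bit 0, so "mov -1" plus a single shift builds it, or (c) all of
   its set bits fit in a window narrow enough for sethi's 22-bit
   immediate, so sethi plus a single shift builds it.  */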

static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
                                                        unsigned HOST_WIDE_INT,
                                                        int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
                          unsigned HOST_WIDE_INT low_bits,
                          int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
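
/* Worked example (added commentary, not in the original sources):
   with high_bits = 0, low_bits = 0x007f8000, lowest_bit_set = 15 and
   shift = 10 this returns (0x007f8000 >> 15) << 10 = 0x0003fc00,
   the run of bits renormalized to start at bit 10 so that a sethi
   can materialize it; the caller then shifts the result left by
   15 - 10 = 5 bits to put the run back in place.  */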
1713
1714 /* Here we are sure to be arch64 and this is an integer constant
1715 being loaded into a register. Emit the most efficient
1716 insn sequence possible. Detection of all the 1-insn cases
1717 has been done already. */
1718 void
1719 sparc_emit_set_const64 (rtx op0, rtx op1)
1720 {
1721 unsigned HOST_WIDE_INT high_bits, low_bits;
1722 int lowest_bit_set, highest_bit_set;
1723 int all_bits_between_are_set;
1724 rtx temp = 0;
1725
1726 /* Sanity check that we know what we are working with. */
1727 gcc_assert (TARGET_ARCH64
1728 && (GET_CODE (op0) == SUBREG
1729 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1730
1731 if (reload_in_progress || reload_completed)
1732 temp = op0;
1733
1734 if (GET_CODE (op1) != CONST_INT)
1735 {
1736 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1737 return;
1738 }
1739
1740 if (! temp)
1741 temp = gen_reg_rtx (DImode);
1742
1743 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1744 low_bits = (INTVAL (op1) & 0xffffffff);
1745
1746 /* low_bits bits 0 --> 31
1747 high_bits bits 32 --> 63 */
1748
1749 analyze_64bit_constant (high_bits, low_bits,
1750 &highest_bit_set, &lowest_bit_set,
1751 &all_bits_between_are_set);
1752
1753 /* First try for a 2-insn sequence. */
1754
1755 /* These situations are preferred because the optimizer can
1756 * do more things with them:
1757 * 1) mov -1, %reg
1758 * sllx %reg, shift, %reg
1759 * 2) mov -1, %reg
1760 * srlx %reg, shift, %reg
1761 * 3) mov some_small_const, %reg
1762 * sllx %reg, shift, %reg
1763 */
1764 if (((highest_bit_set == 63
1765 || lowest_bit_set == 0)
1766 && all_bits_between_are_set != 0)
1767 || ((highest_bit_set - lowest_bit_set) < 12))
1768 {
1769 HOST_WIDE_INT the_const = -1;
1770 int shift = lowest_bit_set;
1771
1772 if ((highest_bit_set != 63
1773 && lowest_bit_set != 0)
1774 || all_bits_between_are_set == 0)
1775 {
1776 the_const =
1777 create_simple_focus_bits (high_bits, low_bits,
1778 lowest_bit_set, 0);
1779 }
1780 else if (lowest_bit_set == 0)
1781 shift = -(63 - highest_bit_set);
1782
1783 gcc_assert (SPARC_SIMM13_P (the_const));
1784 gcc_assert (shift != 0);
1785
1786 emit_insn (gen_safe_SET64 (temp, the_const));
1787 if (shift > 0)
1788 emit_insn (gen_rtx_SET (VOIDmode,
1789 op0,
1790 gen_rtx_ASHIFT (DImode,
1791 temp,
1792 GEN_INT (shift))));
1793 else if (shift < 0)
1794 emit_insn (gen_rtx_SET (VOIDmode,
1795 op0,
1796 gen_rtx_LSHIFTRT (DImode,
1797 temp,
1798 GEN_INT (-shift))));
1799 return;
1800 }
1801
1802      /* Now a range of 22 or fewer bits set somewhere.
1803 * 1) sethi %hi(focus_bits), %reg
1804 * sllx %reg, shift, %reg
1805 * 2) sethi %hi(focus_bits), %reg
1806 * srlx %reg, shift, %reg
1807 */
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1809 {
1810 unsigned HOST_WIDE_INT focus_bits =
1811 create_simple_focus_bits (high_bits, low_bits,
1812 lowest_bit_set, 10);
1813
1814 gcc_assert (SPARC_SETHI_P (focus_bits));
1815 gcc_assert (lowest_bit_set != 10);
1816
1817 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1818
1819 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1820 if (lowest_bit_set < 10)
1821 emit_insn (gen_rtx_SET (VOIDmode,
1822 op0,
1823 gen_rtx_LSHIFTRT (DImode, temp,
1824 GEN_INT (10 - lowest_bit_set))));
1825 else if (lowest_bit_set > 10)
1826 emit_insn (gen_rtx_SET (VOIDmode,
1827 op0,
1828 gen_rtx_ASHIFT (DImode, temp,
1829 GEN_INT (lowest_bit_set - 10))));
1830 return;
1831 }
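  /* Illustrative case for the branch above: 0x00000003ffffc000, whose set
     bits occupy positions 14 to 33, is too wide for the simm13 path but
     within sethi's reach once focused.  Here focus_bits == 0x3ffffc00 and
     lowest_bit_set == 14, so we emit "sethi %hi(0x3ffffc00), %reg"
     followed by "sllx %reg, 4, %reg".  */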
1832
1833 /* 1) sethi %hi(low_bits), %reg
1834 * or %reg, %lo(low_bits), %reg
1835 * 2) sethi %hi(~low_bits), %reg
1836 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1837 */
1838 if (high_bits == 0
1839 || high_bits == 0xffffffff)
1840 {
1841 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1842 (high_bits == 0xffffffff));
1843 return;
1844 }
1845
1846 /* Now, try 3-insn sequences. */
1847
1848 /* 1) sethi %hi(high_bits), %reg
1849 * or %reg, %lo(high_bits), %reg
1850 * sllx %reg, 32, %reg
1851 */
1852 if (low_bits == 0)
1853 {
1854 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1855 return;
1856 }
1857
1858 /* We may be able to do something quick
1859 when the constant is negated, so try that. */
1860 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1861 (~low_bits) & 0xfffffc00))
1862 {
1863 /* NOTE: The trailing bits get XOR'd so we need the
1864 non-negated bits, not the negated ones. */
1865 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1866
1867 if ((((~high_bits) & 0xffffffff) == 0
1868 && ((~low_bits) & 0x80000000) == 0)
1869 || (((~high_bits) & 0xffffffff) == 0xffffffff
1870 && ((~low_bits) & 0x80000000) != 0))
1871 {
1872 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1873
1874 if ((SPARC_SETHI_P (fast_int)
1875 && (~high_bits & 0xffffffff) == 0)
1876 || SPARC_SIMM13_P (fast_int))
1877 emit_insn (gen_safe_SET64 (temp, fast_int));
1878 else
1879 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1880 }
1881 else
1882 {
1883 rtx negated_const;
1884 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1885 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1886 sparc_emit_set_const64 (temp, negated_const);
1887 }
1888
1889 /* If we are XOR'ing with -1, then we should emit a one's complement
1890 instead. This way the combiner will notice logical operations
1891 such as ANDN later on and substitute. */
1892 if (trailing_bits == 0x3ff)
1893 {
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_NOT (DImode, temp)));
1896 }
1897 else
1898 {
1899 emit_insn (gen_rtx_SET (VOIDmode,
1900 op0,
1901 gen_safe_XOR64 (temp,
1902 (-0x400 | trailing_bits))));
1903 }
1904 return;
1905 }
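  /* Illustrative case for the negation trick: 0xfffff0ffffffffff is hard
     to build directly, but its complement 0x00000f0000000000 is a 2-insn
     constant ("mov 0xf, %reg; sllx %reg, 40, %reg").  The trailing ten
     bits of the original are all ones, so a final one's complement
     finishes the load in three instructions.  */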
1906
1907 /* 1) sethi %hi(xxx), %reg
1908 * or %reg, %lo(xxx), %reg
1909 * sllx %reg, yyy, %reg
1910 *
1911 * ??? This is just a generalized version of the low_bits==0
1912 * thing above, FIXME...
1913 */
1914 if ((highest_bit_set - lowest_bit_set) < 32)
1915 {
1916 unsigned HOST_WIDE_INT focus_bits =
1917 create_simple_focus_bits (high_bits, low_bits,
1918 lowest_bit_set, 0);
1919
1920 /* We can't get here in this state. */
1921 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1922
1923 /* So what we know is that the set bits straddle the
1924 middle of the 64-bit word. */
1925 sparc_emit_set_const64_quick2 (op0, temp,
1926 focus_bits, 0,
1927 lowest_bit_set);
1928 return;
1929 }
1930
1931 /* 1) sethi %hi(high_bits), %reg
1932 * or %reg, %lo(high_bits), %reg
1933 * sllx %reg, 32, %reg
1934 * or %reg, low_bits, %reg
1935 */
1936 if (SPARC_SIMM13_P(low_bits)
1937 && ((int)low_bits > 0))
1938 {
1939 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1940 return;
1941 }
1942
1943 /* The easiest way when all else fails, is full decomposition. */
1944 #if 0
1945 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1946 high_bits, low_bits, ~high_bits, ~low_bits);
1947 #endif
1948 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1949 }
1950 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1951
1952 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1953 return the mode to be used for the comparison. For floating-point,
1954 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1955 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1956 processing is needed. */
1957
1958 enum machine_mode
1959 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1960 {
1961 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1962 {
1963 switch (op)
1964 {
1965 case EQ:
1966 case NE:
1967 case UNORDERED:
1968 case ORDERED:
1969 case UNLT:
1970 case UNLE:
1971 case UNGT:
1972 case UNGE:
1973 case UNEQ:
1974 case LTGT:
1975 return CCFPmode;
1976
1977 case LT:
1978 case LE:
1979 case GT:
1980 case GE:
1981 return CCFPEmode;
1982
1983 default:
1984 gcc_unreachable ();
1985 }
1986 }
1987 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1988 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1989 {
1990 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1991 return CCX_NOOVmode;
1992 else
1993 return CC_NOOVmode;
1994 }
1995 else
1996 {
1997 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1998 return CCXmode;
1999 else
2000 return CCmode;
2001 }
2002 }
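/* Example (illustrative only): a 32-bit equality test against the result
   of a PLUS selects CC_NOOVmode above, recording that only condition
   codes which ignore the overflow bit are valid -- which is what allows
   the comparison to be folded later into a flag-setting "addcc".  */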
2003
2004 /* X and Y are two things to compare using CODE. Emit the compare insn and
2005 return the rtx for the cc reg in the proper mode. */
2006
2007 rtx
2008 gen_compare_reg (enum rtx_code code)
2009 {
2010 rtx x = sparc_compare_op0;
2011 rtx y = sparc_compare_op1;
2012 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2013 rtx cc_reg;
2014
2015 if (sparc_compare_emitted != NULL_RTX)
2016 {
2017 cc_reg = sparc_compare_emitted;
2018 sparc_compare_emitted = NULL_RTX;
2019 return cc_reg;
2020 }
2021
2022 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2023 fcc regs (cse can't tell they're really call clobbered regs and will
2024 remove a duplicate comparison even if there is an intervening function
2025 call - it will then try to reload the cc reg via an int reg which is why
2026 we need the movcc patterns). It is possible to provide the movcc
2027 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2028 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2029 to tell cse that CCFPE mode registers (even pseudos) are call
2030 clobbered. */
2031
2032 /* ??? This is an experiment. Rather than making changes to cse which may
2033 or may not be easy/clean, we do our own cse. This is possible because
2034 we will generate hard registers. Cse knows they're call clobbered (it
2035 doesn't know the same thing about pseudos). If we guess wrong, no big
2036 deal, but if we win, great! */
2037
2038 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2039 #if 1 /* experiment */
2040 {
2041 int reg;
2042 /* We cycle through the registers to ensure they're all exercised. */
2043 static int next_fcc_reg = 0;
2044 /* Previous x,y for each fcc reg. */
2045 static rtx prev_args[4][2];
2046
2047 /* Scan prev_args for x,y. */
2048 for (reg = 0; reg < 4; reg++)
2049 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2050 break;
2051 if (reg == 4)
2052 {
2053 reg = next_fcc_reg;
2054 prev_args[reg][0] = x;
2055 prev_args[reg][1] = y;
2056 next_fcc_reg = (next_fcc_reg + 1) & 3;
2057 }
2058 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2059 }
2060 #else
2061 cc_reg = gen_reg_rtx (mode);
2062 #endif /* ! experiment */
2063 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2064 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2065 else
2066 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2067
2068 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2069 gen_rtx_COMPARE (mode, x, y)));
2070
2071 return cc_reg;
2072 }
2073
2074 /* This function is used for v9 only.
2075 CODE is the code for an Scc's comparison.
2076 OPERANDS[0] is the target of the Scc insn.
2077 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2078 been generated yet).
2079
2080 This function is needed to turn
2081
2082 (set (reg:SI 110)
2083 (gt (reg:CCX 100 %icc)
2084 (const_int 0)))
2085 into
2086 (set (reg:SI 110)
2087 (gt:DI (reg:CCX 100 %icc)
2088 (const_int 0)))
2089
2090    I.e., the instruction recognizer needs to see the mode of the comparison to
2091 find the right instruction. We could use "gt:DI" right in the
2092 define_expand, but leaving it out allows us to handle DI, SI, etc.
2093
2094 We refer to the global sparc compare operands sparc_compare_op0 and
2095 sparc_compare_op1. */
2096
2097 int
2098 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2099 {
2100 if (! TARGET_ARCH64
2101 && (GET_MODE (sparc_compare_op0) == DImode
2102 || GET_MODE (operands[0]) == DImode))
2103 return 0;
2104
2105 /* Try to use the movrCC insns. */
2106 if (TARGET_ARCH64
2107 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2108 && sparc_compare_op1 == const0_rtx
2109 && v9_regcmp_p (compare_code))
2110 {
2111 rtx op0 = sparc_compare_op0;
2112 rtx temp;
2113
2114 /* Special case for op0 != 0. This can be done with one instruction if
2115 operands[0] == sparc_compare_op0. */
2116
2117 if (compare_code == NE
2118 && GET_MODE (operands[0]) == DImode
2119 && rtx_equal_p (op0, operands[0]))
2120 {
2121 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2122 gen_rtx_IF_THEN_ELSE (DImode,
2123 gen_rtx_fmt_ee (compare_code, DImode,
2124 op0, const0_rtx),
2125 const1_rtx,
2126 operands[0])));
2127 return 1;
2128 }
2129
2130 if (reg_overlap_mentioned_p (operands[0], op0))
2131 {
2132 /* Handle the case where operands[0] == sparc_compare_op0.
2133 We "early clobber" the result. */
2134 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2135 emit_move_insn (op0, sparc_compare_op0);
2136 }
2137
2138 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2139 if (GET_MODE (op0) != DImode)
2140 {
2141 temp = gen_reg_rtx (DImode);
2142 convert_move (temp, op0, 0);
2143 }
2144 else
2145 temp = op0;
2146 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2147 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2148 gen_rtx_fmt_ee (compare_code, DImode,
2149 temp, const0_rtx),
2150 const1_rtx,
2151 operands[0])));
2152 return 1;
2153 }
2154 else
2155 {
2156 operands[1] = gen_compare_reg (compare_code);
2157
2158 switch (GET_MODE (operands[1]))
2159 {
2160 case CCmode :
2161 case CCXmode :
2162 case CCFPEmode :
2163 case CCFPmode :
2164 break;
2165 default :
2166 gcc_unreachable ();
2167 }
2168 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2169 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2170 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2171 gen_rtx_fmt_ee (compare_code,
2172 GET_MODE (operands[1]),
2173 operands[1], const0_rtx),
2174 const1_rtx, operands[0])));
2175 return 1;
2176 }
2177 }
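/* Example (illustrative only): for "dest = (x != 0)" with x in a 64-bit
   register, the movrCC path above produces "mov 0, dest" followed by
   "movrnz x, 1, dest", using the v9 conditional move on register
   contents instead of a condition-code compare.  */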
2178
2179 /* Emit a conditional jump insn for the v9 architecture using comparison code
2180 CODE and jump target LABEL.
2181 This function exists to take advantage of the v9 brxx insns. */
2182
2183 void
2184 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2185 {
2186 gcc_assert (sparc_compare_emitted == NULL_RTX);
2187 emit_jump_insn (gen_rtx_SET (VOIDmode,
2188 pc_rtx,
2189 gen_rtx_IF_THEN_ELSE (VOIDmode,
2190 gen_rtx_fmt_ee (code, GET_MODE (op0),
2191 op0, const0_rtx),
2192 gen_rtx_LABEL_REF (VOIDmode, label),
2193 pc_rtx)));
2194 }
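/* Example (illustrative only): emit_v9_brxx_insn (NE, op0, label)
   produces RTL matching the v9 "brnz" instruction, which branches
   directly on the register contents with no separate compare.  */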
2195
2196 /* Generate a DFmode part of a hard TFmode register.
2197 REG is the TFmode hard register, LOW is 1 for the
2198    low 64 bits of the register and 0 otherwise.
2199 */
2200 rtx
2201 gen_df_reg (rtx reg, int low)
2202 {
2203 int regno = REGNO (reg);
2204
2205 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2206 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2207 return gen_rtx_REG (DFmode, regno);
2208 }
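/* Example (illustrative only): for a TFmode value in %f0 (regno 32) on
   this big-endian target, gen_df_reg (reg, 0) returns the DFmode high
   half in %f0, while gen_df_reg (reg, 1) returns the low half in %f2
   (regno 34).  */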
2209 \f
2210 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2211 Unlike normal calls, TFmode operands are passed by reference. It is
2212 assumed that no more than 3 operands are required. */
2213
2214 static void
2215 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2216 {
2217 rtx ret_slot = NULL, arg[3], func_sym;
2218 int i;
2219
2220 /* We only expect to be called for conversions, unary, and binary ops. */
2221 gcc_assert (nargs == 2 || nargs == 3);
2222
2223 for (i = 0; i < nargs; ++i)
2224 {
2225 rtx this_arg = operands[i];
2226 rtx this_slot;
2227
2228 /* TFmode arguments and return values are passed by reference. */
2229 if (GET_MODE (this_arg) == TFmode)
2230 {
2231 int force_stack_temp;
2232
2233 force_stack_temp = 0;
2234 if (TARGET_BUGGY_QP_LIB && i == 0)
2235 force_stack_temp = 1;
2236
2237 if (GET_CODE (this_arg) == MEM
2238 && ! force_stack_temp)
2239 this_arg = XEXP (this_arg, 0);
2240 else if (CONSTANT_P (this_arg)
2241 && ! force_stack_temp)
2242 {
2243 this_slot = force_const_mem (TFmode, this_arg);
2244 this_arg = XEXP (this_slot, 0);
2245 }
2246 else
2247 {
2248 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2249
2250 /* Operand 0 is the return value. We'll copy it out later. */
2251 if (i > 0)
2252 emit_move_insn (this_slot, this_arg);
2253 else
2254 ret_slot = this_slot;
2255
2256 this_arg = XEXP (this_slot, 0);
2257 }
2258 }
2259
2260 arg[i] = this_arg;
2261 }
2262
2263 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2264
2265 if (GET_MODE (operands[0]) == TFmode)
2266 {
2267 if (nargs == 2)
2268 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2269 arg[0], GET_MODE (arg[0]),
2270 arg[1], GET_MODE (arg[1]));
2271 else
2272 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2273 arg[0], GET_MODE (arg[0]),
2274 arg[1], GET_MODE (arg[1]),
2275 arg[2], GET_MODE (arg[2]));
2276
2277 if (ret_slot)
2278 emit_move_insn (operands[0], ret_slot);
2279 }
2280 else
2281 {
2282 rtx ret;
2283
2284 gcc_assert (nargs == 2);
2285
2286 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2287 GET_MODE (operands[0]), 1,
2288 arg[1], GET_MODE (arg[1]));
2289
2290 if (ret != operands[0])
2291 emit_move_insn (operands[0], ret);
2292 }
2293 }
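/* Example (illustrative only): a TFmode addition expands here into a
   call such as _Qp_add (&result, &a, &b) -- the result slot and both
   operands are passed by reference, and the value is copied back out of
   the stack temporary afterwards.  */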
2294
2295 /* Expand soft-float TFmode calls to sparc abi routines. */
2296
2297 static void
2298 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2299 {
2300 const char *func;
2301
2302 switch (code)
2303 {
2304 case PLUS:
2305 func = "_Qp_add";
2306 break;
2307 case MINUS:
2308 func = "_Qp_sub";
2309 break;
2310 case MULT:
2311 func = "_Qp_mul";
2312 break;
2313 case DIV:
2314 func = "_Qp_div";
2315 break;
2316 default:
2317 gcc_unreachable ();
2318 }
2319
2320 emit_soft_tfmode_libcall (func, 3, operands);
2321 }
2322
2323 static void
2324 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2325 {
2326 const char *func;
2327
2328 gcc_assert (code == SQRT);
2329 func = "_Qp_sqrt";
2330
2331 emit_soft_tfmode_libcall (func, 2, operands);
2332 }
2333
2334 static void
2335 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2336 {
2337 const char *func;
2338
2339 switch (code)
2340 {
2341 case FLOAT_EXTEND:
2342 switch (GET_MODE (operands[1]))
2343 {
2344 case SFmode:
2345 func = "_Qp_stoq";
2346 break;
2347 case DFmode:
2348 func = "_Qp_dtoq";
2349 break;
2350 default:
2351 gcc_unreachable ();
2352 }
2353 break;
2354
2355 case FLOAT_TRUNCATE:
2356 switch (GET_MODE (operands[0]))
2357 {
2358 case SFmode:
2359 func = "_Qp_qtos";
2360 break;
2361 case DFmode:
2362 func = "_Qp_qtod";
2363 break;
2364 default:
2365 gcc_unreachable ();
2366 }
2367 break;
2368
2369 case FLOAT:
2370 switch (GET_MODE (operands[1]))
2371 {
2372 case SImode:
2373 func = "_Qp_itoq";
2374 break;
2375 case DImode:
2376 func = "_Qp_xtoq";
2377 break;
2378 default:
2379 gcc_unreachable ();
2380 }
2381 break;
2382
2383 case UNSIGNED_FLOAT:
2384 switch (GET_MODE (operands[1]))
2385 {
2386 case SImode:
2387 func = "_Qp_uitoq";
2388 break;
2389 case DImode:
2390 func = "_Qp_uxtoq";
2391 break;
2392 default:
2393 gcc_unreachable ();
2394 }
2395 break;
2396
2397 case FIX:
2398 switch (GET_MODE (operands[0]))
2399 {
2400 case SImode:
2401 func = "_Qp_qtoi";
2402 break;
2403 case DImode:
2404 func = "_Qp_qtox";
2405 break;
2406 default:
2407 gcc_unreachable ();
2408 }
2409 break;
2410
2411 case UNSIGNED_FIX:
2412 switch (GET_MODE (operands[0]))
2413 {
2414 case SImode:
2415 func = "_Qp_qtoui";
2416 break;
2417 case DImode:
2418 func = "_Qp_qtoux";
2419 break;
2420 default:
2421 gcc_unreachable ();
2422 }
2423 break;
2424
2425 default:
2426 gcc_unreachable ();
2427 }
2428
2429 emit_soft_tfmode_libcall (func, 2, operands);
2430 }
2431
2432 /* Expand a hard-float tfmode operation. All arguments must be in
2433 registers. */
2434
2435 static void
2436 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2437 {
2438 rtx op, dest;
2439
2440 if (GET_RTX_CLASS (code) == RTX_UNARY)
2441 {
2442 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2443 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2444 }
2445 else
2446 {
2447 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2448 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2449 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2450 operands[1], operands[2]);
2451 }
2452
2453 if (register_operand (operands[0], VOIDmode))
2454 dest = operands[0];
2455 else
2456 dest = gen_reg_rtx (GET_MODE (operands[0]));
2457
2458 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2459
2460 if (dest != operands[0])
2461 emit_move_insn (operands[0], dest);
2462 }
2463
2464 void
2465 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2466 {
2467 if (TARGET_HARD_QUAD)
2468 emit_hard_tfmode_operation (code, operands);
2469 else
2470 emit_soft_tfmode_binop (code, operands);
2471 }
2472
2473 void
2474 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2475 {
2476 if (TARGET_HARD_QUAD)
2477 emit_hard_tfmode_operation (code, operands);
2478 else
2479 emit_soft_tfmode_unop (code, operands);
2480 }
2481
2482 void
2483 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2484 {
2485 if (TARGET_HARD_QUAD)
2486 emit_hard_tfmode_operation (code, operands);
2487 else
2488 emit_soft_tfmode_cvt (code, operands);
2489 }
2490 \f
2491 /* Return nonzero if a branch/jump/call instruction will be emitting
2492    a nop into its delay slot. */
2493
2494 int
2495 empty_delay_slot (rtx insn)
2496 {
2497 rtx seq;
2498
2499 /* If no previous instruction (should not happen), return true. */
2500 if (PREV_INSN (insn) == NULL)
2501 return 1;
2502
2503 seq = NEXT_INSN (PREV_INSN (insn));
2504 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2505 return 0;
2506
2507 return 1;
2508 }
2509
2510 /* Return nonzero if TRIAL can go into the call delay slot. */
2511
2512 int
2513 tls_call_delay (rtx trial)
2514 {
2515 rtx pat;
2516
2517 /* Binutils allows
2518 call __tls_get_addr, %tgd_call (foo)
2519 add %l7, %o0, %o0, %tgd_add (foo)
2520 while Sun as/ld does not. */
2521 if (TARGET_GNU_TLS || !TARGET_TLS)
2522 return 1;
2523
2524 pat = PATTERN (trial);
2525
2526 /* We must reject tgd_add{32|64}, i.e.
2527 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2528 and tldm_add{32|64}, i.e.
2529 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2530 for Sun as/ld. */
2531 if (GET_CODE (pat) == SET
2532 && GET_CODE (SET_SRC (pat)) == PLUS)
2533 {
2534 rtx unspec = XEXP (SET_SRC (pat), 1);
2535
2536 if (GET_CODE (unspec) == UNSPEC
2537 && (XINT (unspec, 1) == UNSPEC_TLSGD
2538 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2539 return 0;
2540 }
2541
2542 return 1;
2543 }
2544
2545 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2546 instruction. RETURN_P is true if the v9 variant 'return' is to be
2547 considered in the test too.
2548
2549 TRIAL must be a SET whose destination is a REG appropriate for the
2550 'restore' instruction or, if RETURN_P is true, for the 'return'
2551 instruction. */
2552
2553 static int
2554 eligible_for_restore_insn (rtx trial, bool return_p)
2555 {
2556 rtx pat = PATTERN (trial);
2557 rtx src = SET_SRC (pat);
2558
2559 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2560 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2561 && arith_operand (src, GET_MODE (src)))
2562 {
2563 if (TARGET_ARCH64)
2564 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2565 else
2566 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2567 }
2568
2569 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2570 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2571 && arith_double_operand (src, GET_MODE (src)))
2572 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2573
2574 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2575 else if (! TARGET_FPU && register_operand (src, SFmode))
2576 return 1;
2577
2578 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2579 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2580 return 1;
2581
2582 /* If we have the 'return' instruction, anything that does not use
2583 local or output registers and can go into a delay slot wins. */
2584 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2585 && (get_attr_in_uncond_branch_delay (trial)
2586 == IN_UNCOND_BRANCH_DELAY_TRUE))
2587 return 1;
2588
2589 /* The 'restore src1,src2,dest' pattern for SImode. */
2590 else if (GET_CODE (src) == PLUS
2591 && register_operand (XEXP (src, 0), SImode)
2592 && arith_operand (XEXP (src, 1), SImode))
2593 return 1;
2594
2595 /* The 'restore src1,src2,dest' pattern for DImode. */
2596 else if (GET_CODE (src) == PLUS
2597 && register_operand (XEXP (src, 0), DImode)
2598 && arith_double_operand (XEXP (src, 1), DImode))
2599 return 1;
2600
2601 /* The 'restore src1,%lo(src2),dest' pattern. */
2602 else if (GET_CODE (src) == LO_SUM
2603 && ! TARGET_CM_MEDMID
2604 && ((register_operand (XEXP (src, 0), SImode)
2605 && immediate_operand (XEXP (src, 1), SImode))
2606 || (TARGET_ARCH64
2607 && register_operand (XEXP (src, 0), DImode)
2608 && immediate_operand (XEXP (src, 1), DImode))))
2609 return 1;
2610
2611 /* The 'restore src,src,dest' pattern. */
2612 else if (GET_CODE (src) == ASHIFT
2613 && (register_operand (XEXP (src, 0), SImode)
2614 || register_operand (XEXP (src, 0), DImode))
2615 && XEXP (src, 1) == const1_rtx)
2616 return 1;
2617
2618 return 0;
2619 }
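/* Example (illustrative only): an insn such as
   (set (reg:SI %i0) (plus:SI (reg:SI %o1) (const_int 5)))
   satisfies the SImode PLUS case above, so the addition can be carried
   out by the 'restore' itself as part of the register window switch
   instead of occupying a separate instruction.  */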
2620
2621 /* Return nonzero if TRIAL can go into the function return's
2622 delay slot. */
2623
2624 int
2625 eligible_for_return_delay (rtx trial)
2626 {
2627 rtx pat;
2628
2629 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2630 return 0;
2631
2632 if (get_attr_length (trial) != 1)
2633 return 0;
2634
2635   /* If there are any call-saved registers, we should scan TRIAL to verify
2636      that it does not reference any of them.  For now, just reject it. */
2637 if (num_gfregs)
2638 return 0;
2639
2640 /* If the function uses __builtin_eh_return, the eh_return machinery
2641 occupies the delay slot. */
2642 if (crtl->calls_eh_return)
2643 return 0;
2644
2645 /* In the case of a true leaf function, anything can go into the slot. */
2646 if (sparc_leaf_function_p)
2647 return get_attr_in_uncond_branch_delay (trial)
2648 == IN_UNCOND_BRANCH_DELAY_TRUE;
2649
2650 pat = PATTERN (trial);
2651
2652 /* Otherwise, only operations which can be done in tandem with
2653 a `restore' or `return' insn can go into the delay slot. */
2654 if (GET_CODE (SET_DEST (pat)) != REG
2655 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2656 return 0;
2657
2658   /* If this instruction sets up a floating-point register and we have a return
2659 instruction, it can probably go in. But restore will not work
2660 with FP_REGS. */
2661 if (REGNO (SET_DEST (pat)) >= 32)
2662 return (TARGET_V9
2663 && ! epilogue_renumber (&pat, 1)
2664 && (get_attr_in_uncond_branch_delay (trial)
2665 == IN_UNCOND_BRANCH_DELAY_TRUE));
2666
2667 return eligible_for_restore_insn (trial, true);
2668 }
2669
2670 /* Return nonzero if TRIAL can go into the sibling call's
2671 delay slot. */
2672
2673 int
2674 eligible_for_sibcall_delay (rtx trial)
2675 {
2676 rtx pat;
2677
2678 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2679 return 0;
2680
2681 if (get_attr_length (trial) != 1)
2682 return 0;
2683
2684 pat = PATTERN (trial);
2685
2686 if (sparc_leaf_function_p)
2687 {
2688 /* If the tail call is done using the call instruction,
2689 we have to restore %o7 in the delay slot. */
2690 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2691 return 0;
2692
2693 /* %g1 is used to build the function address */
2694 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2695 return 0;
2696
2697 return 1;
2698 }
2699
2700 /* Otherwise, only operations which can be done in tandem with
2701 a `restore' insn can go into the delay slot. */
2702 if (GET_CODE (SET_DEST (pat)) != REG
2703 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2704 || REGNO (SET_DEST (pat)) >= 32)
2705 return 0;
2706
2707 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2708 in most cases. */
2709 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2710 return 0;
2711
2712 return eligible_for_restore_insn (trial, false);
2713 }
2714
2715 int
2716 short_branch (int uid1, int uid2)
2717 {
2718 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2719
2720 /* Leave a few words of "slop". */
2721 if (delta >= -1023 && delta <= 1022)
2722 return 1;
2723
2724 return 0;
2725 }
2726
2727 /* Return nonzero if REG is not used after INSN.
2728 We assume REG is a reload reg, and therefore does
2729 not live past labels or calls or jumps. */
2730 int
2731 reg_unused_after (rtx reg, rtx insn)
2732 {
2733 enum rtx_code code, prev_code = UNKNOWN;
2734
2735 while ((insn = NEXT_INSN (insn)))
2736 {
2737 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2738 return 1;
2739
2740 code = GET_CODE (insn);
2741 if (GET_CODE (insn) == CODE_LABEL)
2742 return 1;
2743
2744 if (INSN_P (insn))
2745 {
2746 rtx set = single_set (insn);
2747 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2748 if (set && in_src)
2749 return 0;
2750 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2751 return 1;
2752 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2753 return 0;
2754 }
2755 prev_code = code;
2756 }
2757 return 1;
2758 }
2759 \f
2760 /* Determine if it's legal to put X into the constant pool. This
2761 is not possible if X contains the address of a symbol that is
2762 not constant (TLS) or not known at final link time (PIC). */
2763
2764 static bool
2765 sparc_cannot_force_const_mem (rtx x)
2766 {
2767 switch (GET_CODE (x))
2768 {
2769 case CONST_INT:
2770 case CONST_DOUBLE:
2771 case CONST_VECTOR:
2772 /* Accept all non-symbolic constants. */
2773 return false;
2774
2775 case LABEL_REF:
2776 /* Labels are OK iff we are non-PIC. */
2777 return flag_pic != 0;
2778
2779 case SYMBOL_REF:
2780 /* 'Naked' TLS symbol references are never OK,
2781 non-TLS symbols are OK iff we are non-PIC. */
2782 if (SYMBOL_REF_TLS_MODEL (x))
2783 return true;
2784 else
2785 return flag_pic != 0;
2786
2787 case CONST:
2788 return sparc_cannot_force_const_mem (XEXP (x, 0));
2789 case PLUS:
2790 case MINUS:
2791 return sparc_cannot_force_const_mem (XEXP (x, 0))
2792 || sparc_cannot_force_const_mem (XEXP (x, 1));
2793 case UNSPEC:
2794 return true;
2795 default:
2796 gcc_unreachable ();
2797 }
2798 }
2799 \f
2800 /* PIC support. */
2801 static GTY(()) char pic_helper_symbol_name[256];
2802 static GTY(()) rtx pic_helper_symbol;
2803 static GTY(()) bool pic_helper_emitted_p = false;
2804 static GTY(()) rtx global_offset_table;
2805
2806 /* Ensure that we are not using patterns that are not OK with PIC. */
2807
2808 int
2809 check_pic (int i)
2810 {
2811 switch (flag_pic)
2812 {
2813 case 1:
2814 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2815 && (GET_CODE (recog_data.operand[i]) != CONST
2816 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2817 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2818 == global_offset_table)
2819 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2820 == CONST))));
2821 case 2:
2822 default:
2823 return 1;
2824 }
2825 }
2826
2827 /* Return true if X is an address which needs a temporary register when
2828 reloaded while generating PIC code. */
2829
2830 int
2831 pic_address_needs_scratch (rtx x)
2832 {
2833   /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
2834 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2835 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2836 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2837 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2838 return 1;
2839
2840 return 0;
2841 }
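/* Example (illustrative only): "sym + 0x2000" needs a scratch register
   because 0x2000 does not fit in the signed 13-bit immediate accepted
   by SMALL_INT, so the offset must be added in a separate step.  */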
2842
2843 /* Determine if a given RTX is a valid constant. We already know this
2844 satisfies CONSTANT_P. */
2845
2846 bool
2847 legitimate_constant_p (rtx x)
2848 {
2849 rtx inner;
2850
2851 switch (GET_CODE (x))
2852 {
2853 case SYMBOL_REF:
2854 /* TLS symbols are not constant. */
2855 if (SYMBOL_REF_TLS_MODEL (x))
2856 return false;
2857 break;
2858
2859 case CONST:
2860 inner = XEXP (x, 0);
2861
2862 /* Offsets of TLS symbols are never valid.
2863 Discourage CSE from creating them. */
2864 if (GET_CODE (inner) == PLUS
2865 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2866 return false;
2867 break;
2868
2869 case CONST_DOUBLE:
2870 if (GET_MODE (x) == VOIDmode)
2871 return true;
2872
2873 /* Floating point constants are generally not ok.
2874 The only exception is 0.0 in VIS. */
2875 if (TARGET_VIS
2876 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2877 && const_zero_operand (x, GET_MODE (x)))
2878 return true;
2879
2880 return false;
2881
2882 case CONST_VECTOR:
2883 /* Vector constants are generally not ok.
2884 The only exception is 0 in VIS. */
2885 if (TARGET_VIS
2886 && const_zero_operand (x, GET_MODE (x)))
2887 return true;
2888
2889 return false;
2890
2891 default:
2892 break;
2893 }
2894
2895 return true;
2896 }
2897
2898 /* Determine if a given RTX is a valid constant address. */
2899
2900 bool
2901 constant_address_p (rtx x)
2902 {
2903 switch (GET_CODE (x))
2904 {
2905 case LABEL_REF:
2906 case CONST_INT:
2907 case HIGH:
2908 return true;
2909
2910 case CONST:
2911 if (flag_pic && pic_address_needs_scratch (x))
2912 return false;
2913 return legitimate_constant_p (x);
2914
2915 case SYMBOL_REF:
2916 return !flag_pic && legitimate_constant_p (x);
2917
2918 default:
2919 return false;
2920 }
2921 }
2922
2923 /* Nonzero if the constant value X is a legitimate general operand
2924 when generating PIC code. It is given that flag_pic is on and
2925 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2926
2927 bool
2928 legitimate_pic_operand_p (rtx x)
2929 {
2930 if (pic_address_needs_scratch (x))
2931 return false;
2932 if (SPARC_SYMBOL_REF_TLS_P (x)
2933 || (GET_CODE (x) == CONST
2934 && GET_CODE (XEXP (x, 0)) == PLUS
2935 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2936 return false;
2937 return true;
2938 }
2939
2940 /* Return nonzero if ADDR is a valid memory address.
2941 STRICT specifies whether strict register checking applies. */
2942
2943 int
2944 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2945 {
2946 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2947
2948 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2949 rs1 = addr;
2950 else if (GET_CODE (addr) == PLUS)
2951 {
2952 rs1 = XEXP (addr, 0);
2953 rs2 = XEXP (addr, 1);
2954
2955       /* Canonicalize.  REG comes first; if there are no REGs,
2956          LO_SUM comes first. */
2957 if (!REG_P (rs1)
2958 && GET_CODE (rs1) != SUBREG
2959 && (REG_P (rs2)
2960 || GET_CODE (rs2) == SUBREG
2961 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2962 {
2963 rs1 = XEXP (addr, 1);
2964 rs2 = XEXP (addr, 0);
2965 }
2966
2967 if ((flag_pic == 1
2968 && rs1 == pic_offset_table_rtx
2969 && !REG_P (rs2)
2970 && GET_CODE (rs2) != SUBREG
2971 && GET_CODE (rs2) != LO_SUM
2972 && GET_CODE (rs2) != MEM
2973 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2974 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2975 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2976 || ((REG_P (rs1)
2977 || GET_CODE (rs1) == SUBREG)
2978 && RTX_OK_FOR_OFFSET_P (rs2)))
2979 {
2980 imm1 = rs2;
2981 rs2 = NULL;
2982 }
2983 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2984 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2985 {
2986 /* We prohibit REG + REG for TFmode when there are no quad move insns
2987 and we consequently need to split. We do this because REG+REG
2988 is not an offsettable address. If we get the situation in reload
2989 where source and destination of a movtf pattern are both MEMs with
2990 REG+REG address, then only one of them gets converted to an
2991 offsettable address. */
2992 if (mode == TFmode
2993 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2994 return 0;
2995
2996 /* We prohibit REG + REG on ARCH32 if not optimizing for
2997 DFmode/DImode because then mem_min_alignment is likely to be zero
2998 after reload and the forced split would lack a matching splitter
2999 pattern. */
3000 if (TARGET_ARCH32 && !optimize
3001 && (mode == DFmode || mode == DImode))
3002 return 0;
3003 }
3004 else if (USE_AS_OFFSETABLE_LO10
3005 && GET_CODE (rs1) == LO_SUM
3006 && TARGET_ARCH64
3007 && ! TARGET_CM_MEDMID
3008 && RTX_OK_FOR_OLO10_P (rs2))
3009 {
3010 rs2 = NULL;
3011 imm1 = XEXP (rs1, 1);
3012 rs1 = XEXP (rs1, 0);
3013 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3014 return 0;
3015 }
3016 }
3017 else if (GET_CODE (addr) == LO_SUM)
3018 {
3019 rs1 = XEXP (addr, 0);
3020 imm1 = XEXP (addr, 1);
3021
3022 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3023 return 0;
3024
3025 /* We can't allow TFmode in 32-bit mode, because an offset greater
3026 than the alignment (8) may cause the LO_SUM to overflow. */
3027 if (mode == TFmode && TARGET_ARCH32)
3028 return 0;
3029 }
3030 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3031 return 1;
3032 else
3033 return 0;
3034
3035 if (GET_CODE (rs1) == SUBREG)
3036 rs1 = SUBREG_REG (rs1);
3037 if (!REG_P (rs1))
3038 return 0;
3039
3040 if (rs2)
3041 {
3042 if (GET_CODE (rs2) == SUBREG)
3043 rs2 = SUBREG_REG (rs2);
3044 if (!REG_P (rs2))
3045 return 0;
3046 }
3047
3048 if (strict)
3049 {
3050 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3051 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3052 return 0;
3053 }
3054 else
3055 {
3056 if ((REGNO (rs1) >= 32
3057 && REGNO (rs1) != FRAME_POINTER_REGNUM
3058 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3059 || (rs2
3060 && (REGNO (rs2) >= 32
3061 && REGNO (rs2) != FRAME_POINTER_REGNUM
3062 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3063 return 0;
3064 }
3065 return 1;
3066 }
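/* Examples (illustrative only): "[%o1 + 64]" (REG plus simm13) and
   "[%o1 + %o2]" (REG plus REG) are accepted above, but REG plus REG is
   rejected for TFmode without hard-quad support, since the access would
   have to be split and REG+REG is not an offsettable address.  */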
3067
3068 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3069
3070 static GTY(()) rtx sparc_tls_symbol;
3071
3072 static rtx
3073 sparc_tls_get_addr (void)
3074 {
3075 if (!sparc_tls_symbol)
3076 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3077
3078 return sparc_tls_symbol;
3079 }
3080
3081 static rtx
3082 sparc_tls_got (void)
3083 {
3084 rtx temp;
3085 if (flag_pic)
3086 {
3087 crtl->uses_pic_offset_table = 1;
3088 return pic_offset_table_rtx;
3089 }
3090
3091 if (!global_offset_table)
3092 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3093 temp = gen_reg_rtx (Pmode);
3094 emit_move_insn (temp, global_offset_table);
3095 return temp;
3096 }
3097
3098 /* Return 1 if *X is a thread-local symbol. */
3099
3100 static int
3101 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3102 {
3103 return SPARC_SYMBOL_REF_TLS_P (*x);
3104 }
3105
3106 /* Return 1 if X contains a thread-local symbol. */
3107
3108 bool
3109 sparc_tls_referenced_p (rtx x)
3110 {
3111 if (!TARGET_HAVE_TLS)
3112 return false;
3113
3114 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3115 }
3116
3117 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3118 this (thread-local) address. */
3119
3120 rtx
3121 legitimize_tls_address (rtx addr)
3122 {
3123 rtx temp1, temp2, temp3, ret, o0, got, insn;
3124
3125 gcc_assert (can_create_pseudo_p ());
3126
3127 if (GET_CODE (addr) == SYMBOL_REF)
3128 switch (SYMBOL_REF_TLS_MODEL (addr))
3129 {
3130 case TLS_MODEL_GLOBAL_DYNAMIC:
3131 start_sequence ();
3132 temp1 = gen_reg_rtx (SImode);
3133 temp2 = gen_reg_rtx (SImode);
3134 ret = gen_reg_rtx (Pmode);
3135 o0 = gen_rtx_REG (Pmode, 8);
3136 got = sparc_tls_got ();
3137 emit_insn (gen_tgd_hi22 (temp1, addr));
3138 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3139 if (TARGET_ARCH32)
3140 {
3141 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3142 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3143 addr, const1_rtx));
3144 }
3145 else
3146 {
3147 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3148 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3149 addr, const1_rtx));
3150 }
3151 CALL_INSN_FUNCTION_USAGE (insn)
3152 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3153 CALL_INSN_FUNCTION_USAGE (insn));
3154 insn = get_insns ();
3155 end_sequence ();
3156 emit_libcall_block (insn, ret, o0, addr);
3157 break;
3158
3159 case TLS_MODEL_LOCAL_DYNAMIC:
3160 start_sequence ();
3161 temp1 = gen_reg_rtx (SImode);
3162 temp2 = gen_reg_rtx (SImode);
3163 temp3 = gen_reg_rtx (Pmode);
3164 ret = gen_reg_rtx (Pmode);
3165 o0 = gen_rtx_REG (Pmode, 8);
3166 got = sparc_tls_got ();
3167 emit_insn (gen_tldm_hi22 (temp1));
3168 emit_insn (gen_tldm_lo10 (temp2, temp1));
3169 if (TARGET_ARCH32)
3170 {
3171 emit_insn (gen_tldm_add32 (o0, got, temp2));
3172 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3173 const1_rtx));
3174 }
3175 else
3176 {
3177 emit_insn (gen_tldm_add64 (o0, got, temp2));
3178 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3179 const1_rtx));
3180 }
3181 CALL_INSN_FUNCTION_USAGE (insn)
3182 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3183 CALL_INSN_FUNCTION_USAGE (insn));
3184 insn = get_insns ();
3185 end_sequence ();
3186 emit_libcall_block (insn, temp3, o0,
3187 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3188 UNSPEC_TLSLD_BASE));
3189 temp1 = gen_reg_rtx (SImode);
3190 temp2 = gen_reg_rtx (SImode);
3191 emit_insn (gen_tldo_hix22 (temp1, addr));
3192 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3193 if (TARGET_ARCH32)
3194 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3195 else
3196 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3197 break;
3198
3199 case TLS_MODEL_INITIAL_EXEC:
3200 temp1 = gen_reg_rtx (SImode);
3201 temp2 = gen_reg_rtx (SImode);
3202 temp3 = gen_reg_rtx (Pmode);
3203 got = sparc_tls_got ();
3204 emit_insn (gen_tie_hi22 (temp1, addr));
3205 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3206 if (TARGET_ARCH32)
3207 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3208 else
3209 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3210 if (TARGET_SUN_TLS)
3211 {
3212 ret = gen_reg_rtx (Pmode);
3213 if (TARGET_ARCH32)
3214 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3215 temp3, addr));
3216 else
3217 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3218 temp3, addr));
3219 }
3220 else
3221 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3222 break;
3223
3224 case TLS_MODEL_LOCAL_EXEC:
3225 temp1 = gen_reg_rtx (Pmode);
3226 temp2 = gen_reg_rtx (Pmode);
3227 if (TARGET_ARCH32)
3228 {
3229 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3230 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3231 }
3232 else
3233 {
3234 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3235 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3236 }
3237 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3238 break;
3239
3240 default:
3241 gcc_unreachable ();
3242 }
3243
3244 else
3245 gcc_unreachable (); /* for now ... */
3246
3247 return ret;
3248 }
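/* Example (illustrative only): for the local-exec model above, the
   generated code amounts to "sethi %tle_hix22(sym), %t1;
   xor %t1, %tle_lox10(sym), %t2; add %g7, %t2, result" -- a constant
   offset from the thread pointer in %g7.  */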
3249
3250
3251 /* Legitimize PIC addresses. If the address is already position-independent,
3252 we return ORIG. Newly generated position-independent addresses go into a
3253 reg. This is REG if nonzero, otherwise we allocate register(s) as
3254 necessary. */
3255
3256 rtx
3257 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3258 rtx reg)
3259 {
3260 if (GET_CODE (orig) == SYMBOL_REF
3261 /* See the comment in sparc_expand_move. */
3262 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3263 {
3264 rtx pic_ref, address;
3265 rtx insn;
3266
3267 if (reg == 0)
3268 {
3269 gcc_assert (! reload_in_progress && ! reload_completed);
3270 reg = gen_reg_rtx (Pmode);
3271 }
3272
3273 if (flag_pic == 2)
3274 {
3275 /* If not during reload, allocate another temp reg here for loading
3276 in the address, so that these instructions can be optimized
3277 properly. */
3278 rtx temp_reg = ((reload_in_progress || reload_completed)
3279 ? reg : gen_reg_rtx (Pmode));
3280
3281 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3282 won't get confused into thinking that these two instructions
3283 are loading in the true address of the symbol. If in the
3284 future a PIC rtx exists, that should be used instead. */
3285 if (TARGET_ARCH64)
3286 {
3287 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3288 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3289 }
3290 else
3291 {
3292 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3293 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3294 }
3295 address = temp_reg;
3296 }
3297 else
3298 address = orig;
3299
3300 pic_ref = gen_const_mem (Pmode,
3301 gen_rtx_PLUS (Pmode,
3302 pic_offset_table_rtx, address));
3303 crtl->uses_pic_offset_table = 1;
3304 insn = emit_move_insn (reg, pic_ref);
3305 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3306    by the loop optimizer. */
3307 set_unique_reg_note (insn, REG_EQUAL, orig);
3308 return reg;
3309 }
3310 else if (GET_CODE (orig) == CONST)
3311 {
3312 rtx base, offset;
3313
3314 if (GET_CODE (XEXP (orig, 0)) == PLUS
3315 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3316 return orig;
3317
3318 if (reg == 0)
3319 {
3320 gcc_assert (! reload_in_progress && ! reload_completed);
3321 reg = gen_reg_rtx (Pmode);
3322 }
3323
3324 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3325 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3326 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3327 base == reg ? 0 : reg);
3328
3329 if (GET_CODE (offset) == CONST_INT)
3330 {
3331 if (SMALL_INT (offset))
3332 return plus_constant (base, INTVAL (offset));
3333 else if (! reload_in_progress && ! reload_completed)
3334 offset = force_reg (Pmode, offset);
3335 else
3336 /* If we reach here, then something is seriously wrong. */
3337 gcc_unreachable ();
3338 }
3339 return gen_rtx_PLUS (Pmode, base, offset);
3340 }
3341 else if (GET_CODE (orig) == LABEL_REF)
3342 /* ??? Why do we do this? */
3343 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3344 the register is live instead, in case it is eliminated. */
3345 crtl->uses_pic_offset_table = 1;
3346
3347 return orig;
3348 }
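/* Example (illustrative only): under -fPIC (flag_pic == 2) a global
   symbol is loaded above as roughly "sethi %hi(sym), %t;
   or %t, %lo(sym), %t; ld [%l7 + %t], reg" -- a GOT load through the
   PIC register %l7, with the sethi/or pair kept inside UNSPECs so that
   cse does not mistake them for the symbol's real address.  */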
3349
3350 /* Try machine-dependent ways of modifying an illegitimate address X
3351 to be legitimate. If we find one, return the new, valid address.
3352
3353 OLDX is the address as it was before break_out_memory_refs was called.
3354 In some cases it is useful to look at this to decide what needs to be done.
3355
3356 MODE is the mode of the operand pointed to by X. */
3357
3358 rtx
3359 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3360 {
3361 rtx orig_x = x;
3362
3363 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3364 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3365 force_operand (XEXP (x, 0), NULL_RTX));
3366 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3367 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3368 force_operand (XEXP (x, 1), NULL_RTX));
3369 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3370 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3371 XEXP (x, 1));
3372 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3373 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3374 force_operand (XEXP (x, 1), NULL_RTX));
3375
3376 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3377 return x;
3378
3379 if (SPARC_SYMBOL_REF_TLS_P (x))
3380 x = legitimize_tls_address (x);
3381 else if (flag_pic)
3382 x = legitimize_pic_address (x, mode, 0);
3383 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3384 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3385 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3386 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3387 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3388 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3389 else if (GET_CODE (x) == SYMBOL_REF
3390 || GET_CODE (x) == CONST
3391 || GET_CODE (x) == LABEL_REF)
3392 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3393 return x;
3394 }
3395
3396 /* Emit the special PIC helper function. */
3397
3398 static void
3399 emit_pic_helper (void)
3400 {
3401 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3402 int align;
3403
3404 switch_to_section (text_section);
3405
3406 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3407 if (align > 0)
3408 ASM_OUTPUT_ALIGN (asm_out_file, align);
3409 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3410 if (flag_delayed_branch)
3411 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3412 pic_name, pic_name);
3413 else
3414 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3415 pic_name, pic_name);
3416
3417 pic_helper_emitted_p = true;
3418 }
3419
3420 /* Emit code to load the PIC register. */
3421
3422 static void
3423 load_pic_register (bool delay_pic_helper)
3424 {
3425 int orig_flag_pic = flag_pic;
3426
3427 if (TARGET_VXWORKS_RTP)
3428 {
3429 emit_insn (gen_vxworks_load_got ());
3430 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3431 return;
3432 }
3433
3434 /* If we haven't initialized the special PIC symbols, do so now. */
3435 if (!pic_helper_symbol_name[0])
3436 {
3437 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3438 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3439 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3440 }
3441
3442 /* If we haven't emitted the special PIC helper function, do so now unless
3443 we are requested to delay it. */
3444 if (!delay_pic_helper && !pic_helper_emitted_p)
3445 emit_pic_helper ();
3446
3447 flag_pic = 0;
3448 if (TARGET_ARCH64)
3449 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3450 pic_helper_symbol));
3451 else
3452 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3453 pic_helper_symbol));
3454 flag_pic = orig_flag_pic;
3455
3456 /* Need to emit this whether or not we obey regdecls,
3457 since setjmp/longjmp can cause life info to screw up.
3458 ??? In the case where we don't obey regdecls, this is not sufficient
3459 since we may not fall out the bottom. */
3460 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3461 }
3462
3463 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3464 address of the call target. */
3465
3466 void
3467 sparc_emit_call_insn (rtx pat, rtx addr)
3468 {
3469 rtx insn;
3470
3471 insn = emit_call_insn (pat);
3472
3473 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3474 if (TARGET_VXWORKS_RTP
3475 && flag_pic
3476 && GET_CODE (addr) == SYMBOL_REF
3477 && (SYMBOL_REF_DECL (addr)
3478 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3479 : !SYMBOL_REF_LOCAL_P (addr)))
3480 {
3481 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3482 crtl->uses_pic_offset_table = 1;
3483 }
3484 }
3485 \f
3486 /* Return 1 if RTX is a MEM which is known to be aligned to at
3487 least a DESIRED byte boundary. */
3488
3489 int
3490 mem_min_alignment (rtx mem, int desired)
3491 {
3492 rtx addr, base, offset;
3493
3494 /* If it's not a MEM we can't accept it. */
3495 if (GET_CODE (mem) != MEM)
3496 return 0;
3497
3498 /* Obviously... */
3499 if (!TARGET_UNALIGNED_DOUBLES
3500 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3501 return 1;
3502
3503 /* ??? The rest of the function predates MEM_ALIGN so
3504 there is probably a bit of redundancy. */
3505 addr = XEXP (mem, 0);
3506 base = offset = NULL_RTX;
3507 if (GET_CODE (addr) == PLUS)
3508 {
3509 if (GET_CODE (XEXP (addr, 0)) == REG)
3510 {
3511 base = XEXP (addr, 0);
3512
3513 /* What we are saying here is that if the base
3514 REG is aligned properly, the compiler will make
3515 sure any REG based index upon it will be so
3516 as well. */
3517 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3518 offset = XEXP (addr, 1);
3519 else
3520 offset = const0_rtx;
3521 }
3522 }
3523 else if (GET_CODE (addr) == REG)
3524 {
3525 base = addr;
3526 offset = const0_rtx;
3527 }
3528
3529 if (base != NULL_RTX)
3530 {
3531 int regno = REGNO (base);
3532
3533 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3534 {
3535 /* Check if the compiler has recorded some information
3536 about the alignment of the base REG. If reload has
3537 completed, we already matched with proper alignments.
3538 If not running global_alloc, reload might give us
3539     an unaligned pointer into the local stack, though. */
3540 if (((cfun != 0
3541 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3542 || (optimize && reload_completed))
3543 && (INTVAL (offset) & (desired - 1)) == 0)
3544 return 1;
3545 }
3546 else
3547 {
3548 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3549 return 1;
3550 }
3551 }
3552 else if (! TARGET_UNALIGNED_DOUBLES
3553 || CONSTANT_P (addr)
3554 || GET_CODE (addr) == LO_SUM)
3555 {
3556 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3557 is true, in which case we can only assume that an access is aligned if
3558 it is to a constant address, or the address involves a LO_SUM. */
3559 return 1;
3560 }
3561
3562 /* An obviously unaligned address. */
3563 return 0;
3564 }
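/* Example (illustrative only): for an access through %fp or %sp, the
   code above accepts it when (offset - SPARC_STACK_BIAS) is a multiple
   of DESIRED; an access through an arbitrary pseudo with no recorded
   pointer alignment is conservatively rejected instead.  */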
3565
3566 \f
3567 /* Vectors to keep interesting information about registers where it can easily
3568 be got. We used to use the actual mode value as the bit number, but there
3569 are more than 32 modes now. Instead we use two tables: one indexed by
3570 hard register number, and one indexed by mode. */
3571
3572 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3573 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3574 mapped into one sparc_mode_class mode. */
3575
3576 enum sparc_mode_class {
3577 S_MODE, D_MODE, T_MODE, O_MODE,
3578 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3579 CC_MODE, CCFP_MODE
3580 };
3581
3582 /* Modes for single-word and smaller quantities. */
3583 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3584
3585 /* Modes for double-word and smaller quantities. */
3586 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3587
3588 /* Modes for quad-word and smaller quantities. */
3589 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3590
3591 /* Modes for 8-word and smaller quantities. */
3592 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3593
3594 /* Modes for single-float quantities. We must allow any single word or
3595 smaller quantity. This is because the fix/float conversion instructions
3596 take integer inputs/outputs from the float registers. */
3597 #define SF_MODES (S_MODES)
3598
3599 /* Modes for double-float and smaller quantities. */
3600 #define DF_MODES (S_MODES | D_MODES)
3601
3602 /* Modes for double-float only quantities. */
3603 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3604
3605 /* Modes for quad-float only quantities. */
3606 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3607
3608 /* Modes for quad-float and smaller quantities. */
3609 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3610
3611 /* Modes for quad-float and double-float quantities. */
3612 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3613
3614 /* Modes for quad-float pair only quantities. */
3615 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3616
3617 /* Modes for quad-float pairs and smaller quantities. */
3618 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3619
3620 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3621
3622 /* Modes for condition codes. */
3623 #define CC_MODES (1 << (int) CC_MODE)
3624 #define CCFP_MODES (1 << (int) CCFP_MODE)
3625
3626 /* Value is 1 if register/mode pair is acceptable on sparc.
3627 The funny mixture of D and T modes is because integer operations
3628 do not specially operate on tetra quantities, so non-quad-aligned
3629 registers can hold quadword quantities (except %o4 and %i4 because
3630 they cross fixed registers). */
3631
3632 /* This points to either the 32 bit or the 64 bit version. */
3633 const int *hard_regno_mode_classes;
3634
3635 static const int hard_32bit_mode_classes[] = {
3636 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3637 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3638 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3639 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3640
3641 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3642 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3643 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3644 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3645
3646 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3647 and none can hold SFmode/SImode values. */
3648 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3649 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3650 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3651 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3652
3653 /* %fcc[0123] */
3654 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3655
3656 /* %icc */
3657 CC_MODES
3658 };
3659
3660 static const int hard_64bit_mode_classes[] = {
3661 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3662 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3663 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3664 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3665
3666 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3667 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3668 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3669 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3670
3671 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3672 and none can hold SFmode/SImode values. */
3673 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3674 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3675 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3676 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3677
3678 /* %fcc[0123] */
3679 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3680
3681 /* %icc */
3682 CC_MODES
3683 };
3684
3685 int sparc_mode_class [NUM_MACHINE_MODES];
3686
3687 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3688
3689 static void
3690 sparc_init_modes (void)
3691 {
3692 int i;
3693
3694 for (i = 0; i < NUM_MACHINE_MODES; i++)
3695 {
3696 switch (GET_MODE_CLASS (i))
3697 {
3698 case MODE_INT:
3699 case MODE_PARTIAL_INT:
3700 case MODE_COMPLEX_INT:
3701 if (GET_MODE_SIZE (i) <= 4)
3702 sparc_mode_class[i] = 1 << (int) S_MODE;
3703 else if (GET_MODE_SIZE (i) == 8)
3704 sparc_mode_class[i] = 1 << (int) D_MODE;
3705 else if (GET_MODE_SIZE (i) == 16)
3706 sparc_mode_class[i] = 1 << (int) T_MODE;
3707 else if (GET_MODE_SIZE (i) == 32)
3708 sparc_mode_class[i] = 1 << (int) O_MODE;
3709 else
3710 sparc_mode_class[i] = 0;
3711 break;
3712 case MODE_VECTOR_INT:
3713 if (GET_MODE_SIZE (i) <= 4)
3714 sparc_mode_class[i] = 1 << (int)SF_MODE;
3715 else if (GET_MODE_SIZE (i) == 8)
3716 sparc_mode_class[i] = 1 << (int)DF_MODE;
3717 break;
3718 case MODE_FLOAT:
3719 case MODE_COMPLEX_FLOAT:
3720 if (GET_MODE_SIZE (i) <= 4)
3721 sparc_mode_class[i] = 1 << (int) SF_MODE;
3722 else if (GET_MODE_SIZE (i) == 8)
3723 sparc_mode_class[i] = 1 << (int) DF_MODE;
3724 else if (GET_MODE_SIZE (i) == 16)
3725 sparc_mode_class[i] = 1 << (int) TF_MODE;
3726 else if (GET_MODE_SIZE (i) == 32)
3727 sparc_mode_class[i] = 1 << (int) OF_MODE;
3728 else
3729 sparc_mode_class[i] = 0;
3730 break;
3731 case MODE_CC:
3732 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3733 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3734 else
3735 sparc_mode_class[i] = 1 << (int) CC_MODE;
3736 break;
3737 default:
3738 sparc_mode_class[i] = 0;
3739 break;
3740 }
3741 }
3742
3743 if (TARGET_ARCH64)
3744 hard_regno_mode_classes = hard_64bit_mode_classes;
3745 else
3746 hard_regno_mode_classes = hard_32bit_mode_classes;
3747
3748 /* Initialize the array used by REGNO_REG_CLASS. */
3749 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3750 {
3751 if (i < 16 && TARGET_V8PLUS)
3752 sparc_regno_reg_class[i] = I64_REGS;
3753 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3754 sparc_regno_reg_class[i] = GENERAL_REGS;
3755 else if (i < 64)
3756 sparc_regno_reg_class[i] = FP_REGS;
3757 else if (i < 96)
3758 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3759 else if (i < 100)
3760 sparc_regno_reg_class[i] = FPCC_REGS;
3761 else
3762 sparc_regno_reg_class[i] = NO_REGS;
3763 }
3764 }
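
/* Illustrative sketch, not the authoritative definition: the two tables
   above are meant to be consumed by HARD_REGNO_MODE_OK in sparc.h along
   the lines of

     static int
     hard_regno_mode_ok_sketch (int regno, enum machine_mode mode)
     {
       return (hard_regno_mode_classes[regno]
	       & sparc_mode_class[(int) mode]) != 0;
     }

   i.e. a register can hold a value of a given mode iff the mode-class
   bit computed in sparc_init_modes is present in the register's mask.  */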
3765 \f
3766 /* Compute the frame size required by the function. This function is called
3767 during the reload pass and also by sparc_expand_prologue. */
3768
3769 HOST_WIDE_INT
3770 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3771 {
3772 int outgoing_args_size = (crtl->outgoing_args_size
3773 + REG_PARM_STACK_SPACE (current_function_decl));
3774 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3775 int i;
3776
3777 if (TARGET_ARCH64)
3778 {
3779 for (i = 0; i < 8; i++)
3780 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3781 n_regs += 2;
3782 }
3783 else
3784 {
3785 for (i = 0; i < 8; i += 2)
3786 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3787 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3788 n_regs += 2;
3789 }
3790
3791 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3792 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3793 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3794 n_regs += 2;
3795
3796 /* Set up values for use in prologue and epilogue. */
3797 num_gfregs = n_regs;
3798
3799 if (leaf_function_p
3800 && n_regs == 0
3801 && size == 0
3802 && crtl->outgoing_args_size == 0)
3803 actual_fsize = apparent_fsize = 0;
3804 else
3805 {
3806 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3807 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3808 apparent_fsize += n_regs * 4;
3809 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3810 }
3811
3812 /* Make sure nothing can clobber our register windows.
3813 If a SAVE must be done, or there is a stack-local variable,
3814 the register window area must be allocated. */
3815 if (! leaf_function_p || size > 0)
3816 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3817
3818 return SPARC_STACK_ALIGN (actual_fsize);
3819 }
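
/* The (X + 7) & -8 idiom used above rounds X up to a multiple of 8:
   -8 is ...11111000 in two's complement, so adding the bias of 7 and
   masking clears the low three bits.  For example:

     (40 + 7) & -8 == 47 & -8 == 40   (already aligned, unchanged)
     (41 + 7) & -8 == 48 & -8 == 48   (rounded up to the next multiple)  */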
3820
3821 /* Output any necessary .register pseudo-ops. */
3822
3823 void
3824 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3825 {
3826 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3827 int i;
3828
3829 if (TARGET_ARCH32)
3830 return;
3831
3832 /* Check if %g[2367] were used without
3833 .register being printed for them already. */
3834 for (i = 2; i < 8; i++)
3835 {
3836 if (df_regs_ever_live_p (i)
3837 && ! sparc_hard_reg_printed [i])
3838 {
3839 sparc_hard_reg_printed [i] = 1;
3840 /* %g7 is used as TLS base register, use #ignore
3841 for it instead of #scratch. */
3842 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3843 i == 7 ? "ignore" : "scratch");
3844 }
3845 if (i == 3) i = 5;
3846 }
3847 #endif
3848 }
3849
3850 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3851 as needed. LOW should be double-word aligned for 32-bit registers.
3852 Return the new OFFSET. */
3853
3854 #define SORR_SAVE 0
3855 #define SORR_RESTORE 1
3856
3857 static int
3858 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3859 {
3860 rtx mem, insn;
3861 int i;
3862
3863 if (TARGET_ARCH64 && high <= 32)
3864 {
3865 for (i = low; i < high; i++)
3866 {
3867 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3868 {
3869 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3870 set_mem_alias_set (mem, sparc_sr_alias_set);
3871 if (action == SORR_SAVE)
3872 {
3873 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3874 RTX_FRAME_RELATED_P (insn) = 1;
3875 }
3876 else /* action == SORR_RESTORE */
3877 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3878 offset += 8;
3879 }
3880 }
3881 }
3882 else
3883 {
3884 for (i = low; i < high; i += 2)
3885 {
3886 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3887 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3888 enum machine_mode mode;
3889 int regno;
3890
3891 if (reg0 && reg1)
3892 {
3893 mode = i < 32 ? DImode : DFmode;
3894 regno = i;
3895 }
3896 else if (reg0)
3897 {
3898 mode = i < 32 ? SImode : SFmode;
3899 regno = i;
3900 }
3901 else if (reg1)
3902 {
3903 mode = i < 32 ? SImode : SFmode;
3904 regno = i + 1;
3905 offset += 4;
3906 }
3907 else
3908 continue;
3909
3910 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3911 set_mem_alias_set (mem, sparc_sr_alias_set);
3912 if (action == SORR_SAVE)
3913 {
3914 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3915 RTX_FRAME_RELATED_P (insn) = 1;
3916 }
3917 else /* action == SORR_RESTORE */
3918 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3919
3920 /* Always preserve double-word alignment. */
3921 offset = (offset + 7) & -8;
3922 }
3923 }
3924
3925 return offset;
3926 }
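
/* For example, in the 32-bit path above, if registers I and I+1 are both
   live the pair is accessed with a single DImode (integer) or DFmode (FP)
   memory operation; if only I+1 is live, a single-word access at
   OFFSET + 4 is emitted instead; and the trailing (offset + 7) & -8
   keeps the running offset double-word aligned in every case.  */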
3927
3928 /* Emit code to save or restore call-saved registers, as selected
     by ACTION. */
3929
3930 static void
3931 emit_save_or_restore_regs (int action)
3932 {
3933 HOST_WIDE_INT offset;
3934 rtx base;
3935
3936 offset = frame_base_offset - apparent_fsize;
3937
3938 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3939 {
3940 /* ??? This might be optimized a little as %g1 might already have a
3941 value close enough that a single add insn will do. */
3942 /* ??? Although, all of this is probably only a temporary fix
3943 because if %g1 can hold a function result, then
3944 sparc_expand_epilogue will lose (the result will be
3945 clobbered). */
3946 base = gen_rtx_REG (Pmode, 1);
3947 emit_move_insn (base, GEN_INT (offset));
3948 emit_insn (gen_rtx_SET (VOIDmode,
3949 base,
3950 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3951 offset = 0;
3952 }
3953 else
3954 base = frame_base_reg;
3955
3956 offset = save_or_restore_regs (0, 8, base, offset, action);
3957 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3958 }
3959
3960 /* Generate a save_register_window insn. */
3961
3962 static rtx
3963 gen_save_register_window (rtx increment)
3964 {
3965 if (TARGET_ARCH64)
3966 return gen_save_register_windowdi (increment);
3967 else
3968 return gen_save_register_windowsi (increment);
3969 }
3970
3971 /* Generate an increment for the stack pointer. */
3972
3973 static rtx
3974 gen_stack_pointer_inc (rtx increment)
3975 {
3976 return gen_rtx_SET (VOIDmode,
3977 stack_pointer_rtx,
3978 gen_rtx_PLUS (Pmode,
3979 stack_pointer_rtx,
3980 increment));
3981 }
3982
3983 /* Generate a decrement for the stack pointer. */
3984
3985 static rtx
3986 gen_stack_pointer_dec (rtx decrement)
3987 {
3988 return gen_rtx_SET (VOIDmode,
3989 stack_pointer_rtx,
3990 gen_rtx_MINUS (Pmode,
3991 stack_pointer_rtx,
3992 decrement));
3993 }
3994
3995 /* Expand the function prologue. The prologue is responsible for reserving
3996 storage for the frame, saving the call-saved registers and loading the
3997 PIC register if needed. */
3998
3999 void
4000 sparc_expand_prologue (void)
4001 {
4002 rtx insn;
4003 int i;
4004
4005 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4006 on the final value of the flag means deferring the prologue/epilogue
4007 expansion until just before the second scheduling pass, which is too
4008 late to emit multiple epilogues or return insns.
4009
4010 Of course we are making the assumption that the value of the flag
4011 will not change between now and its final value. Of the three parts
4012 of the formula, only the last one can reasonably vary. Let's take a
4013      closer look, after assuming that the first two are set to true
4014 (otherwise the last value is effectively silenced).
4015
4016 If only_leaf_regs_used returns false, the global predicate will also
4017 be false so the actual frame size calculated below will be positive.
4018 As a consequence, the save_register_window insn will be emitted in
4019 the instruction stream; now this insn explicitly references %fp
4020 which is not a leaf register so only_leaf_regs_used will always
4021 return false subsequently.
4022
4023 If only_leaf_regs_used returns true, we hope that the subsequent
4024 optimization passes won't cause non-leaf registers to pop up. For
4025 example, the regrename pass has special provisions to not rename to
4026 non-leaf registers in a leaf function. */
4027 sparc_leaf_function_p
4028 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4029
4030 /* Need to use actual_fsize, since we are also allocating
4031 space for our callee (and our own register save area). */
4032 actual_fsize
4033 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4034
4035 /* Advertise that the data calculated just above are now valid. */
4036 sparc_prologue_data_valid_p = true;
4037
4038 if (sparc_leaf_function_p)
4039 {
4040 frame_base_reg = stack_pointer_rtx;
4041 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4042 }
4043 else
4044 {
4045 frame_base_reg = hard_frame_pointer_rtx;
4046 frame_base_offset = SPARC_STACK_BIAS;
4047 }
4048
4049 if (actual_fsize == 0)
4050 /* do nothing. */ ;
4051 else if (sparc_leaf_function_p)
4052 {
4053 if (actual_fsize <= 4096)
4054 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4055 else if (actual_fsize <= 8192)
4056 {
4057 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4058 /* %sp is still the CFA register. */
4059 RTX_FRAME_RELATED_P (insn) = 1;
4060 insn
4061 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4062 }
4063 else
4064 {
4065 rtx reg = gen_rtx_REG (Pmode, 1);
4066 emit_move_insn (reg, GEN_INT (-actual_fsize));
4067 insn = emit_insn (gen_stack_pointer_inc (reg));
4068 REG_NOTES (insn) =
4069 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4070 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
4071 REG_NOTES (insn));
4072 }
4073
4074 RTX_FRAME_RELATED_P (insn) = 1;
4075 }
4076 else
4077 {
4078 if (actual_fsize <= 4096)
4079 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4080 else if (actual_fsize <= 8192)
4081 {
4082 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4083 /* %sp is not the CFA register anymore. */
4084 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4085 }
4086 else
4087 {
4088 rtx reg = gen_rtx_REG (Pmode, 1);
4089 emit_move_insn (reg, GEN_INT (-actual_fsize));
4090 insn = emit_insn (gen_save_register_window (reg));
4091 }
4092
4093 RTX_FRAME_RELATED_P (insn) = 1;
4094       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
4095 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4096 }
4097
4098 if (num_gfregs)
4099 emit_save_or_restore_regs (SORR_SAVE);
4100
4101 /* Load the PIC register if needed. */
4102 if (flag_pic && crtl->uses_pic_offset_table)
4103 load_pic_register (false);
4104 }
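
/* For a non-leaf function with a frame of N bytes, the code above boils
   down to one of the following sequences (a sketch; the exact constant
   load for large N is whatever emit_move_insn produces):

     save  %sp, -N, %sp                        ! N <= 4096

     save  %sp, -4096, %sp                     ! 4096 < N <= 8192
     add   %sp, 4096-N, %sp

     mov   -N, %g1                             ! N > 8192
     save  %sp, %g1, %sp  */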
4105
4106 /* This function generates the assembly code for function entry, which boils
4107 down to emitting the necessary .register directives. */
4108
4109 static void
4110 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4111 {
4112 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4113 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4114
4115 sparc_output_scratch_registers (file);
4116 }
4117
4118 /* Expand the function epilogue, either normal or part of a sibcall.
4119 We emit all the instructions except the return or the call. */
4120
4121 void
4122 sparc_expand_epilogue (void)
4123 {
4124 if (num_gfregs)
4125 emit_save_or_restore_regs (SORR_RESTORE);
4126
4127 if (actual_fsize == 0)
4128 /* do nothing. */ ;
4129 else if (sparc_leaf_function_p)
4130 {
4131 if (actual_fsize <= 4096)
4132 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4133 else if (actual_fsize <= 8192)
4134 {
4135 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4136 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4137 }
4138 else
4139 {
4140 rtx reg = gen_rtx_REG (Pmode, 1);
4141 emit_move_insn (reg, GEN_INT (-actual_fsize));
4142 emit_insn (gen_stack_pointer_dec (reg));
4143 }
4144 }
4145 }
4146
4147 /* Return true if it is appropriate to emit `return' instructions in the
4148 body of a function. */
4149
4150 bool
4151 sparc_can_use_return_insn_p (void)
4152 {
4153 return sparc_prologue_data_valid_p
4154 && (actual_fsize == 0 || !sparc_leaf_function_p);
4155 }
4156
4157 /* This function generates the assembly code for function exit. */
4158
4159 static void
4160 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4161 {
4162   /* If code does not drop into the epilogue, we still have to output
4163 a dummy nop for the sake of sane backtraces. Otherwise, if the
4164 last two instructions of a function were "call foo; dslot;" this
4165 can make the return PC of foo (i.e. address of call instruction
4166 plus 8) point to the first instruction in the next function. */
4167
4168 rtx insn, last_real_insn;
4169
4170 insn = get_last_insn ();
4171
4172 last_real_insn = prev_real_insn (insn);
4173 if (last_real_insn
4174 && GET_CODE (last_real_insn) == INSN
4175 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4176 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4177
4178 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4179     fputs ("\tnop\n", file);
4180
4181 sparc_output_deferred_case_vectors ();
4182 }
4183
4184 /* Output a 'restore' instruction. */
4185
4186 static void
4187 output_restore (rtx pat)
4188 {
4189 rtx operands[3];
4190
4191 if (! pat)
4192 {
4193 fputs ("\t restore\n", asm_out_file);
4194 return;
4195 }
4196
4197 gcc_assert (GET_CODE (pat) == SET);
4198
4199 operands[0] = SET_DEST (pat);
4200 pat = SET_SRC (pat);
4201
4202 switch (GET_CODE (pat))
4203 {
4204 case PLUS:
4205 operands[1] = XEXP (pat, 0);
4206 operands[2] = XEXP (pat, 1);
4207 output_asm_insn (" restore %r1, %2, %Y0", operands);
4208 break;
4209 case LO_SUM:
4210 operands[1] = XEXP (pat, 0);
4211 operands[2] = XEXP (pat, 1);
4212 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4213 break;
4214 case ASHIFT:
4215 operands[1] = XEXP (pat, 0);
4216 gcc_assert (XEXP (pat, 1) == const1_rtx);
4217 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4218 break;
4219 default:
4220 operands[1] = pat;
4221 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4222 break;
4223 }
4224 }
4225
4226 /* Output a return. */
4227
4228 const char *
4229 output_return (rtx insn)
4230 {
4231 if (sparc_leaf_function_p)
4232 {
4233 /* This is a leaf function so we don't have to bother restoring the
4234 register window, which frees us from dealing with the convoluted
4235 semantics of restore/return. We simply output the jump to the
4236 return address and the insn in the delay slot (if any). */
4237
4238 gcc_assert (! crtl->calls_eh_return);
4239
4240 return "jmp\t%%o7+%)%#";
4241 }
4242 else
4243 {
4244 /* This is a regular function so we have to restore the register window.
4245 We may have a pending insn for the delay slot, which will be either
4246 combined with the 'restore' instruction or put in the delay slot of
4247 the 'return' instruction. */
4248
4249 if (crtl->calls_eh_return)
4250 {
4251 /* If the function uses __builtin_eh_return, the eh_return
4252 machinery occupies the delay slot. */
4253 gcc_assert (! final_sequence);
4254
4255 if (! flag_delayed_branch)
4256 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4257
4258 if (TARGET_V9)
4259 fputs ("\treturn\t%i7+8\n", asm_out_file);
4260 else
4261 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4262
4263 if (flag_delayed_branch)
4264 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4265 else
4266 fputs ("\t nop\n", asm_out_file);
4267 }
4268 else if (final_sequence)
4269 {
4270 rtx delay, pat;
4271
4272 delay = NEXT_INSN (insn);
4273 gcc_assert (delay);
4274
4275 pat = PATTERN (delay);
4276
4277 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4278 {
4279 epilogue_renumber (&pat, 0);
4280 return "return\t%%i7+%)%#";
4281 }
4282 else
4283 {
4284 output_asm_insn ("jmp\t%%i7+%)", NULL);
4285 output_restore (pat);
4286 PATTERN (delay) = gen_blockage ();
4287 INSN_CODE (delay) = -1;
4288 }
4289 }
4290 else
4291 {
4292 /* The delay slot is empty. */
4293 if (TARGET_V9)
4294 return "return\t%%i7+%)\n\t nop";
4295 else if (flag_delayed_branch)
4296 return "jmp\t%%i7+%)\n\t restore";
4297 else
4298 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4299 }
4300 }
4301
4302 return "";
4303 }
4304
4305 /* Output a sibling call. */
4306
4307 const char *
4308 output_sibcall (rtx insn, rtx call_operand)
4309 {
4310 rtx operands[1];
4311
4312 gcc_assert (flag_delayed_branch);
4313
4314 operands[0] = call_operand;
4315
4316 if (sparc_leaf_function_p)
4317 {
4318 /* This is a leaf function so we don't have to bother restoring the
4319 register window. We simply output the jump to the function and
4320 the insn in the delay slot (if any). */
4321
4322 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4323
4324 if (final_sequence)
4325 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4326 operands);
4327 else
4328 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4329 	   it into a branch if possible. */
4330 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4331 operands);
4332 }
4333 else
4334 {
4335 /* This is a regular function so we have to restore the register window.
4336 We may have a pending insn for the delay slot, which will be combined
4337 with the 'restore' instruction. */
4338
4339 output_asm_insn ("call\t%a0, 0", operands);
4340
4341 if (final_sequence)
4342 {
4343 rtx delay = NEXT_INSN (insn);
4344 gcc_assert (delay);
4345
4346 output_restore (PATTERN (delay));
4347
4348 PATTERN (delay) = gen_blockage ();
4349 INSN_CODE (delay) = -1;
4350 }
4351 else
4352 output_restore (NULL_RTX);
4353 }
4354
4355 return "";
4356 }
4357 \f
4358 /* Functions for handling argument passing.
4359
4360 For 32-bit, the first 6 args are normally in registers and the rest are
4361 pushed. Any arg that starts within the first 6 words is at least
4362 partially passed in a register unless its data type forbids.
4363
4364 For 64-bit, the argument registers are laid out as an array of 16 elements
4365 and arguments are added sequentially. The first 6 int args and up to the
4366 first 16 fp args (depending on size) are passed in regs.
4367
4368 Slot Stack Integral Float Float in structure Double Long Double
4369 ---- ----- -------- ----- ------------------ ------ -----------
4370 15 [SP+248] %f31 %f30,%f31 %d30
4371 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4372 13 [SP+232] %f27 %f26,%f27 %d26
4373 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4374 11 [SP+216] %f23 %f22,%f23 %d22
4375 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4376 9 [SP+200] %f19 %f18,%f19 %d18
4377 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4378 7 [SP+184] %f15 %f14,%f15 %d14
4379 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4380 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4381 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4382 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4383 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4384 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4385 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4386
4387 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4388
4389 Integral arguments are always passed as 64-bit quantities appropriately
4390 extended.
4391
4392 Passing of floating point values is handled as follows.
4393 If a prototype is in scope:
4394 If the value is in a named argument (i.e. not a stdarg function or a
4395 value not part of the `...') then the value is passed in the appropriate
4396 fp reg.
4397 If the value is part of the `...' and is passed in one of the first 6
4398 slots then the value is passed in the appropriate int reg.
4399 If the value is part of the `...' and is not passed in one of the first 6
4400 slots then the value is passed in memory.
4401 If a prototype is not in scope:
4402 If the value is one of the first 6 arguments the value is passed in the
4403 appropriate integer reg and the appropriate fp reg.
4404 If the value is not one of the first 6 arguments the value is passed in
4405 the appropriate fp reg and in memory.
4406
4407
4408 Summary of the calling conventions implemented by GCC on SPARC:
4409
4410 32-bit ABI:
4411 size argument return value
4412
4413 small integer <4 int. reg. int. reg.
4414 word 4 int. reg. int. reg.
4415 double word 8 int. reg. int. reg.
4416
4417 _Complex small integer <8 int. reg. int. reg.
4418 _Complex word 8 int. reg. int. reg.
4419 _Complex double word 16 memory int. reg.
4420
4421 vector integer <=8 int. reg. FP reg.
4422 vector integer >8 memory memory
4423
4424 float 4 int. reg. FP reg.
4425 double 8 int. reg. FP reg.
4426 long double 16 memory memory
4427
4428 _Complex float 8 memory FP reg.
4429 _Complex double 16 memory FP reg.
4430 _Complex long double 32 memory FP reg.
4431
4432 vector float any memory memory
4433
4434 aggregate any memory memory
4435
4436
4437
4438 64-bit ABI:
4439 size argument return value
4440
4441 small integer <8 int. reg. int. reg.
4442 word 8 int. reg. int. reg.
4443 double word 16 int. reg. int. reg.
4444
4445 _Complex small integer <16 int. reg. int. reg.
4446 _Complex word 16 int. reg. int. reg.
4447 _Complex double word 32 memory int. reg.
4448
4449 vector integer <=16 FP reg. FP reg.
4450 vector integer 16<s<=32 memory FP reg.
4451 vector integer >32 memory memory
4452
4453 float 4 FP reg. FP reg.
4454 double 8 FP reg. FP reg.
4455 long double 16 FP reg. FP reg.
4456
4457 _Complex float 8 FP reg. FP reg.
4458 _Complex double 16 FP reg. FP reg.
4459 _Complex long double 32 memory FP reg.
4460
4461 vector float <=16 FP reg. FP reg.
4462 vector float 16<s<=32 memory FP reg.
4463 vector float >32 memory memory
4464
4465 aggregate <=16 reg. reg.
4466 aggregate 16<s<=32 memory reg.
4467 aggregate >32 memory memory
4468
4469
4470
4471 Note #1: complex floating-point types follow the extended SPARC ABIs as
4472 implemented by the Sun compiler.
4473
4474 Note #2: integral vector types follow the scalar floating-point types
4475 conventions to match what is implemented by the Sun VIS SDK.
4476
4477 Note #3: floating-point vector types follow the aggregate types
4478 conventions. */
4479
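/* As a concrete illustration of the 64-bit table above, for a
   prototyped call such as

     extern void f (int a, double b, float c);

   A occupies slot 0 and is passed in %o0, B occupies slot 1 and is
   passed in %d2, and C occupies slot 2; being a single-precision value,
   C is right-justified in its slot's FP register pair and lands in %f5
   (see function_arg_slotno below).  */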
4480
4481 /* Maximum number of int regs for args. */
4482 #define SPARC_INT_ARG_MAX 6
4483 /* Maximum number of fp regs for args. */
4484 #define SPARC_FP_ARG_MAX 16
4485
4486 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
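
/* For example, ROUND_ADVANCE (9) is 3 when UNITS_PER_WORD is 4 (32-bit)
   and 2 when UNITS_PER_WORD is 8 (64-bit).  */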
4487
4488 /* Handle the INIT_CUMULATIVE_ARGS macro.
4489 Initialize a variable CUM of type CUMULATIVE_ARGS
4490 for a call to a function whose data type is FNTYPE.
4491 For a library call, FNTYPE is 0. */
4492
4493 void
4494 init_cumulative_args (struct sparc_args *cum, tree fntype,
4495 rtx libname ATTRIBUTE_UNUSED,
4496 tree fndecl ATTRIBUTE_UNUSED)
4497 {
4498 cum->words = 0;
4499 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4500 cum->libcall_p = fntype == 0;
4501 }
4502
4503 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4504 When a prototype says `char' or `short', really pass an `int'. */
4505
4506 static bool
4507 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4508 {
4509 return TARGET_ARCH32 ? true : false;
4510 }
4511
4512 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4513
4514 static bool
4515 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4516 {
4517 return TARGET_ARCH64 ? true : false;
4518 }
4519
4520 /* Scan the record type TYPE and return the following predicates:
4521 - INTREGS_P: the record contains at least one field or sub-field
4522 that is eligible for promotion in integer registers.
4523     - FPREGS_P: the record contains at least one field or sub-field
4524 that is eligible for promotion in floating-point registers.
4525 - PACKED_P: the record contains at least one field that is packed.
4526
4527 Sub-fields are not taken into account for the PACKED_P predicate. */
4528
4529 static void
4530 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4531 {
4532 tree field;
4533
4534 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4535 {
4536 if (TREE_CODE (field) == FIELD_DECL)
4537 {
4538 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4539 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4540 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4541 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4542 && TARGET_FPU)
4543 *fpregs_p = 1;
4544 else
4545 *intregs_p = 1;
4546
4547 if (packed_p && DECL_PACKED (field))
4548 *packed_p = 1;
4549 }
4550 }
4551 }
4552
4553 /* Compute the slot number to pass an argument in.
4554 Return the slot number or -1 if passing on the stack.
4555
4556 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4557 the preceding args and about the function being called.
4558 MODE is the argument's machine mode.
4559 TYPE is the data type of the argument (as a tree).
4560 This is null for libcalls where that information may
4561 not be available.
4562 NAMED is nonzero if this argument is a named parameter
4563 (otherwise it is an extra parameter matching an ellipsis).
4564 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4565 *PREGNO records the register number to use if scalar type.
4566 *PPADDING records the amount of padding needed in words. */
4567
4568 static int
4569 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4570 tree type, int named, int incoming_p,
4571 int *pregno, int *ppadding)
4572 {
4573 int regbase = (incoming_p
4574 ? SPARC_INCOMING_INT_ARG_FIRST
4575 : SPARC_OUTGOING_INT_ARG_FIRST);
4576 int slotno = cum->words;
4577 enum mode_class mclass;
4578 int regno;
4579
4580 *ppadding = 0;
4581
4582 if (type && TREE_ADDRESSABLE (type))
4583 return -1;
4584
4585 if (TARGET_ARCH32
4586 && mode == BLKmode
4587 && type
4588 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4589 return -1;
4590
4591 /* For SPARC64, objects requiring 16-byte alignment get it. */
4592 if (TARGET_ARCH64
4593 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4594 && (slotno & 1) != 0)
4595 slotno++, *ppadding = 1;
4596
4597 mclass = GET_MODE_CLASS (mode);
4598 if (type && TREE_CODE (type) == VECTOR_TYPE)
4599 {
4600 /* Vector types deserve special treatment because they are
4601 polymorphic wrt their mode, depending upon whether VIS
4602 instructions are enabled. */
4603 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4604 {
4605 /* The SPARC port defines no floating-point vector modes. */
4606 gcc_assert (mode == BLKmode);
4607 }
4608 else
4609 {
4610 /* Integral vector types should either have a vector
4611 mode or an integral mode, because we are guaranteed
4612 by pass_by_reference that their size is not greater
4613 than 16 bytes and TImode is 16-byte wide. */
4614 gcc_assert (mode != BLKmode);
4615
4616 /* Vector integers are handled like floats according to
4617 the Sun VIS SDK. */
4618 mclass = MODE_FLOAT;
4619 }
4620 }
4621
4622 switch (mclass)
4623 {
4624 case MODE_FLOAT:
4625 case MODE_COMPLEX_FLOAT:
4626 if (TARGET_ARCH64 && TARGET_FPU && named)
4627 {
4628 if (slotno >= SPARC_FP_ARG_MAX)
4629 return -1;
4630 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4631 /* Arguments filling only one single FP register are
4632 right-justified in the outer double FP register. */
4633 if (GET_MODE_SIZE (mode) <= 4)
4634 regno++;
4635 break;
4636 }
4637 /* fallthrough */
4638
4639 case MODE_INT:
4640 case MODE_COMPLEX_INT:
4641 if (slotno >= SPARC_INT_ARG_MAX)
4642 return -1;
4643 regno = regbase + slotno;
4644 break;
4645
4646 case MODE_RANDOM:
4647 if (mode == VOIDmode)
4648 /* MODE is VOIDmode when generating the actual call. */
4649 return -1;
4650
4651 gcc_assert (mode == BLKmode);
4652
4653 if (TARGET_ARCH32
4654 || !type
4655 || (TREE_CODE (type) != VECTOR_TYPE
4656 && TREE_CODE (type) != RECORD_TYPE))
4657 {
4658 if (slotno >= SPARC_INT_ARG_MAX)
4659 return -1;
4660 regno = regbase + slotno;
4661 }
4662 else /* TARGET_ARCH64 && type */
4663 {
4664 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4665
4666 /* First see what kinds of registers we would need. */
4667 if (TREE_CODE (type) == VECTOR_TYPE)
4668 fpregs_p = 1;
4669 else
4670 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4671
4672 /* The ABI obviously doesn't specify how packed structures
4673 are passed. These are defined to be passed in int regs
4674 if possible, otherwise memory. */
4675 if (packed_p || !named)
4676 fpregs_p = 0, intregs_p = 1;
4677
4678 /* If all arg slots are filled, then must pass on stack. */
4679 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4680 return -1;
4681
4682 /* If there are only int args and all int arg slots are filled,
4683 then must pass on stack. */
4684 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4685 return -1;
4686
4687 /* Note that even if all int arg slots are filled, fp members may
4688 still be passed in regs if such regs are available.
4689 *PREGNO isn't set because there may be more than one, it's up
4690 to the caller to compute them. */
4691 return slotno;
4692 }
4693 break;
4694
4695 default :
4696 gcc_unreachable ();
4697 }
4698
4699 *pregno = regno;
4700 return slotno;
4701 }
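
/* For example, on SPARC64 a TFmode long double (16-byte alignment)
   arriving when CUM->words is odd, say 3, is bumped to slot 4 with
   *PPADDING set to 1 by the alignment code above, so that the value
   occupies an even/odd slot pair.  */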
4702
4703 /* Handle recursive register counting for structure field layout. */
4704
4705 struct function_arg_record_value_parms
4706 {
4707 rtx ret; /* return expression being built. */
4708 int slotno; /* slot number of the argument. */
4709 int named; /* whether the argument is named. */
4710 int regbase; /* regno of the base register. */
4711 int stack; /* 1 if part of the argument is on the stack. */
4712 int intoffset; /* offset of the first pending integer field. */
4713 unsigned int nregs; /* number of words passed in registers. */
4714 };
4715
4716 static void function_arg_record_value_3
4717 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4718 static void function_arg_record_value_2
4719 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4720 static void function_arg_record_value_1
4721 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4722 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4723 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4724
4725 /* A subroutine of function_arg_record_value. Traverse the structure
4726 recursively and determine how many registers will be required. */
4727
4728 static void
4729 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4730 struct function_arg_record_value_parms *parms,
4731 bool packed_p)
4732 {
4733 tree field;
4734
4735 /* We need to compute how many registers are needed so we can
4736 allocate the PARALLEL but before we can do that we need to know
4737 whether there are any packed fields. The ABI obviously doesn't
4738 specify how structures are passed in this case, so they are
4739 defined to be passed in int regs if possible, otherwise memory,
4740 regardless of whether there are fp values present. */
4741
4742 if (! packed_p)
4743 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4744 {
4745 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4746 {
4747 packed_p = true;
4748 break;
4749 }
4750 }
4751
4752 /* Compute how many registers we need. */
4753 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4754 {
4755 if (TREE_CODE (field) == FIELD_DECL)
4756 {
4757 HOST_WIDE_INT bitpos = startbitpos;
4758
4759 if (DECL_SIZE (field) != 0)
4760 {
4761 if (integer_zerop (DECL_SIZE (field)))
4762 continue;
4763
4764 if (host_integerp (bit_position (field), 1))
4765 bitpos += int_bit_position (field);
4766 }
4767
4768 /* ??? FIXME: else assume zero offset. */
4769
4770 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4771 function_arg_record_value_1 (TREE_TYPE (field),
4772 bitpos,
4773 parms,
4774 packed_p);
4775 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4776 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4777 && TARGET_FPU
4778 && parms->named
4779 && ! packed_p)
4780 {
4781 if (parms->intoffset != -1)
4782 {
4783 unsigned int startbit, endbit;
4784 int intslots, this_slotno;
4785
4786 startbit = parms->intoffset & -BITS_PER_WORD;
4787 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4788
4789 intslots = (endbit - startbit) / BITS_PER_WORD;
4790 this_slotno = parms->slotno + parms->intoffset
4791 / BITS_PER_WORD;
4792
4793 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4794 {
4795 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4796 /* We need to pass this field on the stack. */
4797 parms->stack = 1;
4798 }
4799
4800 parms->nregs += intslots;
4801 parms->intoffset = -1;
4802 }
4803
4804 	     /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4805 If it wasn't true we wouldn't be here. */
4806 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4807 && DECL_MODE (field) == BLKmode)
4808 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4809 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4810 parms->nregs += 2;
4811 else
4812 parms->nregs += 1;
4813 }
4814 else
4815 {
4816 if (parms->intoffset == -1)
4817 parms->intoffset = bitpos;
4818 }
4819 }
4820 }
4821 }
4822
4823 /* A subroutine of function_arg_record_value. Assign the bits of the
4824 structure between parms->intoffset and bitpos to integer registers. */
4825
4826 static void
4827 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4828 struct function_arg_record_value_parms *parms)
4829 {
4830 enum machine_mode mode;
4831 unsigned int regno;
4832 unsigned int startbit, endbit;
4833 int this_slotno, intslots, intoffset;
4834 rtx reg;
4835
4836 if (parms->intoffset == -1)
4837 return;
4838
4839 intoffset = parms->intoffset;
4840 parms->intoffset = -1;
4841
4842 startbit = intoffset & -BITS_PER_WORD;
4843 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4844 intslots = (endbit - startbit) / BITS_PER_WORD;
4845 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4846
4847 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4848 if (intslots <= 0)
4849 return;
4850
4851 /* If this is the trailing part of a word, only load that much into
4852 the register. Otherwise load the whole register. Note that in
4853 the latter case we may pick up unwanted bits. It's not a problem
4854      at the moment but we may wish to revisit it. */
4855
4856 if (intoffset % BITS_PER_WORD != 0)
4857 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4858 MODE_INT);
4859 else
4860 mode = word_mode;
4861
4862 intoffset /= BITS_PER_UNIT;
4863 do
4864 {
4865 regno = parms->regbase + this_slotno;
4866 reg = gen_rtx_REG (mode, regno);
4867 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4868 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4869
4870 this_slotno += 1;
4871 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4872 mode = word_mode;
4873 parms->nregs += 1;
4874 intslots -= 1;
4875 }
4876 while (intslots > 0);
4877 }
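
/* A worked example, assuming BITS_PER_WORD == 64: for a pending integer
   region with intoffset = 32 and bitpos = 96, startbit = 32 & -64 = 0
   and endbit = (96 + 63) & -64 = 128, so intslots = 2.  The region
   starts mid-word (32 % 64 != 0), so the first register is loaded in
   the 32-bit trailing-part mode and the second in word_mode.  */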
4878
4879 /* A subroutine of function_arg_record_value. Traverse the structure
4880 recursively and assign bits to floating point registers. Track which
4881 bits in between need integer registers; invoke function_arg_record_value_3
4882 to make that happen. */
4883
4884 static void
4885 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
4886 struct function_arg_record_value_parms *parms,
4887 bool packed_p)
4888 {
4889 tree field;
4890
4891 if (! packed_p)
4892 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4893 {
4894 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4895 {
4896 packed_p = true;
4897 break;
4898 }
4899 }
4900
4901 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4902 {
4903 if (TREE_CODE (field) == FIELD_DECL)
4904 {
4905 HOST_WIDE_INT bitpos = startbitpos;
4906
4907 if (DECL_SIZE (field) != 0)
4908 {
4909 if (integer_zerop (DECL_SIZE (field)))
4910 continue;
4911
4912 if (host_integerp (bit_position (field), 1))
4913 bitpos += int_bit_position (field);
4914 }
4915
4916 /* ??? FIXME: else assume zero offset. */
4917
4918 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4919 function_arg_record_value_2 (TREE_TYPE (field),
4920 bitpos,
4921 parms,
4922 packed_p);
4923 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4924 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4925 && TARGET_FPU
4926 && parms->named
4927 && ! packed_p)
4928 {
4929 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4930 int regno, nregs, pos;
4931 enum machine_mode mode = DECL_MODE (field);
4932 rtx reg;
4933
4934 function_arg_record_value_3 (bitpos, parms);
4935
4936 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4937 && mode == BLKmode)
4938 {
4939 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4940 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4941 }
4942 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4943 {
4944 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4945 nregs = 2;
4946 }
4947 else
4948 nregs = 1;
4949
4950 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4951 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4952 regno++;
4953 reg = gen_rtx_REG (mode, regno);
4954 pos = bitpos / BITS_PER_UNIT;
4955 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4956 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4957 parms->nregs += 1;
4958 while (--nregs > 0)
4959 {
4960 regno += GET_MODE_SIZE (mode) / 4;
4961 reg = gen_rtx_REG (mode, regno);
4962 pos += GET_MODE_SIZE (mode);
4963 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4964 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4965 parms->nregs += 1;
4966 }
4967 }
4968 else
4969 {
4970 if (parms->intoffset == -1)
4971 parms->intoffset = bitpos;
4972 }
4973 }
4974 }
4975 }
4976
4977 /* Used by function_arg and function_value to implement the complex
4978 conventions of the 64-bit ABI for passing and returning structures.
4979 Return an expression valid as a return value for the two macros
4980 FUNCTION_ARG and FUNCTION_VALUE.
4981
4982 TYPE is the data type of the argument (as a tree).
4983 This is null for libcalls where that information may
4984 not be available.
4985 MODE is the argument's machine mode.
4986 SLOTNO is the index number of the argument's slot in the parameter array.
4987 NAMED is nonzero if this argument is a named parameter
4988 (otherwise it is an extra parameter matching an ellipsis).
4989 REGBASE is the regno of the base register for the parameter array. */
4990
4991 static rtx
4992 function_arg_record_value (const_tree type, enum machine_mode mode,
4993 int slotno, int named, int regbase)
4994 {
4995 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4996 struct function_arg_record_value_parms parms;
4997 unsigned int nregs;
4998
4999 parms.ret = NULL_RTX;
5000 parms.slotno = slotno;
5001 parms.named = named;
5002 parms.regbase = regbase;
5003 parms.stack = 0;
5004
5005 /* Compute how many registers we need. */
5006 parms.nregs = 0;
5007 parms.intoffset = 0;
5008 function_arg_record_value_1 (type, 0, &parms, false);
5009
5010 /* Take into account pending integer fields. */
5011 if (parms.intoffset != -1)
5012 {
5013 unsigned int startbit, endbit;
5014 int intslots, this_slotno;
5015
5016 startbit = parms.intoffset & -BITS_PER_WORD;
5017       endbit = (typesize * BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5018 intslots = (endbit - startbit) / BITS_PER_WORD;
5019 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5020
5021 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5022 {
5023 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5024 /* We need to pass this field on the stack. */
5025 parms.stack = 1;
5026 }
5027
5028 parms.nregs += intslots;
5029 }
5030 nregs = parms.nregs;
5031
5032 /* Allocate the vector and handle some annoying special cases. */
5033 if (nregs == 0)
5034 {
5035 /* ??? Empty structure has no value? Duh? */
5036 if (typesize <= 0)
5037 {
5038 /* Though there's nothing really to store, return a word register
5039 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5040 leads to breakage due to the fact that there are zero bytes to
5041 load. */
5042 return gen_rtx_REG (mode, regbase);
5043 }
5044 else
5045 {
5046 /* ??? C++ has structures with no fields, and yet a size. Give up
5047 for now and pass everything back in integer registers. */
5048 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5049 }
5050 if (nregs + slotno > SPARC_INT_ARG_MAX)
5051 nregs = SPARC_INT_ARG_MAX - slotno;
5052 }
5053 gcc_assert (nregs != 0);
5054
5055 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5056
5057 /* If at least one field must be passed on the stack, generate
5058 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5059 also be passed on the stack. We can't do much better because the
5060 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5061 of structures for which the fields passed exclusively in registers
5062 are not at the beginning of the structure. */
5063 if (parms.stack)
5064 XVECEXP (parms.ret, 0, 0)
5065 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5066
5067 /* Fill in the entries. */
5068 parms.nregs = 0;
5069 parms.intoffset = 0;
5070 function_arg_record_value_2 (type, 0, &parms, false);
5071 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5072
5073 gcc_assert (parms.nregs == nregs);
5074
5075 return parms.ret;
5076 }
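
/* For example, under the 64-bit ABI with SLOTNO 0, a named, prototyped
   argument of type struct { int i; double d; } comes back from this
   function roughly as

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DF %f2) (const_int 8))])

   I is promoted into the integer register of slot 0 and D goes to the
   FP register pair of slot 1 (%d2 in the table above).  */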
5077
5078 /* Used by function_arg and function_value to implement the conventions
5079 of the 64-bit ABI for passing and returning unions.
5080 Return an expression valid as a return value for the two macros
5081 FUNCTION_ARG and FUNCTION_VALUE.
5082
5083 SIZE is the size in bytes of the union.
5084    MODE is the argument's machine mode.
        SLOTNO is the index number of the argument's slot in the parameter array.
5085    REGNO is the hard register the union will be passed in. */
5086
5087 static rtx
5088 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5089 int regno)
5090 {
5091 int nwords = ROUND_ADVANCE (size), i;
5092 rtx regs;
5093
5094 /* See comment in previous function for empty structures. */
5095 if (nwords == 0)
5096 return gen_rtx_REG (mode, regno);
5097
5098 if (slotno == SPARC_INT_ARG_MAX - 1)
5099 nwords = 1;
5100
5101 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5102
5103 for (i = 0; i < nwords; i++)
5104 {
5105 /* Unions are passed left-justified. */
5106 XVECEXP (regs, 0, i)
5107 = gen_rtx_EXPR_LIST (VOIDmode,
5108 gen_rtx_REG (word_mode, regno),
5109 GEN_INT (UNITS_PER_WORD * i));
5110 regno++;
5111 }
5112
5113 return regs;
5114 }
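
/* For example, a 16-byte union normally occupies two consecutive integer
   registers, at byte offsets 0 and 8 in the PARALLEL.  If it starts in
   the last slot (SLOTNO == SPARC_INT_ARG_MAX - 1), only the first word
   can go in a register and the rest is passed on the stack, which is why
   NWORDS is clamped to 1 above.  */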
5115
5116 /* Used by function_arg and function_value to implement the conventions
5117 for passing and returning large (BLKmode) vectors.
5118 Return an expression valid as a return value for the two macros
5119 FUNCTION_ARG and FUNCTION_VALUE.
5120
5121 SIZE is the size in bytes of the vector.
5122 BASE_MODE is the argument's base machine mode.
5123 REGNO is the FP hard register the vector will be passed in. */
5124
5125 static rtx
5126 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5127 {
5128 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5129 int nregs = size / base_mode_size, i;
5130 rtx regs;
5131
5132 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5133
5134 for (i = 0; i < nregs; i++)
5135 {
5136 XVECEXP (regs, 0, i)
5137 = gen_rtx_EXPR_LIST (VOIDmode,
5138 gen_rtx_REG (base_mode, regno),
5139 GEN_INT (base_mode_size * i));
5140 regno += base_mode_size / 4;
5141 }
5142
5143 return regs;
5144 }
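
/* For example, a 16-byte vector of floats starting at %f0 becomes a
   4-entry PARALLEL of (reg:SF %f0) ... (reg:SF %f3) at byte offsets 0,
   4, 8 and 12; the register step of BASE_MODE_SIZE / 4 reflects the
   fact that FP register numbers are counted in 4-byte quantities.  */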
5145
5146 /* Handle the FUNCTION_ARG macro.
5147 Determine where to put an argument to a function.
5148 Value is zero to push the argument on the stack,
5149 or a hard register in which to store the argument.
5150
5151 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5152 the preceding args and about the function being called.
5153 MODE is the argument's machine mode.
5154 TYPE is the data type of the argument (as a tree).
5155 This is null for libcalls where that information may
5156 not be available.
5157 NAMED is nonzero if this argument is a named parameter
5158 (otherwise it is an extra parameter matching an ellipsis).
5159 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5160
5161 rtx
5162 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5163 tree type, int named, int incoming_p)
5164 {
5165 int regbase = (incoming_p
5166 ? SPARC_INCOMING_INT_ARG_FIRST
5167 : SPARC_OUTGOING_INT_ARG_FIRST);
5168 int slotno, regno, padding;
5169 enum mode_class mclass = GET_MODE_CLASS (mode);
5170
5171 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5172 &regno, &padding);
5173 if (slotno == -1)
5174 return 0;
5175
5176 /* Vector types deserve special treatment because they are polymorphic wrt
5177 their mode, depending upon whether VIS instructions are enabled. */
5178 if (type && TREE_CODE (type) == VECTOR_TYPE)
5179 {
5180 HOST_WIDE_INT size = int_size_in_bytes (type);
5181 gcc_assert ((TARGET_ARCH32 && size <= 8)
5182 || (TARGET_ARCH64 && size <= 16));
5183
5184 if (mode == BLKmode)
5185 return function_arg_vector_value (size,
5186 TYPE_MODE (TREE_TYPE (type)),
5187 SPARC_FP_ARG_FIRST + 2*slotno);
5188 else
5189 mclass = MODE_FLOAT;
5190 }
5191
5192 if (TARGET_ARCH32)
5193 return gen_rtx_REG (mode, regno);
5194
5195 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5196 and are promoted to registers if possible. */
5197 if (type && TREE_CODE (type) == RECORD_TYPE)
5198 {
5199 HOST_WIDE_INT size = int_size_in_bytes (type);
5200 gcc_assert (size <= 16);
5201
5202 return function_arg_record_value (type, mode, slotno, named, regbase);
5203 }
5204
5205 /* Unions up to 16 bytes in size are passed in integer registers. */
5206 else if (type && TREE_CODE (type) == UNION_TYPE)
5207 {
5208 HOST_WIDE_INT size = int_size_in_bytes (type);
5209 gcc_assert (size <= 16);
5210
5211 return function_arg_union_value (size, mode, slotno, regno);
5212 }
5213
5214 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5215 but also have the slot allocated for them.
5216 If no prototype is in scope fp values in register slots get passed
5217 in two places, either fp regs and int regs or fp regs and memory. */
5218 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5219 && SPARC_FP_REG_P (regno))
5220 {
5221 rtx reg = gen_rtx_REG (mode, regno);
5222 if (cum->prototype_p || cum->libcall_p)
5223 {
5224 /* "* 2" because fp reg numbers are recorded in 4 byte
5225 quantities. */
5226 #if 0
5227 /* ??? This will cause the value to be passed in the fp reg and
5228 in the stack. When a prototype exists we want to pass the
5229 value in the reg but reserve space on the stack. That's an
5230 optimization, and is deferred [for a bit]. */
5231 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5232 return gen_rtx_PARALLEL (mode,
5233 gen_rtvec (2,
5234 gen_rtx_EXPR_LIST (VOIDmode,
5235 NULL_RTX, const0_rtx),
5236 gen_rtx_EXPR_LIST (VOIDmode,
5237 reg, const0_rtx)));
5238 else
5239 #else
5240 /* ??? It seems that passing back a register even when past
5241 the area declared by REG_PARM_STACK_SPACE will allocate
5242 space appropriately, and will not copy the data onto the
5243 stack, exactly as we desire.
5244
5245 This is due to locate_and_pad_parm being called in
5246 expand_call whenever reg_parm_stack_space > 0, which
5247 while beneficial to our example here, would seem to be
5248 in error from what had been intended. Ho hum... -- r~ */
5249 #endif
5250 return reg;
5251 }
5252 else
5253 {
5254 rtx v0, v1;
5255
5256 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5257 {
5258 int intreg;
5259
5260 /* On incoming, we don't need to know that the value
5261 is passed in %f0 and %i0, and it confuses other parts
5262 causing needless spillage even on the simplest cases. */
5263 if (incoming_p)
5264 return reg;
5265
5266 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5267 + (regno - SPARC_FP_ARG_FIRST) / 2);
5268
5269 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5270 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5271 const0_rtx);
5272 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5273 }
5274 else
5275 {
5276 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5277 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5278 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5279 }
5280 }
5281 }
5282
5283 /* All other aggregate types are passed in an integer register in a mode
5284 corresponding to the size of the type. */
5285 else if (type && AGGREGATE_TYPE_P (type))
5286 {
5287 HOST_WIDE_INT size = int_size_in_bytes (type);
5288 gcc_assert (size <= 16);
5289
5290 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5291 }
5292
5293 return gen_rtx_REG (mode, regno);
5294 }
5295
5296 /* For an arg passed partly in registers and partly in memory,
5297 this is the number of bytes of registers used.
5298 For args passed entirely in registers or entirely in memory, zero.
5299
5300 Any arg that starts in the first 6 regs but won't entirely fit in them
5301 needs partial registers on v8. On v9, structures with integer
5302 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5303 values that begin in the last fp reg [where "last fp reg" varies with the
5304 mode] will be split between that reg and memory. */
5305
5306 static int
5307 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5308 tree type, bool named)
5309 {
5310 int slotno, regno, padding;
5311
5312   /* We pass 0 for incoming_p here; it doesn't matter. */
5313 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5314
5315 if (slotno == -1)
5316 return 0;
5317
5318 if (TARGET_ARCH32)
5319 {
5320 if ((slotno + (mode == BLKmode
5321 ? ROUND_ADVANCE (int_size_in_bytes (type))
5322 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5323 > SPARC_INT_ARG_MAX)
5324 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5325 }
5326 else
5327 {
5328 /* We are guaranteed by pass_by_reference that the size of the
5329 argument is not greater than 16 bytes, so we only need to return
5330 one word if the argument is partially passed in registers. */
5331
5332 if (type && AGGREGATE_TYPE_P (type))
5333 {
5334 int size = int_size_in_bytes (type);
5335
5336 if (size > UNITS_PER_WORD
5337 && slotno == SPARC_INT_ARG_MAX - 1)
5338 return UNITS_PER_WORD;
5339 }
5340 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5341 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5342 && ! (TARGET_FPU && named)))
5343 {
5344 /* The complex types are passed as packed types. */
5345 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5346 && slotno == SPARC_INT_ARG_MAX - 1)
5347 return UNITS_PER_WORD;
5348 }
5349 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5350 {
5351 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5352 > SPARC_FP_ARG_MAX)
5353 return UNITS_PER_WORD;
5354 }
5355 }
5356
5357 return 0;
5358 }
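
/* For example, on 32-bit a DImode argument landing in slot 5 is split:
   5 + ROUND_ADVANCE (8) = 7 > SPARC_INT_ARG_MAX, so one slot's worth
   (4 bytes) is passed in %o5 and the remaining 4 bytes go to memory.  */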
5359
5360 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5361 Specify whether to pass the argument by reference. */
5362
5363 static bool
5364 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5365 enum machine_mode mode, const_tree type,
5366 bool named ATTRIBUTE_UNUSED)
5367 {
5368 if (TARGET_ARCH32)
5369 /* Original SPARC 32-bit ABI says that structures and unions,
5370 and quad-precision floats are passed by reference. For Pascal,
5371 also pass arrays by reference. All other base types are passed
5372 in registers.
5373
5374 Extended ABI (as implemented by the Sun compiler) says that all
5375 complex floats are passed by reference. Pass complex integers
5376 in registers up to 8 bytes. More generally, enforce the 2-word
5377 cap for passing arguments in registers.
5378
5379 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5380 integers are passed like floats of the same size, that is in
5381 registers up to 8 bytes. Pass all vector floats by reference
5382 like structure and unions. */
5383 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5384 || mode == SCmode
5385 /* Catch CDImode, TFmode, DCmode and TCmode. */
5386 || GET_MODE_SIZE (mode) > 8
5387 || (type
5388 && TREE_CODE (type) == VECTOR_TYPE
5389 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5390 else
5391 /* Original SPARC 64-bit ABI says that structures and unions
5392 smaller than 16 bytes are passed in registers, as well as
5393 all other base types.
5394
5395 Extended ABI (as implemented by the Sun compiler) says that
5396 complex floats are passed in registers up to 16 bytes. Pass
5397 all complex integers in registers up to 16 bytes. More generally,
5398 enforce the 2-word cap for passing arguments in registers.
5399
5400 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5401 integers are passed like floats of the same size, that is in
5402 registers (up to 16 bytes). Pass all vector floats like structure
5403 and unions. */
5404 return ((type
5405 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5406 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5407 /* Catch CTImode and TCmode. */
5408 || GET_MODE_SIZE (mode) > 16);
5409 }
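
/* Quick reference for the rules above: on 32-bit, aggregates, SCmode
   and anything wider than 8 bytes (TFmode, DCmode, CDImode, ...) go by
   reference, while long long and 8-byte vector ints go in registers;
   on 64-bit the cutoff moves to 16 bytes, so even TFmode long double is
   passed by value.  */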
5410
5411 /* Handle the FUNCTION_ARG_ADVANCE macro.
5412 Update the data in CUM to advance over an argument
5413 of mode MODE and data type TYPE.
5414 TYPE is null for libcalls where that information may not be available. */
5415
5416 void
5417 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5418 tree type, int named)
5419 {
5420 int slotno, regno, padding;
5421
5422   /* We pass 0 for incoming_p here; it doesn't matter. */
5423 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5424
5425   /* If the register required leading padding, add it. */
5426 if (slotno != -1)
5427 cum->words += padding;
5428
5429 if (TARGET_ARCH32)
5430 {
5431 cum->words += (mode != BLKmode
5432 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5433 : ROUND_ADVANCE (int_size_in_bytes (type)));
5434 }
5435 else
5436 {
5437 if (type && AGGREGATE_TYPE_P (type))
5438 {
5439 int size = int_size_in_bytes (type);
5440
5441 if (size <= 8)
5442 ++cum->words;
5443 else if (size <= 16)
5444 cum->words += 2;
5445 else /* passed by reference */
5446 ++cum->words;
5447 }
5448 else
5449 {
5450 cum->words += (mode != BLKmode
5451 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5452 : ROUND_ADVANCE (int_size_in_bytes (type)));
5453 }
5454 }
5455 }
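
/* For example, on 64-bit a 12-byte struct advances CUM->words by 2 (one
   slot pair), while a 20-byte struct is passed by reference and advances
   it by 1, the slot holding only the pointer.  */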
5456
5457 /* Handle the FUNCTION_ARG_PADDING macro.
5458    For the 64-bit ABI, structs are always stored left-shifted in their
5459 argument slot. */
5460
5461 enum direction
5462 function_arg_padding (enum machine_mode mode, const_tree type)
5463 {
5464 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5465 return upward;
5466
5467 /* Fall back to the default. */
5468 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5469 }
5470
5471 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5472 Specify whether to return the return value in memory. */
5473
5474 static bool
5475 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5476 {
5477 if (TARGET_ARCH32)
5478 /* Original SPARC 32-bit ABI says that structures and unions,
5479 and quad-precision floats are returned in memory. All other
5480 base types are returned in registers.
5481
5482 Extended ABI (as implemented by the Sun compiler) says that
5483 all complex floats are returned in registers (8 FP registers
5484 at most for '_Complex long double'). Return all complex integers
5485 in registers (4 at most for '_Complex long long').
5486
5487 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5488 integers are returned like floats of the same size, that is in
5489 registers up to 8 bytes and in memory otherwise. Return all
5490 vector floats in memory like structures and unions; note that
5491 they always have BLKmode like the latter. */
5492 return (TYPE_MODE (type) == BLKmode
5493 || TYPE_MODE (type) == TFmode
5494 || (TREE_CODE (type) == VECTOR_TYPE
5495 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5496 else
5497 /* Original SPARC 64-bit ABI says that structures and unions
5498 smaller than 32 bytes are returned in registers, as well as
5499 all other base types.
5500
5501 Extended ABI (as implemented by the Sun compiler) says that all
5502 complex floats are returned in registers (8 FP registers at most
5503 for '_Complex long double'). Return all complex integers in
5504 registers (4 at most for '_Complex TItype').
5505
5506 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5507 integers are returned like floats of the same size, that is in
5508 registers. Return all vector floats like structures and unions;
5509 note that they always have BLKmode like the latter. */
5510 return ((TYPE_MODE (type) == BLKmode
5511 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5512 }
5513
5514 /* Handle the TARGET_STRUCT_VALUE target hook.
5515 Return where to find the structure return value address. */
5516
5517 static rtx
5518 sparc_struct_value_rtx (tree fndecl, int incoming)
5519 {
5520 if (TARGET_ARCH64)
5521 return 0;
5522 else
5523 {
5524 rtx mem;
5525
5526 if (incoming)
5527 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5528 STRUCT_VALUE_OFFSET));
5529 else
5530 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5531 STRUCT_VALUE_OFFSET));
5532
5533 /* Only follow the SPARC ABI for fixed-size structure returns.
5534 Variable-size structure returns are handled per the normal
5535 procedures in GCC. This is enabled by -mstd-struct-return. */
5536 if (incoming == 2
5537 && sparc_std_struct_return
5538 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5539 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5540 {
5541 /* We must check and adjust the return address, since it is
5542 optional whether the caller really provides the return
5543 object. */
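/* Register 31 is %i7, which holds the address of the call insn; a
   struct-returning call may be followed, 8 bytes past that address,
   by an unimp insn encoding the expected size.  */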
5544 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5545 rtx scratch = gen_reg_rtx (SImode);
5546 rtx endlab = gen_label_rtx ();
5547
5548 /* Calculate the return object size. */
5549 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5550 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5551 /* Construct a temporary return value. */
5552 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5553
5554 /* Implement the SPARC 32-bit psABI callee return-struct checking
5555 requirements:
5556
5557 Fetch the instruction we will return to and see if it's an
5558 unimp instruction (the most significant 10 bits will be
5559 zero). */
5560 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5561 plus_constant (ret_rtx, 8)));
5562 /* Assume the size is valid and pre-adjust. */
5563 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5564 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5565 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5566 /* Assign stack temp:
5567 Write the address of the memory pointed to by temp_val into
5568 the memory pointed to by mem */
5569 emit_move_insn (mem, XEXP (temp_val, 0));
5570 emit_label (endlab);
5571 }
5572
5573 set_mem_alias_set (mem, struct_value_alias_set);
5574 return mem;
5575 }
5576 }
5577
5578 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5579 For v9, function return values are subject to the same rules as arguments,
5580 except that up to 32 bytes may be returned in registers. */
5581
5582 rtx
5583 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5584 {
5585 /* Beware that the two values are swapped here wrt function_arg. */
5586 int regbase = (incoming_p
5587 ? SPARC_OUTGOING_INT_ARG_FIRST
5588 : SPARC_INCOMING_INT_ARG_FIRST);
5589 enum mode_class mclass = GET_MODE_CLASS (mode);
5590 int regno;
5591
5592 /* Vector types deserve special treatment because they are polymorphic wrt
5593 their mode, depending upon whether VIS instructions are enabled. */
5594 if (type && TREE_CODE (type) == VECTOR_TYPE)
5595 {
5596 HOST_WIDE_INT size = int_size_in_bytes (type);
5597 gcc_assert ((TARGET_ARCH32 && size <= 8)
5598 || (TARGET_ARCH64 && size <= 32));
5599
5600 if (mode == BLKmode)
5601 return function_arg_vector_value (size,
5602 TYPE_MODE (TREE_TYPE (type)),
5603 SPARC_FP_ARG_FIRST);
5604 else
5605 mclass = MODE_FLOAT;
5606 }
5607
5608 if (TARGET_ARCH64 && type)
5609 {
5610 /* Structures up to 32 bytes in size are returned in registers. */
5611 if (TREE_CODE (type) == RECORD_TYPE)
5612 {
5613 HOST_WIDE_INT size = int_size_in_bytes (type);
5614 gcc_assert (size <= 32);
5615
5616 return function_arg_record_value (type, mode, 0, 1, regbase);
5617 }
5618
5619 /* Unions up to 32 bytes in size are returned in integer registers. */
5620 else if (TREE_CODE (type) == UNION_TYPE)
5621 {
5622 HOST_WIDE_INT size = int_size_in_bytes (type);
5623 gcc_assert (size <= 32);
5624
5625 return function_arg_union_value (size, mode, 0, regbase);
5626 }
5627
5628 /* Objects that require it are returned in FP registers. */
5629 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5630 ;
5631
5632 /* All other aggregate types are returned in an integer register in a
5633 mode corresponding to the size of the type. */
5634 else if (AGGREGATE_TYPE_P (type))
5635 {
5638 HOST_WIDE_INT size = int_size_in_bytes (type);
5639 gcc_assert (size <= 32);
5640
5641 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5642
5643 /* ??? We probably should have made the same ABI change in
5644 3.4.0 as the one we made for unions. The latter was
5645 required by the SCD though, while the former is not
5646 specified, so we favored compatibility and efficiency.
5647
5648 Now we're stuck for aggregates larger than 16 bytes,
5649 because OImode vanished in the meantime. Let's not
5650 try to be unduly clever, and simply follow the ABI
5651 for unions in that case. */
5652 if (mode == BLKmode)
5653 return function_arg_union_value (size, mode, 0, regbase);
5654 else
5655 mclass = MODE_INT;
5656 }
5657
5658 /* This must match PROMOTE_FUNCTION_MODE. */
5659 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5660 mode = word_mode;
5661 }
5662
5663 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5664 regno = SPARC_FP_ARG_FIRST;
5665 else
5666 regno = regbase;
5667
5668 return gen_rtx_REG (mode, regno);
5669 }
5670
5671 /* Do what is necessary for `va_start'. We look at the current function
5672 to determine if stdarg or varargs is used and return the address of
5673 the first unnamed parameter. */
5674
5675 static rtx
5676 sparc_builtin_saveregs (void)
5677 {
5678 int first_reg = crtl->args.info.words;
5679 rtx address;
5680 int regno;
5681
5682 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5683 emit_move_insn (gen_rtx_MEM (word_mode,
5684 gen_rtx_PLUS (Pmode,
5685 frame_pointer_rtx,
5686 GEN_INT (FIRST_PARM_OFFSET (0)
5687 + (UNITS_PER_WORD
5688 * regno)))),
5689 gen_rtx_REG (word_mode,
5690 SPARC_INCOMING_INT_ARG_FIRST + regno));
5691
5692 address = gen_rtx_PLUS (Pmode,
5693 frame_pointer_rtx,
5694 GEN_INT (FIRST_PARM_OFFSET (0)
5695 + UNITS_PER_WORD * first_reg));
5696
5697 return address;
5698 }
5699
5700 /* Implement `va_start' for stdarg. */
5701
5702 static void
5703 sparc_va_start (tree valist, rtx nextarg)
5704 {
5705 nextarg = expand_builtin_saveregs ();
5706 std_expand_builtin_va_start (valist, nextarg);
5707 }
5708
5709 /* Implement `va_arg' for stdarg. */
5710
5711 static tree
5712 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5713 {
5714 HOST_WIDE_INT size, rsize, align;
5715 tree addr, incr;
5716 bool indirect;
5717 tree ptrtype = build_pointer_type (type);
5718
5719 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5720 {
5721 indirect = true;
5722 size = rsize = UNITS_PER_WORD;
5723 align = 0;
5724 }
5725 else
5726 {
5727 indirect = false;
5728 size = int_size_in_bytes (type);
5729 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5730 align = 0;
5731
5732 if (TARGET_ARCH64)
5733 {
5734 /* For SPARC64, objects requiring 16-byte alignment get it. */
5735 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5736 align = 2 * UNITS_PER_WORD;
5737
5738 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5739 are left-justified in their slots. */
5740 if (AGGREGATE_TYPE_P (type))
5741 {
5742 if (size == 0)
5743 size = rsize = UNITS_PER_WORD;
5744 else
5745 size = rsize;
5746 }
5747 }
5748 }
5749
5750 incr = valist;
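  /* Round the argument pointer up to ALIGN by adding ALIGN - 1 and
     masking off the low-order bits.  */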
5751 if (align)
5752 {
5753 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5754 size_int (align - 1));
5755 incr = fold_convert (sizetype, incr);
5756 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5757 size_int (-align));
5758 incr = fold_convert (ptr_type_node, incr);
5759 }
5760
5761 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5762 addr = incr;
5763
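  /* On a big-endian target, an object smaller than its slot sits at
     the high-address end, so step over the padding.  */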
5764 if (BYTES_BIG_ENDIAN && size < rsize)
5765 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5766 size_int (rsize - size));
5767
5768 if (indirect)
5769 {
5770 addr = fold_convert (build_pointer_type (ptrtype), addr);
5771 addr = build_va_arg_indirect_ref (addr);
5772 }
5773 /* If the address isn't aligned properly for the type,
5774 we may need to copy to a temporary.
5775 FIXME: This is inefficient. Usually we can do this
5776 in registers. */
5777 else if (align == 0
5778 && TYPE_ALIGN (type) > BITS_PER_WORD)
5779 {
5780 tree tmp = create_tmp_var (type, "va_arg_tmp");
5781 tree dest_addr = build_fold_addr_expr (tmp);
5782
5783 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY], 3,
5784 dest_addr,
5785 addr,
5786 size_int (rsize));
5787
5788 gimplify_and_add (copy, pre_p);
5789 addr = dest_addr;
5790 }
5791 else
5792 addr = fold_convert (ptrtype, addr);
5793
5794 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5795 incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
5796 gimplify_and_add (incr, post_p);
5797
5798 return build_va_arg_indirect_ref (addr);
5799 }
5800 \f
5801 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5802 Specify whether the vector mode is supported by the hardware. */
5803
5804 static bool
5805 sparc_vector_mode_supported_p (enum machine_mode mode)
5806 {
5807 return TARGET_VIS && VECTOR_MODE_P (mode);
5808 }
5809 \f
5810 /* Return the string to output an unconditional branch to LABEL, which is
5811 the operand number of the label.
5812
5813 DEST is the destination insn (i.e. the label), INSN is the source. */
5814
5815 const char *
5816 output_ubranch (rtx dest, int label, rtx insn)
5817 {
5818 static char string[64];
5819 bool v9_form = false;
5820 char *p;
5821
5822 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5823 {
5824 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5825 - INSN_ADDRESSES (INSN_UID (insn)));
5826 /* Leave some instructions for "slop". */
5827 if (delta >= -260000 && delta < 260000)
5828 v9_form = true;
5829 }
5830
5831 if (v9_form)
5832 strcpy (string, "ba%*,pt\t%%xcc, ");
5833 else
5834 strcpy (string, "b%*\t");
5835
5836 p = strchr (string, '\0');
5837 *p++ = '%';
5838 *p++ = 'l';
5839 *p++ = '0' + label;
5840 *p++ = '%';
5841 *p++ = '(';
5842 *p = '\0';
5843
5844 return string;
5845 }
5846
5847 /* Return the string to output a conditional branch to LABEL, which is
5848 the operand number of the label. OP is the conditional expression.
5849 XEXP (OP, 0) is assumed to be a condition code register (integer or
5850 floating point) and its mode specifies what kind of comparison we made.
5851
5852 DEST is the destination insn (i.e. the label), INSN is the source.
5853
5854 REVERSED is nonzero if we should reverse the sense of the comparison.
5855
5856 ANNUL is nonzero if we should generate an annulling branch. */
5857
5858 const char *
5859 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5860 rtx insn)
5861 {
5862 static char string[64];
5863 enum rtx_code code = GET_CODE (op);
5864 rtx cc_reg = XEXP (op, 0);
5865 enum machine_mode mode = GET_MODE (cc_reg);
5866 const char *labelno, *branch;
5867 int spaces = 8, far;
5868 char *p;
5869
5870 /* v9 branches are limited to +-1MB. If it is too far away,
5871 change
5872
5873 bne,pt %xcc, .LC30
5874
5875 to
5876
5877 be,pn %xcc, .+12
5878 nop
5879 ba .LC30
5880
5881 and
5882
5883 fbne,a,pn %fcc2, .LC29
5884
5885 to
5886
5887 fbe,pt %fcc2, .+16
5888 nop
5889 ba .LC29 */
5890
5891 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5892 if (reversed ^ far)
5893 {
5894 /* Reversing an FP compare needs care: an ordered compare
5895 becomes an unordered compare and vice versa. */
5896 if (mode == CCFPmode || mode == CCFPEmode)
5897 code = reverse_condition_maybe_unordered (code);
5898 else
5899 code = reverse_condition (code);
5900 }
5901
5902 /* Start by writing the branch condition. */
5903 if (mode == CCFPmode || mode == CCFPEmode)
5904 {
5905 switch (code)
5906 {
5907 case NE:
5908 branch = "fbne";
5909 break;
5910 case EQ:
5911 branch = "fbe";
5912 break;
5913 case GE:
5914 branch = "fbge";
5915 break;
5916 case GT:
5917 branch = "fbg";
5918 break;
5919 case LE:
5920 branch = "fble";
5921 break;
5922 case LT:
5923 branch = "fbl";
5924 break;
5925 case UNORDERED:
5926 branch = "fbu";
5927 break;
5928 case ORDERED:
5929 branch = "fbo";
5930 break;
5931 case UNGT:
5932 branch = "fbug";
5933 break;
5934 case UNLT:
5935 branch = "fbul";
5936 break;
5937 case UNEQ:
5938 branch = "fbue";
5939 break;
5940 case UNGE:
5941 branch = "fbuge";
5942 break;
5943 case UNLE:
5944 branch = "fbule";
5945 break;
5946 case LTGT:
5947 branch = "fblg";
5948 break;
5949
5950 default:
5951 gcc_unreachable ();
5952 }
5953
5954 /* ??? !v9: FP branches cannot be preceded by another floating point
5955 insn. Because there is currently no concept of pre-delay slots,
5956 we can fix this only by always emitting a nop before a floating
5957 point branch. */
5958
5959 string[0] = '\0';
5960 if (! TARGET_V9)
5961 strcpy (string, "nop\n\t");
5962 strcat (string, branch);
5963 }
5964 else
5965 {
5966 switch (code)
5967 {
5968 case NE:
5969 branch = "bne";
5970 break;
5971 case EQ:
5972 branch = "be";
5973 break;
5974 case GE:
5975 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5976 branch = "bpos";
5977 else
5978 branch = "bge";
5979 break;
5980 case GT:
5981 branch = "bg";
5982 break;
5983 case LE:
5984 branch = "ble";
5985 break;
5986 case LT:
5987 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5988 branch = "bneg";
5989 else
5990 branch = "bl";
5991 break;
5992 case GEU:
5993 branch = "bgeu";
5994 break;
5995 case GTU:
5996 branch = "bgu";
5997 break;
5998 case LEU:
5999 branch = "bleu";
6000 break;
6001 case LTU:
6002 branch = "blu";
6003 break;
6004
6005 default:
6006 gcc_unreachable ();
6007 }
6008 strcpy (string, branch);
6009 }
6010 spaces -= strlen (branch);
6011 p = strchr (string, '\0');
6012
6013 /* Now add the annulling, the label, and a possible nop. */
6014 if (annul && ! far)
6015 {
6016 strcpy (p, ",a");
6017 p += 2;
6018 spaces -= 2;
6019 }
6020
6021 if (TARGET_V9)
6022 {
6023 rtx note;
6024 int v8 = 0;
6025
6026 if (! far && insn && INSN_ADDRESSES_SET_P ())
6027 {
6028 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6029 - INSN_ADDRESSES (INSN_UID (insn)));
6030 /* Leave some instructions for "slop". */
6031 if (delta < -260000 || delta >= 260000)
6032 v8 = 1;
6033 }
6034
6035 if (mode == CCFPmode || mode == CCFPEmode)
6036 {
6037 static char v9_fcc_labelno[] = "%%fccX, ";
6038 /* Set the char indicating the number of the fcc reg to use. */
6039 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6040 labelno = v9_fcc_labelno;
6041 if (v8)
6042 {
6043 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6044 labelno = "";
6045 }
6046 }
6047 else if (mode == CCXmode || mode == CCX_NOOVmode)
6048 {
6049 labelno = "%%xcc, ";
6050 gcc_assert (! v8);
6051 }
6052 else
6053 {
6054 labelno = "%%icc, ";
6055 if (v8)
6056 labelno = "";
6057 }
6058
6059 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6060 {
6061 strcpy (p,
6062 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6063 ? ",pt" : ",pn");
6064 p += 3;
6065 spaces -= 3;
6066 }
6067 }
6068 else
6069 labelno = "";
6070
6071 if (spaces > 0)
6072 *p++ = '\t';
6073 else
6074 *p++ = ' ';
6075 strcpy (p, labelno);
6076 p = strchr (p, '\0');
6077 if (far)
6078 {
6079 strcpy (p, ".+12\n\t nop\n\tb\t");
6080 /* Skip the next insn if requested or
6081 if we know that it will be a nop. */
6082 if (annul || ! final_sequence)
6083 p[3] = '6';
6084 p += 14;
6085 }
6086 *p++ = '%';
6087 *p++ = 'l';
6088 *p++ = label + '0';
6089 *p++ = '%';
6090 *p++ = '#';
6091 *p = '\0';
6092
6093 return string;
6094 }
6095
6096 /* Emit a library call comparison between floating point X and Y.
6097 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6098 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6099 values as arguments instead of the TFmode registers themselves,
6100 which is why we cannot call emit_float_lib_cmp. */
6101 void
6102 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6103 {
6104 const char *qpfunc;
6105 rtx slot0, slot1, result, tem, tem2;
6106 enum machine_mode mode;
6107
6108 switch (comparison)
6109 {
6110 case EQ:
6111 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6112 break;
6113
6114 case NE:
6115 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6116 break;
6117
6118 case GT:
6119 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6120 break;
6121
6122 case GE:
6123 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6124 break;
6125
6126 case LT:
6127 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6128 break;
6129
6130 case LE:
6131 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6132 break;
6133
6134 case ORDERED:
6135 case UNORDERED:
6136 case UNGT:
6137 case UNLT:
6138 case UNEQ:
6139 case UNGE:
6140 case UNLE:
6141 case LTGT:
6142 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6143 break;
6144
6145 default:
6146 gcc_unreachable ();
6147 }
6148
6149 if (TARGET_ARCH64)
6150 {
6151 if (GET_CODE (x) != MEM)
6152 {
6153 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6154 emit_move_insn (slot0, x);
6155 }
6156 else
6157 slot0 = x;
6158
6159 if (GET_CODE (y) != MEM)
6160 {
6161 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6162 emit_move_insn (slot1, y);
6163 }
6164 else
6165 slot1 = y;
6166
6167 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6168 DImode, 2,
6169 XEXP (slot0, 0), Pmode,
6170 XEXP (slot1, 0), Pmode);
6171
6172 mode = DImode;
6173 }
6174 else
6175 {
6176 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6177 SImode, 2,
6178 x, TFmode, y, TFmode);
6179
6180 mode = SImode;
6181 }
6182
6183
6184 /* Immediately move the result of the libcall into a pseudo
6185 register so reload doesn't clobber the value if it needs
6186 the return register for a spill reg. */
6187 result = gen_reg_rtx (mode);
6188 emit_move_insn (result, hard_libcall_value (mode));
6189
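  /* The comparison libcall returns 0, 1, 2 or 3 for "equal", "less",
     "greater" and "unordered" respectively; the cases below decode
     that result, sometimes with a little bit-twiddling.  */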
6190 switch (comparison)
6191 {
6192 default:
6193 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6194 break;
6195 case ORDERED:
6196 case UNORDERED:
6197 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6198 NULL_RTX, mode, 0);
6199 break;
6200 case UNGT:
6201 case UNGE:
6202 emit_cmp_insn (result, const1_rtx,
6203 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6204 break;
6205 case UNLE:
6206 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6207 break;
6208 case UNLT:
6209 tem = gen_reg_rtx (mode);
6210 if (TARGET_ARCH32)
6211 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6212 else
6213 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6214 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6215 break;
6216 case UNEQ:
6217 case LTGT:
6218 tem = gen_reg_rtx (mode);
6219 if (TARGET_ARCH32)
6220 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6221 else
6222 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6223 tem2 = gen_reg_rtx (mode);
6224 if (TARGET_ARCH32)
6225 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6226 else
6227 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6228 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6229 NULL_RTX, mode, 0);
6230 break;
6231 }
6232 }
6233
6234 /* Generate an unsigned DImode to FP conversion. This is the same code
6235 optabs would emit if we didn't have TFmode patterns. */
6236
6237 void
6238 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6239 {
6240 rtx neglab, donelab, i0, i1, f0, in, out;
6241
6242 out = operands[0];
6243 in = force_reg (DImode, operands[1]);
6244 neglab = gen_label_rtx ();
6245 donelab = gen_label_rtx ();
6246 i0 = gen_reg_rtx (DImode);
6247 i1 = gen_reg_rtx (DImode);
6248 f0 = gen_reg_rtx (mode);
6249
6250 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6251
6252 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6253 emit_jump_insn (gen_jump (donelab));
6254 emit_barrier ();
6255
6256 emit_label (neglab);
6257
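  /* IN is at least 2^63.  Shift it right one bit, folding the lost
     low bit back in so rounding is unaffected, convert the halved
     value, then double the result in FP arithmetic.  */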
6258 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6259 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6260 emit_insn (gen_iordi3 (i0, i0, i1));
6261 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6262 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6263
6264 emit_label (donelab);
6265 }
6266
6267 /* Generate an FP to unsigned DImode conversion. This is the same code
6268 optabs would emit if we didn't have TFmode patterns. */
6269
6270 void
6271 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6272 {
6273 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6274
6275 out = operands[0];
6276 in = force_reg (mode, operands[1]);
6277 neglab = gen_label_rtx ();
6278 donelab = gen_label_rtx ();
6279 i0 = gen_reg_rtx (DImode);
6280 i1 = gen_reg_rtx (DImode);
6281 limit = gen_reg_rtx (mode);
6282 f0 = gen_reg_rtx (mode);
6283
6284 emit_move_insn (limit,
6285 CONST_DOUBLE_FROM_REAL_VALUE (
6286 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6287 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6288
6289 emit_insn (gen_rtx_SET (VOIDmode,
6290 out,
6291 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6292 emit_jump_insn (gen_jump (donelab));
6293 emit_barrier ();
6294
6295 emit_label (neglab);
6296
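  /* IN is at least 2^63.  Subtract 2^63 before the conversion, then
     set the sign bit of the integer result to add it back.  */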
6297 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6298 emit_insn (gen_rtx_SET (VOIDmode,
6299 i0,
6300 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6301 emit_insn (gen_movdi (i1, const1_rtx));
6302 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6303 emit_insn (gen_xordi3 (out, i0, i1));
6304
6305 emit_label (donelab);
6306 }
6307
6308 /* Return the string to output a conditional branch to LABEL, testing
6309 register REG. LABEL is the operand number of the label; REG is the
6310 operand number of the reg. OP is the conditional expression. The mode
6311 of REG says what kind of comparison we made.
6312
6313 DEST is the destination insn (i.e. the label), INSN is the source.
6314
6315 REVERSED is nonzero if we should reverse the sense of the comparison.
6316
6317 ANNUL is nonzero if we should generate an annulling branch. */
6318
6319 const char *
6320 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6321 int annul, rtx insn)
6322 {
6323 static char string[64];
6324 enum rtx_code code = GET_CODE (op);
6325 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6326 rtx note;
6327 int far;
6328 char *p;
6329
6330 /* Branch-on-register instructions are limited to +-128KB. If it is too far away,
6331 change
6332
6333 brnz,pt %g1, .LC30
6334
6335 to
6336
6337 brz,pn %g1, .+12
6338 nop
6339 ba,pt %xcc, .LC30
6340
6341 and
6342
6343 brgez,a,pn %o1, .LC29
6344
6345 to
6346
6347 brlz,pt %o1, .+16
6348 nop
6349 ba,pt %xcc, .LC29 */
6350
6351 far = get_attr_length (insn) >= 3;
6352
6353 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6354 if (reversed ^ far)
6355 code = reverse_condition (code);
6356
6357 /* Only 64 bit versions of these instructions exist. */
6358 gcc_assert (mode == DImode);
6359
6360 /* Start by writing the branch condition. */
6361
6362 switch (code)
6363 {
6364 case NE:
6365 strcpy (string, "brnz");
6366 break;
6367
6368 case EQ:
6369 strcpy (string, "brz");
6370 break;
6371
6372 case GE:
6373 strcpy (string, "brgez");
6374 break;
6375
6376 case LT:
6377 strcpy (string, "brlz");
6378 break;
6379
6380 case LE:
6381 strcpy (string, "brlez");
6382 break;
6383
6384 case GT:
6385 strcpy (string, "brgz");
6386 break;
6387
6388 default:
6389 gcc_unreachable ();
6390 }
6391
6392 p = strchr (string, '\0');
6393
6394 /* Now add the annulling, reg, label, and nop. */
6395 if (annul && ! far)
6396 {
6397 strcpy (p, ",a");
6398 p += 2;
6399 }
6400
6401 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6402 {
6403 strcpy (p,
6404 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6405 ? ",pt" : ",pn");
6406 p += 3;
6407 }
6408
6409 *p = p < string + 8 ? '\t' : ' ';
6410 p++;
6411 *p++ = '%';
6412 *p++ = '0' + reg;
6413 *p++ = ',';
6414 *p++ = ' ';
6415 if (far)
6416 {
6417 int veryfar = 1, delta;
6418
6419 if (INSN_ADDRESSES_SET_P ())
6420 {
6421 delta = (INSN_ADDRESSES (INSN_UID (dest))
6422 - INSN_ADDRESSES (INSN_UID (insn)));
6423 /* Leave some instructions for "slop". */
6424 if (delta >= -260000 && delta < 260000)
6425 veryfar = 0;
6426 }
6427
6428 strcpy (p, ".+12\n\t nop\n\t");
6429 /* Skip the next insn if requested or
6430 if we know that it will be a nop. */
6431 if (annul || ! final_sequence)
6432 p[3] = '6';
6433 p += 12;
6434 if (veryfar)
6435 {
6436 strcpy (p, "b\t");
6437 p += 2;
6438 }
6439 else
6440 {
6441 strcpy (p, "ba,pt\t%%xcc, ");
6442 p += 13;
6443 }
6444 }
6445 *p++ = '%';
6446 *p++ = 'l';
6447 *p++ = '0' + label;
6448 *p++ = '%';
6449 *p++ = '#';
6450 *p = '\0';
6451
6452 return string;
6453 }
6454
6455 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6456 Such instructions cannot be used in the delay slot of the return insn on v9.
6457 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6458 */
6459
6460 static int
6461 epilogue_renumber (register rtx *where, int test)
6462 {
6463 register const char *fmt;
6464 register int i;
6465 register enum rtx_code code;
6466
6467 if (*where == 0)
6468 return 0;
6469
6470 code = GET_CODE (*where);
6471
6472 switch (code)
6473 {
6474 case REG:
6475 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6476 return 1;
6477 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6478 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
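/* The register is fine as-is; fall through and return 0.  */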
6479 case SCRATCH:
6480 case CC0:
6481 case PC:
6482 case CONST_INT:
6483 case CONST_DOUBLE:
6484 return 0;
6485
6486 /* Do not replace the frame pointer with the stack pointer because
6487 it can cause the delay-slot instruction to load from below the stack.
6488 This occurs when instructions like:
6489
6490 (set (reg/i:SI 24 %i0)
6491 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6492 (const_int -20 [0xffffffec])) 0))
6493
6494 are in the return delay slot. */
6495 case PLUS:
6496 if (GET_CODE (XEXP (*where, 0)) == REG
6497 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6498 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6499 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6500 return 1;
6501 break;
6502
6503 case MEM:
6504 if (SPARC_STACK_BIAS
6505 && GET_CODE (XEXP (*where, 0)) == REG
6506 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6507 return 1;
6508 break;
6509
6510 default:
6511 break;
6512 }
6513
6514 fmt = GET_RTX_FORMAT (code);
6515
6516 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6517 {
6518 if (fmt[i] == 'E')
6519 {
6520 register int j;
6521 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6522 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6523 return 1;
6524 }
6525 else if (fmt[i] == 'e'
6526 && epilogue_renumber (&(XEXP (*where, i)), test))
6527 return 1;
6528 }
6529 return 0;
6530 }
6531 \f
6532 /* Leaf functions and non-leaf functions have different needs. */
6533
6534 static const int
6535 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6536
6537 static const int
6538 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6539
6540 static const int *const reg_alloc_orders[] = {
6541 reg_leaf_alloc_order,
6542 reg_nonleaf_alloc_order};
6543
6544 void
6545 order_regs_for_local_alloc (void)
6546 {
6547 static int last_order_nonleaf = 1;
6548
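  /* Register 15 is %o7, which is written by call insns, so it is
     ever live exactly in non-leaf functions.  */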
6549 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6550 {
6551 last_order_nonleaf = !last_order_nonleaf;
6552 memcpy ((char *) reg_alloc_order,
6553 (const char *) reg_alloc_orders[last_order_nonleaf],
6554 FIRST_PSEUDO_REGISTER * sizeof (int));
6555 }
6556 }
6557 \f
6558 /* Return 1 if REG and MEM are legitimate enough to allow the various
6559 mem<-->reg splits to be run. */
6560
6561 int
6562 sparc_splitdi_legitimate (rtx reg, rtx mem)
6563 {
6564 /* Punt if we are here by mistake. */
6565 gcc_assert (reload_completed);
6566
6567 /* We must have an offsettable memory reference. */
6568 if (! offsettable_memref_p (mem))
6569 return 0;
6570
6571 /* If we have legitimate args for ldd/std, we do not want
6572 the split to happen. */
6573 if ((REGNO (reg) % 2) == 0
6574 && mem_min_alignment (mem, 8))
6575 return 0;
6576
6577 /* Success. */
6578 return 1;
6579 }
6580
6581 /* Return 1 if x and y are some kind of REG and they refer to
6582 different hard registers. This test is guaranteed to be
6583 run after reload. */
6584
6585 int
6586 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6587 {
6588 if (GET_CODE (x) != REG)
6589 return 0;
6590 if (GET_CODE (y) != REG)
6591 return 0;
6592 if (REGNO (x) == REGNO (y))
6593 return 0;
6594 return 1;
6595 }
6596
6597 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6598 This makes them candidates for using ldd and std insns.
6599
6600 Note reg1 and reg2 *must* be hard registers. */
6601
6602 int
6603 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6604 {
6605 /* We might have been passed a SUBREG. */
6606 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6607 return 0;
6608
6609 if (REGNO (reg1) % 2 != 0)
6610 return 0;
6611
6612 /* Integer ldd is deprecated in SPARC V9. */
6613 if (TARGET_V9 && REGNO (reg1) < 32)
6614 return 0;
6615
6616 return (REGNO (reg1) == REGNO (reg2) - 1);
6617 }
6618
6619 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6620 an ldd or std insn.
6621
6622 This can only happen when addr1 and addr2, the addresses in mem1
6623 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6624 addr1 must also be aligned on a 64-bit boundary.
6625
6626 Also, if dependent_reg_rtx is not null, it should not be used to
6627 compute the address for mem1, i.e. we cannot optimize a sequence
6628 like:
6629 ld [%o0], %o0
6630 ld [%o0 + 4], %o1
6631 to
6632 ldd [%o0], %o0
6633 nor:
6634 ld [%g3 + 4], %g3
6635 ld [%g3], %g2
6636 to
6637 ldd [%g3], %g2
6638
6639 But, note that the transformation from:
6640 ld [%g2 + 4], %g3
6641 ld [%g2], %g2
6642 to
6643 ldd [%g2], %g2
6644 is perfectly fine. Thus, the peephole2 patterns always pass us
6645 the destination register of the first load, never the second one.
6646
6647 For stores we don't have a similar problem, so dependent_reg_rtx is
6648 NULL_RTX. */
6649
6650 int
6651 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6652 {
6653 rtx addr1, addr2;
6654 unsigned int reg1;
6655 HOST_WIDE_INT offset1;
6656
6657 /* The mems cannot be volatile. */
6658 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6659 return 0;
6660
6661 /* MEM1 should be aligned on a 64-bit boundary. */
6662 if (MEM_ALIGN (mem1) < 64)
6663 return 0;
6664
6665 addr1 = XEXP (mem1, 0);
6666 addr2 = XEXP (mem2, 0);
6667
6668 /* Extract a register number and offset (if used) from the first addr. */
6669 if (GET_CODE (addr1) == PLUS)
6670 {
6671 /* If not a REG, return zero. */
6672 if (GET_CODE (XEXP (addr1, 0)) != REG)
6673 return 0;
6674 else
6675 {
6676 reg1 = REGNO (XEXP (addr1, 0));
6677 /* The offset must be constant! */
6678 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6679 return 0;
6680 offset1 = INTVAL (XEXP (addr1, 1));
6681 }
6682 }
6683 else if (GET_CODE (addr1) != REG)
6684 return 0;
6685 else
6686 {
6687 reg1 = REGNO (addr1);
6688 /* This was a simple (mem (reg)) expression. Offset is 0. */
6689 offset1 = 0;
6690 }
6691
6692 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6693 if (GET_CODE (addr2) != PLUS)
6694 return 0;
6695
6696 if (GET_CODE (XEXP (addr2, 0)) != REG
6697 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6698 return 0;
6699
6700 if (reg1 != REGNO (XEXP (addr2, 0)))
6701 return 0;
6702
6703 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6704 return 0;
6705
6706 /* The first offset must be evenly divisible by 8 to ensure the
6707 address is 64-bit aligned. */
6708 if (offset1 % 8 != 0)
6709 return 0;
6710
6711 /* The offset for the second addr must be 4 more than that of the first. */
6712 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6713 return 0;
6714
6715 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6716 instructions. */
6717 return 1;
6718 }
6719
6720 /* Return 1 if reg is a pseudo, or is the first register in
6721 a hard register pair. This makes it a candidate for use in
6722 ldd and std insns. */
6723
6724 int
6725 register_ok_for_ldd (rtx reg)
6726 {
6727 /* We might have been passed a SUBREG. */
6728 if (GET_CODE (reg) != REG)
6729 return 0;
6730
6731 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6732 return (REGNO (reg) % 2 == 0);
6733 else
6734 return 1;
6735 }
6736 \f
6737 /* Print operand X (an rtx) in assembler syntax to file FILE.
6738 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6739 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6740
6741 void
6742 print_operand (FILE *file, rtx x, int code)
6743 {
6744 switch (code)
6745 {
6746 case '#':
6747 /* Output an insn in a delay slot. */
6748 if (final_sequence)
6749 sparc_indent_opcode = 1;
6750 else
6751 fputs ("\n\t nop", file);
6752 return;
6753 case '*':
6754 /* Output an annul flag if there's nothing for the delay slot and we
6755 are optimizing. This is always used with '(' below.
6756 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6757 this is a dbx bug. So, we only do this when optimizing.
6758 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6759 Always emit a nop in case the next instruction is a branch. */
6760 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6761 fputs (",a", file);
6762 return;
6763 case '(':
6764 /* Output a 'nop' if there's nothing for the delay slot and we are
6765 not optimizing. This is always used with '*' above. */
6766 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6767 fputs ("\n\t nop", file);
6768 else if (final_sequence)
6769 sparc_indent_opcode = 1;
6770 return;
6771 case ')':
6772 /* Output the right displacement from the saved PC on function return.
6773 The caller may have placed an "unimp" insn immediately after the call
6774 so we have to account for it. This insn is used in the 32-bit ABI
6775 when calling a function that returns a non zero-sized structure. The
6776 64-bit ABI doesn't have it. Be careful to have this test be the same
6777 as that used on the call. The exception here is that when
6778 sparc_std_struct_return is enabled, the psABI is followed exactly
6779 and the adjustment is made by the code in sparc_struct_value_rtx.
6780 The call emitted is the same when sparc_std_struct_return is
6781 present. */
6782 if (! TARGET_ARCH64
6783 && cfun->returns_struct
6784 && ! sparc_std_struct_return
6785 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6786 == INTEGER_CST)
6787 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6788 fputs ("12", file);
6789 else
6790 fputc ('8', file);
6791 return;
6792 case '_':
6793 /* Output the Embedded Medium/Anywhere code model base register. */
6794 fputs (EMBMEDANY_BASE_REG, file);
6795 return;
6796 case '&':
6797 /* Print some local dynamic TLS name. */
6798 assemble_name (file, get_some_local_dynamic_name ());
6799 return;
6800
6801 case 'Y':
6802 /* Adjust the operand to take into account a RESTORE operation. */
6803 if (GET_CODE (x) == CONST_INT)
6804 break;
6805 else if (GET_CODE (x) != REG)
6806 output_operand_lossage ("invalid %%Y operand");
6807 else if (REGNO (x) < 8)
6808 fputs (reg_names[REGNO (x)], file);
6809 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6810 fputs (reg_names[REGNO (x)-16], file);
6811 else
6812 output_operand_lossage ("invalid %%Y operand");
6813 return;
6814 case 'L':
6815 /* Print out the low order register name of a register pair. */
6816 if (WORDS_BIG_ENDIAN)
6817 fputs (reg_names[REGNO (x)+1], file);
6818 else
6819 fputs (reg_names[REGNO (x)], file);
6820 return;
6821 case 'H':
6822 /* Print out the high order register name of a register pair. */
6823 if (WORDS_BIG_ENDIAN)
6824 fputs (reg_names[REGNO (x)], file);
6825 else
6826 fputs (reg_names[REGNO (x)+1], file);
6827 return;
6828 case 'R':
6829 /* Print out the second register name of a register pair or quad.
6830 I.e., R (%o0) => %o1. */
6831 fputs (reg_names[REGNO (x)+1], file);
6832 return;
6833 case 'S':
6834 /* Print out the third register name of a register quad.
6835 I.e., S (%o0) => %o2. */
6836 fputs (reg_names[REGNO (x)+2], file);
6837 return;
6838 case 'T':
6839 /* Print out the fourth register name of a register quad.
6840 I.e., T (%o0) => %o3. */
6841 fputs (reg_names[REGNO (x)+3], file);
6842 return;
6843 case 'x':
6844 /* Print a condition code register. */
6845 if (REGNO (x) == SPARC_ICC_REG)
6846 {
6847 /* We don't handle CC[X]_NOOVmode because they're not supposed
6848 to occur here. */
6849 if (GET_MODE (x) == CCmode)
6850 fputs ("%icc", file);
6851 else if (GET_MODE (x) == CCXmode)
6852 fputs ("%xcc", file);
6853 else
6854 gcc_unreachable ();
6855 }
6856 else
6857 /* %fccN register */
6858 fputs (reg_names[REGNO (x)], file);
6859 return;
6860 case 'm':
6861 /* Print the operand's address only. */
6862 output_address (XEXP (x, 0));
6863 return;
6864 case 'r':
6865 /* In this case we need a register. Use %g0 if the
6866 operand is const0_rtx. */
6867 if (x == const0_rtx
6868 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6869 {
6870 fputs ("%g0", file);
6871 return;
6872 }
6873 else
6874 break;
6875
6876 case 'A':
6877 switch (GET_CODE (x))
6878 {
6879 case IOR: fputs ("or", file); break;
6880 case AND: fputs ("and", file); break;
6881 case XOR: fputs ("xor", file); break;
6882 default: output_operand_lossage ("invalid %%A operand");
6883 }
6884 return;
6885
6886 case 'B':
6887 switch (GET_CODE (x))
6888 {
6889 case IOR: fputs ("orn", file); break;
6890 case AND: fputs ("andn", file); break;
6891 case XOR: fputs ("xnor", file); break;
6892 default: output_operand_lossage ("invalid %%B operand");
6893 }
6894 return;
6895
6896 /* These are used by the conditional move instructions. */
6897 case 'c':
6898 case 'C':
6899 {
6900 enum rtx_code rc = GET_CODE (x);
6901
6902 if (code == 'c')
6903 {
6904 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6905 if (mode == CCFPmode || mode == CCFPEmode)
6906 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6907 else
6908 rc = reverse_condition (GET_CODE (x));
6909 }
6910 switch (rc)
6911 {
6912 case NE: fputs ("ne", file); break;
6913 case EQ: fputs ("e", file); break;
6914 case GE: fputs ("ge", file); break;
6915 case GT: fputs ("g", file); break;
6916 case LE: fputs ("le", file); break;
6917 case LT: fputs ("l", file); break;
6918 case GEU: fputs ("geu", file); break;
6919 case GTU: fputs ("gu", file); break;
6920 case LEU: fputs ("leu", file); break;
6921 case LTU: fputs ("lu", file); break;
6922 case LTGT: fputs ("lg", file); break;
6923 case UNORDERED: fputs ("u", file); break;
6924 case ORDERED: fputs ("o", file); break;
6925 case UNLT: fputs ("ul", file); break;
6926 case UNLE: fputs ("ule", file); break;
6927 case UNGT: fputs ("ug", file); break;
6928 case UNGE: fputs ("uge", file); break;
6929 case UNEQ: fputs ("ue", file); break;
6930 default: output_operand_lossage (code == 'c'
6931 ? "invalid %%c operand"
6932 : "invalid %%C operand");
6933 }
6934 return;
6935 }
6936
6937 /* These are used by the movr instruction pattern. */
6938 case 'd':
6939 case 'D':
6940 {
6941 enum rtx_code rc = (code == 'd'
6942 ? reverse_condition (GET_CODE (x))
6943 : GET_CODE (x));
6944 switch (rc)
6945 {
6946 case NE: fputs ("ne", file); break;
6947 case EQ: fputs ("e", file); break;
6948 case GE: fputs ("gez", file); break;
6949 case LT: fputs ("lz", file); break;
6950 case LE: fputs ("lez", file); break;
6951 case GT: fputs ("gz", file); break;
6952 default: output_operand_lossage (code == 'd'
6953 ? "invalid %%d operand"
6954 : "invalid %%D operand");
6955 }
6956 return;
6957 }
6958
6959 case 'b':
6960 {
6961 /* Print a sign-extended character. */
6962 int i = trunc_int_for_mode (INTVAL (x), QImode);
6963 fprintf (file, "%d", i);
6964 return;
6965 }
6966
6967 case 'f':
6968 /* Operand must be a MEM; write its address. */
6969 if (GET_CODE (x) != MEM)
6970 output_operand_lossage ("invalid %%f operand");
6971 output_address (XEXP (x, 0));
6972 return;
6973
6974 case 's':
6975 {
6976 /* Print a sign-extended 32-bit value. */
6977 HOST_WIDE_INT i;
6978 if (GET_CODE(x) == CONST_INT)
6979 i = INTVAL (x);
6980 else if (GET_CODE(x) == CONST_DOUBLE)
6981 i = CONST_DOUBLE_LOW (x);
6982 else
6983 {
6984 output_operand_lossage ("invalid %%s operand");
6985 return;
6986 }
6987 i = trunc_int_for_mode (i, SImode);
6988 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6989 return;
6990 }
6991
6992 case 0:
6993 /* Do nothing special. */
6994 break;
6995
6996 default:
6997 /* Undocumented flag. */
6998 output_operand_lossage ("invalid operand output code");
6999 }
7000
7001 if (GET_CODE (x) == REG)
7002 fputs (reg_names[REGNO (x)], file);
7003 else if (GET_CODE (x) == MEM)
7004 {
7005 fputc ('[', file);
7006 /* Poor Sun assembler doesn't understand absolute addressing. */
7007 if (CONSTANT_P (XEXP (x, 0)))
7008 fputs ("%g0+", file);
7009 output_address (XEXP (x, 0));
7010 fputc (']', file);
7011 }
7012 else if (GET_CODE (x) == HIGH)
7013 {
7014 fputs ("%hi(", file);
7015 output_addr_const (file, XEXP (x, 0));
7016 fputc (')', file);
7017 }
7018 else if (GET_CODE (x) == LO_SUM)
7019 {
7020 print_operand (file, XEXP (x, 0), 0);
7021 if (TARGET_CM_MEDMID)
7022 fputs ("+%l44(", file);
7023 else
7024 fputs ("+%lo(", file);
7025 output_addr_const (file, XEXP (x, 1));
7026 fputc (')', file);
7027 }
7028 else if (GET_CODE (x) == CONST_DOUBLE
7029 && (GET_MODE (x) == VOIDmode
7030 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7031 {
7032 if (CONST_DOUBLE_HIGH (x) == 0)
7033 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7034 else if (CONST_DOUBLE_HIGH (x) == -1
7035 && CONST_DOUBLE_LOW (x) < 0)
7036 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7037 else
7038 output_operand_lossage ("long long constant not a valid immediate operand");
7039 }
7040 else if (GET_CODE (x) == CONST_DOUBLE)
7041 output_operand_lossage ("floating point constant not a valid immediate operand");
7042 else { output_addr_const (file, x); }
7043 }
7044 \f
7045 /* Target hook for assembling integer objects. The sparc version has
7046 special handling for aligned DI-mode objects. */
7047
7048 static bool
7049 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7050 {
7051 /* ??? We only output .xword's for symbols and only then in environments
7052 where the assembler can handle them. */
7053 if (aligned_p && size == 8
7054 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7055 {
7056 if (TARGET_V9)
7057 {
7058 assemble_integer_with_op ("\t.xword\t", x);
7059 return true;
7060 }
7061 else
7062 {
7063 assemble_aligned_integer (4, const0_rtx);
7064 assemble_aligned_integer (4, x);
7065 return true;
7066 }
7067 }
7068 return default_assemble_integer (x, size, aligned_p);
7069 }
7070 \f
7071 /* Return the value of a code used in the .proc pseudo-op that says
7072 what kind of result this function returns. For non-C types, we pick
7073 the closest C type. */
7074
7075 #ifndef SHORT_TYPE_SIZE
7076 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7077 #endif
7078
7079 #ifndef INT_TYPE_SIZE
7080 #define INT_TYPE_SIZE BITS_PER_WORD
7081 #endif
7082
7083 #ifndef LONG_TYPE_SIZE
7084 #define LONG_TYPE_SIZE BITS_PER_WORD
7085 #endif
7086
7087 #ifndef LONG_LONG_TYPE_SIZE
7088 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7089 #endif
7090
7091 #ifndef FLOAT_TYPE_SIZE
7092 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7093 #endif
7094
7095 #ifndef DOUBLE_TYPE_SIZE
7096 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7097 #endif
7098
7099 #ifndef LONG_DOUBLE_TYPE_SIZE
7100 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7101 #endif
7102
7103 unsigned long
7104 sparc_type_code (register tree type)
7105 {
7106 register unsigned long qualifiers = 0;
7107 register unsigned shift;
7108
7109 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7110 setting more, since some assemblers will give an error for this. Also,
7111 we must be careful to avoid shifts of 32 bits or more to avoid getting
7112 unpredictable results. */
7113
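  /* The fundamental type code ends up in the low 6 bits; each level of
     derivation (pointer = 1, function = 2, array = 3) occupies the next
     free 2-bit slot, starting at bit 6.  */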
7114 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7115 {
7116 switch (TREE_CODE (type))
7117 {
7118 case ERROR_MARK:
7119 return qualifiers;
7120
7121 case ARRAY_TYPE:
7122 qualifiers |= (3 << shift);
7123 break;
7124
7125 case FUNCTION_TYPE:
7126 case METHOD_TYPE:
7127 qualifiers |= (2 << shift);
7128 break;
7129
7130 case POINTER_TYPE:
7131 case REFERENCE_TYPE:
7132 case OFFSET_TYPE:
7133 qualifiers |= (1 << shift);
7134 break;
7135
7136 case RECORD_TYPE:
7137 return (qualifiers | 8);
7138
7139 case UNION_TYPE:
7140 case QUAL_UNION_TYPE:
7141 return (qualifiers | 9);
7142
7143 case ENUMERAL_TYPE:
7144 return (qualifiers | 10);
7145
7146 case VOID_TYPE:
7147 return (qualifiers | 16);
7148
7149 case INTEGER_TYPE:
7150 /* If this is a range type, consider it to be the underlying
7151 type. */
7152 if (TREE_TYPE (type) != 0)
7153 break;
7154
7155 /* Carefully distinguish all the standard types of C,
7156 without messing up if the language is not C. We do this by
7157 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7158 look at both the names and the above fields, but that's redundant.
7159 Any type whose size is between two C types will be considered
7160 to be the wider of the two types. Also, we do not have a
7161 special code to use for "long long", so anything wider than
7162 long is treated the same. Note that we can't distinguish
7163 between "int" and "long" in this code if they are the same
7164 size, but that's fine, since neither can the assembler. */
7165
7166 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7167 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7168
7169 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7170 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7171
7172 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7173 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7174
7175 else
7176 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7177
7178 case REAL_TYPE:
7179 /* If this is a range type, consider it to be the underlying
7180 type. */
7181 if (TREE_TYPE (type) != 0)
7182 break;
7183
7184 /* Carefully distinguish all the standard types of C,
7185 without messing up if the language is not C. */
7186
7187 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7188 return (qualifiers | 6);
7189
7190 else
7191 return (qualifiers | 7);
7192
7193 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7194 /* ??? We need to distinguish between double and float complex types,
7195 but I don't know how yet because I can't reach this code from
7196 existing front-ends. */
7197 return (qualifiers | 7); /* Who knows? */
7198
7199 case VECTOR_TYPE:
7200 case BOOLEAN_TYPE: /* Boolean truth value type. */
7201 case LANG_TYPE: /* ? */
7202 return qualifiers;
7203
7204 default:
7205 gcc_unreachable (); /* Not a type! */
7206 }
7207 }
7208
7209 return qualifiers;
7210 }
7211 \f
7212 /* Nested function support. */
7213
7214 /* Emit RTL insns to initialize the variable parts of a trampoline.
7215 FNADDR is an RTX for the address of the function's pure code.
7216 CXT is an RTX for the static chain value for the function.
7217
7218 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7219 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7220 (to store insns). This is a bit excessive. Perhaps a different
7221 mechanism would be better here.
7222
7223 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7224
7225 void
7226 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7227 {
7228 /* SPARC 32-bit trampoline:
7229
7230 sethi %hi(fn), %g1
7231 sethi %hi(static), %g2
7232 jmp %g1+%lo(fn)
7233 or %g2, %lo(static), %g2
7234
7235 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7236 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7237 */
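  /* The four stores below assemble those insns one word at a time:
     0x03000000 and 0x05000000 are the sethi opcodes targeting %g1 and
     %g2, 0x81c06000 is "jmp %g1+%lo(0)" and 0x8410a000 is
     "or %g2, %lo(0), %g2"; the address pieces are OR'ed into the
     immediate fields.  */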
7238
7239 emit_move_insn
7240 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7241 expand_binop (SImode, ior_optab,
7242 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7243 size_int (10), 0, 1),
7244 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7245 NULL_RTX, 1, OPTAB_DIRECT));
7246
7247 emit_move_insn
7248 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7249 expand_binop (SImode, ior_optab,
7250 expand_shift (RSHIFT_EXPR, SImode, cxt,
7251 size_int (10), 0, 1),
7252 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7253 NULL_RTX, 1, OPTAB_DIRECT));
7254
7255 emit_move_insn
7256 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7257 expand_binop (SImode, ior_optab,
7258 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7259 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7260 NULL_RTX, 1, OPTAB_DIRECT));
7261
7262 emit_move_insn
7263 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7264 expand_binop (SImode, ior_optab,
7265 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7266 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7267 NULL_RTX, 1, OPTAB_DIRECT));
7268
7269 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7270 aligned on a 16 byte boundary so one flush clears it all. */
7271 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7272 if (sparc_cpu != PROCESSOR_ULTRASPARC
7273 && sparc_cpu != PROCESSOR_ULTRASPARC3
7274 && sparc_cpu != PROCESSOR_NIAGARA
7275 && sparc_cpu != PROCESSOR_NIAGARA2)
7276 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7277 plus_constant (tramp, 8)))));
7278
7279 /* Call __enable_execute_stack after writing onto the stack to make sure
7280 the stack address is accessible. */
7281 #ifdef ENABLE_EXECUTE_STACK
7282 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7283 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7284 #endif
7285
7286 }
7287
7288 /* The 64-bit version is simpler because it makes more sense to load the
7289 values as "immediate" data out of the trampoline. It's also easier since
7290 we can read the PC without clobbering a register. */
7291
7292 void
7293 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7294 {
7295 /* SPARC 64-bit trampoline:
7296
7297 rd %pc, %g1
7298 ldx [%g1+24], %g5
7299 jmp %g5
7300 ldx [%g1+16], %g5
7301 +16 bytes data
7302 */
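  /* As above, one word at a time: 0x83414000 is "rd %pc, %g1",
     0xca586018 and 0xca586010 are "ldx [%g1+24], %g5" and
     "ldx [%g1+16], %g5", and 0x81c14000 is "jmp %g5"; the static chain
     and function address follow as data at offsets 16 and 24.  */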
7303
7304 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7305 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7306 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7307 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7308 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7309 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7310 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7311 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7312 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7313 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7314 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7315
7316 if (sparc_cpu != PROCESSOR_ULTRASPARC
7317 && sparc_cpu != PROCESSOR_ULTRASPARC3
7318 && sparc_cpu != PROCESSOR_NIAGARA
7319 && sparc_cpu != PROCESSOR_NIAGARA2)
7320 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7321
7322 /* Call __enable_execute_stack after writing onto the stack to make sure
7323 the stack address is accessible. */
7324 #ifdef ENABLE_EXECUTE_STACK
7325 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7326 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7327 #endif
7328 }
7329 \f
7330 /* Adjust the cost of a scheduling dependency. Return the new cost of
7331 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7332
7333 static int
7334 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7335 {
7336 enum attr_type insn_type;
7337
7338 if (! recog_memoized (insn))
7339 return 0;
7340
7341 insn_type = get_attr_type (insn);
7342
7343 if (REG_NOTE_KIND (link) == 0)
7344 {
7345 /* Data dependency; DEP_INSN writes a register that INSN reads some
7346 cycles later. */
7347
7348 /* If a load, then the dependence must be on the memory address;
7349 add an extra "cycle". Note that the cost could be two cycles
7350 if the reg was written late in an instruction group; we cannot
7351 tell here. */
7352 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7353 return cost + 3;
7354
7355 /* Get the delay only if the address of the store is the dependence. */
7356 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7357 {
7358 rtx pat = PATTERN(insn);
7359 rtx dep_pat = PATTERN (dep_insn);
7360
7361 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7362 return cost; /* This should not happen! */
7363
7364 /* The dependency between the two instructions was on the data that
7365 is being stored. Assume that this implies that the address of the
7366 store is not dependent. */
7367 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7368 return cost;
7369
7370 return cost + 3; /* An approximation. */
7371 }
7372
7373 /* A shift instruction cannot receive its data from an instruction
7374 in the same cycle; add a one cycle penalty. */
7375 if (insn_type == TYPE_SHIFT)
7376 return cost + 3; /* Split before cascade into shift. */
7377 }
7378 else
7379 {
7380 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7381 INSN writes some cycles later. */
7382
7383 /* These are only significant for the fpu unit; writing a fp reg before
7384 the fpu has finished with it stalls the processor. */
7385
7386 /* Reusing an integer register causes no problems. */
7387 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7388 return 0;
7389 }
7390
7391 return cost;
7392 }
7393
7394 static int
7395 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7396 {
7397 enum attr_type insn_type, dep_type;
7398 rtx pat = PATTERN(insn);
7399 rtx dep_pat = PATTERN (dep_insn);
7400
7401 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7402 return cost;
7403
7404 insn_type = get_attr_type (insn);
7405 dep_type = get_attr_type (dep_insn);
7406
7407 switch (REG_NOTE_KIND (link))
7408 {
7409 case 0:
7410 /* Data dependency; DEP_INSN writes a register that INSN reads some
7411 cycles later. */
7412
7413 switch (insn_type)
7414 {
7415 case TYPE_STORE:
7416 case TYPE_FPSTORE:
7417 /* Get the delay iff the address of the store is the dependence. */
7418 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7419 return cost;
7420
7421 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7422 return cost;
7423 return cost + 3;
7424
7425 case TYPE_LOAD:
7426 case TYPE_SLOAD:
7427 case TYPE_FPLOAD:
7428 /* If a load, then the dependence must be on the memory address. If
7429 the addresses aren't equal, then it might be a false dependency. */
7430 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7431 {
7432 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7433 || GET_CODE (SET_DEST (dep_pat)) != MEM
7434 || GET_CODE (SET_SRC (pat)) != MEM
7435 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7436 XEXP (SET_SRC (pat), 0)))
7437 return cost + 2;
7438
7439 return cost + 8;
7440 }
7441 break;
7442
7443 case TYPE_BRANCH:
7444 /* Compare to branch latency is 0. There is no benefit from
7445 separating compare and branch. */
7446 if (dep_type == TYPE_COMPARE)
7447 return 0;
7448 /* Floating point compare to branch latency is less than
7449 compare to conditional move. */
7450 if (dep_type == TYPE_FPCMP)
7451 return cost - 1;
7452 break;
7453 default:
7454 break;
7455 }
7456 break;
7457
7458 case REG_DEP_ANTI:
7459 /* Anti-dependencies only penalize the fpu unit. */
7460 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7461 return 0;
7462 break;
7463
7464 default:
7465 break;
7466 }
7467
7468 return cost;
7469 }
7470
7471 static int
7472 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7473 {
7474 switch (sparc_cpu)
7475 {
7476 case PROCESSOR_SUPERSPARC:
7477 cost = supersparc_adjust_cost (insn, link, dep, cost);
7478 break;
7479 case PROCESSOR_HYPERSPARC:
7480 case PROCESSOR_SPARCLITE86X:
7481 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7482 break;
7483 default:
7484 break;
7485 }
7486 return cost;
7487 }
7488
7489 static void
7490 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7491 int sched_verbose ATTRIBUTE_UNUSED,
7492 int max_ready ATTRIBUTE_UNUSED)
7493 {
7494 }
7495
7496 static int
7497 sparc_use_sched_lookahead (void)
7498 {
7499 if (sparc_cpu == PROCESSOR_NIAGARA
7500 || sparc_cpu == PROCESSOR_NIAGARA2)
7501 return 0;
7502 if (sparc_cpu == PROCESSOR_ULTRASPARC
7503 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7504 return 4;
7505 if ((1 << sparc_cpu) &
7506 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7507 (1 << PROCESSOR_SPARCLITE86X)))
7508 return 3;
7509 return 0;
7510 }
7511
7512 static int
7513 sparc_issue_rate (void)
7514 {
7515 switch (sparc_cpu)
7516 {
7517 case PROCESSOR_NIAGARA:
7518 case PROCESSOR_NIAGARA2:
7519 default:
7520 return 1;
7521 case PROCESSOR_V9:
7522 /* Assume V9 processors are capable of at least dual-issue. */
7523 return 2;
7524 case PROCESSOR_SUPERSPARC:
7525 return 3;
7526 case PROCESSOR_HYPERSPARC:
7527 case PROCESSOR_SPARCLITE86X:
7528 return 2;
7529 case PROCESSOR_ULTRASPARC:
7530 case PROCESSOR_ULTRASPARC3:
7531 return 4;
7532 }
7533 }
7534
7535 static int
7536 set_extends (rtx insn)
7537 {
7538 register rtx pat = PATTERN (insn);
7539
7540 switch (GET_CODE (SET_SRC (pat)))
7541 {
7542 /* Load and some shift instructions zero extend. */
7543 case MEM:
7544 case ZERO_EXTEND:
7545 /* sethi clears the high bits */
7546 case HIGH:
7547 /* LO_SUM is used with sethi. sethi cleared the high
7548 bits and the values used with lo_sum are positive */
7549 case LO_SUM:
7550 /* Store flag stores 0 or 1 */
7551 case LT: case LTU:
7552 case GT: case GTU:
7553 case LE: case LEU:
7554 case GE: case GEU:
7555 case EQ:
7556 case NE:
7557 return 1;
7558 case AND:
7559 {
7560 rtx op0 = XEXP (SET_SRC (pat), 0);
7561 rtx op1 = XEXP (SET_SRC (pat), 1);
7562 if (GET_CODE (op1) == CONST_INT)
7563 return INTVAL (op1) >= 0;
7564 if (GET_CODE (op0) != REG)
7565 return 0;
7566 if (sparc_check_64 (op0, insn) == 1)
7567 return 1;
7568 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7569 }
7570 case IOR:
7571 case XOR:
7572 {
7573 rtx op0 = XEXP (SET_SRC (pat), 0);
7574 rtx op1 = XEXP (SET_SRC (pat), 1);
7575 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7576 return 0;
7577 if (GET_CODE (op1) == CONST_INT)
7578 return INTVAL (op1) >= 0;
7579 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7580 }
7581 case LSHIFTRT:
7582 return GET_MODE (SET_SRC (pat)) == SImode;
7583 /* Positive integers leave the high bits zero. */
7584 case CONST_DOUBLE:
7585 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7586 case CONST_INT:
7587 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7588 case ASHIFTRT:
7589 case SIGN_EXTEND:
7590 return - (GET_MODE (SET_SRC (pat)) == SImode);
7591 case REG:
7592 return sparc_check_64 (SET_SRC (pat), insn);
7593 default:
7594 return 0;
7595 }
7596 }
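
/* For example, (set (reg:DI %o0) (and:DI (reg:DI %o1) (const_int 255)))
   is classified above as zero-extending, because ANDing with a positive
   constant clears the upper bits; a sign_extend or SImode ashiftrt source
   instead yields -1 (known sign-extended).  */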
7597
7598 /* We _ought_ to have only one kind per function, but... */
7599 static GTY(()) rtx sparc_addr_diff_list;
7600 static GTY(()) rtx sparc_addr_list;
7601
7602 void
7603 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7604 {
7605 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7606 if (diff)
7607 sparc_addr_diff_list
7608 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7609 else
7610 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7611 }
7612
7613 static void
7614 sparc_output_addr_vec (rtx vec)
7615 {
7616 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7617 int idx, vlen = XVECLEN (body, 0);
7618
7619 #ifdef ASM_OUTPUT_ADDR_VEC_START
7620 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7621 #endif
7622
7623 #ifdef ASM_OUTPUT_CASE_LABEL
7624 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7625 NEXT_INSN (lab));
7626 #else
7627 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7628 #endif
7629
7630 for (idx = 0; idx < vlen; idx++)
7631 {
7632 ASM_OUTPUT_ADDR_VEC_ELT
7633 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7634 }
7635
7636 #ifdef ASM_OUTPUT_ADDR_VEC_END
7637 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7638 #endif
7639 }
7640
7641 static void
7642 sparc_output_addr_diff_vec (rtx vec)
7643 {
7644 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7645 rtx base = XEXP (XEXP (body, 0), 0);
7646 int idx, vlen = XVECLEN (body, 1);
7647
7648 #ifdef ASM_OUTPUT_ADDR_VEC_START
7649 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7650 #endif
7651
7652 #ifdef ASM_OUTPUT_CASE_LABEL
7653 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7654 NEXT_INSN (lab));
7655 #else
7656 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7657 #endif
7658
7659 for (idx = 0; idx < vlen; idx++)
7660 {
7661 ASM_OUTPUT_ADDR_DIFF_ELT
7662 (asm_out_file,
7663 body,
7664 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7665 CODE_LABEL_NUMBER (base));
7666 }
7667
7668 #ifdef ASM_OUTPUT_ADDR_VEC_END
7669 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7670 #endif
7671 }
7672
7673 static void
7674 sparc_output_deferred_case_vectors (void)
7675 {
7676 rtx t;
7677 int align;
7678
7679 if (sparc_addr_list == NULL_RTX
7680 && sparc_addr_diff_list == NULL_RTX)
7681 return;
7682
7683 /* Align to cache line in the function's code section. */
7684 switch_to_section (current_function_section ());
7685
7686 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7687 if (align > 0)
7688 ASM_OUTPUT_ALIGN (asm_out_file, align);
7689
7690 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7691 sparc_output_addr_vec (XEXP (t, 0));
7692 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7693 sparc_output_addr_diff_vec (XEXP (t, 0));
7694
7695 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7696 }
7697
7698 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7699 unknown. Return 1 if the high bits are zero, -1 if the register is
7700 sign extended. */
7701 int
7702 sparc_check_64 (rtx x, rtx insn)
7703 {
7704 /* If a register is set only once it is safe to ignore insns this
7705 code does not know how to handle. The loop will either recognize
7706 the single set and return the correct value or fail to recognize
7707 it and return 0. */
7708 int set_once = 0;
7709 rtx y = x;
7710
7711 gcc_assert (GET_CODE (x) == REG);
7712
7713 if (GET_MODE (x) == DImode)
7714 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7715
7716 if (flag_expensive_optimizations
7717 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7718 set_once = 1;
7719
7720 if (insn == 0)
7721 {
7722 if (set_once)
7723 insn = get_last_insn_anywhere ();
7724 else
7725 return 0;
7726 }
7727
7728 while ((insn = PREV_INSN (insn)))
7729 {
7730 switch (GET_CODE (insn))
7731 {
7732 case JUMP_INSN:
7733 case NOTE:
7734 break;
7735 case CODE_LABEL:
7736 case CALL_INSN:
7737 default:
7738 if (! set_once)
7739 return 0;
7740 break;
7741 case INSN:
7742 {
7743 rtx pat = PATTERN (insn);
7744 if (GET_CODE (pat) != SET)
7745 return 0;
7746 if (rtx_equal_p (x, SET_DEST (pat)))
7747 return set_extends (insn);
7748 if (y && rtx_equal_p (y, SET_DEST (pat)))
7749 return set_extends (insn);
7750 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7751 return 0;
7752 }
7753 }
7754 }
7755 return 0;
7756 }
7757
7758 /* Returns assembly code to perform a DImode shift using
7759 a 64-bit global or out register on SPARC-V8+. */
7760 const char *
7761 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7762 {
7763 static char asm_code[60];
7764
7765 /* The scratch register is only required when the destination
7766 register is not a 64-bit global or out register. */
7767 if (which_alternative != 2)
7768 operands[3] = operands[0];
7769
7770 /* We can only shift by constants <= 63. */
7771 if (GET_CODE (operands[2]) == CONST_INT)
7772 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7773
7774 if (GET_CODE (operands[1]) == CONST_INT)
7775 {
7776 output_asm_insn ("mov\t%1, %3", operands);
7777 }
7778 else
7779 {
7780 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7781 if (sparc_check_64 (operands[1], insn) <= 0)
7782 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7783 output_asm_insn ("or\t%L1, %3, %3", operands);
7784 }
7785
7786 strcpy (asm_code, opcode);
7787
7788 if (which_alternative != 2)
7789 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7790 else
7791 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7792 }
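
/* For illustration, when the destination is itself a 64-bit global or out
   register (so %3 == %0 above), a DImode "sllx" expands to roughly

	sllx	%H1, 32, %0	! high word into bits 63:32
	srl	%L1, 0, %L1	! zero-extend the low word if necessary
	or	%L1, %0, %0	! assemble the full 64-bit value
	sllx	%0, %2, %L0	! the shift proper
	srlx	%L0, 32, %H0	! split the result back into two words

   where the srl is omitted whenever sparc_check_64 proves the low word
   already zero-extended.  */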
7793 \f
7794 /* Output rtl to increment the profiler label LABELNO
7795 for profiling a function entry. */
7796
7797 void
7798 sparc_profile_hook (int labelno)
7799 {
7800 char buf[32];
7801 rtx lab, fun;
7802
7803 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7804 if (NO_PROFILE_COUNTERS)
7805 {
7806 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7807 }
7808 else
7809 {
7810 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7811 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7812 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7813 }
7814 }
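
/* In other words, when counters are in use the hook passes the address
   of the internal label .LP<labelno> (the counter word) as the single
   argument to MCOUNT_FUNCTION; with NO_PROFILE_COUNTERS it emits a plain
   zero-argument call.  */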
7815 \f
7816 #ifdef OBJECT_FORMAT_ELF
7817 static void
7818 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7819 tree decl)
7820 {
7821 if (flags & SECTION_MERGE)
7822 {
7823 /* entsize cannot be expressed in this section attribute
7824 encoding style. */
7825 default_elf_asm_named_section (name, flags, decl);
7826 return;
7827 }
7828
7829 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7830
7831 if (!(flags & SECTION_DEBUG))
7832 fputs (",#alloc", asm_out_file);
7833 if (flags & SECTION_WRITE)
7834 fputs (",#write", asm_out_file);
7835 if (flags & SECTION_TLS)
7836 fputs (",#tls", asm_out_file);
7837 if (flags & SECTION_CODE)
7838 fputs (",#execinstr", asm_out_file);
7839
7840 /* ??? Handle SECTION_BSS. */
7841
7842 fputc ('\n', asm_out_file);
7843 }
7844 #endif /* OBJECT_FORMAT_ELF */
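
/* For example, a writable section named ".foo" comes out of the routine
   above in the Sun assembler syntax

	.section	".foo",#alloc,#write

   rather than with the usual ELF "aw" flags string emitted by
   default_elf_asm_named_section.  */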
7845
7846 /* We do not allow indirect calls to be optimized into sibling calls.
7847
7848 We cannot use sibling calls when delayed branches are disabled
7849 because they will likely require the call delay slot to be filled.
7850
7851 Also, on SPARC 32-bit we cannot emit a sibling call when the
7852 current function returns a structure. This is because the "unimp
7853 after call" convention would cause the callee to return to the
7854 wrong place. The generic code already disallows cases where the
7855 function being called returns a structure.
7856
7857 It may seem strange how this last case could occur. Usually there
7858 is code after the call which jumps to epilogue code which dumps the
7859 return value into the struct return area. That ought to invalidate
7860 the sibling call right? Well, in the C++ case we can end up passing
7861 the pointer to the struct return area to a constructor (which returns
7862 void) and then nothing else happens. Such a sibling call would look
7863 valid without the added check here.
7864
7865 VxWorks PIC PLT entries require the global pointer to be initialized
7866 on entry. We therefore can't emit sibling calls to them. */
7867 static bool
7868 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7869 {
7870 return (decl
7871 && flag_delayed_branch
7872 && (TARGET_ARCH64 || ! cfun->returns_struct)
7873 && !(TARGET_VXWORKS_RTP
7874 && flag_pic
7875 && !targetm.binds_local_p (decl)));
7876 }
7877 \f
7878 /* libfunc renaming. */
7879 #include "config/gofast.h"
7880
7881 static void
7882 sparc_init_libfuncs (void)
7883 {
7884 if (TARGET_ARCH32)
7885 {
7886 /* Use the subroutines that Sun's library provides for integer
7887 multiply and divide. The `*' prevents an underscore from
7888 being prepended by the compiler. .umul is a little faster
7889 than .mul. */
7890 set_optab_libfunc (smul_optab, SImode, "*.umul");
7891 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7892 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7893 set_optab_libfunc (smod_optab, SImode, "*.rem");
7894 set_optab_libfunc (umod_optab, SImode, "*.urem");
7895
7896 /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI. */
7897 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7898 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7899 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7900 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7901 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7902
7903 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7904 is because with soft-float, the SFmode and DFmode sqrt
7905 instructions will be absent, and the compiler will notice and
7906 try to use the TFmode sqrt instruction for calls to the
7907 builtin function sqrt, but this fails. */
7908 if (TARGET_FPU)
7909 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7910
7911 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7912 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7913 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7914 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7915 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7916 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7917
7918 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7919 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7920 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7921 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7922
7923 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7924 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7925 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7926 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7927
7928 if (DITF_CONVERSION_LIBFUNCS)
7929 {
7930 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7931 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7932 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7933 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7934 }
7935
7936 if (SUN_CONVERSION_LIBFUNCS)
7937 {
7938 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7939 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7940 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7941 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7942 }
7943 }
7944 if (TARGET_ARCH64)
7945 {
7946 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7947 do not exist in the library. Make sure the compiler does not
7948 emit calls to them by accident. (It should always use the
7949 hardware instructions.) */
7950 set_optab_libfunc (smul_optab, SImode, 0);
7951 set_optab_libfunc (sdiv_optab, SImode, 0);
7952 set_optab_libfunc (udiv_optab, SImode, 0);
7953 set_optab_libfunc (smod_optab, SImode, 0);
7954 set_optab_libfunc (umod_optab, SImode, 0);
7955
7956 if (SUN_INTEGER_MULTIPLY_64)
7957 {
7958 set_optab_libfunc (smul_optab, DImode, "__mul64");
7959 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7960 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7961 set_optab_libfunc (smod_optab, DImode, "__rem64");
7962 set_optab_libfunc (umod_optab, DImode, "__urem64");
7963 }
7964
7965 if (SUN_CONVERSION_LIBFUNCS)
7966 {
7967 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7968 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7969 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7970 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7971 }
7972 }
7973
7974 gofast_maybe_init_libfuncs ();
7975 }
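
/* The upshot is that e.g. a TFmode addition on 32-bit SPARC becomes a
   call to _Q_add per the ABI above, while on 64-bit SPARC the SImode
   multiply/divide optabs are cleared so only hardware instructions are
   used for them.  */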
7976 \f
7977 #define def_builtin(NAME, CODE, TYPE) \
7978 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7979 NULL_TREE)
7980
7981 /* Implement the TARGET_INIT_BUILTINS target hook.
7982 Create builtin functions for special SPARC instructions. */
7983
7984 static void
7985 sparc_init_builtins (void)
7986 {
7987 if (TARGET_VIS)
7988 sparc_vis_init_builtins ();
7989 }
7990
7991 /* Create builtin functions for VIS 1.0 instructions. */
7992
7993 static void
7994 sparc_vis_init_builtins (void)
7995 {
7996 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7997 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7998 tree v4hi = build_vector_type (intHI_type_node, 4);
7999 tree v2hi = build_vector_type (intHI_type_node, 2);
8000 tree v2si = build_vector_type (intSI_type_node, 2);
8001
8002 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8003 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8004 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8005 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8006 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8007 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8008 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8009 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8010 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8011 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8012 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8013 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8014 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8015 v8qi, v8qi,
8016 intDI_type_node, 0);
8017 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8018 intDI_type_node,
8019 intDI_type_node, 0);
8020 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8021 ptr_type_node,
8022 intSI_type_node, 0);
8023 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8024 ptr_type_node,
8025 intDI_type_node, 0);
8026
8027 /* Packing and expanding vectors. */
8028 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8029 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8030 v8qi_ftype_v2si_v8qi);
8031 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8032 v2hi_ftype_v2si);
8033 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8034 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8035 v8qi_ftype_v4qi_v4qi);
8036
8037 /* Multiplications. */
8038 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8039 v4hi_ftype_v4qi_v4hi);
8040 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8041 v4hi_ftype_v4qi_v2hi);
8042 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8043 v4hi_ftype_v4qi_v2hi);
8044 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8045 v4hi_ftype_v8qi_v4hi);
8046 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8047 v4hi_ftype_v8qi_v4hi);
8048 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8049 v2si_ftype_v4qi_v2hi);
8050 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8051 v2si_ftype_v4qi_v2hi);
8052
8053 /* Data aligning. */
8054 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8055 v4hi_ftype_v4hi_v4hi);
8056 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8057 v8qi_ftype_v8qi_v8qi);
8058 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8059 v2si_ftype_v2si_v2si);
8060 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8061 di_ftype_di_di);
8062 if (TARGET_ARCH64)
8063 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8064 ptr_ftype_ptr_di);
8065 else
8066 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8067 ptr_ftype_ptr_si);
8068
8069 /* Pixel distance. */
8070 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8071 di_ftype_v8qi_v8qi_di);
8072 }
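
/* As a usage sketch (user-level code compiled with -mvis, not code that
   belongs in this file):

     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi pack (v4hi x)
     {
       return __builtin_vis_fpack16 (x);
     }

   maps directly onto the fpack16 instruction through the
   CODE_FOR_fpack16_vis pattern registered above.  */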
8073
8074 /* Handle TARGET_EXPAND_BUILTIN target hook.
8075 Expand builtin functions for sparc intrinsics. */
8076
8077 static rtx
8078 sparc_expand_builtin (tree exp, rtx target,
8079 rtx subtarget ATTRIBUTE_UNUSED,
8080 enum machine_mode tmode ATTRIBUTE_UNUSED,
8081 int ignore ATTRIBUTE_UNUSED)
8082 {
8083 tree arg;
8084 call_expr_arg_iterator iter;
8085 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8086 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8087 rtx pat, op[4];
8088 enum machine_mode mode[4];
8089 int arg_count = 0;
8090
8091 mode[0] = insn_data[icode].operand[0].mode;
8092 if (!target
8093 || GET_MODE (target) != mode[0]
8094 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8095 op[0] = gen_reg_rtx (mode[0]);
8096 else
8097 op[0] = target;
8098
8099 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8100 {
8101 arg_count++;
8102 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8103 op[arg_count] = expand_normal (arg);
8104
8105 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8106 mode[arg_count]))
8107 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8108 }
8109
8110 switch (arg_count)
8111 {
8112 case 1:
8113 pat = GEN_FCN (icode) (op[0], op[1]);
8114 break;
8115 case 2:
8116 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8117 break;
8118 case 3:
8119 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8120 break;
8121 default:
8122 gcc_unreachable ();
8123 }
8124
8125 if (!pat)
8126 return NULL_RTX;
8127
8128 emit_insn (pat);
8129
8130 return op[0];
8131 }
8132
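/* Subroutine of sparc_handle_vis_mul8x16 below.  E8 is an unsigned 8-bit
   value interpreted as a fixed-point fraction scaled by 256, so the 8x16
   product is scaled back down by 256, rounding to nearest.  For example,
   e8 = 128 (i.e. 0.5) and e16 = 100 give (128 * 100 + 128) / 256 = 50.  */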
8133 static int
8134 sparc_vis_mul8x16 (int e8, int e16)
8135 {
8136 return (e8 * e16 + 128) / 256;
8137 }
8138
8139 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8140 by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists must be integer
8141 constants.  A tree list with the results of the multiplications is returned,
8142 and each element in the list is of INNER_TYPE. */
8143
8144 static tree
8145 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8146 {
8147 tree n_elts = NULL_TREE;
8148 int scale;
8149
8150 switch (fncode)
8151 {
8152 case CODE_FOR_fmul8x16_vis:
8153 for (; elts0 && elts1;
8154 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8155 {
8156 int val
8157 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8158 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8159 n_elts = tree_cons (NULL_TREE,
8160 build_int_cst (inner_type, val),
8161 n_elts);
8162 }
8163 break;
8164
8165 case CODE_FOR_fmul8x16au_vis:
8166 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8167
8168 for (; elts0; elts0 = TREE_CHAIN (elts0))
8169 {
8170 int val
8171 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8172 scale);
8173 n_elts = tree_cons (NULL_TREE,
8174 build_int_cst (inner_type, val),
8175 n_elts);
8176 }
8177 break;
8178
8179 case CODE_FOR_fmul8x16al_vis:
8180 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8181
8182 for (; elts0; elts0 = TREE_CHAIN (elts0))
8183 {
8184 int val
8185 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8186 scale);
8187 n_elts = tree_cons (NULL_TREE,
8188 build_int_cst (inner_type, val),
8189 n_elts);
8190 }
8191 break;
8192
8193 default:
8194 gcc_unreachable ();
8195 }
8196
8197 return nreverse (n_elts);
8198 }
8199 
8200 /* Handle TARGET_FOLD_BUILTIN target hook.
8201 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8202 result of the function call is ignored. NULL_TREE is returned if the
8203 function could not be folded. */
8204
8205 static tree
8206 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8207 {
8208 tree arg0, arg1, arg2;
8209 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8210
8211 if (ignore
8212 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8213 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8214 return fold_convert (rtype, integer_zero_node);
8215
8216 switch (DECL_FUNCTION_CODE (fndecl))
8217 {
8218 case CODE_FOR_fexpand_vis:
8219 arg0 = TREE_VALUE (arglist);
8220 STRIP_NOPS (arg0);
8221
8222 if (TREE_CODE (arg0) == VECTOR_CST)
8223 {
8224 tree inner_type = TREE_TYPE (rtype);
8225 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8226 tree n_elts = NULL_TREE;
8227
8228 for (; elts; elts = TREE_CHAIN (elts))
8229 {
8230 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8231 n_elts = tree_cons (NULL_TREE,
8232 build_int_cst (inner_type, val),
8233 n_elts);
8234 }
8235 return build_vector (rtype, nreverse (n_elts));
8236 }
8237 break;
8238
8239 case CODE_FOR_fmul8x16_vis:
8240 case CODE_FOR_fmul8x16au_vis:
8241 case CODE_FOR_fmul8x16al_vis:
8242 arg0 = TREE_VALUE (arglist);
8243 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8244 STRIP_NOPS (arg0);
8245 STRIP_NOPS (arg1);
8246
8247 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8248 {
8249 tree inner_type = TREE_TYPE (rtype);
8250 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8251 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8252 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8253 inner_type, elts0, elts1);
8254
8255 return build_vector (rtype, n_elts);
8256 }
8257 break;
8258
8259 case CODE_FOR_fpmerge_vis:
8260 arg0 = TREE_VALUE (arglist);
8261 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8262 STRIP_NOPS (arg0);
8263 STRIP_NOPS (arg1);
8264
8265 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8266 {
8267 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8268 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8269 tree n_elts = NULL_TREE;
8270
8271 for (; elts0 && elts1;
8272 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8273 {
8274 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8275 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8276 }
8277
8278 return build_vector (rtype, nreverse (n_elts));
8279 }
8280 break;
8281
8282 case CODE_FOR_pdist_vis:
8283 arg0 = TREE_VALUE (arglist);
8284 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8285 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8286 STRIP_NOPS (arg0);
8287 STRIP_NOPS (arg1);
8288 STRIP_NOPS (arg2);
8289
8290 if (TREE_CODE (arg0) == VECTOR_CST
8291 && TREE_CODE (arg1) == VECTOR_CST
8292 && TREE_CODE (arg2) == INTEGER_CST)
8293 {
8294 int overflow = 0;
8295 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8296 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8297 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8298 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8299
8300 for (; elts0 && elts1;
8301 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8302 {
8303 unsigned HOST_WIDE_INT
8304 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8305 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8306 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8307 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8308
8309 unsigned HOST_WIDE_INT l;
8310 HOST_WIDE_INT h;
8311
8312 overflow |= neg_double (low1, high1, &l, &h);
8313 overflow |= add_double (low0, high0, l, h, &l, &h);
8314 if (h < 0)
8315 overflow |= neg_double (l, h, &l, &h);
8316
8317 overflow |= add_double (low, high, l, h, &low, &high);
8318 }
8319
8320 gcc_assert (overflow == 0);
8321
8322 return build_int_cst_wide (rtype, low, high);
8323 }
8324
8325 default:
8326 break;
8327 }
8328
8329 return NULL_TREE;
8330 }
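
/* For instance, folding __builtin_vis_pdist over the constant vectors
   { 1, 5, 0, 0, 0, 0, 0, 0 } and { 4, 2, 0, 0, 0, 0, 0, 0 } with an
   accumulator of 10 yields |1-4| + |5-2| + 10 = 16; the neg_double/
   add_double dance above keeps the arithmetic exact in the full
   double-HOST_WIDE_INT precision.  */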
8331 \f
8332 int
8333 sparc_extra_constraint_check (rtx op, int c, int strict)
8334 {
8335 int reload_ok_mem;
8336
8337 if (TARGET_ARCH64
8338 && (c == 'T' || c == 'U'))
8339 return 0;
8340
8341 switch (c)
8342 {
8343 case 'Q':
8344 return fp_sethi_p (op);
8345
8346 case 'R':
8347 return fp_mov_p (op);
8348
8349 case 'S':
8350 return fp_high_losum_p (op);
8351
8352 case 'U':
8353 if (! strict
8354 || (GET_CODE (op) == REG
8355 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8356 || reg_renumber[REGNO (op)] >= 0)))
8357 return register_ok_for_ldd (op);
8358
8359 return 0;
8360
8361 case 'W':
8362 case 'T':
8363 break;
8364
8365 case 'Y':
8366 return const_zero_operand (op, GET_MODE (op));
8367
8368 default:
8369 return 0;
8370 }
8371
8372 /* Our memory extra constraints have to emulate the
8373 behavior of 'm' and 'o' in order for reload to work
8374 correctly. */
8375 if (GET_CODE (op) == MEM)
8376 {
8377 reload_ok_mem = 0;
8378 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8379 && (! strict
8380 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8381 reload_ok_mem = 1;
8382 }
8383 else
8384 {
8385 reload_ok_mem = (reload_in_progress
8386 && GET_CODE (op) == REG
8387 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8388 && reg_renumber [REGNO (op)] < 0);
8389 }
8390
8391 return reload_ok_mem;
8392 }
8393
8394 /* ??? This duplicates information provided to the compiler by the
8395 ??? scheduler description. Some day, teach genautomata to output
8396 ??? the latencies and then CSE will just use that. */
8397
8398 static bool
8399 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8400 {
8401 enum machine_mode mode = GET_MODE (x);
8402 bool float_mode_p = FLOAT_MODE_P (mode);
8403
8404 switch (code)
8405 {
8406 case CONST_INT:
8407 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8408 {
8409 *total = 0;
8410 return true;
8411 }
8412 /* FALLTHRU */
8413
8414 case HIGH:
8415 *total = 2;
8416 return true;
8417
8418 case CONST:
8419 case LABEL_REF:
8420 case SYMBOL_REF:
8421 *total = 4;
8422 return true;
8423
8424 case CONST_DOUBLE:
8425 if (GET_MODE (x) == VOIDmode
8426 && ((CONST_DOUBLE_HIGH (x) == 0
8427 && CONST_DOUBLE_LOW (x) < 0x1000)
8428 || (CONST_DOUBLE_HIGH (x) == -1
8429 && CONST_DOUBLE_LOW (x) < 0
8430 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8431 *total = 0;
8432 else
8433 *total = 8;
8434 return true;
8435
8436 case MEM:
8437 /* If outer-code was a sign or zero extension, a cost
8438 of COSTS_N_INSNS (1) was already added in. This is
8439 why we are subtracting it back out. */
8440 if (outer_code == ZERO_EXTEND)
8441 {
8442 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8443 }
8444 else if (outer_code == SIGN_EXTEND)
8445 {
8446 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8447 }
8448 else if (float_mode_p)
8449 {
8450 *total = sparc_costs->float_load;
8451 }
8452 else
8453 {
8454 *total = sparc_costs->int_load;
8455 }
8456
8457 return true;
8458
8459 case PLUS:
8460 case MINUS:
8461 if (float_mode_p)
8462 *total = sparc_costs->float_plusminus;
8463 else
8464 *total = COSTS_N_INSNS (1);
8465 return false;
8466
8467 case MULT:
8468 if (float_mode_p)
8469 *total = sparc_costs->float_mul;
8470 else if (! TARGET_HARD_MUL)
8471 *total = COSTS_N_INSNS (25);
8472 else
8473 {
8474 int bit_cost;
8475
8476 bit_cost = 0;
8477 if (sparc_costs->int_mul_bit_factor)
8478 {
8479 int nbits;
8480
8481 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8482 {
8483 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8484 for (nbits = 0; value != 0; value &= value - 1)
8485 nbits++;
8486 }
8487 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8488 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8489 {
8490 rtx x1 = XEXP (x, 1);
8491 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8492 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8493
8494 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8495 nbits++;
8496 for (; value2 != 0; value2 &= value2 - 1)
8497 nbits++;
8498 }
8499 else
8500 nbits = 7;
8501
8502 if (nbits < 3)
8503 nbits = 3;
8504 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8505 bit_cost = COSTS_N_INSNS (bit_cost);
8506 }
8507
8508 if (mode == DImode)
8509 *total = sparc_costs->int_mulX + bit_cost;
8510 else
8511 *total = sparc_costs->int_mul + bit_cost;
8512 }
8513 return false;
8514
8515 case ASHIFT:
8516 case ASHIFTRT:
8517 case LSHIFTRT:
8518 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8519 return false;
8520
8521 case DIV:
8522 case UDIV:
8523 case MOD:
8524 case UMOD:
8525 if (float_mode_p)
8526 {
8527 if (mode == DFmode)
8528 *total = sparc_costs->float_div_df;
8529 else
8530 *total = sparc_costs->float_div_sf;
8531 }
8532 else
8533 {
8534 if (mode == DImode)
8535 *total = sparc_costs->int_divX;
8536 else
8537 *total = sparc_costs->int_div;
8538 }
8539 return false;
8540
8541 case NEG:
8542 if (! float_mode_p)
8543 {
8544 *total = COSTS_N_INSNS (1);
8545 return false;
8546 }
8547 /* FALLTHRU */
8548
8549 case ABS:
8550 case FLOAT:
8551 case UNSIGNED_FLOAT:
8552 case FIX:
8553 case UNSIGNED_FIX:
8554 case FLOAT_EXTEND:
8555 case FLOAT_TRUNCATE:
8556 *total = sparc_costs->float_move;
8557 return false;
8558
8559 case SQRT:
8560 if (mode == DFmode)
8561 *total = sparc_costs->float_sqrt_df;
8562 else
8563 *total = sparc_costs->float_sqrt_sf;
8564 return false;
8565
8566 case COMPARE:
8567 if (float_mode_p)
8568 *total = sparc_costs->float_cmp;
8569 else
8570 *total = COSTS_N_INSNS (1);
8571 return false;
8572
8573 case IF_THEN_ELSE:
8574 if (float_mode_p)
8575 *total = sparc_costs->float_cmove;
8576 else
8577 *total = sparc_costs->int_cmove;
8578 return false;
8579
8580 case IOR:
8581 /* Handle the NAND vector patterns. */
8582 if (sparc_vector_mode_supported_p (GET_MODE (x))
8583 && GET_CODE (XEXP (x, 0)) == NOT
8584 && GET_CODE (XEXP (x, 1)) == NOT)
8585 {
8586 *total = COSTS_N_INSNS (1);
8587 return true;
8588 }
8589 else
8590 return false;
8591
8592 default:
8593 return false;
8594 }
8595 }
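
/* As an example of the MULT case, with an int_mul_bit_factor of 2 a
   multiplication by the constant 255 (8 set bits) is costed as
   int_mul + COSTS_N_INSNS ((8 - 3) / 2), modelling early-out multipliers
   that finish sooner on operands with few set bits.  */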
8596
8597 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8598 This is achieved by means of a manual dynamic stack space allocation in
8599 the current frame. We make the assumption that SEQ doesn't contain any
8600 function calls, with the possible exception of calls to the PIC helper. */
8601
8602 static void
8603 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8604 {
8605 /* We must preserve the lowest 16 words for the register save area. */
8606 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8607 /* We really need only 2 words of fresh stack space. */
8608 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8609
8610 rtx slot
8611 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8612 SPARC_STACK_BIAS + offset));
8613
8614 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8615 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8616 if (reg2)
8617 emit_insn (gen_rtx_SET (VOIDmode,
8618 adjust_address (slot, word_mode, UNITS_PER_WORD),
8619 reg2));
8620 emit_insn (seq);
8621 if (reg2)
8622 emit_insn (gen_rtx_SET (VOIDmode,
8623 reg2,
8624 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8625 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8626 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8627 }
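
/* In outline, the sequence emitted above is

	%sp -= SIZE;
	save REG (and REG2) at %sp + SPARC_STACK_BIAS + 16 words;
	... SEQ ...
	reload REG (and REG2) from the same slot;
	%sp += SIZE;

   so SEQ may freely clobber REG and REG2 without a register to spare.  */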
8628
8629 /* Output the assembler code for a thunk function. THUNK_DECL is the
8630 declaration for the thunk function itself, FUNCTION is the decl for
8631 the target function. DELTA is an immediate constant offset to be
8632 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8633 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8634
8635 static void
8636 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8637 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8638 tree function)
8639 {
8640 rtx this, insn, funexp;
8641 unsigned int int_arg_first;
8642
8643 reload_completed = 1;
8644 epilogue_completed = 1;
8645
8646 emit_note (NOTE_INSN_PROLOGUE_END);
8647
8648 if (flag_delayed_branch)
8649 {
8650 /* We will emit a regular sibcall below, so we need to instruct
8651 output_sibcall that we are in a leaf function. */
8652 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8653
8654 /* This will cause final.c to invoke leaf_renumber_regs so we
8655 must behave as if we were in a not-yet-leafified function. */
8656 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8657 }
8658 else
8659 {
8660 /* We will emit the sibcall manually below, so we will need to
8661 manually spill non-leaf registers. */
8662 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8663
8664 /* We really are in a leaf function. */
8665 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8666 }
8667
8668 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8669 returns a structure, the structure return pointer is there instead. */
8670 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8671 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8672 else
8673 this = gen_rtx_REG (Pmode, int_arg_first);
8674
8675 /* Add DELTA. When possible use a plain add, otherwise load it into
8676 a register first. */
8677 if (delta)
8678 {
8679 rtx delta_rtx = GEN_INT (delta);
8680
8681 if (! SPARC_SIMM13_P (delta))
8682 {
8683 rtx scratch = gen_rtx_REG (Pmode, 1);
8684 emit_move_insn (scratch, delta_rtx);
8685 delta_rtx = scratch;
8686 }
8687
8688 /* THIS += DELTA. */
8689 emit_insn (gen_add2_insn (this, delta_rtx));
8690 }
8691
8692 /* Add the word at address (*THIS + VCALL_OFFSET). */
8693 if (vcall_offset)
8694 {
8695 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8696 rtx scratch = gen_rtx_REG (Pmode, 1);
8697
8698 gcc_assert (vcall_offset < 0);
8699
8700 /* SCRATCH = *THIS. */
8701 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8702
8703 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8704 may not have any available scratch register at this point. */
8705 if (SPARC_SIMM13_P (vcall_offset))
8706 ;
8707 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8708 else if (! fixed_regs[5]
8709 /* The below sequence is made up of at least 2 insns,
8710 while the default method may need only one. */
8711 && vcall_offset < -8192)
8712 {
8713 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8714 emit_move_insn (scratch2, vcall_offset_rtx);
8715 vcall_offset_rtx = scratch2;
8716 }
8717 else
8718 {
8719 rtx increment = GEN_INT (-4096);
8720
8721 /* VCALL_OFFSET is a negative number whose typical range can be
8722 estimated as -32768..0 in 32-bit mode. In almost all cases
8723 it is therefore cheaper to emit multiple add insns than
8724 spilling and loading the constant into a register (at least
8725 6 insns). */
8726 while (! SPARC_SIMM13_P (vcall_offset))
8727 {
8728 emit_insn (gen_add2_insn (scratch, increment));
8729 vcall_offset += 4096;
8730 }
8731 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8732 }
8733
8734 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8735 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8736 gen_rtx_PLUS (Pmode,
8737 scratch,
8738 vcall_offset_rtx)));
8739
8740 /* THIS += *(*THIS + VCALL_OFFSET). */
8741 emit_insn (gen_add2_insn (this, scratch));
8742 }
8743
8744 /* Generate a tail call to the target function. */
8745 if (! TREE_USED (function))
8746 {
8747 assemble_external (function);
8748 TREE_USED (function) = 1;
8749 }
8750 funexp = XEXP (DECL_RTL (function), 0);
8751
8752 if (flag_delayed_branch)
8753 {
8754 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8755 insn = emit_call_insn (gen_sibcall (funexp));
8756 SIBLING_CALL_P (insn) = 1;
8757 }
8758 else
8759 {
8760 /* The hoops we have to jump through in order to generate a sibcall
8761 without using delay slots... */
8762 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8763
8764 if (flag_pic)
8765 {
8766 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8767 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8768 start_sequence ();
8769 /* Delay emitting the PIC helper function because it needs to
8770 change the section and we are emitting assembly code. */
8771 load_pic_register (true); /* clobbers %o7 */
8772 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8773 seq = get_insns ();
8774 end_sequence ();
8775 emit_and_preserve (seq, spill_reg, spill_reg2);
8776 }
8777 else if (TARGET_ARCH32)
8778 {
8779 emit_insn (gen_rtx_SET (VOIDmode,
8780 scratch,
8781 gen_rtx_HIGH (SImode, funexp)));
8782 emit_insn (gen_rtx_SET (VOIDmode,
8783 scratch,
8784 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8785 }
8786 else /* TARGET_ARCH64 */
8787 {
8788 switch (sparc_cmodel)
8789 {
8790 case CM_MEDLOW:
8791 case CM_MEDMID:
8792 /* The destination can serve as a temporary. */
8793 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8794 break;
8795
8796 case CM_MEDANY:
8797 case CM_EMBMEDANY:
8798 /* The destination cannot serve as a temporary. */
8799 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8800 start_sequence ();
8801 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8802 seq = get_insns ();
8803 end_sequence ();
8804 emit_and_preserve (seq, spill_reg, 0);
8805 break;
8806
8807 default:
8808 gcc_unreachable ();
8809 }
8810 }
8811
8812 emit_jump_insn (gen_indirect_jump (scratch));
8813 }
8814
8815 emit_barrier ();
8816
8817 /* Run just enough of rest_of_compilation to get the insns emitted.
8818 There's not really enough bulk here to make other passes such as
8819 instruction scheduling worthwhile.  Note that use_thunk calls
8820 assemble_start_function and assemble_end_function. */
8821 insn = get_insns ();
8822 insn_locators_alloc ();
8823 shorten_branches (insn);
8824 final_start_function (insn, file, 1);
8825 final (insn, file, 1);
8826 final_end_function ();
8827 free_after_compilation (cfun);
8828
8829 reload_completed = 0;
8830 epilogue_completed = 0;
8831 }
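
/* In outline, the code emitted above implements

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(long *) (*(char **) this + VCALL_OFFSET);
     goto FUNCTION;

   with all the register juggling needed to do so without a frame.  */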
8832
8833 /* Return true if sparc_output_mi_thunk would be able to output the
8834 assembler code for the thunk function specified by the arguments
8835 it is passed, and false otherwise. */
8836 static bool
8837 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8838 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8839 HOST_WIDE_INT vcall_offset,
8840 const_tree function ATTRIBUTE_UNUSED)
8841 {
8842 /* Bound the loop used in the default method above. */
8843 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8844 }
8845
8846 /* How to allocate a 'struct machine_function'. */
8847
8848 static struct machine_function *
8849 sparc_init_machine_status (void)
8850 {
8851 return ggc_alloc_cleared (sizeof (struct machine_function));
8852 }
8853
8854 /* Locate some local-dynamic symbol still in use by this function
8855 so that we can print its name in local-dynamic base patterns. */
8856
8857 static const char *
8858 get_some_local_dynamic_name (void)
8859 {
8860 rtx insn;
8861
8862 if (cfun->machine->some_ld_name)
8863 return cfun->machine->some_ld_name;
8864
8865 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8866 if (INSN_P (insn)
8867 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8868 return cfun->machine->some_ld_name;
8869
8870 gcc_unreachable ();
8871 }
8872
8873 static int
8874 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8875 {
8876 rtx x = *px;
8877
8878 if (x
8879 && GET_CODE (x) == SYMBOL_REF
8880 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8881 {
8882 cfun->machine->some_ld_name = XSTR (x, 0);
8883 return 1;
8884 }
8885
8886 return 0;
8887 }
8888
8889 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8890 This is called from dwarf2out.c to emit call frame instructions
8891 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8892 static void
8893 sparc_dwarf_handle_frame_unspec (const char *label,
8894 rtx pattern ATTRIBUTE_UNUSED,
8895 int index ATTRIBUTE_UNUSED)
8896 {
8897 gcc_assert (index == UNSPECV_SAVEW);
8898 dwarf2out_window_save (label);
8899 }
8900
8901 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8902 We need to emit DTP-relative relocations. */
8903
8904 static void
8905 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8906 {
8907 switch (size)
8908 {
8909 case 4:
8910 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8911 break;
8912 case 8:
8913 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8914 break;
8915 default:
8916 gcc_unreachable ();
8917 }
8918 output_addr_const (file, x);
8919 fputs (")", file);
8920 }
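
/* For example, for a 4-byte entry referring to the TLS symbol foo this
   emits

	.word	%r_tls_dtpoff32(foo)

   which the linker resolves to foo's offset from the start of its
   module's dynamic TLS block.  */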
8921
8922 /* Do whatever processing is required at the end of a file. */
8923
8924 static void
8925 sparc_file_end (void)
8926 {
8927 /* If we haven't emitted the special PIC helper function, do so now. */
8928 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8929 emit_pic_helper ();
8930
8931 if (NEED_INDICATE_EXEC_STACK)
8932 file_end_indicate_exec_stack ();
8933 }
8934
8935 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8936 /* Implement TARGET_MANGLE_TYPE. */
8937
8938 static const char *
8939 sparc_mangle_type (const_tree type)
8940 {
8941 if (!TARGET_64BIT
8942 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8943 && TARGET_LONG_DOUBLE_128)
8944 return "g";
8945
8946 /* For all other types, use normal C++ mangling. */
8947 return NULL;
8948 }
8949 #endif
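
/* Under the rule above, void f (long double) compiled with
   -mlong-double-128 on 32-bit SPARC mangles as _Z1fg instead of the
   default _Z1fe, keeping the 128-bit type distinct in the C++ ABI.  */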
8950
8951 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing
8952 a 32-bit compare and swap on the word containing the byte or half-word. */
8953
8954 void
8955 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8956 {
8957 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8958 rtx addr = gen_reg_rtx (Pmode);
8959 rtx off = gen_reg_rtx (SImode);
8960 rtx oldv = gen_reg_rtx (SImode);
8961 rtx newv = gen_reg_rtx (SImode);
8962 rtx oldvalue = gen_reg_rtx (SImode);
8963 rtx newvalue = gen_reg_rtx (SImode);
8964 rtx res = gen_reg_rtx (SImode);
8965 rtx resv = gen_reg_rtx (SImode);
8966 rtx memsi, val, mask, end_label, loop_label, cc;
8967
8968 emit_insn (gen_rtx_SET (VOIDmode, addr,
8969 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8970
8971 if (Pmode != SImode)
8972 addr1 = gen_lowpart (SImode, addr1);
8973 emit_insn (gen_rtx_SET (VOIDmode, off,
8974 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8975
8976 memsi = gen_rtx_MEM (SImode, addr);
8977 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8978 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8979
8980 val = force_reg (SImode, memsi);
8981
8982 emit_insn (gen_rtx_SET (VOIDmode, off,
8983 gen_rtx_XOR (SImode, off,
8984 GEN_INT (GET_MODE (mem) == QImode
8985 ? 3 : 2))));
8986
8987 emit_insn (gen_rtx_SET (VOIDmode, off,
8988 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8989
8990 if (GET_MODE (mem) == QImode)
8991 mask = force_reg (SImode, GEN_INT (0xff));
8992 else
8993 mask = force_reg (SImode, GEN_INT (0xffff));
8994
8995 emit_insn (gen_rtx_SET (VOIDmode, mask,
8996 gen_rtx_ASHIFT (SImode, mask, off)));
8997
8998 emit_insn (gen_rtx_SET (VOIDmode, val,
8999 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9000 val)));
9001
9002 oldval = gen_lowpart (SImode, oldval);
9003 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9004 gen_rtx_ASHIFT (SImode, oldval, off)));
9005
9006 newval = gen_lowpart_common (SImode, newval);
9007 emit_insn (gen_rtx_SET (VOIDmode, newv,
9008 gen_rtx_ASHIFT (SImode, newval, off)));
9009
9010 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9011 gen_rtx_AND (SImode, oldv, mask)));
9012
9013 emit_insn (gen_rtx_SET (VOIDmode, newv,
9014 gen_rtx_AND (SImode, newv, mask)));
9015
9016 end_label = gen_label_rtx ();
9017 loop_label = gen_label_rtx ();
9018 emit_label (loop_label);
9019
9020 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9021 gen_rtx_IOR (SImode, oldv, val)));
9022
9023 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9024 gen_rtx_IOR (SImode, newv, val)));
9025
9026 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9027
9028 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9029
9030 emit_insn (gen_rtx_SET (VOIDmode, resv,
9031 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9032 res)));
9033
9034 sparc_compare_op0 = resv;
9035 sparc_compare_op1 = val;
9036 cc = gen_compare_reg (NE);
9037
9038 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9039
9040 sparc_compare_emitted = cc;
9041 emit_jump_insn (gen_bne (loop_label));
9042
9043 emit_label (end_label);
9044
9045 emit_insn (gen_rtx_SET (VOIDmode, res,
9046 gen_rtx_AND (SImode, res, mask)));
9047
9048 emit_insn (gen_rtx_SET (VOIDmode, res,
9049 gen_rtx_LSHIFTRT (SImode, res, off)));
9050
9051 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9052 }
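
/* For example, a QImode compare-and-swap at address A operates on the
   aligned word A & -4: for the byte at offset 1 within a (big-endian)
   word, off becomes (1 ^ 3) << 3 = 16, the mask 0xff << 16 selects that
   byte, and the loop above retries the 32-bit CAS until the other three
   bytes it observed in VAL are unchanged.  */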
9053
9054 #include "gt-sparc.h"