/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;

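/* Illustrative note, added for exposition (not from the original
   source): COSTS_N_INSNS (N) scales a latency of N instructions into
   the units used by the rtl cost hooks, so the "fdivs" entry of
   cypress_costs above models a 37-instruction latency.  sparc_costs is
   repointed at the table matching the CPU selected for tuning at the
   end of sparc_override_options below, and sparc_rtx_costs then reads
   it when pricing rtl expressions.  */
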
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) int struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};

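/* Worked example, added for exposition: in a leaf function the incoming
   registers are renamed to their caller-window equivalents, so
   leaf_reg_remap[24] == 8 maps %i0 onto %o0.  The -1 entries (%o0-%o5,
   %o7, the %l registers and %fp) mark registers that must not appear
   in a function eligible for the leaf optimization.  */
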
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_fundamental_type (tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch name,  tune  arch */
  { (char *)0, "default", 1, 1 },
  { (char *)0, "-mcpu=",  1, 1 },
  { (char *)0, "-mtune=", 1, 0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
         ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}

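/* Worked example, added for exposition: the SFmode constant 1.5f has
   the bit pattern 0x3fc00000.  Its low 10 bits are clear and it does
   not fit in a signed 13-bit immediate, so (assuming the usual
   definitions of SPARC_SIMM13_P and SPARC_SETHI_P in sparc.h)
   fp_sethi_p accepts it while fp_mov_p and fp_high_losum_p both
   reject it.  */
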
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
        {
          addend = XEXP (XEXP (sym, 0), 1);
          sym = XEXP (XEXP (sym, 0), 0);
        }

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
        {
          sym = gen_rtx_PLUS (mode, sym, addend);
          sym = force_operand (sym, operands[0]);
        }
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], mode, 0);

      if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
        {
          emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
          return true;
        }

      if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
        {
          gcc_assert (TARGET_ARCH64);
          emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
          return true;
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                mode,
                                                (reload_in_progress ?
                                                 operands[0] :
                                                 NULL_RTX));
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

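/* Usage sketch, added for exposition: the move expanders in sparc.md
   are expected to call this function along the lines of

     if (sparc_expand_move (SImode, operands))
       DONE;

   where a "true" return means the expander emitted the whole sequence
   itself and a "false" return lets the default move pattern handle the
   (possibly rewritten) operands.  */
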
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here; the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM; this way
         CSE can see everything and reuse intermediate values if it
         wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}

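/* Worked example, added for exposition: for op1 == 0x12345678 the code
   above emits the equivalent of

     sethi %hi(0x12345678), %temp   ! temp = 0x12345678 & ~0x3ff
     or    %temp, 0x278, %op0       ! 0x278 == 0x12345678 & 0x3ff

   with the first insn written as a plain SET of the masked constant so
   that CSE can reuse the intermediate value.  */
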
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility cruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY; we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi  %hi(symbol), %temp1
                        add    %temp1, EMBMEDANY_BASE_REG, %temp2
                        or     %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi  %uhi(symbol), %temp1
                        sethi  %hi(symbol), %temp2
                        or     %temp1, %ulo(symbol), %temp3
                        sllx   %temp3, 32, %temp4
                        or     %temp4, %temp2, %temp5
                        or     %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi  (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not allowed to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations during
   CSE.  We mask out the non-HIGH bits, and match a plain movdi,
   to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}

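/* Worked example, added for exposition: for the positive 32-bit
   constant 0x12345678 (so high_bits == low_bits and is_neg == 0) the
   routine above emits

     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0

   while a negative constant is built from its complement and finished
   with an xor (or a one's complement when the low 10 bits are all
   ones, so the combiner can later spot ANDN-style patterns).  */
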
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}

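/* Worked example, added for exposition: the call
   sparc_emit_set_const64_quick2 (op0, temp, 0x12345678, 0, 32) emits

     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0
     sllx  %op0, 32, %op0

   leaving 0x1234567800000000 in op0.  */
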
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}

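/* Worked example, added for exposition: for the constant
   0x00000003fc000000 (high_bits == 0x3, low_bits == 0xfc000000) the
   loops above find lowest_bit_set == 26 and highest_bit_set == 33, and
   since bits 26..33 form one contiguous run,
   all_bits_between_are_set == 1.  */
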
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}

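/* Worked example, added for exposition: 0x00000003fc000000 from the
   example above has highest_bit_set - lowest_bit_set == 7, which is
   less than 21, so const64_is_2insns returns 1: the run of bits fits
   in a sethi immediate and can be moved into place with one shift.  */
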
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
                                                        unsigned HOST_WIDE_INT,
                                                        int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
                          unsigned HOST_WIDE_INT low_bits,
                          int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}

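/* Worked example, added for exposition: with high_bits == 0x3,
   low_bits == 0xfc000000, lowest_bit_set == 26 and shift == 10, the
   run of bits is renormalized into the sethi field:

     lo == (0xfc000000 >> 26) << 10   == 0x0000fc00
     hi == ((0x3 << (32 - 26)) << 10) == 0x00030000

   so the function returns 0x3fc00, a value SPARC_SETHI_P accepts.  */
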
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
              && (GET_CODE (op0) == SUBREG
                  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits   bits 0  --> 31
     high_bits  bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
        || lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
           && lowest_bit_set != 0)
          || all_bits_between_are_set == 0)
        {
          the_const =
            create_simple_focus_bits (high_bits, low_bits,
                                      lowest_bit_set, 0);
        }
      else if (lowest_bit_set == 0)
        shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_ASHIFT (DImode,
                                                temp,
                                                GEN_INT (shift))));
      else if (shift < 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_LSHIFTRT (DImode,
                                                  temp,
                                                  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or fewer bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
1717 if ((highest_bit_set - lowest_bit_set) < 21)
1718 {
1719 unsigned HOST_WIDE_INT focus_bits =
1720 create_simple_focus_bits (high_bits, low_bits,
1721 lowest_bit_set, 10);
1722
1723 gcc_assert (SPARC_SETHI_P (focus_bits));
1724 gcc_assert (lowest_bit_set != 10);
1725
1726 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1727
1728 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1729 if (lowest_bit_set < 10)
1730 emit_insn (gen_rtx_SET (VOIDmode,
1731 op0,
1732 gen_rtx_LSHIFTRT (DImode, temp,
1733 GEN_INT (10 - lowest_bit_set))));
1734 else if (lowest_bit_set > 10)
1735 emit_insn (gen_rtx_SET (VOIDmode,
1736 op0,
1737 gen_rtx_ASHIFT (DImode, temp,
1738 GEN_INT (lowest_bit_set - 10))));
1739 return;
1740 }
1741
1742 /* 1) sethi %hi(low_bits), %reg
1743 * or %reg, %lo(low_bits), %reg
1744 * 2) sethi %hi(~low_bits), %reg
1745 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1746 */
1747 if (high_bits == 0
1748 || high_bits == 0xffffffff)
1749 {
1750 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1751 (high_bits == 0xffffffff));
1752 return;
1753 }
1754
1755 /* Now, try 3-insn sequences. */
1756
1757 /* 1) sethi %hi(high_bits), %reg
1758 * or %reg, %lo(high_bits), %reg
1759 * sllx %reg, 32, %reg
1760 */
1761 if (low_bits == 0)
1762 {
1763 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1764 return;
1765 }
1766
1767 /* We may be able to do something quick
1768 when the constant is negated, so try that. */
1769 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1770 (~low_bits) & 0xfffffc00))
1771 {
1772 /* NOTE: The trailing bits get XOR'd so we need the
1773 non-negated bits, not the negated ones. */
1774 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1775
1776 if ((((~high_bits) & 0xffffffff) == 0
1777 && ((~low_bits) & 0x80000000) == 0)
1778 || (((~high_bits) & 0xffffffff) == 0xffffffff
1779 && ((~low_bits) & 0x80000000) != 0))
1780 {
1781 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1782
1783 if ((SPARC_SETHI_P (fast_int)
1784 && (~high_bits & 0xffffffff) == 0)
1785 || SPARC_SIMM13_P (fast_int))
1786 emit_insn (gen_safe_SET64 (temp, fast_int));
1787 else
1788 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1789 }
1790 else
1791 {
1792 rtx negated_const;
1793 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1794 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1795 sparc_emit_set_const64 (temp, negated_const);
1796 }
1797
1798 /* If we are XOR'ing with -1, then we should emit a one's complement
1799 instead. This way the combiner will notice logical operations
1800 such as ANDN later on and substitute. */
1801 if (trailing_bits == 0x3ff)
1802 {
1803 emit_insn (gen_rtx_SET (VOIDmode, op0,
1804 gen_rtx_NOT (DImode, temp)));
1805 }
1806 else
1807 {
1808 emit_insn (gen_rtx_SET (VOIDmode,
1809 op0,
1810 gen_safe_XOR64 (temp,
1811 (-0x400 | trailing_bits))));
1812 }
1813 return;
1814 }
1815
1816 /* 1) sethi %hi(xxx), %reg
1817 * or %reg, %lo(xxx), %reg
1818 * sllx %reg, yyy, %reg
1819 *
1820 * ??? This is just a generalized version of the low_bits==0
1821 * thing above, FIXME...
1822 */
1823 if ((highest_bit_set - lowest_bit_set) < 32)
1824 {
1825 unsigned HOST_WIDE_INT focus_bits =
1826 create_simple_focus_bits (high_bits, low_bits,
1827 lowest_bit_set, 0);
1828
1829 /* The cases where all the set bits fit in one half were handled above.  */
1830 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1831
1832 /* So what we know is that the set bits straddle the
1833 middle of the 64-bit word. */
1834 sparc_emit_set_const64_quick2 (op0, temp,
1835 focus_bits, 0,
1836 lowest_bit_set);
1837 return;
1838 }
1839
1840 /* 1) sethi %hi(high_bits), %reg
1841 * or %reg, %lo(high_bits), %reg
1842 * sllx %reg, 32, %reg
1843 * or %reg, low_bits, %reg
1844 */
1845 if (SPARC_SIMM13_P (low_bits)
1846 && ((int) low_bits > 0))
1847 {
1848 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1849 return;
1850 }
1851
1852 /* The easiest way when all else fails is full decomposition.  */
1853 #if 0
1854 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1855 high_bits, low_bits, ~high_bits, ~low_bits);
1856 #endif
1857 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1858 }
1859 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1860
1861 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1862 return the mode to be used for the comparison. For floating-point,
1863 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1864 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1865 processing is needed. */
1866
1867 enum machine_mode
1868 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1869 {
1870 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1871 {
1872 switch (op)
1873 {
1874 case EQ:
1875 case NE:
1876 case UNORDERED:
1877 case ORDERED:
1878 case UNLT:
1879 case UNLE:
1880 case UNGT:
1881 case UNGE:
1882 case UNEQ:
1883 case LTGT:
1884 return CCFPmode;
1885
1886 case LT:
1887 case LE:
1888 case GT:
1889 case GE:
1890 return CCFPEmode;
1891
1892 default:
1893 gcc_unreachable ();
1894 }
1895 }
1896 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1897 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1898 {
1899 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1900 return CCX_NOOVmode;
1901 else
1902 return CC_NOOVmode;
1903 }
1904 else
1905 {
1906 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1907 return CCXmode;
1908 else
1909 return CCmode;
1910 }
1911 }
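/* For instance (illustrative): on a 64-bit target, comparing the DImode
   result of a PLUS selects CCX_NOOVmode, since conditions depending on
   the overflow bit are not valid for the flag-setting arithmetic insn;
   a plain DImode compare selects CCXmode; and an ordered float compare
   such as LT selects CCFPEmode (the signaling fcmpe), while EQ/NE and
   the unordered variants use CCFPmode.  */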
1912
1913 /* X and Y are two things to compare using CODE. Emit the compare insn and
1914 return the rtx for the cc reg in the proper mode. */
1915
1916 rtx
1917 gen_compare_reg (enum rtx_code code)
1918 {
1919 rtx x = sparc_compare_op0;
1920 rtx y = sparc_compare_op1;
1921 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1922 rtx cc_reg;
1923
1924 if (sparc_compare_emitted != NULL_RTX)
1925 {
1926 cc_reg = sparc_compare_emitted;
1927 sparc_compare_emitted = NULL_RTX;
1928 return cc_reg;
1929 }
1930
1931 /* ??? We don't have movcc patterns, so we cannot generate pseudo regs for
1932 the fcc regs (cse can't tell they're really call-clobbered regs and will
1933 remove a duplicate comparison even if there is an intervening function
1934 call; it will then try to reload the cc reg via an int reg, which is why
1935 we need the movcc patterns).  It is possible to provide the movcc
1936 patterns by using the ldxfsr/stxfsr v9 insns.  I tried it: you need two
1937 registers (say %g1,%g5) and it takes about 6 insns.  A better fix would be
1938 to tell cse that CCFPE mode registers (even pseudos) are call
1939 clobbered.  */
1940
1941 /* ??? This is an experiment. Rather than making changes to cse which may
1942 or may not be easy/clean, we do our own cse. This is possible because
1943 we will generate hard registers. Cse knows they're call clobbered (it
1944 doesn't know the same thing about pseudos). If we guess wrong, no big
1945 deal, but if we win, great! */
1946
1947 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1948 #if 1 /* experiment */
1949 {
1950 int reg;
1951 /* We cycle through the registers to ensure they're all exercised. */
1952 static int next_fcc_reg = 0;
1953 /* Previous x,y for each fcc reg. */
1954 static rtx prev_args[4][2];
1955
1956 /* Scan prev_args for x,y. */
1957 for (reg = 0; reg < 4; reg++)
1958 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1959 break;
1960 if (reg == 4)
1961 {
1962 reg = next_fcc_reg;
1963 prev_args[reg][0] = x;
1964 prev_args[reg][1] = y;
1965 next_fcc_reg = (next_fcc_reg + 1) & 3;
1966 }
1967 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
1968 }
1969 #else
1970 cc_reg = gen_reg_rtx (mode);
1971 #endif /* ! experiment */
1972 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1973 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
1974 else
1975 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
1976
1977 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
1978 gen_rtx_COMPARE (mode, x, y)));
1979
1980 return cc_reg;
1981 }
1982
1983 /* This function is used for v9 only.
1984 CODE is the code for an Scc's comparison.
1985 OPERANDS[0] is the target of the Scc insn.
1986 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
1987 been generated yet).
1988
1989 This function is needed to turn
1990
1991 (set (reg:SI 110)
1992 (gt (reg:CCX 100 %icc)
1993 (const_int 0)))
1994 into
1995 (set (reg:SI 110)
1996 (gt:DI (reg:CCX 100 %icc)
1997 (const_int 0)))
1998
1999 I.e., the instruction recognizer needs to see the mode of the comparison to
2000 find the right instruction. We could use "gt:DI" right in the
2001 define_expand, but leaving it out allows us to handle DI, SI, etc.
2002
2003 We refer to the global sparc compare operands sparc_compare_op0 and
2004 sparc_compare_op1. */
2005
2006 int
2007 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2008 {
2009 if (! TARGET_ARCH64
2010 && (GET_MODE (sparc_compare_op0) == DImode
2011 || GET_MODE (operands[0]) == DImode))
2012 return 0;
2013
2014 /* Try to use the movrCC insns. */
2015 if (TARGET_ARCH64
2016 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2017 && sparc_compare_op1 == const0_rtx
2018 && v9_regcmp_p (compare_code))
2019 {
2020 rtx op0 = sparc_compare_op0;
2021 rtx temp;
2022
2023 /* Special case for op0 != 0. This can be done with one instruction if
2024 operands[0] == sparc_compare_op0. */
2025
2026 if (compare_code == NE
2027 && GET_MODE (operands[0]) == DImode
2028 && rtx_equal_p (op0, operands[0]))
2029 {
2030 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2031 gen_rtx_IF_THEN_ELSE (DImode,
2032 gen_rtx_fmt_ee (compare_code, DImode,
2033 op0, const0_rtx),
2034 const1_rtx,
2035 operands[0])));
2036 return 1;
2037 }
2038
2039 if (reg_overlap_mentioned_p (operands[0], op0))
2040 {
2041 /* Handle the case where operands[0] == sparc_compare_op0.
2042 We "early clobber" the result. */
2043 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2044 emit_move_insn (op0, sparc_compare_op0);
2045 }
2046
2047 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2048 if (GET_MODE (op0) != DImode)
2049 {
2050 temp = gen_reg_rtx (DImode);
2051 convert_move (temp, op0, 0);
2052 }
2053 else
2054 temp = op0;
2055 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2056 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2057 gen_rtx_fmt_ee (compare_code, DImode,
2058 temp, const0_rtx),
2059 const1_rtx,
2060 operands[0])));
2061 return 1;
2062 }
2063 else
2064 {
2065 operands[1] = gen_compare_reg (compare_code);
2066
2067 switch (GET_MODE (operands[1]))
2068 {
2069 case CCmode :
2070 case CCXmode :
2071 case CCFPEmode :
2072 case CCFPmode :
2073 break;
2074 default :
2075 gcc_unreachable ();
2076 }
2077 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2078 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2079 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2080 gen_rtx_fmt_ee (compare_code,
2081 GET_MODE (operands[1]),
2082 operands[1], const0_rtx),
2083 const1_rtx, operands[0])));
2084 return 1;
2085 }
2086 }
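/* A sketch of the special case above (illustrative, not necessarily the
   exact output): for DImode "x = (x != 0) ? 1 : x", a single v9
   register-conditional move suffices:

      movrnz  %o0, 1, %o0

   whereas the generic path first zeroes the destination and then
   conditionally moves 1 over it based on the comparison result.  */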
2087
2088 /* Emit a conditional jump insn for the v9 architecture using comparison code
2089 CODE and jump target LABEL.
2090 This function exists to take advantage of the v9 brxx insns. */
2091
2092 void
2093 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2094 {
2095 gcc_assert (sparc_compare_emitted == NULL_RTX);
2096 emit_jump_insn (gen_rtx_SET (VOIDmode,
2097 pc_rtx,
2098 gen_rtx_IF_THEN_ELSE (VOIDmode,
2099 gen_rtx_fmt_ee (code, GET_MODE (op0),
2100 op0, const0_rtx),
2101 gen_rtx_LABEL_REF (VOIDmode, label),
2102 pc_rtx)));
2103 }
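/* E.g. (illustrative): a test such as "if (x > 0)" on a DImode value
   can become a single compare-and-branch on the register:

      brgz,pt  %o0, .Llabel
       nop

   instead of a separate compare followed by a conditional branch.  */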
2104
2105 /* Generate a DFmode part of a hard TFmode register.
2106 REG is the TFmode hard register, LOW is 1 for the
2107 low 64 bits of the register and 0 otherwise.  */
2109 rtx
2110 gen_df_reg (rtx reg, int low)
2111 {
2112 int regno = REGNO (reg);
2113
2114 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2115 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2116 return gen_rtx_REG (DFmode, regno);
2117 }
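/* Example (illustrative): SPARC is big-endian, so for a TFmode value
   living in the FP register quartet starting at %f0 (regno 32), LOW == 0
   yields the DFmode register %f0 and LOW == 1 yields %f2 (regno 34).
   For a TFmode value in 64-bit integer registers the step is a single
   regno rather than two.  */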
2118 \f
2119 /* Generate a call to FUNC_NAME with OPERANDS.  Operand 0 is the return value.
2120 Unlike normal calls, TFmode operands are passed by reference. It is
2121 assumed that no more than 3 operands are required. */
2122
2123 static void
2124 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2125 {
2126 rtx ret_slot = NULL, arg[3], func_sym;
2127 int i;
2128
2129 /* We only expect to be called for conversions, unary, and binary ops. */
2130 gcc_assert (nargs == 2 || nargs == 3);
2131
2132 for (i = 0; i < nargs; ++i)
2133 {
2134 rtx this_arg = operands[i];
2135 rtx this_slot;
2136
2137 /* TFmode arguments and return values are passed by reference. */
2138 if (GET_MODE (this_arg) == TFmode)
2139 {
2140 int force_stack_temp;
2141
2142 force_stack_temp = 0;
2143 if (TARGET_BUGGY_QP_LIB && i == 0)
2144 force_stack_temp = 1;
2145
2146 if (GET_CODE (this_arg) == MEM
2147 && ! force_stack_temp)
2148 this_arg = XEXP (this_arg, 0);
2149 else if (CONSTANT_P (this_arg)
2150 && ! force_stack_temp)
2151 {
2152 this_slot = force_const_mem (TFmode, this_arg);
2153 this_arg = XEXP (this_slot, 0);
2154 }
2155 else
2156 {
2157 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2158
2159 /* Operand 0 is the return value. We'll copy it out later. */
2160 if (i > 0)
2161 emit_move_insn (this_slot, this_arg);
2162 else
2163 ret_slot = this_slot;
2164
2165 this_arg = XEXP (this_slot, 0);
2166 }
2167 }
2168
2169 arg[i] = this_arg;
2170 }
2171
2172 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2173
2174 if (GET_MODE (operands[0]) == TFmode)
2175 {
2176 if (nargs == 2)
2177 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2178 arg[0], GET_MODE (arg[0]),
2179 arg[1], GET_MODE (arg[1]));
2180 else
2181 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2182 arg[0], GET_MODE (arg[0]),
2183 arg[1], GET_MODE (arg[1]),
2184 arg[2], GET_MODE (arg[2]));
2185
2186 if (ret_slot)
2187 emit_move_insn (operands[0], ret_slot);
2188 }
2189 else
2190 {
2191 rtx ret;
2192
2193 gcc_assert (nargs == 2);
2194
2195 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2196 GET_MODE (operands[0]), 1,
2197 arg[1], GET_MODE (arg[1]));
2198
2199 if (ret != operands[0])
2200 emit_move_insn (operands[0], ret);
2201 }
2202 }
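/* For example, a TFmode addition ends up as a call that looks roughly
   like the Sun quad-float ABI routine below (a sketch; the _Qp_*
   prototypes are defined by that ABI, not here):

      void _Qp_add (long double *result, const long double *a,
                    const long double *b);

   with all three TFmode operands passed by reference, which is why MEM
   addresses, constant-pool addresses or stack temporaries are
   substituted above.  */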
2203
2204 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
2205
2206 static void
2207 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2208 {
2209 const char *func;
2210
2211 switch (code)
2212 {
2213 case PLUS:
2214 func = "_Qp_add";
2215 break;
2216 case MINUS:
2217 func = "_Qp_sub";
2218 break;
2219 case MULT:
2220 func = "_Qp_mul";
2221 break;
2222 case DIV:
2223 func = "_Qp_div";
2224 break;
2225 default:
2226 gcc_unreachable ();
2227 }
2228
2229 emit_soft_tfmode_libcall (func, 3, operands);
2230 }
2231
2232 static void
2233 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2234 {
2235 const char *func;
2236
2237 gcc_assert (code == SQRT);
2238 func = "_Qp_sqrt";
2239
2240 emit_soft_tfmode_libcall (func, 2, operands);
2241 }
2242
2243 static void
2244 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2245 {
2246 const char *func;
2247
2248 switch (code)
2249 {
2250 case FLOAT_EXTEND:
2251 switch (GET_MODE (operands[1]))
2252 {
2253 case SFmode:
2254 func = "_Qp_stoq";
2255 break;
2256 case DFmode:
2257 func = "_Qp_dtoq";
2258 break;
2259 default:
2260 gcc_unreachable ();
2261 }
2262 break;
2263
2264 case FLOAT_TRUNCATE:
2265 switch (GET_MODE (operands[0]))
2266 {
2267 case SFmode:
2268 func = "_Qp_qtos";
2269 break;
2270 case DFmode:
2271 func = "_Qp_qtod";
2272 break;
2273 default:
2274 gcc_unreachable ();
2275 }
2276 break;
2277
2278 case FLOAT:
2279 switch (GET_MODE (operands[1]))
2280 {
2281 case SImode:
2282 func = "_Qp_itoq";
2283 break;
2284 case DImode:
2285 func = "_Qp_xtoq";
2286 break;
2287 default:
2288 gcc_unreachable ();
2289 }
2290 break;
2291
2292 case UNSIGNED_FLOAT:
2293 switch (GET_MODE (operands[1]))
2294 {
2295 case SImode:
2296 func = "_Qp_uitoq";
2297 break;
2298 case DImode:
2299 func = "_Qp_uxtoq";
2300 break;
2301 default:
2302 gcc_unreachable ();
2303 }
2304 break;
2305
2306 case FIX:
2307 switch (GET_MODE (operands[0]))
2308 {
2309 case SImode:
2310 func = "_Qp_qtoi";
2311 break;
2312 case DImode:
2313 func = "_Qp_qtox";
2314 break;
2315 default:
2316 gcc_unreachable ();
2317 }
2318 break;
2319
2320 case UNSIGNED_FIX:
2321 switch (GET_MODE (operands[0]))
2322 {
2323 case SImode:
2324 func = "_Qp_qtoui";
2325 break;
2326 case DImode:
2327 func = "_Qp_qtoux";
2328 break;
2329 default:
2330 gcc_unreachable ();
2331 }
2332 break;
2333
2334 default:
2335 gcc_unreachable ();
2336 }
2337
2338 emit_soft_tfmode_libcall (func, 2, operands);
2339 }
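/* In summary, the routines selected above are (Sun quad-float ABI
   naming: s/d/q for single/double/quad, i/x for 32/64-bit signed
   integers, ui/ux for the unsigned ones):

      FLOAT_EXTEND    _Qp_stoq   _Qp_dtoq
      FLOAT_TRUNCATE  _Qp_qtos   _Qp_qtod
      FLOAT           _Qp_itoq   _Qp_xtoq
      UNSIGNED_FLOAT  _Qp_uitoq  _Qp_uxtoq
      FIX             _Qp_qtoi   _Qp_qtox
      UNSIGNED_FIX    _Qp_qtoui  _Qp_qtoux  */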
2340
2341 /* Expand a hard-float TFmode operation.  The source operands are
2342 forced into registers as needed.  */
2343
2344 static void
2345 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2346 {
2347 rtx op, dest;
2348
2349 if (GET_RTX_CLASS (code) == RTX_UNARY)
2350 {
2351 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2352 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2353 }
2354 else
2355 {
2356 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2357 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2358 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2359 operands[1], operands[2]);
2360 }
2361
2362 if (register_operand (operands[0], VOIDmode))
2363 dest = operands[0];
2364 else
2365 dest = gen_reg_rtx (GET_MODE (operands[0]));
2366
2367 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2368
2369 if (dest != operands[0])
2370 emit_move_insn (operands[0], dest);
2371 }
2372
2373 void
2374 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2375 {
2376 if (TARGET_HARD_QUAD)
2377 emit_hard_tfmode_operation (code, operands);
2378 else
2379 emit_soft_tfmode_binop (code, operands);
2380 }
2381
2382 void
2383 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2384 {
2385 if (TARGET_HARD_QUAD)
2386 emit_hard_tfmode_operation (code, operands);
2387 else
2388 emit_soft_tfmode_unop (code, operands);
2389 }
2390
2391 void
2392 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2393 {
2394 if (TARGET_HARD_QUAD)
2395 emit_hard_tfmode_operation (code, operands);
2396 else
2397 emit_soft_tfmode_cvt (code, operands);
2398 }
2399 \f
2400 /* Return nonzero if a branch/jump/call instruction will be emitting a
2401 nop into its delay slot.  */
2402
2403 int
2404 empty_delay_slot (rtx insn)
2405 {
2406 rtx seq;
2407
2408 /* If there is no previous instruction (should not happen), return true.  */
2409 if (PREV_INSN (insn) == NULL)
2410 return 1;
2411
2412 seq = NEXT_INSN (PREV_INSN (insn));
2413 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2414 return 0;
2415
2416 return 1;
2417 }
2418
2419 /* Return nonzero if TRIAL can go into the call delay slot. */
2420
2421 int
2422 tls_call_delay (rtx trial)
2423 {
2424 rtx pat;
2425
2426 /* Binutils allows
2427 call __tls_get_addr, %tgd_call (foo)
2428 add %l7, %o0, %o0, %tgd_add (foo)
2429 while Sun as/ld does not. */
2430 if (TARGET_GNU_TLS || !TARGET_TLS)
2431 return 1;
2432
2433 pat = PATTERN (trial);
2434
2435 /* We must reject tgd_add{32|64}, i.e.
2436 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2437 and tldm_add{32|64}, i.e.
2438 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2439 for Sun as/ld. */
2440 if (GET_CODE (pat) == SET
2441 && GET_CODE (SET_SRC (pat)) == PLUS)
2442 {
2443 rtx unspec = XEXP (SET_SRC (pat), 1);
2444
2445 if (GET_CODE (unspec) == UNSPEC
2446 && (XINT (unspec, 1) == UNSPEC_TLSGD
2447 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2448 return 0;
2449 }
2450
2451 return 1;
2452 }
2453
2454 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2455 instruction. RETURN_P is true if the v9 variant 'return' is to be
2456 considered in the test too.
2457
2458 TRIAL must be a SET whose destination is a REG appropriate for the
2459 'restore' instruction or, if RETURN_P is true, for the 'return'
2460 instruction. */
2461
2462 static int
2463 eligible_for_restore_insn (rtx trial, bool return_p)
2464 {
2465 rtx pat = PATTERN (trial);
2466 rtx src = SET_SRC (pat);
2467
2468 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2469 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2470 && arith_operand (src, GET_MODE (src)))
2471 {
2472 if (TARGET_ARCH64)
2473 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2474 else
2475 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2476 }
2477
2478 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2479 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2480 && arith_double_operand (src, GET_MODE (src)))
2481 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2482
2483 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2484 else if (! TARGET_FPU && register_operand (src, SFmode))
2485 return 1;
2486
2487 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2488 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2489 return 1;
2490
2491 /* If we have the 'return' instruction, anything that does not use
2492 local or output registers and can go into a delay slot wins. */
2493 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2494 && (get_attr_in_uncond_branch_delay (trial)
2495 == IN_UNCOND_BRANCH_DELAY_TRUE))
2496 return 1;
2497
2498 /* The 'restore src1,src2,dest' pattern for SImode. */
2499 else if (GET_CODE (src) == PLUS
2500 && register_operand (XEXP (src, 0), SImode)
2501 && arith_operand (XEXP (src, 1), SImode))
2502 return 1;
2503
2504 /* The 'restore src1,src2,dest' pattern for DImode. */
2505 else if (GET_CODE (src) == PLUS
2506 && register_operand (XEXP (src, 0), DImode)
2507 && arith_double_operand (XEXP (src, 1), DImode))
2508 return 1;
2509
2510 /* The 'restore src1,%lo(src2),dest' pattern. */
2511 else if (GET_CODE (src) == LO_SUM
2512 && ! TARGET_CM_MEDMID
2513 && ((register_operand (XEXP (src, 0), SImode)
2514 && immediate_operand (XEXP (src, 1), SImode))
2515 || (TARGET_ARCH64
2516 && register_operand (XEXP (src, 0), DImode)
2517 && immediate_operand (XEXP (src, 1), DImode))))
2518 return 1;
2519
2520 /* The 'restore src,src,dest' pattern. */
2521 else if (GET_CODE (src) == ASHIFT
2522 && (register_operand (XEXP (src, 0), SImode)
2523 || register_operand (XEXP (src, 0), DImode))
2524 && XEXP (src, 1) == const1_rtx)
2525 return 1;
2526
2527 return 0;
2528 }
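/* As an example of what this enables (illustrative): a trailing
   "return x + y;" can fold the addition into the window restore,

      ret
       restore %o1, %o2, %o0

   performing the add in the delay slot while switching register
   windows, rather than spending a separate add instruction.  */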
2529
2530 /* Return nonzero if TRIAL can go into the function return's
2531 delay slot. */
2532
2533 int
2534 eligible_for_return_delay (rtx trial)
2535 {
2536 rtx pat;
2537
2538 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2539 return 0;
2540
2541 if (get_attr_length (trial) != 1)
2542 return 0;
2543
2544 /* If there are any call-saved registers, we would need to scan TRIAL to
2545 make sure it does not reference them.  For now just play it safe.  */
2546 if (num_gfregs)
2547 return 0;
2548
2549 /* If the function uses __builtin_eh_return, the eh_return machinery
2550 occupies the delay slot. */
2551 if (current_function_calls_eh_return)
2552 return 0;
2553
2554 /* In the case of a true leaf function, anything can go into the slot. */
2555 if (sparc_leaf_function_p)
2556 return get_attr_in_uncond_branch_delay (trial)
2557 == IN_UNCOND_BRANCH_DELAY_TRUE;
2558
2559 pat = PATTERN (trial);
2560
2561 /* Otherwise, only operations which can be done in tandem with
2562 a `restore' or `return' insn can go into the delay slot. */
2563 if (GET_CODE (SET_DEST (pat)) != REG
2564 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2565 return 0;
2566
2567 /* If this instruction sets up a floating-point register and we have a return
2568 instruction, it can probably go in.  But restore will not work
2569 with FP_REGS.  */
2570 if (REGNO (SET_DEST (pat)) >= 32)
2571 return (TARGET_V9
2572 && ! epilogue_renumber (&pat, 1)
2573 && (get_attr_in_uncond_branch_delay (trial)
2574 == IN_UNCOND_BRANCH_DELAY_TRUE));
2575
2576 return eligible_for_restore_insn (trial, true);
2577 }
2578
2579 /* Return nonzero if TRIAL can go into the sibling call's
2580 delay slot. */
2581
2582 int
2583 eligible_for_sibcall_delay (rtx trial)
2584 {
2585 rtx pat;
2586
2587 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2588 return 0;
2589
2590 if (get_attr_length (trial) != 1)
2591 return 0;
2592
2593 pat = PATTERN (trial);
2594
2595 if (sparc_leaf_function_p)
2596 {
2597 /* If the tail call is done using the call instruction,
2598 we have to restore %o7 in the delay slot. */
2599 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2600 return 0;
2601
2602 /* %g1 is used to build the function address.  */
2603 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2604 return 0;
2605
2606 return 1;
2607 }
2608
2609 /* Otherwise, only operations which can be done in tandem with
2610 a `restore' insn can go into the delay slot. */
2611 if (GET_CODE (SET_DEST (pat)) != REG
2612 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2613 || REGNO (SET_DEST (pat)) >= 32)
2614 return 0;
2615
2616 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2617 in most cases. */
2618 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2619 return 0;
2620
2621 return eligible_for_restore_insn (trial, false);
2622 }
2623
2624 int
2625 short_branch (int uid1, int uid2)
2626 {
2627 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2628
2629 /* Leave a few words of "slop". */
2630 if (delta >= -1023 && delta <= 1022)
2631 return 1;
2632
2633 return 0;
2634 }
2635
2636 /* Return nonzero if REG is not used after INSN.
2637 We assume REG is a reload reg, and therefore does
2638 not live past labels or calls or jumps. */
2639 int
2640 reg_unused_after (rtx reg, rtx insn)
2641 {
2642 enum rtx_code code, prev_code = UNKNOWN;
2643
2644 while ((insn = NEXT_INSN (insn)))
2645 {
2646 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2647 return 1;
2648
2649 code = GET_CODE (insn);
2650 if (code == CODE_LABEL)
2651 return 1;
2652
2653 if (INSN_P (insn))
2654 {
2655 rtx set = single_set (insn);
2656 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2657 if (set && in_src)
2658 return 0;
2659 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2660 return 1;
2661 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2662 return 0;
2663 }
2664 prev_code = code;
2665 }
2666 return 1;
2667 }
2668 \f
2669 /* Determine if it's legal to put X into the constant pool. This
2670 is not possible if X contains the address of a symbol that is
2671 not constant (TLS) or not known at final link time (PIC). */
2672
2673 static bool
2674 sparc_cannot_force_const_mem (rtx x)
2675 {
2676 switch (GET_CODE (x))
2677 {
2678 case CONST_INT:
2679 case CONST_DOUBLE:
2680 case CONST_VECTOR:
2681 /* Accept all non-symbolic constants. */
2682 return false;
2683
2684 case LABEL_REF:
2685 /* Labels are OK iff we are non-PIC. */
2686 return flag_pic != 0;
2687
2688 case SYMBOL_REF:
2689 /* 'Naked' TLS symbol references are never OK,
2690 non-TLS symbols are OK iff we are non-PIC. */
2691 if (SYMBOL_REF_TLS_MODEL (x))
2692 return true;
2693 else
2694 return flag_pic != 0;
2695
2696 case CONST:
2697 return sparc_cannot_force_const_mem (XEXP (x, 0));
2698 case PLUS:
2699 case MINUS:
2700 return sparc_cannot_force_const_mem (XEXP (x, 0))
2701 || sparc_cannot_force_const_mem (XEXP (x, 1));
2702 case UNSPEC:
2703 return true;
2704 default:
2705 gcc_unreachable ();
2706 }
2707 }
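/* E.g. (illustrative): (const (plus (symbol_ref "foo") (const_int 4)))
   cannot go into the pool when flag_pic is set, because foo's final
   address is unknown until load time, whereas plain integers and, for
   non-PIC code, label and symbol references are all acceptable.  */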
2708 \f
2709 /* PIC support. */
2710 static GTY(()) char pic_helper_symbol_name[256];
2711 static GTY(()) rtx pic_helper_symbol;
2712 static GTY(()) bool pic_helper_emitted_p = false;
2713 static GTY(()) rtx global_offset_table;
2714
2715 /* Ensure that we are not using patterns that are invalid with PIC.  */
2716
2717 int
2718 check_pic (int i)
2719 {
2720 switch (flag_pic)
2721 {
2722 case 1:
2723 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2724 && (GET_CODE (recog_data.operand[i]) != CONST
2725 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2726 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2727 == global_offset_table)
2728 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2729 == CONST))));
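      /* FALLTHRU */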
2730 case 2:
2731 default:
2732 return 1;
2733 }
2734 }
2735
2736 /* Return true if X is an address which needs a temporary register when
2737 reloaded while generating PIC code. */
2738
2739 int
2740 pic_address_needs_scratch (rtx x)
2741 {
2742 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg.  */
2743 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2744 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2745 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2746 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2747 return 1;
2748
2749 return 0;
2750 }
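/* E.g. (illustrative): (const (plus (symbol_ref "foo") (const_int 8192)))
   needs a scratch, since 8192 does not fit in the signed 13-bit
   immediate field, while an offset of 100 could be folded directly.  */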
2751
2752 /* Determine if a given RTX is a valid constant. We already know this
2753 satisfies CONSTANT_P. */
2754
2755 bool
2756 legitimate_constant_p (rtx x)
2757 {
2758 rtx inner;
2759
2760 switch (GET_CODE (x))
2761 {
2762 case SYMBOL_REF:
2763 /* TLS symbols are not constant. */
2764 if (SYMBOL_REF_TLS_MODEL (x))
2765 return false;
2766 break;
2767
2768 case CONST:
2769 inner = XEXP (x, 0);
2770
2771 /* Offsets of TLS symbols are never valid.
2772 Discourage CSE from creating them. */
2773 if (GET_CODE (inner) == PLUS
2774 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2775 return false;
2776 break;
2777
2778 case CONST_DOUBLE:
2779 if (GET_MODE (x) == VOIDmode)
2780 return true;
2781
2782 /* Floating point constants are generally not ok.
2783 The only exception is 0.0 in VIS. */
2784 if (TARGET_VIS
2785 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2786 && const_zero_operand (x, GET_MODE (x)))
2787 return true;
2788
2789 return false;
2790
2791 case CONST_VECTOR:
2792 /* Vector constants are generally not ok.
2793 The only exception is 0 in VIS. */
2794 if (TARGET_VIS
2795 && const_zero_operand (x, GET_MODE (x)))
2796 return true;
2797
2798 return false;
2799
2800 default:
2801 break;
2802 }
2803
2804 return true;
2805 }
2806
2807 /* Determine if a given RTX is a valid constant address. */
2808
2809 bool
2810 constant_address_p (rtx x)
2811 {
2812 switch (GET_CODE (x))
2813 {
2814 case LABEL_REF:
2815 case CONST_INT:
2816 case HIGH:
2817 return true;
2818
2819 case CONST:
2820 if (flag_pic && pic_address_needs_scratch (x))
2821 return false;
2822 return legitimate_constant_p (x);
2823
2824 case SYMBOL_REF:
2825 return !flag_pic && legitimate_constant_p (x);
2826
2827 default:
2828 return false;
2829 }
2830 }
2831
2832 /* Nonzero if the constant value X is a legitimate general operand
2833 when generating PIC code. It is given that flag_pic is on and
2834 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2835
2836 bool
2837 legitimate_pic_operand_p (rtx x)
2838 {
2839 if (pic_address_needs_scratch (x))
2840 return false;
2841 if (SPARC_SYMBOL_REF_TLS_P (x)
2842 || (GET_CODE (x) == CONST
2843 && GET_CODE (XEXP (x, 0)) == PLUS
2844 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2845 return false;
2846 return true;
2847 }
2848
2849 /* Return nonzero if ADDR is a valid memory address.
2850 STRICT specifies whether strict register checking applies. */
2851
2852 int
2853 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2854 {
2855 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2856
2857 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2858 rs1 = addr;
2859 else if (GET_CODE (addr) == PLUS)
2860 {
2861 rs1 = XEXP (addr, 0);
2862 rs2 = XEXP (addr, 1);
2863
2864 /* Canonicalize: REG comes first; if there are no regs,
2865 LO_SUM comes first.  */
2866 if (!REG_P (rs1)
2867 && GET_CODE (rs1) != SUBREG
2868 && (REG_P (rs2)
2869 || GET_CODE (rs2) == SUBREG
2870 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2871 {
2872 rs1 = XEXP (addr, 1);
2873 rs2 = XEXP (addr, 0);
2874 }
2875
2876 if ((flag_pic == 1
2877 && rs1 == pic_offset_table_rtx
2878 && !REG_P (rs2)
2879 && GET_CODE (rs2) != SUBREG
2880 && GET_CODE (rs2) != LO_SUM
2881 && GET_CODE (rs2) != MEM
2882 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2883 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2884 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2885 || ((REG_P (rs1)
2886 || GET_CODE (rs1) == SUBREG)
2887 && RTX_OK_FOR_OFFSET_P (rs2)))
2888 {
2889 imm1 = rs2;
2890 rs2 = NULL;
2891 }
2892 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2893 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2894 {
2895 /* We prohibit REG + REG for TFmode when there are no quad move insns
2896 and we consequently need to split. We do this because REG+REG
2897 is not an offsettable address. If we get the situation in reload
2898 where source and destination of a movtf pattern are both MEMs with
2899 REG+REG address, then only one of them gets converted to an
2900 offsettable address. */
2901 if (mode == TFmode
2902 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2903 return 0;
2904
2905 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2906 optimizing, because then mem_min_alignment is likely to be zero
2907 after reload and the forced split would lack a matching splitter
2908 pattern.  */
2909 if (TARGET_ARCH32 && !optimize
2910 && (mode == DFmode || mode == DImode))
2911 return 0;
2912 }
2913 else if (USE_AS_OFFSETABLE_LO10
2914 && GET_CODE (rs1) == LO_SUM
2915 && TARGET_ARCH64
2916 && ! TARGET_CM_MEDMID
2917 && RTX_OK_FOR_OLO10_P (rs2))
2918 {
2919 rs2 = NULL;
2920 imm1 = XEXP (rs1, 1);
2921 rs1 = XEXP (rs1, 0);
2922 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2923 return 0;
2924 }
2925 }
2926 else if (GET_CODE (addr) == LO_SUM)
2927 {
2928 rs1 = XEXP (addr, 0);
2929 imm1 = XEXP (addr, 1);
2930
2931 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2932 return 0;
2933
2934 /* We can't allow TFmode in 32-bit mode, because an offset greater
2935 than the alignment (8) may cause the LO_SUM to overflow. */
2936 if (mode == TFmode && TARGET_ARCH32)
2937 return 0;
2938 }
2939 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2940 return 1;
2941 else
2942 return 0;
2943
2944 if (GET_CODE (rs1) == SUBREG)
2945 rs1 = SUBREG_REG (rs1);
2946 if (!REG_P (rs1))
2947 return 0;
2948
2949 if (rs2)
2950 {
2951 if (GET_CODE (rs2) == SUBREG)
2952 rs2 = SUBREG_REG (rs2);
2953 if (!REG_P (rs2))
2954 return 0;
2955 }
2956
2957 if (strict)
2958 {
2959 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2960 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2961 return 0;
2962 }
2963 else
2964 {
2965 if ((REGNO (rs1) >= 32
2966 && REGNO (rs1) != FRAME_POINTER_REGNUM
2967 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2968 || (rs2
2969 && (REGNO (rs2) >= 32
2970 && REGNO (rs2) != FRAME_POINTER_REGNUM
2971 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
2972 return 0;
2973 }
2974 return 1;
2975 }
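/* Some concrete cases of the rules above (illustrative): "%o0 + 100"
   and "%o0 + %o1" are both accepted; "%o0 + 0x2000" is not, since the
   offset exceeds the signed 13-bit immediate; and a TFmode access at
   "%o0 + %o1" is rejected without hard-quad support because REG + REG
   is not an offsettable address for the inevitable split.  */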
2976
2977 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
2978
2979 static GTY(()) rtx sparc_tls_symbol;
2980
2981 static rtx
2982 sparc_tls_get_addr (void)
2983 {
2984 if (!sparc_tls_symbol)
2985 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
2986
2987 return sparc_tls_symbol;
2988 }
2989
2990 static rtx
2991 sparc_tls_got (void)
2992 {
2993 rtx temp;
2994 if (flag_pic)
2995 {
2996 current_function_uses_pic_offset_table = 1;
2997 return pic_offset_table_rtx;
2998 }
2999
3000 if (!global_offset_table)
3001 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3002 temp = gen_reg_rtx (Pmode);
3003 emit_move_insn (temp, global_offset_table);
3004 return temp;
3005 }
3006
3007 /* Return 1 if *X is a thread-local symbol. */
3008
3009 static int
3010 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3011 {
3012 return SPARC_SYMBOL_REF_TLS_P (*x);
3013 }
3014
3015 /* Return 1 if X contains a thread-local symbol. */
3016
3017 bool
3018 sparc_tls_referenced_p (rtx x)
3019 {
3020 if (!TARGET_HAVE_TLS)
3021 return false;
3022
3023 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3024 }
3025
3026 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3027 this (thread-local) address. */
3028
3029 rtx
3030 legitimize_tls_address (rtx addr)
3031 {
3032 rtx temp1, temp2, temp3, ret, o0, got, insn;
3033
3034 gcc_assert (! no_new_pseudos);
3035
3036 if (GET_CODE (addr) == SYMBOL_REF)
3037 switch (SYMBOL_REF_TLS_MODEL (addr))
3038 {
3039 case TLS_MODEL_GLOBAL_DYNAMIC:
3040 start_sequence ();
3041 temp1 = gen_reg_rtx (SImode);
3042 temp2 = gen_reg_rtx (SImode);
3043 ret = gen_reg_rtx (Pmode);
3044 o0 = gen_rtx_REG (Pmode, 8);
3045 got = sparc_tls_got ();
3046 emit_insn (gen_tgd_hi22 (temp1, addr));
3047 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3048 if (TARGET_ARCH32)
3049 {
3050 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3051 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3052 addr, const1_rtx));
3053 }
3054 else
3055 {
3056 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3057 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3058 addr, const1_rtx));
3059 }
3060 CALL_INSN_FUNCTION_USAGE (insn)
3061 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3062 CALL_INSN_FUNCTION_USAGE (insn));
3063 insn = get_insns ();
3064 end_sequence ();
3065 emit_libcall_block (insn, ret, o0, addr);
3066 break;
3067
3068 case TLS_MODEL_LOCAL_DYNAMIC:
3069 start_sequence ();
3070 temp1 = gen_reg_rtx (SImode);
3071 temp2 = gen_reg_rtx (SImode);
3072 temp3 = gen_reg_rtx (Pmode);
3073 ret = gen_reg_rtx (Pmode);
3074 o0 = gen_rtx_REG (Pmode, 8);
3075 got = sparc_tls_got ();
3076 emit_insn (gen_tldm_hi22 (temp1));
3077 emit_insn (gen_tldm_lo10 (temp2, temp1));
3078 if (TARGET_ARCH32)
3079 {
3080 emit_insn (gen_tldm_add32 (o0, got, temp2));
3081 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3082 const1_rtx));
3083 }
3084 else
3085 {
3086 emit_insn (gen_tldm_add64 (o0, got, temp2));
3087 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3088 const1_rtx));
3089 }
3090 CALL_INSN_FUNCTION_USAGE (insn)
3091 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3092 CALL_INSN_FUNCTION_USAGE (insn));
3093 insn = get_insns ();
3094 end_sequence ();
3095 emit_libcall_block (insn, temp3, o0,
3096 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3097 UNSPEC_TLSLD_BASE));
3098 temp1 = gen_reg_rtx (SImode);
3099 temp2 = gen_reg_rtx (SImode);
3100 emit_insn (gen_tldo_hix22 (temp1, addr));
3101 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3102 if (TARGET_ARCH32)
3103 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3104 else
3105 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3106 break;
3107
3108 case TLS_MODEL_INITIAL_EXEC:
3109 temp1 = gen_reg_rtx (SImode);
3110 temp2 = gen_reg_rtx (SImode);
3111 temp3 = gen_reg_rtx (Pmode);
3112 got = sparc_tls_got ();
3113 emit_insn (gen_tie_hi22 (temp1, addr));
3114 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3115 if (TARGET_ARCH32)
3116 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3117 else
3118 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3119 if (TARGET_SUN_TLS)
3120 {
3121 ret = gen_reg_rtx (Pmode);
3122 if (TARGET_ARCH32)
3123 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3124 temp3, addr));
3125 else
3126 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3127 temp3, addr));
3128 }
3129 else
3130 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3131 break;
3132
3133 case TLS_MODEL_LOCAL_EXEC:
3134 temp1 = gen_reg_rtx (Pmode);
3135 temp2 = gen_reg_rtx (Pmode);
3136 if (TARGET_ARCH32)
3137 {
3138 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3139 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3140 }
3141 else
3142 {
3143 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3144 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3145 }
3146 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3147 break;
3148
3149 default:
3150 gcc_unreachable ();
3151 }
3152
3153 else
3154 gcc_unreachable (); /* for now ... */
3155
3156 return ret;
3157 }
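/* For the local-exec model above, the generated code boils down to
   (a sketch; %g7 is the thread pointer):

      sethi  %tle_hix22(sym), %tmp
      xor    %tmp, %tle_lox10(sym), %tmp
      add    %g7, %tmp, %result

   i.e. a 32-bit offset from the thread pointer, with no GOT access and
   no call into the runtime.  */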
3158
3159
3160 /* Legitimize PIC addresses. If the address is already position-independent,
3161 we return ORIG. Newly generated position-independent addresses go into a
3162 reg. This is REG if nonzero, otherwise we allocate register(s) as
3163 necessary. */
3164
3165 rtx
3166 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3167 rtx reg)
3168 {
3169 if (GET_CODE (orig) == SYMBOL_REF)
3170 {
3171 rtx pic_ref, address;
3172 rtx insn;
3173
3174 if (reg == 0)
3175 {
3176 gcc_assert (! reload_in_progress && ! reload_completed);
3177 reg = gen_reg_rtx (Pmode);
3178 }
3179
3180 if (flag_pic == 2)
3181 {
3182 /* If not during reload, allocate another temp reg here for loading
3183 in the address, so that these instructions can be optimized
3184 properly. */
3185 rtx temp_reg = ((reload_in_progress || reload_completed)
3186 ? reg : gen_reg_rtx (Pmode));
3187
3188 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3189 won't get confused into thinking that these two instructions
3190 are loading in the true address of the symbol. If in the
3191 future a PIC rtx exists, that should be used instead. */
3192 if (TARGET_ARCH64)
3193 {
3194 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3195 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3196 }
3197 else
3198 {
3199 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3200 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3201 }
3202 address = temp_reg;
3203 }
3204 else
3205 address = orig;
3206
3207 pic_ref = gen_const_mem (Pmode,
3208 gen_rtx_PLUS (Pmode,
3209 pic_offset_table_rtx, address));
3210 current_function_uses_pic_offset_table = 1;
3211 insn = emit_move_insn (reg, pic_ref);
3212 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3213 by loop. */
3214 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3215 REG_NOTES (insn));
3216 return reg;
3217 }
3218 else if (GET_CODE (orig) == CONST)
3219 {
3220 rtx base, offset;
3221
3222 if (GET_CODE (XEXP (orig, 0)) == PLUS
3223 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3224 return orig;
3225
3226 if (reg == 0)
3227 {
3228 gcc_assert (! reload_in_progress && ! reload_completed);
3229 reg = gen_reg_rtx (Pmode);
3230 }
3231
3232 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3233 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3234 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3235 base == reg ? 0 : reg);
3236
3237 if (GET_CODE (offset) == CONST_INT)
3238 {
3239 if (SMALL_INT (offset))
3240 return plus_constant (base, INTVAL (offset));
3241 else if (! reload_in_progress && ! reload_completed)
3242 offset = force_reg (Pmode, offset);
3243 else
3244 /* If we reach here, then something is seriously wrong. */
3245 gcc_unreachable ();
3246 }
3247 return gen_rtx_PLUS (Pmode, base, offset);
3248 }
3249 else if (GET_CODE (orig) == LABEL_REF)
3250 /* ??? Why do we do this? */
3251 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3252 the register is live instead, in case it is eliminated. */
3253 current_function_uses_pic_offset_table = 1;
3254
3255 return orig;
3256 }
3257
3258 /* Try machine-dependent ways of modifying an illegitimate address X
3259 to be legitimate. If we find one, return the new, valid address.
3260
3261 OLDX is the address as it was before break_out_memory_refs was called.
3262 In some cases it is useful to look at this to decide what needs to be done.
3263
3264 MODE is the mode of the operand pointed to by X. */
3265
3266 rtx
3267 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3268 {
3269 rtx orig_x = x;
3270
3271 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3272 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3273 force_operand (XEXP (x, 0), NULL_RTX));
3274 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3275 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3276 force_operand (XEXP (x, 1), NULL_RTX));
3277 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3278 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3279 XEXP (x, 1));
3280 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3281 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3282 force_operand (XEXP (x, 1), NULL_RTX));
3283
3284 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3285 return x;
3286
3287 if (SPARC_SYMBOL_REF_TLS_P (x))
3288 x = legitimize_tls_address (x);
3289 else if (flag_pic)
3290 x = legitimize_pic_address (x, mode, 0);
3291 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3292 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3293 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3294 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3295 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3296 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3297 else if (GET_CODE (x) == SYMBOL_REF
3298 || GET_CODE (x) == CONST
3299 || GET_CODE (x) == LABEL_REF)
3300 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3301 return x;
3302 }
3303
3304 /* Emit the special PIC helper function. */
3305
3306 static void
3307 emit_pic_helper (void)
3308 {
3309 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3310 int align;
3311
3312 switch_to_section (text_section);
3313
3314 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3315 if (align > 0)
3316 ASM_OUTPUT_ALIGN (asm_out_file, align);
3317 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3318 if (flag_delayed_branch)
3319 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3320 pic_name, pic_name);
3321 else
3322 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3323 pic_name, pic_name);
3324
3325 pic_helper_emitted_p = true;
3326 }
3327
3328 /* Emit code to load the PIC register. */
3329
3330 static void
3331 load_pic_register (bool delay_pic_helper)
3332 {
3333 int orig_flag_pic = flag_pic;
3334
3335 /* If we haven't initialized the special PIC symbols, do so now. */
3336 if (!pic_helper_symbol_name[0])
3337 {
3338 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3339 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3340 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3341 }
3342
3343 /* If we haven't emitted the special PIC helper function, do so now unless
3344 we are requested to delay it. */
3345 if (!delay_pic_helper && !pic_helper_emitted_p)
3346 emit_pic_helper ();
3347
3348 flag_pic = 0;
3349 if (TARGET_ARCH64)
3350 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3351 pic_helper_symbol));
3352 else
3353 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3354 pic_helper_symbol));
3355 flag_pic = orig_flag_pic;
3356
3357 /* Need to emit this whether or not we obey regdecls,
3358 since setjmp/longjmp can corrupt the liveness information.
3359 ??? In the case where we don't obey regdecls, this is not sufficient
3360 since we may not fall out the bottom.  */
3361 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3362 }
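/* The load_pcrel_sym pattern expands to the classic PIC prologue
   (a sketch, modulo the exact internal label name):

      sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
      call   .LADDPC0                ! the helper emitted above
       add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   after which the helper's "add %o7, %l7, %l7" leaves the address of
   the GOT in %l7, the PIC register.  */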
3363 \f
3364 /* Return 1 if RTX is a MEM which is known to be aligned to at
3365 least a DESIRED byte boundary. */
3366
3367 int
3368 mem_min_alignment (rtx mem, int desired)
3369 {
3370 rtx addr, base, offset;
3371
3372 /* If it's not a MEM we can't accept it. */
3373 if (GET_CODE (mem) != MEM)
3374 return 0;
3375
3376 /* Obviously... */
3377 if (!TARGET_UNALIGNED_DOUBLES
3378 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3379 return 1;
3380
3381 /* ??? The rest of the function predates MEM_ALIGN so
3382 there is probably a bit of redundancy. */
3383 addr = XEXP (mem, 0);
3384 base = offset = NULL_RTX;
3385 if (GET_CODE (addr) == PLUS)
3386 {
3387 if (GET_CODE (XEXP (addr, 0)) == REG)
3388 {
3389 base = XEXP (addr, 0);
3390
3391 /* If the base REG is aligned properly, the compiler
3392 will make sure any REG-based index off of it
3393 will be aligned as well.  */
3395 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3396 offset = XEXP (addr, 1);
3397 else
3398 offset = const0_rtx;
3399 }
3400 }
3401 else if (GET_CODE (addr) == REG)
3402 {
3403 base = addr;
3404 offset = const0_rtx;
3405 }
3406
3407 if (base != NULL_RTX)
3408 {
3409 int regno = REGNO (base);
3410
3411 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3412 {
3413 /* Check if the compiler has recorded some information
3414 about the alignment of the base REG. If reload has
3415 completed, we already matched with proper alignments.
3416 If not running global_alloc, reload might give us an
3417 unaligned pointer to the local stack, though.  */
3418 if (((cfun != 0
3419 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3420 || (optimize && reload_completed))
3421 && (INTVAL (offset) & (desired - 1)) == 0)
3422 return 1;
3423 }
3424 else
3425 {
3426 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3427 return 1;
3428 }
3429 }
3430 else if (! TARGET_UNALIGNED_DOUBLES
3431 || CONSTANT_P (addr)
3432 || GET_CODE (addr) == LO_SUM)
3433 {
3434 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3435 is true, in which case we can only assume that an access is aligned if
3436 it is to a constant address, or the address involves a LO_SUM. */
3437 return 1;
3438 }
3439
3440 /* An obviously unaligned address. */
3441 return 0;
3442 }
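/* E.g. (illustrative): a MEM at "%fp + 16" passes for DESIRED == 8 on
   a 32-bit target, where SPARC_STACK_BIAS is 0 and 16 is a multiple of
   8; on 64-bit targets the stack bias of 2047 is first subtracted
   before the low bits of the offset are tested.  */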
3443
3444 \f
3445 /* Vectors to keep interesting information about registers where it can easily
3446 be found.  We used to use the actual mode value as the bit number, but there
3447 are more than 32 modes now. Instead we use two tables: one indexed by
3448 hard register number, and one indexed by mode. */
3449
3450 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3451 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3452 mapped into one sparc_mode_class mode. */
3453
3454 enum sparc_mode_class {
3455 S_MODE, D_MODE, T_MODE, O_MODE,
3456 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3457 CC_MODE, CCFP_MODE
3458 };
3459
3460 /* Modes for single-word and smaller quantities. */
3461 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3462
3463 /* Modes for double-word and smaller quantities. */
3464 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3465
3466 /* Modes for quad-word and smaller quantities. */
3467 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3468
3469 /* Modes for 8-word and smaller quantities. */
3470 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3471
3472 /* Modes for single-float quantities. We must allow any single word or
3473 smaller quantity. This is because the fix/float conversion instructions
3474 take integer inputs/outputs from the float registers. */
3475 #define SF_MODES (S_MODES)
3476
3477 /* Modes for double-float and smaller quantities. */
3478 #define DF_MODES (S_MODES | D_MODES)
3479
3480 /* Modes for double-float only quantities. */
3481 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3482
3483 /* Modes for quad-float only quantities. */
3484 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3485
3486 /* Modes for quad-float and smaller quantities. */
3487 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3488
3489 /* Modes for quad-float and double-float quantities. */
3490 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3491
3492 /* Modes for quad-float pair only quantities. */
3493 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3494
3495 /* Modes for quad-float pairs and smaller quantities. */
3496 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3497
3498 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3499
3500 /* Modes for condition codes. */
3501 #define CC_MODES (1 << (int) CC_MODE)
3502 #define CCFP_MODES (1 << (int) CCFP_MODE)
3503
3504 /* Value is 1 if register/mode pair is acceptable on SPARC.
3505 The funny mixture of D and T modes is because integer operations
3506 do not specially operate on tetra quantities, so non-quad-aligned
3507 registers can hold quadword quantities (except %o4 and %i4 because
3508 they cross fixed registers). */
3509
3510 /* This points to either the 32 bit or the 64 bit version. */
3511 const int *hard_regno_mode_classes;
3512
3513 static const int hard_32bit_mode_classes[] = {
3514 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3515 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3516 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3517 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3518
3519 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3520 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3521 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3522 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3523
3524 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3525 and none can hold SFmode/SImode values. */
3526 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3527 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3528 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3529 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3530
3531 /* %fcc[0123] */
3532 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3533
3534 /* %icc */
3535 CC_MODES
3536 };
3537
3538 static const int hard_64bit_mode_classes[] = {
3539 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3540 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3541 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3542 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3543
3544 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3545 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3546 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3547 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3548
3549 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3550 and none can hold SFmode/SImode values. */
3551 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3552 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3553 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3554 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3555
3556 /* %fcc[0123] */
3557 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3558
3559 /* %icc */
3560 CC_MODES
3561 };
3562
3563 int sparc_mode_class [NUM_MACHINE_MODES];
3564
3565 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3566
3567 static void
3568 sparc_init_modes (void)
3569 {
3570 int i;
3571
3572 for (i = 0; i < NUM_MACHINE_MODES; i++)
3573 {
3574 switch (GET_MODE_CLASS (i))
3575 {
3576 case MODE_INT:
3577 case MODE_PARTIAL_INT:
3578 case MODE_COMPLEX_INT:
3579 if (GET_MODE_SIZE (i) <= 4)
3580 sparc_mode_class[i] = 1 << (int) S_MODE;
3581 else if (GET_MODE_SIZE (i) == 8)
3582 sparc_mode_class[i] = 1 << (int) D_MODE;
3583 else if (GET_MODE_SIZE (i) == 16)
3584 sparc_mode_class[i] = 1 << (int) T_MODE;
3585 else if (GET_MODE_SIZE (i) == 32)
3586 sparc_mode_class[i] = 1 << (int) O_MODE;
3587 else
3588 sparc_mode_class[i] = 0;
3589 break;
3590 case MODE_VECTOR_INT:
3591 if (GET_MODE_SIZE (i) <= 4)
3592 sparc_mode_class[i] = 1 << (int)SF_MODE;
3593 else if (GET_MODE_SIZE (i) == 8)
3594 sparc_mode_class[i] = 1 << (int)DF_MODE;
3595 break;
3596 case MODE_FLOAT:
3597 case MODE_COMPLEX_FLOAT:
3598 if (GET_MODE_SIZE (i) <= 4)
3599 sparc_mode_class[i] = 1 << (int) SF_MODE;
3600 else if (GET_MODE_SIZE (i) == 8)
3601 sparc_mode_class[i] = 1 << (int) DF_MODE;
3602 else if (GET_MODE_SIZE (i) == 16)
3603 sparc_mode_class[i] = 1 << (int) TF_MODE;
3604 else if (GET_MODE_SIZE (i) == 32)
3605 sparc_mode_class[i] = 1 << (int) OF_MODE;
3606 else
3607 sparc_mode_class[i] = 0;
3608 break;
3609 case MODE_CC:
3610 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3611 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3612 else
3613 sparc_mode_class[i] = 1 << (int) CC_MODE;
3614 break;
3615 default:
3616 sparc_mode_class[i] = 0;
3617 break;
3618 }
3619 }
3620
3621 if (TARGET_ARCH64)
3622 hard_regno_mode_classes = hard_64bit_mode_classes;
3623 else
3624 hard_regno_mode_classes = hard_32bit_mode_classes;
3625
3626 /* Initialize the array used by REGNO_REG_CLASS. */
3627 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3628 {
3629 if (i < 16 && TARGET_V8PLUS)
3630 sparc_regno_reg_class[i] = I64_REGS;
3631 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3632 sparc_regno_reg_class[i] = GENERAL_REGS;
3633 else if (i < 64)
3634 sparc_regno_reg_class[i] = FP_REGS;
3635 else if (i < 96)
3636 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3637 else if (i < 100)
3638 sparc_regno_reg_class[i] = FPCC_REGS;
3639 else
3640 sparc_regno_reg_class[i] = NO_REGS;
3641 }
3642 }
3643 \f
3644 /* Compute the frame size required by the function. This function is called
3645 during the reload pass and also by sparc_expand_prologue. */
3646
3647 HOST_WIDE_INT
3648 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3649 {
3650 int outgoing_args_size = (current_function_outgoing_args_size
3651 + REG_PARM_STACK_SPACE (current_function_decl));
3652 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3653 int i;
3654
3655 if (TARGET_ARCH64)
3656 {
3657 for (i = 0; i < 8; i++)
3658 if (regs_ever_live[i] && ! call_used_regs[i])
3659 n_regs += 2;
3660 }
3661 else
3662 {
3663 for (i = 0; i < 8; i += 2)
3664 if ((regs_ever_live[i] && ! call_used_regs[i])
3665 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3666 n_regs += 2;
3667 }
3668
3669 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3670 if ((regs_ever_live[i] && ! call_used_regs[i])
3671 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3672 n_regs += 2;
3673
3674 /* Set up values for use in prologue and epilogue. */
3675 num_gfregs = n_regs;
3676
3677 if (leaf_function_p
3678 && n_regs == 0
3679 && size == 0
3680 && current_function_outgoing_args_size == 0)
3681 actual_fsize = apparent_fsize = 0;
3682 else
3683 {
3684 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3685 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3686 apparent_fsize += n_regs * 4;
3687 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3688 }
3689
3690 /* Make sure nothing can clobber our register windows.
3691 If a SAVE must be done, or there is a stack-local variable,
3692 the register window area must be allocated. */
3693 if (! leaf_function_p || size > 0)
3694 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3695
3696 return SPARC_STACK_ALIGN (actual_fsize);
3697 }
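/* A rough sketch of what is being sized (illustrative): the apparent
   frame covers the rounded-up locals plus 4 bytes per saved global/FP
   register; the actual frame additionally covers the rounded-up
   outgoing argument area; and unless the function is a true leaf, the
   register window save area (FIRST_PARM_OFFSET) is reserved so that a
   SAVE or a stack local cannot clobber anything.  */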
3698
3699 /* Output any necessary .register pseudo-ops. */
3700
3701 void
3702 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3703 {
3704 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3705 int i;
3706
3707 if (TARGET_ARCH32)
3708 return;
3709
3710 /* Check whether %g[2367] were used without a
3711 .register directive having been printed for them already.  */
3712 for (i = 2; i < 8; i++)
3713 {
3714 if (regs_ever_live [i]
3715 && ! sparc_hard_reg_printed [i])
3716 {
3717 sparc_hard_reg_printed [i] = 1;
3718 /* %g7 is used as TLS base register, use #ignore
3719 for it instead of #scratch. */
3720 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3721 i == 7 ? "ignore" : "scratch");
3722 }
3723 if (i == 3) i = 5;
3724 }
3725 #endif
3726 }
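/* For instance, a 64-bit function in which %g2 and %g7 are live and no
   directives have been printed for them yet would get the following
   output from the loop above:

       .register %g2, #scratch
       .register %g7, #ignore                                             */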
3727
3728 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3729 as needed. LOW should be double-word aligned for 32-bit registers.
3730 Return the new OFFSET. */
3731
3732 #define SORR_SAVE 0
3733 #define SORR_RESTORE 1
3734
3735 static int
3736 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3737 {
3738 rtx mem, insn;
3739 int i;
3740
3741 if (TARGET_ARCH64 && high <= 32)
3742 {
3743 for (i = low; i < high; i++)
3744 {
3745 if (regs_ever_live[i] && ! call_used_regs[i])
3746 {
3747 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3748 set_mem_alias_set (mem, sparc_sr_alias_set);
3749 if (action == SORR_SAVE)
3750 {
3751 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3752 RTX_FRAME_RELATED_P (insn) = 1;
3753 }
3754 else /* action == SORR_RESTORE */
3755 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3756 offset += 8;
3757 }
3758 }
3759 }
3760 else
3761 {
3762 for (i = low; i < high; i += 2)
3763 {
3764 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
3765 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
3766 enum machine_mode mode;
3767 int regno;
3768
3769 if (reg0 && reg1)
3770 {
3771 mode = i < 32 ? DImode : DFmode;
3772 regno = i;
3773 }
3774 else if (reg0)
3775 {
3776 mode = i < 32 ? SImode : SFmode;
3777 regno = i;
3778 }
3779 else if (reg1)
3780 {
3781 mode = i < 32 ? SImode : SFmode;
3782 regno = i + 1;
3783 offset += 4;
3784 }
3785 else
3786 continue;
3787
3788 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3789 set_mem_alias_set (mem, sparc_sr_alias_set);
3790 if (action == SORR_SAVE)
3791 {
3792 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3793 RTX_FRAME_RELATED_P (insn) = 1;
3794 }
3795 else /* action == SORR_RESTORE */
3796 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3797
3798 /* Always preserve double-word alignment. */
3799 offset = (offset + 7) & -8;
3800 }
3801 }
3802
3803 return offset;
3804 }
3805
3806 /* Emit code to save or restore call-saved registers, according to ACTION. */
3807
3808 static void
3809 emit_save_or_restore_regs (int action)
3810 {
3811 HOST_WIDE_INT offset;
3812 rtx base;
3813
3814 offset = frame_base_offset - apparent_fsize;
3815
3816 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3817 {
3818 /* ??? This might be optimized a little as %g1 might already have a
3819 value close enough that a single add insn will do. */
3820 /* ??? Although, all of this is probably only a temporary fix
3821 because if %g1 can hold a function result, then
3822 sparc_expand_epilogue will lose (the result will be
3823 clobbered). */
3824 base = gen_rtx_REG (Pmode, 1);
3825 emit_move_insn (base, GEN_INT (offset));
3826 emit_insn (gen_rtx_SET (VOIDmode,
3827 base,
3828 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3829 offset = 0;
3830 }
3831 else
3832 base = frame_base_reg;
3833
3834 offset = save_or_restore_regs (0, 8, base, offset, action);
3835 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3836 }
3837
3838 /* Generate a save_register_window insn. */
3839
3840 static rtx
3841 gen_save_register_window (rtx increment)
3842 {
3843 if (TARGET_ARCH64)
3844 return gen_save_register_windowdi (increment);
3845 else
3846 return gen_save_register_windowsi (increment);
3847 }
3848
3849 /* Generate an increment for the stack pointer. */
3850
3851 static rtx
3852 gen_stack_pointer_inc (rtx increment)
3853 {
3854 return gen_rtx_SET (VOIDmode,
3855 stack_pointer_rtx,
3856 gen_rtx_PLUS (Pmode,
3857 stack_pointer_rtx,
3858 increment));
3859 }
3860
3861 /* Generate a decrement for the stack pointer. */
3862
3863 static rtx
3864 gen_stack_pointer_dec (rtx decrement)
3865 {
3866 return gen_rtx_SET (VOIDmode,
3867 stack_pointer_rtx,
3868 gen_rtx_MINUS (Pmode,
3869 stack_pointer_rtx,
3870 decrement));
3871 }
3872
3873 /* Expand the function prologue. The prologue is responsible for reserving
3874 storage for the frame, saving the call-saved registers and loading the
3875 PIC register if needed. */
3876
3877 void
3878 sparc_expand_prologue (void)
3879 {
3880 rtx insn;
3881 int i;
3882
3883 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3884 on the final value of the flag means deferring the prologue/epilogue
3885 expansion until just before the second scheduling pass, which is too
3886 late to emit multiple epilogues or return insns.
3887
3888 Of course we are making the assumption that the value of the flag
3889 will not change between now and its final value. Of the three parts
3890 of the formula, only the last one can reasonably vary. Let's take a
3891 closer look, assuming that the first two are true (otherwise the
3892 last one is irrelevant).
3893
3894 If only_leaf_regs_used returns false, the global predicate will also
3895 be false so the actual frame size calculated below will be positive.
3896 As a consequence, the save_register_window insn will be emitted in
3897 the instruction stream; now this insn explicitly references %fp
3898 which is not a leaf register so only_leaf_regs_used will always
3899 return false subsequently.
3900
3901 If only_leaf_regs_used returns true, we hope that the subsequent
3902 optimization passes won't cause non-leaf registers to pop up. For
3903 example, the regrename pass has special provisions to not rename to
3904 non-leaf registers in a leaf function. */
3905 sparc_leaf_function_p
3906 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3907
3908 /* Need to use actual_fsize, since we are also allocating
3909 space for our callee (and our own register save area). */
3910 actual_fsize
3911 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
3912
3913 /* Advertise that the data calculated just above are now valid. */
3914 sparc_prologue_data_valid_p = true;
3915
3916 if (sparc_leaf_function_p)
3917 {
3918 frame_base_reg = stack_pointer_rtx;
3919 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3920 }
3921 else
3922 {
3923 frame_base_reg = hard_frame_pointer_rtx;
3924 frame_base_offset = SPARC_STACK_BIAS;
3925 }
3926
3927 if (actual_fsize == 0)
3928 /* do nothing. */ ;
3929 else if (sparc_leaf_function_p)
3930 {
3931 if (actual_fsize <= 4096)
3932 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
3933 else if (actual_fsize <= 8192)
3934 {
3935 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
3936 /* %sp is still the CFA register. */
3937 RTX_FRAME_RELATED_P (insn) = 1;
3938 insn
3939 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3940 }
3941 else
3942 {
3943 rtx reg = gen_rtx_REG (Pmode, 1);
3944 emit_move_insn (reg, GEN_INT (-actual_fsize));
3945 insn = emit_insn (gen_stack_pointer_inc (reg));
3946 REG_NOTES (insn) =
3947 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3948 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
3949 REG_NOTES (insn));
3950 }
3951
3952 RTX_FRAME_RELATED_P (insn) = 1;
3953 }
3954 else
3955 {
3956 if (actual_fsize <= 4096)
3957 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
3958 else if (actual_fsize <= 8192)
3959 {
3960 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
3961 /* %sp is not the CFA register anymore. */
3962 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3963 }
3964 else
3965 {
3966 rtx reg = gen_rtx_REG (Pmode, 1);
3967 emit_move_insn (reg, GEN_INT (-actual_fsize));
3968 insn = emit_insn (gen_save_register_window (reg));
3969 }
3970
3971 RTX_FRAME_RELATED_P (insn) = 1;
3972 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3973 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
3974 }
3975
3976 if (num_gfregs)
3977 emit_save_or_restore_regs (SORR_SAVE);
3978
3979 /* Load the PIC register if needed. */
3980 if (flag_pic && current_function_uses_pic_offset_table)
3981 load_pic_register (false);
3982 }
3983
3984 /* This function generates the assembly code for function entry, which boils
3985 down to emitting the necessary .register directives. */
3986
3987 static void
3988 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3989 {
3990 /* Check that the assumption we made in sparc_expand_prologue is valid. */
3991 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
3992
3993 sparc_output_scratch_registers (file);
3994 }
3995
3996 /* Expand the function epilogue, either normal or part of a sibcall.
3997 We emit all the instructions except the return or the call. */
3998
3999 void
4000 sparc_expand_epilogue (void)
4001 {
4002 if (num_gfregs)
4003 emit_save_or_restore_regs (SORR_RESTORE);
4004
4005 if (actual_fsize == 0)
4006 /* do nothing. */ ;
4007 else if (sparc_leaf_function_p)
4008 {
4009 if (actual_fsize <= 4096)
4010 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4011 else if (actual_fsize <= 8192)
4012 {
4013 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4014 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4015 }
4016 else
4017 {
4018 rtx reg = gen_rtx_REG (Pmode, 1);
4019 emit_move_insn (reg, GEN_INT (-actual_fsize));
4020 emit_insn (gen_stack_pointer_dec (reg));
4021 }
4022 }
4023 }
4024
4025 /* Return true if it is appropriate to emit `return' instructions in the
4026 body of a function. */
4027
4028 bool
4029 sparc_can_use_return_insn_p (void)
4030 {
4031 return sparc_prologue_data_valid_p
4032 && (actual_fsize == 0 || !sparc_leaf_function_p);
4033 }
4034
4035 /* This function generates the assembly code for function exit. */
4036
4037 static void
4038 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4039 {
4040 /* If code does not drop into the epilogue, we still have to output
4041 a dummy nop for the sake of sane backtraces. Otherwise, if the
4042 last two instructions of a function were "call foo; dslot;" this
4043 can make the return PC of foo (i.e. address of call instruction
4044 plus 8) point to the first instruction in the next function. */
4045
4046 rtx insn, last_real_insn;
4047
4048 insn = get_last_insn ();
4049
4050 last_real_insn = prev_real_insn (insn);
4051 if (last_real_insn
4052 && GET_CODE (last_real_insn) == INSN
4053 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4054 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4055
4056 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4057 fputs("\tnop\n", file);
4058
4059 sparc_output_deferred_case_vectors ();
4060 }
4061
4062 /* Output a 'restore' instruction. */
4063
4064 static void
4065 output_restore (rtx pat)
4066 {
4067 rtx operands[3];
4068
4069 if (! pat)
4070 {
4071 fputs ("\t restore\n", asm_out_file);
4072 return;
4073 }
4074
4075 gcc_assert (GET_CODE (pat) == SET);
4076
4077 operands[0] = SET_DEST (pat);
4078 pat = SET_SRC (pat);
4079
4080 switch (GET_CODE (pat))
4081 {
4082 case PLUS:
4083 operands[1] = XEXP (pat, 0);
4084 operands[2] = XEXP (pat, 1);
4085 output_asm_insn (" restore %r1, %2, %Y0", operands);
4086 break;
4087 case LO_SUM:
4088 operands[1] = XEXP (pat, 0);
4089 operands[2] = XEXP (pat, 1);
4090 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4091 break;
4092 case ASHIFT:
4093 operands[1] = XEXP (pat, 0);
4094 gcc_assert (XEXP (pat, 1) == const1_rtx);
4095 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4096 break;
4097 default:
4098 operands[1] = pat;
4099 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4100 break;
4101 }
4102 }
4103
4104 /* Output a return. */
4105
4106 const char *
4107 output_return (rtx insn)
4108 {
4109 if (sparc_leaf_function_p)
4110 {
4111 /* This is a leaf function so we don't have to bother restoring the
4112 register window, which frees us from dealing with the convoluted
4113 semantics of restore/return. We simply output the jump to the
4114 return address and the insn in the delay slot (if any). */
4115
4116 gcc_assert (! current_function_calls_eh_return);
4117
4118 return "jmp\t%%o7+%)%#";
4119 }
4120 else
4121 {
4122 /* This is a regular function so we have to restore the register window.
4123 We may have a pending insn for the delay slot, which will be either
4124 combined with the 'restore' instruction or put in the delay slot of
4125 the 'return' instruction. */
4126
4127 if (current_function_calls_eh_return)
4128 {
4129 /* If the function uses __builtin_eh_return, the eh_return
4130 machinery occupies the delay slot. */
4131 gcc_assert (! final_sequence);
4132
4133 if (! flag_delayed_branch)
4134 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4135
4136 if (TARGET_V9)
4137 fputs ("\treturn\t%i7+8\n", asm_out_file);
4138 else
4139 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4140
4141 if (flag_delayed_branch)
4142 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4143 else
4144 fputs ("\t nop\n", asm_out_file);
4145 }
4146 else if (final_sequence)
4147 {
4148 rtx delay, pat;
4149
4150 delay = NEXT_INSN (insn);
4151 gcc_assert (delay);
4152
4153 pat = PATTERN (delay);
4154
4155 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4156 {
4157 epilogue_renumber (&pat, 0);
4158 return "return\t%%i7+%)%#";
4159 }
4160 else
4161 {
4162 output_asm_insn ("jmp\t%%i7+%)", NULL);
4163 output_restore (pat);
4164 PATTERN (delay) = gen_blockage ();
4165 INSN_CODE (delay) = -1;
4166 }
4167 }
4168 else
4169 {
4170 /* The delay slot is empty. */
4171 if (TARGET_V9)
4172 return "return\t%%i7+%)\n\t nop";
4173 else if (flag_delayed_branch)
4174 return "jmp\t%%i7+%)\n\t restore";
4175 else
4176 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4177 }
4178 }
4179
4180 return "";
4181 }
4182
4183 /* Output a sibling call. */
4184
4185 const char *
4186 output_sibcall (rtx insn, rtx call_operand)
4187 {
4188 rtx operands[1];
4189
4190 gcc_assert (flag_delayed_branch);
4191
4192 operands[0] = call_operand;
4193
4194 if (sparc_leaf_function_p)
4195 {
4196 /* This is a leaf function so we don't have to bother restoring the
4197 register window. We simply output the jump to the function and
4198 the insn in the delay slot (if any). */
4199
4200 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4201
4202 if (final_sequence)
4203 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4204 operands);
4205 else
4206 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4207 it into a branch if possible. */
4208 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4209 operands);
4210 }
4211 else
4212 {
4213 /* This is a regular function so we have to restore the register window.
4214 We may have a pending insn for the delay slot, which will be combined
4215 with the 'restore' instruction. */
4216
4217 output_asm_insn ("call\t%a0, 0", operands);
4218
4219 if (final_sequence)
4220 {
4221 rtx delay = NEXT_INSN (insn);
4222 gcc_assert (delay);
4223
4224 output_restore (PATTERN (delay));
4225
4226 PATTERN (delay) = gen_blockage ();
4227 INSN_CODE (delay) = -1;
4228 }
4229 else
4230 output_restore (NULL_RTX);
4231 }
4232
4233 return "";
4234 }
4235 \f
4236 /* Functions for handling argument passing.
4237
4238 For 32-bit, the first 6 args are normally in registers and the rest are
4239 pushed. Any arg that starts within the first 6 words is at least
4240 partially passed in a register unless its data type forbids it.
4241
4242 For 64-bit, the argument registers are laid out as an array of 16 elements
4243 and arguments are added sequentially. The first 6 int args and up to the
4244 first 16 fp args (depending on size) are passed in regs.
4245
4246 Slot Stack Integral Float Float in structure Double Long Double
4247 ---- ----- -------- ----- ------------------ ------ -----------
4248 15 [SP+248] %f31 %f30,%f31 %d30
4249 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4250 13 [SP+232] %f27 %f26,%f27 %d26
4251 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4252 11 [SP+216] %f23 %f22,%f23 %d22
4253 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4254 9 [SP+200] %f19 %f18,%f19 %d18
4255 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4256 7 [SP+184] %f15 %f14,%f15 %d14
4257 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4258 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4259 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4260 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4261 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4262 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4263 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4264
4265 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4266
4267 Integral arguments are always passed as 64-bit quantities appropriately
4268 extended.
4269
4270 Passing of floating point values is handled as follows.
4271 If a prototype is in scope:
4272 If the value is a named argument (i.e. part of the declared parameters,
4273 not the `...' of a stdarg function) then the value is passed in the
4274 appropriate fp reg.
4275 If the value is part of the `...' and is passed in one of the first 6
4276 slots then the value is passed in the appropriate int reg.
4277 If the value is part of the `...' and is not passed in one of the first 6
4278 slots then the value is passed in memory.
4279 If a prototype is not in scope:
4280 If the value is one of the first 6 arguments the value is passed in the
4281 appropriate integer reg and the appropriate fp reg.
4282 If the value is not one of the first 6 arguments the value is passed in
4283 the appropriate fp reg and in memory.
4284
4285
4286 Summary of the calling conventions implemented by GCC on SPARC:
4287
4288 32-bit ABI:
4289 size argument return value
4290
4291 small integer <4 int. reg. int. reg.
4292 word 4 int. reg. int. reg.
4293 double word 8 int. reg. int. reg.
4294
4295 _Complex small integer <8 int. reg. int. reg.
4296 _Complex word 8 int. reg. int. reg.
4297 _Complex double word 16 memory int. reg.
4298
4299 vector integer <=8 int. reg. FP reg.
4300 vector integer >8 memory memory
4301
4302 float 4 int. reg. FP reg.
4303 double 8 int. reg. FP reg.
4304 long double 16 memory memory
4305
4306 _Complex float 8 memory FP reg.
4307 _Complex double 16 memory FP reg.
4308 _Complex long double 32 memory FP reg.
4309
4310 vector float any memory memory
4311
4312 aggregate any memory memory
4313
4314
4315
4316 64-bit ABI:
4317 size argument return value
4318
4319 small integer <8 int. reg. int. reg.
4320 word 8 int. reg. int. reg.
4321 double word 16 int. reg. int. reg.
4322
4323 _Complex small integer <16 int. reg. int. reg.
4324 _Complex word 16 int. reg. int. reg.
4325 _Complex double word 32 memory int. reg.
4326
4327 vector integer <=16 FP reg. FP reg.
4328 vector integer 16<s<=32 memory FP reg.
4329 vector integer >32 memory memory
4330
4331 float 4 FP reg. FP reg.
4332 double 8 FP reg. FP reg.
4333 long double 16 FP reg. FP reg.
4334
4335 _Complex float 8 FP reg. FP reg.
4336 _Complex double 16 FP reg. FP reg.
4337 _Complex long double 32 memory FP reg.
4338
4339 vector float <=16 FP reg. FP reg.
4340 vector float 16<s<=32 memory FP reg.
4341 vector float >32 memory memory
4342
4343 aggregate <=16 reg. reg.
4344 aggregate 16<s<=32 memory reg.
4345 aggregate >32 memory memory
4346
4347
4348
4349 Note #1: complex floating-point types follow the extended SPARC ABIs as
4350 implemented by the Sun compiler.
4351
4352 Note #2: integral vector types follow the scalar floating-point types
4353 conventions to match what is implemented by the Sun VIS SDK.
4354
4355 Note #3: floating-point vector types follow the aggregate types
4356 conventions. */
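/* A hedged example of the 64-bit conventions summarized above: for a
   prototyped call

       extern void f (int a, float b, double c);

   A is passed in %o0 (slot 0), B in %f3 (slot 1, right-justified in the
   slot's double register) and C in %d4 (slot 2), as per the table.  */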
4357
4358
4359 /* Maximum number of int regs for args. */
4360 #define SPARC_INT_ARG_MAX 6
4361 /* Maximum number of fp regs for args. */
4362 #define SPARC_FP_ARG_MAX 16
4363
4364 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
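/* E.g. with UNITS_PER_WORD == 8 (64-bit), ROUND_ADVANCE (12) yields
   (12 + 7) / 8 == 2 argument words; with UNITS_PER_WORD == 4 (32-bit)
   it yields (12 + 3) / 4 == 3.  */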
4365
4366 /* Handle the INIT_CUMULATIVE_ARGS macro.
4367 Initialize a variable CUM of type CUMULATIVE_ARGS
4368 for a call to a function whose data type is FNTYPE.
4369 For a library call, FNTYPE is 0. */
4370
4371 void
4372 init_cumulative_args (struct sparc_args *cum, tree fntype,
4373 rtx libname ATTRIBUTE_UNUSED,
4374 tree fndecl ATTRIBUTE_UNUSED)
4375 {
4376 cum->words = 0;
4377 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4378 cum->libcall_p = fntype == 0;
4379 }
4380
4381 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4382 When a prototype says `char' or `short', really pass an `int'. */
4383
4384 static bool
4385 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4386 {
4387 return TARGET_ARCH32 ? true : false;
4388 }
4389
4390 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4391
4392 static bool
4393 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4394 {
4395 return TARGET_ARCH64 ? true : false;
4396 }
4397
4398 /* Scan the record type TYPE and return the following predicates:
4399 - INTREGS_P: the record contains at least one field or sub-field
4400 that is eligible for promotion in integer registers.
4401 - FPREGS_P: the record contains at least one field or sub-field
4402 that is eligible for promotion in floating-point registers.
4403 - PACKED_P: the record contains at least one field that is packed.
4404
4405 Sub-fields are not taken into account for the PACKED_P predicate. */
4406
4407 static void
4408 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4409 {
4410 tree field;
4411
4412 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4413 {
4414 if (TREE_CODE (field) == FIELD_DECL)
4415 {
4416 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4417 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4418 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4419 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4420 && TARGET_FPU)
4421 *fpregs_p = 1;
4422 else
4423 *intregs_p = 1;
4424
4425 if (packed_p && DECL_PACKED (field))
4426 *packed_p = 1;
4427 }
4428 }
4429 }
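/* A hedged illustration: scanning

       struct s { float f; int i; };

   sets both *FPREGS_P (for F, provided TARGET_FPU) and *INTREGS_P (for I);
   *PACKED_P would only be set if a field of S itself were packed.  */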
4430
4431 /* Compute the slot number to pass an argument in.
4432 Return the slot number or -1 if passing on the stack.
4433
4434 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4435 the preceding args and about the function being called.
4436 MODE is the argument's machine mode.
4437 TYPE is the data type of the argument (as a tree).
4438 This is null for libcalls where that information may
4439 not be available.
4440 NAMED is nonzero if this argument is a named parameter
4441 (otherwise it is an extra parameter matching an ellipsis).
4442 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4443 *PREGNO records the register number to use if the argument is scalar.
4444 *PPADDING records the amount of padding needed in words. */
4445
4446 static int
4447 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4448 tree type, int named, int incoming_p,
4449 int *pregno, int *ppadding)
4450 {
4451 int regbase = (incoming_p
4452 ? SPARC_INCOMING_INT_ARG_FIRST
4453 : SPARC_OUTGOING_INT_ARG_FIRST);
4454 int slotno = cum->words;
4455 enum mode_class mclass;
4456 int regno;
4457
4458 *ppadding = 0;
4459
4460 if (type && TREE_ADDRESSABLE (type))
4461 return -1;
4462
4463 if (TARGET_ARCH32
4464 && mode == BLKmode
4465 && type
4466 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4467 return -1;
4468
4469 /* For SPARC64, objects requiring 16-byte alignment get it. */
4470 if (TARGET_ARCH64
4471 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4472 && (slotno & 1) != 0)
4473 slotno++, *ppadding = 1;
4474
4475 mclass = GET_MODE_CLASS (mode);
4476 if (type && TREE_CODE (type) == VECTOR_TYPE)
4477 {
4478 /* Vector types deserve special treatment because they are
4479 polymorphic wrt their mode, depending upon whether VIS
4480 instructions are enabled. */
4481 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4482 {
4483 /* The SPARC port defines no floating-point vector modes. */
4484 gcc_assert (mode == BLKmode);
4485 }
4486 else
4487 {
4488 /* Integral vector types should either have a vector
4489 mode or an integral mode, because we are guaranteed
4490 by pass_by_reference that their size is not greater
4491 than 16 bytes and TImode is 16-byte wide. */
4492 gcc_assert (mode != BLKmode);
4493
4494 /* Vector integers are handled like floats according to
4495 the Sun VIS SDK. */
4496 mclass = MODE_FLOAT;
4497 }
4498 }
4499
4500 switch (mclass)
4501 {
4502 case MODE_FLOAT:
4503 case MODE_COMPLEX_FLOAT:
4504 if (TARGET_ARCH64 && TARGET_FPU && named)
4505 {
4506 if (slotno >= SPARC_FP_ARG_MAX)
4507 return -1;
4508 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4509 /* Arguments filling only one single FP register are
4510 right-justified in the outer double FP register. */
4511 if (GET_MODE_SIZE (mode) <= 4)
4512 regno++;
4513 break;
4514 }
4515 /* fallthrough */
4516
4517 case MODE_INT:
4518 case MODE_COMPLEX_INT:
4519 if (slotno >= SPARC_INT_ARG_MAX)
4520 return -1;
4521 regno = regbase + slotno;
4522 break;
4523
4524 case MODE_RANDOM:
4525 if (mode == VOIDmode)
4526 /* MODE is VOIDmode when generating the actual call. */
4527 return -1;
4528
4529 gcc_assert (mode == BLKmode);
4530
4531 if (TARGET_ARCH32 || !type || (TREE_CODE (type) == UNION_TYPE))
4532 {
4533 if (slotno >= SPARC_INT_ARG_MAX)
4534 return -1;
4535 regno = regbase + slotno;
4536 }
4537 else /* TARGET_ARCH64 && type */
4538 {
4539 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4540
4541 /* First see what kinds of registers we would need. */
4542 if (TREE_CODE (type) == VECTOR_TYPE)
4543 fpregs_p = 1;
4544 else
4545 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4546
4547 /* The ABI obviously doesn't specify how packed structures
4548 are passed. These are defined to be passed in int regs
4549 if possible, otherwise memory. */
4550 if (packed_p || !named)
4551 fpregs_p = 0, intregs_p = 1;
4552
4553 /* If all arg slots are filled, then must pass on stack. */
4554 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4555 return -1;
4556
4557 /* If there are only int args and all int arg slots are filled,
4558 then must pass on stack. */
4559 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4560 return -1;
4561
4562 /* Note that even if all int arg slots are filled, fp members may
4563 still be passed in regs if such regs are available.
4564 *PREGNO isn't set because there may be more than one; it's up
4565 to the caller to compute them. */
4566 return slotno;
4567 }
4568 break;
4569
4570 default:
4571 gcc_unreachable ();
4572 }
4573
4574 *pregno = regno;
4575 return slotno;
4576 }
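/* A hedged illustration of the 16-byte alignment rule above: on SPARC64,
   a 'long double' argument that would start in an odd slot (say slot 1)
   is bumped to the next even slot, so *PPADDING is set to 1 and the
   value actually begins in slot 2.  */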
4577
4578 /* Handle recursive register counting for structure field layout. */
4579
4580 struct function_arg_record_value_parms
4581 {
4582 rtx ret; /* return expression being built. */
4583 int slotno; /* slot number of the argument. */
4584 int named; /* whether the argument is named. */
4585 int regbase; /* regno of the base register. */
4586 int stack; /* 1 if part of the argument is on the stack. */
4587 int intoffset; /* offset of the first pending integer field. */
4588 unsigned int nregs; /* number of words passed in registers. */
4589 };
4590
4591 static void function_arg_record_value_3
4592 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4593 static void function_arg_record_value_2
4594 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4595 static void function_arg_record_value_1
4596 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4597 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4598 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4599
4600 /* A subroutine of function_arg_record_value. Traverse the structure
4601 recursively and determine how many registers will be required. */
4602
4603 static void
4604 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4605 struct function_arg_record_value_parms *parms,
4606 bool packed_p)
4607 {
4608 tree field;
4609
4610 /* We need to compute how many registers are needed so we can
4611 allocate the PARALLEL, but before we can do that we need to know
4612 whether there are any packed fields. The ABI obviously doesn't
4613 specify how structures are passed in this case, so they are
4614 defined to be passed in int regs if possible, otherwise memory,
4615 regardless of whether there are fp values present. */
4616
4617 if (! packed_p)
4618 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4619 {
4620 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4621 {
4622 packed_p = true;
4623 break;
4624 }
4625 }
4626
4627 /* Compute how many registers we need. */
4628 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4629 {
4630 if (TREE_CODE (field) == FIELD_DECL)
4631 {
4632 HOST_WIDE_INT bitpos = startbitpos;
4633
4634 if (DECL_SIZE (field) != 0)
4635 {
4636 if (integer_zerop (DECL_SIZE (field)))
4637 continue;
4638
4639 if (host_integerp (bit_position (field), 1))
4640 bitpos += int_bit_position (field);
4641 }
4642
4643 /* ??? FIXME: else assume zero offset. */
4644
4645 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4646 function_arg_record_value_1 (TREE_TYPE (field),
4647 bitpos,
4648 parms,
4649 packed_p);
4650 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4651 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4652 && TARGET_FPU
4653 && parms->named
4654 && ! packed_p)
4655 {
4656 if (parms->intoffset != -1)
4657 {
4658 unsigned int startbit, endbit;
4659 int intslots, this_slotno;
4660
4661 startbit = parms->intoffset & -BITS_PER_WORD;
4662 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4663
4664 intslots = (endbit - startbit) / BITS_PER_WORD;
4665 this_slotno = parms->slotno + parms->intoffset
4666 / BITS_PER_WORD;
4667
4668 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4669 {
4670 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4671 /* We need to pass this field on the stack. */
4672 parms->stack = 1;
4673 }
4674
4675 parms->nregs += intslots;
4676 parms->intoffset = -1;
4677 }
4678
4679 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4680 If it weren't true we wouldn't be here. */
4681 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4682 && DECL_MODE (field) == BLKmode)
4683 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4684 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4685 parms->nregs += 2;
4686 else
4687 parms->nregs += 1;
4688 }
4689 else
4690 {
4691 if (parms->intoffset == -1)
4692 parms->intoffset = bitpos;
4693 }
4694 }
4695 }
4696 }
4697
4698 /* A subroutine of function_arg_record_value. Assign the bits of the
4699 structure between parms->intoffset and bitpos to integer registers. */
4700
4701 static void
4702 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4703 struct function_arg_record_value_parms *parms)
4704 {
4705 enum machine_mode mode;
4706 unsigned int regno;
4707 unsigned int startbit, endbit;
4708 int this_slotno, intslots, intoffset;
4709 rtx reg;
4710
4711 if (parms->intoffset == -1)
4712 return;
4713
4714 intoffset = parms->intoffset;
4715 parms->intoffset = -1;
4716
4717 startbit = intoffset & -BITS_PER_WORD;
4718 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4719 intslots = (endbit - startbit) / BITS_PER_WORD;
4720 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4721
4722 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4723 if (intslots <= 0)
4724 return;
4725
4726 /* If this is the trailing part of a word, only load that much into
4727 the register. Otherwise load the whole register. Note that in
4728 the latter case we may pick up unwanted bits. It's not a problem
4729 at the moment but we may wish to revisit it. */
4730
4731 if (intoffset % BITS_PER_WORD != 0)
4732 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4733 MODE_INT);
4734 else
4735 mode = word_mode;
4736
4737 intoffset /= BITS_PER_UNIT;
4738 do
4739 {
4740 regno = parms->regbase + this_slotno;
4741 reg = gen_rtx_REG (mode, regno);
4742 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4743 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4744
4745 this_slotno += 1;
4746 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4747 mode = word_mode;
4748 parms->nregs += 1;
4749 intslots -= 1;
4750 }
4751 while (intslots > 0);
4752 }
4753
4754 /* A subroutine of function_arg_record_value. Traverse the structure
4755 recursively and assign bits to floating point registers. Track which
4756 bits in between need integer registers; invoke function_arg_record_value_3
4757 to make that happen. */
4758
4759 static void
4760 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4761 struct function_arg_record_value_parms *parms,
4762 bool packed_p)
4763 {
4764 tree field;
4765
4766 if (! packed_p)
4767 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4768 {
4769 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4770 {
4771 packed_p = true;
4772 break;
4773 }
4774 }
4775
4776 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4777 {
4778 if (TREE_CODE (field) == FIELD_DECL)
4779 {
4780 HOST_WIDE_INT bitpos = startbitpos;
4781
4782 if (DECL_SIZE (field) != 0)
4783 {
4784 if (integer_zerop (DECL_SIZE (field)))
4785 continue;
4786
4787 if (host_integerp (bit_position (field), 1))
4788 bitpos += int_bit_position (field);
4789 }
4790
4791 /* ??? FIXME: else assume zero offset. */
4792
4793 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4794 function_arg_record_value_2 (TREE_TYPE (field),
4795 bitpos,
4796 parms,
4797 packed_p);
4798 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4799 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4800 && TARGET_FPU
4801 && parms->named
4802 && ! packed_p)
4803 {
4804 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4805 int regno, nregs, pos;
4806 enum machine_mode mode = DECL_MODE (field);
4807 rtx reg;
4808
4809 function_arg_record_value_3 (bitpos, parms);
4810
4811 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4812 && mode == BLKmode)
4813 {
4814 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4815 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4816 }
4817 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4818 {
4819 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4820 nregs = 2;
4821 }
4822 else
4823 nregs = 1;
4824
4825 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4826 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4827 regno++;
4828 reg = gen_rtx_REG (mode, regno);
4829 pos = bitpos / BITS_PER_UNIT;
4830 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4831 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4832 parms->nregs += 1;
4833 while (--nregs > 0)
4834 {
4835 regno += GET_MODE_SIZE (mode) / 4;
4836 reg = gen_rtx_REG (mode, regno);
4837 pos += GET_MODE_SIZE (mode);
4838 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4839 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4840 parms->nregs += 1;
4841 }
4842 }
4843 else
4844 {
4845 if (parms->intoffset == -1)
4846 parms->intoffset = bitpos;
4847 }
4848 }
4849 }
4850 }
4851
4852 /* Used by function_arg and function_value to implement the complex
4853 conventions of the 64-bit ABI for passing and returning structures.
4854 Return an expression valid as a return value for the two macros
4855 FUNCTION_ARG and FUNCTION_VALUE.
4856
4857 TYPE is the data type of the argument (as a tree).
4858 This is null for libcalls where that information may
4859 not be available.
4860 MODE is the argument's machine mode.
4861 SLOTNO is the index number of the argument's slot in the parameter array.
4862 NAMED is nonzero if this argument is a named parameter
4863 (otherwise it is an extra parameter matching an ellipsis).
4864 REGBASE is the regno of the base register for the parameter array. */
4865
4866 static rtx
4867 function_arg_record_value (tree type, enum machine_mode mode,
4868 int slotno, int named, int regbase)
4869 {
4870 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4871 struct function_arg_record_value_parms parms;
4872 unsigned int nregs;
4873
4874 parms.ret = NULL_RTX;
4875 parms.slotno = slotno;
4876 parms.named = named;
4877 parms.regbase = regbase;
4878 parms.stack = 0;
4879
4880 /* Compute how many registers we need. */
4881 parms.nregs = 0;
4882 parms.intoffset = 0;
4883 function_arg_record_value_1 (type, 0, &parms, false);
4884
4885 /* Take into account pending integer fields. */
4886 if (parms.intoffset != -1)
4887 {
4888 unsigned int startbit, endbit;
4889 int intslots, this_slotno;
4890
4891 startbit = parms.intoffset & -BITS_PER_WORD;
4892 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4893 intslots = (endbit - startbit) / BITS_PER_WORD;
4894 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4895
4896 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4897 {
4898 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4899 /* We need to pass this field on the stack. */
4900 parms.stack = 1;
4901 }
4902
4903 parms.nregs += intslots;
4904 }
4905 nregs = parms.nregs;
4906
4907 /* Allocate the vector and handle some annoying special cases. */
4908 if (nregs == 0)
4909 {
4910 /* ??? Empty structure has no value? Duh? */
4911 if (typesize <= 0)
4912 {
4913 /* Though there's nothing really to store, return a word register
4914 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4915 leads to breakage because there are zero bytes to load. */
4917 return gen_rtx_REG (mode, regbase);
4918 }
4919 else
4920 {
4921 /* ??? C++ has structures with no fields, and yet a size. Give up
4922 for now and pass everything back in integer registers. */
4923 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4924 }
4925 if (nregs + slotno > SPARC_INT_ARG_MAX)
4926 nregs = SPARC_INT_ARG_MAX - slotno;
4927 }
4928 gcc_assert (nregs != 0);
4929
4930 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4931
4932 /* If at least one field must be passed on the stack, generate
4933 (parallel [(expr_list (nil) ...) ...]) so that all fields will
4934 also be passed on the stack. We can't do much better because the
4935 semantics of TARGET_ARG_PARTIAL_BYTES don't handle the case
4936 of structures for which the fields passed exclusively in registers
4937 are not at the beginning of the structure. */
4938 if (parms.stack)
4939 XVECEXP (parms.ret, 0, 0)
4940 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4941
4942 /* Fill in the entries. */
4943 parms.nregs = 0;
4944 parms.intoffset = 0;
4945 function_arg_record_value_2 (type, 0, &parms, false);
4946 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4947
4948 gcc_assert (parms.nregs == nregs);
4949
4950 return parms.ret;
4951 }
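/* A hedged example of the PARALLEL built above: under the 64-bit ABI,

       struct s { double d; int i; };

   passed as a named outgoing argument in slot 0 yields roughly

       (parallel [(expr_list (reg:DF %f0) (const_int 0))
                  (expr_list (reg:DI %o1) (const_int 8))])

   i.e. D is promoted to a float register while I lands in the second
   integer argument register.  */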
4952
4953 /* Used by function_arg and function_value to implement the conventions
4954 of the 64-bit ABI for passing and returning unions.
4955 Return an expression valid as a return value for the two macros
4956 FUNCTION_ARG and FUNCTION_VALUE.
4957
4958 SIZE is the size in bytes of the union.
4959 MODE is the argument's machine mode.
4960 SLOTNO is the argument's slot number; REGNO is the hard register the union will be passed in. */
4961
4962 static rtx
4963 function_arg_union_value (int size, enum machine_mode mode, int slotno,
4964 int regno)
4965 {
4966 int nwords = ROUND_ADVANCE (size), i;
4967 rtx regs;
4968
4969 /* See comment in previous function for empty structures. */
4970 if (nwords == 0)
4971 return gen_rtx_REG (mode, regno);
4972
4973 if (slotno == SPARC_INT_ARG_MAX - 1)
4974 nwords = 1;
4975
4976 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
4977
4978 for (i = 0; i < nwords; i++)
4979 {
4980 /* Unions are passed left-justified. */
4981 XVECEXP (regs, 0, i)
4982 = gen_rtx_EXPR_LIST (VOIDmode,
4983 gen_rtx_REG (word_mode, regno),
4984 GEN_INT (UNITS_PER_WORD * i));
4985 regno++;
4986 }
4987
4988 return regs;
4989 }
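/* For instance, a 16-byte union starting in outgoing slot 0 comes back
   as a two-word, left-justified PARALLEL, roughly

       (parallel [(expr_list (reg:DI %o0) (const_int 0))
                  (expr_list (reg:DI %o1) (const_int 8))])

   with the actual hard registers determined by REGNO.  */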
4990
4991 /* Used by function_arg and function_value to implement the conventions
4992 for passing and returning large (BLKmode) vectors.
4993 Return an expression valid as a return value for the two macros
4994 FUNCTION_ARG and FUNCTION_VALUE.
4995
4996 SIZE is the size in bytes of the vector.
4997 BASE_MODE is the argument's base machine mode.
4998 REGNO is the FP hard register the vector will be passed in. */
4999
5000 static rtx
5001 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5002 {
5003 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5004 int nregs = size / base_mode_size, i;
5005 rtx regs;
5006
5007 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5008
5009 for (i = 0; i < nregs; i++)
5010 {
5011 XVECEXP (regs, 0, i)
5012 = gen_rtx_EXPR_LIST (VOIDmode,
5013 gen_rtx_REG (base_mode, regno),
5014 GEN_INT (base_mode_size * i));
5015 regno += base_mode_size / 4;
5016 }
5017
5018 return regs;
5019 }
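/* A hedged sketch: an 8-byte BLKmode vector of two floats passed at the
   first FP slot becomes

       (parallel [(expr_list (reg:SF %f0) (const_int 0))
                  (expr_list (reg:SF %f1) (const_int 4))])

   since REGNO advances by GET_MODE_SIZE (SFmode) / 4 == 1 per element.  */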
5020
5021 /* Handle the FUNCTION_ARG macro.
5022 Determine where to put an argument to a function.
5023 Value is zero to push the argument on the stack,
5024 or a hard register in which to store the argument.
5025
5026 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5027 the preceding args and about the function being called.
5028 MODE is the argument's machine mode.
5029 TYPE is the data type of the argument (as a tree).
5030 This is null for libcalls where that information may
5031 not be available.
5032 NAMED is nonzero if this argument is a named parameter
5033 (otherwise it is an extra parameter matching an ellipsis).
5034 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5035
5036 rtx
5037 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5038 tree type, int named, int incoming_p)
5039 {
5040 int regbase = (incoming_p
5041 ? SPARC_INCOMING_INT_ARG_FIRST
5042 : SPARC_OUTGOING_INT_ARG_FIRST);
5043 int slotno, regno, padding;
5044 enum mode_class mclass = GET_MODE_CLASS (mode);
5045 rtx reg;
5046
5047 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5048 &regno, &padding);
5049
5050 if (slotno == -1)
5051 return 0;
5052
5053 if (TARGET_ARCH32)
5054 {
5055 reg = gen_rtx_REG (mode, regno);
5056 return reg;
5057 }
5058
5059 if (type && TREE_CODE (type) == RECORD_TYPE)
5060 {
5061 /* Structures up to 16 bytes in size are passed in arg slots on the
5062 stack and are promoted to registers where possible. */
5063
5064 gcc_assert (int_size_in_bytes (type) <= 16);
5065
5066 return function_arg_record_value (type, mode, slotno, named, regbase);
5067 }
5068 else if (type && TREE_CODE (type) == UNION_TYPE)
5069 {
5070 HOST_WIDE_INT size = int_size_in_bytes (type);
5071
5072 gcc_assert (size <= 16);
5073
5074 return function_arg_union_value (size, mode, slotno, regno);
5075 }
5076 else if (type && TREE_CODE (type) == VECTOR_TYPE)
5077 {
5078 /* Vector types deserve special treatment because they are
5079 polymorphic wrt their mode, depending upon whether VIS
5080 instructions are enabled. */
5081 HOST_WIDE_INT size = int_size_in_bytes (type);
5082
5083 gcc_assert (size <= 16);
5084
5085 if (mode == BLKmode)
5086 return function_arg_vector_value (size,
5087 TYPE_MODE (TREE_TYPE (type)),
5088 SPARC_FP_ARG_FIRST + 2*slotno);
5089 else
5090 mclass = MODE_FLOAT;
5091 }
5092
5093 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5094 but also have the slot allocated for them.
5095 If no prototype is in scope, fp values in register slots get passed
5096 in two places, either fp regs and int regs or fp regs and memory. */
5097 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5098 && SPARC_FP_REG_P (regno))
5099 {
5100 reg = gen_rtx_REG (mode, regno);
5101 if (cum->prototype_p || cum->libcall_p)
5102 {
5103 /* "* 2" because fp reg numbers are recorded in 4 byte
5104 quantities. */
5105 #if 0
5106 /* ??? This will cause the value to be passed in the fp reg and
5107 in the stack. When a prototype exists we want to pass the
5108 value in the reg but reserve space on the stack. That's an
5109 optimization, and is deferred [for a bit]. */
5110 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5111 return gen_rtx_PARALLEL (mode,
5112 gen_rtvec (2,
5113 gen_rtx_EXPR_LIST (VOIDmode,
5114 NULL_RTX, const0_rtx),
5115 gen_rtx_EXPR_LIST (VOIDmode,
5116 reg, const0_rtx)));
5117 else
5118 #else
5119 /* ??? It seems that passing back a register even when past
5120 the area declared by REG_PARM_STACK_SPACE will allocate
5121 space appropriately, and will not copy the data onto the
5122 stack, exactly as we desire.
5123
5124 This is due to locate_and_pad_parm being called in
5125 expand_call whenever reg_parm_stack_space > 0, which
5126 while beneficial to our example here, would seem to be
5127 in error from what had been intended. Ho hum... -- r~ */
5128 #endif
5129 return reg;
5130 }
5131 else
5132 {
5133 rtx v0, v1;
5134
5135 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5136 {
5137 int intreg;
5138
5139 /* On incoming, we don't need to know that the value
5140 is passed in %f0 and %i0, and it confuses other parts
5141 causing needless spillage even in the simplest cases. */
5142 if (incoming_p)
5143 return reg;
5144
5145 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5146 + (regno - SPARC_FP_ARG_FIRST) / 2);
5147
5148 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5149 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5150 const0_rtx);
5151 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5152 }
5153 else
5154 {
5155 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5156 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5157 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5158 }
5159 }
5160 }
5161 else
5162 {
5163 /* Scalar or complex int. */
5164 reg = gen_rtx_REG (mode, regno);
5165 }
5166
5167 return reg;
5168 }
5169
5170 /* For an arg passed partly in registers and partly in memory,
5171 this is the number of bytes of registers used.
5172 For args passed entirely in registers or entirely in memory, zero.
5173
5174 Any arg that starts in the first 6 regs but won't entirely fit in them
5175 needs partial registers on v8. On v9, structures with integer
5176 values in arg slots 5 and 6 will be passed in %o5 and SP+176, and complex fp
5177 values that begin in the last fp reg [where "last fp reg" varies with the
5178 mode] will be split between that reg and memory. */
5179
5180 static int
5181 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5182 tree type, bool named)
5183 {
5184 int slotno, regno, padding;
5185
5186 /* We pass 0 for incoming_p here; it doesn't matter. */
5187 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5188
5189 if (slotno == -1)
5190 return 0;
5191
5192 if (TARGET_ARCH32)
5193 {
5194 if ((slotno + (mode == BLKmode
5195 ? ROUND_ADVANCE (int_size_in_bytes (type))
5196 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5197 > SPARC_INT_ARG_MAX)
5198 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5199 }
5200 else
5201 {
5202 /* We are guaranteed by pass_by_reference that the size of the
5203 argument is not greater than 16 bytes, so we only need to return
5204 one word if the argument is partially passed in registers. */
5205
5206 if (type && AGGREGATE_TYPE_P (type))
5207 {
5208 int size = int_size_in_bytes (type);
5209
5210 if (size > UNITS_PER_WORD
5211 && slotno == SPARC_INT_ARG_MAX - 1)
5212 return UNITS_PER_WORD;
5213 }
5214 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5215 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5216 && ! (TARGET_FPU && named)))
5217 {
5218 /* The complex types are passed as packed types. */
5219 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5220 && slotno == SPARC_INT_ARG_MAX - 1)
5221 return UNITS_PER_WORD;
5222 }
5223 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5224 {
5225 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5226 > SPARC_FP_ARG_MAX)
5227 return UNITS_PER_WORD;
5228 }
5229 }
5230
5231 return 0;
5232 }
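/* A hedged example of the 32-bit case above: a DImode argument whose
   first word lands in slot 5 needs two words but only one register
   remains, so (6 - 5) * 4 == 4 bytes are reported as passed in
   registers and the second word goes to the stack.  */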
5233
5234 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5235 Specify whether to pass the argument by reference. */
5236
5237 static bool
5238 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5239 enum machine_mode mode, tree type,
5240 bool named ATTRIBUTE_UNUSED)
5241 {
5242 if (TARGET_ARCH32)
5243 {
5244 /* Original SPARC 32-bit ABI says that structures and unions,
5245 and quad-precision floats are passed by reference. For Pascal,
5246 also pass arrays by reference. All other base types are passed
5247 in registers.
5248
5249 Extended ABI (as implemented by the Sun compiler) says that all
5250 complex floats are passed by reference. Pass complex integers
5251 in registers up to 8 bytes. More generally, enforce the 2-word
5252 cap for passing arguments in registers.
5253
5254 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5255 integers are passed like floats of the same size, that is in
5256 registers up to 8 bytes. Pass all vector floats by reference
5257 like structure and unions. */
5258 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5259 || mode == SCmode
5260 /* Catch CDImode, TFmode, DCmode and TCmode. */
5261 || GET_MODE_SIZE (mode) > 8
5262 || (type
5263 && TREE_CODE (type) == VECTOR_TYPE
5264 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5265 }
5266 else
5267 {
5268 /* Original SPARC 64-bit ABI says that structures and unions
5269 smaller than 16 bytes are passed in registers, as well as
5270 all other base types. For Pascal, pass arrays by reference.
5271
5272 Extended ABI (as implemented by the Sun compiler) says that
5273 complex floats are passed in registers up to 16 bytes. Pass
5274 all complex integers in registers up to 16 bytes. More generally,
5275 enforce the 2-word cap for passing arguments in registers.
5276
5277 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5278 integers are passed like floats of the same size, that is in
5279 registers (up to 16 bytes). Pass all vector floats like structure
5280 and unions. */
5281 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5282 || (type
5283 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5284 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5285 /* Catch CTImode and TCmode. */
5286 || GET_MODE_SIZE (mode) > 16);
5287 }
5288 }
5289
5290 /* Handle the FUNCTION_ARG_ADVANCE macro.
5291 Update the data in CUM to advance over an argument
5292 of mode MODE and data type TYPE.
5293 TYPE is null for libcalls where that information may not be available. */
5294
5295 void
5296 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5297 tree type, int named)
5298 {
5299 int slotno, regno, padding;
5300
5301 /* We pass 0 for incoming_p here; it doesn't matter. */
5302 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5303
5304 /* If the register required leading padding, add it. */
5305 if (slotno != -1)
5306 cum->words += padding;
5307
5308 if (TARGET_ARCH32)
5309 {
5310 cum->words += (mode != BLKmode
5311 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5312 : ROUND_ADVANCE (int_size_in_bytes (type)));
5313 }
5314 else
5315 {
5316 if (type && AGGREGATE_TYPE_P (type))
5317 {
5318 int size = int_size_in_bytes (type);
5319
5320 if (size <= 8)
5321 ++cum->words;
5322 else if (size <= 16)
5323 cum->words += 2;
5324 else /* passed by reference */
5325 ++cum->words;
5326 }
5327 else
5328 {
5329 cum->words += (mode != BLKmode
5330 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5331 : ROUND_ADVANCE (int_size_in_bytes (type)));
5332 }
5333 }
5334 }
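/* E.g. under the 64-bit ABI a 12-byte aggregate advances CUM->words
   by 2 (it occupies two slots), whereas a 24-byte aggregate advances
   it by only 1 since it is passed by reference as a single pointer
   word.  */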
5335
5336 /* Handle the FUNCTION_ARG_PADDING macro.
5337 For the 64-bit ABI, structs are always stored left-justified in their
5338 argument slot. */
5339
5340 enum direction
5341 function_arg_padding (enum machine_mode mode, tree type)
5342 {
5343 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5344 return upward;
5345
5346 /* Fall back to the default. */
5347 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5348 }
5349
5350 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5351 Specify whether to return the return value in memory. */
5352
5353 static bool
5354 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5355 {
5356 if (TARGET_ARCH32)
5357 /* Original SPARC 32-bit ABI says that structures and unions,
5358 and quad-precision floats are returned in memory. All other
5359 base types are returned in registers.
5360
5361 Extended ABI (as implemented by the Sun compiler) says that
5362 all complex floats are returned in registers (8 FP registers
5363 at most for '_Complex long double'). Return all complex integers
5364 in registers (4 at most for '_Complex long long').
5365
5366 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5367 integers are returned like floats of the same size, that is in
5368 registers up to 8 bytes and in memory otherwise. Return all
5369 vector floats in memory like structure and unions; note that
5370 they always have BLKmode like the latter. */
5371 return (TYPE_MODE (type) == BLKmode
5372 || TYPE_MODE (type) == TFmode
5373 || (TREE_CODE (type) == VECTOR_TYPE
5374 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5375 else
5376 /* Original SPARC 64-bit ABI says that structures and unions
5377 smaller than 32 bytes are returned in registers, as well as
5378 all other base types.
5379
5380 Extended ABI (as implemented by the Sun compiler) says that all
5381 complex floats are returned in registers (8 FP registers at most
5382 for '_Complex long double'). Return all complex integers in
5383 registers (4 at most for '_Complex TItype').
5384
5385 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5386 integers are returned like floats of the same size, that is in
5387 registers. Return all vector floats like structure and unions;
5388 note that they always have BLKmode like the latter. */
5389 return ((TYPE_MODE (type) == BLKmode
5390 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5391 }
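/* E.g. in 32-bit mode a BLKmode struct or a 'long double' (TFmode) is
   returned in memory, whereas '_Complex double' (16 bytes) comes back
   in FP registers, matching the summary table further up.  */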
5392
5393 /* Handle the TARGET_STRUCT_VALUE target hook.
5394 Return where to find the structure return value address. */
5395
5396 static rtx
5397 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
5398 {
5399 if (TARGET_ARCH64)
5400 return 0;
5401 else
5402 {
5403 rtx mem;
5404
5405 if (incoming)
5406 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5407 STRUCT_VALUE_OFFSET));
5408 else
5409 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5410 STRUCT_VALUE_OFFSET));
5411
5412 set_mem_alias_set (mem, struct_value_alias_set);
5413 return mem;
5414 }
5415 }
5416
5417 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5418 For v9, function return values are subject to the same rules as arguments,
5419 except that up to 32 bytes may be returned in registers. */
5420
5421 rtx
5422 function_value (tree type, enum machine_mode mode, int incoming_p)
5423 {
5424 /* Beware that the two values are swapped here wrt function_arg. */
5425 int regbase = (incoming_p
5426 ? SPARC_OUTGOING_INT_ARG_FIRST
5427 : SPARC_INCOMING_INT_ARG_FIRST);
5428 enum mode_class mclass = GET_MODE_CLASS (mode);
5429 int regno;
5430
5431 if (type && TREE_CODE (type) == VECTOR_TYPE)
5432 {
5433 /* Vector types deserve special treatment because they are
5434 polymorphic wrt their mode, depending upon whether VIS
5435 instructions are enabled. */
5436 HOST_WIDE_INT size = int_size_in_bytes (type);
5437
5438 gcc_assert ((TARGET_ARCH32 && size <= 8)
5439 || (TARGET_ARCH64 && size <= 32));
5440
5441 if (mode == BLKmode)
5442 return function_arg_vector_value (size,
5443 TYPE_MODE (TREE_TYPE (type)),
5444 SPARC_FP_ARG_FIRST);
5445 else
5446 mclass = MODE_FLOAT;
5447 }
5448 else if (type && TARGET_ARCH64)
5449 {
5450 if (TREE_CODE (type) == RECORD_TYPE)
5451 {
5452 /* Structures up to 32 bytes in size are passed in registers,
5453 promoted to fp registers where possible. */
5454
5455 gcc_assert (int_size_in_bytes (type) <= 32);
5456
5457 return function_arg_record_value (type, mode, 0, 1, regbase);
5458 }
5459 else if (TREE_CODE (type) == UNION_TYPE)
5460 {
5461 HOST_WIDE_INT size = int_size_in_bytes (type);
5462
5463 gcc_assert (size <= 32);
5464
5465 return function_arg_union_value (size, mode, 0, regbase);
5466 }
5467 else if (AGGREGATE_TYPE_P (type))
5468 {
5469 /* All other aggregate types are passed in an integer register
5470 in a mode corresponding to the size of the type. */
5471 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5472
5473 gcc_assert (bytes <= 32);
5474
5475 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5476
5477 /* ??? We probably should have made the same ABI change in
5478 3.4.0 as the one we made for unions. The latter was
5479 required by the SCD though, while the former is not
5480 specified, so we favored compatibility and efficiency.
5481
5482 Now we're stuck for aggregates larger than 16 bytes,
5483 because OImode vanished in the meantime. Let's not
5484 try to be unduly clever, and simply follow the ABI
5485 for unions in that case. */
5486 if (mode == BLKmode)
5487 return function_arg_union_value (bytes, mode, 0, regbase);
5488 else
5489 mclass = MODE_INT;
5490 }
5491 else if (mclass == MODE_INT
5492 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5493 mode = word_mode;
5494 }
5495
5496 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5497 && TARGET_FPU)
5498 regno = SPARC_FP_ARG_FIRST;
5499 else
5500 regno = regbase;
5501
5502 return gen_rtx_REG (mode, regno);
5503 }
5504
5505 /* Do what is necessary for `va_start'. We look at the current function
5506 to determine if stdarg or varargs is used and return the address of
5507 the first unnamed parameter. */
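/* For example, in a 32-bit function

     int f (int a, ...)

   current_function_args_info.words is 1, so the loop below dumps the
   remaining argument registers %i1..%i5 just above the register window
   save area and struct-return slot (at roughly %fp+72 .. %fp+88) and
   returns the address of the first unnamed word for va_start.  */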
5508
5509 static rtx
5510 sparc_builtin_saveregs (void)
5511 {
5512 int first_reg = current_function_args_info.words;
5513 rtx address;
5514 int regno;
5515
5516 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5517 emit_move_insn (gen_rtx_MEM (word_mode,
5518 gen_rtx_PLUS (Pmode,
5519 frame_pointer_rtx,
5520 GEN_INT (FIRST_PARM_OFFSET (0)
5521 + (UNITS_PER_WORD
5522 * regno)))),
5523 gen_rtx_REG (word_mode,
5524 SPARC_INCOMING_INT_ARG_FIRST + regno));
5525
5526 address = gen_rtx_PLUS (Pmode,
5527 frame_pointer_rtx,
5528 GEN_INT (FIRST_PARM_OFFSET (0)
5529 + UNITS_PER_WORD * first_reg));
5530
5531 return address;
5532 }
5533
5534 /* Implement `va_start' for stdarg. */
5535
5536 void
5537 sparc_va_start (tree valist, rtx nextarg)
5538 {
5539 nextarg = expand_builtin_saveregs ();
5540 std_expand_builtin_va_start (valist, nextarg);
5541 }
5542
5543 /* Implement `va_arg' for stdarg. */
5544
5545 static tree
5546 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5547 {
5548 HOST_WIDE_INT size, rsize, align;
5549 tree addr, incr;
5550 bool indirect;
5551 tree ptrtype = build_pointer_type (type);
5552
5553 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5554 {
5555 indirect = true;
5556 size = rsize = UNITS_PER_WORD;
5557 align = 0;
5558 }
5559 else
5560 {
5561 indirect = false;
5562 size = int_size_in_bytes (type);
5563 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5564 align = 0;
5565
5566 if (TARGET_ARCH64)
5567 {
5568 /* For SPARC64, objects requiring 16-byte alignment get it. */
5569 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5570 align = 2 * UNITS_PER_WORD;
5571
5572 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5573 are left-justified in their slots. */
5574 if (AGGREGATE_TYPE_P (type))
5575 {
5576 if (size == 0)
5577 size = rsize = UNITS_PER_WORD;
5578 else
5579 size = rsize;
5580 }
5581 }
5582 }
5583
5584 incr = valist;
5585 if (align)
5586 {
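      /* Round the pointer up: incr = (incr + align - 1) & -align,
         so with align == 16 an ap of 0xf8 moves to 0x100.  */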
5587 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5588 ssize_int (align - 1)));
5589 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5590 ssize_int (-align)));
5591 }
5592
5593 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5594 addr = incr;
5595
5596 if (BYTES_BIG_ENDIAN && size < rsize)
5597 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5598 ssize_int (rsize - size)));
5599
5600 if (indirect)
5601 {
5602 addr = fold_convert (build_pointer_type (ptrtype), addr);
5603 addr = build_va_arg_indirect_ref (addr);
5604 }
5605 /* If the address isn't aligned properly for the type,
5606 we may need to copy to a temporary.
5607 FIXME: This is inefficient. Usually we can do this
5608 in registers. */
5609 else if (align == 0
5610 && TYPE_ALIGN (type) > BITS_PER_WORD)
5611 {
5612 tree tmp = create_tmp_var (type, "va_arg_tmp");
5613 tree dest_addr = build_fold_addr_expr (tmp);
5614
5615 tree copy = build_function_call_expr
5616 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5617 tree_cons (NULL_TREE, dest_addr,
5618 tree_cons (NULL_TREE, addr,
5619 tree_cons (NULL_TREE, size_int (rsize),
5620 NULL_TREE))));
5621
5622 gimplify_and_add (copy, pre_p);
5623 addr = dest_addr;
5624 }
5625 else
5626 addr = fold_convert (ptrtype, addr);
5627
5628 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5629 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5630 gimplify_and_add (incr, post_p);
5631
5632 return build_va_arg_indirect_ref (addr);
5633 }
5634 \f
5635 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5636 Specify whether the vector mode is supported by the hardware. */
5637
5638 static bool
5639 sparc_vector_mode_supported_p (enum machine_mode mode)
5640 {
5641 return TARGET_VIS && VECTOR_MODE_P (mode);
5642 }
5643 \f
5644 /* Return the string to output an unconditional branch to LABEL, which is
5645 the operand number of the label.
5646
5647 DEST is the destination insn (i.e. the label), INSN is the source. */
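/* For example, with LABEL == 0 this yields "ba%*,pt\t%%xcc, %l0%(" in
   the V9 short-range form and "b%*\t%l0%(" otherwise; the %* and %(
   sequences are expanded by print_operand below into the annul flag
   and the delay-slot nop.  */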
5648
5649 const char *
5650 output_ubranch (rtx dest, int label, rtx insn)
5651 {
5652 static char string[64];
5653 bool v9_form = false;
5654 char *p;
5655
5656 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5657 {
5658 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5659 - INSN_ADDRESSES (INSN_UID (insn)));
5660 /* Leave some instructions for "slop". */
5661 if (delta >= -260000 && delta < 260000)
5662 v9_form = true;
5663 }
5664
5665 if (v9_form)
5666 strcpy (string, "ba%*,pt\t%%xcc, ");
5667 else
5668 strcpy (string, "b%*\t");
5669
5670 p = strchr (string, '\0');
5671 *p++ = '%';
5672 *p++ = 'l';
5673 *p++ = '0' + label;
5674 *p++ = '%';
5675 *p++ = '(';
5676 *p = '\0';
5677
5678 return string;
5679 }
5680
5681 /* Return the string to output a conditional branch to LABEL, which is
5682 the operand number of the label. OP is the conditional expression.
5683 XEXP (OP, 0) is assumed to be a condition code register (integer or
5684 floating point) and its mode specifies what kind of comparison we made.
5685
5686 DEST is the destination insn (i.e. the label), INSN is the source.
5687
5688 REVERSED is nonzero if we should reverse the sense of the comparison.
5689
5690 ANNUL is nonzero if we should generate an annulling branch. */
5691
5692 const char *
5693 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5694 rtx insn)
5695 {
5696 static char string[64];
5697 enum rtx_code code = GET_CODE (op);
5698 rtx cc_reg = XEXP (op, 0);
5699 enum machine_mode mode = GET_MODE (cc_reg);
5700 const char *labelno, *branch;
5701 int spaces = 8, far;
5702 char *p;
5703
5704 /* v9 branches are limited to +-1MB. If it is too far away,
5705 change
5706
5707 bne,pt %xcc, .LC30
5708
5709 to
5710
5711 be,pn %xcc, .+12
5712 nop
5713 ba .LC30
5714
5715 and
5716
5717 fbne,a,pn %fcc2, .LC29
5718
5719 to
5720
5721 fbe,pt %fcc2, .+16
5722 nop
5723 ba .LC29 */
5724
5725 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5726 if (reversed ^ far)
5727 {
5728 /* Reversing FP compares takes care: an ordered compare
5729 becomes an unordered compare and vice versa. */
5730 if (mode == CCFPmode || mode == CCFPEmode)
5731 code = reverse_condition_maybe_unordered (code);
5732 else
5733 code = reverse_condition (code);
5734 }
5735
5736 /* Start by writing the branch condition. */
5737 if (mode == CCFPmode || mode == CCFPEmode)
5738 {
5739 switch (code)
5740 {
5741 case NE:
5742 branch = "fbne";
5743 break;
5744 case EQ:
5745 branch = "fbe";
5746 break;
5747 case GE:
5748 branch = "fbge";
5749 break;
5750 case GT:
5751 branch = "fbg";
5752 break;
5753 case LE:
5754 branch = "fble";
5755 break;
5756 case LT:
5757 branch = "fbl";
5758 break;
5759 case UNORDERED:
5760 branch = "fbu";
5761 break;
5762 case ORDERED:
5763 branch = "fbo";
5764 break;
5765 case UNGT:
5766 branch = "fbug";
5767 break;
5768 case UNLT:
5769 branch = "fbul";
5770 break;
5771 case UNEQ:
5772 branch = "fbue";
5773 break;
5774 case UNGE:
5775 branch = "fbuge";
5776 break;
5777 case UNLE:
5778 branch = "fbule";
5779 break;
5780 case LTGT:
5781 branch = "fblg";
5782 break;
5783
5784 default:
5785 gcc_unreachable ();
5786 }
5787
5788 /* ??? !v9: FP branches cannot be preceded by another floating point
5789 insn. Because there is currently no concept of pre-delay slots,
5790 we can fix this only by always emitting a nop before a floating
5791 point branch. */
5792
5793 string[0] = '\0';
5794 if (! TARGET_V9)
5795 strcpy (string, "nop\n\t");
5796 strcat (string, branch);
5797 }
5798 else
5799 {
5800 switch (code)
5801 {
5802 case NE:
5803 branch = "bne";
5804 break;
5805 case EQ:
5806 branch = "be";
5807 break;
5808 case GE:
5809 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5810 branch = "bpos";
5811 else
5812 branch = "bge";
5813 break;
5814 case GT:
5815 branch = "bg";
5816 break;
5817 case LE:
5818 branch = "ble";
5819 break;
5820 case LT:
5821 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5822 branch = "bneg";
5823 else
5824 branch = "bl";
5825 break;
5826 case GEU:
5827 branch = "bgeu";
5828 break;
5829 case GTU:
5830 branch = "bgu";
5831 break;
5832 case LEU:
5833 branch = "bleu";
5834 break;
5835 case LTU:
5836 branch = "blu";
5837 break;
5838
5839 default:
5840 gcc_unreachable ();
5841 }
5842 strcpy (string, branch);
5843 }
5844 spaces -= strlen (branch);
5845 p = strchr (string, '\0');
5846
5847 /* Now add the annulling, the label, and a possible nop. */
5848 if (annul && ! far)
5849 {
5850 strcpy (p, ",a");
5851 p += 2;
5852 spaces -= 2;
5853 }
5854
5855 if (TARGET_V9)
5856 {
5857 rtx note;
5858 int v8 = 0;
5859
5860 if (! far && insn && INSN_ADDRESSES_SET_P ())
5861 {
5862 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5863 - INSN_ADDRESSES (INSN_UID (insn)));
5864 /* Leave some instructions for "slop". */
5865 if (delta < -260000 || delta >= 260000)
5866 v8 = 1;
5867 }
5868
5869 if (mode == CCFPmode || mode == CCFPEmode)
5870 {
5871 static char v9_fcc_labelno[] = "%%fccX, ";
5872 /* Set the char indicating the number of the fcc reg to use. */
5873 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5874 labelno = v9_fcc_labelno;
5875 if (v8)
5876 {
5877 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5878 labelno = "";
5879 }
5880 }
5881 else if (mode == CCXmode || mode == CCX_NOOVmode)
5882 {
5883 labelno = "%%xcc, ";
5884 gcc_assert (! v8);
5885 }
5886 else
5887 {
5888 labelno = "%%icc, ";
5889 if (v8)
5890 labelno = "";
5891 }
5892
5893 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5894 {
5895 strcpy (p,
5896 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5897 ? ",pt" : ",pn");
5898 p += 3;
5899 spaces -= 3;
5900 }
5901 }
5902 else
5903 labelno = "";
5904
5905 if (spaces > 0)
5906 *p++ = '\t';
5907 else
5908 *p++ = ' ';
5909 strcpy (p, labelno);
5910 p = strchr (p, '\0');
5911 if (far)
5912 {
5913 strcpy (p, ".+12\n\t nop\n\tb\t");
5914 /* Skip the next insn if requested or
5915 if we know that it will be a nop. */
5916 if (annul || ! final_sequence)
5917 p[3] = '6';
5918 p += 14;
5919 }
5920 *p++ = '%';
5921 *p++ = 'l';
5922 *p++ = label + '0';
5923 *p++ = '%';
5924 *p++ = '#';
5925 *p = '\0';
5926
5927 return string;
5928 }
5929
5930 /* Emit a library call comparison between floating point X and Y.
5931 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
5932 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
5933 values as arguments instead of the TFmode registers themselves,
5934 that's why we cannot call emit_float_lib_cmp. */
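/* The comparison libcalls encode their result as a small integer:
   0 for equal, 1 for less, 2 for greater, 3 for unordered.  The
   unordered cases below decode that value, e.g. UNLT tests the low
   bit (1 or 3: less or unordered) and LTGT tests ((result + 1) & 2)
   for the ordered inequalities 1 and 2.  */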
5935 void
5936 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
5937 {
5938 const char *qpfunc;
5939 rtx slot0, slot1, result, tem, tem2;
5940 enum machine_mode mode;
5941
5942 switch (comparison)
5943 {
5944 case EQ:
5945 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
5946 break;
5947
5948 case NE:
5949 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
5950 break;
5951
5952 case GT:
5953 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
5954 break;
5955
5956 case GE:
5957 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
5958 break;
5959
5960 case LT:
5961 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
5962 break;
5963
5964 case LE:
5965 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
5966 break;
5967
5968 case ORDERED:
5969 case UNORDERED:
5970 case UNGT:
5971 case UNLT:
5972 case UNEQ:
5973 case UNGE:
5974 case UNLE:
5975 case LTGT:
5976 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
5977 break;
5978
5979 default:
5980 gcc_unreachable ();
5981 }
5982
5983 if (TARGET_ARCH64)
5984 {
5985 if (GET_CODE (x) != MEM)
5986 {
5987 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5988 emit_move_insn (slot0, x);
5989 }
5990 else
5991 slot0 = x;
5992
5993 if (GET_CODE (y) != MEM)
5994 {
5995 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5996 emit_move_insn (slot1, y);
5997 }
5998 else
5999 slot1 = y;
6000
6001 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6002 DImode, 2,
6003 XEXP (slot0, 0), Pmode,
6004 XEXP (slot1, 0), Pmode);
6005
6006 mode = DImode;
6007 }
6008 else
6009 {
6010 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6011 SImode, 2,
6012 x, TFmode, y, TFmode);
6013
6014 mode = SImode;
6015 }
6016
6017
6018 /* Immediately move the result of the libcall into a pseudo
6019 register so reload doesn't clobber the value if it needs
6020 the return register for a spill reg. */
6021 result = gen_reg_rtx (mode);
6022 emit_move_insn (result, hard_libcall_value (mode));
6023
6024 switch (comparison)
6025 {
6026 default:
6027 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6028 break;
6029 case ORDERED:
6030 case UNORDERED:
6031 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6032 NULL_RTX, mode, 0);
6033 break;
6034 case UNGT:
6035 case UNGE:
6036 emit_cmp_insn (result, const1_rtx,
6037 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6038 break;
6039 case UNLE:
6040 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6041 break;
6042 case UNLT:
6043 tem = gen_reg_rtx (mode);
6044 if (TARGET_ARCH32)
6045 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6046 else
6047 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6048 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6049 break;
6050 case UNEQ:
6051 case LTGT:
6052 tem = gen_reg_rtx (mode);
6053 if (TARGET_ARCH32)
6054 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6055 else
6056 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6057 tem2 = gen_reg_rtx (mode);
6058 if (TARGET_ARCH32)
6059 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6060 else
6061 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6062 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6063 NULL_RTX, mode, 0);
6064 break;
6065 }
6066 }
6067
6068 /* Generate an unsigned DImode to FP conversion. This is the same code
6069 optabs would emit if we didn't have TFmode patterns. */
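/* In C terms the expansion is roughly (an illustrative sketch, FLOAT
   standing for the mode-sized float type):

     if ((long long) in >= 0)
       out = (FLOAT) in;
     else
       {
         unsigned long long half = (in >> 1) | (in & 1);
         out = (FLOAT) half * 2.0;
       }

   OR-ing the lost low bit back in acts as a sticky bit, so the final
   doubling still rounds correctly.  */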
6070
6071 void
6072 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6073 {
6074 rtx neglab, donelab, i0, i1, f0, in, out;
6075
6076 out = operands[0];
6077 in = force_reg (DImode, operands[1]);
6078 neglab = gen_label_rtx ();
6079 donelab = gen_label_rtx ();
6080 i0 = gen_reg_rtx (DImode);
6081 i1 = gen_reg_rtx (DImode);
6082 f0 = gen_reg_rtx (mode);
6083
6084 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6085
6086 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6087 emit_jump_insn (gen_jump (donelab));
6088 emit_barrier ();
6089
6090 emit_label (neglab);
6091
6092 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6093 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6094 emit_insn (gen_iordi3 (i0, i0, i1));
6095 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6096 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6097
6098 emit_label (donelab);
6099 }
6100
6101 /* Generate an FP to unsigned DImode conversion. This is the same code
6102 optabs would emit if we didn't have TFmode patterns. */
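/* Roughly, with LIMIT = 2^63 (an illustrative sketch):

     if (in < LIMIT)
       out = (long long) in;
     else
       out = (long long) (in - LIMIT) ^ (1ULL << 63);

   i.e. inputs with the high bit set are reduced below 2^63, converted
   as signed, and have the high bit restored by the final XOR.  */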
6103
6104 void
6105 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6106 {
6107 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6108
6109 out = operands[0];
6110 in = force_reg (mode, operands[1]);
6111 neglab = gen_label_rtx ();
6112 donelab = gen_label_rtx ();
6113 i0 = gen_reg_rtx (DImode);
6114 i1 = gen_reg_rtx (DImode);
6115 limit = gen_reg_rtx (mode);
6116 f0 = gen_reg_rtx (mode);
6117
6118 emit_move_insn (limit,
6119 CONST_DOUBLE_FROM_REAL_VALUE (
6120 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6121 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6122
6123 emit_insn (gen_rtx_SET (VOIDmode,
6124 out,
6125 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6126 emit_jump_insn (gen_jump (donelab));
6127 emit_barrier ();
6128
6129 emit_label (neglab);
6130
6131 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6132 emit_insn (gen_rtx_SET (VOIDmode,
6133 i0,
6134 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6135 emit_insn (gen_movdi (i1, const1_rtx));
6136 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6137 emit_insn (gen_xordi3 (out, i0, i1));
6138
6139 emit_label (donelab);
6140 }
6141
6142 /* Return the string to output a conditional branch to LABEL, testing
6143 register REG. LABEL is the operand number of the label; REG is the
6144 operand number of the reg. OP is the conditional expression. The mode
6145 of REG says what kind of comparison we made.
6146
6147 DEST is the destination insn (i.e. the label), INSN is the source.
6148
6149 REVERSED is nonzero if we should reverse the sense of the comparison.
6150
6151 ANNUL is nonzero if we should generate an annulling branch. */
6152
6153 const char *
6154 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6155 int annul, rtx insn)
6156 {
6157 static char string[64];
6158 enum rtx_code code = GET_CODE (op);
6159 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6160 rtx note;
6161 int far;
6162 char *p;
6163
6164 /* Branches on registers are limited to +-128KB. If it is too far away,
6165 change
6166
6167 brnz,pt %g1, .LC30
6168
6169 to
6170
6171 brz,pn %g1, .+12
6172 nop
6173 ba,pt %xcc, .LC30
6174
6175 and
6176
6177 brgez,a,pn %o1, .LC29
6178
6179 to
6180
6181 brlz,pt %o1, .+16
6182 nop
6183 ba,pt %xcc, .LC29 */
6184
6185 far = get_attr_length (insn) >= 3;
6186
6187 /* These are integer register branches, so we can always just reverse the code. */
6188 if (reversed ^ far)
6189 code = reverse_condition (code);
6190
6191 /* Only 64 bit versions of these instructions exist. */
6192 gcc_assert (mode == DImode);
6193
6194 /* Start by writing the branch condition. */
6195
6196 switch (code)
6197 {
6198 case NE:
6199 strcpy (string, "brnz");
6200 break;
6201
6202 case EQ:
6203 strcpy (string, "brz");
6204 break;
6205
6206 case GE:
6207 strcpy (string, "brgez");
6208 break;
6209
6210 case LT:
6211 strcpy (string, "brlz");
6212 break;
6213
6214 case LE:
6215 strcpy (string, "brlez");
6216 break;
6217
6218 case GT:
6219 strcpy (string, "brgz");
6220 break;
6221
6222 default:
6223 gcc_unreachable ();
6224 }
6225
6226 p = strchr (string, '\0');
6227
6228 /* Now add the annulling, reg, label, and nop. */
6229 if (annul && ! far)
6230 {
6231 strcpy (p, ",a");
6232 p += 2;
6233 }
6234
6235 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6236 {
6237 strcpy (p,
6238 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6239 ? ",pt" : ",pn");
6240 p += 3;
6241 }
6242
6243 *p = p < string + 8 ? '\t' : ' ';
6244 p++;
6245 *p++ = '%';
6246 *p++ = '0' + reg;
6247 *p++ = ',';
6248 *p++ = ' ';
6249 if (far)
6250 {
6251 int veryfar = 1, delta;
6252
6253 if (INSN_ADDRESSES_SET_P ())
6254 {
6255 delta = (INSN_ADDRESSES (INSN_UID (dest))
6256 - INSN_ADDRESSES (INSN_UID (insn)));
6257 /* Leave some instructions for "slop". */
6258 if (delta >= -260000 && delta < 260000)
6259 veryfar = 0;
6260 }
6261
6262 strcpy (p, ".+12\n\t nop\n\t");
6263 /* Skip the next insn if requested or
6264 if we know that it will be a nop. */
6265 if (annul || ! final_sequence)
6266 p[3] = '6';
6267 p += 12;
6268 if (veryfar)
6269 {
6270 strcpy (p, "b\t");
6271 p += 2;
6272 }
6273 else
6274 {
6275 strcpy (p, "ba,pt\t%%xcc, ");
6276 p += 13;
6277 }
6278 }
6279 *p++ = '%';
6280 *p++ = 'l';
6281 *p++ = '0' + label;
6282 *p++ = '%';
6283 *p++ = '#';
6284 *p = '\0';
6285
6286 return string;
6287 }
6288
6289 /* Return 1 if any of the registers used by the instruction are %l[0-7]
6290 or %o[0-7]; such instructions cannot be used in the delay slot of a
6291 return insn on V9. If TEST is 0, also rename all %i[0-7] registers
6292 to their %o[0-7] counterparts. */
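/* For example, with TEST == 0 a delay-slot candidate such as
   (set (reg:SI %i0) ...) is rewritten to (set (reg:SI %o0) ...),
   matching the window shift performed by the return instruction.  */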
6293
6294 static int
6295 epilogue_renumber (register rtx *where, int test)
6296 {
6297 register const char *fmt;
6298 register int i;
6299 register enum rtx_code code;
6300
6301 if (*where == 0)
6302 return 0;
6303
6304 code = GET_CODE (*where);
6305
6306 switch (code)
6307 {
6308 case REG:
6309 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6310 return 1;
6311 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6312 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
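      /* Fall through.  */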
6313 case SCRATCH:
6314 case CC0:
6315 case PC:
6316 case CONST_INT:
6317 case CONST_DOUBLE:
6318 return 0;
6319
6320 /* Do not replace the frame pointer with the stack pointer because
6321 it can cause the delay-slot instruction to load below the stack.
6322 This occurs when instructions like:
6323
6324 (set (reg/i:SI 24 %i0)
6325 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6326 (const_int -20 [0xffffffec])) 0))
6327
6328 are in the return delay slot. */
6329 case PLUS:
6330 if (GET_CODE (XEXP (*where, 0)) == REG
6331 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6332 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6333 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6334 return 1;
6335 break;
6336
6337 case MEM:
6338 if (SPARC_STACK_BIAS
6339 && GET_CODE (XEXP (*where, 0)) == REG
6340 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6341 return 1;
6342 break;
6343
6344 default:
6345 break;
6346 }
6347
6348 fmt = GET_RTX_FORMAT (code);
6349
6350 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6351 {
6352 if (fmt[i] == 'E')
6353 {
6354 register int j;
6355 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6356 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6357 return 1;
6358 }
6359 else if (fmt[i] == 'e'
6360 && epilogue_renumber (&(XEXP (*where, i)), test))
6361 return 1;
6362 }
6363 return 0;
6364 }
6365 \f
6366 /* Leaf functions and non-leaf functions have different needs. */
6367
6368 static const int
6369 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6370
6371 static const int
6372 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6373
6374 static const int *const reg_alloc_orders[] = {
6375 reg_leaf_alloc_order,
6376 reg_nonleaf_alloc_order};
6377
6378 void
6379 order_regs_for_local_alloc (void)
6380 {
6381 static int last_order_nonleaf = 1;
6382
6383 if (regs_ever_live[15] != last_order_nonleaf)
6384 {
6385 last_order_nonleaf = !last_order_nonleaf;
6386 memcpy ((char *) reg_alloc_order,
6387 (const char *) reg_alloc_orders[last_order_nonleaf],
6388 FIRST_PSEUDO_REGISTER * sizeof (int));
6389 }
6390 }
6391 \f
6392 /* Return 1 if REG and MEM are legitimate enough to allow the various
6393 mem<-->reg splits to be run. */
6394
6395 int
6396 sparc_splitdi_legitimate (rtx reg, rtx mem)
6397 {
6398 /* Punt if we are here by mistake. */
6399 gcc_assert (reload_completed);
6400
6401 /* We must have an offsettable memory reference. */
6402 if (! offsettable_memref_p (mem))
6403 return 0;
6404
6405 /* If we have legitimate args for ldd/std, we do not want
6406 the split to happen. */
6407 if ((REGNO (reg) % 2) == 0
6408 && mem_min_alignment (mem, 8))
6409 return 0;
6410
6411 /* Success. */
6412 return 1;
6413 }
6414
6415 /* Return 1 if x and y are some kind of REG and they refer to
6416 different hard registers. This test is guaranteed to be
6417 run after reload. */
6418
6419 int
6420 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6421 {
6422 if (GET_CODE (x) != REG)
6423 return 0;
6424 if (GET_CODE (y) != REG)
6425 return 0;
6426 if (REGNO (x) == REGNO (y))
6427 return 0;
6428 return 1;
6429 }
6430
6431 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6432 This makes them candidates for using ldd and std insns.
6433
6434 Note reg1 and reg2 *must* be hard registers. */
6435
6436 int
6437 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6438 {
6439 /* We might have been passed a SUBREG. */
6440 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6441 return 0;
6442
6443 if (REGNO (reg1) % 2 != 0)
6444 return 0;
6445
6446 /* Integer ldd is deprecated in SPARC V9. */
6447 if (TARGET_V9 && REGNO (reg1) < 32)
6448 return 0;
6449
6450 return (REGNO (reg1) == REGNO (reg2) - 1);
6451 }
6452
6453 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6454 an ldd or std insn.
6455
6456 This can only happen when addr1 and addr2, the addresses in mem1
6457 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6458 addr1 must also be aligned on a 64-bit boundary.
6459
6460 Also, if dependent_reg_rtx is not null, it should not be used to
6461 compute the address for mem1, i.e. we cannot optimize a sequence
6462 like:
6463 ld [%o0], %o0
6464 ld [%o0 + 4], %o1
6465 to
6466 ldd [%o0], %o0
6467 nor:
6468 ld [%g3 + 4], %g3
6469 ld [%g3], %g2
6470 to
6471 ldd [%g3], %g2
6472
6473 But, note that the transformation from:
6474 ld [%g2 + 4], %g3
6475 ld [%g2], %g2
6476 to
6477 ldd [%g2], %g2
6478 is perfectly fine. Thus, the peephole2 patterns always pass us
6479 the destination register of the first load, never the second one.
6480
6481 For stores we don't have a similar problem, so dependent_reg_rtx is
6482 NULL_RTX. */
6483
6484 int
6485 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6486 {
6487 rtx addr1, addr2;
6488 unsigned int reg1;
6489 HOST_WIDE_INT offset1;
6490
6491 /* The mems cannot be volatile. */
6492 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6493 return 0;
6494
6495 /* MEM1 should be aligned on a 64-bit boundary. */
6496 if (MEM_ALIGN (mem1) < 64)
6497 return 0;
6498
6499 addr1 = XEXP (mem1, 0);
6500 addr2 = XEXP (mem2, 0);
6501
6502 /* Extract a register number and offset (if used) from the first addr. */
6503 if (GET_CODE (addr1) == PLUS)
6504 {
6505 /* If not a REG, return zero. */
6506 if (GET_CODE (XEXP (addr1, 0)) != REG)
6507 return 0;
6508 else
6509 {
6510 reg1 = REGNO (XEXP (addr1, 0));
6511 /* The offset must be constant! */
6512 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6513 return 0;
6514 offset1 = INTVAL (XEXP (addr1, 1));
6515 }
6516 }
6517 else if (GET_CODE (addr1) != REG)
6518 return 0;
6519 else
6520 {
6521 reg1 = REGNO (addr1);
6522 /* This was a simple (mem (reg)) expression. Offset is 0. */
6523 offset1 = 0;
6524 }
6525
6526 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6527 if (GET_CODE (addr2) != PLUS)
6528 return 0;
6529
6530 if (GET_CODE (XEXP (addr2, 0)) != REG
6531 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6532 return 0;
6533
6534 if (reg1 != REGNO (XEXP (addr2, 0)))
6535 return 0;
6536
6537 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6538 return 0;
6539
6540 /* The first offset must be evenly divisible by 8 to ensure the
6541 address is 64 bit aligned. */
6542 if (offset1 % 8 != 0)
6543 return 0;
6544
6545 /* The offset for the second addr must be 4 more than the first addr. */
6546 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6547 return 0;
6548
6549 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6550 instructions. */
6551 return 1;
6552 }
6553
6554 /* Return 1 if reg is a pseudo, or is the first register in
6555 a hard register pair. This makes it a candidate for use in
6556 ldd and std insns. */
6557
6558 int
6559 register_ok_for_ldd (rtx reg)
6560 {
6561 /* We might have been passed a SUBREG. */
6562 if (GET_CODE (reg) != REG)
6563 return 0;
6564
6565 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6566 return (REGNO (reg) % 2 == 0);
6567 else
6568 return 1;
6569 }
6570 \f
6571 /* Print operand X (an rtx) in assembler syntax to file FILE.
6572 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6573 For `%' followed by punctuation, CODE is the punctuation and X is null. */
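/* For example, with a DImode value in %o0, %H0 prints the high-order
   word name %o0 and %L0 the low-order word name %o1 (SPARC words are
   big-endian), while %R0 always prints %o1.  */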
6574
6575 void
6576 print_operand (FILE *file, rtx x, int code)
6577 {
6578 switch (code)
6579 {
6580 case '#':
6581 /* Output an insn in a delay slot. */
6582 if (final_sequence)
6583 sparc_indent_opcode = 1;
6584 else
6585 fputs ("\n\t nop", file);
6586 return;
6587 case '*':
6588 /* Output an annul flag if there's nothing for the delay slot and we
6589 are optimizing. This is always used with '(' below.
6590 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6591 this is a dbx bug. So, we only do this when optimizing.
6592 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6593 Always emit a nop in case the next instruction is a branch. */
6594 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6595 fputs (",a", file);
6596 return;
6597 case '(':
6598 /* Output a 'nop' if there's nothing for the delay slot and we are
6599 not optimizing. This is always used with '*' above. */
6600 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6601 fputs ("\n\t nop", file);
6602 else if (final_sequence)
6603 sparc_indent_opcode = 1;
6604 return;
6605 case ')':
6606 /* Output the right displacement from the saved PC on function return.
6607 The caller may have placed an "unimp" insn immediately after the call
6608 so we have to account for it. This insn is used in the 32-bit ABI
6609 when calling a function that returns a non-zero-sized structure. The
6610 64-bit ABI doesn't have it. Be careful to have this test be the same
6611 as that used on the call. */
6612 if (! TARGET_ARCH64
6613 && current_function_returns_struct
6614 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6615 == INTEGER_CST)
6616 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6617 fputs ("12", file);
6618 else
6619 fputc ('8', file);
6620 return;
6621 case '_':
6622 /* Output the Embedded Medium/Anywhere code model base register. */
6623 fputs (EMBMEDANY_BASE_REG, file);
6624 return;
6625 case '&':
6626 /* Print some local dynamic TLS name. */
6627 assemble_name (file, get_some_local_dynamic_name ());
6628 return;
6629
6630 case 'Y':
6631 /* Adjust the operand to take into account a RESTORE operation. */
6632 if (GET_CODE (x) == CONST_INT)
6633 break;
6634 else if (GET_CODE (x) != REG)
6635 output_operand_lossage ("invalid %%Y operand");
6636 else if (REGNO (x) < 8)
6637 fputs (reg_names[REGNO (x)], file);
6638 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6639 fputs (reg_names[REGNO (x)-16], file);
6640 else
6641 output_operand_lossage ("invalid %%Y operand");
6642 return;
6643 case 'L':
6644 /* Print out the low order register name of a register pair. */
6645 if (WORDS_BIG_ENDIAN)
6646 fputs (reg_names[REGNO (x)+1], file);
6647 else
6648 fputs (reg_names[REGNO (x)], file);
6649 return;
6650 case 'H':
6651 /* Print out the high order register name of a register pair. */
6652 if (WORDS_BIG_ENDIAN)
6653 fputs (reg_names[REGNO (x)], file);
6654 else
6655 fputs (reg_names[REGNO (x)+1], file);
6656 return;
6657 case 'R':
6658 /* Print out the second register name of a register pair or quad.
6659 I.e., R (%o0) => %o1. */
6660 fputs (reg_names[REGNO (x)+1], file);
6661 return;
6662 case 'S':
6663 /* Print out the third register name of a register quad.
6664 I.e., S (%o0) => %o2. */
6665 fputs (reg_names[REGNO (x)+2], file);
6666 return;
6667 case 'T':
6668 /* Print out the fourth register name of a register quad.
6669 I.e., T (%o0) => %o3. */
6670 fputs (reg_names[REGNO (x)+3], file);
6671 return;
6672 case 'x':
6673 /* Print a condition code register. */
6674 if (REGNO (x) == SPARC_ICC_REG)
6675 {
6676 /* We don't handle CC[X]_NOOVmode because they're not supposed
6677 to occur here. */
6678 if (GET_MODE (x) == CCmode)
6679 fputs ("%icc", file);
6680 else if (GET_MODE (x) == CCXmode)
6681 fputs ("%xcc", file);
6682 else
6683 gcc_unreachable ();
6684 }
6685 else
6686 /* %fccN register */
6687 fputs (reg_names[REGNO (x)], file);
6688 return;
6689 case 'm':
6690 /* Print the operand's address only. */
6691 output_address (XEXP (x, 0));
6692 return;
6693 case 'r':
6694 /* In this case we need a register. Use %g0 if the
6695 operand is const0_rtx. */
6696 if (x == const0_rtx
6697 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6698 {
6699 fputs ("%g0", file);
6700 return;
6701 }
6702 else
6703 break;
6704
6705 case 'A':
6706 switch (GET_CODE (x))
6707 {
6708 case IOR: fputs ("or", file); break;
6709 case AND: fputs ("and", file); break;
6710 case XOR: fputs ("xor", file); break;
6711 default: output_operand_lossage ("invalid %%A operand");
6712 }
6713 return;
6714
6715 case 'B':
6716 switch (GET_CODE (x))
6717 {
6718 case IOR: fputs ("orn", file); break;
6719 case AND: fputs ("andn", file); break;
6720 case XOR: fputs ("xnor", file); break;
6721 default: output_operand_lossage ("invalid %%B operand");
6722 }
6723 return;
6724
6725 /* These are used by the conditional move instructions. */
6726 case 'c' :
6727 case 'C':
6728 {
6729 enum rtx_code rc = GET_CODE (x);
6730
6731 if (code == 'c')
6732 {
6733 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6734 if (mode == CCFPmode || mode == CCFPEmode)
6735 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6736 else
6737 rc = reverse_condition (GET_CODE (x));
6738 }
6739 switch (rc)
6740 {
6741 case NE: fputs ("ne", file); break;
6742 case EQ: fputs ("e", file); break;
6743 case GE: fputs ("ge", file); break;
6744 case GT: fputs ("g", file); break;
6745 case LE: fputs ("le", file); break;
6746 case LT: fputs ("l", file); break;
6747 case GEU: fputs ("geu", file); break;
6748 case GTU: fputs ("gu", file); break;
6749 case LEU: fputs ("leu", file); break;
6750 case LTU: fputs ("lu", file); break;
6751 case LTGT: fputs ("lg", file); break;
6752 case UNORDERED: fputs ("u", file); break;
6753 case ORDERED: fputs ("o", file); break;
6754 case UNLT: fputs ("ul", file); break;
6755 case UNLE: fputs ("ule", file); break;
6756 case UNGT: fputs ("ug", file); break;
6757 case UNGE: fputs ("uge", file); break;
6758 case UNEQ: fputs ("ue", file); break;
6759 default: output_operand_lossage (code == 'c'
6760 ? "invalid %%c operand"
6761 : "invalid %%C operand");
6762 }
6763 return;
6764 }
6765
6766 /* These are used by the movr instruction pattern. */
6767 case 'd':
6768 case 'D':
6769 {
6770 enum rtx_code rc = (code == 'd'
6771 ? reverse_condition (GET_CODE (x))
6772 : GET_CODE (x));
6773 switch (rc)
6774 {
6775 case NE: fputs ("ne", file); break;
6776 case EQ: fputs ("e", file); break;
6777 case GE: fputs ("gez", file); break;
6778 case LT: fputs ("lz", file); break;
6779 case LE: fputs ("lez", file); break;
6780 case GT: fputs ("gz", file); break;
6781 default: output_operand_lossage (code == 'd'
6782 ? "invalid %%d operand"
6783 : "invalid %%D operand");
6784 }
6785 return;
6786 }
6787
6788 case 'b':
6789 {
6790 /* Print a sign-extended character. */
6791 int i = trunc_int_for_mode (INTVAL (x), QImode);
6792 fprintf (file, "%d", i);
6793 return;
6794 }
6795
6796 case 'f':
6797 /* Operand must be a MEM; write its address. */
6798 if (GET_CODE (x) != MEM)
6799 output_operand_lossage ("invalid %%f operand");
6800 output_address (XEXP (x, 0));
6801 return;
6802
6803 case 's':
6804 {
6805 /* Print a sign-extended 32-bit value. */
6806 HOST_WIDE_INT i;
6807 if (GET_CODE(x) == CONST_INT)
6808 i = INTVAL (x);
6809 else if (GET_CODE(x) == CONST_DOUBLE)
6810 i = CONST_DOUBLE_LOW (x);
6811 else
6812 {
6813 output_operand_lossage ("invalid %%s operand");
6814 return;
6815 }
6816 i = trunc_int_for_mode (i, SImode);
6817 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6818 return;
6819 }
6820
6821 case 0:
6822 /* Do nothing special. */
6823 break;
6824
6825 default:
6826 /* Undocumented flag. */
6827 output_operand_lossage ("invalid operand output code");
6828 }
6829
6830 if (GET_CODE (x) == REG)
6831 fputs (reg_names[REGNO (x)], file);
6832 else if (GET_CODE (x) == MEM)
6833 {
6834 fputc ('[', file);
6835 /* Poor Sun assembler doesn't understand absolute addressing. */
6836 if (CONSTANT_P (XEXP (x, 0)))
6837 fputs ("%g0+", file);
6838 output_address (XEXP (x, 0));
6839 fputc (']', file);
6840 }
6841 else if (GET_CODE (x) == HIGH)
6842 {
6843 fputs ("%hi(", file);
6844 output_addr_const (file, XEXP (x, 0));
6845 fputc (')', file);
6846 }
6847 else if (GET_CODE (x) == LO_SUM)
6848 {
6849 print_operand (file, XEXP (x, 0), 0);
6850 if (TARGET_CM_MEDMID)
6851 fputs ("+%l44(", file);
6852 else
6853 fputs ("+%lo(", file);
6854 output_addr_const (file, XEXP (x, 1));
6855 fputc (')', file);
6856 }
6857 else if (GET_CODE (x) == CONST_DOUBLE
6858 && (GET_MODE (x) == VOIDmode
6859 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6860 {
6861 if (CONST_DOUBLE_HIGH (x) == 0)
6862 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6863 else if (CONST_DOUBLE_HIGH (x) == -1
6864 && CONST_DOUBLE_LOW (x) < 0)
6865 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
6866 else
6867 output_operand_lossage ("long long constant not a valid immediate operand");
6868 }
6869 else if (GET_CODE (x) == CONST_DOUBLE)
6870 output_operand_lossage ("floating point constant not a valid immediate operand");
6871 else { output_addr_const (file, x); }
6872 }
6873 \f
6874 /* Target hook for assembling integer objects. The sparc version has
6875 special handling for aligned DI-mode objects. */
6876
6877 static bool
6878 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
6879 {
6880 /* ??? We only output .xword's for symbols and only then in environments
6881 where the assembler can handle them. */
6882 if (aligned_p && size == 8
6883 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
6884 {
6885 if (TARGET_V9)
6886 {
6887 assemble_integer_with_op ("\t.xword\t", x);
6888 return true;
6889 }
6890 else
6891 {
6892 assemble_aligned_integer (4, const0_rtx);
6893 assemble_aligned_integer (4, x);
6894 return true;
6895 }
6896 }
6897 return default_assemble_integer (x, size, aligned_p);
6898 }
6899 \f
6900 /* Return the value of a code used in the .proc pseudo-op that says
6901 what kind of result this function returns. For non-C types, we pick
6902 the closest C type. */
6903
6904 #ifndef SHORT_TYPE_SIZE
6905 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
6906 #endif
6907
6908 #ifndef INT_TYPE_SIZE
6909 #define INT_TYPE_SIZE BITS_PER_WORD
6910 #endif
6911
6912 #ifndef LONG_TYPE_SIZE
6913 #define LONG_TYPE_SIZE BITS_PER_WORD
6914 #endif
6915
6916 #ifndef LONG_LONG_TYPE_SIZE
6917 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
6918 #endif
6919
6920 #ifndef FLOAT_TYPE_SIZE
6921 #define FLOAT_TYPE_SIZE BITS_PER_WORD
6922 #endif
6923
6924 #ifndef DOUBLE_TYPE_SIZE
6925 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6926 #endif
6927
6928 #ifndef LONG_DOUBLE_TYPE_SIZE
6929 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6930 #endif
6931
6932 unsigned long
6933 sparc_type_code (register tree type)
6934 {
6935 register unsigned long qualifiers = 0;
6936 register unsigned shift;
6937
6938 /* Only the first 30 bits of the qualifier are valid. We must refrain from
6939 setting more, since some assemblers will give an error for this. Also,
6940 we must be careful to avoid shifts of 32 bits or more to avoid getting
6941 unpredictable results. */
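  /* For example, "unsigned int **" walks two POINTER_TYPE levels,
     adding 1 << 6 and 1 << 8, and ends on an unsigned INTEGER_TYPE
     (code 14), giving 0x14e.  */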
6942
6943 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
6944 {
6945 switch (TREE_CODE (type))
6946 {
6947 case ERROR_MARK:
6948 return qualifiers;
6949
6950 case ARRAY_TYPE:
6951 qualifiers |= (3 << shift);
6952 break;
6953
6954 case FUNCTION_TYPE:
6955 case METHOD_TYPE:
6956 qualifiers |= (2 << shift);
6957 break;
6958
6959 case POINTER_TYPE:
6960 case REFERENCE_TYPE:
6961 case OFFSET_TYPE:
6962 qualifiers |= (1 << shift);
6963 break;
6964
6965 case RECORD_TYPE:
6966 return (qualifiers | 8);
6967
6968 case UNION_TYPE:
6969 case QUAL_UNION_TYPE:
6970 return (qualifiers | 9);
6971
6972 case ENUMERAL_TYPE:
6973 return (qualifiers | 10);
6974
6975 case VOID_TYPE:
6976 return (qualifiers | 16);
6977
6978 case INTEGER_TYPE:
6979 /* If this is a range type, consider it to be the underlying
6980 type. */
6981 if (TREE_TYPE (type) != 0)
6982 break;
6983
6984 /* Carefully distinguish all the standard types of C,
6985 without messing up if the language is not C. We do this by
6986 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
6987 look at both the names and the above fields, but that's redundant.
6988 Any type whose size is between two C types will be considered
6989 to be the wider of the two types. Also, we do not have a
6990 special code to use for "long long", so anything wider than
6991 long is treated the same. Note that we can't distinguish
6992 between "int" and "long" in this code if they are the same
6993 size, but that's fine, since neither can the assembler. */
6994
6995 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
6996 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
6997
6998 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
6999 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7000
7001 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7002 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7003
7004 else
7005 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7006
7007 case REAL_TYPE:
7008 /* If this is a range type, consider it to be the underlying
7009 type. */
7010 if (TREE_TYPE (type) != 0)
7011 break;
7012
7013 /* Carefully distinguish all the standard types of C,
7014 without messing up if the language is not C. */
7015
7016 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7017 return (qualifiers | 6);
7018
7019 else
7020 return (qualifiers | 7);
7021
7022 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7023 /* ??? We need to distinguish between double and float complex types,
7024 but I don't know how yet because I can't reach this code from
7025 existing front-ends. */
7026 return (qualifiers | 7); /* Who knows? */
7027
7028 case VECTOR_TYPE:
7029 case BOOLEAN_TYPE: /* Boolean truth value type. */
7030 case LANG_TYPE: /* ? */
7031 return qualifiers;
7032
7033 default:
7034 gcc_unreachable (); /* Not a type! */
7035 }
7036 }
7037
7038 return qualifiers;
7039 }
7040 \f
7041 /* Nested function support. */
7042
7043 /* Emit RTL insns to initialize the variable parts of a trampoline.
7044 FNADDR is an RTX for the address of the function's pure code.
7045 CXT is an RTX for the static chain value for the function.
7046
7047 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7048 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7049 (to store insns). This is a bit excessive. Perhaps a different
7050 mechanism would be better here.
7051
7052 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7053
7054 void
7055 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7056 {
7057 /* SPARC 32-bit trampoline:
7058
7059 sethi %hi(fn), %g1
7060 sethi %hi(static), %g2
7061 jmp %g1+%lo(fn)
7062 or %g2, %lo(static), %g2
7063
7064 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7065 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7066 */
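  /* The magic constants below follow from these encodings: 0x03000000 is
     "sethi %hi(0), %g1", 0x05000000 is "sethi %hi(0), %g2", 0x81c06000
     is "jmp %g1+0" and 0x8410a000 is "or %g2, 0, %g2"; the real address
     bits are IOR'd into each word at run time.  */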
7067
7068 emit_move_insn
7069 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7070 expand_binop (SImode, ior_optab,
7071 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7072 size_int (10), 0, 1),
7073 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7074 NULL_RTX, 1, OPTAB_DIRECT));
7075
7076 emit_move_insn
7077 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7078 expand_binop (SImode, ior_optab,
7079 expand_shift (RSHIFT_EXPR, SImode, cxt,
7080 size_int (10), 0, 1),
7081 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7082 NULL_RTX, 1, OPTAB_DIRECT));
7083
7084 emit_move_insn
7085 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7086 expand_binop (SImode, ior_optab,
7087 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7088 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7089 NULL_RTX, 1, OPTAB_DIRECT));
7090
7091 emit_move_insn
7092 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7093 expand_binop (SImode, ior_optab,
7094 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7095 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7096 NULL_RTX, 1, OPTAB_DIRECT));
7097
7098 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7099 aligned on a 16 byte boundary so one flush clears it all. */
7100 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7101 if (sparc_cpu != PROCESSOR_ULTRASPARC
7102 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7103 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7104 plus_constant (tramp, 8)))));
7105
7106 /* Call __enable_execute_stack after writing onto the stack to make sure
7107 the stack address is accessible. */
7108 #ifdef ENABLE_EXECUTE_STACK
7109 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7110 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7111 #endif
7112
7113 }
7114
7115 /* The 64-bit version is simpler because it makes more sense to load the
7116 values as "immediate" data out of the trampoline. It's also easier since
7117 we can read the PC without clobbering a register. */
7118
7119 void
7120 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7121 {
7122 /* SPARC 64-bit trampoline:
7123
7124 rd %pc, %g1
7125 ldx [%g1+24], %g5
7126 jmp %g5
7127 ldx [%g1+16], %g5
7128 +16 bytes data
7129 */
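  /* The constants below are those four instructions assembled, e.g.
     0x83414000 is "rd %pc, %g1" and 0xca586018 is "ldx [%g1+24], %g5";
     the function address and static chain are stored as data at offsets
     24 and 16 and fetched at run time by the two ldx's.  */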
7130
7131 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7132 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7133 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7134 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7135 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7136 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7137 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7138 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7139 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7140 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7141 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7142
7143 if (sparc_cpu != PROCESSOR_ULTRASPARC
7144 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7145 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7146
7147 /* Call __enable_execute_stack after writing onto the stack to make sure
7148 the stack address is accessible. */
7149 #ifdef ENABLE_EXECUTE_STACK
7150 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7151 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7152 #endif
7153 }
7154 \f
7155 /* Adjust the cost of a scheduling dependency. Return the new cost of
7156 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7157
7158 static int
7159 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7160 {
7161 enum attr_type insn_type;
7162
7163 if (! recog_memoized (insn))
7164 return 0;
7165
7166 insn_type = get_attr_type (insn);
7167
7168 if (REG_NOTE_KIND (link) == 0)
7169 {
7170 /* Data dependency; DEP_INSN writes a register that INSN reads some
7171 cycles later. */
7172
7173 /* If a load, then the dependence must be on the memory address;
7174 add an extra "cycle". Note that the cost could be two cycles
7175 if the reg was written late in an instruction group; we cannot tell
7176 here. */
7177 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7178 return cost + 3;
7179
7180 /* Get the delay only if the address of the store is the dependence. */
7181 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7182 {
7183 rtx pat = PATTERN(insn);
7184 rtx dep_pat = PATTERN (dep_insn);
7185
7186 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7187 return cost; /* This should not happen! */
7188
7189 /* The dependency between the two instructions was on the data that
7190 is being stored. Assume that this implies that the address of the
7191 store is not dependent. */
7192 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7193 return cost;
7194
7195 return cost + 3; /* An approximation. */
7196 }
7197
7198 /* A shift instruction cannot receive its data from an instruction
7199 in the same cycle; add a one cycle penalty. */
7200 if (insn_type == TYPE_SHIFT)
7201 return cost + 3; /* Split before cascade into shift. */
7202 }
7203 else
7204 {
7205 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7206 INSN writes some cycles later. */
7207
7208 /* These are only significant for the fpu unit; writing a fp reg before
7209 the fpu has finished with it stalls the processor. */
7210
7211 /* Reusing an integer register causes no problems. */
7212 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7213 return 0;
7214 }
7215
7216 return cost;
7217 }
7218
7219 static int
7220 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7221 {
7222 enum attr_type insn_type, dep_type;
7223 rtx pat = PATTERN(insn);
7224 rtx dep_pat = PATTERN (dep_insn);
7225
7226 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7227 return cost;
7228
7229 insn_type = get_attr_type (insn);
7230 dep_type = get_attr_type (dep_insn);
7231
7232 switch (REG_NOTE_KIND (link))
7233 {
7234 case 0:
7235 /* Data dependency; DEP_INSN writes a register that INSN reads some
7236 cycles later. */
7237
7238 switch (insn_type)
7239 {
7240 case TYPE_STORE:
7241 case TYPE_FPSTORE:
7242 /* Get the delay iff the address of the store is the dependence. */
7243 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7244 return cost;
7245
7246 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7247 return cost;
7248 return cost + 3;
7249
7250 case TYPE_LOAD:
7251 case TYPE_SLOAD:
7252 case TYPE_FPLOAD:
7253 /* If a load, then the dependence must be on the memory address. If
7254 the addresses aren't equal, then it might be a false dependency. */
7255 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7256 {
7257 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7258 || GET_CODE (SET_DEST (dep_pat)) != MEM
7259 || GET_CODE (SET_SRC (pat)) != MEM
7260 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7261 XEXP (SET_SRC (pat), 0)))
7262 return cost + 2;
7263
7264 return cost + 8;
7265 }
7266 break;
7267
7268 case TYPE_BRANCH:
7269 /* Compare to branch latency is 0. There is no benefit from
7270 separating compare and branch. */
7271 if (dep_type == TYPE_COMPARE)
7272 return 0;
7273 /* Floating point compare to branch latency is less than
7274 compare to conditional move. */
7275 if (dep_type == TYPE_FPCMP)
7276 return cost - 1;
7277 break;
7278 default:
7279 break;
7280 }
7281 break;
7282
7283 case REG_DEP_ANTI:
7284 /* Anti-dependencies only penalize the fpu unit. */
7285 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7286 return 0;
7287 break;
7288
7289 default:
7290 break;
7291 }
7292
7293 return cost;
7294 }
7295
7296 static int
7297 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7298 {
7299 switch (sparc_cpu)
7300 {
7301 case PROCESSOR_SUPERSPARC:
7302 cost = supersparc_adjust_cost (insn, link, dep, cost);
7303 break;
7304 case PROCESSOR_HYPERSPARC:
7305 case PROCESSOR_SPARCLITE86X:
7306 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7307 break;
7308 default:
7309 break;
7310 }
7311 return cost;
7312 }
7313
7314 static void
7315 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7316 int sched_verbose ATTRIBUTE_UNUSED,
7317 int max_ready ATTRIBUTE_UNUSED)
7318 {
7319 }
7320
7321 static int
7322 sparc_use_sched_lookahead (void)
7323 {
7324 if (sparc_cpu == PROCESSOR_ULTRASPARC
7325 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7326 return 4;
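  /* The mask below has one bit per processor enum value, so this is a
     set-membership test for the three processors listed.  */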
7327 if ((1 << sparc_cpu) &
7328 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7329 (1 << PROCESSOR_SPARCLITE86X)))
7330 return 3;
7331 return 0;
7332 }
7333
7334 static int
7335 sparc_issue_rate (void)
7336 {
7337 switch (sparc_cpu)
7338 {
7339 default:
7340 return 1;
7341 case PROCESSOR_V9:
7342 /* Assume V9 processors are capable of at least dual-issue. */
7343 return 2;
7344 case PROCESSOR_SUPERSPARC:
7345 return 3;
7346 case PROCESSOR_HYPERSPARC:
7347 case PROCESSOR_SPARCLITE86X:
7348 return 2;
7349 case PROCESSOR_ULTRASPARC:
7350 case PROCESSOR_ULTRASPARC3:
7351 return 4;
7352 }
7353 }
7354
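/* Return 1 if INSN sets a register in a way that is known to zero out
   the high 32 bits, -1 if the result is known to be sign-extended from
   SImode, and 0 if we cannot tell -- the same convention used by
   sparc_check_64 below.  */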
7355 static int
7356 set_extends (rtx insn)
7357 {
7358 register rtx pat = PATTERN (insn);
7359
7360 switch (GET_CODE (SET_SRC (pat)))
7361 {
7362 /* Load and some shift instructions zero extend. */
7363 case MEM:
7364 case ZERO_EXTEND:
7365 /* sethi clears the high bits. */
7366 case HIGH:
7367 /* LO_SUM is used with sethi; sethi clears the high bits and the
7368 values used with lo_sum are positive. */
7369 case LO_SUM:
7370 /* Store-flag instructions store 0 or 1. */
7371 case LT: case LTU:
7372 case GT: case GTU:
7373 case LE: case LEU:
7374 case GE: case GEU:
7375 case EQ:
7376 case NE:
7377 return 1;
7378 case AND:
7379 {
7380 rtx op0 = XEXP (SET_SRC (pat), 0);
7381 rtx op1 = XEXP (SET_SRC (pat), 1);
7382 if (GET_CODE (op1) == CONST_INT)
7383 return INTVAL (op1) >= 0;
7384 if (GET_CODE (op0) != REG)
7385 return 0;
7386 if (sparc_check_64 (op0, insn) == 1)
7387 return 1;
7388 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7389 }
7390 case IOR:
7391 case XOR:
7392 {
7393 rtx op0 = XEXP (SET_SRC (pat), 0);
7394 rtx op1 = XEXP (SET_SRC (pat), 1);
7395 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7396 return 0;
7397 if (GET_CODE (op1) == CONST_INT)
7398 return INTVAL (op1) >= 0;
7399 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7400 }
7401 case LSHIFTRT:
7402 return GET_MODE (SET_SRC (pat)) == SImode;
7403 /* Positive integers leave the high bits zero. */
7404 case CONST_DOUBLE:
7405 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7406 case CONST_INT:
7407 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7408 case ASHIFTRT:
7409 case SIGN_EXTEND:
7410 return - (GET_MODE (SET_SRC (pat)) == SImode);
7411 case REG:
7412 return sparc_check_64 (SET_SRC (pat), insn);
7413 default:
7414 return 0;
7415 }
7416 }
7417
7418 /* We _ought_ to have only one kind per function, but... */
7419 static GTY(()) rtx sparc_addr_diff_list;
7420 static GTY(()) rtx sparc_addr_list;
7421
7422 void
7423 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7424 {
7425 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7426 if (diff)
7427 sparc_addr_diff_list
7428 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7429 else
7430 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7431 }
7432
7433 static void
7434 sparc_output_addr_vec (rtx vec)
7435 {
7436 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7437 int idx, vlen = XVECLEN (body, 0);
7438
7439 #ifdef ASM_OUTPUT_ADDR_VEC_START
7440 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7441 #endif
7442
7443 #ifdef ASM_OUTPUT_CASE_LABEL
7444 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7445 NEXT_INSN (lab));
7446 #else
7447 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7448 #endif
7449
7450 for (idx = 0; idx < vlen; idx++)
7451 {
7452 ASM_OUTPUT_ADDR_VEC_ELT
7453 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7454 }
7455
7456 #ifdef ASM_OUTPUT_ADDR_VEC_END
7457 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7458 #endif
7459 }
7460
7461 static void
7462 sparc_output_addr_diff_vec (rtx vec)
7463 {
7464 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7465 rtx base = XEXP (XEXP (body, 0), 0);
7466 int idx, vlen = XVECLEN (body, 1);
7467
7468 #ifdef ASM_OUTPUT_ADDR_VEC_START
7469 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7470 #endif
7471
7472 #ifdef ASM_OUTPUT_CASE_LABEL
7473 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7474 NEXT_INSN (lab));
7475 #else
7476 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7477 #endif
7478
7479 for (idx = 0; idx < vlen; idx++)
7480 {
7481 ASM_OUTPUT_ADDR_DIFF_ELT
7482 (asm_out_file,
7483 body,
7484 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7485 CODE_LABEL_NUMBER (base));
7486 }
7487
7488 #ifdef ASM_OUTPUT_ADDR_VEC_END
7489 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7490 #endif
7491 }
7492
7493 static void
7494 sparc_output_deferred_case_vectors (void)
7495 {
7496 rtx t;
7497 int align;
7498
7499 if (sparc_addr_list == NULL_RTX
7500 && sparc_addr_diff_list == NULL_RTX)
7501 return;
7502
7503 /* Align to cache line in the function's code section. */
7504 switch_to_section (current_function_section ());
7505
7506 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7507 if (align > 0)
7508 ASM_OUTPUT_ALIGN (asm_out_file, align);
7509
7510 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7511 sparc_output_addr_vec (XEXP (t, 0));
7512 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7513 sparc_output_addr_diff_vec (XEXP (t, 0));
7514
7515 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7516 }
7517
7518 /* Return 0 if the high 32 bits of X (i.e. the bits above X's low word,
7519 if X is DImode) are unknown; return 1 if they are known to be zero,
7520 and -1 if the register is known to be sign-extended. */
7521 int
7522 sparc_check_64 (rtx x, rtx insn)
7523 {
7524 /* If a register is set only once it is safe to ignore insns this
7525 code does not know how to handle. The loop will either recognize
7526 the single set and return the correct value or fail to recognize
7527 it and return 0. */
7528 int set_once = 0;
7529 rtx y = x;
7530
7531 gcc_assert (GET_CODE (x) == REG);
7532
7533 if (GET_MODE (x) == DImode)
7534 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7535
7536 if (flag_expensive_optimizations
7537 && REG_N_SETS (REGNO (y)) == 1)
7538 set_once = 1;
7539
7540 if (insn == 0)
7541 {
7542 if (set_once)
7543 insn = get_last_insn_anywhere ();
7544 else
7545 return 0;
7546 }
7547
7548 while ((insn = PREV_INSN (insn)))
7549 {
7550 switch (GET_CODE (insn))
7551 {
7552 case JUMP_INSN:
7553 case NOTE:
7554 break;
7555 case CODE_LABEL:
7556 case CALL_INSN:
7557 default:
7558 if (! set_once)
7559 return 0;
7560 break;
7561 case INSN:
7562 {
7563 rtx pat = PATTERN (insn);
7564 if (GET_CODE (pat) != SET)
7565 return 0;
7566 if (rtx_equal_p (x, SET_DEST (pat)))
7567 return set_extends (insn);
7568 if (y && rtx_equal_p (y, SET_DEST (pat)))
7569 return set_extends (insn);
7570 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7571 return 0;
7572 }
7573 }
7574 }
7575 return 0;
7576 }
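
/* For instance, if the single insn defining the register SETs it from a
   CONST_INT with bit 31 clear, set_extends yields 1; if the source is a
   SImode SIGN_EXTEND, it yields -1.  */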
7577
7578 /* Returns assembly code to perform a DImode shift using
7579 a 64-bit global or out register on SPARC-V8+. */
7580 const char *
7581 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7582 {
7583 static char asm_code[60];
7584
7585 /* The scratch register is only required when the destination
7586 register is not a 64-bit global or out register. */
7587 if (which_alternative != 2)
7588 operands[3] = operands[0];
7589
7590   /* Shift counts are taken modulo 64, so reduce constant shifts to the range 0..63.  */
7591 if (GET_CODE (operands[2]) == CONST_INT)
7592 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7593
7594 if (GET_CODE (operands[1]) == CONST_INT)
7595 {
7596 output_asm_insn ("mov\t%1, %3", operands);
7597 }
7598 else
7599 {
7600 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7601 if (sparc_check_64 (operands[1], insn) <= 0)
7602 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7603 output_asm_insn ("or\t%L1, %3, %3", operands);
7604 }
7605
7606   strcpy (asm_code, opcode);
7607
7608 if (which_alternative != 2)
7609 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7610 else
7611 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7612 }
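
/* Illustrative expansion of output_v8plus_shift above (a sketch only, with
   OPCODE == "sllx" and the scratch %3 equal to %0 when no separate scratch
   is needed):

	sllx	%H1, 32, %3	! high input word into bits 63..32
	srl	%L1, 0, %L1	! clear upper bits unless known 64-bit clean
	or	%L1, %3, %3	! whole 64-bit input assembled in %3
	sllx	%0, %2, %L0	! the shift itself
	srlx	%L0, 32, %H0	! split the result into two SImode words  */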
7613 \f
7614 /* Output rtl to increment the profiler label LABELNO
7615 for profiling a function entry. */
7616
7617 void
7618 sparc_profile_hook (int labelno)
7619 {
7620 char buf[32];
7621 rtx lab, fun;
7622
7623 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7624 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7625 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7626
7627 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7628 }
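
/* E.g. for LABELNO == 0 this emits a call to MCOUNT_FUNCTION passing the
   address of the internal label LP0 (typically rendered as .LP0) as its
   single Pmode argument.  */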
7629 \f
7630 #ifdef OBJECT_FORMAT_ELF
7631 static void
7632 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7633 tree decl)
7634 {
7635 if (flags & SECTION_MERGE)
7636 {
7637       /* The entry size (entsize) cannot be expressed in this section
7638 	 attribute encoding style.  */
7639 default_elf_asm_named_section (name, flags, decl);
7640 return;
7641 }
7642
7643 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7644
7645 if (!(flags & SECTION_DEBUG))
7646 fputs (",#alloc", asm_out_file);
7647 if (flags & SECTION_WRITE)
7648 fputs (",#write", asm_out_file);
7649 if (flags & SECTION_TLS)
7650 fputs (",#tls", asm_out_file);
7651 if (flags & SECTION_CODE)
7652 fputs (",#execinstr", asm_out_file);
7653
7654 /* ??? Handle SECTION_BSS. */
7655
7656 fputc ('\n', asm_out_file);
7657 }
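
/* For illustration: a writable TLS section named ".tdata" is emitted as

	.section ".tdata",#alloc,#write,#tls

   while SECTION_MERGE sections fall back to the generic syntax above,
   since entsize has no "#"-attribute encoding.  */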
7658 #endif /* OBJECT_FORMAT_ELF */
7659
7660 /* We do not allow indirect calls to be optimized into sibling calls.
7661
7662 We cannot use sibling calls when delayed branches are disabled
7663 because they will likely require the call delay slot to be filled.
7664
7665 Also, on SPARC 32-bit we cannot emit a sibling call when the
7666 current function returns a structure. This is because the "unimp
7667 after call" convention would cause the callee to return to the
7668 wrong place. The generic code already disallows cases where the
7669 function being called returns a structure.
7670
7671    It may seem strange that this last case could occur.  Usually there
7672    is code after the call which jumps to the epilogue code that dumps
7673    the return value into the struct return area; that ought to
7674    invalidate the sibling call, right?  Well, in the C++ case we can
7675    end up passing the pointer to the struct return area to a constructor
7676    (which returns void) and then nothing else happens.  Such a sibling
7677    call would look valid without the added check here.  */
7678 static bool
7679 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7680 {
7681 return (decl
7682 && flag_delayed_branch
7683 && (TARGET_ARCH64 || ! current_function_returns_struct));
7684 }
7685 \f
7686 /* libfunc renaming. */
7687 #include "config/gofast.h"
7688
7689 static void
7690 sparc_init_libfuncs (void)
7691 {
7692 if (TARGET_ARCH32)
7693 {
7694 /* Use the subroutines that Sun's library provides for integer
7695 multiply and divide. The `*' prevents an underscore from
7696 being prepended by the compiler. .umul is a little faster
7697 than .mul. */
7698 set_optab_libfunc (smul_optab, SImode, "*.umul");
7699 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7700 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7701 set_optab_libfunc (smod_optab, SImode, "*.rem");
7702 set_optab_libfunc (umod_optab, SImode, "*.urem");
7703
7704       /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
7705 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7706 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7707 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7708 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7709 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7710
7711       /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
7712 	 is because with soft-float the SFmode and DFmode sqrt
7713 	 instructions are absent, so the compiler would notice and
7714 	 try to use the TFmode sqrt instruction for calls to the
7715 	 builtin function sqrt, which fails.  */
7716 if (TARGET_FPU)
7717 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7718
7719 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7720 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7721 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7722 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7723 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7724 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7725
7726 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7727 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7728 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7729 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7730
7731 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7732 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7733 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7734 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7735
7736 if (DITF_CONVERSION_LIBFUNCS)
7737 {
7738 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7739 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7740 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7741 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7742 }
7743
7744 if (SUN_CONVERSION_LIBFUNCS)
7745 {
7746 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7747 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7748 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7749 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7750 }
7751 }
7752 if (TARGET_ARCH64)
7753 {
7754       /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7755 do not exist in the library. Make sure the compiler does not
7756 emit calls to them by accident. (It should always use the
7757 hardware instructions.) */
7758 set_optab_libfunc (smul_optab, SImode, 0);
7759 set_optab_libfunc (sdiv_optab, SImode, 0);
7760 set_optab_libfunc (udiv_optab, SImode, 0);
7761 set_optab_libfunc (smod_optab, SImode, 0);
7762 set_optab_libfunc (umod_optab, SImode, 0);
7763
7764 if (SUN_INTEGER_MULTIPLY_64)
7765 {
7766 set_optab_libfunc (smul_optab, DImode, "__mul64");
7767 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7768 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7769 set_optab_libfunc (smod_optab, DImode, "__rem64");
7770 set_optab_libfunc (umod_optab, DImode, "__urem64");
7771 }
7772
7773 if (SUN_CONVERSION_LIBFUNCS)
7774 {
7775 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7776 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7777 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7778 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7779 }
7780 }
7781
7782 gofast_maybe_init_libfuncs ();
7783 }
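
/* For example, with -m32 an addition of two long doubles (TFmode) is
   emitted as a call to _Q_add, and a conversion from long double to int
   as a call to _Q_qtoi, per the tables above.  */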
7784 \f
7785 #define def_builtin(NAME, CODE, TYPE) \
7786 lang_hooks.builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7787 NULL_TREE)
7788
7789 /* Implement the TARGET_INIT_BUILTINS target hook.
7790 Create builtin functions for special SPARC instructions. */
7791
7792 static void
7793 sparc_init_builtins (void)
7794 {
7795 if (TARGET_VIS)
7796 sparc_vis_init_builtins ();
7797 }
7798
7799 /* Create builtin functions for VIS 1.0 instructions. */
7800
7801 static void
7802 sparc_vis_init_builtins (void)
7803 {
7804 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7805 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7806 tree v4hi = build_vector_type (intHI_type_node, 4);
7807 tree v2hi = build_vector_type (intHI_type_node, 2);
7808 tree v2si = build_vector_type (intSI_type_node, 2);
7809
7810 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7811 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7812 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7813 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7814 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7815 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7816 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7817 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7818 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7819 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7820 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7821 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7822 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7823 v8qi, v8qi,
7824 intDI_type_node, 0);
7825 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7826 intDI_type_node,
7827 intDI_type_node, 0);
7828 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7829 ptr_type_node,
7830 intSI_type_node, 0);
7831 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7832 ptr_type_node,
7833 intDI_type_node, 0);
7834
7835 /* Packing and expanding vectors. */
7836 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7837 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7838 v8qi_ftype_v2si_v8qi);
7839 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7840 v2hi_ftype_v2si);
7841 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7842 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7843 v8qi_ftype_v4qi_v4qi);
7844
7845 /* Multiplications. */
7846 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7847 v4hi_ftype_v4qi_v4hi);
7848 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7849 v4hi_ftype_v4qi_v2hi);
7850 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7851 v4hi_ftype_v4qi_v2hi);
7852 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7853 v4hi_ftype_v8qi_v4hi);
7854 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
7855 v4hi_ftype_v8qi_v4hi);
7856 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
7857 v2si_ftype_v4qi_v2hi);
7858 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
7859 v2si_ftype_v4qi_v2hi);
7860
7861 /* Data aligning. */
7862 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
7863 v4hi_ftype_v4hi_v4hi);
7864 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
7865 v8qi_ftype_v8qi_v8qi);
7866 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
7867 v2si_ftype_v2si_v2si);
7868 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
7869 di_ftype_di_di);
7870 if (TARGET_ARCH64)
7871 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
7872 ptr_ftype_ptr_di);
7873 else
7874 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
7875 ptr_ftype_ptr_si);
7876
7877 /* Pixel distance. */
7878 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
7879 di_ftype_v8qi_v8qi_di);
7880 }
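
/* Usage sketch for the builtins above (user code, compiled with -mvis;
   the type and function names here are illustrative only):

     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi pack (v4hi x) { return __builtin_vis_fpack16 (x); }  */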
7881
7882 /* Handle TARGET_EXPAND_BUILTIN target hook.
7883 Expand builtin functions for sparc intrinsics. */
7884
7885 static rtx
7886 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7887 enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
7888 {
7889 tree arglist;
7890 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7891 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
7892 rtx pat, op[4];
7893 enum machine_mode mode[4];
7894 int arg_count = 0;
7895
7896 mode[arg_count] = tmode;
7897
7898 if (target == 0
7899 || GET_MODE (target) != tmode
7900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7901 op[arg_count] = gen_reg_rtx (tmode);
7902 else
7903 op[arg_count] = target;
7904
7905 for (arglist = TREE_OPERAND (exp, 1); arglist;
7906 arglist = TREE_CHAIN (arglist))
7907 {
7908 tree arg = TREE_VALUE (arglist);
7909
7910 arg_count++;
7911 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
7912 op[arg_count] = expand_normal (arg);
7913
7914 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
7915 mode[arg_count]))
7916 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
7917 }
7918
7919 switch (arg_count)
7920 {
7921 case 1:
7922 pat = GEN_FCN (icode) (op[0], op[1]);
7923 break;
7924 case 2:
7925 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
7926 break;
7927 case 3:
7928 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
7929 break;
7930 default:
7931 gcc_unreachable ();
7932 }
7933
7934 if (!pat)
7935 return NULL_RTX;
7936
7937 emit_insn (pat);
7938
7939 return op[0];
7940 }
7941
7942 static int
7943 sparc_vis_mul8x16 (int e8, int e16)
7944 {
7945 return (e8 * e16 + 128) / 256;
7946 }
7947
7948 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
7949    by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists must be integer
7950    constants.  A tree list with the results of the multiplications is returned,
7951    and each element in the list is of INNER_TYPE.  */
7952
7953 static tree
7954 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
7955 {
7956 tree n_elts = NULL_TREE;
7957 int scale;
7958
7959 switch (fncode)
7960 {
7961 case CODE_FOR_fmul8x16_vis:
7962 for (; elts0 && elts1;
7963 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
7964 {
7965 int val
7966 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
7967 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
7968 n_elts = tree_cons (NULL_TREE,
7969 build_int_cst (inner_type, val),
7970 n_elts);
7971 }
7972 break;
7973
7974 case CODE_FOR_fmul8x16au_vis:
7975 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
7976
7977 for (; elts0; elts0 = TREE_CHAIN (elts0))
7978 {
7979 int val
7980 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
7981 scale);
7982 n_elts = tree_cons (NULL_TREE,
7983 build_int_cst (inner_type, val),
7984 n_elts);
7985 }
7986 break;
7987
7988 case CODE_FOR_fmul8x16al_vis:
7989 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
7990
7991 for (; elts0; elts0 = TREE_CHAIN (elts0))
7992 {
7993 int val
7994 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
7995 scale);
7996 n_elts = tree_cons (NULL_TREE,
7997 build_int_cst (inner_type, val),
7998 n_elts);
7999 }
8000 break;
8001
8002 default:
8003 gcc_unreachable ();
8004 }
8005
8006   return nreverse (n_elts);
8008 }
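
/* Example for sparc_handle_vis_mul8x16 above: fmul8x16au takes its
   single scale factor from the first element of ELTS1 and fmul8x16al
   from the second, multiplying every element of ELTS0 by it, whereas
   fmul8x16 multiplies corresponding element pairs.  */
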
8009 /* Handle TARGET_FOLD_BUILTIN target hook.
8010 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8011 result of the function call is ignored. NULL_TREE is returned if the
8012 function could not be folded. */
8013
8014 static tree
8015 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8016 {
8017 tree arg0, arg1, arg2;
8018 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8019 
8021 if (ignore && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8022 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8023 return build_int_cst (rtype, 0);
8024
8025 switch (DECL_FUNCTION_CODE (fndecl))
8026 {
8027 case CODE_FOR_fexpand_vis:
8028 arg0 = TREE_VALUE (arglist);
8029 STRIP_NOPS (arg0);
8030
8031 if (TREE_CODE (arg0) == VECTOR_CST)
8032 {
8033 tree inner_type = TREE_TYPE (rtype);
8034 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8035 tree n_elts = NULL_TREE;
8036
8037 for (; elts; elts = TREE_CHAIN (elts))
8038 {
8039 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8040 n_elts = tree_cons (NULL_TREE,
8041 build_int_cst (inner_type, val),
8042 n_elts);
8043 }
8044 return build_vector (rtype, nreverse (n_elts));
8045 }
8046 break;
8047
8048 case CODE_FOR_fmul8x16_vis:
8049 case CODE_FOR_fmul8x16au_vis:
8050 case CODE_FOR_fmul8x16al_vis:
8051 arg0 = TREE_VALUE (arglist);
8052 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8053 STRIP_NOPS (arg0);
8054 STRIP_NOPS (arg1);
8055
8056 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8057 {
8058 tree inner_type = TREE_TYPE (rtype);
8059 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8060 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8061 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8062 inner_type, elts0, elts1);
8063
8064 return build_vector (rtype, n_elts);
8065 }
8066 break;
8067
8068 case CODE_FOR_fpmerge_vis:
8069 arg0 = TREE_VALUE (arglist);
8070 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8071 STRIP_NOPS (arg0);
8072 STRIP_NOPS (arg1);
8073
8074 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8075 {
8076 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8077 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8078 tree n_elts = NULL_TREE;
8079
8080 for (; elts0 && elts1;
8081 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8082 {
8083 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8084 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8085 }
8086
8087 return build_vector (rtype, nreverse (n_elts));
8088 }
8089 break;
8090
8091 case CODE_FOR_pdist_vis:
8092 arg0 = TREE_VALUE (arglist);
8093 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8094 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8095 STRIP_NOPS (arg0);
8096 STRIP_NOPS (arg1);
8097 STRIP_NOPS (arg2);
8098
8099 if (TREE_CODE (arg0) == VECTOR_CST
8100 && TREE_CODE (arg1) == VECTOR_CST
8101 && TREE_CODE (arg2) == INTEGER_CST)
8102 {
8103 int overflow = 0;
8104 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8105 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8106 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8107 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8108
8109 for (; elts0 && elts1;
8110 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8111 {
8112 unsigned HOST_WIDE_INT
8113 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8114 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8115 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8116 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8117
8118 unsigned HOST_WIDE_INT l;
8119 HOST_WIDE_INT h;
8120
8121 overflow |= neg_double (low1, high1, &l, &h);
8122 overflow |= add_double (low0, high0, l, h, &l, &h);
8123 if (h < 0)
8124 overflow |= neg_double (l, h, &l, &h);
8125
8126 overflow |= add_double (low, high, l, h, &low, &high);
8127 }
8128
8129 gcc_assert (overflow == 0);
8130
8131 return build_int_cst_wide (rtype, low, high);
8132 }
8133
8134 default:
8135 break;
8136 }
8137 return NULL_TREE;
8138 }
8139 \f
8140 int
8141 sparc_extra_constraint_check (rtx op, int c, int strict)
8142 {
8143 int reload_ok_mem;
8144
8145 if (TARGET_ARCH64
8146 && (c == 'T' || c == 'U'))
8147 return 0;
8148
8149 switch (c)
8150 {
8151 case 'Q':
8152 return fp_sethi_p (op);
8153
8154 case 'R':
8155 return fp_mov_p (op);
8156
8157 case 'S':
8158 return fp_high_losum_p (op);
8159
8160 case 'U':
8161 if (! strict
8162 || (GET_CODE (op) == REG
8163 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8164 || reg_renumber[REGNO (op)] >= 0)))
8165 return register_ok_for_ldd (op);
8166
8167 return 0;
8168
8169 case 'W':
8170 case 'T':
8171 break;
8172
8173 case 'Y':
8174 return const_zero_operand (op, GET_MODE (op));
8175
8176 default:
8177 return 0;
8178 }
8179
8180 /* Our memory extra constraints have to emulate the
8181 behavior of 'm' and 'o' in order for reload to work
8182 correctly. */
8183 if (GET_CODE (op) == MEM)
8184 {
8185 reload_ok_mem = 0;
8186 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8187 && (! strict
8188 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8189 reload_ok_mem = 1;
8190 }
8191 else
8192 {
8193 reload_ok_mem = (reload_in_progress
8194 && GET_CODE (op) == REG
8195 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8196 && reg_renumber [REGNO (op)] < 0);
8197 }
8198
8199 return reload_ok_mem;
8200 }
8201
8202 /* ??? This duplicates information provided to the compiler by the
8203 ??? scheduler description. Some day, teach genautomata to output
8204 ??? the latencies and then CSE will just use that. */
8205
8206 static bool
8207 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8208 {
8209 enum machine_mode mode = GET_MODE (x);
8210 bool float_mode_p = FLOAT_MODE_P (mode);
8211
8212 switch (code)
8213 {
8214 case CONST_INT:
8215 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8216 {
8217 *total = 0;
8218 return true;
8219 }
8220 /* FALLTHRU */
8221
8222 case HIGH:
8223 *total = 2;
8224 return true;
8225
8226 case CONST:
8227 case LABEL_REF:
8228 case SYMBOL_REF:
8229 *total = 4;
8230 return true;
8231
8232 case CONST_DOUBLE:
8233 if (GET_MODE (x) == VOIDmode
8234 && ((CONST_DOUBLE_HIGH (x) == 0
8235 && CONST_DOUBLE_LOW (x) < 0x1000)
8236 || (CONST_DOUBLE_HIGH (x) == -1
8237 && CONST_DOUBLE_LOW (x) < 0
8238 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8239 *total = 0;
8240 else
8241 *total = 8;
8242 return true;
8243
8244 case MEM:
8245 /* If outer-code was a sign or zero extension, a cost
8246 of COSTS_N_INSNS (1) was already added in. This is
8247 why we are subtracting it back out. */
8248 if (outer_code == ZERO_EXTEND)
8249 {
8250 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8251 }
8252 else if (outer_code == SIGN_EXTEND)
8253 {
8254 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8255 }
8256 else if (float_mode_p)
8257 {
8258 *total = sparc_costs->float_load;
8259 }
8260 else
8261 {
8262 *total = sparc_costs->int_load;
8263 }
8264
8265 return true;
8266
8267 case PLUS:
8268 case MINUS:
8269 if (float_mode_p)
8270 *total = sparc_costs->float_plusminus;
8271 else
8272 *total = COSTS_N_INSNS (1);
8273 return false;
8274
8275 case MULT:
8276 if (float_mode_p)
8277 *total = sparc_costs->float_mul;
8278 else if (! TARGET_HARD_MUL)
8279 *total = COSTS_N_INSNS (25);
8280 else
8281 {
8282 int bit_cost;
8283
8284 bit_cost = 0;
8285 if (sparc_costs->int_mul_bit_factor)
8286 {
8287 int nbits;
8288
8289 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8290 {
8291 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8292 for (nbits = 0; value != 0; value &= value - 1)
8293 nbits++;
8294 }
8295 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8296 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8297 {
8298 rtx x1 = XEXP (x, 1);
8299 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8300 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8301
8302 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8303 nbits++;
8304 for (; value2 != 0; value2 &= value2 - 1)
8305 nbits++;
8306 }
8307 else
8308 nbits = 7;
8309
8310 if (nbits < 3)
8311 nbits = 3;
8312 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8313 bit_cost = COSTS_N_INSNS (bit_cost);
8314 }
8315
8316 if (mode == DImode)
8317 *total = sparc_costs->int_mulX + bit_cost;
8318 else
8319 *total = sparc_costs->int_mul + bit_cost;
8320 }
8321 return false;
8322
8323 case ASHIFT:
8324 case ASHIFTRT:
8325 case LSHIFTRT:
8326 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8327 return false;
8328
8329 case DIV:
8330 case UDIV:
8331 case MOD:
8332 case UMOD:
8333 if (float_mode_p)
8334 {
8335 if (mode == DFmode)
8336 *total = sparc_costs->float_div_df;
8337 else
8338 *total = sparc_costs->float_div_sf;
8339 }
8340 else
8341 {
8342 if (mode == DImode)
8343 *total = sparc_costs->int_divX;
8344 else
8345 *total = sparc_costs->int_div;
8346 }
8347 return false;
8348
8349 case NEG:
8350 if (! float_mode_p)
8351 {
8352 *total = COSTS_N_INSNS (1);
8353 return false;
8354 }
8355 /* FALLTHRU */
8356
8357 case ABS:
8358 case FLOAT:
8359 case UNSIGNED_FLOAT:
8360 case FIX:
8361 case UNSIGNED_FIX:
8362 case FLOAT_EXTEND:
8363 case FLOAT_TRUNCATE:
8364 *total = sparc_costs->float_move;
8365 return false;
8366
8367 case SQRT:
8368 if (mode == DFmode)
8369 *total = sparc_costs->float_sqrt_df;
8370 else
8371 *total = sparc_costs->float_sqrt_sf;
8372 return false;
8373
8374 case COMPARE:
8375 if (float_mode_p)
8376 *total = sparc_costs->float_cmp;
8377 else
8378 *total = COSTS_N_INSNS (1);
8379 return false;
8380
8381 case IF_THEN_ELSE:
8382 if (float_mode_p)
8383 *total = sparc_costs->float_cmove;
8384 else
8385 *total = sparc_costs->int_cmove;
8386 return false;
8387
8388 case IOR:
8389 /* Handle the NAND vector patterns. */
8390 if (sparc_vector_mode_supported_p (GET_MODE (x))
8391 && GET_CODE (XEXP (x, 0)) == NOT
8392 && GET_CODE (XEXP (x, 1)) == NOT)
8393 {
8394 *total = COSTS_N_INSNS (1);
8395 return true;
8396 }
8397 else
8398 return false;
8399
8400 default:
8401 return false;
8402 }
8403 }
8404
8405 /* Emit the sequence of insns SEQ while preserving the registers. */
8406
8407 static void
8408 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8409 {
8410 /* STACK_BOUNDARY guarantees that this is a 2-word slot. */
8411 rtx slot = gen_rtx_MEM (word_mode,
8412 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8413
8414 emit_insn (gen_stack_pointer_dec (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8415 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8416 if (reg2)
8417 emit_insn (gen_rtx_SET (VOIDmode,
8418 adjust_address (slot, word_mode, UNITS_PER_WORD),
8419 reg2));
8420 emit_insn (seq);
8421 if (reg2)
8422 emit_insn (gen_rtx_SET (VOIDmode,
8423 reg2,
8424 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8425 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8426 emit_insn (gen_stack_pointer_inc (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8427 }
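
/* A sketch of what emit_and_preserve generates (assembly mnemonics are
   illustrative only; the function emits RTL):

	sub	%sp, STACK_BOUNDARY/BITS_PER_UNIT, %sp
	st	REG, [%sp + SPARC_STACK_BIAS]	! and REG2 one word above
	...SEQ...
	ld	[%sp + SPARC_STACK_BIAS + UNITS_PER_WORD], REG2
	ld	[%sp + SPARC_STACK_BIAS], REG
	add	%sp, STACK_BOUNDARY/BITS_PER_UNIT, %sp  */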
8428
8429 /* Output the assembler code for a thunk function. THUNK_DECL is the
8430 declaration for the thunk function itself, FUNCTION is the decl for
8431 the target function. DELTA is an immediate constant offset to be
8432 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8433 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8434
8435 static void
8436 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8437 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8438 tree function)
8439 {
8440 rtx this, insn, funexp;
8441 unsigned int int_arg_first;
8442
8443 reload_completed = 1;
8444 epilogue_completed = 1;
8445 no_new_pseudos = 1;
8446 reset_block_changes ();
8447
8448 emit_note (NOTE_INSN_PROLOGUE_END);
8449
8450 if (flag_delayed_branch)
8451 {
8452 /* We will emit a regular sibcall below, so we need to instruct
8453 output_sibcall that we are in a leaf function. */
8454 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8455
8456 /* This will cause final.c to invoke leaf_renumber_regs so we
8457 must behave as if we were in a not-yet-leafified function. */
8458 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8459 }
8460 else
8461 {
8462 /* We will emit the sibcall manually below, so we will need to
8463 manually spill non-leaf registers. */
8464 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8465
8466 /* We really are in a leaf function. */
8467 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8468 }
8469
8470 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8471 returns a structure, the structure return pointer is there instead. */
8472 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8473 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8474 else
8475 this = gen_rtx_REG (Pmode, int_arg_first);
8476
8477 /* Add DELTA. When possible use a plain add, otherwise load it into
8478 a register first. */
8479 if (delta)
8480 {
8481 rtx delta_rtx = GEN_INT (delta);
8482
8483 if (! SPARC_SIMM13_P (delta))
8484 {
8485 rtx scratch = gen_rtx_REG (Pmode, 1);
8486 emit_move_insn (scratch, delta_rtx);
8487 delta_rtx = scratch;
8488 }
8489
8490 /* THIS += DELTA. */
8491 emit_insn (gen_add2_insn (this, delta_rtx));
8492 }
8493
8494 /* Add the word at address (*THIS + VCALL_OFFSET). */
8495 if (vcall_offset)
8496 {
8497 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8498 rtx scratch = gen_rtx_REG (Pmode, 1);
8499
8500 gcc_assert (vcall_offset < 0);
8501
8502 /* SCRATCH = *THIS. */
8503 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8504
8505 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8506 may not have any available scratch register at this point. */
8507 if (SPARC_SIMM13_P (vcall_offset))
8508 ;
8509 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8510 else if (! fixed_regs[5]
8511 /* The below sequence is made up of at least 2 insns,
8512 while the default method may need only one. */
8513 && vcall_offset < -8192)
8514 {
8515 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8516 emit_move_insn (scratch2, vcall_offset_rtx);
8517 vcall_offset_rtx = scratch2;
8518 }
8519 else
8520 {
8521 rtx increment = GEN_INT (-4096);
8522
8523 /* VCALL_OFFSET is a negative number whose typical range can be
8524 estimated as -32768..0 in 32-bit mode. In almost all cases
8525 it is therefore cheaper to emit multiple add insns than
8526 spilling and loading the constant into a register (at least
8527 6 insns). */
8528 while (! SPARC_SIMM13_P (vcall_offset))
8529 {
8530 emit_insn (gen_add2_insn (scratch, increment));
8531 vcall_offset += 4096;
8532 }
8533 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8534 }
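	  /* E.g. VCALL_OFFSET == -6000: a single add of -4096 leaves -1904,
	     which satisfies SPARC_SIMM13_P, so the loop stops there.  */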
8535
8536 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8537 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8538 gen_rtx_PLUS (Pmode,
8539 scratch,
8540 vcall_offset_rtx)));
8541
8542 /* THIS += *(*THIS + VCALL_OFFSET). */
8543 emit_insn (gen_add2_insn (this, scratch));
8544 }
8545
8546 /* Generate a tail call to the target function. */
8547 if (! TREE_USED (function))
8548 {
8549 assemble_external (function);
8550 TREE_USED (function) = 1;
8551 }
8552 funexp = XEXP (DECL_RTL (function), 0);
8553
8554 if (flag_delayed_branch)
8555 {
8556 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8557 insn = emit_call_insn (gen_sibcall (funexp));
8558 SIBLING_CALL_P (insn) = 1;
8559 }
8560 else
8561 {
8562 /* The hoops we have to jump through in order to generate a sibcall
8563 without using delay slots... */
8564 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8565
8566 if (flag_pic)
8567 {
8568 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8569 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8570 start_sequence ();
8571 /* Delay emitting the PIC helper function because it needs to
8572 change the section and we are emitting assembly code. */
8573 load_pic_register (true); /* clobbers %o7 */
8574 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8575 seq = get_insns ();
8576 end_sequence ();
8577 emit_and_preserve (seq, spill_reg, spill_reg2);
8578 }
8579 else if (TARGET_ARCH32)
8580 {
8581 emit_insn (gen_rtx_SET (VOIDmode,
8582 scratch,
8583 gen_rtx_HIGH (SImode, funexp)));
8584 emit_insn (gen_rtx_SET (VOIDmode,
8585 scratch,
8586 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8587 }
8588 else /* TARGET_ARCH64 */
8589 {
8590 switch (sparc_cmodel)
8591 {
8592 case CM_MEDLOW:
8593 case CM_MEDMID:
8594 /* The destination can serve as a temporary. */
8595 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8596 break;
8597
8598 case CM_MEDANY:
8599 case CM_EMBMEDANY:
8600 /* The destination cannot serve as a temporary. */
8601 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8602 start_sequence ();
8603 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8604 seq = get_insns ();
8605 end_sequence ();
8606 emit_and_preserve (seq, spill_reg, 0);
8607 break;
8608
8609 default:
8610 gcc_unreachable ();
8611 }
8612 }
8613
8614 emit_jump_insn (gen_indirect_jump (scratch));
8615 }
8616
8617 emit_barrier ();
8618
8619 /* Run just enough of rest_of_compilation to get the insns emitted.
8620 There's not really enough bulk here to make other passes such as
8621 instruction scheduling worth while. Note that use_thunk calls
8622 assemble_start_function and assemble_end_function. */
8623 insn = get_insns ();
8624 insn_locators_initialize ();
8625 shorten_branches (insn);
8626 final_start_function (insn, file, 1);
8627 final (insn, file, 1);
8628 final_end_function ();
8629
8630 reload_completed = 0;
8631 epilogue_completed = 0;
8632 no_new_pseudos = 0;
8633 }
8634
8635 /* Return true if sparc_output_mi_thunk would be able to output the
8636 assembler code for the thunk function specified by the arguments
8637 it is passed, and false otherwise. */
8638 static bool
8639 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8640 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8641 HOST_WIDE_INT vcall_offset,
8642 tree function ATTRIBUTE_UNUSED)
8643 {
8644 /* Bound the loop used in the default method above. */
8645 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8646 }
8647
8648 /* How to allocate a 'struct machine_function'. */
8649
8650 static struct machine_function *
8651 sparc_init_machine_status (void)
8652 {
8653 return ggc_alloc_cleared (sizeof (struct machine_function));
8654 }
8655
8656 /* Locate some local-dynamic symbol still in use by this function
8657 so that we can print its name in local-dynamic base patterns. */
8658
8659 static const char *
8660 get_some_local_dynamic_name (void)
8661 {
8662 rtx insn;
8663
8664 if (cfun->machine->some_ld_name)
8665 return cfun->machine->some_ld_name;
8666
8667 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8668 if (INSN_P (insn)
8669 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8670 return cfun->machine->some_ld_name;
8671
8672 gcc_unreachable ();
8673 }
8674
8675 static int
8676 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8677 {
8678 rtx x = *px;
8679
8680 if (x
8681 && GET_CODE (x) == SYMBOL_REF
8682 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8683 {
8684 cfun->machine->some_ld_name = XSTR (x, 0);
8685 return 1;
8686 }
8687
8688 return 0;
8689 }
8690
8691 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8692 This is called from dwarf2out.c to emit call frame instructions
8693 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8694 static void
8695 sparc_dwarf_handle_frame_unspec (const char *label,
8696 rtx pattern ATTRIBUTE_UNUSED,
8697 int index ATTRIBUTE_UNUSED)
8698 {
8699 gcc_assert (index == UNSPECV_SAVEW);
8700 dwarf2out_window_save (label);
8701 }
8702
8703 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8704 We need to emit DTP-relative relocations. */
8705
8706 static void
8707 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8708 {
8709 switch (size)
8710 {
8711 case 4:
8712 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8713 break;
8714 case 8:
8715 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8716 break;
8717 default:
8718 gcc_unreachable ();
8719 }
8720 output_addr_const (file, x);
8721 fputs (")", file);
8722 }
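
/* E.g. for SIZE == 4 and a symbol x this prints
   "\t.word\t%r_tls_dtpoff32(x)".  */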
8723
8724 /* Do whatever processing is required at the end of a file. */
8725
8726 static void
8727 sparc_file_end (void)
8728 {
8729 /* If we haven't emitted the special PIC helper function, do so now. */
8730 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8731 emit_pic_helper ();
8732
8733 if (NEED_INDICATE_EXEC_STACK)
8734 file_end_indicate_exec_stack ();
8735 }
8736
8737 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8738 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
8739
8740 static const char *
8741 sparc_mangle_fundamental_type (tree type)
8742 {
8743 if (!TARGET_64BIT
8744 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8745 && TARGET_LONG_DOUBLE_128)
8746 return "g";
8747
8748 /* For all other types, use normal C++ mangling. */
8749 return NULL;
8750 }
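
/* E.g. a 32-bit "void f (long double)" with 128-bit long double then
   mangles as _Z1fg instead of the default _Z1fe.  */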
8751 #endif
8752
8753 /* Expand code to perform an 8 or 16-bit compare and swap by doing a 32-bit
8754    compare and swap on the word containing the byte or half-word.  */
8755
8756 void
8757 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8758 {
8759 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8760 rtx addr = gen_reg_rtx (Pmode);
8761 rtx off = gen_reg_rtx (SImode);
8762 rtx oldv = gen_reg_rtx (SImode);
8763 rtx newv = gen_reg_rtx (SImode);
8764 rtx oldvalue = gen_reg_rtx (SImode);
8765 rtx newvalue = gen_reg_rtx (SImode);
8766 rtx res = gen_reg_rtx (SImode);
8767 rtx resv = gen_reg_rtx (SImode);
8768 rtx memsi, val, mask, end_label, loop_label, cc;
8769
8770 emit_insn (gen_rtx_SET (VOIDmode, addr,
8771 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8772
8773 if (Pmode != SImode)
8774 addr1 = gen_lowpart (SImode, addr1);
8775 emit_insn (gen_rtx_SET (VOIDmode, off,
8776 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8777
8778 memsi = gen_rtx_MEM (SImode, addr);
8779 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8780 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8781
8782 val = force_reg (SImode, memsi);
8783
8784 emit_insn (gen_rtx_SET (VOIDmode, off,
8785 gen_rtx_XOR (SImode, off,
8786 GEN_INT (GET_MODE (mem) == QImode
8787 ? 3 : 2))));
8788
8789 emit_insn (gen_rtx_SET (VOIDmode, off,
8790 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8791
8792 if (GET_MODE (mem) == QImode)
8793 mask = force_reg (SImode, GEN_INT (0xff));
8794 else
8795 mask = force_reg (SImode, GEN_INT (0xffff));
8796
8797 emit_insn (gen_rtx_SET (VOIDmode, mask,
8798 gen_rtx_ASHIFT (SImode, mask, off)));
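
  /* Illustrative example: for a QImode access whose address has low two
     bits == 1, OFF is (1 ^ 3) << 3 == 16 and MASK is 0xff << 16, i.e.
     exactly the big-endian byte lane (bits 23:16) of the containing
     word.  */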
8799
8800 emit_insn (gen_rtx_SET (VOIDmode, val,
8801 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8802 val)));
8803
8804 oldval = gen_lowpart (SImode, oldval);
8805 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8806 gen_rtx_ASHIFT (SImode, oldval, off)));
8807
8808 newval = gen_lowpart_common (SImode, newval);
8809 emit_insn (gen_rtx_SET (VOIDmode, newv,
8810 gen_rtx_ASHIFT (SImode, newval, off)));
8811
8812 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8813 gen_rtx_AND (SImode, oldv, mask)));
8814
8815 emit_insn (gen_rtx_SET (VOIDmode, newv,
8816 gen_rtx_AND (SImode, newv, mask)));
8817
8818 end_label = gen_label_rtx ();
8819 loop_label = gen_label_rtx ();
8820 emit_label (loop_label);
8821
8822 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8823 gen_rtx_IOR (SImode, oldv, val)));
8824
8825 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8826 gen_rtx_IOR (SImode, newv, val)));
8827
8828 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8829
8830 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8831
8832 emit_insn (gen_rtx_SET (VOIDmode, resv,
8833 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8834 res)));
8835
8836 sparc_compare_op0 = resv;
8837 sparc_compare_op1 = val;
8838 cc = gen_compare_reg (NE);
8839
8840 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8841
8842 sparc_compare_emitted = cc;
8843 emit_jump_insn (gen_bne (loop_label));
8844
8845 emit_label (end_label);
8846
8847 emit_insn (gen_rtx_SET (VOIDmode, res,
8848 gen_rtx_AND (SImode, res, mask)));
8849
8850 emit_insn (gen_rtx_SET (VOIDmode, res,
8851 gen_rtx_LSHIFTRT (SImode, res, off)));
8852
8853 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
8854 }
8855
8856 #include "gt-sparc.h"