Fix the RTL of some sparc VIS patterns.
gcc/config/sparc/sparc.c
1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com)
7 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
8 at Cygnus Support.
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
16
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "function.h"
41 #include "except.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "recog.h"
45 #include "diagnostic-core.h"
46 #include "ggc.h"
47 #include "tm_p.h"
48 #include "debug.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "common/common-target.h"
52 #include "cfglayout.h"
53 #include "gimple.h"
54 #include "langhooks.h"
55 #include "reload.h"
56 #include "params.h"
57 #include "df.h"
58 #include "dwarf2out.h"
59 #include "opts.h"
60
61 /* Processor costs */
62 static const
63 struct processor_costs cypress_costs = {
64 COSTS_N_INSNS (2), /* int load */
65 COSTS_N_INSNS (2), /* int signed load */
66 COSTS_N_INSNS (2), /* int zeroed load */
67 COSTS_N_INSNS (2), /* float load */
68 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
69 COSTS_N_INSNS (5), /* fadd, fsub */
70 COSTS_N_INSNS (1), /* fcmp */
71 COSTS_N_INSNS (1), /* fmov, fmovr */
72 COSTS_N_INSNS (7), /* fmul */
73 COSTS_N_INSNS (37), /* fdivs */
74 COSTS_N_INSNS (37), /* fdivd */
75 COSTS_N_INSNS (63), /* fsqrts */
76 COSTS_N_INSNS (63), /* fsqrtd */
77 COSTS_N_INSNS (1), /* imul */
78 COSTS_N_INSNS (1), /* imulX */
79 0, /* imul bit factor */
80 COSTS_N_INSNS (1), /* idiv */
81 COSTS_N_INSNS (1), /* idivX */
82 COSTS_N_INSNS (1), /* movcc/movr */
83 0, /* shift penalty */
84 };
85
86 static const
87 struct processor_costs supersparc_costs = {
88 COSTS_N_INSNS (1), /* int load */
89 COSTS_N_INSNS (1), /* int signed load */
90 COSTS_N_INSNS (1), /* int zeroed load */
91 COSTS_N_INSNS (0), /* float load */
92 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
93 COSTS_N_INSNS (3), /* fadd, fsub */
94 COSTS_N_INSNS (3), /* fcmp */
95 COSTS_N_INSNS (1), /* fmov, fmovr */
96 COSTS_N_INSNS (3), /* fmul */
97 COSTS_N_INSNS (6), /* fdivs */
98 COSTS_N_INSNS (9), /* fdivd */
99 COSTS_N_INSNS (12), /* fsqrts */
100 COSTS_N_INSNS (12), /* fsqrtd */
101 COSTS_N_INSNS (4), /* imul */
102 COSTS_N_INSNS (4), /* imulX */
103 0, /* imul bit factor */
104 COSTS_N_INSNS (4), /* idiv */
105 COSTS_N_INSNS (4), /* idivX */
106 COSTS_N_INSNS (1), /* movcc/movr */
107 1, /* shift penalty */
108 };
109
110 static const
111 struct processor_costs hypersparc_costs = {
112 COSTS_N_INSNS (1), /* int load */
113 COSTS_N_INSNS (1), /* int signed load */
114 COSTS_N_INSNS (1), /* int zeroed load */
115 COSTS_N_INSNS (1), /* float load */
116 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
117 COSTS_N_INSNS (1), /* fadd, fsub */
118 COSTS_N_INSNS (1), /* fcmp */
119 COSTS_N_INSNS (1), /* fmov, fmovr */
120 COSTS_N_INSNS (1), /* fmul */
121 COSTS_N_INSNS (8), /* fdivs */
122 COSTS_N_INSNS (12), /* fdivd */
123 COSTS_N_INSNS (17), /* fsqrts */
124 COSTS_N_INSNS (17), /* fsqrtd */
125 COSTS_N_INSNS (17), /* imul */
126 COSTS_N_INSNS (17), /* imulX */
127 0, /* imul bit factor */
128 COSTS_N_INSNS (17), /* idiv */
129 COSTS_N_INSNS (17), /* idivX */
130 COSTS_N_INSNS (1), /* movcc/movr */
131 0, /* shift penalty */
132 };
133
134 static const
135 struct processor_costs leon_costs = {
136 COSTS_N_INSNS (1), /* int load */
137 COSTS_N_INSNS (1), /* int signed load */
138 COSTS_N_INSNS (1), /* int zeroed load */
139 COSTS_N_INSNS (1), /* float load */
140 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
141 COSTS_N_INSNS (1), /* fadd, fsub */
142 COSTS_N_INSNS (1), /* fcmp */
143 COSTS_N_INSNS (1), /* fmov, fmovr */
144 COSTS_N_INSNS (1), /* fmul */
145 COSTS_N_INSNS (15), /* fdivs */
146 COSTS_N_INSNS (15), /* fdivd */
147 COSTS_N_INSNS (23), /* fsqrts */
148 COSTS_N_INSNS (23), /* fsqrtd */
149 COSTS_N_INSNS (5), /* imul */
150 COSTS_N_INSNS (5), /* imulX */
151 0, /* imul bit factor */
152 COSTS_N_INSNS (5), /* idiv */
153 COSTS_N_INSNS (5), /* idivX */
154 COSTS_N_INSNS (1), /* movcc/movr */
155 0, /* shift penalty */
156 };
157
158 static const
159 struct processor_costs sparclet_costs = {
160 COSTS_N_INSNS (3), /* int load */
161 COSTS_N_INSNS (3), /* int signed load */
162 COSTS_N_INSNS (1), /* int zeroed load */
163 COSTS_N_INSNS (1), /* float load */
164 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
165 COSTS_N_INSNS (1), /* fadd, fsub */
166 COSTS_N_INSNS (1), /* fcmp */
167 COSTS_N_INSNS (1), /* fmov, fmovr */
168 COSTS_N_INSNS (1), /* fmul */
169 COSTS_N_INSNS (1), /* fdivs */
170 COSTS_N_INSNS (1), /* fdivd */
171 COSTS_N_INSNS (1), /* fsqrts */
172 COSTS_N_INSNS (1), /* fsqrtd */
173 COSTS_N_INSNS (5), /* imul */
174 COSTS_N_INSNS (5), /* imulX */
175 0, /* imul bit factor */
176 COSTS_N_INSNS (5), /* idiv */
177 COSTS_N_INSNS (5), /* idivX */
178 COSTS_N_INSNS (1), /* movcc/movr */
179 0, /* shift penalty */
180 };
181
182 static const
183 struct processor_costs ultrasparc_costs = {
184 COSTS_N_INSNS (2), /* int load */
185 COSTS_N_INSNS (3), /* int signed load */
186 COSTS_N_INSNS (2), /* int zeroed load */
187 COSTS_N_INSNS (2), /* float load */
188 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
189 COSTS_N_INSNS (4), /* fadd, fsub */
190 COSTS_N_INSNS (1), /* fcmp */
191 COSTS_N_INSNS (2), /* fmov, fmovr */
192 COSTS_N_INSNS (4), /* fmul */
193 COSTS_N_INSNS (13), /* fdivs */
194 COSTS_N_INSNS (23), /* fdivd */
195 COSTS_N_INSNS (13), /* fsqrts */
196 COSTS_N_INSNS (23), /* fsqrtd */
197 COSTS_N_INSNS (4), /* imul */
198 COSTS_N_INSNS (4), /* imulX */
199 2, /* imul bit factor */
200 COSTS_N_INSNS (37), /* idiv */
201 COSTS_N_INSNS (68), /* idivX */
202 COSTS_N_INSNS (2), /* movcc/movr */
203 2, /* shift penalty */
204 };
205
206 static const
207 struct processor_costs ultrasparc3_costs = {
208 COSTS_N_INSNS (2), /* int load */
209 COSTS_N_INSNS (3), /* int signed load */
210 COSTS_N_INSNS (3), /* int zeroed load */
211 COSTS_N_INSNS (2), /* float load */
212 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
213 COSTS_N_INSNS (4), /* fadd, fsub */
214 COSTS_N_INSNS (5), /* fcmp */
215 COSTS_N_INSNS (3), /* fmov, fmovr */
216 COSTS_N_INSNS (4), /* fmul */
217 COSTS_N_INSNS (17), /* fdivs */
218 COSTS_N_INSNS (20), /* fdivd */
219 COSTS_N_INSNS (20), /* fsqrts */
220 COSTS_N_INSNS (29), /* fsqrtd */
221 COSTS_N_INSNS (6), /* imul */
222 COSTS_N_INSNS (6), /* imulX */
223 0, /* imul bit factor */
224 COSTS_N_INSNS (40), /* idiv */
225 COSTS_N_INSNS (71), /* idivX */
226 COSTS_N_INSNS (2), /* movcc/movr */
227 0, /* shift penalty */
228 };
229
230 static const
231 struct processor_costs niagara_costs = {
232 COSTS_N_INSNS (3), /* int load */
233 COSTS_N_INSNS (3), /* int signed load */
234 COSTS_N_INSNS (3), /* int zeroed load */
235 COSTS_N_INSNS (9), /* float load */
236 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
237 COSTS_N_INSNS (8), /* fadd, fsub */
238 COSTS_N_INSNS (26), /* fcmp */
239 COSTS_N_INSNS (8), /* fmov, fmovr */
240 COSTS_N_INSNS (29), /* fmul */
241 COSTS_N_INSNS (54), /* fdivs */
242 COSTS_N_INSNS (83), /* fdivd */
243 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
244 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
245 COSTS_N_INSNS (11), /* imul */
246 COSTS_N_INSNS (11), /* imulX */
247 0, /* imul bit factor */
248 COSTS_N_INSNS (72), /* idiv */
249 COSTS_N_INSNS (72), /* idivX */
250 COSTS_N_INSNS (1), /* movcc/movr */
251 0, /* shift penalty */
252 };
253
254 static const
255 struct processor_costs niagara2_costs = {
256 COSTS_N_INSNS (3), /* int load */
257 COSTS_N_INSNS (3), /* int signed load */
258 COSTS_N_INSNS (3), /* int zeroed load */
259 COSTS_N_INSNS (3), /* float load */
260 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
261 COSTS_N_INSNS (6), /* fadd, fsub */
262 COSTS_N_INSNS (6), /* fcmp */
263 COSTS_N_INSNS (6), /* fmov, fmovr */
264 COSTS_N_INSNS (6), /* fmul */
265 COSTS_N_INSNS (19), /* fdivs */
266 COSTS_N_INSNS (33), /* fdivd */
267 COSTS_N_INSNS (19), /* fsqrts */
268 COSTS_N_INSNS (33), /* fsqrtd */
269 COSTS_N_INSNS (5), /* imul */
270 COSTS_N_INSNS (5), /* imulX */
271 0, /* imul bit factor */
272 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
273 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
274 COSTS_N_INSNS (1), /* movcc/movr */
275 0, /* shift penalty */
276 };
277
278 static const
279 struct processor_costs niagara3_costs = {
280 COSTS_N_INSNS (3), /* int load */
281 COSTS_N_INSNS (3), /* int signed load */
282 COSTS_N_INSNS (3), /* int zeroed load */
283 COSTS_N_INSNS (3), /* float load */
284 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
285 COSTS_N_INSNS (9), /* fadd, fsub */
286 COSTS_N_INSNS (9), /* fcmp */
287 COSTS_N_INSNS (9), /* fmov, fmovr */
288 COSTS_N_INSNS (9), /* fmul */
289 COSTS_N_INSNS (23), /* fdivs */
290 COSTS_N_INSNS (37), /* fdivd */
291 COSTS_N_INSNS (23), /* fsqrts */
292 COSTS_N_INSNS (37), /* fsqrtd */
293 COSTS_N_INSNS (9), /* imul */
294 COSTS_N_INSNS (9), /* imulX */
295 0, /* imul bit factor */
296 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
297 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
298 COSTS_N_INSNS (1), /* movcc/movr */
299 0, /* shift penalty */
300 };
301
302 const struct processor_costs *sparc_costs = &cypress_costs;
303
304 #ifdef HAVE_AS_RELAX_OPTION
305 /* If 'as' and 'ld' are relaxing tail call insns into a branch always, always
306 use "or %o7,%g0,X; call Y; or X,%g0,%o7" so that the sequence can be
307 optimized. With sethi/jmp, neither 'as' nor 'ld' has an easy way to find
308 out whether something branches between the sethi and the jmp. */
309 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
310 #else
311 #define LEAF_SIBCALL_SLOT_RESERVED_P \
312 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
313 #endif
314
315 /* Vector to say how input registers are mapped to output registers.
316 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
317 eliminate it. You must use -fomit-frame-pointer to get that. */
318 char leaf_reg_remap[] =
319 { 0, 1, 2, 3, 4, 5, 6, 7,
320 -1, -1, -1, -1, -1, -1, 14, -1,
321 -1, -1, -1, -1, -1, -1, -1, -1,
322 8, 9, 10, 11, 12, 13, -1, 15,
323
324 32, 33, 34, 35, 36, 37, 38, 39,
325 40, 41, 42, 43, 44, 45, 46, 47,
326 48, 49, 50, 51, 52, 53, 54, 55,
327 56, 57, 58, 59, 60, 61, 62, 63,
328 64, 65, 66, 67, 68, 69, 70, 71,
329 72, 73, 74, 75, 76, 77, 78, 79,
330 80, 81, 82, 83, 84, 85, 86, 87,
331 88, 89, 90, 91, 92, 93, 94, 95,
332 96, 97, 98, 99, 100, 101, 102};
333
334 /* Vector, indexed by hard register number, which contains 1
335 for a register that is allowable in a candidate for leaf
336 function treatment. */
337 char sparc_leaf_regs[] =
338 { 1, 1, 1, 1, 1, 1, 1, 1,
339 0, 0, 0, 0, 0, 0, 1, 0,
340 0, 0, 0, 0, 0, 0, 0, 0,
341 1, 1, 1, 1, 1, 1, 0, 1,
342 1, 1, 1, 1, 1, 1, 1, 1,
343 1, 1, 1, 1, 1, 1, 1, 1,
344 1, 1, 1, 1, 1, 1, 1, 1,
345 1, 1, 1, 1, 1, 1, 1, 1,
346 1, 1, 1, 1, 1, 1, 1, 1,
347 1, 1, 1, 1, 1, 1, 1, 1,
348 1, 1, 1, 1, 1, 1, 1, 1,
349 1, 1, 1, 1, 1, 1, 1, 1,
350 1, 1, 1, 1, 1, 1, 1};
351
352 struct GTY(()) machine_function
353 {
354 /* Size of the frame of the function. */
355 HOST_WIDE_INT frame_size;
356
357 /* Size of the frame of the function minus the register window save area
358 and the outgoing argument area. */
359 HOST_WIDE_INT apparent_frame_size;
360
361 /* Register we pretend the frame pointer is allocated to. Normally, this
362 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
363 record "offset" separately as it may be too big for (reg + disp). */
364 rtx frame_base_reg;
365 HOST_WIDE_INT frame_base_offset;
366
367 /* Some local-dynamic TLS symbol name. */
368 const char *some_ld_name;
369
370 /* Number of global or FP registers to be saved (as 4-byte quantities). */
371 int n_global_fp_regs;
372
373 /* True if the current function is leaf and uses only leaf regs,
374 so that the SPARC leaf function optimization can be applied.
375 Private version of current_function_uses_only_leaf_regs, see
376 sparc_expand_prologue for the rationale. */
377 int leaf_function_p;
378
379 /* True if the prologue saves local or in registers. */
380 bool save_local_in_regs_p;
381
382 /* True if the data calculated by sparc_expand_prologue are valid. */
383 bool prologue_data_valid_p;
384 };
385
386 #define sparc_frame_size cfun->machine->frame_size
387 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
388 #define sparc_frame_base_reg cfun->machine->frame_base_reg
389 #define sparc_frame_base_offset cfun->machine->frame_base_offset
390 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
391 #define sparc_leaf_function_p cfun->machine->leaf_function_p
392 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
393 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
394
395 /* 1 if the next opcode is to be specially indented. */
396 int sparc_indent_opcode = 0;
397
398 static void sparc_option_override (void);
399 static void sparc_init_modes (void);
400 static void scan_record_type (const_tree, int *, int *, int *);
401 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
402 const_tree, bool, bool, int *, int *);
403
404 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
405 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
406
407 static void sparc_emit_set_const32 (rtx, rtx);
408 static void sparc_emit_set_const64 (rtx, rtx);
409 static void sparc_output_addr_vec (rtx);
410 static void sparc_output_addr_diff_vec (rtx);
411 static void sparc_output_deferred_case_vectors (void);
412 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
413 static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
414 static rtx sparc_builtin_saveregs (void);
415 static int epilogue_renumber (rtx *, int);
416 static bool sparc_assemble_integer (rtx, unsigned int, int);
417 static int set_extends (rtx);
418 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
419 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
420 #ifdef TARGET_SOLARIS
421 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
422 tree) ATTRIBUTE_UNUSED;
423 #endif
424 static int sparc_adjust_cost (rtx, rtx, rtx, int);
425 static int sparc_issue_rate (void);
426 static void sparc_sched_init (FILE *, int, int);
427 static int sparc_use_sched_lookahead (void);
428
429 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
430 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
431 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
432 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
433 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
434
435 static bool sparc_function_ok_for_sibcall (tree, tree);
436 static void sparc_init_libfuncs (void);
437 static void sparc_init_builtins (void);
438 static void sparc_vis_init_builtins (void);
439 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
440 static tree sparc_fold_builtin (tree, int, tree *, bool);
441 static int sparc_vis_mul8x16 (int, int);
442 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
443 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
444 HOST_WIDE_INT, tree);
445 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
446 HOST_WIDE_INT, const_tree);
447 static void sparc_reorg (void);
448 static struct machine_function * sparc_init_machine_status (void);
449 static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
450 static rtx sparc_tls_get_addr (void);
451 static rtx sparc_tls_got (void);
452 static const char *get_some_local_dynamic_name (void);
453 static int get_some_local_dynamic_name_1 (rtx *, void *);
454 static int sparc_register_move_cost (enum machine_mode,
455 reg_class_t, reg_class_t);
456 static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
457 static rtx sparc_function_value (const_tree, const_tree, bool);
458 static rtx sparc_libcall_value (enum machine_mode, const_rtx);
459 static bool sparc_function_value_regno_p (const unsigned int);
460 static rtx sparc_struct_value_rtx (tree, int);
461 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
462 int *, const_tree, int);
463 static bool sparc_return_in_memory (const_tree, const_tree);
464 static bool sparc_strict_argument_naming (cumulative_args_t);
465 static void sparc_va_start (tree, rtx);
466 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
467 static bool sparc_vector_mode_supported_p (enum machine_mode);
468 static bool sparc_tls_referenced_p (rtx);
469 static rtx sparc_legitimize_tls_address (rtx);
470 static rtx sparc_legitimize_pic_address (rtx, rtx);
471 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
472 static rtx sparc_delegitimize_address (rtx);
473 static bool sparc_mode_dependent_address_p (const_rtx);
474 static bool sparc_pass_by_reference (cumulative_args_t,
475 enum machine_mode, const_tree, bool);
476 static void sparc_function_arg_advance (cumulative_args_t,
477 enum machine_mode, const_tree, bool);
478 static rtx sparc_function_arg_1 (cumulative_args_t,
479 enum machine_mode, const_tree, bool, bool);
480 static rtx sparc_function_arg (cumulative_args_t,
481 enum machine_mode, const_tree, bool);
482 static rtx sparc_function_incoming_arg (cumulative_args_t,
483 enum machine_mode, const_tree, bool);
484 static unsigned int sparc_function_arg_boundary (enum machine_mode,
485 const_tree);
486 static int sparc_arg_partial_bytes (cumulative_args_t,
487 enum machine_mode, tree, bool);
488 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
489 static void sparc_file_end (void);
490 static bool sparc_frame_pointer_required (void);
491 static bool sparc_can_eliminate (const int, const int);
492 static rtx sparc_builtin_setjmp_frame_value (void);
493 static void sparc_conditional_register_usage (void);
494 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
495 static const char *sparc_mangle_type (const_tree);
496 #endif
497 static void sparc_trampoline_init (rtx, tree, rtx);
498 static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
499 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
500 static bool sparc_print_operand_punct_valid_p (unsigned char);
501 static void sparc_print_operand (FILE *, rtx, int);
502 static void sparc_print_operand_address (FILE *, rtx);
503 \f
504 #ifdef SUBTARGET_ATTRIBUTE_TABLE
505 /* Table of valid machine attributes. */
506 static const struct attribute_spec sparc_attribute_table[] =
507 {
508 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
509 do_diagnostic } */
510 SUBTARGET_ATTRIBUTE_TABLE,
511 { NULL, 0, 0, false, false, false, NULL, false }
512 };
513 #endif
514 \f
515 /* Option handling. */
516
517 /* Parsed value. */
518 enum cmodel sparc_cmodel;
519
520 char sparc_hard_reg_printed[8];
521
522 /* Initialize the GCC target structure. */
523
524 /* The default is to use .half rather than .short for aligned HI objects. */
525 #undef TARGET_ASM_ALIGNED_HI_OP
526 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
527
528 #undef TARGET_ASM_UNALIGNED_HI_OP
529 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
530 #undef TARGET_ASM_UNALIGNED_SI_OP
531 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
532 #undef TARGET_ASM_UNALIGNED_DI_OP
533 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
534
535 /* The target hook has to handle DI-mode values. */
536 #undef TARGET_ASM_INTEGER
537 #define TARGET_ASM_INTEGER sparc_assemble_integer
538
539 #undef TARGET_ASM_FUNCTION_PROLOGUE
540 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
541 #undef TARGET_ASM_FUNCTION_EPILOGUE
542 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
543
544 #undef TARGET_SCHED_ADJUST_COST
545 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
546 #undef TARGET_SCHED_ISSUE_RATE
547 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
548 #undef TARGET_SCHED_INIT
549 #define TARGET_SCHED_INIT sparc_sched_init
550 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
551 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
552
553 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
554 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
555
556 #undef TARGET_INIT_LIBFUNCS
557 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
558 #undef TARGET_INIT_BUILTINS
559 #define TARGET_INIT_BUILTINS sparc_init_builtins
560
561 #undef TARGET_LEGITIMIZE_ADDRESS
562 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
563 #undef TARGET_DELEGITIMIZE_ADDRESS
564 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
565 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
566 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
567
568 #undef TARGET_EXPAND_BUILTIN
569 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
570 #undef TARGET_FOLD_BUILTIN
571 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
572
573 #if TARGET_TLS
574 #undef TARGET_HAVE_TLS
575 #define TARGET_HAVE_TLS true
576 #endif
577
578 #undef TARGET_CANNOT_FORCE_CONST_MEM
579 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
580
581 #undef TARGET_ASM_OUTPUT_MI_THUNK
582 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
583 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
584 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
585
586 #undef TARGET_MACHINE_DEPENDENT_REORG
587 #define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg
588
589 #undef TARGET_RTX_COSTS
590 #define TARGET_RTX_COSTS sparc_rtx_costs
591 #undef TARGET_ADDRESS_COST
592 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
593 #undef TARGET_REGISTER_MOVE_COST
594 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
595
596 #undef TARGET_PROMOTE_FUNCTION_MODE
597 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
598
599 #undef TARGET_FUNCTION_VALUE
600 #define TARGET_FUNCTION_VALUE sparc_function_value
601 #undef TARGET_LIBCALL_VALUE
602 #define TARGET_LIBCALL_VALUE sparc_libcall_value
603 #undef TARGET_FUNCTION_VALUE_REGNO_P
604 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
605
606 #undef TARGET_STRUCT_VALUE_RTX
607 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
608 #undef TARGET_RETURN_IN_MEMORY
609 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
610 #undef TARGET_MUST_PASS_IN_STACK
611 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
612 #undef TARGET_PASS_BY_REFERENCE
613 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
614 #undef TARGET_ARG_PARTIAL_BYTES
615 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
616 #undef TARGET_FUNCTION_ARG_ADVANCE
617 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
618 #undef TARGET_FUNCTION_ARG
619 #define TARGET_FUNCTION_ARG sparc_function_arg
620 #undef TARGET_FUNCTION_INCOMING_ARG
621 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
622 #undef TARGET_FUNCTION_ARG_BOUNDARY
623 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
624
625 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
626 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
627 #undef TARGET_STRICT_ARGUMENT_NAMING
628 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
629
630 #undef TARGET_EXPAND_BUILTIN_VA_START
631 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
632 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
633 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
634
635 #undef TARGET_VECTOR_MODE_SUPPORTED_P
636 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
637
638 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
639 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
640
641 #ifdef SUBTARGET_INSERT_ATTRIBUTES
642 #undef TARGET_INSERT_ATTRIBUTES
643 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
644 #endif
645
646 #ifdef SUBTARGET_ATTRIBUTE_TABLE
647 #undef TARGET_ATTRIBUTE_TABLE
648 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
649 #endif
650
651 #undef TARGET_RELAXED_ORDERING
652 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
653
654 #undef TARGET_OPTION_OVERRIDE
655 #define TARGET_OPTION_OVERRIDE sparc_option_override
656
657 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
658 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
659 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
660 #endif
661
662 #undef TARGET_ASM_FILE_END
663 #define TARGET_ASM_FILE_END sparc_file_end
664
665 #undef TARGET_FRAME_POINTER_REQUIRED
666 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
667
668 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
669 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
670
671 #undef TARGET_CAN_ELIMINATE
672 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
673
674 #undef TARGET_PREFERRED_RELOAD_CLASS
675 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
676
677 #undef TARGET_CONDITIONAL_REGISTER_USAGE
678 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
679
680 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
681 #undef TARGET_MANGLE_TYPE
682 #define TARGET_MANGLE_TYPE sparc_mangle_type
683 #endif
684
685 #undef TARGET_LEGITIMATE_ADDRESS_P
686 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
687
688 #undef TARGET_LEGITIMATE_CONSTANT_P
689 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
690
691 #undef TARGET_TRAMPOLINE_INIT
692 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
693
694 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
695 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
696 #undef TARGET_PRINT_OPERAND
697 #define TARGET_PRINT_OPERAND sparc_print_operand
698 #undef TARGET_PRINT_OPERAND_ADDRESS
699 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
700
701 struct gcc_target targetm = TARGET_INITIALIZER;
702
703 /* Validate and override various options, and do some machine dependent
704 initialization. */
705
706 static void
707 sparc_option_override (void)
708 {
709 static struct code_model {
710 const char *const name;
711 const enum cmodel value;
712 } const cmodels[] = {
713 { "32", CM_32 },
714 { "medlow", CM_MEDLOW },
715 { "medmid", CM_MEDMID },
716 { "medany", CM_MEDANY },
717 { "embmedany", CM_EMBMEDANY },
718 { NULL, (enum cmodel) 0 }
719 };
720 const struct code_model *cmodel;
721 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
722 static struct cpu_default {
723 const int cpu;
724 const enum processor_type processor;
725 } const cpu_default[] = {
726 /* There must be one entry here for each TARGET_CPU value. */
727 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
728 { TARGET_CPU_v8, PROCESSOR_V8 },
729 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
730 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
731 { TARGET_CPU_leon, PROCESSOR_LEON },
732 { TARGET_CPU_sparclite, PROCESSOR_F930 },
733 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
734 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
735 { TARGET_CPU_v9, PROCESSOR_V9 },
736 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
737 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
738 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
739 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
740 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
741 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
742 { -1, PROCESSOR_V7 }
743 };
744 const struct cpu_default *def;
745 /* Table of values for -m{cpu,tune}=. This must match the order of
746 the PROCESSOR_* enumeration. */
747 static struct cpu_table {
748 const int disable;
749 const int enable;
750 } const cpu_table[] = {
751 { MASK_ISA, 0 },
752 { MASK_ISA, 0 },
753 { MASK_ISA, MASK_V8 },
754 /* TI TMS390Z55 supersparc */
755 { MASK_ISA, MASK_V8 },
756 { MASK_ISA, MASK_V8|MASK_FPU },
757 /* LEON */
758 { MASK_ISA, MASK_V8|MASK_FPU },
759 { MASK_ISA, MASK_SPARCLITE },
760 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
761 { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
762 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
763 { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
764 { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
765 { MASK_ISA, MASK_SPARCLET },
766 /* TEMIC sparclet */
767 { MASK_ISA, MASK_SPARCLET },
768 { MASK_ISA, MASK_V9 },
769 /* UltraSPARC I, II, IIi */
770 { MASK_ISA,
771 /* Although insns using %y are deprecated, using them is a clear win. */
772 MASK_V9|MASK_DEPRECATED_V8_INSNS},
773 /* UltraSPARC III */
774 /* ??? Check if %y issue still holds true. */
775 { MASK_ISA,
776 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2},
777 /* UltraSPARC T1 */
778 { MASK_ISA,
779 MASK_V9|MASK_DEPRECATED_V8_INSNS},
780 /* UltraSPARC T2 */
781 { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2},
782 /* UltraSPARC T3 */
783 { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF},
784 /* UltraSPARC T4 */
785 { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF},
786 };
787 const struct cpu_table *cpu;
788 unsigned int i;
789 int fpu;
790
791 #ifdef SUBTARGET_OVERRIDE_OPTIONS
792 SUBTARGET_OVERRIDE_OPTIONS;
793 #endif
794
795 #ifndef SPARC_BI_ARCH
796 /* Check for unsupported architecture size. */
797 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
798 error ("%s is not supported by this configuration",
799 DEFAULT_ARCH32_P ? "-m64" : "-m32");
800 #endif
801
802 /* We force all 64-bit archs to use a 128-bit long double. */
803 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
804 {
805 error ("-mlong-double-64 not allowed with -m64");
806 target_flags |= MASK_LONG_DOUBLE_128;
807 }
808
809 /* Code model selection. */
810 sparc_cmodel = SPARC_DEFAULT_CMODEL;
811
812 #ifdef SPARC_BI_ARCH
813 if (TARGET_ARCH32)
814 sparc_cmodel = CM_32;
815 #endif
816
817 if (sparc_cmodel_string != NULL)
818 {
819 if (TARGET_ARCH64)
820 {
821 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
822 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
823 break;
824 if (cmodel->name == NULL)
825 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
826 else
827 sparc_cmodel = cmodel->value;
828 }
829 else
830 error ("-mcmodel= is not supported on 32 bit systems");
831 }
832
833 /* Check that -fcall-saved-REG wasn't specified for out registers. */
834 for (i = 8; i < 16; i++)
835 if (!call_used_regs [i])
836 {
837 error ("-fcall-saved-REG is not supported for out registers");
838 call_used_regs [i] = 1;
839 }
840
841 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
842
843 /* Set the default CPU. */
844 if (!global_options_set.x_sparc_cpu_and_features)
845 {
846 for (def = &cpu_default[0]; def->cpu != -1; ++def)
847 if (def->cpu == TARGET_CPU_DEFAULT)
848 break;
849 gcc_assert (def->cpu != -1);
850 sparc_cpu_and_features = def->processor;
851 }
852 if (!global_options_set.x_sparc_cpu)
853 sparc_cpu = sparc_cpu_and_features;
854
855 cpu = &cpu_table[(int) sparc_cpu_and_features];
856 target_flags &= ~cpu->disable;
857 target_flags |= (cpu->enable
858 #ifndef HAVE_AS_FMAF_HPC_VIS3
859 & ~(MASK_FMAF | MASK_VIS3)
860 #endif
861 );
862
863 /* If -mfpu or -mno-fpu was explicitly used, don't override with
864 the processor default. */
865 if (target_flags_explicit & MASK_FPU)
866 target_flags = (target_flags & ~MASK_FPU) | fpu;
867
868 /* -mvis2 implies -mvis */
869 if (TARGET_VIS2)
870 target_flags |= MASK_VIS;
871
872 /* -mvis3 implies -mvis2 and -mvis */
873 if (TARGET_VIS3)
874 target_flags |= MASK_VIS2 | MASK_VIS;
875
876 /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled. */
877 if (! TARGET_FPU)
878 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);
879
880 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
881 are available.
882 -m64 also implies v9. */
883 if (TARGET_VIS || TARGET_ARCH64)
884 {
885 target_flags |= MASK_V9;
886 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
887 }
888
889 /* -mvis also implies -mv8plus on 32-bit */
890 if (TARGET_VIS && ! TARGET_ARCH64)
891 target_flags |= MASK_V8PLUS;
892
893 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
894 if (TARGET_V9 && TARGET_ARCH32)
895 target_flags |= MASK_DEPRECATED_V8_INSNS;
896
897 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
898 if (! TARGET_V9 || TARGET_ARCH64)
899 target_flags &= ~MASK_V8PLUS;
900
901 /* Don't use stack biasing in 32-bit mode. */
902 if (TARGET_ARCH32)
903 target_flags &= ~MASK_STACK_BIAS;
904
905 /* Supply a default value for align_functions. */
906 if (align_functions == 0
907 && (sparc_cpu == PROCESSOR_ULTRASPARC
908 || sparc_cpu == PROCESSOR_ULTRASPARC3
909 || sparc_cpu == PROCESSOR_NIAGARA
910 || sparc_cpu == PROCESSOR_NIAGARA2
911 || sparc_cpu == PROCESSOR_NIAGARA3
912 || sparc_cpu == PROCESSOR_NIAGARA4))
913 align_functions = 32;
914
915 /* Validate PCC_STRUCT_RETURN. */
916 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
917 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
918
919 /* Only use .uaxword when compiling for a 64-bit target. */
920 if (!TARGET_ARCH64)
921 targetm.asm_out.unaligned_op.di = NULL;
922
923 /* Do various machine dependent initializations. */
924 sparc_init_modes ();
925
926 /* Set up function hooks. */
927 init_machine_status = sparc_init_machine_status;
928
929 switch (sparc_cpu)
930 {
931 case PROCESSOR_V7:
932 case PROCESSOR_CYPRESS:
933 sparc_costs = &cypress_costs;
934 break;
935 case PROCESSOR_V8:
936 case PROCESSOR_SPARCLITE:
937 case PROCESSOR_SUPERSPARC:
938 sparc_costs = &supersparc_costs;
939 break;
940 case PROCESSOR_F930:
941 case PROCESSOR_F934:
942 case PROCESSOR_HYPERSPARC:
943 case PROCESSOR_SPARCLITE86X:
944 sparc_costs = &hypersparc_costs;
945 break;
946 case PROCESSOR_LEON:
947 sparc_costs = &leon_costs;
948 break;
949 case PROCESSOR_SPARCLET:
950 case PROCESSOR_TSC701:
951 sparc_costs = &sparclet_costs;
952 break;
953 case PROCESSOR_V9:
954 case PROCESSOR_ULTRASPARC:
955 sparc_costs = &ultrasparc_costs;
956 break;
957 case PROCESSOR_ULTRASPARC3:
958 sparc_costs = &ultrasparc3_costs;
959 break;
960 case PROCESSOR_NIAGARA:
961 sparc_costs = &niagara_costs;
962 break;
963 case PROCESSOR_NIAGARA2:
964 sparc_costs = &niagara2_costs;
965 break;
966 case PROCESSOR_NIAGARA3:
967 case PROCESSOR_NIAGARA4:
968 sparc_costs = &niagara3_costs;
969 break;
970 case PROCESSOR_NATIVE:
971 gcc_unreachable ();
972 };
973
974 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
975 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
976 target_flags |= MASK_LONG_DOUBLE_128;
977 #endif
978
979 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
980 ((sparc_cpu == PROCESSOR_ULTRASPARC
981 || sparc_cpu == PROCESSOR_NIAGARA
982 || sparc_cpu == PROCESSOR_NIAGARA2
983 || sparc_cpu == PROCESSOR_NIAGARA3
984 || sparc_cpu == PROCESSOR_NIAGARA4)
985 ? 2
986 : (sparc_cpu == PROCESSOR_ULTRASPARC3
987 ? 8 : 3)),
988 global_options.x_param_values,
989 global_options_set.x_param_values);
990 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
991 ((sparc_cpu == PROCESSOR_ULTRASPARC
992 || sparc_cpu == PROCESSOR_ULTRASPARC3
993 || sparc_cpu == PROCESSOR_NIAGARA
994 || sparc_cpu == PROCESSOR_NIAGARA2
995 || sparc_cpu == PROCESSOR_NIAGARA3
996 || sparc_cpu == PROCESSOR_NIAGARA4)
997 ? 64 : 32),
998 global_options.x_param_values,
999 global_options_set.x_param_values);
1000
1001 /* Disable save slot sharing for call-clobbered registers by default.
1002 The IRA sharing algorithm works on single registers only and this
1003 pessimizes for double floating-point registers. */
1004 if (!global_options_set.x_flag_ira_share_save_slots)
1005 flag_ira_share_save_slots = 0;
1006 }
1007 \f
1008 /* Miscellaneous utilities. */
1009
1010 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
1011 or branch on register contents instructions. */
1012
1013 int
1014 v9_regcmp_p (enum rtx_code code)
1015 {
1016 return (code == EQ || code == NE || code == GE || code == LT
1017 || code == LE || code == GT);
1018 }
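
/* These are precisely the six conditions that the V9 branch-on-register
   instructions (brz, brlez, brlz, brnz, brgz, brgez) and the matching
   MOVr/FMOVr forms can test directly against zero.  */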
1019
1020 /* Nonzero if OP is a floating point constant which can
1021 be loaded into an integer register using a single
1022 sethi instruction. */
1023
1024 int
1025 fp_sethi_p (rtx op)
1026 {
1027 if (GET_CODE (op) == CONST_DOUBLE)
1028 {
1029 REAL_VALUE_TYPE r;
1030 long i;
1031
1032 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
1033 REAL_VALUE_TO_TARGET_SINGLE (r, i);
1034 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
1035 }
1036
1037 return 0;
1038 }
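
/* For instance (an illustrative value), the SFmode constant 512.0f has
   the bit image 0x44000000: it is far outside the SIMM13 range, but its
   low 10 bits are clear, so SPARC_SETHI_P accepts it and a single
   "sethi %hi(0x44000000), %reg" materializes it.  */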
1039
1040 /* Nonzero if OP is a floating point constant which can
1041 be loaded into an integer register using a single
1042 mov instruction. */
1043
1044 int
1045 fp_mov_p (rtx op)
1046 {
1047 if (GET_CODE (op) == CONST_DOUBLE)
1048 {
1049 REAL_VALUE_TYPE r;
1050 long i;
1051
1052 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
1053 REAL_VALUE_TO_TARGET_SINGLE (r, i);
1054 return SPARC_SIMM13_P (i);
1055 }
1056
1057 return 0;
1058 }
1059
1060 /* Nonzero if OP is a floating point constant which can
1061 be loaded into an integer register using a high/losum
1062 instruction sequence. */
1063
1064 int
1065 fp_high_losum_p (rtx op)
1066 {
1067 /* The constraints calling this should only be in
1068 SFmode move insns, so any constant which cannot
1069 be moved using a single insn will do. */
1070 if (GET_CODE (op) == CONST_DOUBLE)
1071 {
1072 REAL_VALUE_TYPE r;
1073 long i;
1074
1075 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
1076 REAL_VALUE_TO_TARGET_SINGLE (r, i);
1077 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
1078 }
1079
1080 return 0;
1081 }
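
/* For instance, 0.1f has the bit image 0x3dcccccd: it is outside the
   SIMM13 range and its low 10 bits are nonzero, so it takes the
   two-insn sequence

	sethi	%hi(0x3dcccccd), %reg
	or	%reg, %lo(0x3dcccccd), %reg  */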
1082
1083 /* Return true if the address of LABEL can be loaded by means of the
1084 mov{si,di}_pic_label_ref patterns in PIC mode. */
1085
1086 static bool
1087 can_use_mov_pic_label_ref (rtx label)
1088 {
1089 /* VxWorks does not impose a fixed gap between segments; the run-time
1090 gap can be different from the object-file gap. We therefore can't
1091 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1092 are absolutely sure that X is in the same segment as the GOT.
1093 Unfortunately, the flexibility of linker scripts means that we
1094 can't be sure of that in general, so assume that GOT-relative
1095 accesses are never valid on VxWorks. */
1096 if (TARGET_VXWORKS_RTP)
1097 return false;
1098
1099 /* Similarly, if the label is non-local, it might end up being placed
1100 in a different section than the current one, and the mov_pic_label_ref
1101 patterns require the label and the code to be in the same section. */
1102 if (LABEL_REF_NONLOCAL_P (label))
1103 return false;
1104
1105 /* Finally, if we are reordering basic blocks and partitioning into hot
1106 and cold sections, this might happen to any label. */
1107 if (flag_reorder_blocks_and_partition)
1108 return false;
1109
1110 return true;
1111 }
1112
1113 /* Expand a move instruction. Return true if all work is done. */
1114
1115 bool
1116 sparc_expand_move (enum machine_mode mode, rtx *operands)
1117 {
1118 /* Handle sets of MEM first. */
1119 if (GET_CODE (operands[0]) == MEM)
1120 {
1121 /* Storing zero needs no fixup: 0 is a register (or a pair of registers) on SPARC. */
1122 if (register_or_zero_operand (operands[1], mode))
1123 return false;
1124
1125 if (!reload_in_progress)
1126 {
1127 operands[0] = validize_mem (operands[0]);
1128 operands[1] = force_reg (mode, operands[1]);
1129 }
1130 }
1131
1132 /* Fixup TLS cases. */
1133 if (TARGET_HAVE_TLS
1134 && CONSTANT_P (operands[1])
1135 && sparc_tls_referenced_p (operands [1]))
1136 {
1137 operands[1] = sparc_legitimize_tls_address (operands[1]);
1138 return false;
1139 }
1140
1141 /* Fixup PIC cases. */
1142 if (flag_pic && CONSTANT_P (operands[1]))
1143 {
1144 if (pic_address_needs_scratch (operands[1]))
1145 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
1146
1147 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
1148 if (GET_CODE (operands[1]) == LABEL_REF
1149 && can_use_mov_pic_label_ref (operands[1]))
1150 {
1151 if (mode == SImode)
1152 {
1153 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1154 return true;
1155 }
1156
1157 if (mode == DImode)
1158 {
1159 gcc_assert (TARGET_ARCH64);
1160 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1161 return true;
1162 }
1163 }
1164
1165 if (symbolic_operand (operands[1], mode))
1166 {
1167 operands[1]
1168 = sparc_legitimize_pic_address (operands[1],
1169 reload_in_progress
1170 ? operands[0] : NULL_RTX);
1171 return false;
1172 }
1173 }
1174
1175 /* If we are trying to toss an integer constant into FP registers,
1176 or loading a FP or vector constant, force it into memory. */
1177 if (CONSTANT_P (operands[1])
1178 && REG_P (operands[0])
1179 && (SPARC_FP_REG_P (REGNO (operands[0]))
1180 || SCALAR_FLOAT_MODE_P (mode)
1181 || VECTOR_MODE_P (mode)))
1182 {
1183 /* emit_group_store will send such bogosity to us when it is
1184 not storing directly into memory. So fix this up to avoid
1185 crashes in output_constant_pool. */
1186 if (operands [1] == const0_rtx)
1187 operands[1] = CONST0_RTX (mode);
1188
1189 /* If TARGET_VIS, we can clear FP registers or set them to all ones;
1190 for the other registers, we always can. */
1191 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1192 && (const_zero_operand (operands[1], mode)
1193 || const_all_ones_operand (operands[1], mode)))
1194 return false;
1195
1196 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1197 /* We are able to build any SF constant in integer registers
1198 with at most 2 instructions. */
1199 && (mode == SFmode
1200 /* And any DF constant in integer registers. */
1201 || (mode == DFmode
1202 && (reload_completed || reload_in_progress))))
1203 return false;
1204
1205 operands[1] = force_const_mem (mode, operands[1]);
1206 if (!reload_in_progress)
1207 operands[1] = validize_mem (operands[1]);
1208 return false;
1209 }
1210
1211 /* Accept non-constants and valid constants unmodified. */
1212 if (!CONSTANT_P (operands[1])
1213 || GET_CODE (operands[1]) == HIGH
1214 || input_operand (operands[1], mode))
1215 return false;
1216
1217 switch (mode)
1218 {
1219 case QImode:
1220 /* All QImode constants require only one insn, so proceed. */
1221 break;
1222
1223 case HImode:
1224 case SImode:
1225 sparc_emit_set_const32 (operands[0], operands[1]);
1226 return true;
1227
1228 case DImode:
1229 /* input_operand should have filtered out 32-bit mode. */
1230 sparc_emit_set_const64 (operands[0], operands[1]);
1231 return true;
1232
1233 default:
1234 gcc_unreachable ();
1235 }
1236
1237 return false;
1238 }
1239
1240 /* Load OP1, a 32-bit constant, into OP0, a register.
1241 We know it can't be done in one insn when we get
1242 here; the move expander guarantees this. */
1243
1244 static void
1245 sparc_emit_set_const32 (rtx op0, rtx op1)
1246 {
1247 enum machine_mode mode = GET_MODE (op0);
1248 rtx temp;
1249
1250 if (reload_in_progress || reload_completed)
1251 temp = op0;
1252 else
1253 temp = gen_reg_rtx (mode);
1254
1255 if (GET_CODE (op1) == CONST_INT)
1256 {
1257 gcc_assert (!small_int_operand (op1, mode)
1258 && !const_high_operand (op1, mode));
1259
1260 /* Emit them as real moves instead of a HIGH/LO_SUM;
1261 this way CSE can see everything and reuse intermediate
1262 values if it wants. */
1263 emit_insn (gen_rtx_SET (VOIDmode, temp,
1264 GEN_INT (INTVAL (op1)
1265 & ~(HOST_WIDE_INT)0x3ff)));
1266
1267 emit_insn (gen_rtx_SET (VOIDmode,
1268 op0,
1269 gen_rtx_IOR (mode, temp,
1270 GEN_INT (INTVAL (op1) & 0x3ff))));
1271 }
1272 else
1273 {
1274 /* A symbol, emit in the traditional way. */
1275 emit_insn (gen_rtx_SET (VOIDmode, temp,
1276 gen_rtx_HIGH (mode, op1)));
1277 emit_insn (gen_rtx_SET (VOIDmode,
1278 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1279 }
1280 }
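
/* As an illustration, for the hypothetical value op1 == 0x12345678 this
   emits the two sets

	(set temp (const_int 0x12345400))
	(set op0 (ior:SI temp (const_int 0x278)))

   which assemble to "sethi %hi(0x12345678), %temp" followed by
   "or %temp, 0x278, %op0".  */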
1281
1282 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1283 If TEMP is nonzero, we are forbidden to use any other scratch
1284 registers. Otherwise, we are allowed to generate them as needed.
1285
1286 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1287 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1288
1289 void
1290 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1291 {
1292 rtx temp1, temp2, temp3, temp4, temp5;
1293 rtx ti_temp = 0;
1294
1295 if (temp && GET_MODE (temp) == TImode)
1296 {
1297 ti_temp = temp;
1298 temp = gen_rtx_REG (DImode, REGNO (temp));
1299 }
1300
1301 /* SPARC-V9 code-model support. */
1302 switch (sparc_cmodel)
1303 {
1304 case CM_MEDLOW:
1305 /* The range spanned by all instructions in the object is less
1306 than 2^31 bytes (2GB) and the distance from any instruction
1307 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1308 than 2^31 bytes (2GB).
1309
1310 The executable must be in the low 4TB of the virtual address
1311 space.
1312
1313 sethi %hi(symbol), %temp1
1314 or %temp1, %lo(symbol), %reg */
1315 if (temp)
1316 temp1 = temp; /* op0 is allowed. */
1317 else
1318 temp1 = gen_reg_rtx (DImode);
1319
1320 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1321 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1322 break;
1323
1324 case CM_MEDMID:
1325 /* The range spanned by all instructions in the object is less
1326 than 2^31 bytes (2GB) and the distance from any instruction
1327 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1328 than 2^31 bytes (2GB).
1329
1330 The executable must be in the low 16TB of the virtual address
1331 space.
1332
1333 sethi %h44(symbol), %temp1
1334 or %temp1, %m44(symbol), %temp2
1335 sllx %temp2, 12, %temp3
1336 or %temp3, %l44(symbol), %reg */
1337 if (temp)
1338 {
1339 temp1 = op0;
1340 temp2 = op0;
1341 temp3 = temp; /* op0 is allowed. */
1342 }
1343 else
1344 {
1345 temp1 = gen_reg_rtx (DImode);
1346 temp2 = gen_reg_rtx (DImode);
1347 temp3 = gen_reg_rtx (DImode);
1348 }
1349
1350 emit_insn (gen_seth44 (temp1, op1));
1351 emit_insn (gen_setm44 (temp2, temp1, op1));
1352 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1353 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1354 emit_insn (gen_setl44 (op0, temp3, op1));
1355 break;
1356
1357 case CM_MEDANY:
1358 /* The range spanned by all instructions in the object is less
1359 than 2^31 bytes (2GB) and the distance from any instruction
1360 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1361 than 2^31 bytes (2GB).
1362
1363 The executable can be placed anywhere in the virtual address
1364 space.
1365
1366 sethi %hh(symbol), %temp1
1367 sethi %lm(symbol), %temp2
1368 or %temp1, %hm(symbol), %temp3
1369 sllx %temp3, 32, %temp4
1370 or %temp4, %temp2, %temp5
1371 or %temp5, %lo(symbol), %reg */
1372 if (temp)
1373 {
1374 /* It is possible that one of the registers we got for operands[2]
1375 might coincide with that of operands[0] (which is why we made
1376 it TImode). Pick the other one to use as our scratch. */
1377 if (rtx_equal_p (temp, op0))
1378 {
1379 gcc_assert (ti_temp);
1380 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1381 }
1382 temp1 = op0;
1383 temp2 = temp; /* op0 is _not_ allowed, see above. */
1384 temp3 = op0;
1385 temp4 = op0;
1386 temp5 = op0;
1387 }
1388 else
1389 {
1390 temp1 = gen_reg_rtx (DImode);
1391 temp2 = gen_reg_rtx (DImode);
1392 temp3 = gen_reg_rtx (DImode);
1393 temp4 = gen_reg_rtx (DImode);
1394 temp5 = gen_reg_rtx (DImode);
1395 }
1396
1397 emit_insn (gen_sethh (temp1, op1));
1398 emit_insn (gen_setlm (temp2, op1));
1399 emit_insn (gen_sethm (temp3, temp1, op1));
1400 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1401 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1402 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1403 gen_rtx_PLUS (DImode, temp4, temp2)));
1404 emit_insn (gen_setlo (op0, temp5, op1));
1405 break;
1406
1407 case CM_EMBMEDANY:
1408 /* Old old old backwards compatibility kruft here.
1409 Essentially it is MEDLOW with a fixed 64-bit
1410 virtual base added to all data segment addresses.
1411 Text-segment stuff is computed like MEDANY; we can't
1412 reuse the code above because the relocation knobs
1413 look different.
1414
1415 Data segment: sethi %hi(symbol), %temp1
1416 add %temp1, EMBMEDANY_BASE_REG, %temp2
1417 or %temp2, %lo(symbol), %reg */
1418 if (data_segment_operand (op1, GET_MODE (op1)))
1419 {
1420 if (temp)
1421 {
1422 temp1 = temp; /* op0 is allowed. */
1423 temp2 = op0;
1424 }
1425 else
1426 {
1427 temp1 = gen_reg_rtx (DImode);
1428 temp2 = gen_reg_rtx (DImode);
1429 }
1430
1431 emit_insn (gen_embmedany_sethi (temp1, op1));
1432 emit_insn (gen_embmedany_brsum (temp2, temp1));
1433 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1434 }
1435
1436 /* Text segment: sethi %uhi(symbol), %temp1
1437 sethi %hi(symbol), %temp2
1438 or %temp1, %ulo(symbol), %temp3
1439 sllx %temp3, 32, %temp4
1440 or %temp4, %temp2, %temp5
1441 or %temp5, %lo(symbol), %reg */
1442 else
1443 {
1444 if (temp)
1445 {
1446 /* It is possible that one of the registers we got for operands[2]
1447 might coincide with that of operands[0] (which is why we made
1448 it TImode). Pick the other one to use as our scratch. */
1449 if (rtx_equal_p (temp, op0))
1450 {
1451 gcc_assert (ti_temp);
1452 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1453 }
1454 temp1 = op0;
1455 temp2 = temp; /* op0 is _not_ allowed, see above. */
1456 temp3 = op0;
1457 temp4 = op0;
1458 temp5 = op0;
1459 }
1460 else
1461 {
1462 temp1 = gen_reg_rtx (DImode);
1463 temp2 = gen_reg_rtx (DImode);
1464 temp3 = gen_reg_rtx (DImode);
1465 temp4 = gen_reg_rtx (DImode);
1466 temp5 = gen_reg_rtx (DImode);
1467 }
1468
1469 emit_insn (gen_embmedany_textuhi (temp1, op1));
1470 emit_insn (gen_embmedany_texthi (temp2, op1));
1471 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1472 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1473 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1474 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1475 gen_rtx_PLUS (DImode, temp4, temp2)));
1476 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1477 }
1478 break;
1479
1480 default:
1481 gcc_unreachable ();
1482 }
1483 }
1484
1485 #if HOST_BITS_PER_WIDE_INT == 32
1486 static void
1487 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1488 {
1489 gcc_unreachable ();
1490 }
1491 #else
1492 /* These avoid problems when cross-compiling. If we do not
1493 go through all this hair then the optimizer will see
1494 invalid REG_EQUAL notes or in some cases none at all. */
1495 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1496 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1497 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1498 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1499
1500 /* The optimizer is not to assume anything about exactly
1501 which bits are set for a HIGH; they are unspecified.
1502 Unfortunately this leads to many missed optimizations
1503 during CSE. To alleviate this, we mask out the non-HIGH
1504 bits and match a plain movdi. */
1505 static rtx
1506 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1507 {
1508 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1509 }
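
/* E.g. gen_safe_HIGH64 (dest, 0x12345678) yields
   (set dest (const_int 0x12345400)), which still assembles to a single
   sethi but, unlike (high:DI ...), gives CSE an exact value to reason
   about.  */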
1510
1511 static rtx
1512 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1513 {
1514 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1515 }
1516
1517 static rtx
1518 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1519 {
1520 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1521 }
1522
1523 static rtx
1524 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1525 {
1526 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1527 }
1528
1529 /* Worker routines for 64-bit constant formation on arch64.
1530 One of the key things to do in these emissions is
1531 to create as many temp REGs as possible. This makes it
1532 possible for half-built constants to be reused when
1533 similar values are required later on.
1534 Without doing this, the optimizer cannot see such
1535 opportunities. */
1536
1537 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1538 unsigned HOST_WIDE_INT, int);
1539
1540 static void
1541 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1542 unsigned HOST_WIDE_INT low_bits, int is_neg)
1543 {
1544 unsigned HOST_WIDE_INT high_bits;
1545
1546 if (is_neg)
1547 high_bits = (~low_bits) & 0xffffffff;
1548 else
1549 high_bits = low_bits;
1550
1551 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1552 if (!is_neg)
1553 {
1554 emit_insn (gen_rtx_SET (VOIDmode, op0,
1555 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1556 }
1557 else
1558 {
1559 /* If we are XOR'ing with -1, then we should emit a one's complement
1560 instead. This way the combiner will notice logical operations
1561 such as ANDN later on and substitute. */
1562 if ((low_bits & 0x3ff) == 0x3ff)
1563 {
1564 emit_insn (gen_rtx_SET (VOIDmode, op0,
1565 gen_rtx_NOT (DImode, temp)));
1566 }
1567 else
1568 {
1569 emit_insn (gen_rtx_SET (VOIDmode, op0,
1570 gen_safe_XOR64 (temp,
1571 (-(HOST_WIDE_INT)0x400
1572 | (low_bits & 0x3ff)))));
1573 }
1574 }
1575 }
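
/* A worked example of the is_neg case: to load the sign-extended
   constant 0xffffffff80000001, low_bits is 0x80000001 and high_bits
   becomes ~low_bits & 0xffffffff == 0x7ffffffe, so we emit

	sethi	%hi(0x7ffffffe), %temp		! temp = 0x7ffffc00
	xor	%temp, -1023, %op0

   where -1023 == -0x400 | (low_bits & 0x3ff) sign-extends to
   0xfffffffffffffc01, and 0x7ffffc00 ^ 0xfffffffffffffc01 is exactly
   0xffffffff80000001.  */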
1576
1577 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1578 unsigned HOST_WIDE_INT, int);
1579
1580 static void
1581 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1582 unsigned HOST_WIDE_INT high_bits,
1583 unsigned HOST_WIDE_INT low_immediate,
1584 int shift_count)
1585 {
1586 rtx temp2 = op0;
1587
1588 if ((high_bits & 0xfffffc00) != 0)
1589 {
1590 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1591 if ((high_bits & ~0xfffffc00) != 0)
1592 emit_insn (gen_rtx_SET (VOIDmode, op0,
1593 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1594 else
1595 temp2 = temp;
1596 }
1597 else
1598 {
1599 emit_insn (gen_safe_SET64 (temp, high_bits));
1600 temp2 = temp;
1601 }
1602
1603 /* Now shift it up into place. */
1604 emit_insn (gen_rtx_SET (VOIDmode, op0,
1605 gen_rtx_ASHIFT (DImode, temp2,
1606 GEN_INT (shift_count))));
1607
1608 /* If there is a low immediate piece, finish up by
1609 putting that in as well. */
1610 if (low_immediate != 0)
1611 emit_insn (gen_rtx_SET (VOIDmode, op0,
1612 gen_safe_OR64 (op0, low_immediate)));
1613 }
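
/* For instance, with the hypothetical arguments high_bits == 0x12345678,
   low_immediate == 0x123 and shift_count == 32, this emits

	sethi	%hi(0x12345678), %temp
	or	%temp, 0x278, %op0
	sllx	%op0, 32, %op0
	or	%op0, 0x123, %op0

   leaving op0 == 0x1234567800000123.  */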
1614
1615 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1616 unsigned HOST_WIDE_INT);
1617
1618 /* Full 64-bit constant decomposition. Even though this is the
1619 'worst' case, we still optimize a few things away. */
1620 static void
1621 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1622 unsigned HOST_WIDE_INT high_bits,
1623 unsigned HOST_WIDE_INT low_bits)
1624 {
1625 rtx sub_temp;
1626
1627 if (reload_in_progress || reload_completed)
1628 sub_temp = op0;
1629 else
1630 sub_temp = gen_reg_rtx (DImode);
1631
1632 if ((high_bits & 0xfffffc00) != 0)
1633 {
1634 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1635 if ((high_bits & ~0xfffffc00) != 0)
1636 emit_insn (gen_rtx_SET (VOIDmode,
1637 sub_temp,
1638 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1639 else
1640 sub_temp = temp;
1641 }
1642 else
1643 {
1644 emit_insn (gen_safe_SET64 (temp, high_bits));
1645 sub_temp = temp;
1646 }
1647
1648 if (!reload_in_progress && !reload_completed)
1649 {
1650 rtx temp2 = gen_reg_rtx (DImode);
1651 rtx temp3 = gen_reg_rtx (DImode);
1652 rtx temp4 = gen_reg_rtx (DImode);
1653
1654 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1655 gen_rtx_ASHIFT (DImode, sub_temp,
1656 GEN_INT (32))));
1657
1658 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1659 if ((low_bits & ~0xfffffc00) != 0)
1660 {
1661 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1662 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1663 emit_insn (gen_rtx_SET (VOIDmode, op0,
1664 gen_rtx_PLUS (DImode, temp4, temp3)));
1665 }
1666 else
1667 {
1668 emit_insn (gen_rtx_SET (VOIDmode, op0,
1669 gen_rtx_PLUS (DImode, temp4, temp2)));
1670 }
1671 }
1672 else
1673 {
1674 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1675 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1676 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1677 int to_shift = 12;
1678
1679 /* We are in the middle of reload, so this is really
1680 painful. However, we still make an attempt to
1681 avoid emitting truly stupid code. */
1682 if (low1 != const0_rtx)
1683 {
1684 emit_insn (gen_rtx_SET (VOIDmode, op0,
1685 gen_rtx_ASHIFT (DImode, sub_temp,
1686 GEN_INT (to_shift))));
1687 emit_insn (gen_rtx_SET (VOIDmode, op0,
1688 gen_rtx_IOR (DImode, op0, low1)));
1689 sub_temp = op0;
1690 to_shift = 12;
1691 }
1692 else
1693 {
1694 to_shift += 12;
1695 }
1696 if (low2 != const0_rtx)
1697 {
1698 emit_insn (gen_rtx_SET (VOIDmode, op0,
1699 gen_rtx_ASHIFT (DImode, sub_temp,
1700 GEN_INT (to_shift))));
1701 emit_insn (gen_rtx_SET (VOIDmode, op0,
1702 gen_rtx_IOR (DImode, op0, low2)));
1703 sub_temp = op0;
1704 to_shift = 8;
1705 }
1706 else
1707 {
1708 to_shift += 8;
1709 }
1710 emit_insn (gen_rtx_SET (VOIDmode, op0,
1711 gen_rtx_ASHIFT (DImode, sub_temp,
1712 GEN_INT (to_shift))));
1713 if (low3 != const0_rtx)
1714 emit_insn (gen_rtx_SET (VOIDmode, op0,
1715 gen_rtx_IOR (DImode, op0, low3)));
1716 /* phew... */
1717 }
1718 }
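/* A rough example of the path with fresh temporaries (register
   names illustrative): loading 0x123456789abcdef0 would emit
   something like
       sethi  %hi(0x12345678), %t1
       or     %t1, 0x278, %t1
       sllx   %t1, 32, %t4
       sethi  %hi(0x9abcdef0), %t2
       or     %t2, 0x2f0, %t3
       add    %t4, %t3, %dst
   i.e. both halves are built with sethi/or and then combined.  */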
1719
1720 /* Analyze a 64-bit constant for certain properties. */
1721 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1722 unsigned HOST_WIDE_INT,
1723 int *, int *, int *);
1724
1725 static void
1726 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1727 unsigned HOST_WIDE_INT low_bits,
1728 int *hbsp, int *lbsp, int *abbasp)
1729 {
1730 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1731 int i;
1732
1733 lowest_bit_set = highest_bit_set = -1;
1734 i = 0;
1735 do
1736 {
1737 if ((lowest_bit_set == -1)
1738 && ((low_bits >> i) & 1))
1739 lowest_bit_set = i;
1740 if ((highest_bit_set == -1)
1741 && ((high_bits >> (32 - i - 1)) & 1))
1742 highest_bit_set = (64 - i - 1);
1743 }
1744 while (++i < 32
1745 && ((highest_bit_set == -1)
1746 || (lowest_bit_set == -1)));
1747 if (i == 32)
1748 {
1749 i = 0;
1750 do
1751 {
1752 if ((lowest_bit_set == -1)
1753 && ((high_bits >> i) & 1))
1754 lowest_bit_set = i + 32;
1755 if ((highest_bit_set == -1)
1756 && ((low_bits >> (32 - i - 1)) & 1))
1757 highest_bit_set = 32 - i - 1;
1758 }
1759 while (++i < 32
1760 && ((highest_bit_set == -1)
1761 || (lowest_bit_set == -1)));
1762 }
1763 /* If there are no bits set, this should have gone out
1764 as one instruction! */
1765 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1766 all_bits_between_are_set = 1;
1767 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1768 {
1769 if (i < 32)
1770 {
1771 if ((low_bits & (1 << i)) != 0)
1772 continue;
1773 }
1774 else
1775 {
1776 if ((high_bits & (1 << (i - 32))) != 0)
1777 continue;
1778 }
1779 all_bits_between_are_set = 0;
1780 break;
1781 }
1782 *hbsp = highest_bit_set;
1783 *lbsp = lowest_bit_set;
1784 *abbasp = all_bits_between_are_set;
1785 }
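/* Two illustrative results: 0x00000000ff000000 yields
   lowest_bit_set == 24, highest_bit_set == 31 and
   all_bits_between_are_set == 1, while 0x8000000000000001 yields
   lowest_bit_set == 0, highest_bit_set == 63 and
   all_bits_between_are_set == 0.  */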
1786
1787 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1788
1789 static int
1790 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1791 unsigned HOST_WIDE_INT low_bits)
1792 {
1793 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1794
1795 if (high_bits == 0
1796 || high_bits == 0xffffffff)
1797 return 1;
1798
1799 analyze_64bit_constant (high_bits, low_bits,
1800 &highest_bit_set, &lowest_bit_set,
1801 &all_bits_between_are_set);
1802
1803 if ((highest_bit_set == 63
1804 || lowest_bit_set == 0)
1805 && all_bits_between_are_set != 0)
1806 return 1;
1807
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1809 return 1;
1810
1811 return 0;
1812 }
1813
1814 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1815 unsigned HOST_WIDE_INT,
1816 int, int);
1817
1818 static unsigned HOST_WIDE_INT
1819 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1820 unsigned HOST_WIDE_INT low_bits,
1821 int lowest_bit_set, int shift)
1822 {
1823 HOST_WIDE_INT hi, lo;
1824
1825 if (lowest_bit_set < 32)
1826 {
1827 lo = (low_bits >> lowest_bit_set) << shift;
1828 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1829 }
1830 else
1831 {
1832 lo = 0;
1833 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1834 }
1835 gcc_assert (! (hi & lo));
1836 return (hi | lo);
1837 }
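/* For instance, with the constant 0x000ff00000000000 (bits 44..51
   set) and shift == 0, lowest_bit_set is 44, so the second arm
   computes hi == high_bits >> 12 == 0xff: the set bits renormalized
   to start at bit 0.  */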
1838
1839 /* Here we are sure to be arch64 and this is an integer constant
1840 being loaded into a register. Emit the most efficient
1841 insn sequence possible. Detection of all the 1-insn cases
1842 has been done already. */
1843 static void
1844 sparc_emit_set_const64 (rtx op0, rtx op1)
1845 {
1846 unsigned HOST_WIDE_INT high_bits, low_bits;
1847 int lowest_bit_set, highest_bit_set;
1848 int all_bits_between_are_set;
1849 rtx temp = 0;
1850
1851 /* Sanity check that we know what we are working with. */
1852 gcc_assert (TARGET_ARCH64
1853 && (GET_CODE (op0) == SUBREG
1854 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1855
1856 if (reload_in_progress || reload_completed)
1857 temp = op0;
1858
1859 if (GET_CODE (op1) != CONST_INT)
1860 {
1861 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1862 return;
1863 }
1864
1865 if (! temp)
1866 temp = gen_reg_rtx (DImode);
1867
1868 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1869 low_bits = (INTVAL (op1) & 0xffffffff);
1870
1871 /* low_bits bits 0 --> 31
1872 high_bits bits 32 --> 63 */
1873
1874 analyze_64bit_constant (high_bits, low_bits,
1875 &highest_bit_set, &lowest_bit_set,
1876 &all_bits_between_are_set);
1877
1878 /* First try for a 2-insn sequence. */
1879
1880 /* These situations are preferred because the optimizer can
1881 * do more things with them:
1882 * 1) mov -1, %reg
1883 * sllx %reg, shift, %reg
1884 * 2) mov -1, %reg
1885 * srlx %reg, shift, %reg
1886 * 3) mov some_small_const, %reg
1887 * sllx %reg, shift, %reg
1888 */
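/* For example, 0xffffffff00000000 matches case 1 and becomes
   mov -1, %reg; sllx %reg, 32, %reg, while 0x00000000ffffffff
   matches case 2 and becomes mov -1, %reg; srlx %reg, 32, %reg. */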
1889 if (((highest_bit_set == 63
1890 || lowest_bit_set == 0)
1891 && all_bits_between_are_set != 0)
1892 || ((highest_bit_set - lowest_bit_set) < 12))
1893 {
1894 HOST_WIDE_INT the_const = -1;
1895 int shift = lowest_bit_set;
1896
1897 if ((highest_bit_set != 63
1898 && lowest_bit_set != 0)
1899 || all_bits_between_are_set == 0)
1900 {
1901 the_const =
1902 create_simple_focus_bits (high_bits, low_bits,
1903 lowest_bit_set, 0);
1904 }
1905 else if (lowest_bit_set == 0)
1906 shift = -(63 - highest_bit_set);
1907
1908 gcc_assert (SPARC_SIMM13_P (the_const));
1909 gcc_assert (shift != 0);
1910
1911 emit_insn (gen_safe_SET64 (temp, the_const));
1912 if (shift > 0)
1913 emit_insn (gen_rtx_SET (VOIDmode,
1914 op0,
1915 gen_rtx_ASHIFT (DImode,
1916 temp,
1917 GEN_INT (shift))));
1918 else if (shift < 0)
1919 emit_insn (gen_rtx_SET (VOIDmode,
1920 op0,
1921 gen_rtx_LSHIFTRT (DImode,
1922 temp,
1923 GEN_INT (-shift))));
1924 return;
1925 }
1926
1927 /* Now handle a range of 22 or fewer bits set somewhere.
1928 * 1) sethi %hi(focus_bits), %reg
1929 * sllx %reg, shift, %reg
1930 * 2) sethi %hi(focus_bits), %reg
1931 * srlx %reg, shift, %reg
1932 */
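/* As a rough example, 0x000001fffc000000 (bits 26..40 set) gives
   focus_bits == 0x1fffc00 and would be emitted as
       sethi  %hi(0x1fffc00), %reg
       sllx   %reg, 16, %reg  */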
1933 if ((highest_bit_set - lowest_bit_set) < 21)
1934 {
1935 unsigned HOST_WIDE_INT focus_bits =
1936 create_simple_focus_bits (high_bits, low_bits,
1937 lowest_bit_set, 10);
1938
1939 gcc_assert (SPARC_SETHI_P (focus_bits));
1940 gcc_assert (lowest_bit_set != 10);
1941
1942 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1943
1944 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1945 if (lowest_bit_set < 10)
1946 emit_insn (gen_rtx_SET (VOIDmode,
1947 op0,
1948 gen_rtx_LSHIFTRT (DImode, temp,
1949 GEN_INT (10 - lowest_bit_set))));
1950 else if (lowest_bit_set > 10)
1951 emit_insn (gen_rtx_SET (VOIDmode,
1952 op0,
1953 gen_rtx_ASHIFT (DImode, temp,
1954 GEN_INT (lowest_bit_set - 10))));
1955 return;
1956 }
1957
1958 /* 1) sethi %hi(low_bits), %reg
1959 * or %reg, %lo(low_bits), %reg
1960 * 2) sethi %hi(~low_bits), %reg
1961 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1962 */
1963 if (high_bits == 0
1964 || high_bits == 0xffffffff)
1965 {
1966 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1967 (high_bits == 0xffffffff));
1968 return;
1969 }
1970
1971 /* Now, try 3-insn sequences. */
1972
1973 /* 1) sethi %hi(high_bits), %reg
1974 * or %reg, %lo(high_bits), %reg
1975 * sllx %reg, 32, %reg
1976 */
1977 if (low_bits == 0)
1978 {
1979 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1980 return;
1981 }
1982
1983 /* We may be able to do something quick
1984 when the constant is negated, so try that. */
1985 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1986 (~low_bits) & 0xfffffc00))
1987 {
1988 /* NOTE: The trailing bits get XOR'd so we need the
1989 non-negated bits, not the negated ones. */
1990 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1991
1992 if ((((~high_bits) & 0xffffffff) == 0
1993 && ((~low_bits) & 0x80000000) == 0)
1994 || (((~high_bits) & 0xffffffff) == 0xffffffff
1995 && ((~low_bits) & 0x80000000) != 0))
1996 {
1997 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1998
1999 if ((SPARC_SETHI_P (fast_int)
2000 && (~high_bits & 0xffffffff) == 0)
2001 || SPARC_SIMM13_P (fast_int))
2002 emit_insn (gen_safe_SET64 (temp, fast_int));
2003 else
2004 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2005 }
2006 else
2007 {
2008 rtx negated_const;
2009 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2010 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2011 sparc_emit_set_const64 (temp, negated_const);
2012 }
2013
2014 /* If we are XOR'ing with -1, then we should emit a one's complement
2015 instead. This way the combiner will notice logical operations
2016 such as ANDN later on and substitute. */
2017 if (trailing_bits == 0x3ff)
2018 {
2019 emit_insn (gen_rtx_SET (VOIDmode, op0,
2020 gen_rtx_NOT (DImode, temp)));
2021 }
2022 else
2023 {
2024 emit_insn (gen_rtx_SET (VOIDmode,
2025 op0,
2026 gen_safe_XOR64 (temp,
2027 (-0x400 | trailing_bits))));
2028 }
2029 return;
2030 }
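/* A sketch of this trick in action: for 0x00000ffffffffff0 the
   negated constant 0xfffff00000000000 is loaded first, so we would
   emit roughly
       mov   -1, %tmp
       sllx  %tmp, 44, %tmp
       xor   %tmp, -16, %dst       ! -16 == -0x400 | 0x3f0
   i.e. three insns instead of a full decomposition.  */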
2031
2032 /* 1) sethi %hi(xxx), %reg
2033 * or %reg, %lo(xxx), %reg
2034 * sllx %reg, yyy, %reg
2035 *
2036 * ??? This is just a generalized version of the low_bits==0
2037 * thing above, FIXME...
2038 */
2039 if ((highest_bit_set - lowest_bit_set) < 32)
2040 {
2041 unsigned HOST_WIDE_INT focus_bits =
2042 create_simple_focus_bits (high_bits, low_bits,
2043 lowest_bit_set, 0);
2044
2045 /* We can't get here in this state. */
2046 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2047
2048 /* So what we know is that the set bits straddle the
2049 middle of the 64-bit word. */
2050 sparc_emit_set_const64_quick2 (op0, temp,
2051 focus_bits, 0,
2052 lowest_bit_set);
2053 return;
2054 }
2055
2056 /* 1) sethi %hi(high_bits), %reg
2057 * or %reg, %lo(high_bits), %reg
2058 * sllx %reg, 32, %reg
2059 * or %reg, low_bits, %reg
2060 */
2061 if (SPARC_SIMM13_P (low_bits)
2062 && ((int)low_bits > 0))
2063 {
2064 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2065 return;
2066 }
2067
2068 /* The easiest way when all else fails, is full decomposition. */
2069 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2070 }
2071 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2072
2073 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2074 return the mode to be used for the comparison. For floating-point,
2075 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2076 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2077 processing is needed. */
2078
2079 enum machine_mode
2080 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2081 {
2082 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2083 {
2084 switch (op)
2085 {
2086 case EQ:
2087 case NE:
2088 case UNORDERED:
2089 case ORDERED:
2090 case UNLT:
2091 case UNLE:
2092 case UNGT:
2093 case UNGE:
2094 case UNEQ:
2095 case LTGT:
2096 return CCFPmode;
2097
2098 case LT:
2099 case LE:
2100 case GT:
2101 case GE:
2102 return CCFPEmode;
2103
2104 default:
2105 gcc_unreachable ();
2106 }
2107 }
2108 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2109 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2110 {
2111 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2112 return CCX_NOOVmode;
2113 else
2114 return CC_NOOVmode;
2115 }
2116 else
2117 {
2118 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2119 return CCXmode;
2120 else
2121 return CCmode;
2122 }
2123 }
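/* E.g. a GT comparison of two DFmode values yields CCFPEmode and an
   UNORDERED one yields CCFPmode; a comparison whose first operand is
   a (plus:SI ...) yields CC_NOOVmode, while a plain SImode register
   comparison yields CCmode.  */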
2124
2125 /* Emit the compare insn and return the CC reg for a CODE comparison
2126 with operands X and Y. */
2127
2128 static rtx
2129 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2130 {
2131 enum machine_mode mode;
2132 rtx cc_reg;
2133
2134 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2135 return x;
2136
2137 mode = SELECT_CC_MODE (code, x, y);
2138
2139 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2140 fcc regs (cse can't tell they're really call clobbered regs and will
2141 remove a duplicate comparison even if there is an intervening function
2142 call - it will then try to reload the cc reg via an int reg which is why
2143 we need the movcc patterns). It is possible to provide the movcc
2144 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2145 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2146 to tell cse that CCFPE mode registers (even pseudos) are call
2147 clobbered. */
2148
2149 /* ??? This is an experiment. Rather than making changes to cse which may
2150 or may not be easy/clean, we do our own cse. This is possible because
2151 we will generate hard registers. Cse knows they're call clobbered (it
2152 doesn't know the same thing about pseudos). If we guess wrong, no big
2153 deal, but if we win, great! */
2154
2155 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2156 #if 1 /* experiment */
2157 {
2158 int reg;
2159 /* We cycle through the registers to ensure they're all exercised. */
2160 static int next_fcc_reg = 0;
2161 /* Previous x,y for each fcc reg. */
2162 static rtx prev_args[4][2];
2163
2164 /* Scan prev_args for x,y. */
2165 for (reg = 0; reg < 4; reg++)
2166 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2167 break;
2168 if (reg == 4)
2169 {
2170 reg = next_fcc_reg;
2171 prev_args[reg][0] = x;
2172 prev_args[reg][1] = y;
2173 next_fcc_reg = (next_fcc_reg + 1) & 3;
2174 }
2175 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2176 }
2177 #else
2178 cc_reg = gen_reg_rtx (mode);
2179 #endif /* ! experiment */
2180 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2181 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2182 else
2183 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2184
2185 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2186 will only result in an unrecognizable insn, so there is no point in asserting. */
2187 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2188
2189 return cc_reg;
2190 }
2191
2192
2193 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2194
2195 rtx
2196 gen_compare_reg (rtx cmp)
2197 {
2198 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2199 }
2200
2201 /* This function is used for v9 only.
2202 DEST is the target of the Scc insn.
2203 CODE is the code for an Scc's comparison.
2204 X and Y are the values we compare.
2205
2206 This function is needed to turn
2207
2208 (set (reg:SI 110)
2209 (gt (reg:CCX 100 %icc)
2210 (const_int 0)))
2211 into
2212 (set (reg:SI 110)
2213 (gt:DI (reg:CCX 100 %icc)
2214 (const_int 0)))
2215
2216 I.e., the instruction recognizer needs to see the mode of the comparison to
2217 find the right instruction. We could use "gt:DI" right in the
2218 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2219
2220 static int
2221 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2222 {
2223 if (! TARGET_ARCH64
2224 && (GET_MODE (x) == DImode
2225 || GET_MODE (dest) == DImode))
2226 return 0;
2227
2228 /* Try to use the movrCC insns. */
2229 if (TARGET_ARCH64
2230 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2231 && y == const0_rtx
2232 && v9_regcmp_p (compare_code))
2233 {
2234 rtx op0 = x;
2235 rtx temp;
2236
2237 /* Special case for op0 != 0. This can be done with one instruction if
2238 dest == x. */
2239
2240 if (compare_code == NE
2241 && GET_MODE (dest) == DImode
2242 && rtx_equal_p (op0, dest))
2243 {
2244 emit_insn (gen_rtx_SET (VOIDmode, dest,
2245 gen_rtx_IF_THEN_ELSE (DImode,
2246 gen_rtx_fmt_ee (compare_code, DImode,
2247 op0, const0_rtx),
2248 const1_rtx,
2249 dest)));
2250 return 1;
2251 }
2252
2253 if (reg_overlap_mentioned_p (dest, op0))
2254 {
2255 /* Handle the case where dest == x.
2256 We "early clobber" the result. */
2257 op0 = gen_reg_rtx (GET_MODE (x));
2258 emit_move_insn (op0, x);
2259 }
2260
2261 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2262 if (GET_MODE (op0) != DImode)
2263 {
2264 temp = gen_reg_rtx (DImode);
2265 convert_move (temp, op0, 0);
2266 }
2267 else
2268 temp = op0;
2269 emit_insn (gen_rtx_SET (VOIDmode, dest,
2270 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2271 gen_rtx_fmt_ee (compare_code, DImode,
2272 temp, const0_rtx),
2273 const1_rtx,
2274 dest)));
2275 return 1;
2276 }
2277 else
2278 {
2279 x = gen_compare_reg_1 (compare_code, x, y);
2280 y = const0_rtx;
2281
2282 gcc_assert (GET_MODE (x) != CC_NOOVmode
2283 && GET_MODE (x) != CCX_NOOVmode);
2284
2285 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2286 emit_insn (gen_rtx_SET (VOIDmode, dest,
2287 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2288 gen_rtx_fmt_ee (compare_code,
2289 GET_MODE (x), x, y),
2290 const1_rtx, dest)));
2291 return 1;
2292 }
2293 }
2294
2295
2296 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2297 without jumps using the addx/subx instructions. */
2298
2299 bool
2300 emit_scc_insn (rtx operands[])
2301 {
2302 rtx tem;
2303 rtx x;
2304 rtx y;
2305 enum rtx_code code;
2306
2307 /* The quad-word fp compare library routines all return nonzero to indicate
2308 true, which is different from the equivalent libgcc routines, so we must
2309 handle them specially here. */
2310 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2311 {
2312 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2313 GET_CODE (operands[1]));
2314 operands[2] = XEXP (operands[1], 0);
2315 operands[3] = XEXP (operands[1], 1);
2316 }
2317
2318 code = GET_CODE (operands[1]);
2319 x = operands[2];
2320 y = operands[3];
2321
2322 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2323 more applications). The exception to this is "reg != 0" which can
2324 be done in one instruction on v9 (so we do it). */
2325 if (code == EQ)
2326 {
2327 if (GET_MODE (x) == SImode)
2328 {
2329 rtx pat = gen_seqsi_special (operands[0], x, y);
2330 emit_insn (pat);
2331 return true;
2332 }
2333 else if (GET_MODE (x) == DImode)
2334 {
2335 rtx pat = gen_seqdi_special (operands[0], x, y);
2336 emit_insn (pat);
2337 return true;
2338 }
2339 }
2340
2341 if (code == NE)
2342 {
2343 if (GET_MODE (x) == SImode)
2344 {
2345 rtx pat = gen_snesi_special (operands[0], x, y);
2346 emit_insn (pat);
2347 return true;
2348 }
2349 else if (GET_MODE (x) == DImode)
2350 {
2351 rtx pat = gen_snedi_special (operands[0], x, y);
2352 emit_insn (pat);
2353 return true;
2354 }
2355 }
2356
2357 /* For the rest, on v9 we can use conditional moves. */
2358
2359 if (TARGET_V9)
2360 {
2361 if (gen_v9_scc (operands[0], code, x, y))
2362 return true;
2363 }
2364
2365 /* We can do LTU and GEU using the addx/subx instructions too. And
2366 for GTU/LEU, if both operands are registers swap them and fall
2367 back to the easy case. */
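/* A sketch of the LTU case below: the carry bit left by subcc is
   already the unsigned-less-than result, so the expansion is roughly
       subcc  %o0, %o1, %g0     ! sets C iff x <u y
       addx   %g0, 0, %dst      ! %dst = C
   and GEU similarly materializes 1 - C via subx.  */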
2368 if (code == GTU || code == LEU)
2369 {
2370 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2371 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2372 {
2373 tem = x;
2374 x = y;
2375 y = tem;
2376 code = swap_condition (code);
2377 }
2378 }
2379
2380 if (code == LTU || code == GEU)
2381 {
2382 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2383 gen_rtx_fmt_ee (code, SImode,
2384 gen_compare_reg_1 (code, x, y),
2385 const0_rtx)));
2386 return true;
2387 }
2388
2389 /* Nope, do branches. */
2390 return false;
2391 }
2392
2393 /* Emit a conditional jump insn for the v9 architecture using comparison code
2394 CODE and jump target LABEL.
2395 This function exists to take advantage of the v9 brxx insns. */
2396
2397 static void
2398 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2399 {
2400 emit_jump_insn (gen_rtx_SET (VOIDmode,
2401 pc_rtx,
2402 gen_rtx_IF_THEN_ELSE (VOIDmode,
2403 gen_rtx_fmt_ee (code, GET_MODE (op0),
2404 op0, const0_rtx),
2405 gen_rtx_LABEL_REF (VOIDmode, label),
2406 pc_rtx)));
2407 }
2408
2409 void
2410 emit_conditional_branch_insn (rtx operands[])
2411 {
2412 /* The quad-word fp compare library routines all return nonzero to indicate
2413 true, which is different from the equivalent libgcc routines, so we must
2414 handle them specially here. */
2415 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2416 {
2417 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2418 GET_CODE (operands[0]));
2419 operands[1] = XEXP (operands[0], 0);
2420 operands[2] = XEXP (operands[0], 1);
2421 }
2422
2423 if (TARGET_ARCH64 && operands[2] == const0_rtx
2424 && GET_CODE (operands[1]) == REG
2425 && GET_MODE (operands[1]) == DImode)
2426 {
2427 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2428 return;
2429 }
2430
2431 operands[1] = gen_compare_reg (operands[0]);
2432 operands[2] = const0_rtx;
2433 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2434 operands[1], operands[2]);
2435 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2436 operands[3]));
2437 }
2438
2439
2440 /* Generate a DFmode part of a hard TFmode register.
2441 REG is the TFmode hard register, LOW is 1 for the
2442 low 64 bits of the register and 0 otherwise.
2443 */
2444 rtx
2445 gen_df_reg (rtx reg, int low)
2446 {
2447 int regno = REGNO (reg);
2448
2449 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2450 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2451 return gen_rtx_REG (DFmode, regno);
2452 }
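/* For example, on (big-endian) SPARC a TFmode value in %f0 has its
   high-order DFmode half in %f0 and its low-order half in %f2, so
   this returns %f2 for LOW == 1; for a TFmode value living in an
   ARCH64 integer register pair the adjustment is one regno, not
   two.  */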
2453 \f
2454 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2455 Unlike normal calls, TFmode operands are passed by reference. It is
2456 assumed that no more than 3 operands are required. */
2457
2458 static void
2459 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2460 {
2461 rtx ret_slot = NULL, arg[3], func_sym;
2462 int i;
2463
2464 /* We only expect to be called for conversions, unary, and binary ops. */
2465 gcc_assert (nargs == 2 || nargs == 3);
2466
2467 for (i = 0; i < nargs; ++i)
2468 {
2469 rtx this_arg = operands[i];
2470 rtx this_slot;
2471
2472 /* TFmode arguments and return values are passed by reference. */
2473 if (GET_MODE (this_arg) == TFmode)
2474 {
2475 int force_stack_temp;
2476
2477 force_stack_temp = 0;
2478 if (TARGET_BUGGY_QP_LIB && i == 0)
2479 force_stack_temp = 1;
2480
2481 if (GET_CODE (this_arg) == MEM
2482 && ! force_stack_temp)
2483 this_arg = XEXP (this_arg, 0);
2484 else if (CONSTANT_P (this_arg)
2485 && ! force_stack_temp)
2486 {
2487 this_slot = force_const_mem (TFmode, this_arg);
2488 this_arg = XEXP (this_slot, 0);
2489 }
2490 else
2491 {
2492 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2493
2494 /* Operand 0 is the return value. We'll copy it out later. */
2495 if (i > 0)
2496 emit_move_insn (this_slot, this_arg);
2497 else
2498 ret_slot = this_slot;
2499
2500 this_arg = XEXP (this_slot, 0);
2501 }
2502 }
2503
2504 arg[i] = this_arg;
2505 }
2506
2507 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2508
2509 if (GET_MODE (operands[0]) == TFmode)
2510 {
2511 if (nargs == 2)
2512 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2513 arg[0], GET_MODE (arg[0]),
2514 arg[1], GET_MODE (arg[1]));
2515 else
2516 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2517 arg[0], GET_MODE (arg[0]),
2518 arg[1], GET_MODE (arg[1]),
2519 arg[2], GET_MODE (arg[2]));
2520
2521 if (ret_slot)
2522 emit_move_insn (operands[0], ret_slot);
2523 }
2524 else
2525 {
2526 rtx ret;
2527
2528 gcc_assert (nargs == 2);
2529
2530 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2531 GET_MODE (operands[0]), 1,
2532 arg[1], GET_MODE (arg[1]));
2533
2534 if (ret != operands[0])
2535 emit_move_insn (operands[0], ret);
2536 }
2537 }
2538
2539 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2540
2541 static void
2542 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2543 {
2544 const char *func;
2545
2546 switch (code)
2547 {
2548 case PLUS:
2549 func = "_Qp_add";
2550 break;
2551 case MINUS:
2552 func = "_Qp_sub";
2553 break;
2554 case MULT:
2555 func = "_Qp_mul";
2556 break;
2557 case DIV:
2558 func = "_Qp_div";
2559 break;
2560 default:
2561 gcc_unreachable ();
2562 }
2563
2564 emit_soft_tfmode_libcall (func, 3, operands);
2565 }
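/* E.g. a TFmode PLUS without hard quad support becomes, roughly, a
   call _Qp_add (&dest, &op1, &op2), with all three TFmode values
   passed by reference as described above.  */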
2566
2567 static void
2568 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2569 {
2570 const char *func;
2571
2572 gcc_assert (code == SQRT);
2573 func = "_Qp_sqrt";
2574
2575 emit_soft_tfmode_libcall (func, 2, operands);
2576 }
2577
2578 static void
2579 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2580 {
2581 const char *func;
2582
2583 switch (code)
2584 {
2585 case FLOAT_EXTEND:
2586 switch (GET_MODE (operands[1]))
2587 {
2588 case SFmode:
2589 func = "_Qp_stoq";
2590 break;
2591 case DFmode:
2592 func = "_Qp_dtoq";
2593 break;
2594 default:
2595 gcc_unreachable ();
2596 }
2597 break;
2598
2599 case FLOAT_TRUNCATE:
2600 switch (GET_MODE (operands[0]))
2601 {
2602 case SFmode:
2603 func = "_Qp_qtos";
2604 break;
2605 case DFmode:
2606 func = "_Qp_qtod";
2607 break;
2608 default:
2609 gcc_unreachable ();
2610 }
2611 break;
2612
2613 case FLOAT:
2614 switch (GET_MODE (operands[1]))
2615 {
2616 case SImode:
2617 func = "_Qp_itoq";
2618 if (TARGET_ARCH64)
2619 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2620 break;
2621 case DImode:
2622 func = "_Qp_xtoq";
2623 break;
2624 default:
2625 gcc_unreachable ();
2626 }
2627 break;
2628
2629 case UNSIGNED_FLOAT:
2630 switch (GET_MODE (operands[1]))
2631 {
2632 case SImode:
2633 func = "_Qp_uitoq";
2634 if (TARGET_ARCH64)
2635 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2636 break;
2637 case DImode:
2638 func = "_Qp_uxtoq";
2639 break;
2640 default:
2641 gcc_unreachable ();
2642 }
2643 break;
2644
2645 case FIX:
2646 switch (GET_MODE (operands[0]))
2647 {
2648 case SImode:
2649 func = "_Qp_qtoi";
2650 break;
2651 case DImode:
2652 func = "_Qp_qtox";
2653 break;
2654 default:
2655 gcc_unreachable ();
2656 }
2657 break;
2658
2659 case UNSIGNED_FIX:
2660 switch (GET_MODE (operands[0]))
2661 {
2662 case SImode:
2663 func = "_Qp_qtoui";
2664 break;
2665 case DImode:
2666 func = "_Qp_qtoux";
2667 break;
2668 default:
2669 gcc_unreachable ();
2670 }
2671 break;
2672
2673 default:
2674 gcc_unreachable ();
2675 }
2676
2677 emit_soft_tfmode_libcall (func, 2, operands);
2678 }
2679
2680 /* Expand a hard-float TFmode operation. All arguments must be in
2681 registers. */
2682
2683 static void
2684 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2685 {
2686 rtx op, dest;
2687
2688 if (GET_RTX_CLASS (code) == RTX_UNARY)
2689 {
2690 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2691 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2692 }
2693 else
2694 {
2695 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2696 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2697 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2698 operands[1], operands[2]);
2699 }
2700
2701 if (register_operand (operands[0], VOIDmode))
2702 dest = operands[0];
2703 else
2704 dest = gen_reg_rtx (GET_MODE (operands[0]));
2705
2706 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2707
2708 if (dest != operands[0])
2709 emit_move_insn (operands[0], dest);
2710 }
2711
2712 void
2713 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2714 {
2715 if (TARGET_HARD_QUAD)
2716 emit_hard_tfmode_operation (code, operands);
2717 else
2718 emit_soft_tfmode_binop (code, operands);
2719 }
2720
2721 void
2722 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2723 {
2724 if (TARGET_HARD_QUAD)
2725 emit_hard_tfmode_operation (code, operands);
2726 else
2727 emit_soft_tfmode_unop (code, operands);
2728 }
2729
2730 void
2731 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2732 {
2733 if (TARGET_HARD_QUAD)
2734 emit_hard_tfmode_operation (code, operands);
2735 else
2736 emit_soft_tfmode_cvt (code, operands);
2737 }
2738 \f
2739 /* Return nonzero if a branch/jump/call instruction will emit a
2740 nop into its delay slot. */
2741
2742 int
2743 empty_delay_slot (rtx insn)
2744 {
2745 rtx seq;
2746
2747 /* If no previous instruction (should not happen), return true. */
2748 if (PREV_INSN (insn) == NULL)
2749 return 1;
2750
2751 seq = NEXT_INSN (PREV_INSN (insn));
2752 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2753 return 0;
2754
2755 return 1;
2756 }
2757
2758 /* Return nonzero if TRIAL can go into the call delay slot. */
2759
2760 int
2761 tls_call_delay (rtx trial)
2762 {
2763 rtx pat;
2764
2765 /* Binutils allows
2766 call __tls_get_addr, %tgd_call (foo)
2767 add %l7, %o0, %o0, %tgd_add (foo)
2768 while Sun as/ld does not. */
2769 if (TARGET_GNU_TLS || !TARGET_TLS)
2770 return 1;
2771
2772 pat = PATTERN (trial);
2773
2774 /* We must reject tgd_add{32|64}, i.e.
2775 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2776 and tldm_add{32|64}, i.e.
2777 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2778 for Sun as/ld. */
2779 if (GET_CODE (pat) == SET
2780 && GET_CODE (SET_SRC (pat)) == PLUS)
2781 {
2782 rtx unspec = XEXP (SET_SRC (pat), 1);
2783
2784 if (GET_CODE (unspec) == UNSPEC
2785 && (XINT (unspec, 1) == UNSPEC_TLSGD
2786 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2787 return 0;
2788 }
2789
2790 return 1;
2791 }
2792
2793 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2794 instruction. RETURN_P is true if the v9 variant 'return' is to be
2795 considered in the test too.
2796
2797 TRIAL must be a SET whose destination is a REG appropriate for the
2798 'restore' instruction or, if RETURN_P is true, for the 'return'
2799 instruction. */
2800
2801 static int
2802 eligible_for_restore_insn (rtx trial, bool return_p)
2803 {
2804 rtx pat = PATTERN (trial);
2805 rtx src = SET_SRC (pat);
2806
2807 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2808 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2809 && arith_operand (src, GET_MODE (src)))
2810 {
2811 if (TARGET_ARCH64)
2812 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2813 else
2814 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2815 }
2816
2817 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2818 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2819 && arith_double_operand (src, GET_MODE (src)))
2820 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2821
2822 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2823 else if (! TARGET_FPU && register_operand (src, SFmode))
2824 return 1;
2825
2826 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2827 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2828 return 1;
2829
2830 /* If we have the 'return' instruction, anything that does not use
2831 local or output registers and can go into a delay slot wins. */
2832 else if (return_p
2833 && TARGET_V9
2834 && !epilogue_renumber (&pat, 1)
2835 && get_attr_in_uncond_branch_delay (trial)
2836 == IN_UNCOND_BRANCH_DELAY_TRUE)
2837 return 1;
2838
2839 /* The 'restore src1,src2,dest' pattern for SImode. */
2840 else if (GET_CODE (src) == PLUS
2841 && register_operand (XEXP (src, 0), SImode)
2842 && arith_operand (XEXP (src, 1), SImode))
2843 return 1;
2844
2845 /* The 'restore src1,src2,dest' pattern for DImode. */
2846 else if (GET_CODE (src) == PLUS
2847 && register_operand (XEXP (src, 0), DImode)
2848 && arith_double_operand (XEXP (src, 1), DImode))
2849 return 1;
2850
2851 /* The 'restore src1,%lo(src2),dest' pattern. */
2852 else if (GET_CODE (src) == LO_SUM
2853 && ! TARGET_CM_MEDMID
2854 && ((register_operand (XEXP (src, 0), SImode)
2855 && immediate_operand (XEXP (src, 1), SImode))
2856 || (TARGET_ARCH64
2857 && register_operand (XEXP (src, 0), DImode)
2858 && immediate_operand (XEXP (src, 1), DImode))))
2859 return 1;
2860
2861 /* The 'restore src,src,dest' pattern. */
2862 else if (GET_CODE (src) == ASHIFT
2863 && (register_operand (XEXP (src, 0), SImode)
2864 || register_operand (XEXP (src, 0), DImode))
2865 && XEXP (src, 1) == const1_rtx)
2866 return 1;
2867
2868 return 0;
2869 }
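/* For instance, `mov 1, %i0' in a return delay slot can be combined
   into `restore %g0, 1, %o0', and `add %i0, %i1, %i0' into
   `restore %i0, %i1, %o0'.  */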
2870
2871 /* Return nonzero if TRIAL can go into the function return's delay slot. */
2872
2873 int
2874 eligible_for_return_delay (rtx trial)
2875 {
2876 int regno;
2877 rtx pat;
2878
2879 if (GET_CODE (trial) != INSN)
2880 return 0;
2881
2882 if (get_attr_length (trial) != 1)
2883 return 0;
2884
2885 /* If the function uses __builtin_eh_return, the eh_return machinery
2886 occupies the delay slot. */
2887 if (crtl->calls_eh_return)
2888 return 0;
2889
2890 /* In the case of a leaf or flat function, anything can go into the slot. */
2891 if (sparc_leaf_function_p || TARGET_FLAT)
2892 return
2893 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
2894
2895 pat = PATTERN (trial);
2896 if (GET_CODE (pat) == PARALLEL)
2897 {
2898 int i;
2899
2900 if (! TARGET_V9)
2901 return 0;
2902 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2903 {
2904 rtx expr = XVECEXP (pat, 0, i);
2905 if (GET_CODE (expr) != SET)
2906 return 0;
2907 if (GET_CODE (SET_DEST (expr)) != REG)
2908 return 0;
2909 regno = REGNO (SET_DEST (expr));
2910 if (regno >= 8 && regno < 24)
2911 return 0;
2912 }
2913 return !epilogue_renumber (&pat, 1)
2914 && (get_attr_in_uncond_branch_delay (trial)
2915 == IN_UNCOND_BRANCH_DELAY_TRUE);
2916 }
2917
2918 if (GET_CODE (pat) != SET)
2919 return 0;
2920
2921 if (GET_CODE (SET_DEST (pat)) != REG)
2922 return 0;
2923
2924 regno = REGNO (SET_DEST (pat));
2925
2926 /* Otherwise, only operations which can be done in tandem with
2927 a `restore' or `return' insn can go into the delay slot. */
2928 if (regno >= 8 && regno < 24)
2929 return 0;
2930
2931 /* If this instruction sets up a floating-point register and we have a return
2932 instruction, it can probably go in. But a restore will not work
2933 with FP_REGS. */
2934 if (regno >= 32)
2935 return (TARGET_V9
2936 && !epilogue_renumber (&pat, 1)
2937 && get_attr_in_uncond_branch_delay (trial)
2938 == IN_UNCOND_BRANCH_DELAY_TRUE);
2939
2940 return eligible_for_restore_insn (trial, true);
2941 }
2942
2943 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
2944
2945 int
2946 eligible_for_sibcall_delay (rtx trial)
2947 {
2948 rtx pat;
2949
2950 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2951 return 0;
2952
2953 if (get_attr_length (trial) != 1)
2954 return 0;
2955
2956 pat = PATTERN (trial);
2957
2958 if (sparc_leaf_function_p || TARGET_FLAT)
2959 {
2960 /* If the tail call is done using the call instruction,
2961 we have to restore %o7 in the delay slot. */
2962 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2963 return 0;
2964
2965 /* %g1 is used to build the function address. */
2966 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2967 return 0;
2968
2969 return 1;
2970 }
2971
2972 /* Otherwise, only operations which can be done in tandem with
2973 a `restore' insn can go into the delay slot. */
2974 if (GET_CODE (SET_DEST (pat)) != REG
2975 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2976 || REGNO (SET_DEST (pat)) >= 32)
2977 return 0;
2978
2979 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2980 in most cases. */
2981 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2982 return 0;
2983
2984 return eligible_for_restore_insn (trial, false);
2985 }
2986
2987 int
2988 short_branch (int uid1, int uid2)
2989 {
2990 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2991
2992 /* Leave a few words of "slop". */
2993 if (delta >= -1023 && delta <= 1022)
2994 return 1;
2995
2996 return 0;
2997 }
2998
2999 /* Return nonzero if REG is not used after INSN.
3000 We assume REG is a reload reg, and therefore does
3001 not live past labels or calls or jumps. */
3002 int
3003 reg_unused_after (rtx reg, rtx insn)
3004 {
3005 enum rtx_code code, prev_code = UNKNOWN;
3006
3007 while ((insn = NEXT_INSN (insn)))
3008 {
3009 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
3010 return 1;
3011
3012 code = GET_CODE (insn);
3013 if (GET_CODE (insn) == CODE_LABEL)
3014 return 1;
3015
3016 if (INSN_P (insn))
3017 {
3018 rtx set = single_set (insn);
3019 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
3020 if (set && in_src)
3021 return 0;
3022 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
3023 return 1;
3024 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
3025 return 0;
3026 }
3027 prev_code = code;
3028 }
3029 return 1;
3030 }
3031 \f
3032 /* Determine if it's legal to put X into the constant pool. This
3033 is not possible if X contains the address of a symbol that is
3034 not constant (TLS) or not known at final link time (PIC). */
3035
3036 static bool
3037 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3038 {
3039 switch (GET_CODE (x))
3040 {
3041 case CONST_INT:
3042 case CONST_DOUBLE:
3043 case CONST_VECTOR:
3044 /* Accept all non-symbolic constants. */
3045 return false;
3046
3047 case LABEL_REF:
3048 /* Labels are OK iff we are non-PIC. */
3049 return flag_pic != 0;
3050
3051 case SYMBOL_REF:
3052 /* 'Naked' TLS symbol references are never OK;
3053 non-TLS symbols are OK iff we are non-PIC. */
3054 if (SYMBOL_REF_TLS_MODEL (x))
3055 return true;
3056 else
3057 return flag_pic != 0;
3058
3059 case CONST:
3060 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3061 case PLUS:
3062 case MINUS:
3063 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3064 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3065 case UNSPEC:
3066 return true;
3067 default:
3068 gcc_unreachable ();
3069 }
3070 }
3071 \f
3072 /* Global Offset Table support. */
3073 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3074 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3075
3076 /* Return the SYMBOL_REF for the Global Offset Table. */
3077
3078 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3079
3080 static rtx
3081 sparc_got (void)
3082 {
3083 if (!sparc_got_symbol)
3084 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3085
3086 return sparc_got_symbol;
3087 }
3088
3089 /* Ensure that we are not using patterns that are not OK with PIC. */
3090
3091 int
3092 check_pic (int i)
3093 {
3094 rtx op;
3095
3096 switch (flag_pic)
3097 {
3098 case 1:
3099 op = recog_data.operand[i];
3100 gcc_assert (GET_CODE (op) != SYMBOL_REF
3101 && (GET_CODE (op) != CONST
3102 || (GET_CODE (XEXP (op, 0)) == MINUS
3103 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3104 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
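/* FALLTHRU */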
3105 case 2:
3106 default:
3107 return 1;
3108 }
3109 }
3110
3111 /* Return true if X is an address which needs a temporary register when
3112 reloaded while generating PIC code. */
3113
3114 int
3115 pic_address_needs_scratch (rtx x)
3116 {
3117 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg. */
3118 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3119 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3120 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3121 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3122 return 1;
3123
3124 return 0;
3125 }
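/* For instance, (const (plus (symbol_ref "x") (const_int 0x2000)))
   needs a scratch register because 0x2000 does not fit in a simm13
   field, whereas the same address with offset 100 does not.  */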
3126
3127 /* Determine if a given RTX is a valid constant. We already know this
3128 satisfies CONSTANT_P. */
3129
3130 static bool
3131 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3132 {
3133 switch (GET_CODE (x))
3134 {
3135 case CONST:
3136 case SYMBOL_REF:
3137 if (sparc_tls_referenced_p (x))
3138 return false;
3139 break;
3140
3141 case CONST_DOUBLE:
3142 if (GET_MODE (x) == VOIDmode)
3143 return true;
3144
3145 /* Floating-point constants are generally not OK.
3146 The only exceptions are 0.0 and all-ones in VIS. */
3147 if (TARGET_VIS
3148 && SCALAR_FLOAT_MODE_P (mode)
3149 && (const_zero_operand (x, mode)
3150 || const_all_ones_operand (x, mode)))
3151 return true;
3152
3153 return false;
3154
3155 case CONST_VECTOR:
3156 /* Vector constants are generally not OK.
3157 The only exceptions are 0 and -1 in VIS. */
3158 if (TARGET_VIS
3159 && (const_zero_operand (x, mode)
3160 || const_all_ones_operand (x, mode)))
3161 return true;
3162
3163 return false;
3164
3165 default:
3166 break;
3167 }
3168
3169 return true;
3170 }
3171
3172 /* Determine if a given RTX is a valid constant address. */
3173
3174 bool
3175 constant_address_p (rtx x)
3176 {
3177 switch (GET_CODE (x))
3178 {
3179 case LABEL_REF:
3180 case CONST_INT:
3181 case HIGH:
3182 return true;
3183
3184 case CONST:
3185 if (flag_pic && pic_address_needs_scratch (x))
3186 return false;
3187 return sparc_legitimate_constant_p (Pmode, x);
3188
3189 case SYMBOL_REF:
3190 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3191
3192 default:
3193 return false;
3194 }
3195 }
3196
3197 /* Nonzero if the constant value X is a legitimate general operand
3198 when generating PIC code. It is given that flag_pic is on and
3199 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3200
3201 bool
3202 legitimate_pic_operand_p (rtx x)
3203 {
3204 if (pic_address_needs_scratch (x))
3205 return false;
3206 if (sparc_tls_referenced_p (x))
3207 return false;
3208 return true;
3209 }
3210
3211 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3212 (CONST_INT_P (X) \
3213 && INTVAL (X) >= -0x1000 \
3214 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3215
3216 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3217 (CONST_INT_P (X) \
3218 && INTVAL (X) >= -0x1000 \
3219 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
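/* E.g. for DImode accesses RTX_OK_FOR_OFFSET_P accepts offsets in
   [-4096, 4088), keeping offset plus access size within the simm13
   range, while RTX_OK_FOR_OLO10_P stops at 0xc00 - 8 so that a %lo()
   value of up to 0x3ff can still be added without overflowing it.  */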
3220
3221 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3222
3223 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3224 ordinarily. This changes a bit when generating PIC. */
3225
3226 static bool
3227 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3228 {
3229 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3230
3231 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3232 rs1 = addr;
3233 else if (GET_CODE (addr) == PLUS)
3234 {
3235 rs1 = XEXP (addr, 0);
3236 rs2 = XEXP (addr, 1);
3237
3238 /* Canonicalize. REG comes first; if there are no regs,
3239 LO_SUM comes first. */
3240 if (!REG_P (rs1)
3241 && GET_CODE (rs1) != SUBREG
3242 && (REG_P (rs2)
3243 || GET_CODE (rs2) == SUBREG
3244 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3245 {
3246 rs1 = XEXP (addr, 1);
3247 rs2 = XEXP (addr, 0);
3248 }
3249
3250 if ((flag_pic == 1
3251 && rs1 == pic_offset_table_rtx
3252 && !REG_P (rs2)
3253 && GET_CODE (rs2) != SUBREG
3254 && GET_CODE (rs2) != LO_SUM
3255 && GET_CODE (rs2) != MEM
3256 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3257 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3258 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3259 || ((REG_P (rs1)
3260 || GET_CODE (rs1) == SUBREG)
3261 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3262 {
3263 imm1 = rs2;
3264 rs2 = NULL;
3265 }
3266 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3267 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3268 {
3269 /* We prohibit REG + REG for TFmode when there are no quad move insns
3270 and we consequently need to split. We do this because REG+REG
3271 is not an offsettable address. If we get the situation in reload
3272 where source and destination of a movtf pattern are both MEMs with
3273 REG+REG address, then only one of them gets converted to an
3274 offsettable address. */
3275 if (mode == TFmode
3276 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3277 return 0;
3278
3279 /* We prohibit REG + REG on ARCH32 if not optimizing for
3280 DFmode/DImode because then mem_min_alignment is likely to be zero
3281 after reload and the forced split would lack a matching splitter
3282 pattern. */
3283 if (TARGET_ARCH32 && !optimize
3284 && (mode == DFmode || mode == DImode))
3285 return 0;
3286 }
3287 else if (USE_AS_OFFSETABLE_LO10
3288 && GET_CODE (rs1) == LO_SUM
3289 && TARGET_ARCH64
3290 && ! TARGET_CM_MEDMID
3291 && RTX_OK_FOR_OLO10_P (rs2, mode))
3292 {
3293 rs2 = NULL;
3294 imm1 = XEXP (rs1, 1);
3295 rs1 = XEXP (rs1, 0);
3296 if (!CONSTANT_P (imm1)
3297 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3298 return 0;
3299 }
3300 }
3301 else if (GET_CODE (addr) == LO_SUM)
3302 {
3303 rs1 = XEXP (addr, 0);
3304 imm1 = XEXP (addr, 1);
3305
3306 if (!CONSTANT_P (imm1)
3307 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3308 return 0;
3309
3310 /* We can't allow TFmode in 32-bit mode, because an offset greater
3311 than the alignment (8) may cause the LO_SUM to overflow. */
3312 if (mode == TFmode && TARGET_ARCH32)
3313 return 0;
3314 }
3315 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3316 return 1;
3317 else
3318 return 0;
3319
3320 if (GET_CODE (rs1) == SUBREG)
3321 rs1 = SUBREG_REG (rs1);
3322 if (!REG_P (rs1))
3323 return 0;
3324
3325 if (rs2)
3326 {
3327 if (GET_CODE (rs2) == SUBREG)
3328 rs2 = SUBREG_REG (rs2);
3329 if (!REG_P (rs2))
3330 return 0;
3331 }
3332
3333 if (strict)
3334 {
3335 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3336 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3337 return 0;
3338 }
3339 else
3340 {
3341 if ((REGNO (rs1) >= 32
3342 && REGNO (rs1) != FRAME_POINTER_REGNUM
3343 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3344 || (rs2
3345 && (REGNO (rs2) >= 32
3346 && REGNO (rs2) != FRAME_POINTER_REGNUM
3347 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3348 return 0;
3349 }
3350 return 1;
3351 }
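/* Summarizing with a few examples: [%o0 + %o1] and [%o0 + 8] are
   accepted, [%o0 + 0x2000] is not (the offset exceeds simm13), and
   REG + REG is rejected for TFmode unless quad move insns exist, as
   explained above.  */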
3352
3353 /* Return the SYMBOL_REF for the tls_get_addr function. */
3354
3355 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3356
3357 static rtx
3358 sparc_tls_get_addr (void)
3359 {
3360 if (!sparc_tls_symbol)
3361 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3362
3363 return sparc_tls_symbol;
3364 }
3365
3366 /* Return the Global Offset Table to be used in TLS mode. */
3367
3368 static rtx
3369 sparc_tls_got (void)
3370 {
3371 /* In PIC mode, this is just the PIC offset table. */
3372 if (flag_pic)
3373 {
3374 crtl->uses_pic_offset_table = 1;
3375 return pic_offset_table_rtx;
3376 }
3377
3378 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3379 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3380 if (TARGET_SUN_TLS && TARGET_ARCH32)
3381 {
3382 load_got_register ();
3383 return global_offset_table_rtx;
3384 }
3385
3386 /* In all other cases, we load a new pseudo with the GOT symbol. */
3387 return copy_to_reg (sparc_got ());
3388 }
3389
3390 /* Return true if X contains a thread-local symbol. */
3391
3392 static bool
3393 sparc_tls_referenced_p (rtx x)
3394 {
3395 if (!TARGET_HAVE_TLS)
3396 return false;
3397
3398 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3399 x = XEXP (XEXP (x, 0), 0);
3400
3401 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3402 return true;
3403
3404 /* That's all we handle in sparc_legitimize_tls_address for now. */
3405 return false;
3406 }
3407
3408 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3409 this (thread-local) address. */
3410
3411 static rtx
3412 sparc_legitimize_tls_address (rtx addr)
3413 {
3414 rtx temp1, temp2, temp3, ret, o0, got, insn;
3415
3416 gcc_assert (can_create_pseudo_p ());
3417
3418 if (GET_CODE (addr) == SYMBOL_REF)
3419 switch (SYMBOL_REF_TLS_MODEL (addr))
3420 {
3421 case TLS_MODEL_GLOBAL_DYNAMIC:
3422 start_sequence ();
3423 temp1 = gen_reg_rtx (SImode);
3424 temp2 = gen_reg_rtx (SImode);
3425 ret = gen_reg_rtx (Pmode);
3426 o0 = gen_rtx_REG (Pmode, 8);
3427 got = sparc_tls_got ();
3428 emit_insn (gen_tgd_hi22 (temp1, addr));
3429 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3430 if (TARGET_ARCH32)
3431 {
3432 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3433 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3434 addr, const1_rtx));
3435 }
3436 else
3437 {
3438 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3439 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3440 addr, const1_rtx));
3441 }
3442 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3443 insn = get_insns ();
3444 end_sequence ();
3445 emit_libcall_block (insn, ret, o0, addr);
3446 break;
3447
3448 case TLS_MODEL_LOCAL_DYNAMIC:
3449 start_sequence ();
3450 temp1 = gen_reg_rtx (SImode);
3451 temp2 = gen_reg_rtx (SImode);
3452 temp3 = gen_reg_rtx (Pmode);
3453 ret = gen_reg_rtx (Pmode);
3454 o0 = gen_rtx_REG (Pmode, 8);
3455 got = sparc_tls_got ();
3456 emit_insn (gen_tldm_hi22 (temp1));
3457 emit_insn (gen_tldm_lo10 (temp2, temp1));
3458 if (TARGET_ARCH32)
3459 {
3460 emit_insn (gen_tldm_add32 (o0, got, temp2));
3461 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3462 const1_rtx));
3463 }
3464 else
3465 {
3466 emit_insn (gen_tldm_add64 (o0, got, temp2));
3467 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3468 const1_rtx));
3469 }
3470 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3471 insn = get_insns ();
3472 end_sequence ();
3473 emit_libcall_block (insn, temp3, o0,
3474 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3475 UNSPEC_TLSLD_BASE));
3476 temp1 = gen_reg_rtx (SImode);
3477 temp2 = gen_reg_rtx (SImode);
3478 emit_insn (gen_tldo_hix22 (temp1, addr));
3479 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3480 if (TARGET_ARCH32)
3481 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3482 else
3483 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3484 break;
3485
3486 case TLS_MODEL_INITIAL_EXEC:
3487 temp1 = gen_reg_rtx (SImode);
3488 temp2 = gen_reg_rtx (SImode);
3489 temp3 = gen_reg_rtx (Pmode);
3490 got = sparc_tls_got ();
3491 emit_insn (gen_tie_hi22 (temp1, addr));
3492 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3493 if (TARGET_ARCH32)
3494 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3495 else
3496 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3497 if (TARGET_SUN_TLS)
3498 {
3499 ret = gen_reg_rtx (Pmode);
3500 if (TARGET_ARCH32)
3501 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3502 temp3, addr));
3503 else
3504 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3505 temp3, addr));
3506 }
3507 else
3508 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3509 break;
3510
3511 case TLS_MODEL_LOCAL_EXEC:
3512 temp1 = gen_reg_rtx (Pmode);
3513 temp2 = gen_reg_rtx (Pmode);
3514 if (TARGET_ARCH32)
3515 {
3516 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3517 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3518 }
3519 else
3520 {
3521 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3522 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3523 }
3524 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3525 break;
3526
3527 default:
3528 gcc_unreachable ();
3529 }
3530
3531 else if (GET_CODE (addr) == CONST)
3532 {
3533 rtx base, offset;
3534
3535 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3536
3537 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3538 offset = XEXP (XEXP (addr, 0), 1);
3539
3540 base = force_operand (base, NULL_RTX);
3541 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3542 offset = force_reg (Pmode, offset);
3543 ret = gen_rtx_PLUS (Pmode, base, offset);
3544 }
3545
3546 else
3547 gcc_unreachable (); /* for now ... */
3548
3549 return ret;
3550 }
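/* For reference, the global-dynamic case above corresponds roughly
   to the canonical sequence
       sethi  %tgd_hi22(sym), %t1
       add    %t1, %tgd_lo10(sym), %t2
       add    %l7, %t2, %o0, %tgd_add(sym)
       call   __tls_get_addr, %tgd_call(sym)
   with %l7 being the usual GOT register and the thread-local
   address coming back in %o0.  */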
3551
3552 /* Legitimize PIC addresses. If the address is already position-independent,
3553 we return ORIG. Newly generated position-independent addresses go into a
3554 reg. This is REG if nonzero, otherwise we allocate register(s) as
3555 necessary. */
3556
3557 static rtx
3558 sparc_legitimize_pic_address (rtx orig, rtx reg)
3559 {
3560 bool gotdata_op = false;
3561
3562 if (GET_CODE (orig) == SYMBOL_REF
3563 /* See the comment in sparc_expand_move. */
3564 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3565 {
3566 rtx pic_ref, address;
3567 rtx insn;
3568
3569 if (reg == 0)
3570 {
3571 gcc_assert (! reload_in_progress && ! reload_completed);
3572 reg = gen_reg_rtx (Pmode);
3573 }
3574
3575 if (flag_pic == 2)
3576 {
3577 /* If not during reload, allocate another temp reg here for loading
3578 in the address, so that these instructions can be optimized
3579 properly. */
3580 rtx temp_reg = ((reload_in_progress || reload_completed)
3581 ? reg : gen_reg_rtx (Pmode));
3582
3583 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3584 won't get confused into thinking that these two instructions
3585 are loading in the true address of the symbol. If in the
3586 future a PIC rtx exists, that should be used instead. */
3587 if (TARGET_ARCH64)
3588 {
3589 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3590 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3591 }
3592 else
3593 {
3594 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3595 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3596 }
3597 address = temp_reg;
3598 gotdata_op = true;
3599 }
3600 else
3601 address = orig;
3602
3603 crtl->uses_pic_offset_table = 1;
3604 if (gotdata_op)
3605 {
3606 if (TARGET_ARCH64)
3607 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3608 pic_offset_table_rtx,
3609 address, orig));
3610 else
3611 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3612 pic_offset_table_rtx,
3613 address, orig));
3614 }
3615 else
3616 {
3617 pic_ref
3618 = gen_const_mem (Pmode,
3619 gen_rtx_PLUS (Pmode,
3620 pic_offset_table_rtx, address));
3621 insn = emit_move_insn (reg, pic_ref);
3622 }
3623
3624 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3625 by loop. */
3626 set_unique_reg_note (insn, REG_EQUAL, orig);
3627 return reg;
3628 }
3629 else if (GET_CODE (orig) == CONST)
3630 {
3631 rtx base, offset;
3632
3633 if (GET_CODE (XEXP (orig, 0)) == PLUS
3634 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3635 return orig;
3636
3637 if (reg == 0)
3638 {
3639 gcc_assert (! reload_in_progress && ! reload_completed);
3640 reg = gen_reg_rtx (Pmode);
3641 }
3642
3643 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3644 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3645 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3646 base == reg ? NULL_RTX : reg);
3647
3648 if (GET_CODE (offset) == CONST_INT)
3649 {
3650 if (SMALL_INT (offset))
3651 return plus_constant (base, INTVAL (offset));
3652 else if (! reload_in_progress && ! reload_completed)
3653 offset = force_reg (Pmode, offset);
3654 else
3655 /* If we reach here, then something is seriously wrong. */
3656 gcc_unreachable ();
3657 }
3658 return gen_rtx_PLUS (Pmode, base, offset);
3659 }
3660 else if (GET_CODE (orig) == LABEL_REF)
3661 /* ??? We ought to be checking that the register is live instead, in case
3662 it is eliminated. */
3663 crtl->uses_pic_offset_table = 1;
3664
3665 return orig;
3666 }
3667
3668 /* Try machine-dependent ways of modifying an illegitimate address X
3669 to be legitimate. If we find one, return the new, valid address.
3670
3671 OLDX is the address as it was before break_out_memory_refs was called.
3672 In some cases it is useful to look at this to decide what needs to be done.
3673
3674 MODE is the mode of the operand pointed to by X.
3675
3676 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3677
3678 static rtx
3679 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3680 enum machine_mode mode)
3681 {
3682 rtx orig_x = x;
3683
3684 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3685 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3686 force_operand (XEXP (x, 0), NULL_RTX));
3687 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3688 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3689 force_operand (XEXP (x, 1), NULL_RTX));
3690 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3691 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3692 XEXP (x, 1));
3693 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3694 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3695 force_operand (XEXP (x, 1), NULL_RTX));
3696
3697 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3698 return x;
3699
3700 if (sparc_tls_referenced_p (x))
3701 x = sparc_legitimize_tls_address (x);
3702 else if (flag_pic)
3703 x = sparc_legitimize_pic_address (x, NULL_RTX);
3704 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3705 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3706 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3707 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3708 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3709 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3710 else if (GET_CODE (x) == SYMBOL_REF
3711 || GET_CODE (x) == CONST
3712 || GET_CODE (x) == LABEL_REF)
3713 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3714
3715 return x;
3716 }
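
/* For instance (an illustrative sketch): on 32-bit SPARC the address
   REG + 0x12345 is not legitimate because the offset does not fit in
   the 13-bit signed immediate field, so the CONSTANT_ADDRESS_P arm
   above rewrites it as

     (plus:SI (reg:SI base) (reg:SI tmp))   where tmp holds 0x12345

   turning an out-of-range REG+N address into the REG+REG form that
   sparc_legitimate_address_p accepts.  */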
3717
3718 /* Delegitimize an address that was legitimized by the above function. */
3719
3720 static rtx
3721 sparc_delegitimize_address (rtx x)
3722 {
3723 x = delegitimize_mem_from_attrs (x);
3724
3725 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3726 switch (XINT (XEXP (x, 1), 1))
3727 {
3728 case UNSPEC_MOVE_PIC:
3729 case UNSPEC_TLSLE:
3730 x = XVECEXP (XEXP (x, 1), 0, 0);
3731 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3732 break;
3733 default:
3734 break;
3735 }
3736
3737 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3738 if (GET_CODE (x) == MINUS
3739 && REG_P (XEXP (x, 0))
3740 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3741 && GET_CODE (XEXP (x, 1)) == LO_SUM
3742 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3743 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3744 {
3745 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3746 gcc_assert (GET_CODE (x) == LABEL_REF);
3747 }
3748
3749 return x;
3750 }
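
/* As an example, a TLS Local Exec address of the form

     (lo_sum (reg) (unspec [(symbol_ref ("var"))] UNSPEC_TLSLE))

   is folded back to the bare (symbol_ref ("var")) by the switch above,
   so that debug output and alias analysis see the original symbol.  */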
3751
3752 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3753 replace the input X, or the original X if no replacement is called for.
3754 The output parameter *WIN is 1 if the calling macro should goto WIN,
3755 0 if it should not.
3756
3757 For SPARC, we wish to handle addresses by splitting them into
3758 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3759 This cuts the number of extra insns by one.
3760
3761 Do nothing when generating PIC code and the address is a symbolic
3762 operand or requires a scratch register. */
3763
3764 rtx
3765 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3766 int opnum, int type,
3767 int ind_levels ATTRIBUTE_UNUSED, int *win)
3768 {
3769 /* Decompose SImode constants into HIGH+LO_SUM. */
3770 if (CONSTANT_P (x)
3771 && (mode != TFmode || TARGET_ARCH64)
3772 && GET_MODE (x) == SImode
3773 && GET_CODE (x) != LO_SUM
3774 && GET_CODE (x) != HIGH
3775 && sparc_cmodel <= CM_MEDLOW
3776 && !(flag_pic
3777 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3778 {
3779 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3780 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3781 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3782 opnum, (enum reload_type)type);
3783 *win = 1;
3784 return x;
3785 }
3786
3787 /* We have to recognize what we have already generated above. */
3788 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3789 {
3790 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3791 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3792 opnum, (enum reload_type)type);
3793 *win = 1;
3794 return x;
3795 }
3796
3797 *win = 0;
3798 return x;
3799 }
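
/* The HIGH+LO_SUM decomposition above corresponds to the usual SPARC
   two-insn sequence for an absolute address (a sketch):

     sethi %hi(sym), %g1            ! the HIGH part
     ld [%g1+%lo(sym)], %o0         ! LO_SUM kept in the memory reference

   Keeping the LO_SUM inside the MEM is what saves the extra add insn
   mentioned above.  */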
3800
3801 /* Return true if ADDR (a legitimate address expression)
3802 has an effect that depends on the machine mode it is used for.
3803
3804 In PIC mode,
3805
3806 (mem:HI [%l7+a])
3807
3808 is not equivalent to
3809
3810 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3811
3812 because [%l7+a+1] is interpreted as the address of (a+1). */
3813
3815 static bool
3816 sparc_mode_dependent_address_p (const_rtx addr)
3817 {
3818 if (flag_pic && GET_CODE (addr) == PLUS)
3819 {
3820 rtx op0 = XEXP (addr, 0);
3821 rtx op1 = XEXP (addr, 1);
3822 if (op0 == pic_offset_table_rtx
3823 && symbolic_operand (op1, VOIDmode))
3824 return true;
3825 }
3826
3827 return false;
3828 }
3829
3830 #ifdef HAVE_GAS_HIDDEN
3831 # define USE_HIDDEN_LINKONCE 1
3832 #else
3833 # define USE_HIDDEN_LINKONCE 0
3834 #endif
3835
3836 static void
3837 get_pc_thunk_name (char name[32], unsigned int regno)
3838 {
3839 const char *reg_name = reg_names[regno];
3840
3841 /* Skip the leading '%' as that cannot be used in a
3842 symbol name. */
3843 reg_name += 1;
3844
3845 if (USE_HIDDEN_LINKONCE)
3846 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
3847 else
3848 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3849 }
3850
3851 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
3852
3853 static rtx
3854 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
3855 {
3856 int orig_flag_pic = flag_pic;
3857 rtx insn;
3858
3859 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
3860 flag_pic = 0;
3861 if (TARGET_ARCH64)
3862 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
3863 else
3864 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
3865 flag_pic = orig_flag_pic;
3866
3867 return insn;
3868 }
3869
3870 /* Emit code to load the GOT register. */
3871
3872 void
3873 load_got_register (void)
3874 {
3875 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
3876 if (!global_offset_table_rtx)
3877 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
3878
3879 if (TARGET_VXWORKS_RTP)
3880 emit_insn (gen_vxworks_load_got ());
3881 else
3882 {
3883 /* The GOT symbol is subject to a PC-relative relocation so we need a
3884 helper function to add the PC value and thus get the final value. */
3885 if (!got_helper_rtx)
3886 {
3887 char name[32];
3888 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
3889 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3890 }
3891
3892 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
3893 got_helper_rtx,
3894 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
3895 }
3896
3897 /* Need to emit this whether or not we obey regdecls,
3898 since setjmp/longjmp can cause life info to screw up.
3899 ??? In the case where we don't obey regdecls, this is not sufficient
3900 since we may not fall out the bottom. */
3901 emit_use (global_offset_table_rtx);
3902 }
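
/* On 32-bit SVR4, the sequence emitted above typically looks like
   (illustrative; the exact offsets come from the pattern):

     sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
     call __sparc_get_pc_thunk.l7
      add %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   with the thunk doing `retl; add %o7, %l7, %l7', so that %l7 ends up
   containing the absolute address of the GOT.  */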
3903
3904 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3905 address of the call target. */
3906
3907 void
3908 sparc_emit_call_insn (rtx pat, rtx addr)
3909 {
3910 rtx insn;
3911
3912 insn = emit_call_insn (pat);
3913
3914 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3915 if (TARGET_VXWORKS_RTP
3916 && flag_pic
3917 && GET_CODE (addr) == SYMBOL_REF
3918 && (SYMBOL_REF_DECL (addr)
3919 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3920 : !SYMBOL_REF_LOCAL_P (addr)))
3921 {
3922 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3923 crtl->uses_pic_offset_table = 1;
3924 }
3925 }
3926 \f
3927 /* Return 1 if RTX is a MEM which is known to be aligned to at
3928 least a DESIRED byte boundary. */
3929
3930 int
3931 mem_min_alignment (rtx mem, int desired)
3932 {
3933 rtx addr, base, offset;
3934
3935 /* If it's not a MEM we can't accept it. */
3936 if (GET_CODE (mem) != MEM)
3937 return 0;
3938
3939 /* Obviously... */
3940 if (!TARGET_UNALIGNED_DOUBLES
3941 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3942 return 1;
3943
3944 /* ??? The rest of the function predates MEM_ALIGN so
3945 there is probably a bit of redundancy. */
3946 addr = XEXP (mem, 0);
3947 base = offset = NULL_RTX;
3948 if (GET_CODE (addr) == PLUS)
3949 {
3950 if (GET_CODE (XEXP (addr, 0)) == REG)
3951 {
3952 base = XEXP (addr, 0);
3953
3954 /* What we are saying here is that if the base
3955 REG is aligned properly, the compiler will make
3956 sure any REG based index upon it will be so
3957 as well. */
3958 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3959 offset = XEXP (addr, 1);
3960 else
3961 offset = const0_rtx;
3962 }
3963 }
3964 else if (GET_CODE (addr) == REG)
3965 {
3966 base = addr;
3967 offset = const0_rtx;
3968 }
3969
3970 if (base != NULL_RTX)
3971 {
3972 int regno = REGNO (base);
3973
3974 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3975 {
3976 /* Check if the compiler has recorded some information
3977 about the alignment of the base REG. If reload has
3978 completed, we already matched with proper alignments.
3979 If not running global_alloc, reload might give us
3980 unaligned pointer to local stack though. */
3981 if (((cfun != 0
3982 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3983 || (optimize && reload_completed))
3984 && (INTVAL (offset) & (desired - 1)) == 0)
3985 return 1;
3986 }
3987 else
3988 {
3989 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3990 return 1;
3991 }
3992 }
3993 else if (! TARGET_UNALIGNED_DOUBLES
3994 || CONSTANT_P (addr)
3995 || GET_CODE (addr) == LO_SUM)
3996 {
3997 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3998 is true, in which case we can only assume that an access is aligned if
3999 it is to a constant address, or the address involves a LO_SUM. */
4000 return 1;
4001 }
4002
4003 /* An obviously unaligned address. */
4004 return 0;
4005 }
4006
4007 \f
4008 /* Vectors to keep interesting information about registers where it can easily
4009 be got. We used to use the actual mode value as the bit number, but there
4010 are more than 32 modes now. Instead we use two tables: one indexed by
4011 hard register number, and one indexed by mode. */
4012
4013 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4014 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4015 mapped into one sparc_mode_class mode. */
4016
4017 enum sparc_mode_class {
4018 S_MODE, D_MODE, T_MODE, O_MODE,
4019 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4020 CC_MODE, CCFP_MODE
4021 };
4022
4023 /* Modes for single-word and smaller quantities. */
4024 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4025
4026 /* Modes for double-word and smaller quantities. */
4027 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
4028
4029 /* Modes for quad-word and smaller quantities. */
4030 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4031
4032 /* Modes for 8-word and smaller quantities. */
4033 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4034
4035 /* Modes for single-float quantities. We must allow any single word or
4036 smaller quantity. This is because the fix/float conversion instructions
4037 take integer inputs/outputs from the float registers. */
4038 #define SF_MODES (S_MODES)
4039
4040 /* Modes for double-float and smaller quantities. */
4041 #define DF_MODES (D_MODES)
4042
4043 /* Modes for quad-float and smaller quantities. */
4044 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4045
4046 /* Modes for quad-float pairs and smaller quantities. */
4047 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4048
4049 /* Modes for double-float only quantities. */
4050 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4051
4052 /* Modes for quad-float and double-float only quantities. */
4053 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4054
4055 /* Modes for quad-float pairs and double-float only quantities. */
4056 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4057
4058 /* Modes for condition codes. */
4059 #define CC_MODES (1 << (int) CC_MODE)
4060 #define CCFP_MODES (1 << (int) CCFP_MODE)
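
/* For example, with the enumeration above (S_MODE = 0, D_MODE = 1,
   SF_MODE = 4, DF_MODE = 5) the first two masks work out to

     S_MODES = (1 << 0) | (1 << 4)            = 0x11
     D_MODES = S_MODES | (1 << 1) | (1 << 5)  = 0x33

   so checking whether a register can hold a mode reduces to ANDing
   two small bitmasks (see HARD_REGNO_MODE_OK in sparc.h).  */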
4061
4062 /* Value is 1 if register/mode pair is acceptable on sparc.
4063 The funny mixture of D and T modes is because integer operations
4064 do not specially operate on tetra quantities, so non-quad-aligned
4065 registers can hold quadword quantities (except %o4 and %i4 because
4066 they cross fixed registers). */
4067
4068 /* This points to either the 32 bit or the 64 bit version. */
4069 const int *hard_regno_mode_classes;
4070
4071 static const int hard_32bit_mode_classes[] = {
4072 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4073 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4074 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4075 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4076
4077 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4078 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4079 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4080 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4081
4082 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4083 and none can hold SFmode/SImode values. */
4084 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4085 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4086 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4087 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4088
4089 /* %fcc[0123] */
4090 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4091
4092 /* %icc, %sfp, %gsr */
4093 CC_MODES, 0, D_MODES
4094 };
4095
4096 static const int hard_64bit_mode_classes[] = {
4097 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4098 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4099 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4100 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4101
4102 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4103 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4104 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4105 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4106
4107 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4108 and none can hold SFmode/SImode values. */
4109 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4110 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4111 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4112 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4113
4114 /* %fcc[0123] */
4115 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4116
4117 /* %icc, %sfp, %gsr */
4118 CC_MODES, 0, D_MODES
4119 };
4120
4121 int sparc_mode_class [NUM_MACHINE_MODES];
4122
4123 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4124
4125 static void
4126 sparc_init_modes (void)
4127 {
4128 int i;
4129
4130 for (i = 0; i < NUM_MACHINE_MODES; i++)
4131 {
4132 switch (GET_MODE_CLASS (i))
4133 {
4134 case MODE_INT:
4135 case MODE_PARTIAL_INT:
4136 case MODE_COMPLEX_INT:
4137 if (GET_MODE_SIZE (i) <= 4)
4138 sparc_mode_class[i] = 1 << (int) S_MODE;
4139 else if (GET_MODE_SIZE (i) == 8)
4140 sparc_mode_class[i] = 1 << (int) D_MODE;
4141 else if (GET_MODE_SIZE (i) == 16)
4142 sparc_mode_class[i] = 1 << (int) T_MODE;
4143 else if (GET_MODE_SIZE (i) == 32)
4144 sparc_mode_class[i] = 1 << (int) O_MODE;
4145 else
4146 sparc_mode_class[i] = 0;
4147 break;
4148 case MODE_VECTOR_INT:
4149 if (GET_MODE_SIZE (i) <= 4)
4150 sparc_mode_class[i] = 1 << (int)SF_MODE;
4151 else if (GET_MODE_SIZE (i) == 8)
4152 sparc_mode_class[i] = 1 << (int)DF_MODE;
4153 break;
4154 case MODE_FLOAT:
4155 case MODE_COMPLEX_FLOAT:
4156 if (GET_MODE_SIZE (i) <= 4)
4157 sparc_mode_class[i] = 1 << (int) SF_MODE;
4158 else if (GET_MODE_SIZE (i) == 8)
4159 sparc_mode_class[i] = 1 << (int) DF_MODE;
4160 else if (GET_MODE_SIZE (i) == 16)
4161 sparc_mode_class[i] = 1 << (int) TF_MODE;
4162 else if (GET_MODE_SIZE (i) == 32)
4163 sparc_mode_class[i] = 1 << (int) OF_MODE;
4164 else
4165 sparc_mode_class[i] = 0;
4166 break;
4167 case MODE_CC:
4168 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4169 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4170 else
4171 sparc_mode_class[i] = 1 << (int) CC_MODE;
4172 break;
4173 default:
4174 sparc_mode_class[i] = 0;
4175 break;
4176 }
4177 }
4178
4179 if (TARGET_ARCH64)
4180 hard_regno_mode_classes = hard_64bit_mode_classes;
4181 else
4182 hard_regno_mode_classes = hard_32bit_mode_classes;
4183
4184 /* Initialize the array used by REGNO_REG_CLASS. */
4185 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4186 {
4187 if (i < 16 && TARGET_V8PLUS)
4188 sparc_regno_reg_class[i] = I64_REGS;
4189 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4190 sparc_regno_reg_class[i] = GENERAL_REGS;
4191 else if (i < 64)
4192 sparc_regno_reg_class[i] = FP_REGS;
4193 else if (i < 96)
4194 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4195 else if (i < 100)
4196 sparc_regno_reg_class[i] = FPCC_REGS;
4197 else
4198 sparc_regno_reg_class[i] = NO_REGS;
4199 }
4200 }
4201 \f
4202 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4203
4204 static inline bool
4205 save_global_or_fp_reg_p (unsigned int regno,
4206 int leaf_function ATTRIBUTE_UNUSED)
4207 {
4208 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4209 }
4210
4211 /* Return whether the return address register (%i7) is needed. */
4212
4213 static inline bool
4214 return_addr_reg_needed_p (int leaf_function)
4215 {
4216 /* If it is live, for example because of __builtin_return_address (0). */
4217 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4218 return true;
4219
4220 /* Otherwise, it is needed as a save register if %o7 is clobbered. */
4221 if (!leaf_function
4222 /* Loading the GOT register clobbers %o7. */
4223 || crtl->uses_pic_offset_table
4224 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4225 return true;
4226
4227 return false;
4228 }
4229
4230 /* Return whether REGNO, a local or in register, must be saved/restored. */
4231
4232 static bool
4233 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4234 {
4235 /* General case: call-saved registers live at some point. */
4236 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4237 return true;
4238
4239 /* Frame pointer register (%fp) if needed. */
4240 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4241 return true;
4242
4243 /* Return address register (%i7) if needed. */
4244 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4245 return true;
4246
4247 /* GOT register (%l7) if needed. */
4248 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4249 return true;
4250
4251 /* If the function accesses prior frames, the frame pointer and the return
4252 address of the previous frame must be saved on the stack. */
4253 if (crtl->accesses_prior_frames
4254 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4255 return true;
4256
4257 return false;
4258 }
4259
4260 /* Compute the frame size required by the function. This function is called
4261 during the reload pass and also by sparc_expand_prologue. */
4262
4263 HOST_WIDE_INT
4264 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4265 {
4266 HOST_WIDE_INT frame_size, apparent_frame_size;
4267 int args_size, n_global_fp_regs = 0;
4268 bool save_local_in_regs_p = false;
4269 unsigned int i;
4270
4271 /* If the function allocates dynamic stack space, the dynamic offset is
4272 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4273 if (leaf_function && !cfun->calls_alloca)
4274 args_size = 0;
4275 else
4276 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4277
4278 /* Calculate space needed for global registers. */
4279 if (TARGET_ARCH64)
4280 for (i = 0; i < 8; i++)
4281 if (save_global_or_fp_reg_p (i, 0))
4282 n_global_fp_regs += 2;
4283 else
4284 for (i = 0; i < 8; i += 2)
4285 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4286 n_global_fp_regs += 2;
4287
4288 /* In the flat window model, find out which local and in registers need to
4289 be saved. We don't reserve space in the current frame for them as they
4290 will be spilled into the register window save area of the caller's frame.
4291 However, as soon as we use this register window save area, we must create
4292 that of the current frame to make it the live one. */
4293 if (TARGET_FLAT)
4294 for (i = 16; i < 32; i++)
4295 if (save_local_or_in_reg_p (i, leaf_function))
4296 {
4297 save_local_in_regs_p = true;
4298 break;
4299 }
4300
4301 /* Calculate space needed for FP registers. */
4302 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4303 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4304 n_global_fp_regs += 2;
4305
4306 if (size == 0
4307 && n_global_fp_regs == 0
4308 && args_size == 0
4309 && !save_local_in_regs_p)
4310 frame_size = apparent_frame_size = 0;
4311 else
4312 {
4313 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4314 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4315 apparent_frame_size += n_global_fp_regs * 4;
4316
4317 /* We need to add the size of the outgoing argument area. */
4318 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4319
4320 /* And that of the register window save area. */
4321 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4322
4323 /* Finally, bump to the appropriate alignment. */
4324 frame_size = SPARC_STACK_ALIGN (frame_size);
4325 }
4326
4327 /* Set up values for use in prologue and epilogue. */
4328 sparc_frame_size = frame_size;
4329 sparc_apparent_frame_size = apparent_frame_size;
4330 sparc_n_global_fp_regs = n_global_fp_regs;
4331 sparc_save_local_in_regs_p = save_local_in_regs_p;
4332
4333 return frame_size;
4334 }
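
/* Worked example (illustrative; the constants come from the ABI macros):
   for a 32-bit non-leaf function with 40 bytes of locals, args_size = 24
   and no call-saved global/FP registers to spill, the else branch above
   computes

     apparent_frame_size = (40 - STARTING_FRAME_OFFSET + 7) & -8
     frame_size = apparent_frame_size + ((24 + 7) & -8)
                  + FIRST_PARM_OFFSET (cfun->decl)
     frame_size = SPARC_STACK_ALIGN (frame_size)

   i.e. locals, outgoing arguments and the register window save area are
   rounded separately and summed, then the total is aligned.  */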
4335
4336 /* Output any necessary .register pseudo-ops. */
4337
4338 void
4339 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4340 {
4341 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4342 int i;
4343
4344 if (TARGET_ARCH32)
4345 return;
4346
4347 /* Check if %g[2367] were used without
4348 .register being printed for them already. */
4349 for (i = 2; i < 8; i++)
4350 {
4351 if (df_regs_ever_live_p (i)
4352 && ! sparc_hard_reg_printed [i])
4353 {
4354 sparc_hard_reg_printed [i] = 1;
4355 /* %g7 is used as TLS base register, use #ignore
4356 for it instead of #scratch. */
4357 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4358 i == 7 ? "ignore" : "scratch");
4359 }
4360 if (i == 3) i = 5;
4361 }
4362 #endif
4363 }
4364
4365 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4366
4367 #if PROBE_INTERVAL > 4096
4368 #error Cannot use indexed addressing mode for stack probing
4369 #endif
4370
4371 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4372 inclusive. These are offsets from the current stack pointer.
4373
4374 Note that we don't use the REG+REG addressing mode for the probes because
4375 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
4376 so the advantages of having a single code path win here. */
4377
4378 static void
4379 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4380 {
4381 rtx g1 = gen_rtx_REG (Pmode, 1);
4382
4383 /* See if we have a constant small number of probes to generate. If so,
4384 that's the easy case. */
4385 if (size <= PROBE_INTERVAL)
4386 {
4387 emit_move_insn (g1, GEN_INT (first));
4388 emit_insn (gen_rtx_SET (VOIDmode, g1,
4389 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4390 emit_stack_probe (plus_constant (g1, -size));
4391 }
4392
4393 /* The run-time loop is made up of 10 insns in the generic case while the
4394 compile-time loop is made up of 4+2*(n-2) insns for n intervals. */
4395 else if (size <= 5 * PROBE_INTERVAL)
4396 {
4397 HOST_WIDE_INT i;
4398
4399 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4400 emit_insn (gen_rtx_SET (VOIDmode, g1,
4401 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4402 emit_stack_probe (g1);
4403
4404 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4405 it exceeds SIZE. If only two probes are needed, this will not
4406 generate any code. Then probe at FIRST + SIZE. */
4407 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4408 {
4409 emit_insn (gen_rtx_SET (VOIDmode, g1,
4410 plus_constant (g1, -PROBE_INTERVAL)));
4411 emit_stack_probe (g1);
4412 }
4413
4414 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4415 }
4416
4417 /* Otherwise, do the same as above, but in a loop. Note that we must be
4418 extra careful with variables wrapping around because we might be at
4419 the very top (or the very bottom) of the address space and we have
4420 to be able to handle this case properly; in particular, we use an
4421 equality test for the loop condition. */
4422 else
4423 {
4424 HOST_WIDE_INT rounded_size;
4425 rtx g4 = gen_rtx_REG (Pmode, 4);
4426
4427 emit_move_insn (g1, GEN_INT (first));
4428
4429
4430 /* Step 1: round SIZE to the previous multiple of the interval. */
4431
4432 rounded_size = size & -PROBE_INTERVAL;
4433 emit_move_insn (g4, GEN_INT (rounded_size));
4434
4435
4436 /* Step 2: compute initial and final value of the loop counter. */
4437
4438 /* TEST_ADDR = SP + FIRST. */
4439 emit_insn (gen_rtx_SET (VOIDmode, g1,
4440 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4441
4442 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4443 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4444
4445
4446 /* Step 3: the loop
4447
4448 while (TEST_ADDR != LAST_ADDR)
4449 {
4450 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4451 probe at TEST_ADDR
4452 }
4453
4454 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4455 until it is equal to ROUNDED_SIZE. */
4456
4457 if (TARGET_ARCH64)
4458 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4459 else
4460 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4461
4462
4463 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4464 that SIZE is equal to ROUNDED_SIZE. */
4465
4466 if (size != rounded_size)
4467 emit_stack_probe (plus_constant (g4, rounded_size - size));
4468 }
4469
4470 /* Make sure nothing is scheduled before we are done. */
4471 emit_insn (gen_blockage ());
4472 }
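
/* Concrete example (a sketch, assuming the default PROBE_INTERVAL of
   4096): for FIRST = 4096 and SIZE = 10000 the second case above
   applies and the unrolled probes land at

     SP - 8192     (FIRST + PROBE_INTERVAL)
     SP - 12288    (FIRST + 2 * PROBE_INTERVAL)
     SP - 14096    (FIRST + SIZE)

   whereas SIZE > 5 * PROBE_INTERVAL would emit the equality-tested
   loop through the probe_stack_range{si,di} patterns instead.  */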
4473
4474 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4475 absolute addresses. */
4476
4477 const char *
4478 output_probe_stack_range (rtx reg1, rtx reg2)
4479 {
4480 static int labelno = 0;
4481 char loop_lab[32], end_lab[32];
4482 rtx xops[2];
4483
4484 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4485 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4486
4487 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4488
4489 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4490 xops[0] = reg1;
4491 xops[1] = reg2;
4492 output_asm_insn ("cmp\t%0, %1", xops);
4493 if (TARGET_ARCH64)
4494 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4495 else
4496 fputs ("\tbe\t", asm_out_file);
4497 assemble_name_raw (asm_out_file, end_lab);
4498 fputc ('\n', asm_out_file);
4499
4500 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4501 xops[1] = GEN_INT (-PROBE_INTERVAL);
4502 output_asm_insn (" add\t%0, %1, %0", xops);
4503
4504 /* Probe at TEST_ADDR and branch. */
4505 if (TARGET_ARCH64)
4506 fputs ("\tba,pt\t%xcc,", asm_out_file);
4507 else
4508 fputs ("\tba\t", asm_out_file);
4509 assemble_name_raw (asm_out_file, loop_lab);
4510 fputc ('\n', asm_out_file);
4511 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4512 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4513
4514 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4515
4516 return "";
4517 }
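
/* The loop emitted above looks roughly like this in 32-bit mode
   (a sketch, assuming the default 4096-byte interval; the label
   numbers are assigned at output time):

     .LPSRL0:
     cmp %g1, %g4
     be .LPSRE0
      add %g1, -4096, %g1
     ba .LPSRL0
      st %g0, [%g1+0]
     .LPSRE0:

   with the add and the store filling the branch delay slots; the store
   to [%g1] is the actual probe.  */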
4518
4519 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4520 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4521 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4522 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4523 the action to be performed if it returns false. Return the new offset. */
4524
4525 typedef bool (*sorr_pred_t) (unsigned int, int);
4526 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4527
4528 static int
4529 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4530 int offset, int leaf_function, sorr_pred_t save_p,
4531 sorr_act_t action_true, sorr_act_t action_false)
4532 {
4533 unsigned int i;
4534 rtx mem, insn;
4535
4536 if (TARGET_ARCH64 && high <= 32)
4537 {
4538 int fp_offset = -1;
4539
4540 for (i = low; i < high; i++)
4541 {
4542 if (save_p (i, leaf_function))
4543 {
4544 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4545 if (action_true == SORR_SAVE)
4546 {
4547 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4548 RTX_FRAME_RELATED_P (insn) = 1;
4549 }
4550 else /* action_true == SORR_RESTORE */
4551 {
4552 /* The frame pointer must be restored last since its old
4553 value may be used as the base address for the frame. This
4554 is problematic only in 64-bit mode because of the lack
4555 of a double-word load instruction. */
4556 if (i == HARD_FRAME_POINTER_REGNUM)
4557 fp_offset = offset;
4558 else
4559 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4560 }
4561 offset += 8;
4562 }
4563 else if (action_false == SORR_ADVANCE)
4564 offset += 8;
4565 }
4566
4567 if (fp_offset >= 0)
4568 {
4569 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4570 emit_move_insn (hard_frame_pointer_rtx, mem);
4571 }
4572 }
4573 else
4574 {
4575 for (i = low; i < high; i += 2)
4576 {
4577 bool reg0 = save_p (i, leaf_function);
4578 bool reg1 = save_p (i + 1, leaf_function);
4579 enum machine_mode mode;
4580 int regno;
4581
4582 if (reg0 && reg1)
4583 {
4584 mode = i < 32 ? DImode : DFmode;
4585 regno = i;
4586 }
4587 else if (reg0)
4588 {
4589 mode = i < 32 ? SImode : SFmode;
4590 regno = i;
4591 }
4592 else if (reg1)
4593 {
4594 mode = i < 32 ? SImode : SFmode;
4595 regno = i + 1;
4596 offset += 4;
4597 }
4598 else
4599 {
4600 if (action_false == SORR_ADVANCE)
4601 offset += 8;
4602 continue;
4603 }
4604
4605 mem = gen_frame_mem (mode, plus_constant (base, offset));
4606 if (action_true == SORR_SAVE)
4607 {
4608 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4609 RTX_FRAME_RELATED_P (insn) = 1;
4610 if (mode == DImode)
4611 {
4612 rtx set1, set2;
4613 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4614 set1 = gen_rtx_SET (VOIDmode, mem,
4615 gen_rtx_REG (SImode, regno));
4616 RTX_FRAME_RELATED_P (set1) = 1;
4617 mem
4618 = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4619 set2 = gen_rtx_SET (VOIDmode, mem,
4620 gen_rtx_REG (SImode, regno + 1));
4621 RTX_FRAME_RELATED_P (set2) = 1;
4622 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4623 gen_rtx_PARALLEL (VOIDmode,
4624 gen_rtvec (2, set1, set2)));
4625 }
4626 }
4627 else /* action_true == SORR_RESTORE */
4628 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4629
4630 /* Always preserve double-word alignment. */
4631 offset = (offset + 8) & -8;
4632 }
4633 }
4634
4635 return offset;
4636 }
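
/* Example (a sketch): if %l0 and %l1 are both live call-saved registers,
   the 32-bit path above pairs them into one DImode access, e.g.

     std %l0, [base+offset]

   while a lone live %l1 gets a single SImode access at offset + 4.  In
   both cases OFFSET then advances to the next multiple of 8, preserving
   double-word alignment.  */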
4637
4638 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4639
4640 static rtx
4641 emit_adjust_base_to_offset (rtx base, int offset)
4642 {
4643 /* ??? This might be optimized a little as %g1 might already have a
4644 value close enough that a single add insn will do. */
4645 /* ??? Although, all of this is probably only a temporary fix because
4646 if %g1 can hold a function result, then sparc_expand_epilogue will
4647 lose (the result will be clobbered). */
4648 rtx new_base = gen_rtx_REG (Pmode, 1);
4649 emit_move_insn (new_base, GEN_INT (offset));
4650 emit_insn (gen_rtx_SET (VOIDmode,
4651 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4652 return new_base;
4653 }
4654
4655 /* Emit code to save/restore call-saved global and FP registers. */
4656
4657 static void
4658 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4659 {
4660 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4661 {
4662 base = emit_adjust_base_to_offset (base, offset);
4663 offset = 0;
4664 }
4665
4666 offset
4667 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4668 save_global_or_fp_reg_p, action, SORR_NONE);
4669 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4670 save_global_or_fp_reg_p, action, SORR_NONE);
4671 }
4672
4673 /* Emit code to save/restore call-saved local and in registers. */
4674
4675 static void
4676 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4677 {
4678 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4679 {
4680 base = emit_adjust_base_to_offset (base, offset);
4681 offset = 0;
4682 }
4683
4684 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4685 save_local_or_in_reg_p, action, SORR_ADVANCE);
4686 }
4687
4688 /* Emit a window_save insn. */
4689
4690 static rtx
4691 emit_window_save (rtx increment)
4692 {
4693 rtx insn = emit_insn (gen_window_save (increment));
4694 RTX_FRAME_RELATED_P (insn) = 1;
4695
4696 /* The incoming return address (%o7) is saved in %i7. */
4697 add_reg_note (insn, REG_CFA_REGISTER,
4698 gen_rtx_SET (VOIDmode,
4699 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4700 gen_rtx_REG (Pmode,
4701 INCOMING_RETURN_ADDR_REGNUM)));
4702
4703 /* The window save event. */
4704 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4705
4706 /* The CFA is %fp, the hard frame pointer. */
4707 add_reg_note (insn, REG_CFA_DEF_CFA,
4708 plus_constant (hard_frame_pointer_rtx,
4709 INCOMING_FRAME_SP_OFFSET));
4710
4711 return insn;
4712 }
4713
4714 /* Generate an increment for the stack pointer. */
4715
4716 static rtx
4717 gen_stack_pointer_inc (rtx increment)
4718 {
4719 return gen_rtx_SET (VOIDmode,
4720 stack_pointer_rtx,
4721 gen_rtx_PLUS (Pmode,
4722 stack_pointer_rtx,
4723 increment));
4724 }
4725
4726 /* Generate a decrement for the stack pointer. */
4727
4728 static rtx
4729 gen_stack_pointer_dec (rtx decrement)
4730 {
4731 return gen_rtx_SET (VOIDmode,
4732 stack_pointer_rtx,
4733 gen_rtx_MINUS (Pmode,
4734 stack_pointer_rtx,
4735 decrement));
4736 }
4737
4738 /* Expand the function prologue. The prologue is responsible for reserving
4739 storage for the frame, saving the call-saved registers and loading the
4740 GOT register if needed. */
4741
4742 void
4743 sparc_expand_prologue (void)
4744 {
4745 HOST_WIDE_INT size;
4746 rtx insn;
4747
4748 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4749 on the final value of the flag means deferring the prologue/epilogue
4750 expansion until just before the second scheduling pass, which is too
4751 late to emit multiple epilogues or return insns.
4752
4753 Of course we are making the assumption that the value of the flag
4754 will not change between now and its final value. Of the three parts
4755 of the formula, only the last one can reasonably vary. Let's take a
4756 closer look, after assuming that the first two ones are set to true
4757 (otherwise the last value is effectively silenced).
4758
4759 If only_leaf_regs_used returns false, the global predicate will also
4760 be false so the actual frame size calculated below will be positive.
4761 As a consequence, the save_register_window insn will be emitted in
4762 the instruction stream; now this insn explicitly references %fp
4763 which is not a leaf register so only_leaf_regs_used will always
4764 return false subsequently.
4765
4766 If only_leaf_regs_used returns true, we hope that the subsequent
4767 optimization passes won't cause non-leaf registers to pop up. For
4768 example, the regrename pass has special provisions to not rename to
4769 non-leaf registers in a leaf function. */
4770 sparc_leaf_function_p
4771 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4772
4773 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4774
4775 if (flag_stack_usage_info)
4776 current_function_static_stack_size = size;
4777
4778 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4779 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4780
4781 if (size == 0)
4782 ; /* do nothing. */
4783 else if (sparc_leaf_function_p)
4784 {
4785 rtx size_int_rtx = GEN_INT (-size);
4786
4787 if (size <= 4096)
4788 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4789 else if (size <= 8192)
4790 {
4791 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4792 /* %sp is still the CFA register. */
4793 RTX_FRAME_RELATED_P (insn) = 1;
4794 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4795 }
4796 else
4797 {
4798 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4799 emit_move_insn (size_rtx, size_int_rtx);
4800 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4801 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4802 gen_stack_pointer_inc (size_int_rtx));
4803 }
4804
4805 RTX_FRAME_RELATED_P (insn) = 1;
4806 }
4807 else
4808 {
4809 rtx size_int_rtx = GEN_INT (-size);
4810
4811 if (size <= 4096)
4812 emit_window_save (size_int_rtx);
4813 else if (size <= 8192)
4814 {
4815 emit_window_save (GEN_INT (-4096));
4816 /* %sp is not the CFA register anymore. */
4817 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4818 }
4819 else
4820 {
4821 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4822 emit_move_insn (size_rtx, size_int_rtx);
4823 emit_window_save (size_rtx);
4824 }
4825 }
4826
4827 if (sparc_leaf_function_p)
4828 {
4829 sparc_frame_base_reg = stack_pointer_rtx;
4830 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4831 }
4832 else
4833 {
4834 sparc_frame_base_reg = hard_frame_pointer_rtx;
4835 sparc_frame_base_offset = SPARC_STACK_BIAS;
4836 }
4837
4838 if (sparc_n_global_fp_regs > 0)
4839 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4840 sparc_frame_base_offset
4841 - sparc_apparent_frame_size,
4842 SORR_SAVE);
4843
4844 /* Load the GOT register if needed. */
4845 if (crtl->uses_pic_offset_table)
4846 load_got_register ();
4847
4848 /* Advertise that the data calculated just above are now valid. */
4849 sparc_prologue_data_valid_p = true;
4850 }
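
/* To make the size cases above concrete, the emitted code for a
   non-flat, non-leaf function is roughly (a sketch):

     size <= 4096:   save %sp, -size, %sp
     size <= 8192:   save %sp, -4096, %sp
                     add %sp, 4096-size, %sp
     otherwise:      %g1 <- -size (sethi/or)
                     save %sp, %g1, %sp

   with the window_save pattern providing the save instruction and the
   CFA notes attached as shown above.  */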
4851
4852 /* Expand the function prologue. The prologue is responsible for reserving
4853 storage for the frame, saving the call-saved registers and loading the
4854 GOT register if needed. */
4855
4856 void
4857 sparc_flat_expand_prologue (void)
4858 {
4859 HOST_WIDE_INT size;
4860 rtx insn;
4861
4862 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
4863
4864 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4865
4866 if (flag_stack_usage_info)
4867 current_function_static_stack_size = size;
4868
4869 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4870 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4871
4872 if (sparc_save_local_in_regs_p)
4873 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
4874 SORR_SAVE);
4875
4876 if (size == 0)
4877 ; /* do nothing. */
4878 else
4879 {
4880 rtx size_int_rtx, size_rtx;
4881
4882 size_rtx = size_int_rtx = GEN_INT (-size);
4883
4884 /* We establish the frame (i.e. decrement the stack pointer) first, even
4885 if we use a frame pointer, because we cannot clobber any call-saved
4886 registers, including the frame pointer, if we haven't created a new
4887 register save area, for the sake of compatibility with the ABI. */
4888 if (size <= 4096)
4889 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4890 else if (size <= 8192 && !frame_pointer_needed)
4891 {
4892 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4893 RTX_FRAME_RELATED_P (insn) = 1;
4894 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4895 }
4896 else
4897 {
4898 size_rtx = gen_rtx_REG (Pmode, 1);
4899 emit_move_insn (size_rtx, size_int_rtx);
4900 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4901 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4902 gen_stack_pointer_inc (size_int_rtx));
4903 }
4904 RTX_FRAME_RELATED_P (insn) = 1;
4905
4906 /* Ensure nothing is scheduled until after the frame is established. */
4907 emit_insn (gen_blockage ());
4908
4909 if (frame_pointer_needed)
4910 {
4911 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4912 gen_rtx_MINUS (Pmode,
4913 stack_pointer_rtx,
4914 size_rtx)));
4915 RTX_FRAME_RELATED_P (insn) = 1;
4916
4917 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4918 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4919 plus_constant (stack_pointer_rtx,
4920 size)));
4921 }
4922
4923 if (return_addr_reg_needed_p (sparc_leaf_function_p))
4924 {
4925 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
4926 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4927
4928 insn = emit_move_insn (i7, o7);
4929 RTX_FRAME_RELATED_P (insn) = 1;
4930
4931 add_reg_note (insn, REG_CFA_REGISTER,
4932 gen_rtx_SET (VOIDmode, i7, o7));
4933
4934 /* Prevent this instruction from ever being considered dead,
4935 even if this function has no epilogue. */
4936 emit_insn (gen_rtx_USE (VOIDmode, i7));
4937 }
4938 }
4939
4940 if (frame_pointer_needed)
4941 {
4942 sparc_frame_base_reg = hard_frame_pointer_rtx;
4943 sparc_frame_base_offset = SPARC_STACK_BIAS;
4944 }
4945 else
4946 {
4947 sparc_frame_base_reg = stack_pointer_rtx;
4948 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4949 }
4950
4951 if (sparc_n_global_fp_regs > 0)
4952 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4953 sparc_frame_base_offset
4954 - sparc_apparent_frame_size,
4955 SORR_SAVE);
4956
4957 /* Load the GOT register if needed. */
4958 if (crtl->uses_pic_offset_table)
4959 load_got_register ();
4960
4961 /* Advertise that the data calculated just above are now valid. */
4962 sparc_prologue_data_valid_p = true;
4963 }
4964
4965 /* This function generates the assembly code for function entry, which boils
4966 down to emitting the necessary .register directives. */
4967
4968 static void
4969 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4970 {
4971 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4972 if (!TARGET_FLAT)
4973 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4974
4975 sparc_output_scratch_registers (file);
4976 }
4977
4978 /* Expand the function epilogue, either normal or part of a sibcall.
4979 We emit all the instructions except the return or the call. */
4980
4981 void
4982 sparc_expand_epilogue (bool for_eh)
4983 {
4984 HOST_WIDE_INT size = sparc_frame_size;
4985
4986 if (sparc_n_global_fp_regs > 0)
4987 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4988 sparc_frame_base_offset
4989 - sparc_apparent_frame_size,
4990 SORR_RESTORE);
4991
4992 if (size == 0 || for_eh)
4993 ; /* do nothing. */
4994 else if (sparc_leaf_function_p)
4995 {
4996 if (size <= 4096)
4997 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4998 else if (size <= 8192)
4999 {
5000 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5001 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5002 }
5003 else
5004 {
5005 rtx reg = gen_rtx_REG (Pmode, 1);
5006 emit_move_insn (reg, GEN_INT (-size));
5007 emit_insn (gen_stack_pointer_dec (reg));
5008 }
5009 }
5010 }
5011
5012 /* Expand the function epilogue, either normal or part of a sibcall.
5013 We emit all the instructions except the return or the call. */
5014
5015 void
5016 sparc_flat_expand_epilogue (bool for_eh)
5017 {
5018 HOST_WIDE_INT size = sparc_frame_size;
5019
5020 if (sparc_n_global_fp_regs > 0)
5021 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5022 sparc_frame_base_offset
5023 - sparc_apparent_frame_size,
5024 SORR_RESTORE);
5025
5026 /* If we have a frame pointer, we'll need both to restore it before the
5027 frame is destroyed and to use its current value in destroying the frame.
5028 Since we don't have an atomic way to do that in the flat window model,
5029 we save the current value into a temporary register (%g1). */
5030 if (frame_pointer_needed && !for_eh)
5031 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5032
5033 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5034 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5035 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5036
5037 if (sparc_save_local_in_regs_p)
5038 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5039 sparc_frame_base_offset,
5040 SORR_RESTORE);
5041
5042 if (size == 0 || for_eh)
5043 ; /* do nothing. */
5044 else if (frame_pointer_needed)
5045 {
5046 /* Make sure the frame is destroyed after everything else is done. */
5047 emit_insn (gen_blockage ());
5048
5049 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5050 }
5051 else
5052 {
5053 /* Likewise. */
5054 emit_insn (gen_blockage ());
5055
5056 if (size <= 4096)
5057 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5058 else if (size <= 8192)
5059 {
5060 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5061 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5062 }
5063 else
5064 {
5065 rtx reg = gen_rtx_REG (Pmode, 1);
5066 emit_move_insn (reg, GEN_INT (-size));
5067 emit_insn (gen_stack_pointer_dec (reg));
5068 }
5069 }
5070 }
5071
5072 /* Return true if it is appropriate to emit `return' instructions in the
5073 body of a function. */
5074
5075 bool
5076 sparc_can_use_return_insn_p (void)
5077 {
5078 return sparc_prologue_data_valid_p
5079 && sparc_n_global_fp_regs == 0
5080 && (TARGET_FLAT
5081 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5082 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
5083 }
5084
5085 /* This function generates the assembly code for function exit. */
5086
5087 static void
5088 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5089 {
5090 /* If the last two instructions of a function are "call foo; dslot;"
5091 the return address might point to the first instruction in the next
5092 function and we have to output a dummy nop for the sake of sane
5093 backtraces in such cases. This is pointless for sibling calls since
5094 the return address is explicitly adjusted. */
5095
5096 rtx insn, last_real_insn;
5097
5098 insn = get_last_insn ();
5099
5100 last_real_insn = prev_real_insn (insn);
5101 if (last_real_insn
5102 && GET_CODE (last_real_insn) == INSN
5103 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5104 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5105
5106 if (last_real_insn
5107 && CALL_P (last_real_insn)
5108 && !SIBLING_CALL_P (last_real_insn))
5109 fputs ("\tnop\n", file);
5110
5111 sparc_output_deferred_case_vectors ();
5112 }
5113
5114 /* Output a 'restore' instruction. */
5115
5116 static void
5117 output_restore (rtx pat)
5118 {
5119 rtx operands[3];
5120
5121 if (! pat)
5122 {
5123 fputs ("\t restore\n", asm_out_file);
5124 return;
5125 }
5126
5127 gcc_assert (GET_CODE (pat) == SET);
5128
5129 operands[0] = SET_DEST (pat);
5130 pat = SET_SRC (pat);
5131
5132 switch (GET_CODE (pat))
5133 {
5134 case PLUS:
5135 operands[1] = XEXP (pat, 0);
5136 operands[2] = XEXP (pat, 1);
5137 output_asm_insn (" restore %r1, %2, %Y0", operands);
5138 break;
5139 case LO_SUM:
5140 operands[1] = XEXP (pat, 0);
5141 operands[2] = XEXP (pat, 1);
5142 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5143 break;
5144 case ASHIFT:
5145 operands[1] = XEXP (pat, 0);
5146 gcc_assert (XEXP (pat, 1) == const1_rtx);
5147 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5148 break;
5149 default:
5150 operands[1] = pat;
5151 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5152 break;
5153 }
5154 }
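
/* The cases above map to assembly as follows (illustrative):

     PLUS:     restore %rs1, rs2_or_imm, %rd
     LO_SUM:   restore %rs1, %lo(sym), %rd
     ASHIFT:   restore %rs1, %rs1, %rd     ! reg << 1 == reg + reg
     default:  restore %g0, src, %rd

   i.e. the insn that was pending for the delay slot is folded into the
   restore itself.  */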
5155
5156 /* Output a return. */
5157
5158 const char *
5159 output_return (rtx insn)
5160 {
5161 if (crtl->calls_eh_return)
5162 {
5163 /* If the function uses __builtin_eh_return, the eh_return
5164 machinery occupies the delay slot. */
5165 gcc_assert (!final_sequence);
5166
5167 if (flag_delayed_branch)
5168 {
5169 if (!TARGET_FLAT && TARGET_V9)
5170 fputs ("\treturn\t%i7+8\n", asm_out_file);
5171 else
5172 {
5173 if (!TARGET_FLAT)
5174 fputs ("\trestore\n", asm_out_file);
5175
5176 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5177 }
5178
5179 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5180 }
5181 else
5182 {
5183 if (!TARGET_FLAT)
5184 fputs ("\trestore\n", asm_out_file);
5185
5186 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5187 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5188 }
5189 }
5190 else if (sparc_leaf_function_p || TARGET_FLAT)
5191 {
5192 /* This is a leaf or flat function so we don't have to bother restoring
5193 the register window, which frees us from dealing with the convoluted
5194 semantics of restore/return. We simply output the jump to the
5195 return address and the insn in the delay slot (if any). */
5196
5197 return "jmp\t%%o7+%)%#";
5198 }
5199 else
5200 {
5201 /* This is a regular function so we have to restore the register window.
5202 We may have a pending insn for the delay slot, which will be either
5203 combined with the 'restore' instruction or put in the delay slot of
5204 the 'return' instruction. */
5205
5206 if (final_sequence)
5207 {
5208 rtx delay, pat;
5209
5210 delay = NEXT_INSN (insn);
5211 gcc_assert (delay);
5212
5213 pat = PATTERN (delay);
5214
5215 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5216 {
5217 epilogue_renumber (&pat, 0);
5218 return "return\t%%i7+%)%#";
5219 }
5220 else
5221 {
5222 output_asm_insn ("jmp\t%%i7+%)", NULL);
5223 output_restore (pat);
5224 PATTERN (delay) = gen_blockage ();
5225 INSN_CODE (delay) = -1;
5226 }
5227 }
5228 else
5229 {
5230 /* The delay slot is empty. */
5231 if (TARGET_V9)
5232 return "return\t%%i7+%)\n\t nop";
5233 else if (flag_delayed_branch)
5234 return "jmp\t%%i7+%)\n\t restore";
5235 else
5236 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5237 }
5238 }
5239
5240 return "";
5241 }
5242
5243 /* Output a sibling call. */
5244
5245 const char *
5246 output_sibcall (rtx insn, rtx call_operand)
5247 {
5248 rtx operands[1];
5249
5250 gcc_assert (flag_delayed_branch);
5251
5252 operands[0] = call_operand;
5253
5254 if (sparc_leaf_function_p || TARGET_FLAT)
5255 {
5256 /* This is a leaf or flat function so we don't have to bother restoring
5257 the register window. We simply output the jump to the function and
5258 the insn in the delay slot (if any). */
5259
5260 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5261
5262 if (final_sequence)
5263 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5264 operands);
5265 else
5266 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5267 it into branch if possible. */
5268 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5269 operands);
5270 }
5271 else
5272 {
5273 /* This is a regular function so we have to restore the register window.
5274 We may have a pending insn for the delay slot, which will be combined
5275 with the 'restore' instruction. */
5276
5277 output_asm_insn ("call\t%a0, 0", operands);
5278
5279 if (final_sequence)
5280 {
5281 rtx delay = NEXT_INSN (insn);
5282 gcc_assert (delay);
5283
5284 output_restore (PATTERN (delay));
5285
5286 PATTERN (delay) = gen_blockage ();
5287 INSN_CODE (delay) = -1;
5288 }
5289 else
5290 output_restore (NULL_RTX);
5291 }
5292
5293 return "";
5294 }
5295 \f
5296 /* Functions for handling argument passing.
5297
5298 For 32-bit, the first 6 args are normally in registers and the rest are
5299 pushed. Any arg that starts within the first 6 words is at least
5300 partially passed in a register unless its data type forbids it.
5301
5302 For 64-bit, the argument registers are laid out as an array of 16 elements
5303 and arguments are added sequentially. The first 6 int args and up to the
5304 first 16 fp args (depending on size) are passed in regs.
5305
5306 Slot Stack Integral Float Float in structure Double Long Double
5307 ---- ----- -------- ----- ------------------ ------ -----------
5308 15 [SP+248] %f31 %f30,%f31 %d30
5309 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5310 13 [SP+232] %f27 %f26,%f27 %d26
5311 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5312 11 [SP+216] %f23 %f22,%f23 %d22
5313 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5314 9 [SP+200] %f19 %f18,%f19 %d18
5315 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5316 7 [SP+184] %f15 %f14,%f15 %d14
5317 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5318 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5319 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5320 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5321 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5322 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5323 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5324
5325 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5326
5327 Integral arguments are always passed as 64-bit quantities appropriately
5328 extended.
5329
5330 Passing of floating point values is handled as follows.
5331 If a prototype is in scope:
5332 If the value is in a named argument (i.e. not a stdarg function or a
5333 value not part of the `...') then the value is passed in the appropriate
5334 fp reg.
5335 If the value is part of the `...' and is passed in one of the first 6
5336 slots then the value is passed in the appropriate int reg.
5337 If the value is part of the `...' and is not passed in one of the first 6
5338 slots then the value is passed in memory.
5339 If a prototype is not in scope:
5340 If the value is one of the first 6 arguments the value is passed in the
5341 appropriate integer reg and the appropriate fp reg.
5342 If the value is not one of the first 6 arguments the value is passed in
5343 the appropriate fp reg and in memory.
5344
5345
5346 Summary of the calling conventions implemented by GCC on the SPARC:
5347
5348 32-bit ABI:
5349 size argument return value
5350
5351 small integer <4 int. reg. int. reg.
5352 word 4 int. reg. int. reg.
5353 double word 8 int. reg. int. reg.
5354
5355 _Complex small integer <8 int. reg. int. reg.
5356 _Complex word 8 int. reg. int. reg.
5357 _Complex double word 16 memory int. reg.
5358
5359 vector integer <=8 int. reg. FP reg.
5360 vector integer >8 memory memory
5361
5362 float 4 int. reg. FP reg.
5363 double 8 int. reg. FP reg.
5364 long double 16 memory memory
5365
5366 _Complex float 8 memory FP reg.
5367 _Complex double 16 memory FP reg.
5368 _Complex long double 32 memory FP reg.
5369
5370 vector float any memory memory
5371
5372 aggregate any memory memory
5373
5374
5375
5376 64-bit ABI:
5377 size argument return value
5378
5379 small integer <8 int. reg. int. reg.
5380 word 8 int. reg. int. reg.
5381 double word 16 int. reg. int. reg.
5382
5383 _Complex small integer <16 int. reg. int. reg.
5384 _Complex word 16 int. reg. int. reg.
5385 _Complex double word 32 memory int. reg.
5386
5387 vector integer <=16 FP reg. FP reg.
5388 vector integer 16<s<=32 memory FP reg.
5389 vector integer >32 memory memory
5390
5391 float 4 FP reg. FP reg.
5392 double 8 FP reg. FP reg.
5393 long double 16 FP reg. FP reg.
5394
5395 _Complex float 8 FP reg. FP reg.
5396 _Complex double 16 FP reg. FP reg.
5397 _Complex long double 32 memory FP reg.
5398
5399 vector float <=16 FP reg. FP reg.
5400 vector float 16<s<=32 memory FP reg.
5401 vector float >32 memory memory
5402
5403 aggregate <=16 reg. reg.
5404 aggregate 16<s<=32 memory reg.
5405 aggregate >32 memory memory
5406
5407
5408
5409 Note #1: complex floating-point types follow the extended SPARC ABIs as
5410 implemented by the Sun compiler.
5411
5412 Note #2: integral vector types follow the scalar floating-point types
5413 conventions to match what is implemented by the Sun VIS SDK.
5414
5415 Note #3: floating-point vector types follow the aggregate types
5416 conventions. */
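
/* Example (64-bit ABI, following the tables above): for

     double f (int a, double b);

   `a' occupies slot 0 and is passed in %o0 as a sign-extended 64-bit
   quantity, `b' is a named FP argument in slot 1 and is passed in %d2,
   and the result is returned in %d0.  */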
5417
5418
5419 /* Maximum number of int regs for args. */
5420 #define SPARC_INT_ARG_MAX 6
5421 /* Maximum number of fp regs for args. */
5422 #define SPARC_FP_ARG_MAX 16
5423
5424 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
5425
5426 /* Handle the INIT_CUMULATIVE_ARGS macro.
5427 Initialize a variable CUM of type CUMULATIVE_ARGS
5428 for a call to a function whose data type is FNTYPE.
5429 For a library call, FNTYPE is 0. */
5430
5431 void
5432 init_cumulative_args (struct sparc_args *cum, tree fntype,
5433 rtx libname ATTRIBUTE_UNUSED,
5434 tree fndecl ATTRIBUTE_UNUSED)
5435 {
5436 cum->words = 0;
5437 cum->prototype_p = fntype && prototype_p (fntype);
5438 cum->libcall_p = fntype == 0;
5439 }
5440
5441 /* Handle promotion of pointer and integer arguments. */
5442
5443 static enum machine_mode
5444 sparc_promote_function_mode (const_tree type,
5445 enum machine_mode mode,
5446 int *punsignedp,
5447 const_tree fntype ATTRIBUTE_UNUSED,
5448 int for_return ATTRIBUTE_UNUSED)
5449 {
5450 if (type != NULL_TREE && POINTER_TYPE_P (type))
5451 {
5452 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5453 return Pmode;
5454 }
5455
5456 /* Integral arguments are passed as full words, as per the ABI. */
5457 if (GET_MODE_CLASS (mode) == MODE_INT
5458 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5459 return word_mode;
5460
5461 return mode;
5462 }
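
/* E.g. a `short' argument (2 bytes) is widened to word_mode -- SImode
   on 32-bit, DImode on 64-bit -- while pointers are extended according
   to POINTERS_EXTEND_UNSIGNED; only sub-word integral modes are
   affected.  */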
5463
5464 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5465
5466 static bool
5467 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5468 {
5469 return TARGET_ARCH64 ? true : false;
5470 }
5471
5472 /* Scan the record type TYPE and return the following predicates:
5473 - INTREGS_P: the record contains at least one field or sub-field
5474 that is eligible for promotion in integer registers.
5475 - FP_REGS_P: the record contains at least one field or sub-field
5476 that is eligible for promotion in floating-point registers.
5477 - PACKED_P: the record contains at least one field that is packed.
5478
5479 Sub-fields are not taken into account for the PACKED_P predicate. */
5480
5481 static void
5482 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5483 int *packed_p)
5484 {
5485 tree field;
5486
5487 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5488 {
5489 if (TREE_CODE (field) == FIELD_DECL)
5490 {
5491 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5492 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5493 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5494 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5495 && TARGET_FPU)
5496 *fpregs_p = 1;
5497 else
5498 *intregs_p = 1;
5499
5500 if (packed_p && DECL_PACKED (field))
5501 *packed_p = 1;
5502 }
5503 }
5504 }
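/* Illustration of the predicates computed above for a hypothetical
   record type:

       struct t { int i; float f; };

   Scanning T sets both *INTREGS_P (because of I) and *FPREGS_P
   (because of F, provided TARGET_FPU); *PACKED_P would only be set
   if a field of T itself were declared packed.  */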
5505
5506 /* Compute the slot number to pass an argument in.
5507 Return the slot number or -1 if passing on the stack.
5508
5509 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5510 the preceding args and about the function being called.
5511 MODE is the argument's machine mode.
5512 TYPE is the data type of the argument (as a tree).
5513 This is null for libcalls where that information may
5514 not be available.
5515 NAMED is nonzero if this argument is a named parameter
5516 (otherwise it is an extra parameter matching an ellipsis).
5517 INCOMING_P is false for TARGET_FUNCTION_ARG, true for TARGET_FUNCTION_INCOMING_ARG.
5518 *PREGNO records the register number to use if scalar type.
5519 *PPADDING records the amount of padding needed in words. */
5520
5521 static int
5522 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5523 const_tree type, bool named, bool incoming_p,
5524 int *pregno, int *ppadding)
5525 {
5526 int regbase = (incoming_p
5527 ? SPARC_INCOMING_INT_ARG_FIRST
5528 : SPARC_OUTGOING_INT_ARG_FIRST);
5529 int slotno = cum->words;
5530 enum mode_class mclass;
5531 int regno;
5532
5533 *ppadding = 0;
5534
5535 if (type && TREE_ADDRESSABLE (type))
5536 return -1;
5537
5538 if (TARGET_ARCH32
5539 && mode == BLKmode
5540 && type
5541 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5542 return -1;
5543
5544 /* For SPARC64, objects requiring 16-byte alignment get it. */
5545 if (TARGET_ARCH64
5546 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5547 && (slotno & 1) != 0)
5548 slotno++, *ppadding = 1;
5549
5550 mclass = GET_MODE_CLASS (mode);
5551 if (type && TREE_CODE (type) == VECTOR_TYPE)
5552 {
5553 /* Vector types deserve special treatment because they are
5554 polymorphic wrt their mode, depending upon whether VIS
5555 instructions are enabled. */
5556 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5557 {
5558 /* The SPARC port defines no floating-point vector modes. */
5559 gcc_assert (mode == BLKmode);
5560 }
5561 else
5562 {
5563 /* Integral vector types should either have a vector
5564 mode or an integral mode, because we are guaranteed
5565 by pass_by_reference that their size is not greater
5566 than 16 bytes and TImode is 16-byte wide. */
5567 gcc_assert (mode != BLKmode);
5568
5569 /* Vector integers are handled like floats according to
5570 the Sun VIS SDK. */
5571 mclass = MODE_FLOAT;
5572 }
5573 }
5574
5575 switch (mclass)
5576 {
5577 case MODE_FLOAT:
5578 case MODE_COMPLEX_FLOAT:
5579 case MODE_VECTOR_INT:
5580 if (TARGET_ARCH64 && TARGET_FPU && named)
5581 {
5582 if (slotno >= SPARC_FP_ARG_MAX)
5583 return -1;
5584 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5585 /* Arguments filling only one single FP register are
5586 right-justified in the outer double FP register. */
5587 if (GET_MODE_SIZE (mode) <= 4)
5588 regno++;
5589 break;
5590 }
5591 /* fallthrough */
5592
5593 case MODE_INT:
5594 case MODE_COMPLEX_INT:
5595 if (slotno >= SPARC_INT_ARG_MAX)
5596 return -1;
5597 regno = regbase + slotno;
5598 break;
5599
5600 case MODE_RANDOM:
5601 if (mode == VOIDmode)
5602 /* MODE is VOIDmode when generating the actual call. */
5603 return -1;
5604
5605 gcc_assert (mode == BLKmode);
5606
5607 if (TARGET_ARCH32
5608 || !type
5609 || (TREE_CODE (type) != VECTOR_TYPE
5610 && TREE_CODE (type) != RECORD_TYPE))
5611 {
5612 if (slotno >= SPARC_INT_ARG_MAX)
5613 return -1;
5614 regno = regbase + slotno;
5615 }
5616 else /* TARGET_ARCH64 && type */
5617 {
5618 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5619
5620 /* First see what kinds of registers we would need. */
5621 if (TREE_CODE (type) == VECTOR_TYPE)
5622 fpregs_p = 1;
5623 else
5624 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5625
5626 /* The ABI obviously doesn't specify how packed structures
5627 are passed. These are defined to be passed in int regs
5628 if possible, otherwise memory. */
5629 if (packed_p || !named)
5630 fpregs_p = 0, intregs_p = 1;
5631
5632 /* If all arg slots are filled, then must pass on stack. */
5633 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5634 return -1;
5635
5636 /* If there are only int args and all int arg slots are filled,
5637 then must pass on stack. */
5638 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5639 return -1;
5640
5641 /* Note that even if all int arg slots are filled, fp members may
5642 still be passed in regs if such regs are available.
5643 *PREGNO isn't set because there may be more than one, it's up
5644 to the caller to compute them. */
5645 return slotno;
5646 }
5647 break;
5648
5649 default:
5650 gcc_unreachable ();
5651 }
5652
5653 *pregno = regno;
5654 return slotno;
5655 }
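/* Illustrative slot arithmetic for the floating-point case above:
   a 'float' argument assigned to slot 1 starts at register
   SPARC_FP_ARG_FIRST + 2 but, being a 4-byte value, only fills half
   of the double FP register, so it is right-justified and the
   register number is bumped by one.  */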
5656
5657 /* Handle recursive register counting for structure field layout. */
5658
5659 struct function_arg_record_value_parms
5660 {
5661 rtx ret; /* return expression being built. */
5662 int slotno; /* slot number of the argument. */
5663 int named; /* whether the argument is named. */
5664 int regbase; /* regno of the base register. */
5665 int stack; /* 1 if part of the argument is on the stack. */
5666 int intoffset; /* offset of the first pending integer field. */
5667 unsigned int nregs; /* number of words passed in registers. */
5668 };
5669
5670 static void function_arg_record_value_3
5671 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5672 static void function_arg_record_value_2
5673 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5674 static void function_arg_record_value_1
5675 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5676 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5677 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5678
5679 /* A subroutine of function_arg_record_value. Traverse the structure
5680 recursively and determine how many registers will be required. */
5681
5682 static void
5683 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5684 struct function_arg_record_value_parms *parms,
5685 bool packed_p)
5686 {
5687 tree field;
5688
5689 /* We need to compute how many registers are needed so we can
5690 allocate the PARALLEL but before we can do that we need to know
5691 whether there are any packed fields. The ABI obviously doesn't
5692 specify how structures are passed in this case, so they are
5693 defined to be passed in int regs if possible, otherwise memory,
5694 regardless of whether there are fp values present. */
5695
5696 if (! packed_p)
5697 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5698 {
5699 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5700 {
5701 packed_p = true;
5702 break;
5703 }
5704 }
5705
5706 /* Compute how many registers we need. */
5707 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5708 {
5709 if (TREE_CODE (field) == FIELD_DECL)
5710 {
5711 HOST_WIDE_INT bitpos = startbitpos;
5712
5713 if (DECL_SIZE (field) != 0)
5714 {
5715 if (integer_zerop (DECL_SIZE (field)))
5716 continue;
5717
5718 if (host_integerp (bit_position (field), 1))
5719 bitpos += int_bit_position (field);
5720 }
5721
5722 /* ??? FIXME: else assume zero offset. */
5723
5724 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5725 function_arg_record_value_1 (TREE_TYPE (field),
5726 bitpos,
5727 parms,
5728 packed_p);
5729 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5730 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5731 && TARGET_FPU
5732 && parms->named
5733 && ! packed_p)
5734 {
5735 if (parms->intoffset != -1)
5736 {
5737 unsigned int startbit, endbit;
5738 int intslots, this_slotno;
5739
5740 startbit = parms->intoffset & -BITS_PER_WORD;
5741 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5742
5743 intslots = (endbit - startbit) / BITS_PER_WORD;
5744 this_slotno = parms->slotno + parms->intoffset
5745 / BITS_PER_WORD;
5746
5747 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5748 {
5749 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5750 /* We need to pass this field on the stack. */
5751 parms->stack = 1;
5752 }
5753
5754 parms->nregs += intslots;
5755 parms->intoffset = -1;
5756 }
5757
5758 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5759 If it weren't true, we wouldn't be here. */
5760 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5761 && DECL_MODE (field) == BLKmode)
5762 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5763 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5764 parms->nregs += 2;
5765 else
5766 parms->nregs += 1;
5767 }
5768 else
5769 {
5770 if (parms->intoffset == -1)
5771 parms->intoffset = bitpos;
5772 }
5773 }
5774 }
5775 }
5776
5777 /* A subroutine of function_arg_record_value. Assign the bits of the
5778 structure between parms->intoffset and bitpos to integer registers. */
5779
5780 static void
5781 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5782 struct function_arg_record_value_parms *parms)
5783 {
5784 enum machine_mode mode;
5785 unsigned int regno;
5786 unsigned int startbit, endbit;
5787 int this_slotno, intslots, intoffset;
5788 rtx reg;
5789
5790 if (parms->intoffset == -1)
5791 return;
5792
5793 intoffset = parms->intoffset;
5794 parms->intoffset = -1;
5795
5796 startbit = intoffset & -BITS_PER_WORD;
5797 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5798 intslots = (endbit - startbit) / BITS_PER_WORD;
5799 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5800
5801 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5802 if (intslots <= 0)
5803 return;
5804
5805 /* If this is the trailing part of a word, only load that much into
5806 the register. Otherwise load the whole register. Note that in
5807 the latter case we may pick up unwanted bits. It's not a problem
5808 at the moment, but we may wish to revisit this. */
5809
5810 if (intoffset % BITS_PER_WORD != 0)
5811 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5812 MODE_INT);
5813 else
5814 mode = word_mode;
5815
5816 intoffset /= BITS_PER_UNIT;
5817 do
5818 {
5819 regno = parms->regbase + this_slotno;
5820 reg = gen_rtx_REG (mode, regno);
5821 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5822 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5823
5824 this_slotno += 1;
5825 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5826 mode = word_mode;
5827 parms->nregs += 1;
5828 intslots -= 1;
5829 }
5830 while (intslots > 0);
5831 }
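/* A worked example of the rounding above (illustrative): with
   BITS_PER_WORD == 64, INTOFFSET == 96 and BITPOS == 192 give
   STARTBIT == 64 and ENDBIT == 192, i.e. (192 - 64) / 64 == 2
   integer slots, and the first load uses the smallest integer mode
   covering the trailing 32 bits of the first word.  */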
5832
5833 /* A subroutine of function_arg_record_value. Traverse the structure
5834 recursively and assign bits to floating point registers. Track which
5835 bits in between need integer registers; invoke function_arg_record_value_3
5836 to make that happen. */
5837
5838 static void
5839 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5840 struct function_arg_record_value_parms *parms,
5841 bool packed_p)
5842 {
5843 tree field;
5844
5845 if (! packed_p)
5846 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5847 {
5848 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5849 {
5850 packed_p = true;
5851 break;
5852 }
5853 }
5854
5855 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5856 {
5857 if (TREE_CODE (field) == FIELD_DECL)
5858 {
5859 HOST_WIDE_INT bitpos = startbitpos;
5860
5861 if (DECL_SIZE (field) != 0)
5862 {
5863 if (integer_zerop (DECL_SIZE (field)))
5864 continue;
5865
5866 if (host_integerp (bit_position (field), 1))
5867 bitpos += int_bit_position (field);
5868 }
5869
5870 /* ??? FIXME: else assume zero offset. */
5871
5872 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5873 function_arg_record_value_2 (TREE_TYPE (field),
5874 bitpos,
5875 parms,
5876 packed_p);
5877 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5878 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5879 && TARGET_FPU
5880 && parms->named
5881 && ! packed_p)
5882 {
5883 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5884 int regno, nregs, pos;
5885 enum machine_mode mode = DECL_MODE (field);
5886 rtx reg;
5887
5888 function_arg_record_value_3 (bitpos, parms);
5889
5890 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5891 && mode == BLKmode)
5892 {
5893 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5894 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5895 }
5896 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5897 {
5898 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5899 nregs = 2;
5900 }
5901 else
5902 nregs = 1;
5903
5904 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5905 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5906 regno++;
5907 reg = gen_rtx_REG (mode, regno);
5908 pos = bitpos / BITS_PER_UNIT;
5909 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5910 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5911 parms->nregs += 1;
5912 while (--nregs > 0)
5913 {
5914 regno += GET_MODE_SIZE (mode) / 4;
5915 reg = gen_rtx_REG (mode, regno);
5916 pos += GET_MODE_SIZE (mode);
5917 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5918 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5919 parms->nregs += 1;
5920 }
5921 }
5922 else
5923 {
5924 if (parms->intoffset == -1)
5925 parms->intoffset = bitpos;
5926 }
5927 }
5928 }
5929 }
5930
5931 /* Used by function_arg and sparc_function_value_1 to implement the complex
5932 conventions of the 64-bit ABI for passing and returning structures.
5933 Return an expression valid as a return value for the FUNCTION_ARG
5934 and TARGET_FUNCTION_VALUE.
5935
5936 TYPE is the data type of the argument (as a tree).
5937 This is null for libcalls where that information may
5938 not be available.
5939 MODE is the argument's machine mode.
5940 SLOTNO is the index number of the argument's slot in the parameter array.
5941 NAMED is nonzero if this argument is a named parameter
5942 (otherwise it is an extra parameter matching an ellipsis).
5943 REGBASE is the regno of the base register for the parameter array. */
5944
5945 static rtx
5946 function_arg_record_value (const_tree type, enum machine_mode mode,
5947 int slotno, int named, int regbase)
5948 {
5949 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5950 struct function_arg_record_value_parms parms;
5951 unsigned int nregs;
5952
5953 parms.ret = NULL_RTX;
5954 parms.slotno = slotno;
5955 parms.named = named;
5956 parms.regbase = regbase;
5957 parms.stack = 0;
5958
5959 /* Compute how many registers we need. */
5960 parms.nregs = 0;
5961 parms.intoffset = 0;
5962 function_arg_record_value_1 (type, 0, &parms, false);
5963
5964 /* Take into account pending integer fields. */
5965 if (parms.intoffset != -1)
5966 {
5967 unsigned int startbit, endbit;
5968 int intslots, this_slotno;
5969
5970 startbit = parms.intoffset & -BITS_PER_WORD;
5971 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5972 intslots = (endbit - startbit) / BITS_PER_WORD;
5973 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5974
5975 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5976 {
5977 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5978 /* We need to pass this field on the stack. */
5979 parms.stack = 1;
5980 }
5981
5982 parms.nregs += intslots;
5983 }
5984 nregs = parms.nregs;
5985
5986 /* Allocate the vector and handle some annoying special cases. */
5987 if (nregs == 0)
5988 {
5989 /* ??? Empty structure has no value? Duh? */
5990 if (typesize <= 0)
5991 {
5992 /* Though there's nothing really to store, return a word register
5993 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5994 leads to breakage due to the fact that there are zero bytes to
5995 load. */
5996 return gen_rtx_REG (mode, regbase);
5997 }
5998 else
5999 {
6000 /* ??? C++ has structures with no fields, and yet a size. Give up
6001 for now and pass everything back in integer registers. */
6002 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6003 }
6004 if (nregs + slotno > SPARC_INT_ARG_MAX)
6005 nregs = SPARC_INT_ARG_MAX - slotno;
6006 }
6007 gcc_assert (nregs != 0);
6008
6009 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6010
6011 /* If at least one field must be passed on the stack, generate
6012 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6013 also be passed on the stack. We can't do much better because the
6014 semantics of TARGET_ARG_PARTIAL_BYTES don't handle the case
6015 of structures for which the fields passed exclusively in registers
6016 are not at the beginning of the structure. */
6017 if (parms.stack)
6018 XVECEXP (parms.ret, 0, 0)
6019 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6020
6021 /* Fill in the entries. */
6022 parms.nregs = 0;
6023 parms.intoffset = 0;
6024 function_arg_record_value_2 (type, 0, &parms, false);
6025 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6026
6027 gcc_assert (parms.nregs == nregs);
6028
6029 return parms.ret;
6030 }
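/* The value built above is a PARALLEL of (register, byte offset)
   pairs.  Schematically, and purely as an illustration (the exact
   registers depend on the slot and direction), a 16-byte record
   holding a word of integer data followed by a double field yields:

       (parallel [(expr_list (reg:DI %o0) (const_int 0))
                  (expr_list (reg:DF %f2) (const_int 8))])  */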
6031
6032 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6033 of the 64-bit ABI for passing and returning unions.
6034 Return an expression valid as a return value for the FUNCTION_ARG
6035 and TARGET_FUNCTION_VALUE.
6036
6037 SIZE is the size in bytes of the union. MODE is the argument's
6038 machine mode. SLOTNO is the index of the union's parameter slot.
6039 REGNO is the hard register the union will be passed in. */
6040
6041 static rtx
6042 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6043 int regno)
6044 {
6045 int nwords = ROUND_ADVANCE (size), i;
6046 rtx regs;
6047
6048 /* See comment in previous function for empty structures. */
6049 if (nwords == 0)
6050 return gen_rtx_REG (mode, regno);
6051
6052 if (slotno == SPARC_INT_ARG_MAX - 1)
6053 nwords = 1;
6054
6055 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6056
6057 for (i = 0; i < nwords; i++)
6058 {
6059 /* Unions are passed left-justified. */
6060 XVECEXP (regs, 0, i)
6061 = gen_rtx_EXPR_LIST (VOIDmode,
6062 gen_rtx_REG (word_mode, regno),
6063 GEN_INT (UNITS_PER_WORD * i));
6064 regno++;
6065 }
6066
6067 return regs;
6068 }
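/* Illustratively, a 12-byte union starting in slot 0 yields a
   PARALLEL of two word_mode registers at byte offsets 0 and 8;
   had it started in the last integer argument slot, only the first
   word would go in a register, per the check above.  */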
6069
6070 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6071 for passing and returning large (BLKmode) vectors.
6072 Return an expression valid as a return value for the FUNCTION_ARG
6073 and TARGET_FUNCTION_VALUE.
6074
6075 SIZE is the size in bytes of the vector (at least 8 bytes).
6076 REGNO is the FP hard register the vector will be passed in. */
6077
6078 static rtx
6079 function_arg_vector_value (int size, int regno)
6080 {
6081 int i, nregs = size / 8;
6082 rtx regs;
6083
6084 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6085
6086 for (i = 0; i < nregs; i++)
6087 {
6088 XVECEXP (regs, 0, i)
6089 = gen_rtx_EXPR_LIST (VOIDmode,
6090 gen_rtx_REG (DImode, regno + 2*i),
6091 GEN_INT (i*8));
6092 }
6093
6094 return regs;
6095 }
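/* For example (illustrative), a 16-byte vector starting at %f0 is
   split into one DImode FP register pair per 8-byte chunk:

       (parallel [(expr_list (reg:DI %f0) (const_int 0))
                  (expr_list (reg:DI %f2) (const_int 8))])  */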
6096
6097 /* Determine where to put an argument to a function.
6098 Value is zero to push the argument on the stack,
6099 or a hard register in which to store the argument.
6100
6101 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6102 the preceding args and about the function being called.
6103 MODE is the argument's machine mode.
6104 TYPE is the data type of the argument (as a tree).
6105 This is null for libcalls where that information may
6106 not be available.
6107 NAMED is true if this argument is a named parameter
6108 (otherwise it is an extra parameter matching an ellipsis).
6109 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6110 TARGET_FUNCTION_INCOMING_ARG. */
6111
6112 static rtx
6113 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6114 const_tree type, bool named, bool incoming_p)
6115 {
6116 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6117
6118 int regbase = (incoming_p
6119 ? SPARC_INCOMING_INT_ARG_FIRST
6120 : SPARC_OUTGOING_INT_ARG_FIRST);
6121 int slotno, regno, padding;
6122 enum mode_class mclass = GET_MODE_CLASS (mode);
6123
6124 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6125 &regno, &padding);
6126 if (slotno == -1)
6127 return 0;
6128
6129 /* Vector types deserve special treatment because they are polymorphic wrt
6130 their mode, depending upon whether VIS instructions are enabled. */
6131 if (type && TREE_CODE (type) == VECTOR_TYPE)
6132 {
6133 HOST_WIDE_INT size = int_size_in_bytes (type);
6134 gcc_assert ((TARGET_ARCH32 && size <= 8)
6135 || (TARGET_ARCH64 && size <= 16));
6136
6137 if (mode == BLKmode)
6138 return function_arg_vector_value (size,
6139 SPARC_FP_ARG_FIRST + 2*slotno);
6140 else
6141 mclass = MODE_FLOAT;
6142 }
6143
6144 if (TARGET_ARCH32)
6145 return gen_rtx_REG (mode, regno);
6146
6147 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6148 and are promoted to registers if possible. */
6149 if (type && TREE_CODE (type) == RECORD_TYPE)
6150 {
6151 HOST_WIDE_INT size = int_size_in_bytes (type);
6152 gcc_assert (size <= 16);
6153
6154 return function_arg_record_value (type, mode, slotno, named, regbase);
6155 }
6156
6157 /* Unions up to 16 bytes in size are passed in integer registers. */
6158 else if (type && TREE_CODE (type) == UNION_TYPE)
6159 {
6160 HOST_WIDE_INT size = int_size_in_bytes (type);
6161 gcc_assert (size <= 16);
6162
6163 return function_arg_union_value (size, mode, slotno, regno);
6164 }
6165
6166 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6167 but also have the slot allocated for them.
6168 If no prototype is in scope, fp values in register slots get passed
6169 in two places, either fp regs and int regs or fp regs and memory. */
6170 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6171 && SPARC_FP_REG_P (regno))
6172 {
6173 rtx reg = gen_rtx_REG (mode, regno);
6174 if (cum->prototype_p || cum->libcall_p)
6175 {
6176 /* "* 2" because fp reg numbers are recorded in 4 byte
6177 quantities. */
6178 #if 0
6179 /* ??? This will cause the value to be passed in the fp reg and
6180 in the stack. When a prototype exists we want to pass the
6181 value in the reg but reserve space on the stack. That's an
6182 optimization, and is deferred [for a bit]. */
6183 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6184 return gen_rtx_PARALLEL (mode,
6185 gen_rtvec (2,
6186 gen_rtx_EXPR_LIST (VOIDmode,
6187 NULL_RTX, const0_rtx),
6188 gen_rtx_EXPR_LIST (VOIDmode,
6189 reg, const0_rtx)));
6190 else
6191 #else
6192 /* ??? It seems that passing back a register even when past
6193 the area declared by REG_PARM_STACK_SPACE will allocate
6194 space appropriately, and will not copy the data onto the
6195 stack, exactly as we desire.
6196
6197 This is due to locate_and_pad_parm being called in
6198 expand_call whenever reg_parm_stack_space > 0, which
6199 while beneficial to our example here, would seem to be
6200 in error from what had been intended. Ho hum... -- r~ */
6201 #endif
6202 return reg;
6203 }
6204 else
6205 {
6206 rtx v0, v1;
6207
6208 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6209 {
6210 int intreg;
6211
6212 /* On incoming, we don't need to know that the value
6213 is passed in %f0 and %i0, and it confuses other parts
6214 causing needless spillage even on the simplest cases. */
6215 if (incoming_p)
6216 return reg;
6217
6218 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6219 + (regno - SPARC_FP_ARG_FIRST) / 2);
6220
6221 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6222 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6223 const0_rtx);
6224 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6225 }
6226 else
6227 {
6228 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6229 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6230 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6231 }
6232 }
6233 }
6234
6235 /* All other aggregate types are passed in an integer register in a mode
6236 corresponding to the size of the type. */
6237 else if (type && AGGREGATE_TYPE_P (type))
6238 {
6239 HOST_WIDE_INT size = int_size_in_bytes (type);
6240 gcc_assert (size <= 16);
6241
6242 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6243 }
6244
6245 return gen_rtx_REG (mode, regno);
6246 }
6247
6248 /* Handle the TARGET_FUNCTION_ARG target hook. */
6249
6250 static rtx
6251 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6252 const_tree type, bool named)
6253 {
6254 return sparc_function_arg_1 (cum, mode, type, named, false);
6255 }
6256
6257 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6258
6259 static rtx
6260 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6261 const_tree type, bool named)
6262 {
6263 return sparc_function_arg_1 (cum, mode, type, named, true);
6264 }
6265
6266 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
6267
6268 static unsigned int
6269 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6270 {
6271 return ((TARGET_ARCH64
6272 && (GET_MODE_ALIGNMENT (mode) == 128
6273 || (type && TYPE_ALIGN (type) == 128)))
6274 ? 128
6275 : PARM_BOUNDARY);
6276 }
6277
6278 /* For an arg passed partly in registers and partly in memory,
6279 this is the number of bytes of registers used.
6280 For args passed entirely in registers or entirely in memory, zero.
6281
6282 Any arg that starts in the first 6 regs but won't entirely fit in them
6283 needs partial registers on v8. On v9, structures with integer
6284 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6285 values that begin in the last fp reg [where "last fp reg" varies with the
6286 mode] will be split between that reg and memory. */
6287
6288 static int
6289 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6290 tree type, bool named)
6291 {
6292 int slotno, regno, padding;
6293
6294 /* We pass false for incoming_p here; it doesn't matter. */
6295 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6296 false, &regno, &padding);
6297
6298 if (slotno == -1)
6299 return 0;
6300
6301 if (TARGET_ARCH32)
6302 {
6303 if ((slotno + (mode == BLKmode
6304 ? ROUND_ADVANCE (int_size_in_bytes (type))
6305 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6306 > SPARC_INT_ARG_MAX)
6307 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6308 }
6309 else
6310 {
6311 /* We are guaranteed by pass_by_reference that the size of the
6312 argument is not greater than 16 bytes, so we only need to return
6313 one word if the argument is partially passed in registers. */
6314
6315 if (type && AGGREGATE_TYPE_P (type))
6316 {
6317 int size = int_size_in_bytes (type);
6318
6319 if (size > UNITS_PER_WORD
6320 && slotno == SPARC_INT_ARG_MAX - 1)
6321 return UNITS_PER_WORD;
6322 }
6323 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6324 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6325 && ! (TARGET_FPU && named)))
6326 {
6327 /* The complex types are passed as packed types. */
6328 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6329 && slotno == SPARC_INT_ARG_MAX - 1)
6330 return UNITS_PER_WORD;
6331 }
6332 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6333 {
6334 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6335 > SPARC_FP_ARG_MAX)
6336 return UNITS_PER_WORD;
6337 }
6338 }
6339
6340 return 0;
6341 }
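/* An illustrative 64-bit case for the code above: a 16-byte
   aggregate whose first word lands in the last integer slot has
   UNITS_PER_WORD partial bytes -- one word goes in %o5, the second
   word in the caller's overflow area.  */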
6342
6343 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6344 Specify whether to pass the argument by reference. */
6345
6346 static bool
6347 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6348 enum machine_mode mode, const_tree type,
6349 bool named ATTRIBUTE_UNUSED)
6350 {
6351 if (TARGET_ARCH32)
6352 /* Original SPARC 32-bit ABI says that structures and unions,
6353 and quad-precision floats are passed by reference. For Pascal,
6354 also pass arrays by reference. All other base types are passed
6355 in registers.
6356
6357 Extended ABI (as implemented by the Sun compiler) says that all
6358 complex floats are passed by reference. Pass complex integers
6359 in registers up to 8 bytes. More generally, enforce the 2-word
6360 cap for passing arguments in registers.
6361
6362 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6363 integers are passed like floats of the same size, that is in
6364 registers up to 8 bytes. Pass all vector floats by reference
6365 like structures and unions. */
6366 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6367 || mode == SCmode
6368 /* Catch CDImode, TFmode, DCmode and TCmode. */
6369 || GET_MODE_SIZE (mode) > 8
6370 || (type
6371 && TREE_CODE (type) == VECTOR_TYPE
6372 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6373 else
6374 /* Original SPARC 64-bit ABI says that structures and unions
6375 smaller than 16 bytes are passed in registers, as well as
6376 all other base types.
6377
6378 Extended ABI (as implemented by the Sun compiler) says that
6379 complex floats are passed in registers up to 16 bytes. Pass
6380 all complex integers in registers up to 16 bytes. More generally,
6381 enforce the 2-word cap for passing arguments in registers.
6382
6383 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6384 integers are passed like floats of the same size, that is in
6385 registers (up to 16 bytes). Pass all vector floats like structures
6386 and unions. */
6387 return ((type
6388 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6389 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6390 /* Catch CTImode and TCmode. */
6391 || GET_MODE_SIZE (mode) > 16);
6392 }
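/* A few illustrative data points for the rules above:

       32-bit: 'long double' (TFmode), '_Complex float' (SCmode) and
               aggregates are passed by reference;
       64-bit: a 24-byte structure is passed by reference, while a
               16-byte one is passed by value in registers.  */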
6393
6394 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6395 Update the data in CUM to advance over an argument
6396 of mode MODE and data type TYPE.
6397 TYPE is null for libcalls where that information may not be available. */
6398
6399 static void
6400 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6401 const_tree type, bool named)
6402 {
6403 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6404 int regno, padding;
6405
6406 /* We pass false for incoming_p here; it doesn't matter. */
6407 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6408
6409 /* If argument requires leading padding, add it. */
6410 cum->words += padding;
6411
6412 if (TARGET_ARCH32)
6413 {
6414 cum->words += (mode != BLKmode
6415 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6416 : ROUND_ADVANCE (int_size_in_bytes (type)));
6417 }
6418 else
6419 {
6420 if (type && AGGREGATE_TYPE_P (type))
6421 {
6422 int size = int_size_in_bytes (type);
6423
6424 if (size <= 8)
6425 ++cum->words;
6426 else if (size <= 16)
6427 cum->words += 2;
6428 else /* passed by reference */
6429 ++cum->words;
6430 }
6431 else
6432 {
6433 cum->words += (mode != BLKmode
6434 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6435 : ROUND_ADVANCE (int_size_in_bytes (type)));
6436 }
6437 }
6438 }
6439
6440 /* Handle the FUNCTION_ARG_PADDING macro.
6441 For the 64-bit ABI, structs are always stored left-shifted in their
6442 argument slot. */
6443
6444 enum direction
6445 function_arg_padding (enum machine_mode mode, const_tree type)
6446 {
6447 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6448 return upward;
6449
6450 /* Fall back to the default. */
6451 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6452 }
6453
6454 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6455 Specify whether to return the return value in memory. */
6456
6457 static bool
6458 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6459 {
6460 if (TARGET_ARCH32)
6461 /* Original SPARC 32-bit ABI says that structures and unions,
6462 and quad-precision floats are returned in memory. All other
6463 base types are returned in registers.
6464
6465 Extended ABI (as implemented by the Sun compiler) says that
6466 all complex floats are returned in registers (8 FP registers
6467 at most for '_Complex long double'). Return all complex integers
6468 in registers (4 at most for '_Complex long long').
6469
6470 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6471 integers are returned like floats of the same size, that is in
6472 registers up to 8 bytes and in memory otherwise. Return all
6473 vector floats in memory like structures and unions; note that
6474 they always have BLKmode like the latter. */
6475 return (TYPE_MODE (type) == BLKmode
6476 || TYPE_MODE (type) == TFmode
6477 || (TREE_CODE (type) == VECTOR_TYPE
6478 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6479 else
6480 /* Original SPARC 64-bit ABI says that structures and unions
6481 smaller than 32 bytes are returned in registers, as well as
6482 all other base types.
6483
6484 Extended ABI (as implemented by the Sun compiler) says that all
6485 complex floats are returned in registers (8 FP registers at most
6486 for '_Complex long double'). Return all complex integers in
6487 registers (4 at most for '_Complex TItype').
6488
6489 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6490 integers are returned like floats of the same size, that is in
6491 registers. Return all vector floats like structures and unions;
6492 note that they always have BLKmode like the latter. */
6493 return (TYPE_MODE (type) == BLKmode
6494 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6495 }
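/* Illustratively: under the 32-bit rules 'long double' (TFmode) and
   any BLKmode aggregate come back in memory while '_Complex double'
   (DCmode) comes back in registers; under the 64-bit rules a 24-byte
   structure is returned in registers and a 40-byte one in memory.  */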
6496
6497 /* Handle the TARGET_STRUCT_VALUE target hook.
6498 Return where to find the structure return value address. */
6499
6500 static rtx
6501 sparc_struct_value_rtx (tree fndecl, int incoming)
6502 {
6503 if (TARGET_ARCH64)
6504 return 0;
6505 else
6506 {
6507 rtx mem;
6508
6509 if (incoming)
6510 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6511 STRUCT_VALUE_OFFSET));
6512 else
6513 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6514 STRUCT_VALUE_OFFSET));
6515
6516 /* Only follow the SPARC ABI for fixed-size structure returns.
6517 Variable size structure returns are handled per the normal
6518 procedures in GCC. This is enabled by -mstd-struct-return. */
6519 if (incoming == 2
6520 && sparc_std_struct_return
6521 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6522 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6523 {
6524 /* We must check and adjust the return address, as it is
6525 optional as to whether the return object is really
6526 provided. */
6527 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6528 rtx scratch = gen_reg_rtx (SImode);
6529 rtx endlab = gen_label_rtx ();
6530
6531 /* Calculate the return object size */
6532 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6533 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6534 /* Construct a temporary return value */
6535 rtx temp_val
6536 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6537
6538 /* Implement SPARC 32-bit psABI callee return struct checking:
6539
6540 Fetch the instruction where we will return to and see if
6541 it's an unimp instruction (the most significant 10 bits
6542 will be zero). */
6543 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6544 plus_constant (ret_reg, 8)));
6545 /* Assume the size is valid and pre-adjust */
6546 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6547 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6548 0, endlab);
6549 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6550 /* Write the address of the memory pointed to by temp_val into
6551 the memory pointed to by mem */
6552 emit_move_insn (mem, XEXP (temp_val, 0));
6553 emit_label (endlab);
6554 }
6555
6556 return mem;
6557 }
6558 }
6559
6560 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6561 For v9, function return values are subject to the same rules as arguments,
6562 except that up to 32 bytes may be returned in registers. */
6563
6564 static rtx
6565 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6566 bool outgoing)
6567 {
6568 /* Beware that the two values are swapped here wrt function_arg. */
6569 int regbase = (outgoing
6570 ? SPARC_INCOMING_INT_ARG_FIRST
6571 : SPARC_OUTGOING_INT_ARG_FIRST);
6572 enum mode_class mclass = GET_MODE_CLASS (mode);
6573 int regno;
6574
6575 /* Vector types deserve special treatment because they are polymorphic wrt
6576 their mode, depending upon whether VIS instructions are enabled. */
6577 if (type && TREE_CODE (type) == VECTOR_TYPE)
6578 {
6579 HOST_WIDE_INT size = int_size_in_bytes (type);
6580 gcc_assert ((TARGET_ARCH32 && size <= 8)
6581 || (TARGET_ARCH64 && size <= 32));
6582
6583 if (mode == BLKmode)
6584 return function_arg_vector_value (size,
6585 SPARC_FP_ARG_FIRST);
6586 else
6587 mclass = MODE_FLOAT;
6588 }
6589
6590 if (TARGET_ARCH64 && type)
6591 {
6592 /* Structures up to 32 bytes in size are returned in registers. */
6593 if (TREE_CODE (type) == RECORD_TYPE)
6594 {
6595 HOST_WIDE_INT size = int_size_in_bytes (type);
6596 gcc_assert (size <= 32);
6597
6598 return function_arg_record_value (type, mode, 0, 1, regbase);
6599 }
6600
6601 /* Unions up to 32 bytes in size are returned in integer registers. */
6602 else if (TREE_CODE (type) == UNION_TYPE)
6603 {
6604 HOST_WIDE_INT size = int_size_in_bytes (type);
6605 gcc_assert (size <= 32);
6606
6607 return function_arg_union_value (size, mode, 0, regbase);
6608 }
6609
6610 /* Objects that require it are returned in FP registers. */
6611 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6612 ;
6613
6614 /* All other aggregate types are returned in an integer register in a
6615 mode corresponding to the size of the type. */
6616 else if (AGGREGATE_TYPE_P (type))
6617 {
6618 /* All other aggregate types are passed in an integer register
6619 in a mode corresponding to the size of the type. */
6620 HOST_WIDE_INT size = int_size_in_bytes (type);
6621 gcc_assert (size <= 32);
6622
6623 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6624
6625 /* ??? We probably should have made the same ABI change in
6626 3.4.0 as the one we made for unions. The latter was
6627 required by the SCD though, while the former is not
6628 specified, so we favored compatibility and efficiency.
6629
6630 Now we're stuck for aggregates larger than 16 bytes,
6631 because OImode vanished in the meantime. Let's not
6632 try to be unduly clever, and simply follow the ABI
6633 for unions in that case. */
6634 if (mode == BLKmode)
6635 return function_arg_union_value (size, mode, 0, regbase);
6636 else
6637 mclass = MODE_INT;
6638 }
6639
6640 /* We should only have pointer and integer types at this point. This
6641 must match sparc_promote_function_mode. */
6642 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6643 mode = word_mode;
6644 }
6645
6646 /* We should only have pointer and integer types at this point. This must
6647 match sparc_promote_function_mode. */
6648 else if (TARGET_ARCH32
6649 && mclass == MODE_INT
6650 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6651 mode = word_mode;
6652
6653 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6654 regno = SPARC_FP_ARG_FIRST;
6655 else
6656 regno = regbase;
6657
6658 return gen_rtx_REG (mode, regno);
6659 }
6660
6661 /* Handle TARGET_FUNCTION_VALUE.
6662 On the SPARC, the value is found in the first "output" register, but the
6663 called function leaves it in the first "input" register. */
6664
6665 static rtx
6666 sparc_function_value (const_tree valtype,
6667 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6668 bool outgoing)
6669 {
6670 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6671 }
6672
6673 /* Handle TARGET_LIBCALL_VALUE. */
6674
6675 static rtx
6676 sparc_libcall_value (enum machine_mode mode,
6677 const_rtx fun ATTRIBUTE_UNUSED)
6678 {
6679 return sparc_function_value_1 (NULL_TREE, mode, false);
6680 }
6681
6682 /* Handle FUNCTION_VALUE_REGNO_P.
6683 On the SPARC, the first "output" reg is used for integer values, and the
6684 first floating point register is used for floating point values. */
6685
6686 static bool
6687 sparc_function_value_regno_p (const unsigned int regno)
6688 {
6689 return (regno == 8 || regno == 32);
6690 }
6691
6692 /* Do what is necessary for `va_start'. We look at the current function
6693 to determine if stdarg or varargs is used and return the address of
6694 the first unnamed parameter. */
6695
6696 static rtx
6697 sparc_builtin_saveregs (void)
6698 {
6699 int first_reg = crtl->args.info.words;
6700 rtx address;
6701 int regno;
6702
6703 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6704 emit_move_insn (gen_rtx_MEM (word_mode,
6705 gen_rtx_PLUS (Pmode,
6706 frame_pointer_rtx,
6707 GEN_INT (FIRST_PARM_OFFSET (0)
6708 + (UNITS_PER_WORD
6709 * regno)))),
6710 gen_rtx_REG (word_mode,
6711 SPARC_INCOMING_INT_ARG_FIRST + regno));
6712
6713 address = gen_rtx_PLUS (Pmode,
6714 frame_pointer_rtx,
6715 GEN_INT (FIRST_PARM_OFFSET (0)
6716 + UNITS_PER_WORD * first_reg));
6717
6718 return address;
6719 }
6720
6721 /* Implement `va_start' for stdarg. */
6722
6723 static void
6724 sparc_va_start (tree valist, rtx nextarg)
6725 {
6726 nextarg = expand_builtin_saveregs ();
6727 std_expand_builtin_va_start (valist, nextarg);
6728 }
6729
6730 /* Implement `va_arg' for stdarg. */
6731
6732 static tree
6733 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6734 gimple_seq *post_p)
6735 {
6736 HOST_WIDE_INT size, rsize, align;
6737 tree addr, incr;
6738 bool indirect;
6739 tree ptrtype = build_pointer_type (type);
6740
6741 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6742 {
6743 indirect = true;
6744 size = rsize = UNITS_PER_WORD;
6745 align = 0;
6746 }
6747 else
6748 {
6749 indirect = false;
6750 size = int_size_in_bytes (type);
6751 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6752 align = 0;
6753
6754 if (TARGET_ARCH64)
6755 {
6756 /* For SPARC64, objects requiring 16-byte alignment get it. */
6757 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6758 align = 2 * UNITS_PER_WORD;
6759
6760 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6761 are left-justified in their slots. */
6762 if (AGGREGATE_TYPE_P (type))
6763 {
6764 if (size == 0)
6765 size = rsize = UNITS_PER_WORD;
6766 else
6767 size = rsize;
6768 }
6769 }
6770 }
6771
6772 incr = valist;
6773 if (align)
6774 {
6775 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6776 incr = fold_convert (sizetype, incr);
6777 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6778 size_int (-align));
6779 incr = fold_convert (ptr_type_node, incr);
6780 }
6781
6782 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6783 addr = incr;
6784
6785 if (BYTES_BIG_ENDIAN && size < rsize)
6786 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6787
6788 if (indirect)
6789 {
6790 addr = fold_convert (build_pointer_type (ptrtype), addr);
6791 addr = build_va_arg_indirect_ref (addr);
6792 }
6793
6794 /* If the address isn't aligned properly for the type, we need a temporary.
6795 FIXME: This is inefficient, usually we can do this in registers. */
6796 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6797 {
6798 tree tmp = create_tmp_var (type, "va_arg_tmp");
6799 tree dest_addr = build_fold_addr_expr (tmp);
6800 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
6801 3, dest_addr, addr, size_int (rsize));
6802 TREE_ADDRESSABLE (tmp) = 1;
6803 gimplify_and_add (copy, pre_p);
6804 addr = dest_addr;
6805 }
6806
6807 else
6808 addr = fold_convert (ptrtype, addr);
6809
6810 incr = fold_build_pointer_plus_hwi (incr, rsize);
6811 gimplify_assign (valist, incr, post_p);
6812
6813 return build_va_arg_indirect_ref (addr);
6814 }
6815 \f
6816 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6817 Specify whether the vector mode is supported by the hardware. */
6818
6819 static bool
6820 sparc_vector_mode_supported_p (enum machine_mode mode)
6821 {
6822 return TARGET_VIS && VECTOR_MODE_P (mode);
6823 }
6824 \f
6825 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6826
6827 static enum machine_mode
6828 sparc_preferred_simd_mode (enum machine_mode mode)
6829 {
6830 if (TARGET_VIS)
6831 switch (mode)
6832 {
6833 case SImode:
6834 return V2SImode;
6835 case HImode:
6836 return V4HImode;
6837 case QImode:
6838 return V8QImode;
6839
6840 default:;
6841 }
6842
6843 return word_mode;
6844 }
6845 \f
6846 /* Return the string to output an unconditional branch to LABEL, which is
6847 the operand number of the label.
6848
6849 DEST is the destination insn (i.e. the label), INSN is the source. */
6850
6851 const char *
6852 output_ubranch (rtx dest, int label, rtx insn)
6853 {
6854 static char string[64];
6855 bool v9_form = false;
6856 char *p;
6857
6858 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6859 {
6860 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6861 - INSN_ADDRESSES (INSN_UID (insn)));
6862 /* Leave some instructions for "slop". */
6863 if (delta >= -260000 && delta < 260000)
6864 v9_form = true;
6865 }
6866
6867 if (v9_form)
6868 strcpy (string, "ba%*,pt\t%%xcc, ");
6869 else
6870 strcpy (string, "b%*\t");
6871
6872 p = strchr (string, '\0');
6873 *p++ = '%';
6874 *p++ = 'l';
6875 *p++ = '0' + label;
6876 *p++ = '%';
6877 *p++ = '(';
6878 *p = '\0';
6879
6880 return string;
6881 }
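/* E.g. (illustrative), for operand number 0 this yields the template
   "b%*\t%l0%(" in the v8 form, where %* and %( are SPARC
   print_operand punctuation emitting the annul flag and a delay-slot
   nop when needed.  */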
6882
6883 /* Return the string to output a conditional branch to LABEL, which is
6884 the operand number of the label. OP is the conditional expression.
6885 XEXP (OP, 0) is assumed to be a condition code register (integer or
6886 floating point) and its mode specifies what kind of comparison we made.
6887
6888 DEST is the destination insn (i.e. the label), INSN is the source.
6889
6890 REVERSED is nonzero if we should reverse the sense of the comparison.
6891
6892 ANNUL is nonzero if we should generate an annulling branch. */
6893
6894 const char *
6895 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6896 rtx insn)
6897 {
6898 static char string[64];
6899 enum rtx_code code = GET_CODE (op);
6900 rtx cc_reg = XEXP (op, 0);
6901 enum machine_mode mode = GET_MODE (cc_reg);
6902 const char *labelno, *branch;
6903 int spaces = 8, far;
6904 char *p;
6905
6906 /* v9 branches are limited to +-1MB. If it is too far away,
6907 change
6908
6909 bne,pt %xcc, .LC30
6910
6911 to
6912
6913 be,pn %xcc, .+12
6914 nop
6915 ba .LC30
6916
6917 and
6918
6919 fbne,a,pn %fcc2, .LC29
6920
6921 to
6922
6923 fbe,pt %fcc2, .+16
6924 nop
6925 ba .LC29 */
6926
6927 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6928 if (reversed ^ far)
6929 {
6930 /* Reversal of FP compares takes care -- an ordered compare
6931 becomes an unordered compare and vice versa. */
6932 if (mode == CCFPmode || mode == CCFPEmode)
6933 code = reverse_condition_maybe_unordered (code);
6934 else
6935 code = reverse_condition (code);
6936 }
6937
6938 /* Start by writing the branch condition. */
6939 if (mode == CCFPmode || mode == CCFPEmode)
6940 {
6941 switch (code)
6942 {
6943 case NE:
6944 branch = "fbne";
6945 break;
6946 case EQ:
6947 branch = "fbe";
6948 break;
6949 case GE:
6950 branch = "fbge";
6951 break;
6952 case GT:
6953 branch = "fbg";
6954 break;
6955 case LE:
6956 branch = "fble";
6957 break;
6958 case LT:
6959 branch = "fbl";
6960 break;
6961 case UNORDERED:
6962 branch = "fbu";
6963 break;
6964 case ORDERED:
6965 branch = "fbo";
6966 break;
6967 case UNGT:
6968 branch = "fbug";
6969 break;
6970 case UNLT:
6971 branch = "fbul";
6972 break;
6973 case UNEQ:
6974 branch = "fbue";
6975 break;
6976 case UNGE:
6977 branch = "fbuge";
6978 break;
6979 case UNLE:
6980 branch = "fbule";
6981 break;
6982 case LTGT:
6983 branch = "fblg";
6984 break;
6985
6986 default:
6987 gcc_unreachable ();
6988 }
6989
6990 /* ??? !v9: FP branches cannot be preceded by another floating point
6991 insn. Because there is currently no concept of pre-delay slots,
6992 we can fix this only by always emitting a nop before a floating
6993 point branch. */
6994
6995 string[0] = '\0';
6996 if (! TARGET_V9)
6997 strcpy (string, "nop\n\t");
6998 strcat (string, branch);
6999 }
7000 else
7001 {
7002 switch (code)
7003 {
7004 case NE:
7005 branch = "bne";
7006 break;
7007 case EQ:
7008 branch = "be";
7009 break;
7010 case GE:
7011 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7012 branch = "bpos";
7013 else
7014 branch = "bge";
7015 break;
7016 case GT:
7017 branch = "bg";
7018 break;
7019 case LE:
7020 branch = "ble";
7021 break;
7022 case LT:
7023 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7024 branch = "bneg";
7025 else
7026 branch = "bl";
7027 break;
7028 case GEU:
7029 branch = "bgeu";
7030 break;
7031 case GTU:
7032 branch = "bgu";
7033 break;
7034 case LEU:
7035 branch = "bleu";
7036 break;
7037 case LTU:
7038 branch = "blu";
7039 break;
7040
7041 default:
7042 gcc_unreachable ();
7043 }
7044 strcpy (string, branch);
7045 }
7046 spaces -= strlen (branch);
7047 p = strchr (string, '\0');
7048
7049 /* Now add the annulling, the label, and a possible noop. */
7050 if (annul && ! far)
7051 {
7052 strcpy (p, ",a");
7053 p += 2;
7054 spaces -= 2;
7055 }
7056
7057 if (TARGET_V9)
7058 {
7059 rtx note;
7060 int v8 = 0;
7061
7062 if (! far && insn && INSN_ADDRESSES_SET_P ())
7063 {
7064 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7065 - INSN_ADDRESSES (INSN_UID (insn)));
7066 /* Leave some instructions for "slop". */
7067 if (delta < -260000 || delta >= 260000)
7068 v8 = 1;
7069 }
7070
7071 if (mode == CCFPmode || mode == CCFPEmode)
7072 {
7073 static char v9_fcc_labelno[] = "%%fccX, ";
7074 /* Set the char indicating the number of the fcc reg to use. */
7075 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7076 labelno = v9_fcc_labelno;
7077 if (v8)
7078 {
7079 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7080 labelno = "";
7081 }
7082 }
7083 else if (mode == CCXmode || mode == CCX_NOOVmode)
7084 {
7085 labelno = "%%xcc, ";
7086 gcc_assert (! v8);
7087 }
7088 else
7089 {
7090 labelno = "%%icc, ";
7091 if (v8)
7092 labelno = "";
7093 }
7094
7095 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7096 {
7097 strcpy (p,
7098 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7099 ? ",pt" : ",pn");
7100 p += 3;
7101 spaces -= 3;
7102 }
7103 }
7104 else
7105 labelno = "";
7106
7107 if (spaces > 0)
7108 *p++ = '\t';
7109 else
7110 *p++ = ' ';
7111 strcpy (p, labelno);
7112 p = strchr (p, '\0');
7113 if (far)
7114 {
7115 strcpy (p, ".+12\n\t nop\n\tb\t");
7116 /* Skip the next insn if requested or
7117 if we know that it will be a nop. */
7118 if (annul || ! final_sequence)
7119 p[3] = '6';
7120 p += 14;
7121 }
7122 *p++ = '%';
7123 *p++ = 'l';
7124 *p++ = label + '0';
7125 *p++ = '%';
7126 *p++ = '#';
7127 *p = '\0';
7128
7129 return string;
7130 }
7131
7132 /* Emit a library call comparison between floating point X and Y.
7133 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7134 Return the new operator to be used in the comparison sequence.
7135
7136 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7137 values as arguments instead of the TFmode registers themselves;
7138 that's why we cannot call emit_float_lib_cmp. */
7139
7140 rtx
7141 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7142 {
7143 const char *qpfunc;
7144 rtx slot0, slot1, result, tem, tem2, libfunc;
7145 enum machine_mode mode;
7146 enum rtx_code new_comparison;
7147
7148 switch (comparison)
7149 {
7150 case EQ:
7151 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7152 break;
7153
7154 case NE:
7155 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7156 break;
7157
7158 case GT:
7159 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7160 break;
7161
7162 case GE:
7163 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7164 break;
7165
7166 case LT:
7167 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7168 break;
7169
7170 case LE:
7171 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7172 break;
7173
7174 case ORDERED:
7175 case UNORDERED:
7176 case UNGT:
7177 case UNLT:
7178 case UNEQ:
7179 case UNGE:
7180 case UNLE:
7181 case LTGT:
7182 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7183 break;
7184
7185 default:
7186 gcc_unreachable ();
7187 }
7188
7189 if (TARGET_ARCH64)
7190 {
7191 if (MEM_P (x))
7192 slot0 = x;
7193 else
7194 {
7195 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7196 emit_move_insn (slot0, x);
7197 }
7198
7199 if (MEM_P (y))
7200 slot1 = y;
7201 else
7202 {
7203 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7204 emit_move_insn (slot1, y);
7205 }
7206
7207 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7208 emit_library_call (libfunc, LCT_NORMAL,
7209 DImode, 2,
7210 XEXP (slot0, 0), Pmode,
7211 XEXP (slot1, 0), Pmode);
7212 mode = DImode;
7213 }
7214 else
7215 {
7216 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7217 emit_library_call (libfunc, LCT_NORMAL,
7218 SImode, 2,
7219 x, TFmode, y, TFmode);
7220 mode = SImode;
7221 }
7222
7223
7224 /* Immediately move the result of the libcall into a pseudo
7225 register so reload doesn't clobber the value if it needs
7226 the return register for a spill reg. */
7227 result = gen_reg_rtx (mode);
7228 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7229
7230 switch (comparison)
7231 {
7232 default:
7233 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7234 case ORDERED:
7235 case UNORDERED:
7236 new_comparison = (comparison == UNORDERED ? EQ : NE);
7237 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
7238 case UNGT:
7239 case UNGE:
7240 new_comparison = (comparison == UNGT ? GT : NE);
7241 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7242 case UNLE:
7243 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7244 case UNLT:
7245 tem = gen_reg_rtx (mode);
7246 if (TARGET_ARCH32)
7247 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7248 else
7249 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7250 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7251 case UNEQ:
7252 case LTGT:
7253 tem = gen_reg_rtx (mode);
7254 if (TARGET_ARCH32)
7255 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7256 else
7257 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7258 tem2 = gen_reg_rtx (mode);
7259 if (TARGET_ARCH32)
7260 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7261 else
7262 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7263 new_comparison = (comparison == UNEQ ? EQ : NE);
7264 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7265 }
7266
7267 gcc_unreachable ();
7268 }
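/* The decoding above assumes the SPARC ABI encoding of the _Q_cmp /
   _Qp_cmp result: 0 equal, 1 less, 2 greater, 3 unordered.  Hence
   UNLE is 'result != 2', UNLT tests the low bit (set for 1 and 3),
   and UNEQ/LTGT test bit 1 of result + 1, which singles out the
   ordered-unequal results {1, 2}.  */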
7269
7270 /* Generate an unsigned DImode to FP conversion. This is the same code
7271 optabs would emit if we didn't have TFmode patterns. */
7272
7273 void
7274 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7275 {
7276 rtx neglab, donelab, i0, i1, f0, in, out;
7277
7278 out = operands[0];
7279 in = force_reg (DImode, operands[1]);
7280 neglab = gen_label_rtx ();
7281 donelab = gen_label_rtx ();
7282 i0 = gen_reg_rtx (DImode);
7283 i1 = gen_reg_rtx (DImode);
7284 f0 = gen_reg_rtx (mode);
7285
7286 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7287
7288 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7289 emit_jump_insn (gen_jump (donelab));
7290 emit_barrier ();
7291
7292 emit_label (neglab);
7293
7294 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7295 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7296 emit_insn (gen_iordi3 (i0, i0, i1));
7297 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7298 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7299
7300 emit_label (donelab);
7301 }
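/* The negative path above is the usual halve-then-double trick: for
   inputs with the top bit set, IN is logically shifted right one bit
   with the low bit OR-ed back in as a sticky bit for rounding,
   converted as a signed value, then doubled.  Illustratively,
   2^63 + 1 converts as 2 * (float) (2^62 + 1).  */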
7302
7303 /* Generate an FP to unsigned DImode conversion. This is the same code
7304 optabs would emit if we didn't have TFmode patterns. */
7305
7306 void
7307 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7308 {
7309 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7310
7311 out = operands[0];
7312 in = force_reg (mode, operands[1]);
7313 neglab = gen_label_rtx ();
7314 donelab = gen_label_rtx ();
7315 i0 = gen_reg_rtx (DImode);
7316 i1 = gen_reg_rtx (DImode);
7317 limit = gen_reg_rtx (mode);
7318 f0 = gen_reg_rtx (mode);
7319
7320 emit_move_insn (limit,
7321 CONST_DOUBLE_FROM_REAL_VALUE (
7322 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7323 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7324
7325 emit_insn (gen_rtx_SET (VOIDmode,
7326 out,
7327 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7328 emit_jump_insn (gen_jump (donelab));
7329 emit_barrier ();
7330
7331 emit_label (neglab);
7332
7333 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7334 emit_insn (gen_rtx_SET (VOIDmode,
7335 i0,
7336 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7337 emit_insn (gen_movdi (i1, const1_rtx));
7338 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7339 emit_insn (gen_xordi3 (out, i0, i1));
7340
7341 emit_label (donelab);
7342 }
7343
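/* Illustrative C sketch (not compiler code) of the sequence emitted above,
   taking MODE == DFmode as an example:

     unsigned long long fixunsdi (double in)
     {
       const double limit = 9223372036854775808.0;
       if (in < limit)
         return (long long) in;
       return (unsigned long long) (long long) (in - limit) ^ (1ULL << 63);
     }

   where LIMIT is 2^63: values below it convert directly, larger values
   are first biased down by 2^63 and the sign bit is restored by the
   final XOR.  */
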
7344 /* Return the string to output a conditional branch to LABEL, testing
7345 register REG. LABEL is the operand number of the label; REG is the
7346 operand number of the reg. OP is the conditional expression. The mode
7347 of REG says what kind of comparison we made.
7348
7349 DEST is the destination insn (i.e. the label), INSN is the source.
7350
7351 REVERSED is nonzero if we should reverse the sense of the comparison.
7352
7353 ANNUL is nonzero if we should generate an annulling branch. */
7354
7355 const char *
7356 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7357 int annul, rtx insn)
7358 {
7359 static char string[64];
7360 enum rtx_code code = GET_CODE (op);
7361 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7362 rtx note;
7363 int far;
7364 char *p;
7365
  /* Branch on register instructions are limited to +-128KB.  If the
     target is too far away, change
7368
7369 brnz,pt %g1, .LC30
7370
7371 to
7372
7373 brz,pn %g1, .+12
7374 nop
7375 ba,pt %xcc, .LC30
7376
7377 and
7378
7379 brgez,a,pn %o1, .LC29
7380
7381 to
7382
7383 brlz,pt %o1, .+16
7384 nop
7385 ba,pt %xcc, .LC29 */
7386
7387 far = get_attr_length (insn) >= 3;
7388
  /* These branches test integer registers, so the sense of the
     condition can always be reversed.  */
7390 if (reversed ^ far)
7391 code = reverse_condition (code);
7392
7393 /* Only 64 bit versions of these instructions exist. */
7394 gcc_assert (mode == DImode);
7395
7396 /* Start by writing the branch condition. */
7397
7398 switch (code)
7399 {
7400 case NE:
7401 strcpy (string, "brnz");
7402 break;
7403
7404 case EQ:
7405 strcpy (string, "brz");
7406 break;
7407
7408 case GE:
7409 strcpy (string, "brgez");
7410 break;
7411
7412 case LT:
7413 strcpy (string, "brlz");
7414 break;
7415
7416 case LE:
7417 strcpy (string, "brlez");
7418 break;
7419
7420 case GT:
7421 strcpy (string, "brgz");
7422 break;
7423
7424 default:
7425 gcc_unreachable ();
7426 }
7427
7428 p = strchr (string, '\0');
7429
7430 /* Now add the annulling, reg, label, and nop. */
7431 if (annul && ! far)
7432 {
7433 strcpy (p, ",a");
7434 p += 2;
7435 }
7436
7437 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7438 {
7439 strcpy (p,
7440 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7441 ? ",pt" : ",pn");
7442 p += 3;
7443 }
7444
7445 *p = p < string + 8 ? '\t' : ' ';
7446 p++;
7447 *p++ = '%';
7448 *p++ = '0' + reg;
7449 *p++ = ',';
7450 *p++ = ' ';
7451 if (far)
7452 {
7453 int veryfar = 1, delta;
7454
7455 if (INSN_ADDRESSES_SET_P ())
7456 {
7457 delta = (INSN_ADDRESSES (INSN_UID (dest))
7458 - INSN_ADDRESSES (INSN_UID (insn)));
7459 /* Leave some instructions for "slop". */
7460 if (delta >= -260000 && delta < 260000)
7461 veryfar = 0;
7462 }
7463
7464 strcpy (p, ".+12\n\t nop\n\t");
7465 /* Skip the next insn if requested or
7466 if we know that it will be a nop. */
7467 if (annul || ! final_sequence)
7468 p[3] = '6';
7469 p += 12;
7470 if (veryfar)
7471 {
7472 strcpy (p, "b\t");
7473 p += 2;
7474 }
7475 else
7476 {
7477 strcpy (p, "ba,pt\t%%xcc, ");
7478 p += 13;
7479 }
7480 }
7481 *p++ = '%';
7482 *p++ = 'l';
7483 *p++ = '0' + label;
7484 *p++ = '%';
7485 *p++ = '#';
7486 *p = '\0';
7487
7488 return string;
7489 }
7490
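/* E.g. (illustrative) for a short, not-annulled NE branch on operand 1
   with label operand 2 and a "likely taken" REG_BR_PROB note, the
   routine above returns the template "brnz,pt\t%1, %l2%#".  */
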
/* Return 1 if any of the registers of the instruction are %l[0-7] or
   %o[0-7].  Such instructions cannot be used in the delay slot of a
   return insn on V9.  If TEST is 0, also rename all %i[0-7] registers
   to their %o[0-7] counterparts.  */
7495
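/* For example (illustrative), when the delay slot of a V9 return insn
   contains (set (reg:SI %i0) (const_int 0)), the renaming rewrites it
   to (set (reg:SI %o0) (const_int 0)): the register window has already
   been restored when the delay slot executes, so the callee's %i0 must
   be addressed as %o0.  */
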
7496 static int
7497 epilogue_renumber (register rtx *where, int test)
7498 {
7499 register const char *fmt;
7500 register int i;
7501 register enum rtx_code code;
7502
7503 if (*where == 0)
7504 return 0;
7505
7506 code = GET_CODE (*where);
7507
7508 switch (code)
7509 {
7510 case REG:
7511 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7512 return 1;
7513 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
	*where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO (*where)));
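      /* The REG case falls through: like the codes below, a lone
	 register needs no further examination.  */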
7515 case SCRATCH:
7516 case CC0:
7517 case PC:
7518 case CONST_INT:
7519 case CONST_DOUBLE:
7520 return 0;
7521
7522 /* Do not replace the frame pointer with the stack pointer because
7523 it can cause the delayed instruction to load below the stack.
7524 This occurs when instructions like:
7525
7526 (set (reg/i:SI 24 %i0)
7527 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7528 (const_int -20 [0xffffffec])) 0))
7529
     are in the return delay slot.  */
7531 case PLUS:
7532 if (GET_CODE (XEXP (*where, 0)) == REG
7533 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7534 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7535 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7536 return 1;
7537 break;
7538
7539 case MEM:
7540 if (SPARC_STACK_BIAS
7541 && GET_CODE (XEXP (*where, 0)) == REG
7542 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7543 return 1;
7544 break;
7545
7546 default:
7547 break;
7548 }
7549
7550 fmt = GET_RTX_FORMAT (code);
7551
7552 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7553 {
7554 if (fmt[i] == 'E')
7555 {
7556 register int j;
7557 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7558 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7559 return 1;
7560 }
7561 else if (fmt[i] == 'e'
7562 && epilogue_renumber (&(XEXP (*where, i)), test))
7563 return 1;
7564 }
7565 return 0;
7566 }
7567 \f
7568 /* Leaf functions and non-leaf functions have different needs. */
7569
7570 static const int
7571 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7572
7573 static const int
7574 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7575
7576 static const int *const reg_alloc_orders[] = {
7577 reg_leaf_alloc_order,
7578 reg_nonleaf_alloc_order};
7579
7580 void
7581 order_regs_for_local_alloc (void)
7582 {
7583 static int last_order_nonleaf = 1;
7584
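  /* Hard register 15 is %o7, which every call instruction writes, so its
     liveness serves as a proxy for "this function is not a leaf".  */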
7585 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7586 {
7587 last_order_nonleaf = !last_order_nonleaf;
7588 memcpy ((char *) reg_alloc_order,
7589 (const char *) reg_alloc_orders[last_order_nonleaf],
7590 FIRST_PSEUDO_REGISTER * sizeof (int));
7591 }
7592 }
7593 \f
7594 /* Return 1 if REG and MEM are legitimate enough to allow the various
7595 mem<-->reg splits to be run. */
7596
7597 int
7598 sparc_splitdi_legitimate (rtx reg, rtx mem)
7599 {
7600 /* Punt if we are here by mistake. */
7601 gcc_assert (reload_completed);
7602
7603 /* We must have an offsettable memory reference. */
7604 if (! offsettable_memref_p (mem))
7605 return 0;
7606
7607 /* If we have legitimate args for ldd/std, we do not want
7608 the split to happen. */
7609 if ((REGNO (reg) % 2) == 0
7610 && mem_min_alignment (mem, 8))
7611 return 0;
7612
7613 /* Success. */
7614 return 1;
7615 }
7616
7617 /* Return 1 if x and y are some kind of REG and they refer to
7618 different hard registers. This test is guaranteed to be
7619 run after reload. */
7620
7621 int
7622 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7623 {
7624 if (GET_CODE (x) != REG)
7625 return 0;
7626 if (GET_CODE (y) != REG)
7627 return 0;
7628 if (REGNO (x) == REGNO (y))
7629 return 0;
7630 return 1;
7631 }
7632
7633 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7634 This makes them candidates for using ldd and std insns.
7635
7636 Note reg1 and reg2 *must* be hard registers. */
7637
7638 int
7639 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7640 {
7641 /* We might have been passed a SUBREG. */
7642 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7643 return 0;
7644
7645 if (REGNO (reg1) % 2 != 0)
7646 return 0;
7647
  /* Integer ldd is deprecated in SPARC V9.  */
7649 if (TARGET_V9 && REGNO (reg1) < 32)
7650 return 0;
7651
7652 return (REGNO (reg1) == REGNO (reg2) - 1);
7653 }
7654
7655 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7656 an ldd or std insn.
7657
7658 This can only happen when addr1 and addr2, the addresses in mem1
7659 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7660 addr1 must also be aligned on a 64-bit boundary.
7661
   Also, if dependent_reg_rtx is not null, it must not be used to
   compute the address for mem1, i.e. we cannot optimize a sequence
   like:
7665 ld [%o0], %o0
7666 ld [%o0 + 4], %o1
7667 to
7668 ldd [%o0], %o0
7669 nor:
7670 ld [%g3 + 4], %g3
7671 ld [%g3], %g2
7672 to
7673 ldd [%g3], %g2
7674
7675 But, note that the transformation from:
7676 ld [%g2 + 4], %g3
7677 ld [%g2], %g2
7678 to
7679 ldd [%g2], %g2
7680 is perfectly fine. Thus, the peephole2 patterns always pass us
7681 the destination register of the first load, never the second one.
7682
7683 For stores we don't have a similar problem, so dependent_reg_rtx is
7684 NULL_RTX. */
7685
7686 int
7687 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7688 {
7689 rtx addr1, addr2;
7690 unsigned int reg1;
7691 HOST_WIDE_INT offset1;
7692
7693 /* The mems cannot be volatile. */
7694 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7695 return 0;
7696
7697 /* MEM1 should be aligned on a 64-bit boundary. */
7698 if (MEM_ALIGN (mem1) < 64)
7699 return 0;
7700
7701 addr1 = XEXP (mem1, 0);
7702 addr2 = XEXP (mem2, 0);
7703
7704 /* Extract a register number and offset (if used) from the first addr. */
7705 if (GET_CODE (addr1) == PLUS)
7706 {
7707 /* If not a REG, return zero. */
7708 if (GET_CODE (XEXP (addr1, 0)) != REG)
7709 return 0;
7710 else
7711 {
7712 reg1 = REGNO (XEXP (addr1, 0));
7713 /* The offset must be constant! */
7714 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7715 return 0;
7716 offset1 = INTVAL (XEXP (addr1, 1));
7717 }
7718 }
7719 else if (GET_CODE (addr1) != REG)
7720 return 0;
7721 else
7722 {
7723 reg1 = REGNO (addr1);
7724 /* This was a simple (mem (reg)) expression. Offset is 0. */
7725 offset1 = 0;
7726 }
7727
  /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7729 if (GET_CODE (addr2) != PLUS)
7730 return 0;
7731
7732 if (GET_CODE (XEXP (addr2, 0)) != REG
7733 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7734 return 0;
7735
7736 if (reg1 != REGNO (XEXP (addr2, 0)))
7737 return 0;
7738
7739 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7740 return 0;
7741
7742 /* The first offset must be evenly divisible by 8 to ensure the
7743 address is 64 bit aligned. */
7744 if (offset1 % 8 != 0)
7745 return 0;
7746
7747 /* The offset for the second addr must be 4 more than the first addr. */
7748 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7749 return 0;
7750
7751 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7752 instructions. */
7753 return 1;
7754 }
7755
7756 /* Return 1 if reg is a pseudo, or is the first register in
7757 a hard register pair. This makes it suitable for use in
7758 ldd and std insns. */
7759
7760 int
7761 register_ok_for_ldd (rtx reg)
7762 {
7763 /* We might have been passed a SUBREG. */
7764 if (!REG_P (reg))
7765 return 0;
7766
7767 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7768 return (REGNO (reg) % 2 == 0);
7769
7770 return 1;
7771 }
7772
7773 /* Return 1 if OP is a memory whose address is known to be
7774 aligned to 8-byte boundary, or a pseudo during reload.
7775 This makes it suitable for use in ldd and std insns. */
7776
7777 int
7778 memory_ok_for_ldd (rtx op)
7779 {
7780 if (MEM_P (op))
7781 {
7782 /* In 64-bit mode, we assume that the address is word-aligned. */
7783 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7784 return 0;
7785
7786 if ((reload_in_progress || reload_completed)
7787 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7788 return 0;
7789 }
7790 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7791 {
7792 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7793 return 0;
7794 }
7795 else
7796 return 0;
7797
7798 return 1;
7799 }
7800 \f
7801 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7802
7803 static bool
7804 sparc_print_operand_punct_valid_p (unsigned char code)
7805 {
7806 if (code == '#'
7807 || code == '*'
7808 || code == '('
7809 || code == ')'
7810 || code == '_'
7811 || code == '&')
7812 return true;
7813
7814 return false;
7815 }
7816
7817 /* Implement TARGET_PRINT_OPERAND.
7818 Print operand X (an rtx) in assembler syntax to file FILE.
7819 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7820 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7821
7822 static void
7823 sparc_print_operand (FILE *file, rtx x, int code)
7824 {
7825 switch (code)
7826 {
7827 case '#':
7828 /* Output an insn in a delay slot. */
7829 if (final_sequence)
7830 sparc_indent_opcode = 1;
7831 else
7832 fputs ("\n\t nop", file);
7833 return;
7834 case '*':
7835 /* Output an annul flag if there's nothing for the delay slot and we
7836 are optimizing. This is always used with '(' below.
7837 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7838 this is a dbx bug. So, we only do this when optimizing.
7839 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7840 Always emit a nop in case the next instruction is a branch. */
7841 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7842 fputs (",a", file);
7843 return;
7844 case '(':
7845 /* Output a 'nop' if there's nothing for the delay slot and we are
7846 not optimizing. This is always used with '*' above. */
7847 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7848 fputs ("\n\t nop", file);
7849 else if (final_sequence)
7850 sparc_indent_opcode = 1;
7851 return;
7852 case ')':
7853 /* Output the right displacement from the saved PC on function return.
7854 The caller may have placed an "unimp" insn immediately after the call
7855 so we have to account for it. This insn is used in the 32-bit ABI
7856 when calling a function that returns a non zero-sized structure. The
7857 64-bit ABI doesn't have it. Be careful to have this test be the same
7858 as that for the call. The exception is when sparc_std_struct_return
7859 is enabled, the psABI is followed exactly and the adjustment is made
7860 by the code in sparc_struct_value_rtx. The call emitted is the same
7861 when sparc_std_struct_return is enabled. */
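      /* Illustrative call sequence under that convention:

	   call  f
	    nop
	   unimp 12   ! low bits encode the size of the struct

	 where the callee returns to %o7+12, skipping the unimp insn,
	 instead of the usual %o7+8.  */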
7862 if (!TARGET_ARCH64
7863 && cfun->returns_struct
7864 && !sparc_std_struct_return
7865 && DECL_SIZE (DECL_RESULT (current_function_decl))
7866 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7867 == INTEGER_CST
7868 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7869 fputs ("12", file);
7870 else
7871 fputc ('8', file);
7872 return;
7873 case '_':
7874 /* Output the Embedded Medium/Anywhere code model base register. */
7875 fputs (EMBMEDANY_BASE_REG, file);
7876 return;
7877 case '&':
7878 /* Print some local dynamic TLS name. */
7879 assemble_name (file, get_some_local_dynamic_name ());
7880 return;
7881
7882 case 'Y':
7883 /* Adjust the operand to take into account a RESTORE operation. */
7884 if (GET_CODE (x) == CONST_INT)
7885 break;
7886 else if (GET_CODE (x) != REG)
7887 output_operand_lossage ("invalid %%Y operand");
7888 else if (REGNO (x) < 8)
7889 fputs (reg_names[REGNO (x)], file);
7890 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7891 fputs (reg_names[REGNO (x)-16], file);
7892 else
7893 output_operand_lossage ("invalid %%Y operand");
7894 return;
7895 case 'L':
7896 /* Print out the low order register name of a register pair. */
7897 if (WORDS_BIG_ENDIAN)
7898 fputs (reg_names[REGNO (x)+1], file);
7899 else
7900 fputs (reg_names[REGNO (x)], file);
7901 return;
7902 case 'H':
7903 /* Print out the high order register name of a register pair. */
7904 if (WORDS_BIG_ENDIAN)
7905 fputs (reg_names[REGNO (x)], file);
7906 else
7907 fputs (reg_names[REGNO (x)+1], file);
7908 return;
7909 case 'R':
7910 /* Print out the second register name of a register pair or quad.
7911 I.e., R (%o0) => %o1. */
7912 fputs (reg_names[REGNO (x)+1], file);
7913 return;
7914 case 'S':
7915 /* Print out the third register name of a register quad.
7916 I.e., S (%o0) => %o2. */
7917 fputs (reg_names[REGNO (x)+2], file);
7918 return;
7919 case 'T':
7920 /* Print out the fourth register name of a register quad.
7921 I.e., T (%o0) => %o3. */
7922 fputs (reg_names[REGNO (x)+3], file);
7923 return;
7924 case 'x':
7925 /* Print a condition code register. */
7926 if (REGNO (x) == SPARC_ICC_REG)
7927 {
7928 /* We don't handle CC[X]_NOOVmode because they're not supposed
7929 to occur here. */
7930 if (GET_MODE (x) == CCmode)
7931 fputs ("%icc", file);
7932 else if (GET_MODE (x) == CCXmode)
7933 fputs ("%xcc", file);
7934 else
7935 gcc_unreachable ();
7936 }
7937 else
7938 /* %fccN register */
7939 fputs (reg_names[REGNO (x)], file);
7940 return;
7941 case 'm':
7942 /* Print the operand's address only. */
7943 output_address (XEXP (x, 0));
7944 return;
7945 case 'r':
7946 /* In this case we need a register. Use %g0 if the
7947 operand is const0_rtx. */
7948 if (x == const0_rtx
7949 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7950 {
7951 fputs ("%g0", file);
7952 return;
7953 }
7954 else
7955 break;
7956
7957 case 'A':
7958 switch (GET_CODE (x))
7959 {
7960 case IOR: fputs ("or", file); break;
7961 case AND: fputs ("and", file); break;
7962 case XOR: fputs ("xor", file); break;
7963 default: output_operand_lossage ("invalid %%A operand");
7964 }
7965 return;
7966
7967 case 'B':
7968 switch (GET_CODE (x))
7969 {
7970 case IOR: fputs ("orn", file); break;
7971 case AND: fputs ("andn", file); break;
7972 case XOR: fputs ("xnor", file); break;
7973 default: output_operand_lossage ("invalid %%B operand");
7974 }
7975 return;
7976
7977 /* These are used by the conditional move instructions. */
7978 case 'c' :
7979 case 'C':
7980 {
7981 enum rtx_code rc = GET_CODE (x);
7982
7983 if (code == 'c')
7984 {
7985 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7986 if (mode == CCFPmode || mode == CCFPEmode)
7987 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7988 else
7989 rc = reverse_condition (GET_CODE (x));
7990 }
7991 switch (rc)
7992 {
7993 case NE: fputs ("ne", file); break;
7994 case EQ: fputs ("e", file); break;
7995 case GE: fputs ("ge", file); break;
7996 case GT: fputs ("g", file); break;
7997 case LE: fputs ("le", file); break;
7998 case LT: fputs ("l", file); break;
7999 case GEU: fputs ("geu", file); break;
8000 case GTU: fputs ("gu", file); break;
8001 case LEU: fputs ("leu", file); break;
8002 case LTU: fputs ("lu", file); break;
8003 case LTGT: fputs ("lg", file); break;
8004 case UNORDERED: fputs ("u", file); break;
8005 case ORDERED: fputs ("o", file); break;
8006 case UNLT: fputs ("ul", file); break;
8007 case UNLE: fputs ("ule", file); break;
8008 case UNGT: fputs ("ug", file); break;
8009 case UNGE: fputs ("uge", file); break;
8010 case UNEQ: fputs ("ue", file); break;
8011 default: output_operand_lossage (code == 'c'
8012 ? "invalid %%c operand"
8013 : "invalid %%C operand");
8014 }
8015 return;
8016 }
8017
8018 /* These are used by the movr instruction pattern. */
8019 case 'd':
8020 case 'D':
8021 {
8022 enum rtx_code rc = (code == 'd'
8023 ? reverse_condition (GET_CODE (x))
8024 : GET_CODE (x));
8025 switch (rc)
8026 {
8027 case NE: fputs ("ne", file); break;
8028 case EQ: fputs ("e", file); break;
8029 case GE: fputs ("gez", file); break;
8030 case LT: fputs ("lz", file); break;
8031 case LE: fputs ("lez", file); break;
8032 case GT: fputs ("gz", file); break;
8033 default: output_operand_lossage (code == 'd'
8034 ? "invalid %%d operand"
8035 : "invalid %%D operand");
8036 }
8037 return;
8038 }
8039
8040 case 'b':
8041 {
8042 /* Print a sign-extended character. */
8043 int i = trunc_int_for_mode (INTVAL (x), QImode);
8044 fprintf (file, "%d", i);
8045 return;
8046 }
8047
8048 case 'f':
8049 /* Operand must be a MEM; write its address. */
8050 if (GET_CODE (x) != MEM)
8051 output_operand_lossage ("invalid %%f operand");
8052 output_address (XEXP (x, 0));
8053 return;
8054
8055 case 's':
8056 {
8057 /* Print a sign-extended 32-bit value. */
8058 HOST_WIDE_INT i;
	if (GET_CODE (x) == CONST_INT)
	  i = INTVAL (x);
	else if (GET_CODE (x) == CONST_DOUBLE)
	  i = CONST_DOUBLE_LOW (x);
8063 else
8064 {
8065 output_operand_lossage ("invalid %%s operand");
8066 return;
8067 }
8068 i = trunc_int_for_mode (i, SImode);
8069 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8070 return;
8071 }
8072
8073 case 0:
8074 /* Do nothing special. */
8075 break;
8076
8077 default:
8078 /* Undocumented flag. */
8079 output_operand_lossage ("invalid operand output code");
8080 }
8081
8082 if (GET_CODE (x) == REG)
8083 fputs (reg_names[REGNO (x)], file);
8084 else if (GET_CODE (x) == MEM)
8085 {
8086 fputc ('[', file);
8087 /* Poor Sun assembler doesn't understand absolute addressing. */
8088 if (CONSTANT_P (XEXP (x, 0)))
8089 fputs ("%g0+", file);
8090 output_address (XEXP (x, 0));
8091 fputc (']', file);
8092 }
8093 else if (GET_CODE (x) == HIGH)
8094 {
8095 fputs ("%hi(", file);
8096 output_addr_const (file, XEXP (x, 0));
8097 fputc (')', file);
8098 }
8099 else if (GET_CODE (x) == LO_SUM)
8100 {
8101 sparc_print_operand (file, XEXP (x, 0), 0);
8102 if (TARGET_CM_MEDMID)
8103 fputs ("+%l44(", file);
8104 else
8105 fputs ("+%lo(", file);
8106 output_addr_const (file, XEXP (x, 1));
8107 fputc (')', file);
8108 }
8109 else if (GET_CODE (x) == CONST_DOUBLE
8110 && (GET_MODE (x) == VOIDmode
8111 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8112 {
8113 if (CONST_DOUBLE_HIGH (x) == 0)
8114 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8115 else if (CONST_DOUBLE_HIGH (x) == -1
8116 && CONST_DOUBLE_LOW (x) < 0)
8117 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8118 else
8119 output_operand_lossage ("long long constant not a valid immediate operand");
8120 }
8121 else if (GET_CODE (x) == CONST_DOUBLE)
8122 output_operand_lossage ("floating point constant not a valid immediate operand");
8123 else { output_addr_const (file, x); }
8124 }
8125
8126 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8127
8128 static void
8129 sparc_print_operand_address (FILE *file, rtx x)
8130 {
8131 register rtx base, index = 0;
8132 int offset = 0;
8133 register rtx addr = x;
8134
8135 if (REG_P (addr))
8136 fputs (reg_names[REGNO (addr)], file);
8137 else if (GET_CODE (addr) == PLUS)
8138 {
8139 if (CONST_INT_P (XEXP (addr, 0)))
8140 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8141 else if (CONST_INT_P (XEXP (addr, 1)))
8142 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8143 else
8144 base = XEXP (addr, 0), index = XEXP (addr, 1);
8145 if (GET_CODE (base) == LO_SUM)
8146 {
8147 gcc_assert (USE_AS_OFFSETABLE_LO10
8148 && TARGET_ARCH64
8149 && ! TARGET_CM_MEDMID);
8150 output_operand (XEXP (base, 0), 0);
8151 fputs ("+%lo(", file);
8152 output_address (XEXP (base, 1));
8153 fprintf (file, ")+%d", offset);
8154 }
8155 else
8156 {
8157 fputs (reg_names[REGNO (base)], file);
8158 if (index == 0)
8159 fprintf (file, "%+d", offset);
8160 else if (REG_P (index))
8161 fprintf (file, "+%s", reg_names[REGNO (index)]);
8162 else if (GET_CODE (index) == SYMBOL_REF
8163 || GET_CODE (index) == LABEL_REF
8164 || GET_CODE (index) == CONST)
8165 fputc ('+', file), output_addr_const (file, index);
8166 else gcc_unreachable ();
8167 }
8168 }
8169 else if (GET_CODE (addr) == MINUS
8170 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8171 {
8172 output_addr_const (file, XEXP (addr, 0));
8173 fputs ("-(", file);
8174 output_addr_const (file, XEXP (addr, 1));
8175 fputs ("-.)", file);
8176 }
8177 else if (GET_CODE (addr) == LO_SUM)
8178 {
8179 output_operand (XEXP (addr, 0), 0);
8180 if (TARGET_CM_MEDMID)
8181 fputs ("+%l44(", file);
8182 else
8183 fputs ("+%lo(", file);
8184 output_address (XEXP (addr, 1));
8185 fputc (')', file);
8186 }
8187 else if (flag_pic
8188 && GET_CODE (addr) == CONST
8189 && GET_CODE (XEXP (addr, 0)) == MINUS
8190 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8191 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8192 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8193 {
8194 addr = XEXP (addr, 0);
8195 output_addr_const (file, XEXP (addr, 0));
      /* Group the args of the second CONST in parentheses.  */
8197 fputs ("-(", file);
8198 /* Skip past the second CONST--it does nothing for us. */
8199 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8200 /* Close the parenthesis. */
8201 fputc (')', file);
8202 }
8203 else
8204 {
8205 output_addr_const (file, addr);
8206 }
8207 }
8208 \f
8209 /* Target hook for assembling integer objects. The sparc version has
8210 special handling for aligned DI-mode objects. */
8211
8212 static bool
8213 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8214 {
8215 /* ??? We only output .xword's for symbols and only then in environments
8216 where the assembler can handle them. */
8217 if (aligned_p && size == 8
8218 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8219 {
8220 if (TARGET_V9)
8221 {
8222 assemble_integer_with_op ("\t.xword\t", x);
8223 return true;
8224 }
8225 else
8226 {
8227 assemble_aligned_integer (4, const0_rtx);
8228 assemble_aligned_integer (4, x);
8229 return true;
8230 }
8231 }
8232 return default_assemble_integer (x, size, aligned_p);
8233 }
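
/* For instance (illustrative), an aligned 8-byte reference to SYM comes
   out as "\t.xword\tSYM" when targeting V9, and as a zero upper word
   followed by "\t.word\tSYM" otherwise.  */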
8234 \f
8235 /* Return the value of a code used in the .proc pseudo-op that says
8236 what kind of result this function returns. For non-C types, we pick
8237 the closest C type. */
8238
8239 #ifndef SHORT_TYPE_SIZE
8240 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8241 #endif
8242
8243 #ifndef INT_TYPE_SIZE
8244 #define INT_TYPE_SIZE BITS_PER_WORD
8245 #endif
8246
8247 #ifndef LONG_TYPE_SIZE
8248 #define LONG_TYPE_SIZE BITS_PER_WORD
8249 #endif
8250
8251 #ifndef LONG_LONG_TYPE_SIZE
8252 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8253 #endif
8254
8255 #ifndef FLOAT_TYPE_SIZE
8256 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8257 #endif
8258
8259 #ifndef DOUBLE_TYPE_SIZE
8260 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8261 #endif
8262
8263 #ifndef LONG_DOUBLE_TYPE_SIZE
8264 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8265 #endif
8266
8267 unsigned long
8268 sparc_type_code (register tree type)
8269 {
8270 register unsigned long qualifiers = 0;
8271 register unsigned shift;
8272
8273 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8274 setting more, since some assemblers will give an error for this. Also,
8275 we must be careful to avoid shifts of 32 bits or more to avoid getting
8276 unpredictable results. */
8277
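  /* Worked example (illustrative): for the C type "int **" the loop sees
     POINTER_TYPE at shift 6 and again at shift 8, then reaches the
     INTEGER_TYPE, yielding (1 << 6) | (1 << 8) | 4 == 0x144.  */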
8278 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8279 {
8280 switch (TREE_CODE (type))
8281 {
8282 case ERROR_MARK:
8283 return qualifiers;
8284
8285 case ARRAY_TYPE:
8286 qualifiers |= (3 << shift);
8287 break;
8288
8289 case FUNCTION_TYPE:
8290 case METHOD_TYPE:
8291 qualifiers |= (2 << shift);
8292 break;
8293
8294 case POINTER_TYPE:
8295 case REFERENCE_TYPE:
8296 case OFFSET_TYPE:
8297 qualifiers |= (1 << shift);
8298 break;
8299
8300 case RECORD_TYPE:
8301 return (qualifiers | 8);
8302
8303 case UNION_TYPE:
8304 case QUAL_UNION_TYPE:
8305 return (qualifiers | 9);
8306
8307 case ENUMERAL_TYPE:
8308 return (qualifiers | 10);
8309
8310 case VOID_TYPE:
8311 return (qualifiers | 16);
8312
8313 case INTEGER_TYPE:
8314 /* If this is a range type, consider it to be the underlying
8315 type. */
8316 if (TREE_TYPE (type) != 0)
8317 break;
8318
8319 /* Carefully distinguish all the standard types of C,
8320 without messing up if the language is not C. We do this by
8321 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8322 look at both the names and the above fields, but that's redundant.
8323 Any type whose size is between two C types will be considered
8324 to be the wider of the two types. Also, we do not have a
8325 special code to use for "long long", so anything wider than
8326 long is treated the same. Note that we can't distinguish
8327 between "int" and "long" in this code if they are the same
8328 size, but that's fine, since neither can the assembler. */
8329
8330 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8331 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8332
8333 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8334 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8335
8336 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8337 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8338
8339 else
8340 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8341
8342 case REAL_TYPE:
8343 /* If this is a range type, consider it to be the underlying
8344 type. */
8345 if (TREE_TYPE (type) != 0)
8346 break;
8347
8348 /* Carefully distinguish all the standard types of C,
8349 without messing up if the language is not C. */
8350
8351 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8352 return (qualifiers | 6);
8353
8354 else
8355 return (qualifiers | 7);
8356
8357 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8358 /* ??? We need to distinguish between double and float complex types,
8359 but I don't know how yet because I can't reach this code from
8360 existing front-ends. */
8361 return (qualifiers | 7); /* Who knows? */
8362
8363 case VECTOR_TYPE:
8364 case BOOLEAN_TYPE: /* Boolean truth value type. */
8365 case LANG_TYPE:
8366 case NULLPTR_TYPE:
8367 return qualifiers;
8368
8369 default:
8370 gcc_unreachable (); /* Not a type! */
8371 }
8372 }
8373
8374 return qualifiers;
8375 }
8376 \f
8377 /* Nested function support. */
8378
8379 /* Emit RTL insns to initialize the variable parts of a trampoline.
8380 FNADDR is an RTX for the address of the function's pure code.
8381 CXT is an RTX for the static chain value for the function.
8382
8383 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8384 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8385 (to store insns). This is a bit excessive. Perhaps a different
8386 mechanism would be better here.
8387
8388 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8389
8390 static void
8391 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8392 {
8393 /* SPARC 32-bit trampoline:
8394
8395 sethi %hi(fn), %g1
8396 sethi %hi(static), %g2
8397 jmp %g1+%lo(fn)
8398 or %g2, %lo(static), %g2
8399
8400 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8401 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8402 */
8403
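  /* For instance (illustrative), the first word below is assembled as
     0x03000000 | (fnaddr >> 10), i.e. the SETHI opcode with rd = %g1 and
     the high 22 bits of the function address as immediate; the third is
     0x81c06000 | (fnaddr & 0x3ff), i.e. "jmp %g1+%lo(fn)".  */
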
8404 emit_move_insn
8405 (adjust_address (m_tramp, SImode, 0),
8406 expand_binop (SImode, ior_optab,
8407 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8408 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8409 NULL_RTX, 1, OPTAB_DIRECT));
8410
8411 emit_move_insn
8412 (adjust_address (m_tramp, SImode, 4),
8413 expand_binop (SImode, ior_optab,
8414 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8415 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8416 NULL_RTX, 1, OPTAB_DIRECT));
8417
8418 emit_move_insn
8419 (adjust_address (m_tramp, SImode, 8),
8420 expand_binop (SImode, ior_optab,
8421 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8422 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8423 NULL_RTX, 1, OPTAB_DIRECT));
8424
8425 emit_move_insn
8426 (adjust_address (m_tramp, SImode, 12),
8427 expand_binop (SImode, ior_optab,
8428 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8429 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8430 NULL_RTX, 1, OPTAB_DIRECT));
8431
8432 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8433 aligned on a 16 byte boundary so one flush clears it all. */
8434 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8435 if (sparc_cpu != PROCESSOR_ULTRASPARC
8436 && sparc_cpu != PROCESSOR_ULTRASPARC3
8437 && sparc_cpu != PROCESSOR_NIAGARA
8438 && sparc_cpu != PROCESSOR_NIAGARA2
8439 && sparc_cpu != PROCESSOR_NIAGARA3
8440 && sparc_cpu != PROCESSOR_NIAGARA4)
8441 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8442
8443 /* Call __enable_execute_stack after writing onto the stack to make sure
8444 the stack address is accessible. */
8445 #ifdef HAVE_ENABLE_EXECUTE_STACK
8446 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8447 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8448 #endif
8449
8450 }
8451
8452 /* The 64-bit version is simpler because it makes more sense to load the
8453 values as "immediate" data out of the trampoline. It's also easier since
8454 we can read the PC without clobbering a register. */
8455
8456 static void
8457 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8458 {
8459 /* SPARC 64-bit trampoline:
8460
8461 rd %pc, %g1
8462 ldx [%g1+24], %g5
8463 jmp %g5
8464 ldx [%g1+16], %g5
8465 +16 bytes data
8466 */
8467
8468 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8469 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8470 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8471 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8472 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8473 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8474 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8475 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8476 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8477 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8478 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8479
8480 if (sparc_cpu != PROCESSOR_ULTRASPARC
8481 && sparc_cpu != PROCESSOR_ULTRASPARC3
8482 && sparc_cpu != PROCESSOR_NIAGARA
8483 && sparc_cpu != PROCESSOR_NIAGARA2
8484 && sparc_cpu != PROCESSOR_NIAGARA3
8485 && sparc_cpu != PROCESSOR_NIAGARA4)
8486 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8487
8488 /* Call __enable_execute_stack after writing onto the stack to make sure
8489 the stack address is accessible. */
8490 #ifdef HAVE_ENABLE_EXECUTE_STACK
8491 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8492 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8493 #endif
8494 }
8495
8496 /* Worker for TARGET_TRAMPOLINE_INIT. */
8497
8498 static void
8499 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8500 {
8501 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8502 cxt = force_reg (Pmode, cxt);
8503 if (TARGET_ARCH64)
8504 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8505 else
8506 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8507 }
8508 \f
8509 /* Adjust the cost of a scheduling dependency. Return the new cost of
8510 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8511
8512 static int
8513 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8514 {
8515 enum attr_type insn_type;
8516
8517 if (! recog_memoized (insn))
8518 return 0;
8519
8520 insn_type = get_attr_type (insn);
8521
8522 if (REG_NOTE_KIND (link) == 0)
8523 {
8524 /* Data dependency; DEP_INSN writes a register that INSN reads some
8525 cycles later. */
8526
      /* If a load, then the dependence must be on the memory address;
	 add an extra "cycle".  Note that the cost could be two cycles
	 if the reg was written late in an instruction group; we cannot
	 tell here.  */
8531 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8532 return cost + 3;
8533
8534 /* Get the delay only if the address of the store is the dependence. */
8535 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8536 {
	  rtx pat = PATTERN (insn);
8538 rtx dep_pat = PATTERN (dep_insn);
8539
8540 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8541 return cost; /* This should not happen! */
8542
8543 /* The dependency between the two instructions was on the data that
8544 is being stored. Assume that this implies that the address of the
8545 store is not dependent. */
8546 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8547 return cost;
8548
8549 return cost + 3; /* An approximation. */
8550 }
8551
8552 /* A shift instruction cannot receive its data from an instruction
8553 in the same cycle; add a one cycle penalty. */
8554 if (insn_type == TYPE_SHIFT)
8555 return cost + 3; /* Split before cascade into shift. */
8556 }
8557 else
8558 {
8559 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8560 INSN writes some cycles later. */
8561
8562 /* These are only significant for the fpu unit; writing a fp reg before
8563 the fpu has finished with it stalls the processor. */
8564
8565 /* Reusing an integer register causes no problems. */
8566 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8567 return 0;
8568 }
8569
8570 return cost;
8571 }
8572
8573 static int
8574 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8575 {
8576 enum attr_type insn_type, dep_type;
  rtx pat = PATTERN (insn);
8578 rtx dep_pat = PATTERN (dep_insn);
8579
8580 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8581 return cost;
8582
8583 insn_type = get_attr_type (insn);
8584 dep_type = get_attr_type (dep_insn);
8585
8586 switch (REG_NOTE_KIND (link))
8587 {
8588 case 0:
8589 /* Data dependency; DEP_INSN writes a register that INSN reads some
8590 cycles later. */
8591
8592 switch (insn_type)
8593 {
8594 case TYPE_STORE:
8595 case TYPE_FPSTORE:
8596 /* Get the delay iff the address of the store is the dependence. */
8597 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8598 return cost;
8599
8600 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8601 return cost;
8602 return cost + 3;
8603
8604 case TYPE_LOAD:
8605 case TYPE_SLOAD:
8606 case TYPE_FPLOAD:
	  /* If a load, then the dependence must be on the memory address.
	     If the addresses are not equal, then it might be a false
	     dependency.  */
8609 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8610 {
8611 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8612 || GET_CODE (SET_DEST (dep_pat)) != MEM
8613 || GET_CODE (SET_SRC (pat)) != MEM
8614 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8615 XEXP (SET_SRC (pat), 0)))
8616 return cost + 2;
8617
8618 return cost + 8;
8619 }
8620 break;
8621
8622 case TYPE_BRANCH:
8623 /* Compare to branch latency is 0. There is no benefit from
8624 separating compare and branch. */
8625 if (dep_type == TYPE_COMPARE)
8626 return 0;
8627 /* Floating point compare to branch latency is less than
8628 compare to conditional move. */
8629 if (dep_type == TYPE_FPCMP)
8630 return cost - 1;
8631 break;
8632 default:
8633 break;
8634 }
8635 break;
8636
8637 case REG_DEP_ANTI:
8638 /* Anti-dependencies only penalize the fpu unit. */
8639 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8640 return 0;
8641 break;
8642
8643 default:
8644 break;
8645 }
8646
8647 return cost;
8648 }
8649
8650 static int
8651 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8652 {
8653 switch (sparc_cpu)
8654 {
8655 case PROCESSOR_SUPERSPARC:
8656 cost = supersparc_adjust_cost (insn, link, dep, cost);
8657 break;
8658 case PROCESSOR_HYPERSPARC:
8659 case PROCESSOR_SPARCLITE86X:
8660 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8661 break;
8662 default:
8663 break;
8664 }
8665 return cost;
8666 }
8667
8668 static void
8669 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8670 int sched_verbose ATTRIBUTE_UNUSED,
8671 int max_ready ATTRIBUTE_UNUSED)
8672 {}
8673
8674 static int
8675 sparc_use_sched_lookahead (void)
8676 {
8677 if (sparc_cpu == PROCESSOR_NIAGARA
8678 || sparc_cpu == PROCESSOR_NIAGARA2
8679 || sparc_cpu == PROCESSOR_NIAGARA3
8680 || sparc_cpu == PROCESSOR_NIAGARA4)
8681 return 0;
8682 if (sparc_cpu == PROCESSOR_ULTRASPARC
8683 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8684 return 4;
8685 if ((1 << sparc_cpu) &
8686 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8687 (1 << PROCESSOR_SPARCLITE86X)))
8688 return 3;
8689 return 0;
8690 }
8691
8692 static int
8693 sparc_issue_rate (void)
8694 {
8695 switch (sparc_cpu)
8696 {
8697 case PROCESSOR_NIAGARA:
8698 case PROCESSOR_NIAGARA2:
8699 case PROCESSOR_NIAGARA3:
8700 case PROCESSOR_NIAGARA4:
8701 default:
8702 return 1;
8703 case PROCESSOR_V9:
8704 /* Assume V9 processors are capable of at least dual-issue. */
8705 return 2;
8706 case PROCESSOR_SUPERSPARC:
8707 return 3;
8708 case PROCESSOR_HYPERSPARC:
8709 case PROCESSOR_SPARCLITE86X:
8710 return 2;
8711 case PROCESSOR_ULTRASPARC:
8712 case PROCESSOR_ULTRASPARC3:
8713 return 4;
8714 }
8715 }
8716
8717 static int
8718 set_extends (rtx insn)
8719 {
8720 register rtx pat = PATTERN (insn);
8721
8722 switch (GET_CODE (SET_SRC (pat)))
8723 {
      /* Load and some shift instructions zero-extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits.  */
    case HIGH:
      /* LO_SUM is used with sethi; sethi clears the high bits and the
	 values used with LO_SUM are non-negative.  */
    case LO_SUM:
      /* A store-flag insn stores 0 or 1.  */
8733 case LT: case LTU:
8734 case GT: case GTU:
8735 case LE: case LEU:
8736 case GE: case GEU:
8737 case EQ:
8738 case NE:
8739 return 1;
8740 case AND:
8741 {
8742 rtx op0 = XEXP (SET_SRC (pat), 0);
8743 rtx op1 = XEXP (SET_SRC (pat), 1);
8744 if (GET_CODE (op1) == CONST_INT)
8745 return INTVAL (op1) >= 0;
8746 if (GET_CODE (op0) != REG)
8747 return 0;
8748 if (sparc_check_64 (op0, insn) == 1)
8749 return 1;
8750 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8751 }
8752 case IOR:
8753 case XOR:
8754 {
8755 rtx op0 = XEXP (SET_SRC (pat), 0);
8756 rtx op1 = XEXP (SET_SRC (pat), 1);
8757 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8758 return 0;
8759 if (GET_CODE (op1) == CONST_INT)
8760 return INTVAL (op1) >= 0;
8761 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8762 }
8763 case LSHIFTRT:
8764 return GET_MODE (SET_SRC (pat)) == SImode;
8765 /* Positive integers leave the high bits zero. */
8766 case CONST_DOUBLE:
8767 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8768 case CONST_INT:
8769 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8770 case ASHIFTRT:
8771 case SIGN_EXTEND:
8772 return - (GET_MODE (SET_SRC (pat)) == SImode);
8773 case REG:
8774 return sparc_check_64 (SET_SRC (pat), insn);
8775 default:
8776 return 0;
8777 }
8778 }
8779
8780 /* We _ought_ to have only one kind per function, but... */
8781 static GTY(()) rtx sparc_addr_diff_list;
8782 static GTY(()) rtx sparc_addr_list;
8783
8784 void
8785 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8786 {
8787 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8788 if (diff)
8789 sparc_addr_diff_list
8790 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8791 else
8792 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8793 }
8794
8795 static void
8796 sparc_output_addr_vec (rtx vec)
8797 {
8798 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8799 int idx, vlen = XVECLEN (body, 0);
8800
8801 #ifdef ASM_OUTPUT_ADDR_VEC_START
8802 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8803 #endif
8804
8805 #ifdef ASM_OUTPUT_CASE_LABEL
8806 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8807 NEXT_INSN (lab));
8808 #else
8809 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8810 #endif
8811
8812 for (idx = 0; idx < vlen; idx++)
8813 {
8814 ASM_OUTPUT_ADDR_VEC_ELT
8815 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8816 }
8817
8818 #ifdef ASM_OUTPUT_ADDR_VEC_END
8819 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8820 #endif
8821 }
8822
8823 static void
8824 sparc_output_addr_diff_vec (rtx vec)
8825 {
8826 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8827 rtx base = XEXP (XEXP (body, 0), 0);
8828 int idx, vlen = XVECLEN (body, 1);
8829
8830 #ifdef ASM_OUTPUT_ADDR_VEC_START
8831 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8832 #endif
8833
8834 #ifdef ASM_OUTPUT_CASE_LABEL
8835 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8836 NEXT_INSN (lab));
8837 #else
8838 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8839 #endif
8840
8841 for (idx = 0; idx < vlen; idx++)
8842 {
8843 ASM_OUTPUT_ADDR_DIFF_ELT
8844 (asm_out_file,
8845 body,
8846 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8847 CODE_LABEL_NUMBER (base));
8848 }
8849
8850 #ifdef ASM_OUTPUT_ADDR_VEC_END
8851 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8852 #endif
8853 }
8854
8855 static void
8856 sparc_output_deferred_case_vectors (void)
8857 {
8858 rtx t;
8859 int align;
8860
8861 if (sparc_addr_list == NULL_RTX
8862 && sparc_addr_diff_list == NULL_RTX)
8863 return;
8864
8865 /* Align to cache line in the function's code section. */
8866 switch_to_section (current_function_section ());
8867
8868 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8869 if (align > 0)
8870 ASM_OUTPUT_ALIGN (asm_out_file, align);
8871
8872 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8873 sparc_output_addr_vec (XEXP (t, 0));
8874 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8875 sparc_output_addr_diff_vec (XEXP (t, 0));
8876
8877 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8878 }
8879
8880 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8881 unknown. Return 1 if the high bits are zero, -1 if the register is
8882 sign extended. */
8883 int
8884 sparc_check_64 (rtx x, rtx insn)
8885 {
8886 /* If a register is set only once it is safe to ignore insns this
8887 code does not know how to handle. The loop will either recognize
8888 the single set and return the correct value or fail to recognize
8889 it and return 0. */
8890 int set_once = 0;
8891 rtx y = x;
8892
8893 gcc_assert (GET_CODE (x) == REG);
8894
8895 if (GET_MODE (x) == DImode)
8896 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8897
8898 if (flag_expensive_optimizations
8899 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8900 set_once = 1;
8901
8902 if (insn == 0)
8903 {
8904 if (set_once)
8905 insn = get_last_insn_anywhere ();
8906 else
8907 return 0;
8908 }
8909
8910 while ((insn = PREV_INSN (insn)))
8911 {
8912 switch (GET_CODE (insn))
8913 {
8914 case JUMP_INSN:
8915 case NOTE:
8916 break;
8917 case CODE_LABEL:
8918 case CALL_INSN:
8919 default:
8920 if (! set_once)
8921 return 0;
8922 break;
8923 case INSN:
8924 {
8925 rtx pat = PATTERN (insn);
8926 if (GET_CODE (pat) != SET)
8927 return 0;
8928 if (rtx_equal_p (x, SET_DEST (pat)))
8929 return set_extends (insn);
8930 if (y && rtx_equal_p (y, SET_DEST (pat)))
8931 return set_extends (insn);
8932 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8933 return 0;
8934 }
8935 }
8936 }
8937 return 0;
8938 }
8939
/* Return the assembly code to perform a DImode shift using
   a 64-bit global or out register on SPARC-V8+.  */
8942 const char *
8943 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8944 {
8945 static char asm_code[60];
8946
8947 /* The scratch register is only required when the destination
8948 register is not a 64-bit global or out register. */
8949 if (which_alternative != 2)
8950 operands[3] = operands[0];
8951
8952 /* We can only shift by constants <= 63. */
8953 if (GET_CODE (operands[2]) == CONST_INT)
8954 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8955
8956 if (GET_CODE (operands[1]) == CONST_INT)
8957 {
8958 output_asm_insn ("mov\t%1, %3", operands);
8959 }
8960 else
8961 {
8962 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8963 if (sparc_check_64 (operands[1], insn) <= 0)
8964 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8965 output_asm_insn ("or\t%L1, %3, %3", operands);
8966 }
8967
  strcpy (asm_code, opcode);
8969
8970 if (which_alternative != 2)
8971 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8972 else
8973 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8974 }
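
/* E.g. (illustrative) for OPCODE "sllx", the scratch-register alternative
   and %g1 as the scratch, the routine above emits roughly

     sllx  %H1, 32, %g1
     srl   %L1, 0, %L1
     or    %L1, %g1, %g1
     sllx  %g1, %2, %g1
     srlx  %g1, 32, %H0
     mov   %g1, %L0

   i.e. it assembles the 64-bit source in the scratch register, shifts it
   there, and splits the result back into the high:low pair.  */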
8975 \f
8976 /* Output rtl to increment the profiler label LABELNO
8977 for profiling a function entry. */
8978
8979 void
8980 sparc_profile_hook (int labelno)
8981 {
8982 char buf[32];
8983 rtx lab, fun;
8984
8985 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8986 if (NO_PROFILE_COUNTERS)
8987 {
8988 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8989 }
8990 else
8991 {
8992 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8993 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8994 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8995 }
8996 }
8997 \f
8998 #ifdef TARGET_SOLARIS
8999 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9000
9001 static void
9002 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9003 tree decl ATTRIBUTE_UNUSED)
9004 {
  if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
9006 {
9007 solaris_elf_asm_comdat_section (name, flags, decl);
9008 return;
9009 }
9010
9011 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9012
9013 if (!(flags & SECTION_DEBUG))
9014 fputs (",#alloc", asm_out_file);
9015 if (flags & SECTION_WRITE)
9016 fputs (",#write", asm_out_file);
9017 if (flags & SECTION_TLS)
9018 fputs (",#tls", asm_out_file);
9019 if (flags & SECTION_CODE)
9020 fputs (",#execinstr", asm_out_file);
9021
9022 /* ??? Handle SECTION_BSS. */
9023
9024 fputc ('\n', asm_out_file);
9025 }
9026 #endif /* TARGET_SOLARIS */
9027
9028 /* We do not allow indirect calls to be optimized into sibling calls.
9029
9030 We cannot use sibling calls when delayed branches are disabled
9031 because they will likely require the call delay slot to be filled.
9032
9033 Also, on SPARC 32-bit we cannot emit a sibling call when the
9034 current function returns a structure. This is because the "unimp
9035 after call" convention would cause the callee to return to the
9036 wrong place. The generic code already disallows cases where the
9037 function being called returns a structure.
9038
9039 It may seem strange how this last case could occur. Usually there
9040 is code after the call which jumps to epilogue code which dumps the
9041 return value into the struct return area. That ought to invalidate
9042 the sibling call right? Well, in the C++ case we can end up passing
9043 the pointer to the struct return area to a constructor (which returns
9044 void) and then nothing else happens. Such a sibling call would look
9045 valid without the added check here.
9046
9047 VxWorks PIC PLT entries require the global pointer to be initialized
9048 on entry. We therefore can't emit sibling calls to them. */
9049 static bool
9050 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9051 {
9052 return (decl
9053 && flag_delayed_branch
9054 && (TARGET_ARCH64 || ! cfun->returns_struct)
9055 && !(TARGET_VXWORKS_RTP
9056 && flag_pic
9057 && !targetm.binds_local_p (decl)));
9058 }
9059 \f
9060 /* libfunc renaming. */
9061
9062 static void
9063 sparc_init_libfuncs (void)
9064 {
9065 if (TARGET_ARCH32)
9066 {
9067 /* Use the subroutines that Sun's library provides for integer
9068 multiply and divide. The `*' prevents an underscore from
9069 being prepended by the compiler. .umul is a little faster
9070 than .mul. */
9071 set_optab_libfunc (smul_optab, SImode, "*.umul");
9072 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9073 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9074 set_optab_libfunc (smod_optab, SImode, "*.rem");
9075 set_optab_libfunc (umod_optab, SImode, "*.urem");
9076
      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
9078 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9079 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9080 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9081 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9082 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9083
      /* We can define the TFmode sqrt optab only if TARGET_FPU.  With
	 soft-float, the SFmode and DFmode sqrt instructions are absent,
	 so the compiler would otherwise route calls to the built-in
	 function sqrt through the TFmode sqrt optab, which fails.  */
9089 if (TARGET_FPU)
9090 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9091
9092 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9093 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9094 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9095 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9096 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9097 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9098
9099 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9100 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9101 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9102 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9103
9104 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9105 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9106 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9107 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9108
9109 if (DITF_CONVERSION_LIBFUNCS)
9110 {
9111 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9112 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9113 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9114 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9115 }
9116
9117 if (SUN_CONVERSION_LIBFUNCS)
9118 {
9119 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9120 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9121 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9122 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9123 }
9124 }
9125 if (TARGET_ARCH64)
9126 {
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
9128 do not exist in the library. Make sure the compiler does not
9129 emit calls to them by accident. (It should always use the
9130 hardware instructions.) */
9131 set_optab_libfunc (smul_optab, SImode, 0);
9132 set_optab_libfunc (sdiv_optab, SImode, 0);
9133 set_optab_libfunc (udiv_optab, SImode, 0);
9134 set_optab_libfunc (smod_optab, SImode, 0);
9135 set_optab_libfunc (umod_optab, SImode, 0);
9136
9137 if (SUN_INTEGER_MULTIPLY_64)
9138 {
9139 set_optab_libfunc (smul_optab, DImode, "__mul64");
9140 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9141 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9142 set_optab_libfunc (smod_optab, DImode, "__rem64");
9143 set_optab_libfunc (umod_optab, DImode, "__urem64");
9144 }
9145
9146 if (SUN_CONVERSION_LIBFUNCS)
9147 {
9148 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9149 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9150 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9151 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9152 }
9153 }
9154 }
9155 \f
static tree
def_builtin (const char *name, int code, tree type)
{
  return add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
			       NULL_TREE);
}

static tree
def_builtin_const (const char *name, int code, tree type)
{
  tree t = def_builtin (name, code, type);

  if (t)
    TREE_READONLY (t) = 1;

  return t;
}
9171
9172 /* Implement the TARGET_INIT_BUILTINS target hook.
9173 Create builtin functions for special SPARC instructions. */
9174
9175 static void
9176 sparc_init_builtins (void)
9177 {
9178 if (TARGET_VIS)
9179 sparc_vis_init_builtins ();
9180 }
9181
9182 /* Create builtin functions for VIS 1.0 instructions. */
9183
9184 static void
9185 sparc_vis_init_builtins (void)
9186 {
9187 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9188 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9189 tree v4hi = build_vector_type (intHI_type_node, 4);
9190 tree v2hi = build_vector_type (intHI_type_node, 2);
9191 tree v2si = build_vector_type (intSI_type_node, 2);
9192 tree v1si = build_vector_type (intSI_type_node, 1);
9193
9194 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9195 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9196 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9197 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9198 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9199 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9200 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9201 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9202 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9203 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9204 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9205 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9206 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9207 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9208 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9209 v8qi, v8qi,
9210 intDI_type_node, 0);
9211 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9212 v8qi, v8qi, 0);
9213 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9214 v8qi, v8qi, 0);
9215 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9216 intDI_type_node,
9217 intDI_type_node, 0);
9218 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9219 intSI_type_node,
9220 intSI_type_node, 0);
9221 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9222 ptr_type_node,
9223 intSI_type_node, 0);
9224 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9225 ptr_type_node,
9226 intDI_type_node, 0);
9227 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9228 ptr_type_node,
9229 ptr_type_node, 0);
9230 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9231 ptr_type_node,
9232 ptr_type_node, 0);
9233 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9234 v4hi, v4hi, 0);
9235 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9236 v2si, v2si, 0);
9237 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9238 v4hi, v4hi, 0);
9239 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9240 v2si, v2si, 0);
9241 tree void_ftype_di = build_function_type_list (void_type_node,
9242 intDI_type_node, 0);
9243 tree di_ftype_void = build_function_type_list (intDI_type_node,
9244 void_type_node, 0);
9245 tree void_ftype_si = build_function_type_list (void_type_node,
9246 intSI_type_node, 0);
9247 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9248 float_type_node,
9249 float_type_node, 0);
9250 tree df_ftype_df_df = build_function_type_list (double_type_node,
9251 double_type_node,
9252 double_type_node, 0);
9253
9254 /* Packing and expanding vectors. */
9255 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9256 v4qi_ftype_v4hi);
9257 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9258 v8qi_ftype_v2si_v8qi);
9259 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9260 v2hi_ftype_v2si);
9261 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9262 v4hi_ftype_v4qi);
9263 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9264 v8qi_ftype_v4qi_v4qi);
9265
9266 /* Multiplications. */
9267 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9268 v4hi_ftype_v4qi_v4hi);
9269 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9270 v4hi_ftype_v4qi_v2hi);
9271 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9272 v4hi_ftype_v4qi_v2hi);
9273 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9274 v4hi_ftype_v8qi_v4hi);
9275 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9276 v4hi_ftype_v8qi_v4hi);
9277 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9278 v2si_ftype_v4qi_v2hi);
9279 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9280 v2si_ftype_v4qi_v2hi);
9281
9282 /* Data aligning. */
9283 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9284 v4hi_ftype_v4hi_v4hi);
9285 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9286 v8qi_ftype_v8qi_v8qi);
9287 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9288 v2si_ftype_v2si_v2si);
9289 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
9290 di_ftype_di_di);
9291
9292 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9293 void_ftype_di);
9294 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9295 di_ftype_void);
9296
9297 if (TARGET_ARCH64)
9298 {
9299 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9300 ptr_ftype_ptr_di);
9301 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9302 ptr_ftype_ptr_di);
9303 }
9304 else
9305 {
9306 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9307 ptr_ftype_ptr_si);
9308 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9309 ptr_ftype_ptr_si);
9310 }
9311
9312 /* Pixel distance. */
9313 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9314 di_ftype_v8qi_v8qi_di);
9315
9316 /* Edge handling. */
9317 if (TARGET_ARCH64)
9318 {
9319 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9320 di_ftype_ptr_ptr);
9321 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9322 di_ftype_ptr_ptr);
9323 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9324 di_ftype_ptr_ptr);
9325 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9326 di_ftype_ptr_ptr);
9327 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9328 di_ftype_ptr_ptr);
9329 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9330 di_ftype_ptr_ptr);
9331 if (TARGET_VIS2)
9332 {
9333 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9334 di_ftype_ptr_ptr);
9335 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9336 di_ftype_ptr_ptr);
9337 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9338 di_ftype_ptr_ptr);
9339 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9340 di_ftype_ptr_ptr);
9341 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9342 di_ftype_ptr_ptr);
9343 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9344 di_ftype_ptr_ptr);
9345 }
9346 }
9347 else
9348 {
9349 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9350 si_ftype_ptr_ptr);
9351 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9352 si_ftype_ptr_ptr);
9353 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9354 si_ftype_ptr_ptr);
9355 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9356 si_ftype_ptr_ptr);
9357 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9358 si_ftype_ptr_ptr);
9359 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9360 si_ftype_ptr_ptr);
9361 if (TARGET_VIS2)
9362 {
9363 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9364 si_ftype_ptr_ptr);
9365 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9366 si_ftype_ptr_ptr);
9367 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9368 si_ftype_ptr_ptr);
9369 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9370 si_ftype_ptr_ptr);
9371 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9372 si_ftype_ptr_ptr);
9373 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9374 si_ftype_ptr_ptr);
9375 }
9376 }
9377
9378 /* Pixel compare. */
9379 if (TARGET_ARCH64)
9380 {
9381 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9382 di_ftype_v4hi_v4hi);
9383 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9384 di_ftype_v2si_v2si);
9385 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9386 di_ftype_v4hi_v4hi);
9387 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9388 di_ftype_v2si_v2si);
9389 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9390 di_ftype_v4hi_v4hi);
9391 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9392 di_ftype_v2si_v2si);
9393 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9394 di_ftype_v4hi_v4hi);
9395 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9396 di_ftype_v2si_v2si);
9397 }
9398 else
9399 {
9400 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9401 si_ftype_v4hi_v4hi);
9402 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9403 si_ftype_v2si_v2si);
9404 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9405 si_ftype_v4hi_v4hi);
9406 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9407 si_ftype_v2si_v2si);
9408 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9409 si_ftype_v4hi_v4hi);
9410 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9411 si_ftype_v2si_v2si);
9412 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9413 si_ftype_v4hi_v4hi);
9414 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9415 si_ftype_v2si_v2si);
9416 }
9417
9418 /* Addition and subtraction. */
9419 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9420 v4hi_ftype_v4hi_v4hi);
9421 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9422 v2hi_ftype_v2hi_v2hi);
9423 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9424 v2si_ftype_v2si_v2si);
9425 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addsi3,
9426 v1si_ftype_v1si_v1si);
9427 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9428 v4hi_ftype_v4hi_v4hi);
9429 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9430 v2hi_ftype_v2hi_v2hi);
9431 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9432 v2si_ftype_v2si_v2si);
9433 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subsi3,
9434 v1si_ftype_v1si_v1si);
9435
9436 /* Three-dimensional array addressing. */
9437 if (TARGET_ARCH64)
9438 {
9439 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9440 di_ftype_di_di);
9441 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9442 di_ftype_di_di);
9443 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9444 di_ftype_di_di);
9445 }
9446 else
9447 {
9448 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9449 si_ftype_si_si);
9450 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9451 si_ftype_si_si);
9452 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9453 si_ftype_si_si);
9454 }
9455
9456 if (TARGET_VIS2)
9457 {
      /* Byte mask and shuffle.  */
9459 if (TARGET_ARCH64)
9460 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9461 di_ftype_di_di);
9462 else
9463 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9464 si_ftype_si_si);
9465 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9466 v4hi_ftype_v4hi_v4hi);
9467 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9468 v8qi_ftype_v8qi_v8qi);
9469 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9470 v2si_ftype_v2si_v2si);
9471 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshuffledi_vis,
9472 di_ftype_di_di);
9473 }
9474
9475 if (TARGET_VIS3)
9476 {
9477 if (TARGET_ARCH64)
9478 {
9479 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9480 void_ftype_di);
9481 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9482 void_ftype_di);
9483 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9484 void_ftype_di);
9485 }
9486 else
9487 {
9488 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9489 void_ftype_si);
9490 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9491 void_ftype_si);
9492 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9493 void_ftype_si);
9494 }
9495
9496 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9497 v4hi_ftype_v4hi_v4hi);
9498
9499 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9500 v4hi_ftype_v4hi_v4hi);
9501 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9502 v4hi_ftype_v4hi_v4hi);
9503 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9504 v4hi_ftype_v4hi_v4hi);
9505 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9506 v4hi_ftype_v4hi_v4hi);
9507 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9508 v2si_ftype_v2si_v2si);
9509 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9510 v2si_ftype_v2si_v2si);
9511 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9512 v2si_ftype_v2si_v2si);
9513 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9514 v2si_ftype_v2si_v2si);
9515
9516 if (TARGET_ARCH64)
9517 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9518 di_ftype_v8qi_v8qi);
9519 else
9520 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9521 si_ftype_v8qi_v8qi);
9522
9523 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9524 v4hi_ftype_v4hi_v4hi);
9525 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9526 di_ftype_di_di);
9527 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9528 di_ftype_di_di);
9529
9530 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9531 v4hi_ftype_v4hi_v4hi);
9532 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9533 v2hi_ftype_v2hi_v2hi);
9534 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9535 v4hi_ftype_v4hi_v4hi);
9536 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9537 v2hi_ftype_v2hi_v2hi);
9538 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9539 v2si_ftype_v2si_v2si);
9540 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddsi3,
9541 v1si_ftype_v1si_v1si);
9542 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9543 v2si_ftype_v2si_v2si);
9544 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubsi3,
9545 v1si_ftype_v1si_v1si);
9546
9547 if (TARGET_ARCH64)
9548 {
9549 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9550 di_ftype_v8qi_v8qi);
9551 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9552 di_ftype_v8qi_v8qi);
9553 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9554 di_ftype_v8qi_v8qi);
9555 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9556 di_ftype_v8qi_v8qi);
9557 }
9558 else
9559 {
9560 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9561 si_ftype_v8qi_v8qi);
9562 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9563 si_ftype_v8qi_v8qi);
9564 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9565 si_ftype_v8qi_v8qi);
9566 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9567 si_ftype_v8qi_v8qi);
9568 }
9569
9570 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9571 sf_ftype_sf_sf);
9572 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9573 df_ftype_df_df);
9574 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9575 sf_ftype_sf_sf);
9576 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9577 df_ftype_df_df);
9578 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9579 sf_ftype_sf_sf);
9580 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9581 df_ftype_df_df);
9582
9583 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9584 di_ftype_di_di);
9585 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9586 di_ftype_di_di);
9587 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9588 di_ftype_di_di);
9589 }
9590 }
9591
/* Implement the TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */
9594
9595 static rtx
9596 sparc_expand_builtin (tree exp, rtx target,
9597 rtx subtarget ATTRIBUTE_UNUSED,
9598 enum machine_mode tmode ATTRIBUTE_UNUSED,
9599 int ignore ATTRIBUTE_UNUSED)
9600 {
9601 tree arg;
9602 call_expr_arg_iterator iter;
9603 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9604 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9605 rtx pat, op[4];
9606 int arg_count = 0;
9607 bool nonvoid;
9608
9609 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9610
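  /* op[0] receives the insn's destination when the builtin returns a
     value; the expanded arguments always start at op[1].  For a void
     builtin there is no destination operand, hence the -!nonvoid
     adjustment of the insn operand index below.  */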
9611 if (nonvoid)
9612 {
9613 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9614 if (!target
9615 || GET_MODE (target) != tmode
9616 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9617 op[0] = gen_reg_rtx (tmode);
9618 else
9619 op[0] = target;
9620 }
9621 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9622 {
9623 const struct insn_operand_data *insn_op;
9624 int idx;
9625
9626 if (arg == error_mark_node)
9627 return NULL_RTX;
9628
9629 arg_count++;
9630 idx = arg_count - !nonvoid;
9631 insn_op = &insn_data[icode].operand[idx];
9632 op[arg_count] = expand_normal (arg);
9633
9634 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9635 insn_op->mode))
9636 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9637 }
9638
9639 switch (arg_count)
9640 {
9641 case 0:
9642 pat = GEN_FCN (icode) (op[0]);
9643 break;
9644 case 1:
9645 if (nonvoid)
9646 pat = GEN_FCN (icode) (op[0], op[1]);
9647 else
9648 pat = GEN_FCN (icode) (op[1]);
9649 break;
9650 case 2:
9651 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9652 break;
9653 case 3:
9654 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9655 break;
9656 default:
9657 gcc_unreachable ();
9658 }
9659
9660 if (!pat)
9661 return NULL_RTX;
9662
9663 emit_insn (pat);
9664
9665 if (nonvoid)
9666 return op[0];
9667 else
9668 return const0_rtx;
9669 }
9670
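/* Subroutine of sparc_fold_builtin: compute (E8 * E16 + 128) / 256,
   i.e. the product of an 8-bit and a 16-bit element scaled down by 256
   with rounding, as one element of the VIS fmul8x16 family computes it.
   For example, E8 = 250 and E16 = 4660 give
   (250 * 4660 + 128) / 256 = 4551.  */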
9671 static int
9672 sparc_vis_mul8x16 (int e8, int e16)
9673 {
9674 return (e8 * e16 + 128) / 256;
9675 }
9676
/* Multiply the vector elements in ELTS0 by the elements in ELTS1, as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists
   must be integer constants.  Return a tree list with the results of the
   multiplications; each element in the list has type INNER_TYPE.  */
9681
9682 static tree
9683 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
9684 {
9685 tree n_elts = NULL_TREE;
9686 int scale;
9687
9688 switch (fncode)
9689 {
9690 case CODE_FOR_fmul8x16_vis:
9691 for (; elts0 && elts1;
9692 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9693 {
9694 int val
9695 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9696 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
9697 n_elts = tree_cons (NULL_TREE,
9698 build_int_cst (inner_type, val),
9699 n_elts);
9700 }
9701 break;
9702
9703 case CODE_FOR_fmul8x16au_vis:
9704 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9705
9706 for (; elts0; elts0 = TREE_CHAIN (elts0))
9707 {
9708 int val
9709 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9710 scale);
9711 n_elts = tree_cons (NULL_TREE,
9712 build_int_cst (inner_type, val),
9713 n_elts);
9714 }
9715 break;
9716
9717 case CODE_FOR_fmul8x16al_vis:
9718 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
9719
9720 for (; elts0; elts0 = TREE_CHAIN (elts0))
9721 {
9722 int val
9723 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9724 scale);
9725 n_elts = tree_cons (NULL_TREE,
9726 build_int_cst (inner_type, val),
9727 n_elts);
9728 }
9729 break;
9730
9731 default:
9732 gcc_unreachable ();
9733 }
9734
  return nreverse (n_elts);
}

/* Implement the TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true, the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */
9742
9743 static tree
9744 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
9745 tree *args, bool ignore)
9746 {
9747 tree arg0, arg1, arg2;
9748 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
9749 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
9750
9751 if (ignore)
9752 {
9753 /* Note that a switch statement instead of the sequence of tests would
9754 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
9755 and that would yield multiple alternatives with identical values. */
9756 if (icode == CODE_FOR_alignaddrsi_vis
9757 || icode == CODE_FOR_alignaddrdi_vis
9758 || icode == CODE_FOR_wrgsr_vis
9759 || icode == CODE_FOR_bmasksi_vis
9760 || icode == CODE_FOR_bmaskdi_vis
9761 || icode == CODE_FOR_cmask8si_vis
9762 || icode == CODE_FOR_cmask8di_vis
9763 || icode == CODE_FOR_cmask16si_vis
9764 || icode == CODE_FOR_cmask16di_vis
9765 || icode == CODE_FOR_cmask32si_vis
9766 || icode == CODE_FOR_cmask32di_vis)
9767 ;
9768 else
9769 return build_zero_cst (rtype);
9770 }
9771
9772 switch (icode)
9773 {
9774 case CODE_FOR_fexpand_vis:
9775 arg0 = args[0];
9776 STRIP_NOPS (arg0);
9777
9778 if (TREE_CODE (arg0) == VECTOR_CST)
9779 {
9780 tree inner_type = TREE_TYPE (rtype);
9781 tree elts = TREE_VECTOR_CST_ELTS (arg0);
9782 tree n_elts = NULL_TREE;
9783
9784 for (; elts; elts = TREE_CHAIN (elts))
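	  /* fexpand turns each unsigned 8-bit pixel into a 16-bit
	     fixed-point value with 4 fractional bits, i.e. PIXEL << 4.  */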
9785 {
9786 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
9787 n_elts = tree_cons (NULL_TREE,
9788 build_int_cst (inner_type, val),
9789 n_elts);
9790 }
9791 return build_vector (rtype, nreverse (n_elts));
9792 }
9793 break;
9794
9795 case CODE_FOR_fmul8x16_vis:
9796 case CODE_FOR_fmul8x16au_vis:
9797 case CODE_FOR_fmul8x16al_vis:
9798 arg0 = args[0];
9799 arg1 = args[1];
9800 STRIP_NOPS (arg0);
9801 STRIP_NOPS (arg1);
9802
9803 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9804 {
9805 tree inner_type = TREE_TYPE (rtype);
9806 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9807 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9808 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
9809 elts1);
9810
9811 return build_vector (rtype, n_elts);
9812 }
9813 break;
9814
9815 case CODE_FOR_fpmerge_vis:
9816 arg0 = args[0];
9817 arg1 = args[1];
9818 STRIP_NOPS (arg0);
9819 STRIP_NOPS (arg1);
9820
9821 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9822 {
9823 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9824 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9825 tree n_elts = NULL_TREE;
9826
9827 for (; elts0 && elts1;
9828 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9829 {
9830 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
9831 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
9832 }
9833
9834 return build_vector (rtype, nreverse (n_elts));
9835 }
9836 break;
9837
9838 case CODE_FOR_pdist_vis:
9839 arg0 = args[0];
9840 arg1 = args[1];
9841 arg2 = args[2];
9842 STRIP_NOPS (arg0);
9843 STRIP_NOPS (arg1);
9844 STRIP_NOPS (arg2);
9845
9846 if (TREE_CODE (arg0) == VECTOR_CST
9847 && TREE_CODE (arg1) == VECTOR_CST
9848 && TREE_CODE (arg2) == INTEGER_CST)
9849 {
9850 int overflow = 0;
9851 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
9852 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
9853 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9854 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9855
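	  /* pdist sums the absolute differences of the eight byte pairs
	     into the 64-bit accumulator ARG2: compute elt0 - elt1 with
	     the double-word helpers, negate the difference if it is
	     negative, and add it to the running total.  */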
9856 for (; elts0 && elts1;
9857 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9858 {
9859 unsigned HOST_WIDE_INT
9860 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9861 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9862 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
9863 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
9864
9865 unsigned HOST_WIDE_INT l;
9866 HOST_WIDE_INT h;
9867
9868 overflow |= neg_double (low1, high1, &l, &h);
9869 overflow |= add_double (low0, high0, l, h, &l, &h);
9870 if (h < 0)
9871 overflow |= neg_double (l, h, &l, &h);
9872
9873 overflow |= add_double (low, high, l, h, &low, &high);
9874 }
9875
9876 gcc_assert (overflow == 0);
9877
9878 return build_int_cst_wide (rtype, low, high);
9879 }
9880
9881 default:
9882 break;
9883 }
9884
9885 return NULL_TREE;
9886 }
9887 \f
9888 /* ??? This duplicates information provided to the compiler by the
9889 ??? scheduler description. Some day, teach genautomata to output
9890 ??? the latencies and then CSE will just use that. */
9891
9892 static bool
9893 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
9894 int *total, bool speed ATTRIBUTE_UNUSED)
9895 {
9896 enum machine_mode mode = GET_MODE (x);
9897 bool float_mode_p = FLOAT_MODE_P (mode);
9898
9899 switch (code)
9900 {
9901 case CONST_INT:
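      /* Constants that fit in the signed 13-bit immediate field of most
	 SPARC instructions (cf. SPARC_SIMM13_P) are free.  */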
9902 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
9903 {
9904 *total = 0;
9905 return true;
9906 }
9907 /* FALLTHRU */
9908
9909 case HIGH:
9910 *total = 2;
9911 return true;
9912
9913 case CONST:
9914 case LABEL_REF:
9915 case SYMBOL_REF:
9916 *total = 4;
9917 return true;
9918
9919 case CONST_DOUBLE:
9920 if (GET_MODE (x) == VOIDmode
9921 && ((CONST_DOUBLE_HIGH (x) == 0
9922 && CONST_DOUBLE_LOW (x) < 0x1000)
9923 || (CONST_DOUBLE_HIGH (x) == -1
9924 && CONST_DOUBLE_LOW (x) < 0
9925 && CONST_DOUBLE_LOW (x) >= -0x1000)))
9926 *total = 0;
9927 else
9928 *total = 8;
9929 return true;
9930
9931 case MEM:
9932 /* If outer-code was a sign or zero extension, a cost
9933 of COSTS_N_INSNS (1) was already added in. This is
9934 why we are subtracting it back out. */
9935 if (outer_code == ZERO_EXTEND)
9936 {
9937 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
9938 }
9939 else if (outer_code == SIGN_EXTEND)
9940 {
9941 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
9942 }
9943 else if (float_mode_p)
9944 {
9945 *total = sparc_costs->float_load;
9946 }
9947 else
9948 {
9949 *total = sparc_costs->int_load;
9950 }
9951
9952 return true;
9953
9954 case PLUS:
9955 case MINUS:
9956 if (float_mode_p)
9957 *total = sparc_costs->float_plusminus;
9958 else
9959 *total = COSTS_N_INSNS (1);
9960 return false;
9961
9962 case FMA:
9963 {
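	/* The negating FMA variants (fnmadd, fmsub, ...) fuse the
	   negation of an operand into the instruction itself, so strip
	   a toplevel NEG from the multiplicand and the addend before
	   costing them.  */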
9964 rtx sub;
9965
9966 gcc_assert (float_mode_p);
9967 *total = sparc_costs->float_mul;
9968
9969 sub = XEXP (x, 0);
9970 if (GET_CODE (sub) == NEG)
9971 sub = XEXP (sub, 0);
9972 *total += rtx_cost (sub, FMA, 0, speed);
9973
9974 sub = XEXP (x, 2);
9975 if (GET_CODE (sub) == NEG)
9976 sub = XEXP (sub, 0);
9977 *total += rtx_cost (sub, FMA, 2, speed);
9978 return true;
9979 }
9980
9981 case MULT:
9982 if (float_mode_p)
9983 *total = sparc_costs->float_mul;
9984 else if (! TARGET_HARD_MUL)
9985 *total = COSTS_N_INSNS (25);
9986 else
9987 {
9988 int bit_cost;
9989
9990 bit_cost = 0;
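	  /* On CPUs whose multiply latency depends on the operand values,
	     charge one extra insn for every INT_MUL_BIT_FACTOR set bits
	     of a constant multiplier beyond the first three; a
	     non-constant multiplier is assumed to have 7 bits set.  */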
9991 if (sparc_costs->int_mul_bit_factor)
9992 {
9993 int nbits;
9994
9995 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
9996 {
9997 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
9998 for (nbits = 0; value != 0; value &= value - 1)
9999 nbits++;
10000 }
10001 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10002 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10003 {
10004 rtx x1 = XEXP (x, 1);
10005 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10006 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10007
10008 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10009 nbits++;
10010 for (; value2 != 0; value2 &= value2 - 1)
10011 nbits++;
10012 }
10013 else
10014 nbits = 7;
10015
10016 if (nbits < 3)
10017 nbits = 3;
10018 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10019 bit_cost = COSTS_N_INSNS (bit_cost);
10020 }
10021
10022 if (mode == DImode)
10023 *total = sparc_costs->int_mulX + bit_cost;
10024 else
10025 *total = sparc_costs->int_mul + bit_cost;
10026 }
10027 return false;
10028
10029 case ASHIFT:
10030 case ASHIFTRT:
10031 case LSHIFTRT:
10032 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10033 return false;
10034
10035 case DIV:
10036 case UDIV:
10037 case MOD:
10038 case UMOD:
10039 if (float_mode_p)
10040 {
10041 if (mode == DFmode)
10042 *total = sparc_costs->float_div_df;
10043 else
10044 *total = sparc_costs->float_div_sf;
10045 }
10046 else
10047 {
10048 if (mode == DImode)
10049 *total = sparc_costs->int_divX;
10050 else
10051 *total = sparc_costs->int_div;
10052 }
10053 return false;
10054
10055 case NEG:
10056 if (! float_mode_p)
10057 {
10058 *total = COSTS_N_INSNS (1);
10059 return false;
10060 }
10061 /* FALLTHRU */
10062
10063 case ABS:
10064 case FLOAT:
10065 case UNSIGNED_FLOAT:
10066 case FIX:
10067 case UNSIGNED_FIX:
10068 case FLOAT_EXTEND:
10069 case FLOAT_TRUNCATE:
10070 *total = sparc_costs->float_move;
10071 return false;
10072
10073 case SQRT:
10074 if (mode == DFmode)
10075 *total = sparc_costs->float_sqrt_df;
10076 else
10077 *total = sparc_costs->float_sqrt_sf;
10078 return false;
10079
10080 case COMPARE:
10081 if (float_mode_p)
10082 *total = sparc_costs->float_cmp;
10083 else
10084 *total = COSTS_N_INSNS (1);
10085 return false;
10086
10087 case IF_THEN_ELSE:
10088 if (float_mode_p)
10089 *total = sparc_costs->float_cmove;
10090 else
10091 *total = sparc_costs->int_cmove;
10092 return false;
10093
10094 case IOR:
      /* Handle the NAND vector patterns: (ior (not X) (not Y)) is the
	 canonical RTL form of NAND and maps to a single VIS insn.  */
10096 if (sparc_vector_mode_supported_p (GET_MODE (x))
10097 && GET_CODE (XEXP (x, 0)) == NOT
10098 && GET_CODE (XEXP (x, 1)) == NOT)
10099 {
10100 *total = COSTS_N_INSNS (1);
10101 return true;
10102 }
10103 else
10104 return false;
10105
10106 default:
10107 return false;
10108 }
10109 }
10110
10111 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10112
10113 static inline bool
10114 general_or_i64_p (reg_class_t rclass)
10115 {
10116 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10117 }
10118
10119 /* Implement TARGET_REGISTER_MOVE_COST. */
10120
10121 static int
10122 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10123 reg_class_t from, reg_class_t to)
10124 {
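  /* Moves between the integer and FP register files go through memory
     on SPARC, so charge them well above the default cost of 2; the
     round trip is slower still on the UltraSPARC and Niagara lines.  */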
10125 if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10126 || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
10127 || from == FPCC_REGS
10128 || to == FPCC_REGS)
10129 {
10130 if (sparc_cpu == PROCESSOR_ULTRASPARC
10131 || sparc_cpu == PROCESSOR_ULTRASPARC3
10132 || sparc_cpu == PROCESSOR_NIAGARA
10133 || sparc_cpu == PROCESSOR_NIAGARA2
10134 || sparc_cpu == PROCESSOR_NIAGARA3
10135 || sparc_cpu == PROCESSOR_NIAGARA4)
10136 return 12;
10137
10138 return 6;
10139 }
10140
10141 return 2;
10142 }
10143
10144 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10145 This is achieved by means of a manual dynamic stack space allocation in
10146 the current frame. We make the assumption that SEQ doesn't contain any
10147 function calls, with the possible exception of calls to the GOT helper. */
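/* In outline, and ignoring the stack bias, the emitted sequence is:

	sub	%sp, SIZE, %sp
	st	REG, [%sp+OFFSET]	! stx and 8-byte slots on 64-bit
	st	REG2, [%sp+OFFSET+4]	! only if REG2 is given
	<SEQ>
	ld	[%sp+OFFSET+4], REG2	! only if REG2 is given
	ld	[%sp+OFFSET], REG
	add	%sp, SIZE, %sp  */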
10148
10149 static void
10150 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10151 {
10152 /* We must preserve the lowest 16 words for the register save area. */
10153 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10154 /* We really need only 2 words of fresh stack space. */
10155 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10156
10157 rtx slot
10158 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
10159 SPARC_STACK_BIAS + offset));
10160
10161 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10162 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10163 if (reg2)
10164 emit_insn (gen_rtx_SET (VOIDmode,
10165 adjust_address (slot, word_mode, UNITS_PER_WORD),
10166 reg2));
10167 emit_insn (seq);
10168 if (reg2)
10169 emit_insn (gen_rtx_SET (VOIDmode,
10170 reg2,
10171 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10172 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10173 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10174 }
10175
10176 /* Output the assembler code for a thunk function. THUNK_DECL is the
10177 declaration for the thunk function itself, FUNCTION is the decl for
10178 the target function. DELTA is an immediate constant offset to be
10179 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10180 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10181
10182 static void
10183 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10184 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10185 tree function)
10186 {
10187 rtx this_rtx, insn, funexp;
10188 unsigned int int_arg_first;
10189
10190 reload_completed = 1;
10191 epilogue_completed = 1;
10192
10193 emit_note (NOTE_INSN_PROLOGUE_END);
10194
10195 if (TARGET_FLAT)
10196 {
10197 sparc_leaf_function_p = 1;
10198
10199 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10200 }
10201 else if (flag_delayed_branch)
10202 {
10203 /* We will emit a regular sibcall below, so we need to instruct
10204 output_sibcall that we are in a leaf function. */
10205 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
10206
10207 /* This will cause final.c to invoke leaf_renumber_regs so we
10208 must behave as if we were in a not-yet-leafified function. */
10209 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10210 }
10211 else
10212 {
10213 /* We will emit the sibcall manually below, so we will need to
10214 manually spill non-leaf registers. */
10215 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
10216
10217 /* We really are in a leaf function. */
10218 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10219 }
10220
10221 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10222 returns a structure, the structure return pointer is there instead. */
10223 if (TARGET_ARCH64
10224 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10225 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10226 else
10227 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10228
10229 /* Add DELTA. When possible use a plain add, otherwise load it into
10230 a register first. */
10231 if (delta)
10232 {
10233 rtx delta_rtx = GEN_INT (delta);
10234
10235 if (! SPARC_SIMM13_P (delta))
10236 {
10237 rtx scratch = gen_rtx_REG (Pmode, 1);
10238 emit_move_insn (scratch, delta_rtx);
10239 delta_rtx = scratch;
10240 }
10241
10242 /* THIS_RTX += DELTA. */
10243 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10244 }
10245
10246 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10247 if (vcall_offset)
10248 {
10249 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10250 rtx scratch = gen_rtx_REG (Pmode, 1);
10251
10252 gcc_assert (vcall_offset < 0);
10253
10254 /* SCRATCH = *THIS_RTX. */
10255 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10256
10257 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10258 may not have any available scratch register at this point. */
10259 if (SPARC_SIMM13_P (vcall_offset))
10260 ;
      /* On ARCH64, %g5 is not a fixed register by default (unless
	 -ffixed-g5 is passed), so it can serve as a second scratch.  */
10262 else if (! fixed_regs[5]
10263 /* The below sequence is made up of at least 2 insns,
10264 while the default method may need only one. */
10265 && vcall_offset < -8192)
10266 {
10267 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10268 emit_move_insn (scratch2, vcall_offset_rtx);
10269 vcall_offset_rtx = scratch2;
10270 }
10271 else
10272 {
10273 rtx increment = GEN_INT (-4096);
10274
10275 /* VCALL_OFFSET is a negative number whose typical range can be
10276 estimated as -32768..0 in 32-bit mode. In almost all cases
10277 it is therefore cheaper to emit multiple add insns than
10278 spilling and loading the constant into a register (at least
10279 6 insns). */
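	  /* For example, VCALL_OFFSET = -13000 takes three adds of -4096
	     before the residual -712 satisfies SPARC_SIMM13_P.  */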
10280 while (! SPARC_SIMM13_P (vcall_offset))
10281 {
10282 emit_insn (gen_add2_insn (scratch, increment));
10283 vcall_offset += 4096;
10284 }
10285 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
10286 }
10287
10288 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10289 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10290 gen_rtx_PLUS (Pmode,
10291 scratch,
10292 vcall_offset_rtx)));
10293
10294 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10295 emit_insn (gen_add2_insn (this_rtx, scratch));
10296 }
10297
10298 /* Generate a tail call to the target function. */
10299 if (! TREE_USED (function))
10300 {
10301 assemble_external (function);
10302 TREE_USED (function) = 1;
10303 }
10304 funexp = XEXP (DECL_RTL (function), 0);
10305
10306 if (flag_delayed_branch)
10307 {
10308 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10309 insn = emit_call_insn (gen_sibcall (funexp));
10310 SIBLING_CALL_P (insn) = 1;
10311 }
10312 else
10313 {
10314 /* The hoops we have to jump through in order to generate a sibcall
10315 without using delay slots... */
10316 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10317
10318 if (flag_pic)
10319 {
10320 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10321 start_sequence ();
10322 load_got_register (); /* clobbers %o7 */
10323 scratch = sparc_legitimize_pic_address (funexp, scratch);
10324 seq = get_insns ();
10325 end_sequence ();
10326 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10327 }
10328 else if (TARGET_ARCH32)
10329 {
10330 emit_insn (gen_rtx_SET (VOIDmode,
10331 scratch,
10332 gen_rtx_HIGH (SImode, funexp)));
10333 emit_insn (gen_rtx_SET (VOIDmode,
10334 scratch,
10335 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10336 }
10337 else /* TARGET_ARCH64 */
10338 {
10339 switch (sparc_cmodel)
10340 {
10341 case CM_MEDLOW:
10342 case CM_MEDMID:
10343 /* The destination can serve as a temporary. */
10344 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10345 break;
10346
10347 case CM_MEDANY:
10348 case CM_EMBMEDANY:
10349 /* The destination cannot serve as a temporary. */
10350 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10351 start_sequence ();
10352 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10353 seq = get_insns ();
10354 end_sequence ();
10355 emit_and_preserve (seq, spill_reg, 0);
10356 break;
10357
10358 default:
10359 gcc_unreachable ();
10360 }
10361 }
10362
10363 emit_jump_insn (gen_indirect_jump (scratch));
10364 }
10365
10366 emit_barrier ();
10367
  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
10372 insn = get_insns ();
10373 insn_locators_alloc ();
10374 shorten_branches (insn);
10375 final_start_function (insn, file, 1);
10376 final (insn, file, 1);
10377 final_end_function ();
10378
10379 reload_completed = 0;
10380 epilogue_completed = 0;
10381 }
10382
10383 /* Return true if sparc_output_mi_thunk would be able to output the
10384 assembler code for the thunk function specified by the arguments
10385 it is passed, and false otherwise. */
10386 static bool
10387 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10388 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10389 HOST_WIDE_INT vcall_offset,
10390 const_tree function ATTRIBUTE_UNUSED)
10391 {
10392 /* Bound the loop used in the default method above. */
10393 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10394 }
10395
/* We use the machine-specific reorg pass to enable workarounds for errata.  */
10397
10398 static void
10399 sparc_reorg (void)
10400 {
10401 rtx insn, next;
10402
10403 /* The only erratum we handle for now is that of the AT697F processor. */
10404 if (!sparc_fix_at697f)
10405 return;
10406
10407 /* We need to have the (essentially) final form of the insn stream in order
10408 to properly detect the various hazards. Run delay slot scheduling. */
10409 if (optimize > 0 && flag_delayed_branch)
10410 dbr_schedule (get_insns ());
10411
10412 /* Now look for specific patterns in the insn stream. */
10413 for (insn = get_insns (); insn; insn = next)
10414 {
10415 bool insert_nop = false;
10416 rtx set;
10417
10418 /* Look for a single-word load into an odd-numbered FP register. */
10419 if (NONJUMP_INSN_P (insn)
10420 && (set = single_set (insn)) != NULL_RTX
10421 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10422 && MEM_P (SET_SRC (set))
10423 && REG_P (SET_DEST (set))
10424 && REGNO (SET_DEST (set)) > 31
10425 && REGNO (SET_DEST (set)) % 2 != 0)
10426 {
10427 /* The wrong dependency is on the enclosing double register. */
10428 unsigned int x = REGNO (SET_DEST (set)) - 1;
10429 unsigned int src1, src2, dest;
10430 int code;
10431
10432 /* If the insn has a delay slot, then it cannot be problematic. */
10433 next = next_active_insn (insn);
10434 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10435 code = -1;
10436 else
10437 {
10438 extract_insn (next);
10439 code = INSN_CODE (next);
10440 }
10441
10442 switch (code)
10443 {
10444 case CODE_FOR_adddf3:
10445 case CODE_FOR_subdf3:
10446 case CODE_FOR_muldf3:
10447 case CODE_FOR_divdf3:
10448 dest = REGNO (recog_data.operand[0]);
10449 src1 = REGNO (recog_data.operand[1]);
10450 src2 = REGNO (recog_data.operand[2]);
10451 if (src1 != src2)
10452 {
10453 /* Case [1-4]:
10454 ld [address], %fx+1
10455 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10456 if ((src1 == x || src2 == x)
10457 && (dest == src1 || dest == src2))
10458 insert_nop = true;
10459 }
10460 else
10461 {
10462 /* Case 5:
10463 ld [address], %fx+1
10464 FPOPd %fx, %fx, %fx */
10465 if (src1 == x
10466 && dest == src1
10467 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10468 insert_nop = true;
10469 }
10470 break;
10471
10472 case CODE_FOR_sqrtdf2:
10473 dest = REGNO (recog_data.operand[0]);
10474 src1 = REGNO (recog_data.operand[1]);
10475 /* Case 6:
10476 ld [address], %fx+1
10477 fsqrtd %fx, %fx */
10478 if (src1 == x && dest == src1)
10479 insert_nop = true;
10480 break;
10481
10482 default:
10483 break;
10484 }
10485 }
10486 else
10487 next = NEXT_INSN (insn);
10488
10489 if (insert_nop)
10490 emit_insn_after (gen_nop (), insn);
10491 }
10492 }
10493
10494 /* How to allocate a 'struct machine_function'. */
10495
10496 static struct machine_function *
10497 sparc_init_machine_status (void)
10498 {
10499 return ggc_alloc_cleared_machine_function ();
10500 }
10501
10502 /* Locate some local-dynamic symbol still in use by this function
10503 so that we can print its name in local-dynamic base patterns. */
10504
10505 static const char *
10506 get_some_local_dynamic_name (void)
10507 {
10508 rtx insn;
10509
10510 if (cfun->machine->some_ld_name)
10511 return cfun->machine->some_ld_name;
10512
10513 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10514 if (INSN_P (insn)
10515 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10516 return cfun->machine->some_ld_name;
10517
10518 gcc_unreachable ();
10519 }
10520
10521 static int
10522 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10523 {
10524 rtx x = *px;
10525
10526 if (x
10527 && GET_CODE (x) == SYMBOL_REF
10528 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10529 {
10530 cfun->machine->some_ld_name = XSTR (x, 0);
10531 return 1;
10532 }
10533
10534 return 0;
10535 }
10536
10537 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10538 We need to emit DTP-relative relocations. */
10539
10540 static void
10541 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10542 {
10543 switch (size)
10544 {
10545 case 4:
10546 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10547 break;
10548 case 8:
10549 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10550 break;
10551 default:
10552 gcc_unreachable ();
10553 }
10554 output_addr_const (file, x);
10555 fputs (")", file);
10556 }
10557
10558 /* Do whatever processing is required at the end of a file. */
10559
10560 static void
10561 sparc_file_end (void)
10562 {
10563 /* If we need to emit the special GOT helper function, do so now. */
10564 if (got_helper_rtx)
10565 {
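      /* The helper is the classic PC-fetching sequence

		jmp	%o7+8
		 add	%o7, REG, REG

	 which returns to its caller while adding %o7, i.e. the address
	 of the call instruction, into REG.  */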
10566 const char *name = XSTR (got_helper_rtx, 0);
10567 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10568 #ifdef DWARF2_UNWIND_INFO
10569 bool do_cfi;
10570 #endif
10571
10572 if (USE_HIDDEN_LINKONCE)
10573 {
10574 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10575 get_identifier (name),
10576 build_function_type_list (void_type_node,
10577 NULL_TREE));
10578 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10579 NULL_TREE, void_type_node);
10580 TREE_STATIC (decl) = 1;
10581 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10582 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10583 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10584 resolve_unique_section (decl, 0, flag_function_sections);
10585 allocate_struct_function (decl, true);
10586 cfun->is_thunk = 1;
10587 current_function_decl = decl;
10588 init_varasm_status ();
10589 assemble_start_function (decl, name);
10590 }
10591 else
10592 {
10593 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10594 switch_to_section (text_section);
10595 if (align > 0)
10596 ASM_OUTPUT_ALIGN (asm_out_file, align);
10597 ASM_OUTPUT_LABEL (asm_out_file, name);
10598 }
10599
10600 #ifdef DWARF2_UNWIND_INFO
10601 do_cfi = dwarf2out_do_cfi_asm ();
10602 if (do_cfi)
10603 fprintf (asm_out_file, "\t.cfi_startproc\n");
10604 #endif
10605 if (flag_delayed_branch)
10606 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10607 reg_name, reg_name);
10608 else
10609 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10610 reg_name, reg_name);
10611 #ifdef DWARF2_UNWIND_INFO
10612 if (do_cfi)
10613 fprintf (asm_out_file, "\t.cfi_endproc\n");
10614 #endif
10615 }
10616
10617 if (NEED_INDICATE_EXEC_STACK)
10618 file_end_indicate_exec_stack ();
10619
10620 #ifdef TARGET_SOLARIS
10621 solaris_file_end ();
10622 #endif
10623 }
10624
10625 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10626 /* Implement TARGET_MANGLE_TYPE. */
10627
10628 static const char *
10629 sparc_mangle_type (const_tree type)
10630 {
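  /* The 128-bit long double mangles as "g", the Itanium C++ ABI code
     for __float128, rather than the default "e" used for long double.  */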
10631 if (!TARGET_64BIT
10632 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10633 && TARGET_LONG_DOUBLE_128)
10634 return "g";
10635
10636 /* For all other types, use normal C++ mangling. */
10637 return NULL;
10638 }
10639 #endif
10640
/* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
   32-bit compare and swap on the word containing the byte or half-word.  */
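/* The emitted sequence amounts to the following pseudo-C sketch (not the
   literal RTL), where WORDP is MEM's address rounded down to a word
   boundary, OFF the bit offset of the subword within that word, and MASK
   the subword mask shifted into place:

     oldv = (oldval << off) & mask;
     newv = (newval << off) & mask;
     val  = *wordp & ~mask;
     for (;;)
       {
	 res = compare_and_swap (wordp, oldv | val, newv | val);
	 if (res == (oldv | val))
	   break;			-- the CAS succeeded
	 if ((res & ~mask) == val)
	   break;			-- subword mismatch: genuine failure
	 val = res & ~mask;		-- other bytes changed: retry
       }
     result = (res & mask) >> off;  */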
10643
10644 void
10645 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
10646 {
10647 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10648 rtx addr = gen_reg_rtx (Pmode);
10649 rtx off = gen_reg_rtx (SImode);
10650 rtx oldv = gen_reg_rtx (SImode);
10651 rtx newv = gen_reg_rtx (SImode);
10652 rtx oldvalue = gen_reg_rtx (SImode);
10653 rtx newvalue = gen_reg_rtx (SImode);
10654 rtx res = gen_reg_rtx (SImode);
10655 rtx resv = gen_reg_rtx (SImode);
10656 rtx memsi, val, mask, end_label, loop_label, cc;
10657
10658 emit_insn (gen_rtx_SET (VOIDmode, addr,
10659 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10660
10661 if (Pmode != SImode)
10662 addr1 = gen_lowpart (SImode, addr1);
10663 emit_insn (gen_rtx_SET (VOIDmode, off,
10664 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10665
10666 memsi = gen_rtx_MEM (SImode, addr);
10667 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10668 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10669
10670 val = force_reg (SImode, memsi);
10671
10672 emit_insn (gen_rtx_SET (VOIDmode, off,
10673 gen_rtx_XOR (SImode, off,
10674 GEN_INT (GET_MODE (mem) == QImode
10675 ? 3 : 2))));
10676
10677 emit_insn (gen_rtx_SET (VOIDmode, off,
10678 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10679
10680 if (GET_MODE (mem) == QImode)
10681 mask = force_reg (SImode, GEN_INT (0xff));
10682 else
10683 mask = force_reg (SImode, GEN_INT (0xffff));
10684
10685 emit_insn (gen_rtx_SET (VOIDmode, mask,
10686 gen_rtx_ASHIFT (SImode, mask, off)));
10687
10688 emit_insn (gen_rtx_SET (VOIDmode, val,
10689 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10690 val)));
10691
10692 oldval = gen_lowpart (SImode, oldval);
10693 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10694 gen_rtx_ASHIFT (SImode, oldval, off)));
10695
10696 newval = gen_lowpart_common (SImode, newval);
10697 emit_insn (gen_rtx_SET (VOIDmode, newv,
10698 gen_rtx_ASHIFT (SImode, newval, off)));
10699
10700 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10701 gen_rtx_AND (SImode, oldv, mask)));
10702
10703 emit_insn (gen_rtx_SET (VOIDmode, newv,
10704 gen_rtx_AND (SImode, newv, mask)));
10705
10706 end_label = gen_label_rtx ();
10707 loop_label = gen_label_rtx ();
10708 emit_label (loop_label);
10709
10710 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
10711 gen_rtx_IOR (SImode, oldv, val)));
10712
10713 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
10714 gen_rtx_IOR (SImode, newv, val)));
10715
10716 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
10717
10718 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
10719
10720 emit_insn (gen_rtx_SET (VOIDmode, resv,
10721 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10722 res)));
10723
10724 cc = gen_compare_reg_1 (NE, resv, val);
10725 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
10726
10727 /* Use cbranchcc4 to separate the compare and branch! */
10728 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
10729 cc, const0_rtx, loop_label));
10730
10731 emit_label (end_label);
10732
10733 emit_insn (gen_rtx_SET (VOIDmode, res,
10734 gen_rtx_AND (SImode, res, mask)));
10735
10736 emit_insn (gen_rtx_SET (VOIDmode, res,
10737 gen_rtx_LSHIFTRT (SImode, res, off)));
10738
10739 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
10740 }
10741
10742 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
10743
10744 static bool
10745 sparc_frame_pointer_required (void)
10746 {
10747 /* If the stack pointer is dynamically modified in the function, it cannot
10748 serve as the frame pointer. */
10749 if (cfun->calls_alloca)
10750 return true;
10751
10752 /* If the function receives nonlocal gotos, it needs to save the frame
10753 pointer in the nonlocal_goto_save_area object. */
10754 if (cfun->has_nonlocal_label)
10755 return true;
10756
10757 /* In flat mode, that's it. */
10758 if (TARGET_FLAT)
10759 return false;
10760
10761 /* Otherwise, the frame pointer is required if the function isn't leaf. */
10762 return !(current_function_is_leaf && only_leaf_regs_used ());
10763 }
10764
10765 /* The way this is structured, we can't eliminate SFP in favor of SP
10766 if the frame pointer is required: we want to use the SFP->HFP elimination
10767 in that case. But the test in update_eliminables doesn't know we are
10768 assuming below that we only do the former elimination. */
10769
10770 static bool
10771 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
10772 {
10773 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
10774 }
10775
10776 /* Return the hard frame pointer directly to bypass the stack bias. */
10777
10778 static rtx
10779 sparc_builtin_setjmp_frame_value (void)
10780 {
10781 return hard_frame_pointer_rtx;
10782 }
10783
10784 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
10785 they won't be allocated. */
10786
10787 static void
10788 sparc_conditional_register_usage (void)
10789 {
10790 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
10791 {
10792 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10793 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10794 }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it.  */
10797 if (TARGET_ARCH32 && fixed_regs[5])
10798 fixed_regs[5] = 1;
10799 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
10800 fixed_regs[5] = 0;
10801 if (! TARGET_V9)
10802 {
10803 int regno;
10804 for (regno = SPARC_FIRST_V9_FP_REG;
10805 regno <= SPARC_LAST_V9_FP_REG;
10806 regno++)
10807 fixed_regs[regno] = 1;
10808 /* %fcc0 is used by v8 and v9. */
10809 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
10810 regno <= SPARC_LAST_V9_FCC_REG;
10811 regno++)
10812 fixed_regs[regno] = 1;
10813 }
10814 if (! TARGET_FPU)
10815 {
10816 int regno;
10817 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
10818 fixed_regs[regno] = 1;
10819 }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
     Likewise with g3 and g4.  */
10822 if (fixed_regs[2] == 2)
10823 fixed_regs[2] = ! TARGET_APP_REGS;
10824 if (fixed_regs[3] == 2)
10825 fixed_regs[3] = ! TARGET_APP_REGS;
10826 if (TARGET_ARCH32 && fixed_regs[4] == 2)
10827 fixed_regs[4] = ! TARGET_APP_REGS;
10828 else if (TARGET_CM_EMBMEDANY)
10829 fixed_regs[4] = 1;
10830 else if (fixed_regs[4] == 2)
10831 fixed_regs[4] = 0;
10832 if (TARGET_FLAT)
10833 {
10834 int regno;
10835 /* Disable leaf functions. */
10836 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
10837 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10838 leaf_reg_remap [regno] = regno;
10839 }
10840 if (TARGET_VIS)
10841 global_regs[SPARC_GSR_REG] = 1;
10842 }
10843
/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */
10853
10854 static reg_class_t
10855 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
10856 {
10857 if (CONSTANT_P (x))
10858 {
10859 if (FP_REG_CLASS_P (rclass)
10860 || rclass == GENERAL_OR_FP_REGS
10861 || rclass == GENERAL_OR_EXTRA_FP_REGS
10862 || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
10863 || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
10864 return NO_REGS;
10865
10866 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10867 return GENERAL_REGS;
10868 }
10869
10870 return rclass;
10871 }
10872
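/* Output the assembly sequence for a 64-bit multiplication on v8plus,
   where 64-bit quantities live in pairs of 32-bit registers: reassemble
   each input into a single 64-bit register with sllx/or, emit the NAME
   multiply insn (e.g. "mulx"), then split the 64-bit result back into
   the %H0/%L0 pair with srlx.  */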
10873 const char *
10874 output_v8plus_mult (rtx insn, rtx *operands, const char *name)
10875 {
10876 char mulstr[32];
10877
10878 gcc_assert (! TARGET_ARCH64);
10879
10880 if (sparc_check_64 (operands[1], insn) <= 0)
10881 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10882 if (which_alternative == 1)
10883 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
10884 if (GET_CODE (operands[2]) == CONST_INT)
10885 {
10886 if (which_alternative == 1)
10887 {
10888 output_asm_insn ("or\t%L1, %H1, %H1", operands);
10889 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
10890 output_asm_insn (mulstr, operands);
10891 return "srlx\t%L0, 32, %H0";
10892 }
10893 else
10894 {
10895 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10896 output_asm_insn ("or\t%L1, %3, %3", operands);
10897 sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
10898 output_asm_insn (mulstr, operands);
10899 output_asm_insn ("srlx\t%3, 32, %H0", operands);
10900 return "mov\t%3, %L0";
10901 }
10902 }
10903 else if (rtx_equal_p (operands[1], operands[2]))
10904 {
10905 if (which_alternative == 1)
10906 {
10907 output_asm_insn ("or\t%L1, %H1, %H1", operands);
10908 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
10909 output_asm_insn (mulstr, operands);
10910 return "srlx\t%L0, 32, %H0";
10911 }
10912 else
10913 {
10914 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10915 output_asm_insn ("or\t%L1, %3, %3", operands);
10916 sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
10917 output_asm_insn (mulstr, operands);
10918 output_asm_insn ("srlx\t%3, 32, %H0", operands);
10919 return "mov\t%3, %L0";
10920 }
10921 }
10922 if (sparc_check_64 (operands[2], insn) <= 0)
10923 output_asm_insn ("srl\t%L2, 0, %L2", operands);
10924 if (which_alternative == 1)
10925 {
10926 output_asm_insn ("or\t%L1, %H1, %H1", operands);
10927 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
10928 output_asm_insn ("or\t%L2, %L1, %L1", operands);
10929 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
10930 output_asm_insn (mulstr, operands);
10931 return "srlx\t%L0, 32, %H0";
10932 }
10933 else
10934 {
10935 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10936 output_asm_insn ("sllx\t%H2, 32, %4", operands);
10937 output_asm_insn ("or\t%L1, %3, %3", operands);
10938 output_asm_insn ("or\t%L2, %4, %4", operands);
10939 sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
10940 output_asm_insn (mulstr, operands);
10941 output_asm_insn ("srlx\t%3, 32, %H0", operands);
10942 return "mov\t%3, %L0";
10943 }
10944 }
10945
10946 #include "gt-sparc.h"