re PR debug/24444 (invalid register in debug info)
[gcc.git] / gcc / config / ia64 / ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "intl.h"
56 #include "debug.h"
57
58 /* This is used for communication between ASM_OUTPUT_LABEL and
59 ASM_OUTPUT_LABELREF. */
60 int ia64_asm_output_label = 0;
61
62 /* Define the information needed to generate branch and scc insns. This is
63 stored from the compare operation. */
64 struct rtx_def * ia64_compare_op0;
65 struct rtx_def * ia64_compare_op1;
66
67 /* Register names for ia64_expand_prologue. */
68 static const char * const ia64_reg_numbers[96] =
69 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
70 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
71 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
72 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
73 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
74 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
75 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
76 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
77 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
78 "r104","r105","r106","r107","r108","r109","r110","r111",
79 "r112","r113","r114","r115","r116","r117","r118","r119",
80 "r120","r121","r122","r123","r124","r125","r126","r127"};
81
82 /* ??? These strings could be shared with REGISTER_NAMES. */
83 static const char * const ia64_input_reg_names[8] =
84 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
85
86 /* ??? These strings could be shared with REGISTER_NAMES. */
87 static const char * const ia64_local_reg_names[80] =
88 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
89 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
90 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
91 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
92 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
93 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
94 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
95 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
96 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
97 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
98
99 /* ??? These strings could be shared with REGISTER_NAMES. */
100 static const char * const ia64_output_reg_names[8] =
101 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
102
103 /* Which cpu we are scheduling for.  */
104 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
105
106 /* Determines whether we run our final scheduling pass or not. We always
107 avoid the normal second scheduling pass. */
108 static int ia64_flag_schedule_insns2;
109
110 /* Determines whether we run variable tracking in machine dependent
111 reorganization. */
112 static int ia64_flag_var_tracking;
113
114 /* Variables which are this size or smaller are put in the sdata/sbss
115 sections. */
116
117 unsigned int ia64_section_threshold;
118
119 /* The following variable is used by the DFA insn scheduler. The value is
120 TRUE if we do insn bundling instead of insn scheduling. */
121 int bundling_p = 0;
122
123 /* Structure to be filled in by ia64_compute_frame_size with register
124 save masks and offsets for the current function. */
125
126 struct ia64_frame_info
127 {
128 HOST_WIDE_INT total_size; /* size of the stack frame, not including
129 the caller's scratch area. */
130 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
131 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
132 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
133 HARD_REG_SET mask; /* mask of saved registers. */
134 unsigned int gr_used_mask; /* mask of registers in use as gr spill
135 registers or long-term scratches. */
136 int n_spilled; /* number of spilled registers. */
137 int reg_fp; /* register for fp. */
138 int reg_save_b0; /* save register for b0. */
139 int reg_save_pr; /* save register for prs. */
140 int reg_save_ar_pfs; /* save register for ar.pfs. */
141 int reg_save_ar_unat; /* save register for ar.unat. */
142 int reg_save_ar_lc; /* save register for ar.lc. */
143 int reg_save_gp; /* save register for gp. */
144 int n_input_regs; /* number of input registers used. */
145 int n_local_regs; /* number of local registers used. */
146 int n_output_regs; /* number of output registers used. */
147 int n_rotate_regs; /* number of rotating registers used. */
148
149 char need_regstk; /* true if a .regstk directive needed. */
150 char initialized; /* true if the data is finalized. */
151 };
152
153 /* Current frame information calculated by ia64_compute_frame_size. */
154 static struct ia64_frame_info current_frame_info;
155 \f
156 static int ia64_first_cycle_multipass_dfa_lookahead (void);
157 static void ia64_dependencies_evaluation_hook (rtx, rtx);
158 static void ia64_init_dfa_pre_cycle_insn (void);
159 static rtx ia64_dfa_pre_cycle_insn (void);
160 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
161 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
162 static rtx gen_tls_get_addr (void);
163 static rtx gen_thread_pointer (void);
164 static int find_gr_spill (int);
165 static int next_scratch_gr_reg (void);
166 static void mark_reg_gr_used_mask (rtx, void *);
167 static void ia64_compute_frame_size (HOST_WIDE_INT);
168 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
169 static void finish_spill_pointers (void);
170 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
171 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
172 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
173 static rtx gen_movdi_x (rtx, rtx, rtx);
174 static rtx gen_fr_spill_x (rtx, rtx, rtx);
175 static rtx gen_fr_restore_x (rtx, rtx, rtx);
176
177 static enum machine_mode hfa_element_mode (tree, bool);
178 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
179 tree, int *, int);
180 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
181 tree, bool);
182 static bool ia64_function_ok_for_sibcall (tree, tree);
183 static bool ia64_return_in_memory (tree, tree);
184 static bool ia64_rtx_costs (rtx, int, int, int *);
185 static void fix_range (const char *);
186 static bool ia64_handle_option (size_t, const char *, int);
187 static struct machine_function * ia64_init_machine_status (void);
188 static void emit_insn_group_barriers (FILE *);
189 static void emit_all_insn_group_barriers (FILE *);
190 static void final_emit_insn_group_barriers (FILE *);
191 static void emit_predicate_relation_info (void);
192 static void ia64_reorg (void);
193 static bool ia64_in_small_data_p (tree);
194 static void process_epilogue (FILE *, rtx, bool, bool);
195 static int process_set (FILE *, rtx, rtx, bool, bool);
196
197 static bool ia64_assemble_integer (rtx, unsigned int, int);
198 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
199 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
200 static void ia64_output_function_end_prologue (FILE *);
201
202 static int ia64_issue_rate (void);
203 static int ia64_adjust_cost (rtx, rtx, rtx, int);
204 static void ia64_sched_init (FILE *, int, int);
205 static void ia64_sched_finish (FILE *, int);
206 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
207 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
208 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
209 static int ia64_variable_issue (FILE *, int, rtx, int);
210
211 static struct bundle_state *get_free_bundle_state (void);
212 static void free_bundle_state (struct bundle_state *);
213 static void initiate_bundle_states (void);
214 static void finish_bundle_states (void);
215 static unsigned bundle_state_hash (const void *);
216 static int bundle_state_eq_p (const void *, const void *);
217 static int insert_bundle_state (struct bundle_state *);
218 static void initiate_bundle_state_table (void);
219 static void finish_bundle_state_table (void);
220 static int try_issue_nops (struct bundle_state *, int);
221 static int try_issue_insn (struct bundle_state *, rtx);
222 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
223 static int get_max_pos (state_t);
224 static int get_template (state_t, int);
225
226 static rtx get_next_important_insn (rtx, rtx);
227 static void bundling (FILE *, int, rtx, rtx);
228
229 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
230 HOST_WIDE_INT, tree);
231 static void ia64_file_start (void);
232
233 static section *ia64_select_rtx_section (enum machine_mode, rtx,
234 unsigned HOST_WIDE_INT);
235 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
236 ATTRIBUTE_UNUSED;
237 static section *ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
238 ATTRIBUTE_UNUSED;
239 static void ia64_rwreloc_unique_section (tree, int)
240 ATTRIBUTE_UNUSED;
241 static section *ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
242 unsigned HOST_WIDE_INT)
243 ATTRIBUTE_UNUSED;
244 static unsigned int ia64_section_type_flags (tree, const char *, int);
245 static void ia64_hpux_add_extern_decl (tree decl)
246 ATTRIBUTE_UNUSED;
247 static void ia64_hpux_file_end (void)
248 ATTRIBUTE_UNUSED;
249 static void ia64_init_libfuncs (void)
250 ATTRIBUTE_UNUSED;
251 static void ia64_hpux_init_libfuncs (void)
252 ATTRIBUTE_UNUSED;
253 static void ia64_sysv4_init_libfuncs (void)
254 ATTRIBUTE_UNUSED;
255 static void ia64_vms_init_libfuncs (void)
256 ATTRIBUTE_UNUSED;
257
258 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
259 static void ia64_encode_section_info (tree, rtx, int);
260 static rtx ia64_struct_value_rtx (tree, int);
261 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
262 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
263 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
264 static bool ia64_cannot_force_const_mem (rtx);
265 static const char *ia64_mangle_fundamental_type (tree);
266 static const char *ia64_invalid_conversion (tree, tree);
267 static const char *ia64_invalid_unary_op (int, tree);
268 static const char *ia64_invalid_binary_op (int, tree, tree);
269 \f
270 /* Table of valid machine attributes. */
271 static const struct attribute_spec ia64_attribute_table[] =
272 {
273 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
274 { "syscall_linkage", 0, 0, false, true, true, NULL },
275 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
276 { NULL, 0, 0, false, false, false, NULL }
277 };
278
279 /* Initialize the GCC target structure. */
280 #undef TARGET_ATTRIBUTE_TABLE
281 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
282
283 #undef TARGET_INIT_BUILTINS
284 #define TARGET_INIT_BUILTINS ia64_init_builtins
285
286 #undef TARGET_EXPAND_BUILTIN
287 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
288
289 #undef TARGET_ASM_BYTE_OP
290 #define TARGET_ASM_BYTE_OP "\tdata1\t"
291 #undef TARGET_ASM_ALIGNED_HI_OP
292 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
293 #undef TARGET_ASM_ALIGNED_SI_OP
294 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
295 #undef TARGET_ASM_ALIGNED_DI_OP
296 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
297 #undef TARGET_ASM_UNALIGNED_HI_OP
298 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
299 #undef TARGET_ASM_UNALIGNED_SI_OP
300 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
301 #undef TARGET_ASM_UNALIGNED_DI_OP
302 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
303 #undef TARGET_ASM_INTEGER
304 #define TARGET_ASM_INTEGER ia64_assemble_integer
305
306 #undef TARGET_ASM_FUNCTION_PROLOGUE
307 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
308 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
309 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
310 #undef TARGET_ASM_FUNCTION_EPILOGUE
311 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
312
313 #undef TARGET_IN_SMALL_DATA_P
314 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
315
316 #undef TARGET_SCHED_ADJUST_COST
317 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
318 #undef TARGET_SCHED_ISSUE_RATE
319 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
320 #undef TARGET_SCHED_VARIABLE_ISSUE
321 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
322 #undef TARGET_SCHED_INIT
323 #define TARGET_SCHED_INIT ia64_sched_init
324 #undef TARGET_SCHED_FINISH
325 #define TARGET_SCHED_FINISH ia64_sched_finish
326 #undef TARGET_SCHED_REORDER
327 #define TARGET_SCHED_REORDER ia64_sched_reorder
328 #undef TARGET_SCHED_REORDER2
329 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
330
331 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
332 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
333
334 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
335 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
336
337 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
338 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
339 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
340 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
341
342 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
343 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
344 ia64_first_cycle_multipass_dfa_lookahead_guard
345
346 #undef TARGET_SCHED_DFA_NEW_CYCLE
347 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
348
349 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
350 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
351 #undef TARGET_ARG_PARTIAL_BYTES
352 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
353
354 #undef TARGET_ASM_OUTPUT_MI_THUNK
355 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
356 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
357 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
358
359 #undef TARGET_ASM_FILE_START
360 #define TARGET_ASM_FILE_START ia64_file_start
361
362 #undef TARGET_RTX_COSTS
363 #define TARGET_RTX_COSTS ia64_rtx_costs
364 #undef TARGET_ADDRESS_COST
365 #define TARGET_ADDRESS_COST hook_int_rtx_0
366
367 #undef TARGET_MACHINE_DEPENDENT_REORG
368 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
369
370 #undef TARGET_ENCODE_SECTION_INFO
371 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
372
373 #undef TARGET_SECTION_TYPE_FLAGS
374 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
375
376 #ifdef HAVE_AS_TLS
377 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
378 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
379 #endif
380
381 /* ??? ABI doesn't allow us to define this. */
382 #if 0
383 #undef TARGET_PROMOTE_FUNCTION_ARGS
384 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
385 #endif
386
387 /* ??? ABI doesn't allow us to define this. */
388 #if 0
389 #undef TARGET_PROMOTE_FUNCTION_RETURN
390 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
391 #endif
392
393 /* ??? Investigate. */
394 #if 0
395 #undef TARGET_PROMOTE_PROTOTYPES
396 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
397 #endif
398
399 #undef TARGET_STRUCT_VALUE_RTX
400 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
401 #undef TARGET_RETURN_IN_MEMORY
402 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
403 #undef TARGET_SETUP_INCOMING_VARARGS
404 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
405 #undef TARGET_STRICT_ARGUMENT_NAMING
406 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
407 #undef TARGET_MUST_PASS_IN_STACK
408 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
409
410 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
411 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
412
413 #undef TARGET_UNWIND_EMIT
414 #define TARGET_UNWIND_EMIT process_for_unwind_directive
415
416 #undef TARGET_SCALAR_MODE_SUPPORTED_P
417 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
418 #undef TARGET_VECTOR_MODE_SUPPORTED_P
419 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
420
421 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
422 in an order different from the specified program order. */
423 #undef TARGET_RELAXED_ORDERING
424 #define TARGET_RELAXED_ORDERING true
425
426 #undef TARGET_DEFAULT_TARGET_FLAGS
427 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
428 #undef TARGET_HANDLE_OPTION
429 #define TARGET_HANDLE_OPTION ia64_handle_option
430
431 #undef TARGET_CANNOT_FORCE_CONST_MEM
432 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
433
434 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
435 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
436
437 #undef TARGET_INVALID_CONVERSION
438 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
439 #undef TARGET_INVALID_UNARY_OP
440 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
441 #undef TARGET_INVALID_BINARY_OP
442 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
443
444 struct gcc_target targetm = TARGET_INITIALIZER;
445 \f
446 typedef enum
447 {
448 ADDR_AREA_NORMAL, /* normal address area */
449 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
450 }
451 ia64_addr_area;
452
453 static GTY(()) tree small_ident1;
454 static GTY(()) tree small_ident2;
455
456 static void
457 init_idents (void)
458 {
459 if (small_ident1 == 0)
460 {
461 small_ident1 = get_identifier ("small");
462 small_ident2 = get_identifier ("__small__");
463 }
464 }
465
466 /* Retrieve the address area that has been chosen for the given decl. */
467
468 static ia64_addr_area
469 ia64_get_addr_area (tree decl)
470 {
471 tree model_attr;
472
473 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
474 if (model_attr)
475 {
476 tree id;
477
478 init_idents ();
479 id = TREE_VALUE (TREE_VALUE (model_attr));
480 if (id == small_ident1 || id == small_ident2)
481 return ADDR_AREA_SMALL;
482 }
483 return ADDR_AREA_NORMAL;
484 }
485
486 static tree
487 ia64_handle_model_attribute (tree *node, tree name, tree args,
488 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
489 {
490 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
491 ia64_addr_area area;
492 tree arg, decl = *node;
493
494 init_idents ();
495 arg = TREE_VALUE (args);
496 if (arg == small_ident1 || arg == small_ident2)
497 {
498 addr_area = ADDR_AREA_SMALL;
499 }
500 else
501 {
502 warning (OPT_Wattributes, "invalid argument of %qs attribute",
503 IDENTIFIER_POINTER (name));
504 *no_add_attrs = true;
505 }
506
507 switch (TREE_CODE (decl))
508 {
509 case VAR_DECL:
510 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
511 == FUNCTION_DECL)
512 && !TREE_STATIC (decl))
513 {
514 error ("%Jan address area attribute cannot be specified for "
515 "local variables", decl);
516 *no_add_attrs = true;
517 }
518 area = ia64_get_addr_area (decl);
519 if (area != ADDR_AREA_NORMAL && addr_area != area)
520 {
521 error ("address area of %q+D conflicts with previous "
522 "declaration", decl);
523 *no_add_attrs = true;
524 }
525 break;
526
527 case FUNCTION_DECL:
528 error ("%Jaddress area attribute cannot be specified for functions",
529 decl);
530 *no_add_attrs = true;
531 break;
532
533 default:
534 warning (OPT_Wattributes, "%qs attribute ignored",
535 IDENTIFIER_POINTER (name));
536 *no_add_attrs = true;
537 break;
538 }
539
540 return NULL_TREE;
541 }
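
/* Example added for illustration (not in the original source): with the
   handler above, a declaration along the lines of

       static int counter __attribute__ ((model (small)));

   marks COUNTER as living in the small address area, while applying the
   attribute to a function or to a non-static local variable is diagnosed
   and the attribute is dropped.  */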
542
543 static void
544 ia64_encode_addr_area (tree decl, rtx symbol)
545 {
546 int flags;
547
548 flags = SYMBOL_REF_FLAGS (symbol);
549 switch (ia64_get_addr_area (decl))
550 {
551 case ADDR_AREA_NORMAL: break;
552 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
553 default: gcc_unreachable ();
554 }
555 SYMBOL_REF_FLAGS (symbol) = flags;
556 }
557
558 static void
559 ia64_encode_section_info (tree decl, rtx rtl, int first)
560 {
561 default_encode_section_info (decl, rtl, first);
562
563 /* Careful not to prod global register variables. */
564 if (TREE_CODE (decl) == VAR_DECL
565 && GET_CODE (DECL_RTL (decl)) == MEM
566 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
567 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
568 ia64_encode_addr_area (decl, XEXP (rtl, 0));
569 }
570 \f
571 /* Implement CONST_OK_FOR_LETTER_P. */
572
573 bool
574 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
575 {
576 switch (c)
577 {
578 case 'I':
579 return CONST_OK_FOR_I (value);
580 case 'J':
581 return CONST_OK_FOR_J (value);
582 case 'K':
583 return CONST_OK_FOR_K (value);
584 case 'L':
585 return CONST_OK_FOR_L (value);
586 case 'M':
587 return CONST_OK_FOR_M (value);
588 case 'N':
589 return CONST_OK_FOR_N (value);
590 case 'O':
591 return CONST_OK_FOR_O (value);
592 case 'P':
593 return CONST_OK_FOR_P (value);
594 default:
595 return false;
596 }
597 }
598
599 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
600
601 bool
602 ia64_const_double_ok_for_letter_p (rtx value, char c)
603 {
604 switch (c)
605 {
606 case 'G':
607 return CONST_DOUBLE_OK_FOR_G (value);
608 default:
609 return false;
610 }
611 }
612
613 /* Implement EXTRA_CONSTRAINT. */
614
615 bool
616 ia64_extra_constraint (rtx value, char c)
617 {
618 switch (c)
619 {
620 case 'Q':
621 /* Non-volatile memory for FP_REG loads/stores. */
622 return memory_operand (value, VOIDmode) && !MEM_VOLATILE_P (value);
623
624 case 'R':
625 /* 1..4 for shladd arguments. */
626 return (GET_CODE (value) == CONST_INT
627 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
628
629 case 'S':
630 /* Non-post-inc memory for asms and other unsavory creatures. */
631 return (GET_CODE (value) == MEM
632 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
633 && (reload_in_progress || memory_operand (value, VOIDmode)));
634
635 case 'T':
636 /* Symbol ref to small-address-area. */
637 return small_addr_symbolic_operand (value, VOIDmode);
638
639 case 'U':
640 /* Vector zero. */
641 return value == CONST0_RTX (GET_MODE (value));
642
643 case 'W':
644 /* An integer vector, such that conversion to an integer yields a
645 value appropriate for an integer 'J' constraint. */
646 if (GET_CODE (value) == CONST_VECTOR
647 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
648 {
649 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
650 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
651 }
652 return false;
653
654 case 'Y':
655 /* A V2SF vector containing elements that satisfy 'G'. */
656 return
657 (GET_CODE (value) == CONST_VECTOR
658 && GET_MODE (value) == V2SFmode
659 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
660 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
661
662 default:
663 return false;
664 }
665 }
666 \f
667 /* Return 1 if the operands of a move are ok. */
668
669 int
670 ia64_move_ok (rtx dst, rtx src)
671 {
672 /* If we're under init_recog_no_volatile, we'll not be able to use
673 memory_operand. So check the code directly and don't worry about
674 the validity of the underlying address, which should have been
675 checked elsewhere anyway. */
676 if (GET_CODE (dst) != MEM)
677 return 1;
678 if (GET_CODE (src) == MEM)
679 return 0;
680 if (register_operand (src, VOIDmode))
681 return 1;
682
683 /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0. */
684 if (INTEGRAL_MODE_P (GET_MODE (dst)))
685 return src == const0_rtx;
686 else
687 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
688 }
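
/* Examples for illustration: storing a register or const0_rtx to memory
   satisfies ia64_move_ok, as does any move whose destination is not a MEM,
   while a memory-to-memory move or a store of an arbitrary constant such
   as (const_int 42) directly to memory does not and must first be forced
   through a register.  */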
689
690 /* Return 1 if the operands are ok for a floating point load pair. */
691
692 int
693 ia64_load_pair_ok (rtx dst, rtx src)
694 {
695 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
696 return 0;
697 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
698 return 0;
699 switch (GET_CODE (XEXP (src, 0)))
700 {
701 case REG:
702 case POST_INC:
703 break;
704 case POST_DEC:
705 return 0;
706 case POST_MODIFY:
707 {
708 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
709
710 if (GET_CODE (adjust) != CONST_INT
711 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
712 return 0;
713 }
714 break;
715 default:
716 abort ();
717 }
718 return 1;
719 }
720
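/* Return nonzero if exactly one of OP1 and OP2 satisfies basereg_operand.  */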
721 int
722 addp4_optimize_ok (rtx op1, rtx op2)
723 {
724 return (basereg_operand (op1, GET_MODE (op1)) !=
725 basereg_operand (op2, GET_MODE (op2)));
726 }
727
728 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
729 Return the length of the field, or <= 0 on failure. */
730
731 int
732 ia64_depz_field_mask (rtx rop, rtx rshift)
733 {
734 unsigned HOST_WIDE_INT op = INTVAL (rop);
735 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
736
737 /* Get rid of the zero bits we're shifting in. */
738 op >>= shift;
739
740 /* We must now have a solid block of 1's at bit 0. */
741 return exact_log2 (op + 1);
742 }
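
/* Worked example (illustrative values only): for ROP == 0x3f0 and
   RSHIFT == 4 the shift leaves 0x3f, and exact_log2 (0x40) == 6, so the
   field is 6 bits wide.  A non-contiguous mask such as 0x5 gives
   exact_log2 (0x6) == -1, indicating failure.  */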
743
744 /* Return the TLS model to use for ADDR. */
745
746 static enum tls_model
747 tls_symbolic_operand_type (rtx addr)
748 {
749 enum tls_model tls_kind = 0;
750
751 if (GET_CODE (addr) == CONST)
752 {
753 if (GET_CODE (XEXP (addr, 0)) == PLUS
754 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
755 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
756 }
757 else if (GET_CODE (addr) == SYMBOL_REF)
758 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
759
760 return tls_kind;
761 }
762
763 /* Return true if X is a constant that is valid for some immediate
764 field in an instruction. */
765
766 bool
767 ia64_legitimate_constant_p (rtx x)
768 {
769 switch (GET_CODE (x))
770 {
771 case CONST_INT:
772 case LABEL_REF:
773 return true;
774
775 case CONST_DOUBLE:
776 if (GET_MODE (x) == VOIDmode)
777 return true;
778 return CONST_DOUBLE_OK_FOR_G (x);
779
780 case CONST:
781 case SYMBOL_REF:
782 return tls_symbolic_operand_type (x) == 0;
783
784 case CONST_VECTOR:
785 {
786 enum machine_mode mode = GET_MODE (x);
787
788 if (mode == V2SFmode)
789 return ia64_extra_constraint (x, 'Y');
790
791 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
792 && GET_MODE_SIZE (mode) <= 8);
793 }
794
795 default:
796 return false;
797 }
798 }
799
800 /* Don't allow TLS addresses to get spilled to memory. */
801
802 static bool
803 ia64_cannot_force_const_mem (rtx x)
804 {
805 return tls_symbolic_operand_type (x) != 0;
806 }
807
808 /* Expand a symbolic constant load. */
809
810 bool
811 ia64_expand_load_address (rtx dest, rtx src)
812 {
813 gcc_assert (GET_CODE (dest) == REG);
814
815 /* ILP32 mode still loads 64 bits of data from the GOT. This avoids
816 having to pointer-extend the value afterward. Other forms of address
817 computation below are also more natural to compute as 64-bit quantities.
818 If we've been given an SImode destination register, change it. */
819 if (GET_MODE (dest) != Pmode)
820 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
821
822 if (TARGET_NO_PIC)
823 return false;
824 if (small_addr_symbolic_operand (src, VOIDmode))
825 return false;
826
827 if (TARGET_AUTO_PIC)
828 emit_insn (gen_load_gprel64 (dest, src));
829 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
830 emit_insn (gen_load_fptr (dest, src));
831 else if (sdata_symbolic_operand (src, VOIDmode))
832 emit_insn (gen_load_gprel (dest, src));
833 else
834 {
835 HOST_WIDE_INT addend = 0;
836 rtx tmp;
837
838 /* We did split constant offsets in ia64_expand_move, and we did try
839 to keep them split in move_operand, but we also allowed reload to
840 rematerialize arbitrary constants rather than spill the value to
841 the stack and reload it. So we have to be prepared here to split
842 them apart again. */
843 if (GET_CODE (src) == CONST)
844 {
845 HOST_WIDE_INT hi, lo;
846
847 hi = INTVAL (XEXP (XEXP (src, 0), 1));
848 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
849 hi = hi - lo;
850
851 if (lo != 0)
852 {
853 addend = lo;
854 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
855 }
856 }
857
858 tmp = gen_rtx_HIGH (Pmode, src);
859 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
860 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
861
862 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
863 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
864
865 if (addend)
866 {
867 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
868 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
869 }
870 }
871
872 return true;
873 }
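
/* Worked example of the offset splitting above (illustrative only): for
   SYM + 0x12345, lo = ((0x12345 & 0x3fff) ^ 0x2000) - 0x2000 == -0x1cbb
   and hi == 0x12345 - lo == 0x14000, so the HIGH/LO_SUM pair is generated
   for SYM + 0x14000 and the remaining -0x1cbb is applied with the separate
   add emitted at the end.  */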
874
875 static GTY(()) rtx gen_tls_tga;
876 static rtx
877 gen_tls_get_addr (void)
878 {
879 if (!gen_tls_tga)
880 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
881 return gen_tls_tga;
882 }
883
884 static GTY(()) rtx thread_pointer_rtx;
885 static rtx
886 gen_thread_pointer (void)
887 {
888 if (!thread_pointer_rtx)
889 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
890 return thread_pointer_rtx;
891 }
892
893 static rtx
894 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
895 rtx orig_op1, HOST_WIDE_INT addend)
896 {
897 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
898 rtx orig_op0 = op0;
899 HOST_WIDE_INT addend_lo, addend_hi;
900
901 switch (tls_kind)
902 {
903 case TLS_MODEL_GLOBAL_DYNAMIC:
904 start_sequence ();
905
906 tga_op1 = gen_reg_rtx (Pmode);
907 emit_insn (gen_load_dtpmod (tga_op1, op1));
908
909 tga_op2 = gen_reg_rtx (Pmode);
910 emit_insn (gen_load_dtprel (tga_op2, op1));
911
912 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
913 LCT_CONST, Pmode, 2, tga_op1,
914 Pmode, tga_op2, Pmode);
915
916 insns = get_insns ();
917 end_sequence ();
918
919 if (GET_MODE (op0) != Pmode)
920 op0 = tga_ret;
921 emit_libcall_block (insns, op0, tga_ret, op1);
922 break;
923
924 case TLS_MODEL_LOCAL_DYNAMIC:
925 /* ??? This isn't quite the proper way to do local-dynamic.
926 If the call to __tls_get_addr is used only by a single symbol,
927 then we should (somehow) move the dtprel to the second arg
928 to avoid the extra add. */
929 start_sequence ();
930
931 tga_op1 = gen_reg_rtx (Pmode);
932 emit_insn (gen_load_dtpmod (tga_op1, op1));
933
934 tga_op2 = const0_rtx;
935
936 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
937 LCT_CONST, Pmode, 2, tga_op1,
938 Pmode, tga_op2, Pmode);
939
940 insns = get_insns ();
941 end_sequence ();
942
943 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
944 UNSPEC_LD_BASE);
945 tmp = gen_reg_rtx (Pmode);
946 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
947
948 if (!register_operand (op0, Pmode))
949 op0 = gen_reg_rtx (Pmode);
950 if (TARGET_TLS64)
951 {
952 emit_insn (gen_load_dtprel (op0, op1));
953 emit_insn (gen_adddi3 (op0, tmp, op0));
954 }
955 else
956 emit_insn (gen_add_dtprel (op0, op1, tmp));
957 break;
958
959 case TLS_MODEL_INITIAL_EXEC:
960 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
961 addend_hi = addend - addend_lo;
962
963 op1 = plus_constant (op1, addend_hi);
964 addend = addend_lo;
965
966 tmp = gen_reg_rtx (Pmode);
967 emit_insn (gen_load_tprel (tmp, op1));
968
969 if (!register_operand (op0, Pmode))
970 op0 = gen_reg_rtx (Pmode);
971 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
972 break;
973
974 case TLS_MODEL_LOCAL_EXEC:
975 if (!register_operand (op0, Pmode))
976 op0 = gen_reg_rtx (Pmode);
977
978 op1 = orig_op1;
979 addend = 0;
980 if (TARGET_TLS64)
981 {
982 emit_insn (gen_load_tprel (op0, op1));
983 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
984 }
985 else
986 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
987 break;
988
989 default:
990 gcc_unreachable ();
991 }
992
993 if (addend)
994 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
995 orig_op0, 1, OPTAB_DIRECT);
996 if (orig_op0 == op0)
997 return NULL_RTX;
998 if (GET_MODE (orig_op0) == Pmode)
999 return op0;
1000 return gen_lowpart (GET_MODE (orig_op0), op0);
1001 }
1002
1003 rtx
1004 ia64_expand_move (rtx op0, rtx op1)
1005 {
1006 enum machine_mode mode = GET_MODE (op0);
1007
1008 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1009 op1 = force_reg (mode, op1);
1010
1011 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1012 {
1013 HOST_WIDE_INT addend = 0;
1014 enum tls_model tls_kind;
1015 rtx sym = op1;
1016
1017 if (GET_CODE (op1) == CONST
1018 && GET_CODE (XEXP (op1, 0)) == PLUS
1019 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1020 {
1021 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1022 sym = XEXP (XEXP (op1, 0), 0);
1023 }
1024
1025 tls_kind = tls_symbolic_operand_type (sym);
1026 if (tls_kind)
1027 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1028
1029 if (any_offset_symbol_operand (sym, mode))
1030 addend = 0;
1031 else if (aligned_offset_symbol_operand (sym, mode))
1032 {
1033 HOST_WIDE_INT addend_lo, addend_hi;
1034
1035 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1036 addend_hi = addend - addend_lo;
1037
1038 if (addend_lo != 0)
1039 {
1040 op1 = plus_constant (sym, addend_hi);
1041 addend = addend_lo;
1042 }
1043 else
1044 addend = 0;
1045 }
1046 else
1047 op1 = sym;
1048
1049 if (reload_completed)
1050 {
1051 /* We really should have taken care of this offset earlier. */
1052 gcc_assert (addend == 0);
1053 if (ia64_expand_load_address (op0, op1))
1054 return NULL_RTX;
1055 }
1056
1057 if (addend)
1058 {
1059 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1060
1061 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1062
1063 op1 = expand_simple_binop (mode, PLUS, subtarget,
1064 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1065 if (op0 == op1)
1066 return NULL_RTX;
1067 }
1068 }
1069
1070 return op1;
1071 }
1072
1073 /* Split a move from OP1 to OP0 conditional on COND. */
1074
1075 void
1076 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1077 {
1078 rtx insn, first = get_last_insn ();
1079
1080 emit_move_insn (op0, op1);
1081
1082 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1083 if (INSN_P (insn))
1084 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1085 PATTERN (insn));
1086 }
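
/* For illustration (register numbers invented): given COND of
   (ne (reg:BI p6) (const_int 0)), an emitted move
       (set (reg:DI r14) (reg:DI r15))
   is rewritten by the loop above into
       (cond_exec (ne (reg:BI p6) (const_int 0))
                  (set (reg:DI r14) (reg:DI r15)))
   so the copy executes only when the predicate is true.  */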
1087
1088 /* Split a post-reload TImode or TFmode reference into two DImode
1089 components. This is made extra difficult by the fact that we do
1090 not get any scratch registers to work with, because reload cannot
1091 be prevented from giving us a scratch that overlaps the register
1092 pair involved. So instead, when addressing memory, we tweak the
1093 pointer register up and back down with POST_INCs. Or up and not
1094 back down when we can get away with it.
1095
1096 REVERSED is true when the loads must be done in reversed order
1097 (high word first) for correctness. DEAD is true when the pointer
1098 dies with the second insn we generate and therefore the second
1099 address must not carry a postmodify.
1100
1101 May return an insn which is to be emitted after the moves. */
1102
1103 static rtx
1104 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1105 {
1106 rtx fixup = 0;
1107
1108 switch (GET_CODE (in))
1109 {
1110 case REG:
1111 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1112 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1113 break;
1114
1115 case CONST_INT:
1116 case CONST_DOUBLE:
1117 /* Cannot occur reversed. */
1118 gcc_assert (!reversed);
1119
1120 if (GET_MODE (in) != TFmode)
1121 split_double (in, &out[0], &out[1]);
1122 else
1123 /* split_double does not understand how to split a TFmode
1124 quantity into a pair of DImode constants. */
1125 {
1126 REAL_VALUE_TYPE r;
1127 unsigned HOST_WIDE_INT p[2];
1128 long l[4]; /* TFmode is 128 bits */
1129
1130 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1131 real_to_target (l, &r, TFmode);
1132
1133 if (FLOAT_WORDS_BIG_ENDIAN)
1134 {
1135 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1136 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1137 }
1138 else
1139 {
1140 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1141 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1142 }
1143 out[0] = GEN_INT (p[0]);
1144 out[1] = GEN_INT (p[1]);
1145 }
1146 break;
1147
1148 case MEM:
1149 {
1150 rtx base = XEXP (in, 0);
1151 rtx offset;
1152
1153 switch (GET_CODE (base))
1154 {
1155 case REG:
1156 if (!reversed)
1157 {
1158 out[0] = adjust_automodify_address
1159 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1160 out[1] = adjust_automodify_address
1161 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1162 }
1163 else
1164 {
1165 /* Reversal requires a pre-increment, which can only
1166 be done as a separate insn. */
1167 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1168 out[0] = adjust_automodify_address
1169 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1170 out[1] = adjust_address (in, DImode, 0);
1171 }
1172 break;
1173
1174 case POST_INC:
1175 gcc_assert (!reversed && !dead);
1176
1177 /* Just do the increment in two steps. */
1178 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1179 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1180 break;
1181
1182 case POST_DEC:
1183 gcc_assert (!reversed && !dead);
1184
1185 /* Add 8, subtract 24. */
1186 base = XEXP (base, 0);
1187 out[0] = adjust_automodify_address
1188 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1189 out[1] = adjust_automodify_address
1190 (in, DImode,
1191 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1192 8);
1193 break;
1194
1195 case POST_MODIFY:
1196 gcc_assert (!reversed && !dead);
1197
1198 /* Extract and adjust the modification. This case is
1199 trickier than the others, because we might have an
1200 index register, or we might have a combined offset that
1201 doesn't fit a signed 9-bit displacement field. We can
1202 assume the incoming expression is already legitimate. */
1203 offset = XEXP (base, 1);
1204 base = XEXP (base, 0);
1205
1206 out[0] = adjust_automodify_address
1207 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1208
1209 if (GET_CODE (XEXP (offset, 1)) == REG)
1210 {
1211 /* Can't adjust the postmodify to match. Emit the
1212 original, then a separate addition insn. */
1213 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1214 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1215 }
1216 else
1217 {
1218 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1219 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1220 {
1221 /* Again the postmodify cannot be made to match,
1222 but in this case it's more efficient to get rid
1223 of the postmodify entirely and fix up with an
1224 add insn. */
1225 out[1] = adjust_automodify_address (in, DImode, base, 8);
1226 fixup = gen_adddi3
1227 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1228 }
1229 else
1230 {
1231 /* Combined offset still fits in the displacement field.
1232 (We cannot overflow it at the high end.) */
1233 out[1] = adjust_automodify_address
1234 (in, DImode, gen_rtx_POST_MODIFY
1235 (Pmode, base, gen_rtx_PLUS
1236 (Pmode, base,
1237 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1238 8);
1239 }
1240 }
1241 break;
1242
1243 default:
1244 gcc_unreachable ();
1245 }
1246 break;
1247 }
1248
1249 default:
1250 gcc_unreachable ();
1251 }
1252
1253 return fixup;
1254 }
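
/* Example of the REG-address case above (illustrative): a non-reversed
   split of (mem:TI (reg r)) yields
       out[0] = (mem:DI (post_inc (reg r)))
       out[1] = (mem:DI (post_dec (reg r)))
   so the pointer is bumped up by 8 and back down again; when DEAD is true
   the second reference is just (mem:DI (reg r)), relying on the earlier
   post-increment, and the pointer is left pointing at the high word.  */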
1255
1256 /* Split a TImode or TFmode move instruction after reload.
1257 This is used by *movtf_internal and *movti_internal. */
1258 void
1259 ia64_split_tmode_move (rtx operands[])
1260 {
1261 rtx in[2], out[2], insn;
1262 rtx fixup[2];
1263 bool dead = false;
1264 bool reversed = false;
1265
1266 /* It is possible for reload to decide to overwrite a pointer with
1267 the value it points to. In that case we have to do the loads in
1268 the appropriate order so that the pointer is not destroyed too
1269 early. Also we must not generate a postmodify for that second
1270 load, or rws_access_regno will die. */
1271 if (GET_CODE (operands[1]) == MEM
1272 && reg_overlap_mentioned_p (operands[0], operands[1]))
1273 {
1274 rtx base = XEXP (operands[1], 0);
1275 while (GET_CODE (base) != REG)
1276 base = XEXP (base, 0);
1277
1278 if (REGNO (base) == REGNO (operands[0]))
1279 reversed = true;
1280 dead = true;
1281 }
1282 /* Another reason to do the moves in reversed order is if the first
1283 element of the target register pair is also the second element of
1284 the source register pair. */
1285 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1286 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1287 reversed = true;
1288
1289 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1290 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1291
1292 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1293 if (GET_CODE (EXP) == MEM \
1294 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1295 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1296 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1297 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1298 XEXP (XEXP (EXP, 0), 0), \
1299 REG_NOTES (INSN))
1300
1301 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1302 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1303 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1304
1305 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1306 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1307 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1308
1309 if (fixup[0])
1310 emit_insn (fixup[0]);
1311 if (fixup[1])
1312 emit_insn (fixup[1]);
1313
1314 #undef MAYBE_ADD_REG_INC_NOTE
1315 }
1316
1317 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1318 through memory plus an extra GR scratch register. Except that you can
1319 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1320 SECONDARY_RELOAD_CLASS, but not both.
1321
1322 We got into problems in the first place by allowing a construct like
1323 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1324 This solution attempts to prevent this situation from occurring. When
1325 we see something like the above, we spill the inner register to memory. */
1326
1327 static rtx
1328 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1329 {
1330 if (GET_CODE (in) == SUBREG
1331 && GET_MODE (SUBREG_REG (in)) == TImode
1332 && GET_CODE (SUBREG_REG (in)) == REG)
1333 {
1334 rtx memt = assign_stack_temp (TImode, 16, 0);
1335 emit_move_insn (memt, SUBREG_REG (in));
1336 return adjust_address (memt, mode, 0);
1337 }
1338 else if (force && GET_CODE (in) == REG)
1339 {
1340 rtx memx = assign_stack_temp (mode, 16, 0);
1341 emit_move_insn (memx, in);
1342 return memx;
1343 }
1344 else
1345 return in;
1346 }
1347
1348 /* Expand the movxf or movrf pattern (MODE says which) with the given
1349 OPERANDS, returning true if the pattern should then invoke
1350 DONE. */
1351
1352 bool
1353 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1354 {
1355 rtx op0 = operands[0];
1356
1357 if (GET_CODE (op0) == SUBREG)
1358 op0 = SUBREG_REG (op0);
1359
1360 /* We must support XFmode loads into general registers for stdarg/vararg,
1361 unprototyped calls, and a rare case where a long double is passed as
1362 an argument after a float HFA fills the FP registers. We split them into
1363 DImode loads for convenience. We also need to support XFmode stores
1364 for the last case. This case does not happen for stdarg/vararg routines,
1365 because we do a block store to memory of unnamed arguments. */
1366
1367 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1368 {
1369 rtx out[2];
1370
1371 /* We're hoping to transform everything that deals with XFmode
1372 quantities and GR registers early in the compiler. */
1373 gcc_assert (!no_new_pseudos);
1374
1375 /* Struct to register can just use TImode instead. */
1376 if ((GET_CODE (operands[1]) == SUBREG
1377 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1378 || (GET_CODE (operands[1]) == REG
1379 && GR_REGNO_P (REGNO (operands[1]))))
1380 {
1381 rtx op1 = operands[1];
1382
1383 if (GET_CODE (op1) == SUBREG)
1384 op1 = SUBREG_REG (op1);
1385 else
1386 op1 = gen_rtx_REG (TImode, REGNO (op1));
1387
1388 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1389 return true;
1390 }
1391
1392 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1393 {
1394 /* Don't word-swap when reading in the constant. */
1395 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1396 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1397 0, mode));
1398 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1399 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1400 0, mode));
1401 return true;
1402 }
1403
1404 /* If the quantity is in a register not known to be GR, spill it. */
1405 if (register_operand (operands[1], mode))
1406 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1407
1408 gcc_assert (GET_CODE (operands[1]) == MEM);
1409
1410 /* Don't word-swap when reading in the value. */
1411 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1412 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1413
1414 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1415 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1416 return true;
1417 }
1418
1419 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1420 {
1421 /* We're hoping to transform everything that deals with XFmode
1422 quantities and GR registers early in the compiler. */
1423 gcc_assert (!no_new_pseudos);
1424
1425 /* Op0 can't be a GR_REG here, as that case is handled above.
1426 If op0 is a register, then we spill op1, so that we now have a
1427 MEM operand. This requires creating an XFmode subreg of a TImode reg
1428 to force the spill. */
1429 if (register_operand (operands[0], mode))
1430 {
1431 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1432 op1 = gen_rtx_SUBREG (mode, op1, 0);
1433 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1434 }
1435
1436 else
1437 {
1438 rtx in[2];
1439
1440 gcc_assert (GET_CODE (operands[0]) == MEM);
1441
1442 /* Don't word-swap when writing out the value. */
1443 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1444 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1445
1446 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1447 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1448 return true;
1449 }
1450 }
1451
1452 if (!reload_in_progress && !reload_completed)
1453 {
1454 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1455
1456 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1457 {
1458 rtx memt, memx, in = operands[1];
1459 if (CONSTANT_P (in))
1460 in = validize_mem (force_const_mem (mode, in));
1461 if (GET_CODE (in) == MEM)
1462 memt = adjust_address (in, TImode, 0);
1463 else
1464 {
1465 memt = assign_stack_temp (TImode, 16, 0);
1466 memx = adjust_address (memt, mode, 0);
1467 emit_move_insn (memx, in);
1468 }
1469 emit_move_insn (op0, memt);
1470 return true;
1471 }
1472
1473 if (!ia64_move_ok (operands[0], operands[1]))
1474 operands[1] = force_reg (mode, operands[1]);
1475 }
1476
1477 return false;
1478 }
1479
1480 /* Emit comparison instruction if necessary, returning the expression
1481 that holds the compare result in the proper mode. */
1482
1483 static GTY(()) rtx cmptf_libfunc;
1484
1485 rtx
1486 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1487 {
1488 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1489 rtx cmp;
1490
1491 /* If we have a BImode input, then we already have a compare result, and
1492 do not need to emit another comparison. */
1493 if (GET_MODE (op0) == BImode)
1494 {
1495 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1496 cmp = op0;
1497 }
1498 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1499 magic number as its third argument that indicates what to do.
1500 The return value is an integer to be compared against zero. */
1501 else if (GET_MODE (op0) == TFmode)
1502 {
1503 enum qfcmp_magic {
1504 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1505 QCMP_UNORD = 2,
1506 QCMP_EQ = 4,
1507 QCMP_LT = 8,
1508 QCMP_GT = 16
1509 } magic;
1510 enum rtx_code ncode;
1511 rtx ret, insns;
1512
1513 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1514 switch (code)
1515 {
1516 /* 1 = equal, 0 = not equal. Equality operators do
1517 not raise FP_INVALID when given an SNaN operand. */
1518 case EQ: magic = QCMP_EQ; ncode = NE; break;
1519 case NE: magic = QCMP_EQ; ncode = EQ; break;
1520 /* isunordered() from C99. */
1521 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1522 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1523 /* Relational operators raise FP_INVALID when given
1524 an SNaN operand. */
1525 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1526 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1527 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1528 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1529 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1530 Expanders for buneq etc. would have to be added to ia64.md
1531 for this to be useful. */
1532 default: gcc_unreachable ();
1533 }
1534
1535 start_sequence ();
1536
1537 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1538 op0, TFmode, op1, TFmode,
1539 GEN_INT (magic), DImode);
1540 cmp = gen_reg_rtx (BImode);
1541 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1542 gen_rtx_fmt_ee (ncode, BImode,
1543 ret, const0_rtx)));
1544
1545 insns = get_insns ();
1546 end_sequence ();
1547
1548 emit_libcall_block (insns, cmp, cmp,
1549 gen_rtx_fmt_ee (code, BImode, op0, op1));
1550 code = NE;
1551 }
1552 else
1553 {
1554 cmp = gen_reg_rtx (BImode);
1555 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1556 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1557 code = NE;
1558 }
1559
1560 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1561 }
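
/* Worked example of the magic numbers above (for exposition): an LE
   compare uses QCMP_LT | QCMP_EQ | QCMP_INV == 8 + 4 + 1 == 13, so the
   expansion boils down to calling _U_Qfcmp (op0, op1, 13) and testing the
   returned integer against zero with NE.  */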
1562
1563 /* Generate an integral vector comparison. Return true if the condition has
1564 been reversed, and so the sense of the comparison should be inverted. */
1565
1566 static bool
1567 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1568 rtx dest, rtx op0, rtx op1)
1569 {
1570 bool negate = false;
1571 rtx x;
1572
1573 /* Canonicalize the comparison to EQ, GT, GTU. */
1574 switch (code)
1575 {
1576 case EQ:
1577 case GT:
1578 case GTU:
1579 break;
1580
1581 case NE:
1582 case LE:
1583 case LEU:
1584 code = reverse_condition (code);
1585 negate = true;
1586 break;
1587
1588 case GE:
1589 case GEU:
1590 code = reverse_condition (code);
1591 negate = true;
1592 /* FALLTHRU */
1593
1594 case LT:
1595 case LTU:
1596 code = swap_condition (code);
1597 x = op0, op0 = op1, op1 = x;
1598 break;
1599
1600 default:
1601 gcc_unreachable ();
1602 }
1603
1604 /* Unsigned parallel compare is not supported by the hardware. Play some
1605 tricks to turn this into a signed comparison against 0. */
1606 if (code == GTU)
1607 {
1608 switch (mode)
1609 {
1610 case V2SImode:
1611 {
1612 rtx t1, t2, mask;
1613
1614 /* Perform a parallel modulo subtraction. */
1615 t1 = gen_reg_rtx (V2SImode);
1616 emit_insn (gen_subv2si3 (t1, op0, op1));
1617
1618 /* Extract the original sign bit of op0. */
1619 mask = GEN_INT (-0x80000000);
1620 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1621 mask = force_reg (V2SImode, mask);
1622 t2 = gen_reg_rtx (V2SImode);
1623 emit_insn (gen_andv2si3 (t2, op0, mask));
1624
1625 /* XOR it back into the result of the subtraction. This results
1626 in the sign bit set iff we saw unsigned underflow. */
1627 x = gen_reg_rtx (V2SImode);
1628 emit_insn (gen_xorv2si3 (x, t1, t2));
1629
1630 code = GT;
1631 op0 = x;
1632 op1 = CONST0_RTX (mode);
1633 }
1634 break;
1635
1636 case V8QImode:
1637 case V4HImode:
1638 /* Perform a parallel unsigned saturating subtraction. */
1639 x = gen_reg_rtx (mode);
1640 emit_insn (gen_rtx_SET (VOIDmode, x,
1641 gen_rtx_US_MINUS (mode, op0, op1)));
1642
1643 code = EQ;
1644 op0 = x;
1645 op1 = CONST0_RTX (mode);
1646 negate = !negate;
1647 break;
1648
1649 default:
1650 gcc_unreachable ();
1651 }
1652 }
1653
1654 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1655 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1656
1657 return negate;
1658 }
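
/* Illustration of the GTU trick for V8QImode/V4HImode: op0 >u op1 in a
   given lane exactly when the unsigned saturating difference
   US_MINUS (op0, op1) is nonzero there, so the comparison is emitted as
   EQ against zero with the NEGATE flag flipped.  */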
1659
1660 /* Emit an integral vector conditional move. */
1661
1662 void
1663 ia64_expand_vecint_cmov (rtx operands[])
1664 {
1665 enum machine_mode mode = GET_MODE (operands[0]);
1666 enum rtx_code code = GET_CODE (operands[3]);
1667 bool negate;
1668 rtx cmp, x, ot, of;
1669
1670 cmp = gen_reg_rtx (mode);
1671 negate = ia64_expand_vecint_compare (code, mode, cmp,
1672 operands[4], operands[5]);
1673
1674 ot = operands[1+negate];
1675 of = operands[2-negate];
1676
1677 if (ot == CONST0_RTX (mode))
1678 {
1679 if (of == CONST0_RTX (mode))
1680 {
1681 emit_move_insn (operands[0], ot);
1682 return;
1683 }
1684
1685 x = gen_rtx_NOT (mode, cmp);
1686 x = gen_rtx_AND (mode, x, of);
1687 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1688 }
1689 else if (of == CONST0_RTX (mode))
1690 {
1691 x = gen_rtx_AND (mode, cmp, ot);
1692 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1693 }
1694 else
1695 {
1696 rtx t, f;
1697
1698 t = gen_reg_rtx (mode);
1699 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1700 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1701
1702 f = gen_reg_rtx (mode);
1703 x = gen_rtx_NOT (mode, cmp);
1704 x = gen_rtx_AND (mode, x, operands[2-negate]);
1705 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1706
1707 x = gen_rtx_IOR (mode, t, f);
1708 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1709 }
1710 }
1711
1712 /* Emit an integral vector min or max operation. Return true if all done. */
1713
1714 bool
1715 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1716 rtx operands[])
1717 {
1718 rtx xops[6];
1719
1720 /* These four combinations are supported directly. */
1721 if (mode == V8QImode && (code == UMIN || code == UMAX))
1722 return false;
1723 if (mode == V4HImode && (code == SMIN || code == SMAX))
1724 return false;
1725
1726 /* This combination can be implemented with only saturating subtraction. */
1727 if (mode == V4HImode && code == UMAX)
1728 {
1729 rtx x, tmp = gen_reg_rtx (mode);
1730
1731 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1732 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1733
1734 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1735 return true;
1736 }
1737
1738 /* Everything else implemented via vector comparisons. */
1739 xops[0] = operands[0];
1740 xops[4] = xops[1] = operands[1];
1741 xops[5] = xops[2] = operands[2];
1742
1743 switch (code)
1744 {
1745 case UMIN:
1746 code = LTU;
1747 break;
1748 case UMAX:
1749 code = GTU;
1750 break;
1751 case SMIN:
1752 code = LT;
1753 break;
1754 case SMAX:
1755 code = GT;
1756 break;
1757 default:
1758 gcc_unreachable ();
1759 }
1760 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1761
1762 ia64_expand_vecint_cmov (xops);
1763 return true;
1764 }
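
/* Worked example of the V4HImode UMAX case (values invented): with lanes
   a == 5 and b == 9, sat (5 - 9) + 9 == 0 + 9 == 9, and with a == 9 and
   b == 5, sat (9 - 5) + 5 == 4 + 5 == 9, so the saturating subtraction
   followed by an add produces the unsigned maximum in every lane.  */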
1765
1766 /* Emit an integral vector widening sum operation. */
1767
1768 void
1769 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1770 {
1771 rtx l, h, x, s;
1772 enum machine_mode wmode, mode;
1773 rtx (*unpack_l) (rtx, rtx, rtx);
1774 rtx (*unpack_h) (rtx, rtx, rtx);
1775 rtx (*plus) (rtx, rtx, rtx);
1776
1777 wmode = GET_MODE (operands[0]);
1778 mode = GET_MODE (operands[1]);
1779
1780 switch (mode)
1781 {
1782 case V8QImode:
1783 unpack_l = gen_unpack1_l;
1784 unpack_h = gen_unpack1_h;
1785 plus = gen_addv4hi3;
1786 break;
1787 case V4HImode:
1788 unpack_l = gen_unpack2_l;
1789 unpack_h = gen_unpack2_h;
1790 plus = gen_addv2si3;
1791 break;
1792 default:
1793 gcc_unreachable ();
1794 }
1795
1796 /* Fill in x with the sign extension of each element in op1. */
1797 if (unsignedp)
1798 x = CONST0_RTX (mode);
1799 else
1800 {
1801 bool neg;
1802
1803 x = gen_reg_rtx (mode);
1804
1805 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1806 CONST0_RTX (mode));
1807 gcc_assert (!neg);
1808 }
1809
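       /* X is now either all-zeros (unsigned) or, thanks to the LT comparison
          above, all-ones per negative element (signed); either way it supplies
          exactly the high halves needed by the unpack insns below.  */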
1810 l = gen_reg_rtx (wmode);
1811 h = gen_reg_rtx (wmode);
1812 s = gen_reg_rtx (wmode);
1813
1814 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1815 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1816 emit_insn (plus (s, l, operands[2]));
1817 emit_insn (plus (operands[0], h, s));
1818 }
1819
1820 /* Emit a signed or unsigned V8QI dot product operation. */
1821
1822 void
1823 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1824 {
1825 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1826
1827 /* Fill in x1 and x2 with the sign extension of each element. */
1828 if (unsignedp)
1829 x1 = x2 = CONST0_RTX (V8QImode);
1830 else
1831 {
1832 bool neg;
1833
1834 x1 = gen_reg_rtx (V8QImode);
1835 x2 = gen_reg_rtx (V8QImode);
1836
1837 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1838 CONST0_RTX (V8QImode));
1839 gcc_assert (!neg);
1840 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1841 CONST0_RTX (V8QImode));
1842 gcc_assert (!neg);
1843 }
1844
1845 l1 = gen_reg_rtx (V4HImode);
1846 l2 = gen_reg_rtx (V4HImode);
1847 h1 = gen_reg_rtx (V4HImode);
1848 h2 = gen_reg_rtx (V4HImode);
1849
1850 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1851 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1852 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1853 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1854
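       /* pmpy2_r and pmpy2_l each multiply half of the V4HI lanes, producing
          widened V2SI products; the additions below fold the four partial
          products together with the accumulator in operands[3].  */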
1855 p1 = gen_reg_rtx (V2SImode);
1856 p2 = gen_reg_rtx (V2SImode);
1857 p3 = gen_reg_rtx (V2SImode);
1858 p4 = gen_reg_rtx (V2SImode);
1859 emit_insn (gen_pmpy2_r (p1, l1, l2));
1860 emit_insn (gen_pmpy2_l (p2, l1, l2));
1861 emit_insn (gen_pmpy2_r (p3, h1, h2));
1862 emit_insn (gen_pmpy2_l (p4, h1, h2));
1863
1864 s1 = gen_reg_rtx (V2SImode);
1865 s2 = gen_reg_rtx (V2SImode);
1866 s3 = gen_reg_rtx (V2SImode);
1867 emit_insn (gen_addv2si3 (s1, p1, p2));
1868 emit_insn (gen_addv2si3 (s2, p3, p4));
1869 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1870 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1871 }
1872
1873 /* Emit the appropriate sequence for a call. */
1874
1875 void
1876 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1877 int sibcall_p)
1878 {
1879 rtx insn, b0;
1880
1881 addr = XEXP (addr, 0);
1882 addr = convert_memory_address (DImode, addr);
1883 b0 = gen_rtx_REG (DImode, R_BR (0));
1884
1885 /* ??? Should do this for functions known to bind local too. */
1886 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1887 {
1888 if (sibcall_p)
1889 insn = gen_sibcall_nogp (addr);
1890 else if (! retval)
1891 insn = gen_call_nogp (addr, b0);
1892 else
1893 insn = gen_call_value_nogp (retval, addr, b0);
1894 insn = emit_call_insn (insn);
1895 }
1896 else
1897 {
1898 if (sibcall_p)
1899 insn = gen_sibcall_gp (addr);
1900 else if (! retval)
1901 insn = gen_call_gp (addr, b0);
1902 else
1903 insn = gen_call_value_gp (retval, addr, b0);
1904 insn = emit_call_insn (insn);
1905
1906 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1907 }
1908
1909 if (sibcall_p)
1910 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1911 }
1912
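     /* Reload the GP register (r1) from wherever it was saved: either the
        general register recorded in current_frame_info.reg_save_gp or its
        memory save slot in the frame.  */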
1913 void
1914 ia64_reload_gp (void)
1915 {
1916 rtx tmp;
1917
1918 if (current_frame_info.reg_save_gp)
1919 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1920 else
1921 {
1922 HOST_WIDE_INT offset;
1923
1924 offset = (current_frame_info.spill_cfa_off
1925 + current_frame_info.spill_size);
1926 if (frame_pointer_needed)
1927 {
1928 tmp = hard_frame_pointer_rtx;
1929 offset = -offset;
1930 }
1931 else
1932 {
1933 tmp = stack_pointer_rtx;
1934 offset = current_frame_info.total_size - offset;
1935 }
1936
1937 if (CONST_OK_FOR_I (offset))
1938 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1939 tmp, GEN_INT (offset)));
1940 else
1941 {
1942 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1943 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1944 pic_offset_table_rtx, tmp));
1945 }
1946
1947 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1948 }
1949
1950 emit_move_insn (pic_offset_table_rtx, tmp);
1951 }
1952
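     /* Split a call through ADDR into its final form.  If ADDR is a general
        register, it addresses a function descriptor: load the entry point
        into SCRATCH_B (via SCRATCH_R) and the descriptor's gp value, emit the
        call, and reload our own gp afterwards where that is needed.  */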
1953 void
1954 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1955 rtx scratch_b, int noreturn_p, int sibcall_p)
1956 {
1957 rtx insn;
1958 bool is_desc = false;
1959
1960 /* If we find we're calling through a register, then we're actually
1961 calling through a descriptor, so load up the values. */
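       /* (A function descriptor is two 8-byte words: the code entry point
          followed by the gp value; the POST_INC/POST_DEC below step between
          those two words.)  */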
1962 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1963 {
1964 rtx tmp;
1965 bool addr_dead_p;
1966
1967 /* ??? We are currently constrained to *not* use peep2, because
1968 we can legitimately change the global lifetime of the GP
1969 (in the form of killing where previously live). This is
1970 because a call through a descriptor doesn't use the previous
1971 value of the GP, while a direct call does, and we do not
1972 commit to either form until the split here.
1973
1974 That said, this means that we lack precise life info for
1975 whether ADDR is dead after this call. This is not terribly
1976 important, since we can fix things up essentially for free
1977 with the POST_DEC below, but it's nice to not use it when we
1978 can immediately tell it's not necessary. */
1979 addr_dead_p = ((noreturn_p || sibcall_p
1980 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1981 REGNO (addr)))
1982 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1983
1984 /* Load the code address into scratch_b. */
1985 tmp = gen_rtx_POST_INC (Pmode, addr);
1986 tmp = gen_rtx_MEM (Pmode, tmp);
1987 emit_move_insn (scratch_r, tmp);
1988 emit_move_insn (scratch_b, scratch_r);
1989
1990 /* Load the GP address. If ADDR is not dead here, then we must
1991 revert the change made above via the POST_INCREMENT. */
1992 if (!addr_dead_p)
1993 tmp = gen_rtx_POST_DEC (Pmode, addr);
1994 else
1995 tmp = addr;
1996 tmp = gen_rtx_MEM (Pmode, tmp);
1997 emit_move_insn (pic_offset_table_rtx, tmp);
1998
1999 is_desc = true;
2000 addr = scratch_b;
2001 }
2002
2003 if (sibcall_p)
2004 insn = gen_sibcall_nogp (addr);
2005 else if (retval)
2006 insn = gen_call_value_nogp (retval, addr, retaddr);
2007 else
2008 insn = gen_call_nogp (addr, retaddr);
2009 emit_call_insn (insn);
2010
2011 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2012 ia64_reload_gp ();
2013 }
2014
2015 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2016
2017 This differs from the generic code in that we know about the zero-extending
2018 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2019 also know that ld.acq+cmpxchg.rel equals a full barrier.
2020
2021 The loop we want to generate looks like
2022
2023 cmp_reg = mem;
2024 label:
2025 old_reg = cmp_reg;
2026 new_reg = cmp_reg op val;
2027 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2028 if (cmp_reg != old_reg)
2029 goto label;
2030
2031 Note that we only do the plain load from memory once. Subsequent
2032 iterations use the value loaded by the compare-and-swap pattern. */
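     /* For SImode and PLUS this corresponds roughly to the following ia64
        code (an illustrative sketch only, not verbatim output; register
        names are invented):

             ld4.acq       r_cmp = [mem]
        label:
             mov           r_old = r_cmp
             add           r_new = r_cmp, val
             mov           ar.ccv = r_old
             cmpxchg4.rel  r_cmp = [mem], r_new, ar.ccv
             cmp.ne        p6, p7 = r_cmp, r_old
        (p6) br.cond       label  */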
2033
2034 void
2035 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2036 rtx old_dst, rtx new_dst)
2037 {
2038 enum machine_mode mode = GET_MODE (mem);
2039 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2040 enum insn_code icode;
2041
2042 /* Special case for using fetchadd. */
2043 if ((mode == SImode || mode == DImode)
2044 && (code == PLUS || code == MINUS)
2045 && fetchadd_operand (val, mode))
2046 {
2047 if (code == MINUS)
2048 val = GEN_INT (-INTVAL (val));
2049
2050 if (!old_dst)
2051 old_dst = gen_reg_rtx (mode);
2052
2053 emit_insn (gen_memory_barrier ());
2054
2055 if (mode == SImode)
2056 icode = CODE_FOR_fetchadd_acq_si;
2057 else
2058 icode = CODE_FOR_fetchadd_acq_di;
2059 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2060
2061 if (new_dst)
2062 {
2063 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2064 true, OPTAB_WIDEN);
2065 if (new_reg != new_dst)
2066 emit_move_insn (new_dst, new_reg);
2067 }
2068 return;
2069 }
2070
2071 /* Because of the volatile mem read, we get an ld.acq, which is the
2072 front half of the full barrier. The end half is the cmpxchg.rel. */
2073 gcc_assert (MEM_VOLATILE_P (mem));
2074
2075 old_reg = gen_reg_rtx (DImode);
2076 cmp_reg = gen_reg_rtx (DImode);
2077 label = gen_label_rtx ();
2078
2079 if (mode != DImode)
2080 {
2081 val = simplify_gen_subreg (DImode, val, mode, 0);
2082 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2083 }
2084 else
2085 emit_move_insn (cmp_reg, mem);
2086
2087 emit_label (label);
2088
2089 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2090 emit_move_insn (old_reg, cmp_reg);
2091 emit_move_insn (ar_ccv, cmp_reg);
2092
2093 if (old_dst)
2094 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2095
2096 new_reg = cmp_reg;
2097 if (code == NOT)
2098 {
2099 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2100 code = AND;
2101 }
2102 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2103 true, OPTAB_DIRECT);
2104
2105 if (mode != DImode)
2106 new_reg = gen_lowpart (mode, new_reg);
2107 if (new_dst)
2108 emit_move_insn (new_dst, new_reg);
2109
2110 switch (mode)
2111 {
2112 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2113 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2114 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2115 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2116 default:
2117 gcc_unreachable ();
2118 }
2119
2120 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2121
2122 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2123 }
2124 \f
2125 /* Begin the assembly file. */
2126
2127 static void
2128 ia64_file_start (void)
2129 {
2130 /* Variable tracking should be run after all optimizations which change order
2131 of insns. It also needs a valid CFG. This can't be done in
2132 ia64_override_options, because flag_var_tracking is finalized after
2133 that. */
2134 ia64_flag_var_tracking = flag_var_tracking;
2135 flag_var_tracking = 0;
2136
2137 default_file_start ();
2138 emit_safe_across_calls ();
2139 }
2140
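     /* Emit a .pred.safe_across_calls directive listing, in ranges where
        possible, the predicate registers that are not call-used and hence
        preserved across calls.  */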
2141 void
2142 emit_safe_across_calls (void)
2143 {
2144 unsigned int rs, re;
2145 int out_state;
2146
2147 rs = 1;
2148 out_state = 0;
2149 while (1)
2150 {
2151 while (rs < 64 && call_used_regs[PR_REG (rs)])
2152 rs++;
2153 if (rs >= 64)
2154 break;
2155 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2156 continue;
2157 if (out_state == 0)
2158 {
2159 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2160 out_state = 1;
2161 }
2162 else
2163 fputc (',', asm_out_file);
2164 if (re == rs + 1)
2165 fprintf (asm_out_file, "p%u", rs);
2166 else
2167 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2168 rs = re + 1;
2169 }
2170 if (out_state)
2171 fputc ('\n', asm_out_file);
2172 }
2173
2174 /* Helper function for ia64_compute_frame_size: find an appropriate general
2175 register to spill some special register to. current_frame_info.gr_used_mask
2176 records the bits in GR0 to GR31 that have already been allocated by this
2177 routine. TRY_LOCALS is true if we should attempt to locate a local regnum. */
2178
2179 static int
2180 find_gr_spill (int try_locals)
2181 {
2182 int regno;
2183
2184 /* If this is a leaf function, first try an otherwise unused
2185 call-clobbered register. */
2186 if (current_function_is_leaf)
2187 {
2188 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2189 if (! regs_ever_live[regno]
2190 && call_used_regs[regno]
2191 && ! fixed_regs[regno]
2192 && ! global_regs[regno]
2193 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2194 {
2195 current_frame_info.gr_used_mask |= 1 << regno;
2196 return regno;
2197 }
2198 }
2199
2200 if (try_locals)
2201 {
2202 regno = current_frame_info.n_local_regs;
2203 /* If there is a frame pointer, then we can't use loc79, because
2204 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2205 reg_name switching code in ia64_expand_prologue. */
2206 if (regno < (80 - frame_pointer_needed))
2207 {
2208 current_frame_info.n_local_regs = regno + 1;
2209 return LOC_REG (0) + regno;
2210 }
2211 }
2212
2213 /* Failed to find a general register to spill to. Must use stack. */
2214 return 0;
2215 }
2216
2217 /* In order to make for nice schedules, we try to allocate every temporary
2218 to a different register. We must of course stay away from call-saved,
2219 fixed, and global registers. We must also stay away from registers
2220 allocated in current_frame_info.gr_used_mask, since those include regs
2221 used all through the prologue.
2222
2223 Any register allocated here must be used immediately. The idea is to
2224 aid scheduling, not to solve data flow problems. */
2225
2226 static int last_scratch_gr_reg;
2227
2228 static int
2229 next_scratch_gr_reg (void)
2230 {
2231 int i, regno;
2232
2233 for (i = 0; i < 32; ++i)
2234 {
2235 regno = (last_scratch_gr_reg + i + 1) & 31;
2236 if (call_used_regs[regno]
2237 && ! fixed_regs[regno]
2238 && ! global_regs[regno]
2239 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2240 {
2241 last_scratch_gr_reg = regno;
2242 return regno;
2243 }
2244 }
2245
2246 /* There must be _something_ available. */
2247 gcc_unreachable ();
2248 }
2249
2250 /* Helper function for ia64_compute_frame_size, called through
2251 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2252
2253 static void
2254 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2255 {
2256 unsigned int regno = REGNO (reg);
2257 if (regno < 32)
2258 {
2259 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2260 for (i = 0; i < n; ++i)
2261 current_frame_info.gr_used_mask |= 1 << (regno + i);
2262 }
2263 }
2264
2265 /* Compute the layout of the stack frame for the current function, recording
2266 the results in current_frame_info. SIZE is the number of bytes of space
2267 needed for local variables. */
2268
2269 static void
2270 ia64_compute_frame_size (HOST_WIDE_INT size)
2271 {
2272 HOST_WIDE_INT total_size;
2273 HOST_WIDE_INT spill_size = 0;
2274 HOST_WIDE_INT extra_spill_size = 0;
2275 HOST_WIDE_INT pretend_args_size;
2276 HARD_REG_SET mask;
2277 int n_spilled = 0;
2278 int spilled_gr_p = 0;
2279 int spilled_fr_p = 0;
2280 unsigned int regno;
2281 int i;
2282
2283 if (current_frame_info.initialized)
2284 return;
2285
2286 memset (&current_frame_info, 0, sizeof current_frame_info);
2287 CLEAR_HARD_REG_SET (mask);
2288
2289 /* Don't allocate scratches to the return register. */
2290 diddle_return_value (mark_reg_gr_used_mask, NULL);
2291
2292 /* Don't allocate scratches to the EH scratch registers. */
2293 if (cfun->machine->ia64_eh_epilogue_sp)
2294 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2295 if (cfun->machine->ia64_eh_epilogue_bsp)
2296 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2297
2298 /* Find the size of the register stack frame. We have only 80 local
2299 registers, because we reserve 8 for the inputs and 8 for the
2300 outputs. */
2301
2302 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2303 since we'll be adjusting that down later. */
2304 regno = LOC_REG (78) + ! frame_pointer_needed;
2305 for (; regno >= LOC_REG (0); regno--)
2306 if (regs_ever_live[regno])
2307 break;
2308 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2309
2310 /* For functions marked with the syscall_linkage attribute, we must mark
2311 all eight input registers as in use, so that locals aren't visible to
2312 the caller. */
2313
2314 if (cfun->machine->n_varargs > 0
2315 || lookup_attribute ("syscall_linkage",
2316 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2317 current_frame_info.n_input_regs = 8;
2318 else
2319 {
2320 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2321 if (regs_ever_live[regno])
2322 break;
2323 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2324 }
2325
2326 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2327 if (regs_ever_live[regno])
2328 break;
2329 i = regno - OUT_REG (0) + 1;
2330
2331 #ifndef PROFILE_HOOK
2332 /* When -p profiling, we need one output register for the mcount argument.
2333 Likewise for -a profiling for the bb_init_func argument. For -ax
2334 profiling, we need two output registers for the two bb_init_trace_func
2335 arguments. */
2336 if (current_function_profile)
2337 i = MAX (i, 1);
2338 #endif
2339 current_frame_info.n_output_regs = i;
2340
2341 /* ??? No rotating register support yet. */
2342 current_frame_info.n_rotate_regs = 0;
2343
2344 /* Discover which registers need spilling, and how much room that
2345 will take. Begin with floating point and general registers,
2346 which will always wind up on the stack. */
2347
2348 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2349 if (regs_ever_live[regno] && ! call_used_regs[regno])
2350 {
2351 SET_HARD_REG_BIT (mask, regno);
2352 spill_size += 16;
2353 n_spilled += 1;
2354 spilled_fr_p = 1;
2355 }
2356
2357 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2358 if (regs_ever_live[regno] && ! call_used_regs[regno])
2359 {
2360 SET_HARD_REG_BIT (mask, regno);
2361 spill_size += 8;
2362 n_spilled += 1;
2363 spilled_gr_p = 1;
2364 }
2365
2366 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2367 if (regs_ever_live[regno] && ! call_used_regs[regno])
2368 {
2369 SET_HARD_REG_BIT (mask, regno);
2370 spill_size += 8;
2371 n_spilled += 1;
2372 }
2373
2374 /* Now come all special registers that might get saved in other
2375 general registers. */
2376
2377 if (frame_pointer_needed)
2378 {
2379 current_frame_info.reg_fp = find_gr_spill (1);
2380 /* If we did not get a register, then we take LOC79. This is guaranteed
2381 to be free, even if regs_ever_live is already set, because this is
2382 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2383 as we don't count loc79 above. */
2384 if (current_frame_info.reg_fp == 0)
2385 {
2386 current_frame_info.reg_fp = LOC_REG (79);
2387 current_frame_info.n_local_regs++;
2388 }
2389 }
2390
2391 if (! current_function_is_leaf)
2392 {
2393 /* Emit a save of BR0 if we call other functions. Do this even
2394 if this function doesn't return, as EH depends on this to be
2395 able to unwind the stack. */
2396 SET_HARD_REG_BIT (mask, BR_REG (0));
2397
2398 current_frame_info.reg_save_b0 = find_gr_spill (1);
2399 if (current_frame_info.reg_save_b0 == 0)
2400 {
2401 spill_size += 8;
2402 n_spilled += 1;
2403 }
2404
2405 /* Similarly for ar.pfs. */
2406 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2407 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2408 if (current_frame_info.reg_save_ar_pfs == 0)
2409 {
2410 extra_spill_size += 8;
2411 n_spilled += 1;
2412 }
2413
2414 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2415 registers are clobbered, so we fall back to the stack. */
2416 current_frame_info.reg_save_gp
2417 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2418 if (current_frame_info.reg_save_gp == 0)
2419 {
2420 SET_HARD_REG_BIT (mask, GR_REG (1));
2421 spill_size += 8;
2422 n_spilled += 1;
2423 }
2424 }
2425 else
2426 {
2427 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2428 {
2429 SET_HARD_REG_BIT (mask, BR_REG (0));
2430 spill_size += 8;
2431 n_spilled += 1;
2432 }
2433
2434 if (regs_ever_live[AR_PFS_REGNUM])
2435 {
2436 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2437 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2438 if (current_frame_info.reg_save_ar_pfs == 0)
2439 {
2440 extra_spill_size += 8;
2441 n_spilled += 1;
2442 }
2443 }
2444 }
2445
2446 /* Unwind descriptor hackery: things are most efficient if we allocate
2447 consecutive GR save registers for RP, PFS, FP in that order. However,
2448 it is absolutely critical that FP get the only hard register that's
2449 guaranteed to be free, so we allocated it first. If all three did
2450 happen to be allocated hard regs, and are consecutive, rearrange them
2451 into the preferred order now. */
2452 if (current_frame_info.reg_fp != 0
2453 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2454 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2455 {
2456 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2457 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2458 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2459 }
2460
2461 /* See if we need to store the predicate register block. */
2462 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2463 if (regs_ever_live[regno] && ! call_used_regs[regno])
2464 break;
2465 if (regno <= PR_REG (63))
2466 {
2467 SET_HARD_REG_BIT (mask, PR_REG (0));
2468 current_frame_info.reg_save_pr = find_gr_spill (1);
2469 if (current_frame_info.reg_save_pr == 0)
2470 {
2471 extra_spill_size += 8;
2472 n_spilled += 1;
2473 }
2474
2475 /* ??? Mark them all as used so that register renaming and such
2476 are free to use them. */
2477 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2478 regs_ever_live[regno] = 1;
2479 }
2480
2481 /* If we're forced to use st8.spill, we're forced to save and restore
2482 ar.unat as well. The check for existing liveness allows inline asm
2483 to touch ar.unat. */
2484 if (spilled_gr_p || cfun->machine->n_varargs
2485 || regs_ever_live[AR_UNAT_REGNUM])
2486 {
2487 regs_ever_live[AR_UNAT_REGNUM] = 1;
2488 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2489 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2490 if (current_frame_info.reg_save_ar_unat == 0)
2491 {
2492 extra_spill_size += 8;
2493 n_spilled += 1;
2494 }
2495 }
2496
2497 if (regs_ever_live[AR_LC_REGNUM])
2498 {
2499 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2500 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2501 if (current_frame_info.reg_save_ar_lc == 0)
2502 {
2503 extra_spill_size += 8;
2504 n_spilled += 1;
2505 }
2506 }
2507
2508 /* If we have an odd number of words of pretend arguments written to
2509 the stack, then the FR save area will be unaligned. We round the
2510 size of this area up to keep things 16 byte aligned. */
2511 if (spilled_fr_p)
2512 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2513 else
2514 pretend_args_size = current_function_pretend_args_size;
2515
2516 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2517 + current_function_outgoing_args_size);
2518 total_size = IA64_STACK_ALIGN (total_size);
2519
2520 /* We always use the 16-byte scratch area provided by the caller, but
2521 if we are a leaf function, there's no one to which we need to provide
2522 a scratch area. */
2523 if (current_function_is_leaf)
2524 total_size = MAX (0, total_size - 16);
2525
2526 current_frame_info.total_size = total_size;
2527 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2528 current_frame_info.spill_size = spill_size;
2529 current_frame_info.extra_spill_size = extra_spill_size;
2530 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2531 current_frame_info.n_spilled = n_spilled;
2532 current_frame_info.initialized = reload_completed;
2533 }
2534
2535 /* Compute the initial difference between the specified pair of registers. */
2536
2537 HOST_WIDE_INT
2538 ia64_initial_elimination_offset (int from, int to)
2539 {
2540 HOST_WIDE_INT offset;
2541
2542 ia64_compute_frame_size (get_frame_size ());
2543 switch (from)
2544 {
2545 case FRAME_POINTER_REGNUM:
2546 switch (to)
2547 {
2548 case HARD_FRAME_POINTER_REGNUM:
2549 if (current_function_is_leaf)
2550 offset = -current_frame_info.total_size;
2551 else
2552 offset = -(current_frame_info.total_size
2553 - current_function_outgoing_args_size - 16);
2554 break;
2555
2556 case STACK_POINTER_REGNUM:
2557 if (current_function_is_leaf)
2558 offset = 0;
2559 else
2560 offset = 16 + current_function_outgoing_args_size;
2561 break;
2562
2563 default:
2564 gcc_unreachable ();
2565 }
2566 break;
2567
2568 case ARG_POINTER_REGNUM:
2569 /* Arguments start above the 16 byte save area, unless stdarg,
2570 in which case we store through the 16 byte save area. */
2571 switch (to)
2572 {
2573 case HARD_FRAME_POINTER_REGNUM:
2574 offset = 16 - current_function_pretend_args_size;
2575 break;
2576
2577 case STACK_POINTER_REGNUM:
2578 offset = (current_frame_info.total_size
2579 + 16 - current_function_pretend_args_size);
2580 break;
2581
2582 default:
2583 gcc_unreachable ();
2584 }
2585 break;
2586
2587 default:
2588 gcc_unreachable ();
2589 }
2590
2591 return offset;
2592 }
2593
2594 /* If there are more than a trivial number of register spills, we use
2595 two interleaved iterators so that we can get two memory references
2596 per insn group.
2597
2598 In order to simplify things in the prologue and epilogue expanders,
2599 we use helper functions to fix up the memory references after the
2600 fact with the appropriate offsets to a POST_MODIFY memory mode.
2601 The following data structure tracks the state of the two iterators
2602 while insns are being emitted. */
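     /* With two iterators, the spills come out interleaved, roughly like
        (an illustrative sketch, not verbatim output):

             st8.spill  [iter0] = rX, 16
             st8.spill  [iter1] = rY, 16
             st8.spill  [iter0] = rZ, 16
             ...

        letting each insn group issue two memory operations.  */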
2603
2604 struct spill_fill_data
2605 {
2606 rtx init_after; /* point at which to emit initializations */
2607 rtx init_reg[2]; /* initial base register */
2608 rtx iter_reg[2]; /* the iterator registers */
2609 rtx *prev_addr[2]; /* address of last memory use */
2610 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2611 HOST_WIDE_INT prev_off[2]; /* last offset */
2612 int n_iter; /* number of iterators in use */
2613 int next_iter; /* next iterator to use */
2614 unsigned int save_gr_used_mask;
2615 };
2616
2617 static struct spill_fill_data spill_fill_data;
2618
2619 static void
2620 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2621 {
2622 int i;
2623
2624 spill_fill_data.init_after = get_last_insn ();
2625 spill_fill_data.init_reg[0] = init_reg;
2626 spill_fill_data.init_reg[1] = init_reg;
2627 spill_fill_data.prev_addr[0] = NULL;
2628 spill_fill_data.prev_addr[1] = NULL;
2629 spill_fill_data.prev_insn[0] = NULL;
2630 spill_fill_data.prev_insn[1] = NULL;
2631 spill_fill_data.prev_off[0] = cfa_off;
2632 spill_fill_data.prev_off[1] = cfa_off;
2633 spill_fill_data.next_iter = 0;
2634 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2635
2636 spill_fill_data.n_iter = 1 + (n_spills > 2);
2637 for (i = 0; i < spill_fill_data.n_iter; ++i)
2638 {
2639 int regno = next_scratch_gr_reg ();
2640 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2641 current_frame_info.gr_used_mask |= 1 << regno;
2642 }
2643 }
2644
2645 static void
2646 finish_spill_pointers (void)
2647 {
2648 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2649 }
2650
2651 static rtx
2652 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2653 {
2654 int iter = spill_fill_data.next_iter;
2655 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2656 rtx disp_rtx = GEN_INT (disp);
2657 rtx mem;
2658
2659 if (spill_fill_data.prev_addr[iter])
2660 {
2661 if (CONST_OK_FOR_N (disp))
2662 {
2663 *spill_fill_data.prev_addr[iter]
2664 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2665 gen_rtx_PLUS (DImode,
2666 spill_fill_data.iter_reg[iter],
2667 disp_rtx));
2668 REG_NOTES (spill_fill_data.prev_insn[iter])
2669 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2670 REG_NOTES (spill_fill_data.prev_insn[iter]));
2671 }
2672 else
2673 {
2674 /* ??? Could use register post_modify for loads. */
2675 if (! CONST_OK_FOR_I (disp))
2676 {
2677 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2678 emit_move_insn (tmp, disp_rtx);
2679 disp_rtx = tmp;
2680 }
2681 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2682 spill_fill_data.iter_reg[iter], disp_rtx));
2683 }
2684 }
2685 /* Micro-optimization: if we've created a frame pointer, it's at
2686 CFA 0, which may allow the real iterator to be initialized lower,
2687 slightly increasing parallelism. Also, if there are few saves
2688 it may eliminate the iterator entirely. */
2689 else if (disp == 0
2690 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2691 && frame_pointer_needed)
2692 {
2693 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2694 set_mem_alias_set (mem, get_varargs_alias_set ());
2695 return mem;
2696 }
2697 else
2698 {
2699 rtx seq, insn;
2700
2701 if (disp == 0)
2702 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2703 spill_fill_data.init_reg[iter]);
2704 else
2705 {
2706 start_sequence ();
2707
2708 if (! CONST_OK_FOR_I (disp))
2709 {
2710 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2711 emit_move_insn (tmp, disp_rtx);
2712 disp_rtx = tmp;
2713 }
2714
2715 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2716 spill_fill_data.init_reg[iter],
2717 disp_rtx));
2718
2719 seq = get_insns ();
2720 end_sequence ();
2721 }
2722
2723 /* Careful for being the first insn in a sequence. */
2724 if (spill_fill_data.init_after)
2725 insn = emit_insn_after (seq, spill_fill_data.init_after);
2726 else
2727 {
2728 rtx first = get_insns ();
2729 if (first)
2730 insn = emit_insn_before (seq, first);
2731 else
2732 insn = emit_insn (seq);
2733 }
2734 spill_fill_data.init_after = insn;
2735
2736 /* If DISP is 0, we may or may not have a further adjustment
2737 afterward. If we do, then the load/store insn may be modified
2738 to be a post-modify. If we don't, then this copy may be
2739 eliminated by copyprop_hardreg_forward, which makes this
2740 insn garbage, which runs afoul of the sanity check in
2741 propagate_one_insn. So mark this insn as legal to delete. */
2742 if (disp == 0)
2743 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2744 REG_NOTES (insn));
2745 }
2746
2747 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2748
2749 /* ??? Not all of the spills are for varargs, but some of them are.
2750 The rest of the spills belong in an alias set of their own. But
2751 it doesn't actually hurt to include them here. */
2752 set_mem_alias_set (mem, get_varargs_alias_set ());
2753
2754 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2755 spill_fill_data.prev_off[iter] = cfa_off;
2756
2757 if (++iter >= spill_fill_data.n_iter)
2758 iter = 0;
2759 spill_fill_data.next_iter = iter;
2760
2761 return mem;
2762 }
2763
2764 static void
2765 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2766 rtx frame_reg)
2767 {
2768 int iter = spill_fill_data.next_iter;
2769 rtx mem, insn;
2770
2771 mem = spill_restore_mem (reg, cfa_off);
2772 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2773 spill_fill_data.prev_insn[iter] = insn;
2774
2775 if (frame_reg)
2776 {
2777 rtx base;
2778 HOST_WIDE_INT off;
2779
2780 RTX_FRAME_RELATED_P (insn) = 1;
2781
2782 /* Don't even pretend that the unwind code can intuit its way
2783 through a pair of interleaved post_modify iterators. Just
2784 provide the correct answer. */
2785
2786 if (frame_pointer_needed)
2787 {
2788 base = hard_frame_pointer_rtx;
2789 off = - cfa_off;
2790 }
2791 else
2792 {
2793 base = stack_pointer_rtx;
2794 off = current_frame_info.total_size - cfa_off;
2795 }
2796
2797 REG_NOTES (insn)
2798 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2799 gen_rtx_SET (VOIDmode,
2800 gen_rtx_MEM (GET_MODE (reg),
2801 plus_constant (base, off)),
2802 frame_reg),
2803 REG_NOTES (insn));
2804 }
2805 }
2806
2807 static void
2808 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2809 {
2810 int iter = spill_fill_data.next_iter;
2811 rtx insn;
2812
2813 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2814 GEN_INT (cfa_off)));
2815 spill_fill_data.prev_insn[iter] = insn;
2816 }
2817
2818 /* Wrapper functions that discard the CONST_INT spill offset. These
2819 exist so that we can give gr_spill/gr_fill the offset they need and
2820 use a consistent function interface. */
2821
2822 static rtx
2823 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2824 {
2825 return gen_movdi (dest, src);
2826 }
2827
2828 static rtx
2829 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2830 {
2831 return gen_fr_spill (dest, src);
2832 }
2833
2834 static rtx
2835 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2836 {
2837 return gen_fr_restore (dest, src);
2838 }
2839
2840 /* Called after register allocation to add any instructions needed for the
2841 prologue. Using a prologue insn is favored compared to putting all of the
2842 instructions in output_function_prologue(), since it allows the scheduler
2843 to intermix instructions with the saves of the call-saved registers. In
2844 some cases, it might be necessary to emit a barrier instruction as the last
2845 insn to prevent such scheduling.
2846
2847 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2848 so that the debug info generation code can handle them properly.
2849
2850 The register save area is laid out like so:
2851 cfa+16
2852 [ varargs spill area ]
2853 [ fr register spill area ]
2854 [ br register spill area ]
2855 [ ar register spill area ]
2856 [ pr register spill area ]
2857 [ gr register spill area ] */
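     /* ia64_compute_frame_size records this layout: the prologue code below
        walks CFA_OFF from spill_cfa_off + spill_size + extra_spill_size down
        to spill_cfa_off as it assigns the slots, as checked by the
        gcc_asserts along the way.  */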
2858
2859 /* ??? Get inefficient code when the frame size is larger than can fit in an
2860 adds instruction. */
2861
2862 void
2863 ia64_expand_prologue (void)
2864 {
2865 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2866 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2867 rtx reg, alt_reg;
2868
2869 ia64_compute_frame_size (get_frame_size ());
2870 last_scratch_gr_reg = 15;
2871
2872 /* If there is no epilogue, then we don't need some prologue insns.
2873 We need to avoid emitting the dead prologue insns, because flow
2874 will complain about them. */
2875 if (optimize)
2876 {
2877 edge e;
2878 edge_iterator ei;
2879
2880 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2881 if ((e->flags & EDGE_FAKE) == 0
2882 && (e->flags & EDGE_FALLTHRU) != 0)
2883 break;
2884 epilogue_p = (e != NULL);
2885 }
2886 else
2887 epilogue_p = 1;
2888
2889 /* Set the local, input, and output register names. We need to do this
2890 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2891 half. If we use in/loc/out register names, then we get assembler errors
2892 in crtn.S because there is no alloc insn or regstk directive in there. */
2893 if (! TARGET_REG_NAMES)
2894 {
2895 int inputs = current_frame_info.n_input_regs;
2896 int locals = current_frame_info.n_local_regs;
2897 int outputs = current_frame_info.n_output_regs;
2898
2899 for (i = 0; i < inputs; i++)
2900 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2901 for (i = 0; i < locals; i++)
2902 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2903 for (i = 0; i < outputs; i++)
2904 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2905 }
2906
2907 /* Set the frame pointer register name. The regnum is logically loc79,
2908 but of course we'll not have allocated that many locals. Rather than
2909 worrying about renumbering the existing rtxs, we adjust the name. */
2910 /* ??? This code means that we can never use one local register when
2911 there is a frame pointer. loc79 gets wasted in this case, as it is
2912 renamed to a register that will never be used. See also the try_locals
2913 code in find_gr_spill. */
2914 if (current_frame_info.reg_fp)
2915 {
2916 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2917 reg_names[HARD_FRAME_POINTER_REGNUM]
2918 = reg_names[current_frame_info.reg_fp];
2919 reg_names[current_frame_info.reg_fp] = tmp;
2920 }
2921
2922 /* We don't need an alloc instruction if we've used no outputs or locals. */
2923 if (current_frame_info.n_local_regs == 0
2924 && current_frame_info.n_output_regs == 0
2925 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2926 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2927 {
2928 /* If there is no alloc, but there are input registers used, then we
2929 need a .regstk directive. */
2930 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2931 ar_pfs_save_reg = NULL_RTX;
2932 }
2933 else
2934 {
2935 current_frame_info.need_regstk = 0;
2936
2937 if (current_frame_info.reg_save_ar_pfs)
2938 regno = current_frame_info.reg_save_ar_pfs;
2939 else
2940 regno = next_scratch_gr_reg ();
2941 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2942
2943 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2944 GEN_INT (current_frame_info.n_input_regs),
2945 GEN_INT (current_frame_info.n_local_regs),
2946 GEN_INT (current_frame_info.n_output_regs),
2947 GEN_INT (current_frame_info.n_rotate_regs)));
2948 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2949 }
2950
2951 /* Set up frame pointer, stack pointer, and spill iterators. */
2952
2953 n_varargs = cfun->machine->n_varargs;
2954 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2955 stack_pointer_rtx, 0);
2956
2957 if (frame_pointer_needed)
2958 {
2959 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2960 RTX_FRAME_RELATED_P (insn) = 1;
2961 }
2962
2963 if (current_frame_info.total_size != 0)
2964 {
2965 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2966 rtx offset;
2967
2968 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2969 offset = frame_size_rtx;
2970 else
2971 {
2972 regno = next_scratch_gr_reg ();
2973 offset = gen_rtx_REG (DImode, regno);
2974 emit_move_insn (offset, frame_size_rtx);
2975 }
2976
2977 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2978 stack_pointer_rtx, offset));
2979
2980 if (! frame_pointer_needed)
2981 {
2982 RTX_FRAME_RELATED_P (insn) = 1;
2983 if (GET_CODE (offset) != CONST_INT)
2984 {
2985 REG_NOTES (insn)
2986 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2987 gen_rtx_SET (VOIDmode,
2988 stack_pointer_rtx,
2989 gen_rtx_PLUS (DImode,
2990 stack_pointer_rtx,
2991 frame_size_rtx)),
2992 REG_NOTES (insn));
2993 }
2994 }
2995
2996 /* ??? At this point we must generate a magic insn that appears to
2997 modify the stack pointer, the frame pointer, and all spill
2998 iterators. This would allow the most scheduling freedom. For
2999 now, just hard stop. */
3000 emit_insn (gen_blockage ());
3001 }
3002
3003 /* Must copy out ar.unat before doing any integer spills. */
3004 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3005 {
3006 if (current_frame_info.reg_save_ar_unat)
3007 ar_unat_save_reg
3008 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3009 else
3010 {
3011 alt_regno = next_scratch_gr_reg ();
3012 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3013 current_frame_info.gr_used_mask |= 1 << alt_regno;
3014 }
3015
3016 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3017 insn = emit_move_insn (ar_unat_save_reg, reg);
3018 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
3019
3020 /* Even if we're not going to generate an epilogue, we still
3021 need to save the register so that EH works. */
3022 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
3023 emit_insn (gen_prologue_use (ar_unat_save_reg));
3024 }
3025 else
3026 ar_unat_save_reg = NULL_RTX;
3027
3028 /* Spill all varargs registers. Do this before spilling any GR registers,
3029 since we want the UNAT bits for the GR registers to override the UNAT
3030 bits from varargs, which we don't care about. */
3031
3032 cfa_off = -16;
3033 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3034 {
3035 reg = gen_rtx_REG (DImode, regno);
3036 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3037 }
3038
3039 /* Locate the bottom of the register save area. */
3040 cfa_off = (current_frame_info.spill_cfa_off
3041 + current_frame_info.spill_size
3042 + current_frame_info.extra_spill_size);
3043
3044 /* Save the predicate register block either in a register or in memory. */
3045 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3046 {
3047 reg = gen_rtx_REG (DImode, PR_REG (0));
3048 if (current_frame_info.reg_save_pr != 0)
3049 {
3050 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3051 insn = emit_move_insn (alt_reg, reg);
3052
3053 /* ??? Denote pr spill/fill by a DImode move that modifies all
3054 64 hard registers. */
3055 RTX_FRAME_RELATED_P (insn) = 1;
3056 REG_NOTES (insn)
3057 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3058 gen_rtx_SET (VOIDmode, alt_reg, reg),
3059 REG_NOTES (insn));
3060
3061 /* Even if we're not going to generate an epilogue, we still
3062 need to save the register so that EH works. */
3063 if (! epilogue_p)
3064 emit_insn (gen_prologue_use (alt_reg));
3065 }
3066 else
3067 {
3068 alt_regno = next_scratch_gr_reg ();
3069 alt_reg = gen_rtx_REG (DImode, alt_regno);
3070 insn = emit_move_insn (alt_reg, reg);
3071 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3072 cfa_off -= 8;
3073 }
3074 }
3075
3076 /* Handle AR regs in numerical order. All of them get special handling. */
3077 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3078 && current_frame_info.reg_save_ar_unat == 0)
3079 {
3080 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3081 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3082 cfa_off -= 8;
3083 }
3084
3085 /* The alloc insn already copied ar.pfs into a general register. The
3086 only thing we have to do now is copy that register to a stack slot
3087 if we'd not allocated a local register for the job. */
3088 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3089 && current_frame_info.reg_save_ar_pfs == 0)
3090 {
3091 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3092 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3093 cfa_off -= 8;
3094 }
3095
3096 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3097 {
3098 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3099 if (current_frame_info.reg_save_ar_lc != 0)
3100 {
3101 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3102 insn = emit_move_insn (alt_reg, reg);
3103 RTX_FRAME_RELATED_P (insn) = 1;
3104
3105 /* Even if we're not going to generate an epilogue, we still
3106 need to save the register so that EH works. */
3107 if (! epilogue_p)
3108 emit_insn (gen_prologue_use (alt_reg));
3109 }
3110 else
3111 {
3112 alt_regno = next_scratch_gr_reg ();
3113 alt_reg = gen_rtx_REG (DImode, alt_regno);
3114 emit_move_insn (alt_reg, reg);
3115 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3116 cfa_off -= 8;
3117 }
3118 }
3119
3120 if (current_frame_info.reg_save_gp)
3121 {
3122 insn = emit_move_insn (gen_rtx_REG (DImode,
3123 current_frame_info.reg_save_gp),
3124 pic_offset_table_rtx);
3125 /* We don't know for sure yet if this is actually needed, since
3126 we've not split the PIC call patterns. If all of the calls
3127 are indirect, and not followed by any uses of the gp, then
3128 this save is dead. Allow it to go away. */
3129 REG_NOTES (insn)
3130 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
3131 }
3132
3133 /* We should now be at the base of the gr/br/fr spill area. */
3134 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3135 + current_frame_info.spill_size));
3136
3137 /* Spill all general registers. */
3138 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3139 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3140 {
3141 reg = gen_rtx_REG (DImode, regno);
3142 do_spill (gen_gr_spill, reg, cfa_off, reg);
3143 cfa_off -= 8;
3144 }
3145
3146 /* Handle BR0 specially -- it may be getting stored permanently in
3147 some GR register. */
3148 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3149 {
3150 reg = gen_rtx_REG (DImode, BR_REG (0));
3151 if (current_frame_info.reg_save_b0 != 0)
3152 {
3153 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3154 insn = emit_move_insn (alt_reg, reg);
3155 RTX_FRAME_RELATED_P (insn) = 1;
3156
3157 /* Even if we're not going to generate an epilogue, we still
3158 need to save the register so that EH works. */
3159 if (! epilogue_p)
3160 emit_insn (gen_prologue_use (alt_reg));
3161 }
3162 else
3163 {
3164 alt_regno = next_scratch_gr_reg ();
3165 alt_reg = gen_rtx_REG (DImode, alt_regno);
3166 emit_move_insn (alt_reg, reg);
3167 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3168 cfa_off -= 8;
3169 }
3170 }
3171
3172 /* Spill the rest of the BR registers. */
3173 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3174 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3175 {
3176 alt_regno = next_scratch_gr_reg ();
3177 alt_reg = gen_rtx_REG (DImode, alt_regno);
3178 reg = gen_rtx_REG (DImode, regno);
3179 emit_move_insn (alt_reg, reg);
3180 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3181 cfa_off -= 8;
3182 }
3183
3184 /* Align the frame and spill all FR registers. */
3185 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3186 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3187 {
3188 gcc_assert (!(cfa_off & 15));
3189 reg = gen_rtx_REG (XFmode, regno);
3190 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3191 cfa_off -= 16;
3192 }
3193
3194 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3195
3196 finish_spill_pointers ();
3197 }
3198
3199 /* Called after register allocation to add any instructions needed for the
3200 epilogue. Using an epilogue insn is favored compared to putting all of the
3201 instructions in output_function_epilogue(), since it allows the scheduler
3202 to intermix instructions with the restores of the call-saved registers. In
3203 some cases, it might be necessary to emit a barrier instruction as the last
3204 insn to prevent such scheduling. */
3205
3206 void
3207 ia64_expand_epilogue (int sibcall_p)
3208 {
3209 rtx insn, reg, alt_reg, ar_unat_save_reg;
3210 int regno, alt_regno, cfa_off;
3211
3212 ia64_compute_frame_size (get_frame_size ());
3213
3214 /* If there is a frame pointer, then we use it instead of the stack
3215 pointer, so that the stack pointer does not need to be valid when
3216 the epilogue starts. See EXIT_IGNORE_STACK. */
3217 if (frame_pointer_needed)
3218 setup_spill_pointers (current_frame_info.n_spilled,
3219 hard_frame_pointer_rtx, 0);
3220 else
3221 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3222 current_frame_info.total_size);
3223
3224 if (current_frame_info.total_size != 0)
3225 {
3226 /* ??? At this point we must generate a magic insn that appears to
3227 modify the spill iterators and the frame pointer. This would
3228 allow the most scheduling freedom. For now, just hard stop. */
3229 emit_insn (gen_blockage ());
3230 }
3231
3232 /* Locate the bottom of the register save area. */
3233 cfa_off = (current_frame_info.spill_cfa_off
3234 + current_frame_info.spill_size
3235 + current_frame_info.extra_spill_size);
3236
3237 /* Restore the predicate registers. */
3238 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3239 {
3240 if (current_frame_info.reg_save_pr != 0)
3241 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3242 else
3243 {
3244 alt_regno = next_scratch_gr_reg ();
3245 alt_reg = gen_rtx_REG (DImode, alt_regno);
3246 do_restore (gen_movdi_x, alt_reg, cfa_off);
3247 cfa_off -= 8;
3248 }
3249 reg = gen_rtx_REG (DImode, PR_REG (0));
3250 emit_move_insn (reg, alt_reg);
3251 }
3252
3253 /* Restore the application registers. */
3254
3255 /* Load the saved unat from the stack, but do not restore it until
3256 after the GRs have been restored. */
3257 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3258 {
3259 if (current_frame_info.reg_save_ar_unat != 0)
3260 ar_unat_save_reg
3261 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3262 else
3263 {
3264 alt_regno = next_scratch_gr_reg ();
3265 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3266 current_frame_info.gr_used_mask |= 1 << alt_regno;
3267 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3268 cfa_off -= 8;
3269 }
3270 }
3271 else
3272 ar_unat_save_reg = NULL_RTX;
3273
3274 if (current_frame_info.reg_save_ar_pfs != 0)
3275 {
3276 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3277 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3278 emit_move_insn (reg, alt_reg);
3279 }
3280 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3281 {
3282 alt_regno = next_scratch_gr_reg ();
3283 alt_reg = gen_rtx_REG (DImode, alt_regno);
3284 do_restore (gen_movdi_x, alt_reg, cfa_off);
3285 cfa_off -= 8;
3286 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3287 emit_move_insn (reg, alt_reg);
3288 }
3289
3290 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3291 {
3292 if (current_frame_info.reg_save_ar_lc != 0)
3293 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3294 else
3295 {
3296 alt_regno = next_scratch_gr_reg ();
3297 alt_reg = gen_rtx_REG (DImode, alt_regno);
3298 do_restore (gen_movdi_x, alt_reg, cfa_off);
3299 cfa_off -= 8;
3300 }
3301 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3302 emit_move_insn (reg, alt_reg);
3303 }
3304
3305 /* We should now be at the base of the gr/br/fr spill area. */
3306 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3307 + current_frame_info.spill_size));
3308
3309 /* The GP may be stored on the stack in the prologue, but it's
3310 never restored in the epilogue. Skip the stack slot. */
3311 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3312 cfa_off -= 8;
3313
3314 /* Restore all general registers. */
3315 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3316 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3317 {
3318 reg = gen_rtx_REG (DImode, regno);
3319 do_restore (gen_gr_restore, reg, cfa_off);
3320 cfa_off -= 8;
3321 }
3322
3323 /* Restore the branch registers. Handle B0 specially, as it may
3324 have gotten stored in some GR register. */
3325 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3326 {
3327 if (current_frame_info.reg_save_b0 != 0)
3328 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3329 else
3330 {
3331 alt_regno = next_scratch_gr_reg ();
3332 alt_reg = gen_rtx_REG (DImode, alt_regno);
3333 do_restore (gen_movdi_x, alt_reg, cfa_off);
3334 cfa_off -= 8;
3335 }
3336 reg = gen_rtx_REG (DImode, BR_REG (0));
3337 emit_move_insn (reg, alt_reg);
3338 }
3339
3340 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3341 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3342 {
3343 alt_regno = next_scratch_gr_reg ();
3344 alt_reg = gen_rtx_REG (DImode, alt_regno);
3345 do_restore (gen_movdi_x, alt_reg, cfa_off);
3346 cfa_off -= 8;
3347 reg = gen_rtx_REG (DImode, regno);
3348 emit_move_insn (reg, alt_reg);
3349 }
3350
3351 /* Restore floating point registers. */
3352 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3353 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3354 {
3355 gcc_assert (!(cfa_off & 15));
3356 reg = gen_rtx_REG (XFmode, regno);
3357 do_restore (gen_fr_restore_x, reg, cfa_off);
3358 cfa_off -= 16;
3359 }
3360
3361 /* Restore ar.unat for real. */
3362 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3363 {
3364 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3365 emit_move_insn (reg, ar_unat_save_reg);
3366 }
3367
3368 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3369
3370 finish_spill_pointers ();
3371
3372 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3373 {
3374 /* ??? At this point we must generate a magic insn that appears to
3375 modify the spill iterators, the stack pointer, and the frame
3376 pointer. This would allow the most scheduling freedom. For now,
3377 just hard stop. */
3378 emit_insn (gen_blockage ());
3379 }
3380
3381 if (cfun->machine->ia64_eh_epilogue_sp)
3382 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3383 else if (frame_pointer_needed)
3384 {
3385 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3386 RTX_FRAME_RELATED_P (insn) = 1;
3387 }
3388 else if (current_frame_info.total_size)
3389 {
3390 rtx offset, frame_size_rtx;
3391
3392 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3393 if (CONST_OK_FOR_I (current_frame_info.total_size))
3394 offset = frame_size_rtx;
3395 else
3396 {
3397 regno = next_scratch_gr_reg ();
3398 offset = gen_rtx_REG (DImode, regno);
3399 emit_move_insn (offset, frame_size_rtx);
3400 }
3401
3402 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3403 offset));
3404
3405 RTX_FRAME_RELATED_P (insn) = 1;
3406 if (GET_CODE (offset) != CONST_INT)
3407 {
3408 REG_NOTES (insn)
3409 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3410 gen_rtx_SET (VOIDmode,
3411 stack_pointer_rtx,
3412 gen_rtx_PLUS (DImode,
3413 stack_pointer_rtx,
3414 frame_size_rtx)),
3415 REG_NOTES (insn));
3416 }
3417 }
3418
3419 if (cfun->machine->ia64_eh_epilogue_bsp)
3420 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3421
3422 if (! sibcall_p)
3423 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3424 else
3425 {
3426 int fp = GR_REG (2);
3427 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3428 first available call-clobbered register. If there was a frame pointer
3429 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3430 so we have to make sure we're using the string "r2" when emitting
3431 the register name for the assembler. */
3432 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3433 fp = HARD_FRAME_POINTER_REGNUM;
3434
3435 /* We must emit an alloc to force the input registers to become output
3436 registers. Otherwise, if the callee tries to pass its parameters
3437 through to another call without an intervening alloc, then these
3438 values get lost. */
3439 /* ??? We don't need to preserve all input registers. We only need to
3440 preserve those input registers used as arguments to the sibling call.
3441 It is unclear how to compute that number here. */
3442 if (current_frame_info.n_input_regs != 0)
3443 {
3444 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3445 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3446 const0_rtx, const0_rtx,
3447 n_inputs, const0_rtx));
3448 RTX_FRAME_RELATED_P (insn) = 1;
3449 }
3450 }
3451 }
3452
3453 /* Return 1 if br.ret can do all the work required to return from a
3454 function. */
3455
3456 int
3457 ia64_direct_return (void)
3458 {
3459 if (reload_completed && ! frame_pointer_needed)
3460 {
3461 ia64_compute_frame_size (get_frame_size ());
3462
3463 return (current_frame_info.total_size == 0
3464 && current_frame_info.n_spilled == 0
3465 && current_frame_info.reg_save_b0 == 0
3466 && current_frame_info.reg_save_pr == 0
3467 && current_frame_info.reg_save_ar_pfs == 0
3468 && current_frame_info.reg_save_ar_unat == 0
3469 && current_frame_info.reg_save_ar_lc == 0);
3470 }
3471 return 0;
3472 }
3473
3474 /* Return the magic cookie that we use to hold the return address
3475 during early compilation. */
3476
3477 rtx
3478 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3479 {
3480 if (count != 0)
3481 return NULL;
3482 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3483 }
3484
3485 /* Split this value after reload, now that we know where the return
3486 address is saved. */
3487
3488 void
3489 ia64_split_return_addr_rtx (rtx dest)
3490 {
3491 rtx src;
3492
3493 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3494 {
3495 if (current_frame_info.reg_save_b0 != 0)
3496 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3497 else
3498 {
3499 HOST_WIDE_INT off;
3500 unsigned int regno;
3501
3502 /* Compute offset from CFA for BR0. */
3503 /* ??? Must be kept in sync with ia64_expand_prologue. */
3504 off = (current_frame_info.spill_cfa_off
3505 + current_frame_info.spill_size);
3506 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3507 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3508 off -= 8;
3509
3510 /* Convert CFA offset to a register based offset. */
3511 if (frame_pointer_needed)
3512 src = hard_frame_pointer_rtx;
3513 else
3514 {
3515 src = stack_pointer_rtx;
3516 off += current_frame_info.total_size;
3517 }
3518
3519 /* Load address into scratch register. */
3520 if (CONST_OK_FOR_I (off))
3521 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3522 else
3523 {
3524 emit_move_insn (dest, GEN_INT (off));
3525 emit_insn (gen_adddi3 (dest, src, dest));
3526 }
3527
3528 src = gen_rtx_MEM (Pmode, dest);
3529 }
3530 }
3531 else
3532 src = gen_rtx_REG (DImode, BR_REG (0));
3533
3534 emit_move_insn (dest, src);
3535 }
3536
3537 int
3538 ia64_hard_regno_rename_ok (int from, int to)
3539 {
3540 /* Don't clobber any of the registers we reserved for the prologue. */
3541 if (to == current_frame_info.reg_fp
3542 || to == current_frame_info.reg_save_b0
3543 || to == current_frame_info.reg_save_pr
3544 || to == current_frame_info.reg_save_ar_pfs
3545 || to == current_frame_info.reg_save_ar_unat
3546 || to == current_frame_info.reg_save_ar_lc)
3547 return 0;
3548
3549 if (from == current_frame_info.reg_fp
3550 || from == current_frame_info.reg_save_b0
3551 || from == current_frame_info.reg_save_pr
3552 || from == current_frame_info.reg_save_ar_pfs
3553 || from == current_frame_info.reg_save_ar_unat
3554 || from == current_frame_info.reg_save_ar_lc)
3555 return 0;
3556
3557 /* Don't use output registers outside the register frame. */
3558 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3559 return 0;
3560
3561 /* Retain even/oddness on predicate register pairs. */
3562 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3563 return (from & 1) == (to & 1);
3564
3565 return 1;
3566 }
3567
3568 /* Target hook for assembling integer objects. Handle word-sized
3569 aligned objects and detect the cases when @fptr is needed. */
3570
3571 static bool
3572 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3573 {
3574 if (size == POINTER_SIZE / BITS_PER_UNIT
3575 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3576 && GET_CODE (x) == SYMBOL_REF
3577 && SYMBOL_REF_FUNCTION_P (x))
3578 {
3579 static const char * const directive[2][2] = {
3580 /* 64-bit pointer */ /* 32-bit pointer */
3581 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3582 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3583 };
3584 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3585 output_addr_const (asm_out_file, x);
3586 fputs (")\n", asm_out_file);
3587 return true;
3588 }
3589 return default_assemble_integer (x, size, aligned_p);
3590 }
3591
3592 /* Emit the function prologue. */
3593
3594 static void
3595 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3596 {
3597 int mask, grsave, grsave_prev;
3598
3599 if (current_frame_info.need_regstk)
3600 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3601 current_frame_info.n_input_regs,
3602 current_frame_info.n_local_regs,
3603 current_frame_info.n_output_regs,
3604 current_frame_info.n_rotate_regs);
3605
3606 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3607 return;
3608
3609 /* Emit the .prologue directive. */
3610
3611 mask = 0;
3612 grsave = grsave_prev = 0;
3613 if (current_frame_info.reg_save_b0 != 0)
3614 {
3615 mask |= 8;
3616 grsave = grsave_prev = current_frame_info.reg_save_b0;
3617 }
3618 if (current_frame_info.reg_save_ar_pfs != 0
3619 && (grsave_prev == 0
3620 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3621 {
3622 mask |= 4;
3623 if (grsave_prev == 0)
3624 grsave = current_frame_info.reg_save_ar_pfs;
3625 grsave_prev = current_frame_info.reg_save_ar_pfs;
3626 }
3627 if (current_frame_info.reg_fp != 0
3628 && (grsave_prev == 0
3629 || current_frame_info.reg_fp == grsave_prev + 1))
3630 {
3631 mask |= 2;
3632 if (grsave_prev == 0)
3633 grsave = HARD_FRAME_POINTER_REGNUM;
3634 grsave_prev = current_frame_info.reg_fp;
3635 }
3636 if (current_frame_info.reg_save_pr != 0
3637 && (grsave_prev == 0
3638 || current_frame_info.reg_save_pr == grsave_prev + 1))
3639 {
3640 mask |= 1;
3641 if (grsave_prev == 0)
3642 grsave = current_frame_info.reg_save_pr;
3643 }
3644
3645 if (mask && TARGET_GNU_AS)
3646 fprintf (file, "\t.prologue %d, %d\n", mask,
3647 ia64_dbx_register_number (grsave));
3648 else
3649 fputs ("\t.prologue\n", file);
3650
3651 /* Emit a .spill directive, if necessary, to relocate the base of
3652 the register spill area. */
3653 if (current_frame_info.spill_cfa_off != -16)
3654 fprintf (file, "\t.spill %ld\n",
3655 (long) (current_frame_info.spill_cfa_off
3656 + current_frame_info.spill_size));
3657 }
3658
3659 /* Emit the .body directive at the scheduled end of the prologue. */
3660
3661 static void
3662 ia64_output_function_end_prologue (FILE *file)
3663 {
3664 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3665 return;
3666
3667 fputs ("\t.body\n", file);
3668 }
3669
3670 /* Emit the function epilogue. */
3671
3672 static void
3673 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3674 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3675 {
3676 int i;
3677
3678 if (current_frame_info.reg_fp)
3679 {
3680 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3681 reg_names[HARD_FRAME_POINTER_REGNUM]
3682 = reg_names[current_frame_info.reg_fp];
3683 reg_names[current_frame_info.reg_fp] = tmp;
3684 }
3685 if (! TARGET_REG_NAMES)
3686 {
3687 for (i = 0; i < current_frame_info.n_input_regs; i++)
3688 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3689 for (i = 0; i < current_frame_info.n_local_regs; i++)
3690 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3691 for (i = 0; i < current_frame_info.n_output_regs; i++)
3692 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3693 }
3694
3695 current_frame_info.initialized = 0;
3696 }
3697
3698 int
3699 ia64_dbx_register_number (int regno)
3700 {
3701 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3702 from its home at loc79 to something inside the register frame. We
3703 must perform the same renumbering here for the debug info. */
3704 if (current_frame_info.reg_fp)
3705 {
3706 if (regno == HARD_FRAME_POINTER_REGNUM)
3707 regno = current_frame_info.reg_fp;
3708 else if (regno == current_frame_info.reg_fp)
3709 regno = HARD_FRAME_POINTER_REGNUM;
3710 }
3711
3712 if (IN_REGNO_P (regno))
3713 return 32 + regno - IN_REG (0);
3714 else if (LOC_REGNO_P (regno))
3715 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3716 else if (OUT_REGNO_P (regno))
3717 return (32 + current_frame_info.n_input_regs
3718 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3719 else
3720 return regno;
3721 }
3722
3723 void
3724 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3725 {
3726 rtx addr_reg, eight = GEN_INT (8);
3727
3728 /* The Intel assembler requires that the global __ia64_trampoline symbol
3729 be declared explicitly. */
3730 if (!TARGET_GNU_AS)
3731 {
3732 static bool declared_ia64_trampoline = false;
3733
3734 if (!declared_ia64_trampoline)
3735 {
3736 declared_ia64_trampoline = true;
3737 (*targetm.asm_out.globalize_label) (asm_out_file,
3738 "__ia64_trampoline");
3739 }
3740 }
3741
3742 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3743 addr = convert_memory_address (Pmode, addr);
3744 fnaddr = convert_memory_address (Pmode, fnaddr);
3745 static_chain = convert_memory_address (Pmode, static_chain);
3746
3747 /* Load up our iterator. */
3748 addr_reg = gen_reg_rtx (Pmode);
3749 emit_move_insn (addr_reg, addr);
3750
3751 /* The first two words are the fake descriptor:
3752 __ia64_trampoline, ADDR+16. */
3753 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3754 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3755 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3756
3757 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3758 copy_to_reg (plus_constant (addr, 16)));
3759 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3760
3761 /* The third word is the target descriptor. */
3762 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3763 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3764
3765 /* The fourth word is the static chain. */
3766 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3767 }
3768 \f
3769 /* Do any needed setup for a variadic function. CUM has not been updated
3770 for the last named argument which has type TYPE and mode MODE.
3771
3772 We generate the actual spill instructions during prologue generation. */
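/* For example, since MAX_ARGUMENT_SLOTS is 8 and UNITS_PER_WORD is 8, a callee
   declared as f (int a, ...) skips one named word below, so n is 7,
   *pretend_size becomes 7 * UNITS_PER_WORD (56) bytes, and the prologue later
   spills the remaining seven argument registers where va_arg can find them.  */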
3773
3774 static void
3775 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3776 tree type, int * pretend_size,
3777 int second_time ATTRIBUTE_UNUSED)
3778 {
3779 CUMULATIVE_ARGS next_cum = *cum;
3780
3781 /* Skip the current argument. */
3782 ia64_function_arg_advance (&next_cum, mode, type, 1);
3783
3784 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3785 {
3786 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3787 *pretend_size = n * UNITS_PER_WORD;
3788 cfun->machine->n_varargs = n;
3789 }
3790 }
3791
3792 /* Check whether TYPE is a homogeneous floating point aggregate. If
3793 it is, return the mode of the floating point type that appears
3794 in all leaves. If it is not, return VOIDmode.
3795
3796 An aggregate is a homogeneous floating point aggregate if all
3797 fields/elements in it have the same floating point type (e.g.,
3798 SFmode). 128-bit quad-precision floats are excluded.
3799
3800 Variable sized aggregates should never arrive here, since we should
3801 have already decided to pass them by reference. Top-level zero-sized
3802 aggregates are excluded because our parallels crash the middle-end. */
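/* For example, struct { float x, y, z; } is an SFmode HFA and
   struct { double d[4]; } is a DFmode HFA, while struct { float f; double d; }
   mixes element types and so is not an HFA.  */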
3803
3804 static enum machine_mode
3805 hfa_element_mode (tree type, bool nested)
3806 {
3807 enum machine_mode element_mode = VOIDmode;
3808 enum machine_mode mode;
3809 enum tree_code code = TREE_CODE (type);
3810 int know_element_mode = 0;
3811 tree t;
3812
3813 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3814 return VOIDmode;
3815
3816 switch (code)
3817 {
3818 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3819 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3820 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3821 case LANG_TYPE: case FUNCTION_TYPE:
3822 return VOIDmode;
3823
3824 /* Fortran complex types are supposed to be HFAs, so we need to handle
3825 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3826 types though. */
3827 case COMPLEX_TYPE:
3828 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3829 && TYPE_MODE (type) != TCmode)
3830 return GET_MODE_INNER (TYPE_MODE (type));
3831 else
3832 return VOIDmode;
3833
3834 case REAL_TYPE:
3835 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3836 mode if this is contained within an aggregate. */
3837 if (nested && TYPE_MODE (type) != TFmode)
3838 return TYPE_MODE (type);
3839 else
3840 return VOIDmode;
3841
3842 case ARRAY_TYPE:
3843 return hfa_element_mode (TREE_TYPE (type), 1);
3844
3845 case RECORD_TYPE:
3846 case UNION_TYPE:
3847 case QUAL_UNION_TYPE:
3848 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3849 {
3850 if (TREE_CODE (t) != FIELD_DECL)
3851 continue;
3852
3853 mode = hfa_element_mode (TREE_TYPE (t), 1);
3854 if (know_element_mode)
3855 {
3856 if (mode != element_mode)
3857 return VOIDmode;
3858 }
3859 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3860 return VOIDmode;
3861 else
3862 {
3863 know_element_mode = 1;
3864 element_mode = mode;
3865 }
3866 }
3867 return element_mode;
3868
3869 default:
3870 /* If we reach here, we probably have some front-end specific type
3871 that the backend doesn't know about. This can happen via the
3872 aggregate_value_p call in init_function_start. All we can do is
3873 ignore unknown tree types. */
3874 return VOIDmode;
3875 }
3876
3877 return VOIDmode;
3878 }
3879
3880 /* Return the number of words required to hold a quantity of TYPE and MODE
3881 when passed as an argument. */
3882 static int
3883 ia64_function_arg_words (tree type, enum machine_mode mode)
3884 {
3885 int words;
3886
3887 if (mode == BLKmode)
3888 words = int_size_in_bytes (type);
3889 else
3890 words = GET_MODE_SIZE (mode);
3891
3892 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3893 }
3894
3895 /* Return the number of registers that should be skipped so the current
3896 argument (described by TYPE and WORDS) will be properly aligned.
3897
3898 Integer and float arguments larger than 8 bytes start at the next
3899 even boundary. Aggregates larger than 8 bytes start at the next
3900 even boundary if the aggregate has 16 byte alignment. Note that
3901 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3902 but are still to be aligned in registers.
3903
3904 ??? The ABI does not specify how to handle aggregates with
3905 alignment from 9 to 15 bytes, or greater than 16. We handle them
3906 all as if they had 16 byte alignment. Such aggregates can occur
3907 only if gcc extensions are used. */
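/* For example, if cum->words is odd, a 16-byte aggregate with 16-byte
   alignment (or a 16-byte scalar) yields an offset of 1 so that it starts in
   an even-numbered slot, while an 8-byte integer argument yields 0 and is not
   padded.  */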
3908 static int
3909 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3910 {
3911 if ((cum->words & 1) == 0)
3912 return 0;
3913
3914 if (type
3915 && TREE_CODE (type) != INTEGER_TYPE
3916 && TREE_CODE (type) != REAL_TYPE)
3917 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3918 else
3919 return words > 1;
3920 }
3921
3922 /* Return rtx for register where argument is passed, or zero if it is passed
3923 on the stack. */
3924 /* ??? 128-bit quad-precision floats are always passed in general
3925 registers. */
3926
3927 rtx
3928 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3929 int named, int incoming)
3930 {
3931 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3932 int words = ia64_function_arg_words (type, mode);
3933 int offset = ia64_function_arg_offset (cum, type, words);
3934 enum machine_mode hfa_mode = VOIDmode;
3935
3936 /* If all argument slots are used, then it must go on the stack. */
3937 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3938 return 0;
3939
3940 /* Check for and handle homogeneous FP aggregates. */
3941 if (type)
3942 hfa_mode = hfa_element_mode (type, 0);
3943
3944 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3945 and unprototyped hfas are passed specially. */
3946 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3947 {
3948 rtx loc[16];
3949 int i = 0;
3950 int fp_regs = cum->fp_regs;
3951 int int_regs = cum->words + offset;
3952 int hfa_size = GET_MODE_SIZE (hfa_mode);
3953 int byte_size;
3954 int args_byte_size;
3955
3956 /* If prototyped, pass it in FR regs then GR regs.
3957 If not prototyped, pass it in both FR and GR regs.
3958
3959 If this is an SFmode aggregate, then it is possible to run out of
3960 FR regs while GR regs are still left. In that case, we pass the
3961 remaining part in the GR regs. */
3962
3963 /* Fill the FP regs. We do this always. We stop if we reach the end
3964 of the argument, the last FP register, or the last argument slot. */
3965
3966 byte_size = ((mode == BLKmode)
3967 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3968 args_byte_size = int_regs * UNITS_PER_WORD;
3969 offset = 0;
3970 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3971 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3972 {
3973 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3974 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3975 + fp_regs)),
3976 GEN_INT (offset));
3977 offset += hfa_size;
3978 args_byte_size += hfa_size;
3979 fp_regs++;
3980 }
3981
3982 /* If no prototype, then the whole thing must go in GR regs. */
3983 if (! cum->prototype)
3984 offset = 0;
3985 /* If this is an SFmode aggregate, then we might have some left over
3986 that needs to go in GR regs. */
3987 else if (byte_size != offset)
3988 int_regs += offset / UNITS_PER_WORD;
3989
3990 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3991
3992 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3993 {
3994 enum machine_mode gr_mode = DImode;
3995 unsigned int gr_size;
3996
3997 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3998 then this goes in a GR reg left adjusted/little endian, right
3999 adjusted/big endian. */
4000 /* ??? Currently this is handled wrong, because 4-byte hunks are
4001 always right adjusted/little endian. */
4002 if (offset & 0x4)
4003 gr_mode = SImode;
4004 /* If we have an even 4 byte hunk because the aggregate is a
4005 multiple of 4 bytes in size, then this goes in a GR reg right
4006 adjusted/little endian. */
4007 else if (byte_size - offset == 4)
4008 gr_mode = SImode;
4009
4010 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4011 gen_rtx_REG (gr_mode, (basereg
4012 + int_regs)),
4013 GEN_INT (offset));
4014
4015 gr_size = GET_MODE_SIZE (gr_mode);
4016 offset += gr_size;
4017 if (gr_size == UNITS_PER_WORD
4018 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4019 int_regs++;
4020 else if (gr_size > UNITS_PER_WORD)
4021 int_regs += gr_size / UNITS_PER_WORD;
4022 }
4023 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4024 }
4025
4026 /* Integral values and aggregates go in general registers. If we have run out of
4027 FR registers, then FP values must also go in general registers. This can
4028 happen when we have an SFmode HFA. */
4029 else if (mode == TFmode || mode == TCmode
4030 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4031 {
4032 int byte_size = ((mode == BLKmode)
4033 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4034 if (BYTES_BIG_ENDIAN
4035 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4036 && byte_size < UNITS_PER_WORD
4037 && byte_size > 0)
4038 {
4039 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4040 gen_rtx_REG (DImode,
4041 (basereg + cum->words
4042 + offset)),
4043 const0_rtx);
4044 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4045 }
4046 else
4047 return gen_rtx_REG (mode, basereg + cum->words + offset);
4048
4049 }
4050
4051 /* If there is a prototype, then FP values go in a FR register when
4052 named, and in a GR register when unnamed. */
4053 else if (cum->prototype)
4054 {
4055 if (named)
4056 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4057 /* In big-endian mode, an anonymous SFmode value must be represented
4058 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4059 the value into the high half of the general register. */
4060 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4061 return gen_rtx_PARALLEL (mode,
4062 gen_rtvec (1,
4063 gen_rtx_EXPR_LIST (VOIDmode,
4064 gen_rtx_REG (DImode, basereg + cum->words + offset),
4065 const0_rtx)));
4066 else
4067 return gen_rtx_REG (mode, basereg + cum->words + offset);
4068 }
4069 /* If there is no prototype, then FP values go in both FR and GR
4070 registers. */
4071 else
4072 {
4073 /* See comment above. */
4074 enum machine_mode inner_mode =
4075 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4076
4077 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4078 gen_rtx_REG (mode, (FR_ARG_FIRST
4079 + cum->fp_regs)),
4080 const0_rtx);
4081 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4082 gen_rtx_REG (inner_mode,
4083 (basereg + cum->words
4084 + offset)),
4085 const0_rtx);
4086
4087 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4088 }
4089 }
4090
4091 /* Return number of bytes, at the beginning of the argument, that must be
4092 put in registers. 0 if the argument is entirely in registers or entirely
4093 in memory. */
4094
4095 static int
4096 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4097 tree type, bool named ATTRIBUTE_UNUSED)
4098 {
4099 int words = ia64_function_arg_words (type, mode);
4100 int offset = ia64_function_arg_offset (cum, type, words);
4101
4102 /* If all argument slots are used, then it must go on the stack. */
4103 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4104 return 0;
4105
4106 /* It doesn't matter whether the argument goes in FR or GR regs. If
4107 it fits within the 8 argument slots, then it goes entirely in
4108 registers. If it extends past the last argument slot, then the rest
4109 goes on the stack. */
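/* For example, if cum->words + offset is 4 and the argument needs 12 words,
   only slots 4-7 remain, so (MAX_ARGUMENT_SLOTS - 4) * UNITS_PER_WORD = 32
   bytes go in registers and the rest of the argument goes on the stack.  */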
4110
4111 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4112 return 0;
4113
4114 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4115 }
4116
4117 /* Update CUM to point after this argument. This is patterned after
4118 ia64_function_arg. */
4119
4120 void
4121 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4122 tree type, int named)
4123 {
4124 int words = ia64_function_arg_words (type, mode);
4125 int offset = ia64_function_arg_offset (cum, type, words);
4126 enum machine_mode hfa_mode = VOIDmode;
4127
4128 /* If all arg slots are already full, then there is nothing to do. */
4129 if (cum->words >= MAX_ARGUMENT_SLOTS)
4130 return;
4131
4132 cum->words += words + offset;
4133
4134 /* Check for and handle homogeneous FP aggregates. */
4135 if (type)
4136 hfa_mode = hfa_element_mode (type, 0);
4137
4138 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4139 and unprototyped hfas are passed specially. */
4140 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4141 {
4142 int fp_regs = cum->fp_regs;
4143 /* This is the original value of cum->words + offset. */
4144 int int_regs = cum->words - words;
4145 int hfa_size = GET_MODE_SIZE (hfa_mode);
4146 int byte_size;
4147 int args_byte_size;
4148
4149 /* If prototyped, pass it in FR regs then GR regs.
4150 If not prototyped, pass it in both FR and GR regs.
4151
4152 If this is an SFmode aggregate, then it is possible to run out of
4153 FR regs while GR regs are still left. In that case, we pass the
4154 remaining part in the GR regs. */
4155
4156 /* Fill the FP regs. We do this always. We stop if we reach the end
4157 of the argument, the last FP register, or the last argument slot. */
4158
4159 byte_size = ((mode == BLKmode)
4160 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4161 args_byte_size = int_regs * UNITS_PER_WORD;
4162 offset = 0;
4163 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4164 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4165 {
4166 offset += hfa_size;
4167 args_byte_size += hfa_size;
4168 fp_regs++;
4169 }
4170
4171 cum->fp_regs = fp_regs;
4172 }
4173
4174 /* Integral values and aggregates go in general registers. So do TFmode FP values.
4175 If we have run out of FR registers, then other FP values must also go in
4176 general registers. This can happen when we have an SFmode HFA. */
4177 else if (mode == TFmode || mode == TCmode
4178 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4179 cum->int_regs = cum->words;
4180
4181 /* If there is a prototype, then FP values go in a FR register when
4182 named, and in a GR register when unnamed. */
4183 else if (cum->prototype)
4184 {
4185 if (! named)
4186 cum->int_regs = cum->words;
4187 else
4188 /* ??? Complex types should not reach here. */
4189 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4190 }
4191 /* If there is no prototype, then FP values go in both FR and GR
4192 registers. */
4193 else
4194 {
4195 /* ??? Complex types should not reach here. */
4196 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4197 cum->int_regs = cum->words;
4198 }
4199 }
4200
4201 /* Arguments with alignment larger than 8 bytes start at the next even
4202 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4203 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4204
4205 int
4206 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4207 {
4208
4209 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4210 return PARM_BOUNDARY * 2;
4211
4212 if (type)
4213 {
4214 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4215 return PARM_BOUNDARY * 2;
4216 else
4217 return PARM_BOUNDARY;
4218 }
4219
4220 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4221 return PARM_BOUNDARY * 2;
4222 else
4223 return PARM_BOUNDARY;
4224 }
4225
4226 /* True if it is OK to do sibling call optimization for the specified
4227 call expression EXP. DECL will be the called function, or NULL if
4228 this is an indirect call. */
4229 static bool
4230 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4231 {
4232 /* We can't perform a sibcall if the current function has the syscall_linkage
4233 attribute. */
4234 if (lookup_attribute ("syscall_linkage",
4235 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4236 return false;
4237
4238 /* We must always return with our current GP. This means we can
4239 only sibcall to functions defined in the current module. */
4240 return decl && (*targetm.binds_local_p) (decl);
4241 }
4242 \f
4243
4244 /* Implement va_arg. */
4245
4246 static tree
4247 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4248 {
4249 /* Variable sized types are passed by reference. */
4250 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4251 {
4252 tree ptrtype = build_pointer_type (type);
4253 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4254 return build_va_arg_indirect_ref (addr);
4255 }
4256
4257 /* Aggregate arguments with alignment larger than 8 bytes start at
4258 the next even boundary. Integer and floating point arguments
4259 do so if they are larger than 8 bytes, whether or not they are
4260 also aligned larger than 8 bytes. */
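/* In effect this rounds valist up to the next 16-byte boundary:
   valist = (valist + 15) & -16.  */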
4261 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4262 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4263 {
4264 tree t = build2 (PLUS_EXPR, TREE_TYPE (valist), valist,
4265 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4266 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4267 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4268 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
4269 gimplify_and_add (t, pre_p);
4270 }
4271
4272 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4273 }
4274 \f
4275 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4276 in a register. */
4277
4278 static bool
4279 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4280 {
4281 enum machine_mode mode;
4282 enum machine_mode hfa_mode;
4283 HOST_WIDE_INT byte_size;
4284
4285 mode = TYPE_MODE (valtype);
4286 byte_size = GET_MODE_SIZE (mode);
4287 if (mode == BLKmode)
4288 {
4289 byte_size = int_size_in_bytes (valtype);
4290 if (byte_size < 0)
4291 return true;
4292 }
4293
4294 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4295
4296 hfa_mode = hfa_element_mode (valtype, 0);
4297 if (hfa_mode != VOIDmode)
4298 {
4299 int hfa_size = GET_MODE_SIZE (hfa_mode);
4300
4301 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4302 return true;
4303 else
4304 return false;
4305 }
4306 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4307 return true;
4308 else
4309 return false;
4310 }
4311
4312 /* Return rtx for register that holds the function return value. */
4313
4314 rtx
4315 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4316 {
4317 enum machine_mode mode;
4318 enum machine_mode hfa_mode;
4319
4320 mode = TYPE_MODE (valtype);
4321 hfa_mode = hfa_element_mode (valtype, 0);
4322
4323 if (hfa_mode != VOIDmode)
4324 {
4325 rtx loc[8];
4326 int i;
4327 int hfa_size;
4328 int byte_size;
4329 int offset;
4330
4331 hfa_size = GET_MODE_SIZE (hfa_mode);
4332 byte_size = ((mode == BLKmode)
4333 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4334 offset = 0;
4335 for (i = 0; offset < byte_size; i++)
4336 {
4337 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4338 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4339 GEN_INT (offset));
4340 offset += hfa_size;
4341 }
4342 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4343 }
4344 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4345 return gen_rtx_REG (mode, FR_ARG_FIRST);
4346 else
4347 {
4348 bool need_parallel = false;
4349
4350 /* In big-endian mode, we need to manage the layout of aggregates
4351 in the registers so that we get the bits properly aligned in
4352 the highpart of the registers. */
4353 if (BYTES_BIG_ENDIAN
4354 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4355 need_parallel = true;
4356
4357 /* Something like struct S { long double x; char a[0] } is not an
4358 HFA structure, and therefore doesn't go in fp registers. But
4359 the middle-end will give it XFmode anyway, and XFmode values
4360 don't normally fit in integer registers. So we need to smuggle
4361 the value inside a parallel. */
4362 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4363 need_parallel = true;
4364
4365 if (need_parallel)
4366 {
4367 rtx loc[8];
4368 int offset;
4369 int bytesize;
4370 int i;
4371
4372 offset = 0;
4373 bytesize = int_size_in_bytes (valtype);
4374 /* An empty PARALLEL is invalid here, but the return value
4375 doesn't matter for empty structs. */
4376 if (bytesize == 0)
4377 return gen_rtx_REG (mode, GR_RET_FIRST);
4378 for (i = 0; offset < bytesize; i++)
4379 {
4380 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4381 gen_rtx_REG (DImode,
4382 GR_RET_FIRST + i),
4383 GEN_INT (offset));
4384 offset += UNITS_PER_WORD;
4385 }
4386 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4387 }
4388
4389 return gen_rtx_REG (mode, GR_RET_FIRST);
4390 }
4391 }
4392
4393 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4394 We need to emit DTP-relative relocations. */
4395
4396 static void
4397 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4398 {
4399 gcc_assert (size == 4 || size == 8);
4400 if (size == 4)
4401 fputs ("\tdata4.ua\t@dtprel(", file);
4402 else
4403 fputs ("\tdata8.ua\t@dtprel(", file);
4404 output_addr_const (file, x);
4405 fputs (")", file);
4406 }
4407
4408 /* Print a memory address as an operand to reference that memory location. */
4409
4410 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4411 also call this from ia64_print_operand for memory addresses. */
4412
4413 void
4414 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4415 rtx address ATTRIBUTE_UNUSED)
4416 {
4417 }
4418
4419 /* Print an operand to an assembler instruction.
4420 C Swap and print a comparison operator.
4421 D Print an FP comparison operator.
4422 E Print 32 - constant, for SImode shifts as extract.
4423 e Print 64 - constant, for DImode rotates.
4424 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4425 a floating point register emitted normally.
4426 I Invert a predicate register by adding 1.
4427 J Select the proper predicate register for a condition.
4428 j Select the inverse predicate register for a condition.
4429 O Append .acq for volatile load.
4430 P Postincrement of a MEM.
4431 Q Append .rel for volatile store.
4432 S Shift amount for shladd instruction.
4433 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4434 for Intel assembler.
4435 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4436 for Intel assembler.
4437 X A pair of floating point registers.
4438 r Print register name, or constant 0 as r0. HP compatibility for
4439 Linux kernel.
4440 v Print vector constant value as an 8-byte integer value. */
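/* For example, %P on a DImode POST_INC memory operand prints ", 8", and
   %, prints the insn's qualifying predicate, e.g. "(p6) ".  */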
4441
4442 void
4443 ia64_print_operand (FILE * file, rtx x, int code)
4444 {
4445 const char *str;
4446
4447 switch (code)
4448 {
4449 case 0:
4450 /* Handled below. */
4451 break;
4452
4453 case 'C':
4454 {
4455 enum rtx_code c = swap_condition (GET_CODE (x));
4456 fputs (GET_RTX_NAME (c), file);
4457 return;
4458 }
4459
4460 case 'D':
4461 switch (GET_CODE (x))
4462 {
4463 case NE:
4464 str = "neq";
4465 break;
4466 case UNORDERED:
4467 str = "unord";
4468 break;
4469 case ORDERED:
4470 str = "ord";
4471 break;
4472 default:
4473 str = GET_RTX_NAME (GET_CODE (x));
4474 break;
4475 }
4476 fputs (str, file);
4477 return;
4478
4479 case 'E':
4480 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4481 return;
4482
4483 case 'e':
4484 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4485 return;
4486
4487 case 'F':
4488 if (x == CONST0_RTX (GET_MODE (x)))
4489 str = reg_names [FR_REG (0)];
4490 else if (x == CONST1_RTX (GET_MODE (x)))
4491 str = reg_names [FR_REG (1)];
4492 else
4493 {
4494 gcc_assert (GET_CODE (x) == REG);
4495 str = reg_names [REGNO (x)];
4496 }
4497 fputs (str, file);
4498 return;
4499
4500 case 'I':
4501 fputs (reg_names [REGNO (x) + 1], file);
4502 return;
4503
4504 case 'J':
4505 case 'j':
4506 {
4507 unsigned int regno = REGNO (XEXP (x, 0));
4508 if (GET_CODE (x) == EQ)
4509 regno += 1;
4510 if (code == 'j')
4511 regno ^= 1;
4512 fputs (reg_names [regno], file);
4513 }
4514 return;
4515
4516 case 'O':
4517 if (MEM_VOLATILE_P (x))
4518 fputs(".acq", file);
4519 return;
4520
4521 case 'P':
4522 {
4523 HOST_WIDE_INT value;
4524
4525 switch (GET_CODE (XEXP (x, 0)))
4526 {
4527 default:
4528 return;
4529
4530 case POST_MODIFY:
4531 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4532 if (GET_CODE (x) == CONST_INT)
4533 value = INTVAL (x);
4534 else
4535 {
4536 gcc_assert (GET_CODE (x) == REG);
4537 fprintf (file, ", %s", reg_names[REGNO (x)]);
4538 return;
4539 }
4540 break;
4541
4542 case POST_INC:
4543 value = GET_MODE_SIZE (GET_MODE (x));
4544 break;
4545
4546 case POST_DEC:
4547 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4548 break;
4549 }
4550
4551 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4552 return;
4553 }
4554
4555 case 'Q':
4556 if (MEM_VOLATILE_P (x))
4557 fputs(".rel", file);
4558 return;
4559
4560 case 'S':
4561 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4562 return;
4563
4564 case 'T':
4565 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4566 {
4567 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4568 return;
4569 }
4570 break;
4571
4572 case 'U':
4573 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4574 {
4575 const char *prefix = "0x";
4576 if (INTVAL (x) & 0x80000000)
4577 {
4578 fprintf (file, "0xffffffff");
4579 prefix = "";
4580 }
4581 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4582 return;
4583 }
4584 break;
4585
4586 case 'X':
4587 {
4588 unsigned int regno = REGNO (x);
4589 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4590 }
4591 return;
4592
4593 case 'r':
4594 /* If this operand is the constant zero, write it as register zero.
4595 Any register, zero, or CONST_INT value is OK here. */
4596 if (GET_CODE (x) == REG)
4597 fputs (reg_names[REGNO (x)], file);
4598 else if (x == CONST0_RTX (GET_MODE (x)))
4599 fputs ("r0", file);
4600 else if (GET_CODE (x) == CONST_INT)
4601 output_addr_const (file, x);
4602 else
4603 output_operand_lossage ("invalid %%r value");
4604 return;
4605
4606 case 'v':
4607 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4608 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4609 break;
4610
4611 case '+':
4612 {
4613 const char *which;
4614
4615 /* For conditional branches, returns or calls, substitute
4616 sptk, dptk, dpnt, or spnt for %s. */
4617 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4618 if (x)
4619 {
4620 int pred_val = INTVAL (XEXP (x, 0));
4621
4622 /* Guess top and bottom 2% statically predicted. */
4623 if (pred_val < REG_BR_PROB_BASE / 50)
4624 which = ".spnt";
4625 else if (pred_val < REG_BR_PROB_BASE / 2)
4626 which = ".dpnt";
4627 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4628 which = ".dptk";
4629 else
4630 which = ".sptk";
4631 }
4632 else if (GET_CODE (current_output_insn) == CALL_INSN)
4633 which = ".sptk";
4634 else
4635 which = ".dptk";
4636
4637 fputs (which, file);
4638 return;
4639 }
4640
4641 case ',':
4642 x = current_insn_predicate;
4643 if (x)
4644 {
4645 unsigned int regno = REGNO (XEXP (x, 0));
4646 if (GET_CODE (x) == EQ)
4647 regno += 1;
4648 fprintf (file, "(%s) ", reg_names [regno]);
4649 }
4650 return;
4651
4652 default:
4653 output_operand_lossage ("ia64_print_operand: unknown code");
4654 return;
4655 }
4656
4657 switch (GET_CODE (x))
4658 {
4659 /* This happens for the spill/restore instructions. */
4660 case POST_INC:
4661 case POST_DEC:
4662 case POST_MODIFY:
4663 x = XEXP (x, 0);
4664 /* ... fall through ... */
4665
4666 case REG:
4667 fputs (reg_names [REGNO (x)], file);
4668 break;
4669
4670 case MEM:
4671 {
4672 rtx addr = XEXP (x, 0);
4673 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4674 addr = XEXP (addr, 0);
4675 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4676 break;
4677 }
4678
4679 default:
4680 output_addr_const (file, x);
4681 break;
4682 }
4683
4684 return;
4685 }
4686 \f
4687 /* Compute a (partial) cost for rtx X. Return true if the complete
4688 cost has been computed, and false if subexpressions should be
4689 scanned. In either case, *TOTAL contains the cost result. */
4690 /* ??? This is incomplete. */
4691
4692 static bool
4693 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4694 {
4695 switch (code)
4696 {
4697 case CONST_INT:
4698 switch (outer_code)
4699 {
4700 case SET:
4701 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4702 return true;
4703 case PLUS:
4704 if (CONST_OK_FOR_I (INTVAL (x)))
4705 *total = 0;
4706 else if (CONST_OK_FOR_J (INTVAL (x)))
4707 *total = 1;
4708 else
4709 *total = COSTS_N_INSNS (1);
4710 return true;
4711 default:
4712 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4713 *total = 0;
4714 else
4715 *total = COSTS_N_INSNS (1);
4716 return true;
4717 }
4718
4719 case CONST_DOUBLE:
4720 *total = COSTS_N_INSNS (1);
4721 return true;
4722
4723 case CONST:
4724 case SYMBOL_REF:
4725 case LABEL_REF:
4726 *total = COSTS_N_INSNS (3);
4727 return true;
4728
4729 case MULT:
4730 /* For multiplies wider than HImode, we have to go to the FPU,
4731 which normally involves copies. Plus there's the latency
4732 of the multiply itself, and the latency of the instructions to
4733 transfer integer regs to FP regs. */
4734 /* ??? Check for FP mode. */
4735 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4736 *total = COSTS_N_INSNS (10);
4737 else
4738 *total = COSTS_N_INSNS (2);
4739 return true;
4740
4741 case PLUS:
4742 case MINUS:
4743 case ASHIFT:
4744 case ASHIFTRT:
4745 case LSHIFTRT:
4746 *total = COSTS_N_INSNS (1);
4747 return true;
4748
4749 case DIV:
4750 case UDIV:
4751 case MOD:
4752 case UMOD:
4753 /* We make divide expensive, so that divide-by-constant will be
4754 optimized to a multiply. */
4755 *total = COSTS_N_INSNS (60);
4756 return true;
4757
4758 default:
4759 return false;
4760 }
4761 }
4762
4763 /* Calculate the cost of moving data from a register in class FROM to
4764 one in class TO, using MODE. */
4765
4766 int
4767 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4768 enum reg_class to)
4769 {
4770 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4771 if (to == ADDL_REGS)
4772 to = GR_REGS;
4773 if (from == ADDL_REGS)
4774 from = GR_REGS;
4775
4776 /* All costs are symmetric, so reduce cases by putting the
4777 lower number class as the destination. */
4778 if (from < to)
4779 {
4780 enum reg_class tmp = to;
4781 to = from, from = tmp;
4782 }
4783
4784 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4785 so that we get secondary memory reloads. Between FR_REGS,
4786 we have to make this at least as expensive as MEMORY_MOVE_COST
4787 to avoid spectacularly poor register class preferencing. */
4788 if (mode == XFmode || mode == RFmode)
4789 {
4790 if (to != GR_REGS || from != GR_REGS)
4791 return MEMORY_MOVE_COST (mode, to, 0);
4792 else
4793 return 3;
4794 }
4795
4796 switch (to)
4797 {
4798 case PR_REGS:
4799 /* Moving between PR registers takes two insns. */
4800 if (from == PR_REGS)
4801 return 3;
4802 /* Moving between PR and anything but GR is impossible. */
4803 if (from != GR_REGS)
4804 return MEMORY_MOVE_COST (mode, to, 0);
4805 break;
4806
4807 case BR_REGS:
4808 /* Moving between BR and anything but GR is impossible. */
4809 if (from != GR_REGS && from != GR_AND_BR_REGS)
4810 return MEMORY_MOVE_COST (mode, to, 0);
4811 break;
4812
4813 case AR_I_REGS:
4814 case AR_M_REGS:
4815 /* Moving between AR and anything but GR is impossible. */
4816 if (from != GR_REGS)
4817 return MEMORY_MOVE_COST (mode, to, 0);
4818 break;
4819
4820 case GR_REGS:
4821 case FR_REGS:
4822 case FP_REGS:
4823 case GR_AND_FR_REGS:
4824 case GR_AND_BR_REGS:
4825 case ALL_REGS:
4826 break;
4827
4828 default:
4829 gcc_unreachable ();
4830 }
4831
4832 return 2;
4833 }
4834
4835 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4836 to use when copying X into that class. */
4837
4838 enum reg_class
4839 ia64_preferred_reload_class (rtx x, enum reg_class class)
4840 {
4841 switch (class)
4842 {
4843 case FR_REGS:
4844 case FP_REGS:
4845 /* Don't allow volatile mem reloads into floating point registers.
4846 This is defined to force reload to choose the r/m case instead
4847 of the f/f case when reloading (set (reg fX) (mem/v)). */
4848 if (MEM_P (x) && MEM_VOLATILE_P (x))
4849 return NO_REGS;
4850
4851 /* Force all unrecognized constants into the constant pool. */
4852 if (CONSTANT_P (x))
4853 return NO_REGS;
4854 break;
4855
4856 case AR_M_REGS:
4857 case AR_I_REGS:
4858 if (!OBJECT_P (x))
4859 return NO_REGS;
4860 break;
4861
4862 default:
4863 break;
4864 }
4865
4866 return class;
4867 }
4868
4869 /* This function returns the register class required for a secondary
4870 register when copying between one of the registers in CLASS, and X,
4871 using MODE. A return value of NO_REGS means that no secondary register
4872 is required. */
4873
4874 enum reg_class
4875 ia64_secondary_reload_class (enum reg_class class,
4876 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4877 {
4878 int regno = -1;
4879
4880 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4881 regno = true_regnum (x);
4882
4883 switch (class)
4884 {
4885 case BR_REGS:
4886 case AR_M_REGS:
4887 case AR_I_REGS:
4888 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4889 interaction. We end up with two pseudos with overlapping lifetimes
4890 both of which are equiv to the same constant, and both which need
4891 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4892 changes depending on the path length, which means the qty_first_reg
4893 check in make_regs_eqv can give different answers at different times.
4894 At some point I'll probably need a reload_indi pattern to handle
4895 this.
4896
4897 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4898 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4899 non-general registers for good measure. */
4900 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4901 return GR_REGS;
4902
4903 /* This is needed if a pseudo used as a call_operand gets spilled to a
4904 stack slot. */
4905 if (GET_CODE (x) == MEM)
4906 return GR_REGS;
4907 break;
4908
4909 case FR_REGS:
4910 case FP_REGS:
4911 /* Need to go through general registers to get to other class regs. */
4912 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4913 return GR_REGS;
4914
4915 /* This can happen when a paradoxical subreg is an operand to the
4916 muldi3 pattern. */
4917 /* ??? This shouldn't be necessary after instruction scheduling is
4918 enabled, because paradoxical subregs are not accepted by
4919 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4920 stop the paradoxical subreg stupidity in the *_operand functions
4921 in recog.c. */
4922 if (GET_CODE (x) == MEM
4923 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4924 || GET_MODE (x) == QImode))
4925 return GR_REGS;
4926
4927 /* This can happen because of the ior/and/etc patterns that accept FP
4928 registers as operands. If the third operand is a constant, then it
4929 needs to be reloaded into a FP register. */
4930 if (GET_CODE (x) == CONST_INT)
4931 return GR_REGS;
4932
4933 /* This can happen because of register elimination in a muldi3 insn.
4934 E.g. `26107 * (unsigned long)&u'. */
4935 if (GET_CODE (x) == PLUS)
4936 return GR_REGS;
4937 break;
4938
4939 case PR_REGS:
4940 /* ??? This happens if we cse/gcse a BImode value across a call,
4941 and the function has a nonlocal goto. This is because global
4942 does not allocate call crossing pseudos to hard registers when
4943 current_function_has_nonlocal_goto is true. This is relatively
4944 common for C++ programs that use exceptions. To reproduce,
4945 return NO_REGS and compile libstdc++. */
4946 if (GET_CODE (x) == MEM)
4947 return GR_REGS;
4948
4949 /* This can happen when we take a BImode subreg of a DImode value,
4950 and that DImode value winds up in some non-GR register. */
4951 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4952 return GR_REGS;
4953 break;
4954
4955 default:
4956 break;
4957 }
4958
4959 return NO_REGS;
4960 }
4961
4962 \f
4963 /* Emit text to declare externally defined variables and functions, because
4964 the Intel assembler does not support undefined externals. */
4965
4966 void
4967 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4968 {
4969 int save_referenced;
4970
4971 /* GNU as does not need anything here, but the HP linker does need
4972 something for external functions. */
4973
4974 if (TARGET_GNU_AS
4975 && (!TARGET_HPUX_LD
4976 || TREE_CODE (decl) != FUNCTION_DECL
4977 || strstr (name, "__builtin_") == name))
4978 return;
4979
4980 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4981 the linker when we do this, so we need to be careful not to do this for
4982 builtin functions which have no library equivalent. Unfortunately, we
4983 can't tell here whether or not a function will actually be called by
4984 expand_expr, so we pull in library functions even if we may not need
4985 them later. */
4986 if (! strcmp (name, "__builtin_next_arg")
4987 || ! strcmp (name, "alloca")
4988 || ! strcmp (name, "__builtin_constant_p")
4989 || ! strcmp (name, "__builtin_args_info"))
4990 return;
4991
4992 if (TARGET_HPUX_LD)
4993 ia64_hpux_add_extern_decl (decl);
4994 else
4995 {
4996 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4997 restore it. */
4998 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4999 if (TREE_CODE (decl) == FUNCTION_DECL)
5000 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
5001 (*targetm.asm_out.globalize_label) (file, name);
5002 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
5003 }
5004 }
5005 \f
5006 /* Parse the -mfixed-range= option string. */
5007
5008 static void
5009 fix_range (const char *const_str)
5010 {
5011 int i, first, last;
5012 char *str, *dash, *comma;
5013
5014 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5015 REG2 are either register names or register numbers. The effect
5016 of this option is to mark the registers in the range from REG1 to
5017 REG2 as ``fixed'' so they won't be used by the compiler. This is
5018 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5019
5020 i = strlen (const_str);
5021 str = (char *) alloca (i + 1);
5022 memcpy (str, const_str, i + 1);
5023
5024 while (1)
5025 {
5026 dash = strchr (str, '-');
5027 if (!dash)
5028 {
5029 warning (0, "value of -mfixed-range must have form REG1-REG2");
5030 return;
5031 }
5032 *dash = '\0';
5033
5034 comma = strchr (dash + 1, ',');
5035 if (comma)
5036 *comma = '\0';
5037
5038 first = decode_reg_name (str);
5039 if (first < 0)
5040 {
5041 warning (0, "unknown register name: %s", str);
5042 return;
5043 }
5044
5045 last = decode_reg_name (dash + 1);
5046 if (last < 0)
5047 {
5048 warning (0, "unknown register name: %s", dash + 1);
5049 return;
5050 }
5051
5052 *dash = '-';
5053
5054 if (first > last)
5055 {
5056 warning (0, "%s-%s is an empty range", str, dash + 1);
5057 return;
5058 }
5059
5060 for (i = first; i <= last; ++i)
5061 fixed_regs[i] = call_used_regs[i] = 1;
5062
5063 if (!comma)
5064 break;
5065
5066 *comma = ',';
5067 str = comma + 1;
5068 }
5069 }
5070
5071 /* Implement TARGET_HANDLE_OPTION. */
5072
5073 static bool
5074 ia64_handle_option (size_t code, const char *arg, int value)
5075 {
5076 switch (code)
5077 {
5078 case OPT_mfixed_range_:
5079 fix_range (arg);
5080 return true;
5081
5082 case OPT_mtls_size_:
5083 if (value != 14 && value != 22 && value != 64)
5084 error ("bad value %<%s%> for -mtls-size= switch", arg);
5085 return true;
5086
5087 case OPT_mtune_:
5088 {
5089 static struct pta
5090 {
5091 const char *name; /* processor name or nickname. */
5092 enum processor_type processor;
5093 }
5094 const processor_alias_table[] =
5095 {
5096 {"itanium", PROCESSOR_ITANIUM},
5097 {"itanium1", PROCESSOR_ITANIUM},
5098 {"merced", PROCESSOR_ITANIUM},
5099 {"itanium2", PROCESSOR_ITANIUM2},
5100 {"mckinley", PROCESSOR_ITANIUM2},
5101 };
5102 int const pta_size = ARRAY_SIZE (processor_alias_table);
5103 int i;
5104
5105 for (i = 0; i < pta_size; i++)
5106 if (!strcmp (arg, processor_alias_table[i].name))
5107 {
5108 ia64_tune = processor_alias_table[i].processor;
5109 break;
5110 }
5111 if (i == pta_size)
5112 error ("bad value %<%s%> for -mtune= switch", arg);
5113 return true;
5114 }
5115
5116 default:
5117 return true;
5118 }
5119 }
5120
5121 /* Implement OVERRIDE_OPTIONS. */
5122
5123 void
5124 ia64_override_options (void)
5125 {
5126 if (TARGET_AUTO_PIC)
5127 target_flags |= MASK_CONST_GP;
5128
5129 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5130 {
5131 warning (0, "not yet implemented: latency-optimized inline square root");
5132 TARGET_INLINE_SQRT = INL_MAX_THR;
5133 }
5134
5135 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5136 flag_schedule_insns_after_reload = 0;
5137
5138 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5139
5140 init_machine_status = ia64_init_machine_status;
5141 }
5142
5143 static struct machine_function *
5144 ia64_init_machine_status (void)
5145 {
5146 return ggc_alloc_cleared (sizeof (struct machine_function));
5147 }
5148 \f
5149 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5150 static enum attr_type ia64_safe_type (rtx);
5151
5152 static enum attr_itanium_class
5153 ia64_safe_itanium_class (rtx insn)
5154 {
5155 if (recog_memoized (insn) >= 0)
5156 return get_attr_itanium_class (insn);
5157 else
5158 return ITANIUM_CLASS_UNKNOWN;
5159 }
5160
5161 static enum attr_type
5162 ia64_safe_type (rtx insn)
5163 {
5164 if (recog_memoized (insn) >= 0)
5165 return get_attr_type (insn);
5166 else
5167 return TYPE_UNKNOWN;
5168 }
5169 \f
5170 /* The following collection of routines emits instruction group stop bits as
5171 necessary to avoid dependencies. */
5172
5173 /* Need to track some additional registers as far as serialization is
5174 concerned so we can properly handle br.call and br.ret. We could
5175 make these registers visible to gcc, but since these registers are
5176 never explicitly used in gcc generated code, it seems wasteful to
5177 do so (plus it would make the call and return patterns needlessly
5178 complex). */
5179 #define REG_RP (BR_REG (0))
5180 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5181 /* This is used for volatile asms which may require a stop bit immediately
5182 before and after them. */
5183 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5184 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5185 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5186
5187 /* For each register, we keep track of how it has been written in the
5188 current instruction group.
5189
5190 If a register is written unconditionally (no qualifying predicate),
5191 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5192
5193 If a register is written if its qualifying predicate P is true, we
5194 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5195 may be written again by the complement of P (P^1) and when this happens,
5196 WRITE_COUNT gets set to 2.
5197
5198 The result of this is that whenever an insn attempts to write a register
5199 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5200
5201 If a predicate register is written by a floating-point insn, we set
5202 WRITTEN_BY_FP to true.
5203
5204 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5205 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5206
5207 struct reg_write_state
5208 {
5209 unsigned int write_count : 2;
5210 unsigned int first_pred : 16;
5211 unsigned int written_by_fp : 1;
5212 unsigned int written_by_and : 1;
5213 unsigned int written_by_or : 1;
5214 };
5215
5216 /* Cumulative info for the current instruction group. */
5217 struct reg_write_state rws_sum[NUM_REGS];
5218 /* Info for the current instruction. This gets copied to rws_sum after a
5219 stop bit is emitted. */
5220 struct reg_write_state rws_insn[NUM_REGS];
5221
5222 /* Indicates whether this is the first instruction after a stop bit,
5223 in which case we don't need another stop bit. Without this,
5224 ia64_variable_issue will die when scheduling an alloc. */
5225 static int first_instruction;
5226
5227 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5228 RTL for one instruction. */
5229 struct reg_flags
5230 {
5231 unsigned int is_write : 1; /* Is register being written? */
5232 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5233 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5234 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5235 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5236 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5237 };
5238
5239 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5240 static int rws_access_regno (int, struct reg_flags, int);
5241 static int rws_access_reg (rtx, struct reg_flags, int);
5242 static void update_set_flags (rtx, struct reg_flags *);
5243 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5244 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5245 static void init_insn_group_barriers (void);
5246 static int group_barrier_needed (rtx);
5247 static int safe_group_barrier_needed (rtx);
5248
5249 /* Update *RWS for REGNO, which is being written by the current instruction,
5250 with predicate PRED, and associated register flags in FLAGS. */
5251
5252 static void
5253 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5254 {
5255 if (pred)
5256 rws[regno].write_count++;
5257 else
5258 rws[regno].write_count = 2;
5259 rws[regno].written_by_fp |= flags.is_fp;
5260 /* ??? Not tracking and/or across differing predicates. */
5261 rws[regno].written_by_and = flags.is_and;
5262 rws[regno].written_by_or = flags.is_or;
5263 rws[regno].first_pred = pred;
5264 }
5265
5266 /* Handle an access to register REGNO of type FLAGS using predicate register
5267 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5268 a dependency with an earlier instruction in the same group. */
5269
5270 static int
5271 rws_access_regno (int regno, struct reg_flags flags, int pred)
5272 {
5273 int need_barrier = 0;
5274
5275 gcc_assert (regno < NUM_REGS);
5276
5277 if (! PR_REGNO_P (regno))
5278 flags.is_and = flags.is_or = 0;
5279
5280 if (flags.is_write)
5281 {
5282 int write_count;
5283
5284 /* One insn writes same reg multiple times? */
5285 gcc_assert (!rws_insn[regno].write_count);
5286
5287 /* Update info for current instruction. */
5288 rws_update (rws_insn, regno, flags, pred);
5289 write_count = rws_sum[regno].write_count;
5290
5291 switch (write_count)
5292 {
5293 case 0:
5294 /* The register has not been written yet. */
5295 rws_update (rws_sum, regno, flags, pred);
5296 break;
5297
5298 case 1:
5299 /* The register has been written via a predicate. If this is
5300 not a complementary predicate, then we need a barrier. */
5301 /* ??? This assumes that P and P+1 are always complementary
5302 predicates for P even. */
5303 if (flags.is_and && rws_sum[regno].written_by_and)
5304 ;
5305 else if (flags.is_or && rws_sum[regno].written_by_or)
5306 ;
5307 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5308 need_barrier = 1;
5309 rws_update (rws_sum, regno, flags, pred);
5310 break;
5311
5312 case 2:
5313 /* The register has been unconditionally written already. We
5314 need a barrier. */
5315 if (flags.is_and && rws_sum[regno].written_by_and)
5316 ;
5317 else if (flags.is_or && rws_sum[regno].written_by_or)
5318 ;
5319 else
5320 need_barrier = 1;
5321 rws_sum[regno].written_by_and = flags.is_and;
5322 rws_sum[regno].written_by_or = flags.is_or;
5323 break;
5324
5325 default:
5326 gcc_unreachable ();
5327 }
5328 }
5329 else
5330 {
5331 if (flags.is_branch)
5332 {
5333 /* Branches have several RAW exceptions that allow us to avoid
5334 barriers. */
5335
5336 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5337 /* RAW dependencies on branch regs are permissible as long
5338 as the writer is a non-branch instruction. Since we
5339 never generate code that uses a branch register written
5340 by a branch instruction, handling this case is
5341 easy. */
5342 return 0;
5343
5344 if (REGNO_REG_CLASS (regno) == PR_REGS
5345 && ! rws_sum[regno].written_by_fp)
5346 /* The predicates of a branch are available within the
5347 same insn group as long as the predicate was written by
5348 something other than a floating-point instruction. */
5349 return 0;
5350 }
5351
5352 if (flags.is_and && rws_sum[regno].written_by_and)
5353 return 0;
5354 if (flags.is_or && rws_sum[regno].written_by_or)
5355 return 0;
5356
5357 switch (rws_sum[regno].write_count)
5358 {
5359 case 0:
5360 /* The register has not been written yet. */
5361 break;
5362
5363 case 1:
5364 /* The register has been written via a predicate. If this is
5365 not a complementary predicate, then we need a barrier. */
5366 /* ??? This assumes that P and P+1 are always complementary
5367 predicates for P even. */
5368 if ((rws_sum[regno].first_pred ^ 1) != pred)
5369 need_barrier = 1;
5370 break;
5371
5372 case 2:
5373 /* The register has been unconditionally written already. We
5374 need a barrier. */
5375 need_barrier = 1;
5376 break;
5377
5378 default:
5379 gcc_unreachable ();
5380 }
5381 }
5382
5383 return need_barrier;
5384 }
5385
5386 static int
5387 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5388 {
5389 int regno = REGNO (reg);
5390 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5391
5392 if (n == 1)
5393 return rws_access_regno (regno, flags, pred);
5394 else
5395 {
5396 int need_barrier = 0;
5397 while (--n >= 0)
5398 need_barrier |= rws_access_regno (regno + n, flags, pred);
5399 return need_barrier;
5400 }
5401 }
5402
5403 /* Examine X, which is a SET rtx, and update the register-access flags
5404 stored in *PFLAGS. */
5405
5406 static void
5407 update_set_flags (rtx x, struct reg_flags *pflags)
5408 {
5409 rtx src = SET_SRC (x);
5410
5411 switch (GET_CODE (src))
5412 {
5413 case CALL:
5414 return;
5415
5416 case IF_THEN_ELSE:
5417 /* There are three cases here:
5418 (1) The destination is (pc), in which case this is a branch,
5419 nothing here applies.
5420 (2) The destination is ar.lc, in which case this is a
5421 doloop_end_internal,
5422 (3) The destination is an fp register, in which case this is
5423 an fselect instruction.
5424 In all cases, nothing we do in this function applies. */
5425 return;
5426
5427 default:
5428 if (COMPARISON_P (src)
5429 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5430 /* Set pflags->is_fp to 1 so that we know we're dealing
5431 with a floating point comparison when processing the
5432 destination of the SET. */
5433 pflags->is_fp = 1;
5434
5435 /* Discover if this is a parallel comparison. We only handle
5436 and.orcm and or.andcm at present, since we must retain a
5437 strict inverse on the predicate pair. */
5438 else if (GET_CODE (src) == AND)
5439 pflags->is_and = 1;
5440 else if (GET_CODE (src) == IOR)
5441 pflags->is_or = 1;
5442
5443 break;
5444 }
5445 }
5446
5447 /* Subroutine of rtx_needs_barrier; this function determines whether the
5448 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5449 are as in rtx_needs_barrier. */
5451
5452 static int
5453 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5454 {
5455 int need_barrier = 0;
5456 rtx dst;
5457 rtx src = SET_SRC (x);
5458
5459 if (GET_CODE (src) == CALL)
5460 /* We don't need to worry about the result registers that
5461 get written by subroutine call. */
5462 return rtx_needs_barrier (src, flags, pred);
5463 else if (SET_DEST (x) == pc_rtx)
5464 {
5465 /* X is a conditional branch. */
5466 /* ??? This seems redundant, as the caller sets this bit for
5467 all JUMP_INSNs. */
5468 flags.is_branch = 1;
5469 return rtx_needs_barrier (src, flags, pred);
5470 }
5471
5472 need_barrier = rtx_needs_barrier (src, flags, pred);
5473
5474 dst = SET_DEST (x);
5475 if (GET_CODE (dst) == ZERO_EXTRACT)
5476 {
5477 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5478 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5479 dst = XEXP (dst, 0);
5480 }
5481 return need_barrier;
5482 }
5483
5484 /* Handle an access to rtx X of type FLAGS using predicate register
5485 PRED. Return 1 if this access creates a dependency with an earlier
5486 instruction in the same group. */
5487
5488 static int
5489 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5490 {
5491 int i, j;
5492 int is_complemented = 0;
5493 int need_barrier = 0;
5494 const char *format_ptr;
5495 struct reg_flags new_flags;
5496 rtx cond;
5497
5498 if (! x)
5499 return 0;
5500
5501 new_flags = flags;
5502
5503 switch (GET_CODE (x))
5504 {
5505 case SET:
5506 update_set_flags (x, &new_flags);
5507 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5508 if (GET_CODE (SET_SRC (x)) != CALL)
5509 {
5510 new_flags.is_write = 1;
5511 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5512 }
5513 break;
5514
5515 case CALL:
5516 new_flags.is_write = 0;
5517 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5518
5519 /* Avoid multiple register writes, in case this is a pattern with
5520 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5521 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5522 {
5523 new_flags.is_write = 1;
5524 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5525 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5526 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5527 }
5528 break;
5529
5530 case COND_EXEC:
5531 /* X is a predicated instruction. */
5532
5533 cond = COND_EXEC_TEST (x);
5534 gcc_assert (!pred);
5535 need_barrier = rtx_needs_barrier (cond, flags, 0);
5536
5537 if (GET_CODE (cond) == EQ)
5538 is_complemented = 1;
5539 cond = XEXP (cond, 0);
5540 gcc_assert (GET_CODE (cond) == REG
5541 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5542 pred = REGNO (cond);
5543 if (is_complemented)
5544 ++pred;
5545
5546 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5547 return need_barrier;
5548
5549 case CLOBBER:
5550 case USE:
5551 /* Clobber & use are for earlier compiler phases only. */
5552 break;
5553
5554 case ASM_OPERANDS:
5555 case ASM_INPUT:
5556 /* We always emit stop bits for traditional asms. We emit stop bits
5557 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5558 if (GET_CODE (x) != ASM_OPERANDS
5559 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5560 {
5561 /* Avoid writing the register multiple times if we have multiple
5562 asm outputs. This avoids a failure in rws_access_reg. */
5563 if (! rws_insn[REG_VOLATILE].write_count)
5564 {
5565 new_flags.is_write = 1;
5566 rws_access_regno (REG_VOLATILE, new_flags, pred);
5567 }
5568 return 1;
5569 }
5570
5571 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5572 We cannot just fall through here since then we would be confused
5573 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5574 a traditional asm, unlike its normal usage. */
5575
5576 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5577 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5578 need_barrier = 1;
5579 break;
5580
5581 case PARALLEL:
5582 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5583 {
5584 rtx pat = XVECEXP (x, 0, i);
5585 switch (GET_CODE (pat))
5586 {
5587 case SET:
5588 update_set_flags (pat, &new_flags);
5589 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5590 break;
5591
5592 case USE:
5593 case CALL:
5594 case ASM_OPERANDS:
5595 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5596 break;
5597
5598 case CLOBBER:
5599 case RETURN:
5600 break;
5601
5602 default:
5603 gcc_unreachable ();
5604 }
5605 }
5606 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5607 {
5608 rtx pat = XVECEXP (x, 0, i);
5609 if (GET_CODE (pat) == SET)
5610 {
5611 if (GET_CODE (SET_SRC (pat)) != CALL)
5612 {
5613 new_flags.is_write = 1;
5614 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5615 pred);
5616 }
5617 }
5618 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5619 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5620 }
5621 break;
5622
5623 case SUBREG:
5624 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5625 break;
5626 case REG:
5627 if (REGNO (x) == AR_UNAT_REGNUM)
5628 {
5629 for (i = 0; i < 64; ++i)
5630 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5631 }
5632 else
5633 need_barrier = rws_access_reg (x, flags, pred);
5634 break;
5635
5636 case MEM:
5637 /* Find the regs used in memory address computation. */
5638 new_flags.is_write = 0;
5639 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5640 break;
5641
5642 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5643 case SYMBOL_REF: case LABEL_REF: case CONST:
5644 break;
5645
5646 /* Operators with side-effects. */
5647 case POST_INC: case POST_DEC:
5648 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5649
5650 new_flags.is_write = 0;
5651 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5652 new_flags.is_write = 1;
5653 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5654 break;
5655
5656 case POST_MODIFY:
5657 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5658
5659 new_flags.is_write = 0;
5660 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5661 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5662 new_flags.is_write = 1;
5663 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5664 break;
5665
5666 /* Handle common unary and binary ops for efficiency. */
5667 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5668 case MOD: case UDIV: case UMOD: case AND: case IOR:
5669 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5670 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5671 case NE: case EQ: case GE: case GT: case LE:
5672 case LT: case GEU: case GTU: case LEU: case LTU:
5673 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5674 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5675 break;
5676
5677 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5678 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5679 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5680 case SQRT: case FFS: case POPCOUNT:
5681 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5682 break;
5683
5684 case VEC_SELECT:
5685 /* VEC_SELECT's second argument is a PARALLEL with integers that
5686 describe the elements selected. On ia64, those integers are
5687 always constants. Avoid walking the PARALLEL so that we don't
5688 get confused with "normal" parallels and then die. */
5689 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5690 break;
5691
5692 case UNSPEC:
5693 switch (XINT (x, 1))
5694 {
5695 case UNSPEC_LTOFF_DTPMOD:
5696 case UNSPEC_LTOFF_DTPREL:
5697 case UNSPEC_DTPREL:
5698 case UNSPEC_LTOFF_TPREL:
5699 case UNSPEC_TPREL:
5700 case UNSPEC_PRED_REL_MUTEX:
5701 case UNSPEC_PIC_CALL:
5702 case UNSPEC_MF:
5703 case UNSPEC_FETCHADD_ACQ:
5704 case UNSPEC_BSP_VALUE:
5705 case UNSPEC_FLUSHRS:
5706 case UNSPEC_BUNDLE_SELECTOR:
5707 break;
5708
5709 case UNSPEC_GR_SPILL:
5710 case UNSPEC_GR_RESTORE:
5711 {
5712 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5713 HOST_WIDE_INT bit = (offset >> 3) & 63;
5714
5715 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5716 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5717 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5718 new_flags, pred);
5719 break;
5720 }
5721
5722 case UNSPEC_FR_SPILL:
5723 case UNSPEC_FR_RESTORE:
5724 case UNSPEC_GETF_EXP:
5725 case UNSPEC_SETF_EXP:
5726 case UNSPEC_ADDP4:
5727 case UNSPEC_FR_SQRT_RECIP_APPROX:
5728 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5729 break;
5730
5731 case UNSPEC_FR_RECIP_APPROX:
5732 case UNSPEC_SHRP:
5733 case UNSPEC_COPYSIGN:
5734 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5735 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5736 break;
5737
5738 case UNSPEC_CMPXCHG_ACQ:
5739 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5740 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5741 break;
5742
5743 default:
5744 gcc_unreachable ();
5745 }
5746 break;
5747
5748 case UNSPEC_VOLATILE:
5749 switch (XINT (x, 1))
5750 {
5751 case UNSPECV_ALLOC:
5752 /* Alloc must always be the first instruction of a group.
5753 We force this by always returning true. */
5754 /* ??? We might get better scheduling if we explicitly check for
5755 input/local/output register dependencies, and modify the
5756 scheduler so that alloc is always reordered to the start of
5757 the current group. We could then eliminate all of the
5758 first_instruction code. */
5759 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5760
5761 new_flags.is_write = 1;
5762 rws_access_regno (REG_AR_CFM, new_flags, pred);
5763 return 1;
5764
5765 case UNSPECV_SET_BSP:
5766 need_barrier = 1;
5767 break;
5768
5769 case UNSPECV_BLOCKAGE:
5770 case UNSPECV_INSN_GROUP_BARRIER:
5771 case UNSPECV_BREAK:
5772 case UNSPECV_PSAC_ALL:
5773 case UNSPECV_PSAC_NORMAL:
5774 return 0;
5775
5776 default:
5777 gcc_unreachable ();
5778 }
5779 break;
5780
5781 case RETURN:
5782 new_flags.is_write = 0;
5783 need_barrier = rws_access_regno (REG_RP, flags, pred);
5784 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5785
5786 new_flags.is_write = 1;
5787 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5788 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5789 break;
5790
5791 default:
5792 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5793 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5794 switch (format_ptr[i])
5795 {
5796 case '0': /* unused field */
5797 case 'i': /* integer */
5798 case 'n': /* note */
5799 case 'w': /* wide integer */
5800 case 's': /* pointer to string */
5801 case 'S': /* optional pointer to string */
5802 break;
5803
5804 case 'e':
5805 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5806 need_barrier = 1;
5807 break;
5808
5809 case 'E':
5810 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5811 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5812 need_barrier = 1;
5813 break;
5814
5815 default:
5816 gcc_unreachable ();
5817 }
5818 break;
5819 }
5820 return need_barrier;
5821 }
5822
5823 /* Clear out the state for group_barrier_needed at the start of a
5824 sequence of insns. */
5825
5826 static void
5827 init_insn_group_barriers (void)
5828 {
5829 memset (rws_sum, 0, sizeof (rws_sum));
5830 first_instruction = 1;
5831 }
5832
5833 /* Given the current state, determine whether a group barrier (a stop bit) is
5834 necessary before INSN. Return nonzero if so. This modifies the state to
5835 include the effects of INSN as a side-effect. */
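/* For example, the sequence

       mov r14 = r32
       add r15 = r14, r16

   has a read-after-write dependency on r14 and therefore needs a stop
   bit (";;") between the two instructions if they would otherwise end
   up in the same instruction group. */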
5836
5837 static int
5838 group_barrier_needed (rtx insn)
5839 {
5840 rtx pat;
5841 int need_barrier = 0;
5842 struct reg_flags flags;
5843
5844 memset (&flags, 0, sizeof (flags));
5845 switch (GET_CODE (insn))
5846 {
5847 case NOTE:
5848 break;
5849
5850 case BARRIER:
5851 /* A barrier doesn't imply an instruction group boundary. */
5852 break;
5853
5854 case CODE_LABEL:
5855 memset (rws_insn, 0, sizeof (rws_insn));
5856 return 1;
5857
5858 case CALL_INSN:
5859 flags.is_branch = 1;
5860 flags.is_sibcall = SIBLING_CALL_P (insn);
5861 memset (rws_insn, 0, sizeof (rws_insn));
5862
5863 /* Don't bundle a call following another call. */
5864 if ((pat = prev_active_insn (insn))
5865 && GET_CODE (pat) == CALL_INSN)
5866 {
5867 need_barrier = 1;
5868 break;
5869 }
5870
5871 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5872 break;
5873
5874 case JUMP_INSN:
5875 flags.is_branch = 1;
5876
5877 /* Don't bundle a jump following a call. */
5878 if ((pat = prev_active_insn (insn))
5879 && GET_CODE (pat) == CALL_INSN)
5880 {
5881 need_barrier = 1;
5882 break;
5883 }
5884 /* FALLTHRU */
5885
5886 case INSN:
5887 if (GET_CODE (PATTERN (insn)) == USE
5888 || GET_CODE (PATTERN (insn)) == CLOBBER)
5889 /* Don't care about USE and CLOBBER "insns"---those are used to
5890 indicate to the optimizer that it shouldn't get rid of
5891 certain operations. */
5892 break;
5893
5894 pat = PATTERN (insn);
5895
5896 /* Ug. Hack hacks hacked elsewhere. */
5897 switch (recog_memoized (insn))
5898 {
5899 /* We play dependency tricks with the epilogue in order
5900 to get proper schedules. Undo this for dv analysis. */
5901 case CODE_FOR_epilogue_deallocate_stack:
5902 case CODE_FOR_prologue_allocate_stack:
5903 pat = XVECEXP (pat, 0, 0);
5904 break;
5905
5906 /* The pattern we use for br.cloop confuses the code above.
5907 The second element of the vector is representative. */
5908 case CODE_FOR_doloop_end_internal:
5909 pat = XVECEXP (pat, 0, 1);
5910 break;
5911
5912 /* Doesn't generate code. */
5913 case CODE_FOR_pred_rel_mutex:
5914 case CODE_FOR_prologue_use:
5915 return 0;
5916
5917 default:
5918 break;
5919 }
5920
5921 memset (rws_insn, 0, sizeof (rws_insn));
5922 need_barrier = rtx_needs_barrier (pat, flags, 0);
5923
5924 /* Check to see if the previous instruction was a volatile
5925 asm. */
5926 if (! need_barrier)
5927 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5928 break;
5929
5930 default:
5931 gcc_unreachable ();
5932 }
5933
5934 if (first_instruction && INSN_P (insn)
5935 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5936 && GET_CODE (PATTERN (insn)) != USE
5937 && GET_CODE (PATTERN (insn)) != CLOBBER)
5938 {
5939 need_barrier = 0;
5940 first_instruction = 0;
5941 }
5942
5943 return need_barrier;
5944 }
5945
5946 /* Like group_barrier_needed, but do not clobber the current state. */
5947
5948 static int
5949 safe_group_barrier_needed (rtx insn)
5950 {
5951 struct reg_write_state rws_saved[NUM_REGS];
5952 int saved_first_instruction;
5953 int t;
5954
5955 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5956 saved_first_instruction = first_instruction;
5957
5958 t = group_barrier_needed (insn);
5959
5960 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5961 first_instruction = saved_first_instruction;
5962
5963 return t;
5964 }
5965
5966 /* Scan the current function and insert stop bits as necessary to
5967 eliminate dependencies. This function assumes that a final
5968 instruction scheduling pass has been run which has already
5969 inserted most of the necessary stop bits. This function only
5970 inserts new ones at basic block boundaries, since these are
5971 invisible to the scheduler. */
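/* The stop itself is emitted as gen_insn_group_barrier (GEN_INT (3)),
   whose pattern is output as the ";;" stop bit (see the
   insn_group_barrier pattern in ia64.md). */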
5972
5973 static void
5974 emit_insn_group_barriers (FILE *dump)
5975 {
5976 rtx insn;
5977 rtx last_label = 0;
5978 int insns_since_last_label = 0;
5979
5980 init_insn_group_barriers ();
5981
5982 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5983 {
5984 if (GET_CODE (insn) == CODE_LABEL)
5985 {
5986 if (insns_since_last_label)
5987 last_label = insn;
5988 insns_since_last_label = 0;
5989 }
5990 else if (GET_CODE (insn) == NOTE
5991 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5992 {
5993 if (insns_since_last_label)
5994 last_label = insn;
5995 insns_since_last_label = 0;
5996 }
5997 else if (GET_CODE (insn) == INSN
5998 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5999 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6000 {
6001 init_insn_group_barriers ();
6002 last_label = 0;
6003 }
6004 else if (INSN_P (insn))
6005 {
6006 insns_since_last_label = 1;
6007
6008 if (group_barrier_needed (insn))
6009 {
6010 if (last_label)
6011 {
6012 if (dump)
6013 fprintf (dump, "Emitting stop before label %d\n",
6014 INSN_UID (last_label));
6015 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6016 insn = last_label;
6017
6018 init_insn_group_barriers ();
6019 last_label = 0;
6020 }
6021 }
6022 }
6023 }
6024 }
6025
6026 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6027 This function has to emit all necessary group barriers. */
6028
6029 static void
6030 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6031 {
6032 rtx insn;
6033
6034 init_insn_group_barriers ();
6035
6036 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6037 {
6038 if (GET_CODE (insn) == BARRIER)
6039 {
6040 rtx last = prev_active_insn (insn);
6041
6042 if (! last)
6043 continue;
6044 if (GET_CODE (last) == JUMP_INSN
6045 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6046 last = prev_active_insn (last);
6047 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6048 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6049
6050 init_insn_group_barriers ();
6051 }
6052 else if (INSN_P (insn))
6053 {
6054 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6055 init_insn_group_barriers ();
6056 else if (group_barrier_needed (insn))
6057 {
6058 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6059 init_insn_group_barriers ();
6060 group_barrier_needed (insn);
6061 }
6062 }
6063 }
6064 }
6065
6066 \f
6067
6068 /* Instruction scheduling support. */
6069
6070 #define NR_BUNDLES 10
6071
6072 /* A list of names of all available bundles. */
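/* Each bundle provides three instruction slots; the letters in the
   template names give the slot types: m = memory, i = integer,
   f = floating-point, b = branch, and the "lx" pair denotes a
   two-slot long-immediate (or long-branch) insn. */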
6073
6074 static const char *bundle_name [NR_BUNDLES] =
6075 {
6076 ".mii",
6077 ".mmi",
6078 ".mfi",
6079 ".mmf",
6080 #if NR_BUNDLES == 10
6081 ".bbb",
6082 ".mbb",
6083 #endif
6084 ".mib",
6085 ".mmb",
6086 ".mfb",
6087 ".mlx"
6088 };
6089
6090 /* Nonzero if we should insert stop bits into the schedule. */
6091
6092 int ia64_final_schedule = 0;
6093
6094 /* Codes of the corresponding queried units: */
6095
6096 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6097 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6098
6099 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6100 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6101
6102 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6103
6104 /* The following variable value is an insn group barrier. */
6105
6106 static rtx dfa_stop_insn;
6107
6108 /* The following variable value is the last issued insn. */
6109
6110 static rtx last_scheduled_insn;
6111
6112 /* The following variable value is the size of the DFA state. */
6113
6114 static size_t dfa_state_size;
6115
6116 /* The following variable value is a pointer to a DFA state used as a
6117 temporary variable. */
6118
6119 static state_t temp_dfa_state = NULL;
6120
6121 /* The following variable value is the DFA state after issuing the last
6122 insn. */
6123
6124 static state_t prev_cycle_state = NULL;
6125
6126 /* The following array element values are TRUE if the corresponding
6127 insn requires stop bits to be added before it. */
6128
6129 static char *stops_p;
6130
6131 /* The following variable is used to set up the above-mentioned array. */
6132
6133 static int stop_before_p = 0;
6134
6135 /* The following variable value is the length of the arrays `clocks' and
6136 `add_cycles'. */
6137
6138 static int clocks_length;
6139
6140 /* The following array element values are cycles on which the
6141 corresponding insn will be issued. The array is used only for
6142 Itanium1. */
6143
6144 static int *clocks;
6145
6146 /* The following array element values are numbers of cycles that should be
6147 added to improve insn scheduling for MM_insns for Itanium1. */
6148
6149 static int *add_cycles;
6150
6151 static rtx ia64_single_set (rtx);
6152 static void ia64_emit_insn_before (rtx, rtx);
6153
6154 /* Map a bundle number to its pseudo-op. */
6155
6156 const char *
6157 get_bundle_name (int b)
6158 {
6159 return bundle_name[b];
6160 }
6161
6162
6163 /* Return the maximum number of instructions a cpu can issue. */
6164
6165 static int
6166 ia64_issue_rate (void)
6167 {
6168 return 6;
6169 }
6170
6171 /* Helper function - like single_set, but look inside COND_EXEC. */
6172
6173 static rtx
6174 ia64_single_set (rtx insn)
6175 {
6176 rtx x = PATTERN (insn), ret;
6177 if (GET_CODE (x) == COND_EXEC)
6178 x = COND_EXEC_CODE (x);
6179 if (GET_CODE (x) == SET)
6180 return x;
6181
6182 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6183 Although they are not classical single sets, the second set is there just
6184 to protect them from moving past FP-relative stack accesses. */
6185 switch (recog_memoized (insn))
6186 {
6187 case CODE_FOR_prologue_allocate_stack:
6188 case CODE_FOR_epilogue_deallocate_stack:
6189 ret = XVECEXP (x, 0, 0);
6190 break;
6191
6192 default:
6193 ret = single_set_2 (insn, x);
6194 break;
6195 }
6196
6197 return ret;
6198 }
6199
6200 /* Adjust the cost of a scheduling dependency. Return the new cost of
6201 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
6202
6203 static int
6204 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6205 {
6206 enum attr_itanium_class dep_class;
6207 enum attr_itanium_class insn_class;
6208
6209 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6210 return cost;
6211
6212 insn_class = ia64_safe_itanium_class (insn);
6213 dep_class = ia64_safe_itanium_class (dep_insn);
6214 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6215 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6216 return 0;
6217
6218 return cost;
6219 }
6220
6221 /* Like emit_insn_before, but skip cycle_display notes.
6222 ??? When cycle display notes are implemented, update this. */
6223
6224 static void
6225 ia64_emit_insn_before (rtx insn, rtx before)
6226 {
6227 emit_insn_before (insn, before);
6228 }
6229
6230 /* The following function marks insns that produce addresses for load
6231 and store insns. Such insns will be placed into M slots because this
6232 decreases the latency for Itanium1 (see function
6233 `ia64_produce_address_p' and the DFA descriptions). */
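/* The mark is kept in the insn's `call' field, which is reused here as
   a scratch flag: the loops below first clear it for every insn and
   then set it for each IALU insn that feeds the address of a load or
   store via one of the bypass predicates. */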
6234
6235 static void
6236 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6237 {
6238 rtx insn, link, next, next_tail;
6239
6240 /* Before reload, which_alternative is not set, which means that
6241 ia64_safe_itanium_class will produce wrong results for (at least)
6242 move instructions. */
6243 if (!reload_completed)
6244 return;
6245
6246 next_tail = NEXT_INSN (tail);
6247 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6248 if (INSN_P (insn))
6249 insn->call = 0;
6250 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6251 if (INSN_P (insn)
6252 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6253 {
6254 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6255 {
6256 enum attr_itanium_class c;
6257
6258 if (REG_NOTE_KIND (link) != REG_DEP_TRUE)
6259 continue;
6260 next = XEXP (link, 0);
6261 c = ia64_safe_itanium_class (next);
6262 if ((c == ITANIUM_CLASS_ST
6263 || c == ITANIUM_CLASS_STF)
6264 && ia64_st_address_bypass_p (insn, next))
6265 break;
6266 else if ((c == ITANIUM_CLASS_LD
6267 || c == ITANIUM_CLASS_FLD
6268 || c == ITANIUM_CLASS_FLDP)
6269 && ia64_ld_address_bypass_p (insn, next))
6270 break;
6271 }
6272 insn->call = link != 0;
6273 }
6274 }
6275
6276 /* We're beginning a new block. Initialize data structures as necessary. */
6277
6278 static void
6279 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6280 int sched_verbose ATTRIBUTE_UNUSED,
6281 int max_ready ATTRIBUTE_UNUSED)
6282 {
6283 #ifdef ENABLE_CHECKING
6284 rtx insn;
6285
6286 if (reload_completed)
6287 for (insn = NEXT_INSN (current_sched_info->prev_head);
6288 insn != current_sched_info->next_tail;
6289 insn = NEXT_INSN (insn))
6290 gcc_assert (!SCHED_GROUP_P (insn));
6291 #endif
6292 last_scheduled_insn = NULL_RTX;
6293 init_insn_group_barriers ();
6294 }
6295
6296 /* We are about to begin issuing insns for this clock cycle.
6297 Override the default sort algorithm to better slot instructions. */
6298
6299 static int
6300 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6301 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6302 int reorder_type)
6303 {
6304 int n_asms;
6305 int n_ready = *pn_ready;
6306 rtx *e_ready = ready + n_ready;
6307 rtx *insnp;
6308
6309 if (sched_verbose)
6310 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6311
6312 if (reorder_type == 0)
6313 {
6314 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6315 n_asms = 0;
6316 for (insnp = ready; insnp < e_ready; insnp++)
6317 if (insnp < e_ready)
6318 {
6319 rtx insn = *insnp;
6320 enum attr_type t = ia64_safe_type (insn);
6321 if (t == TYPE_UNKNOWN)
6322 {
6323 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6324 || asm_noperands (PATTERN (insn)) >= 0)
6325 {
6326 rtx lowest = ready[n_asms];
6327 ready[n_asms] = insn;
6328 *insnp = lowest;
6329 n_asms++;
6330 }
6331 else
6332 {
6333 rtx highest = ready[n_ready - 1];
6334 ready[n_ready - 1] = insn;
6335 *insnp = highest;
6336 return 1;
6337 }
6338 }
6339 }
6340
6341 if (n_asms < n_ready)
6342 {
6343 /* Some normal insns to process. Skip the asms. */
6344 ready += n_asms;
6345 n_ready -= n_asms;
6346 }
6347 else if (n_ready > 0)
6348 return 1;
6349 }
6350
6351 if (ia64_final_schedule)
6352 {
6353 int deleted = 0;
6354 int nr_need_stop = 0;
6355
6356 for (insnp = ready; insnp < e_ready; insnp++)
6357 if (safe_group_barrier_needed (*insnp))
6358 nr_need_stop++;
6359
6360 if (reorder_type == 1 && n_ready == nr_need_stop)
6361 return 0;
6362 if (reorder_type == 0)
6363 return 1;
6364 insnp = e_ready;
6365 /* Move down everything that needs a stop bit, preserving
6366 relative order. */
6367 while (insnp-- > ready + deleted)
6368 while (insnp >= ready + deleted)
6369 {
6370 rtx insn = *insnp;
6371 if (! safe_group_barrier_needed (insn))
6372 break;
6373 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6374 *ready = insn;
6375 deleted++;
6376 }
6377 n_ready -= deleted;
6378 ready += deleted;
6379 }
6380
6381 return 1;
6382 }
6383
6384 /* We are about to begin issuing insns for this clock cycle. Override
6385 the default sort algorithm to better slot instructions. */
6386
6387 static int
6388 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6389 int clock_var)
6390 {
6391 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6392 pn_ready, clock_var, 0);
6393 }
6394
6395 /* Like ia64_sched_reorder, but called after issuing each insn.
6396 Override the default sort algorithm to better slot instructions. */
6397
6398 static int
6399 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6400 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6401 int *pn_ready, int clock_var)
6402 {
6403 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6404 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6405 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6406 clock_var, 1);
6407 }
6408
6409 /* We are about to issue INSN. Return the number of insns left on the
6410 ready queue that can be issued this cycle. */
6411
6412 static int
6413 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6414 int sched_verbose ATTRIBUTE_UNUSED,
6415 rtx insn ATTRIBUTE_UNUSED,
6416 int can_issue_more ATTRIBUTE_UNUSED)
6417 {
6418 last_scheduled_insn = insn;
6419 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6420 if (reload_completed)
6421 {
6422 int needed = group_barrier_needed (insn);
6423
6424 gcc_assert (!needed);
6425 if (GET_CODE (insn) == CALL_INSN)
6426 init_insn_group_barriers ();
6427 stops_p [INSN_UID (insn)] = stop_before_p;
6428 stop_before_p = 0;
6429 }
6430 return 1;
6431 }
6432
6433 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6434 can be chosen. */
6435
6436 static int
6437 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6438 {
6439 gcc_assert (insn && INSN_P (insn));
6440 return (!reload_completed
6441 || !safe_group_barrier_needed (insn));
6442 }
6443
6444 /* The following variable value is a pseudo-insn used by the DFA insn
6445 scheduler to change the DFA state when the simulated clock is
6446 increased. */
6447
6448 static rtx dfa_pre_cycle_insn;
6449
6450 /* We are about to begin issuing INSN. Return nonzero if we cannot
6451 issue it on the given cycle CLOCK; in that case *SORT_P is set to zero
6452 if the ready queue should not be sorted on the next clock start. */
6453
6454 static int
6455 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6456 int clock, int *sort_p)
6457 {
6458 int setup_clocks_p = FALSE;
6459
6460 gcc_assert (insn && INSN_P (insn));
6461 if ((reload_completed && safe_group_barrier_needed (insn))
6462 || (last_scheduled_insn
6463 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6464 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6465 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6466 {
6467 init_insn_group_barriers ();
6468 if (verbose && dump)
6469 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6470 last_clock == clock ? " + cycle advance" : "");
6471 stop_before_p = 1;
6472 if (last_clock == clock)
6473 {
6474 state_transition (curr_state, dfa_stop_insn);
6475 if (TARGET_EARLY_STOP_BITS)
6476 *sort_p = (last_scheduled_insn == NULL_RTX
6477 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6478 else
6479 *sort_p = 0;
6480 return 1;
6481 }
6482 else if (reload_completed)
6483 setup_clocks_p = TRUE;
6484 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6485 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6486 state_reset (curr_state);
6487 else
6488 {
6489 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6490 state_transition (curr_state, dfa_stop_insn);
6491 state_transition (curr_state, dfa_pre_cycle_insn);
6492 state_transition (curr_state, NULL);
6493 }
6494 }
6495 else if (reload_completed)
6496 setup_clocks_p = TRUE;
6497 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6498 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6499 && asm_noperands (PATTERN (insn)) < 0)
6500 {
6501 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6502
6503 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6504 {
6505 rtx link;
6506 int d = -1;
6507
6508 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6509 if (REG_NOTE_KIND (link) == 0)
6510 {
6511 enum attr_itanium_class dep_class;
6512 rtx dep_insn = XEXP (link, 0);
6513
6514 dep_class = ia64_safe_itanium_class (dep_insn);
6515 if ((dep_class == ITANIUM_CLASS_MMMUL
6516 || dep_class == ITANIUM_CLASS_MMSHF)
6517 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6518 && (d < 0
6519 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6520 d = last_clock - clocks [INSN_UID (dep_insn)];
6521 }
6522 if (d >= 0)
6523 add_cycles [INSN_UID (insn)] = 3 - d;
6524 }
6525 }
6526 return 0;
6527 }
6528
6529 \f
6530
6531 /* The following page contains abstract data `bundle states' which are
6532 used for bundling insns (inserting nops and template generation). */
6533
6534 /* The following describes state of insn bundling. */
6535
6536 struct bundle_state
6537 {
6538 /* Unique bundle state number to identify them in the debugging
6539 output */
6540 int unique_num;
6541 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6542 /* number of nops before and after the insn */
6543 short before_nops_num, after_nops_num;
6544 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
6545 insn) */
6546 int cost; /* cost of the state in cycles */
6547 int accumulated_insns_num; /* number of all previous insns including
6548 nops. L is considered as 2 insns */
6549 int branch_deviation; /* deviation of previous branches from 3rd slots */
6550 struct bundle_state *next; /* next state with the same insn_num */
6551 struct bundle_state *originator; /* originator (previous insn state) */
6552 /* All bundle states are in the following chain. */
6553 struct bundle_state *allocated_states_chain;
6554 /* The DFA State after issuing the insn and the nops. */
6555 state_t dfa_state;
6556 };
6557
6558 /* The following maps an insn number to the corresponding bundle state. */
6559
6560 static struct bundle_state **index_to_bundle_states;
6561
6562 /* The unique number of the next bundle state. */
6563
6564 static int bundle_states_num;
6565
6566 /* All allocated bundle states are in the following chain. */
6567
6568 static struct bundle_state *allocated_bundle_states_chain;
6569
6570 /* All allocated but not used bundle states are in the following
6571 chain. */
6572
6573 static struct bundle_state *free_bundle_state_chain;
6574
6575
6576 /* The following function returns a free bundle state. */
6577
6578 static struct bundle_state *
6579 get_free_bundle_state (void)
6580 {
6581 struct bundle_state *result;
6582
6583 if (free_bundle_state_chain != NULL)
6584 {
6585 result = free_bundle_state_chain;
6586 free_bundle_state_chain = result->next;
6587 }
6588 else
6589 {
6590 result = xmalloc (sizeof (struct bundle_state));
6591 result->dfa_state = xmalloc (dfa_state_size);
6592 result->allocated_states_chain = allocated_bundle_states_chain;
6593 allocated_bundle_states_chain = result;
6594 }
6595 result->unique_num = bundle_states_num++;
6596 return result;
6597
6598 }
6599
6600 /* The following function frees the given bundle state. */
6601
6602 static void
6603 free_bundle_state (struct bundle_state *state)
6604 {
6605 state->next = free_bundle_state_chain;
6606 free_bundle_state_chain = state;
6607 }
6608
6609 /* Start work with abstract data `bundle states'. */
6610
6611 static void
6612 initiate_bundle_states (void)
6613 {
6614 bundle_states_num = 0;
6615 free_bundle_state_chain = NULL;
6616 allocated_bundle_states_chain = NULL;
6617 }
6618
6619 /* Finish work with abstract data `bundle states'. */
6620
6621 static void
6622 finish_bundle_states (void)
6623 {
6624 struct bundle_state *curr_state, *next_state;
6625
6626 for (curr_state = allocated_bundle_states_chain;
6627 curr_state != NULL;
6628 curr_state = next_state)
6629 {
6630 next_state = curr_state->allocated_states_chain;
6631 free (curr_state->dfa_state);
6632 free (curr_state);
6633 }
6634 }
6635
6636 /* Hash table of the bundle states. The key is dfa_state and insn_num
6637 of the bundle states. */
6638
6639 static htab_t bundle_state_table;
6640
6641 /* The function returns hash of BUNDLE_STATE. */
6642
6643 static unsigned
6644 bundle_state_hash (const void *bundle_state)
6645 {
6646 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6647 unsigned result, i;
6648
6649 for (result = i = 0; i < dfa_state_size; i++)
6650 result += (((unsigned char *) state->dfa_state) [i]
6651 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6652 return result + state->insn_num;
6653 }
6654
6655 /* The function returns nonzero if the bundle state keys are equal. */
6656
6657 static int
6658 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6659 {
6660 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6661 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6662
6663 return (state1->insn_num == state2->insn_num
6664 && memcmp (state1->dfa_state, state2->dfa_state,
6665 dfa_state_size) == 0);
6666 }
6667
6668 /* The function inserts the BUNDLE_STATE into the hash table. The
6669 function returns nonzero if the bundle has been inserted into the
6670 table. The table contains the best bundle state with the given key. */
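/* "Best" means: smallest cost first, then fewest accumulated insns
   (i.e. fewest inserted nops), then smallest branch deviation, as
   implemented by the comparison below. */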
6671
6672 static int
6673 insert_bundle_state (struct bundle_state *bundle_state)
6674 {
6675 void **entry_ptr;
6676
6677 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6678 if (*entry_ptr == NULL)
6679 {
6680 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6681 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6682 *entry_ptr = (void *) bundle_state;
6683 return TRUE;
6684 }
6685 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6686 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6687 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6688 > bundle_state->accumulated_insns_num
6689 || (((struct bundle_state *)
6690 *entry_ptr)->accumulated_insns_num
6691 == bundle_state->accumulated_insns_num
6692 && ((struct bundle_state *)
6693 *entry_ptr)->branch_deviation
6694 > bundle_state->branch_deviation))))
6695
6696 {
6697 struct bundle_state temp;
6698
6699 temp = *(struct bundle_state *) *entry_ptr;
6700 *(struct bundle_state *) *entry_ptr = *bundle_state;
6701 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6702 *bundle_state = temp;
6703 }
6704 return FALSE;
6705 }
6706
6707 /* Start work with the hash table. */
6708
6709 static void
6710 initiate_bundle_state_table (void)
6711 {
6712 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6713 (htab_del) 0);
6714 }
6715
6716 /* Finish work with the hash table. */
6717
6718 static void
6719 finish_bundle_state_table (void)
6720 {
6721 htab_delete (bundle_state_table);
6722 }
6723
6724 \f
6725
6726 /* The following variable is an insn `nop' used to check bundle states
6727 with different numbers of inserted nops. */
6728
6729 static rtx ia64_nop;
6730
6731 /* The following function tries to issue NOPS_NUM nops for the current
6732 state without advancing the processor cycle. If it fails, the
6733 function returns FALSE and frees the current state. */
6734
6735 static int
6736 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6737 {
6738 int i;
6739
6740 for (i = 0; i < nops_num; i++)
6741 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6742 {
6743 free_bundle_state (curr_state);
6744 return FALSE;
6745 }
6746 return TRUE;
6747 }
6748
6749 /* The following function tries to issue INSN for the current
6750 state without advancing the processor cycle. If it fails, the
6751 function returns FALSE and frees the current state. */
6752
6753 static int
6754 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6755 {
6756 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6757 {
6758 free_bundle_state (curr_state);
6759 return FALSE;
6760 }
6761 return TRUE;
6762 }
6763
6764 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6765 starting with ORIGINATOR without advancing the processor cycle. If
6766 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6767 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6768 If it is successful, the function creates a new bundle state and
6769 inserts it into the hash table and into `index_to_bundle_states'. */
6770
6771 static void
6772 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6773 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6774 {
6775 struct bundle_state *curr_state;
6776
6777 curr_state = get_free_bundle_state ();
6778 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6779 curr_state->insn = insn;
6780 curr_state->insn_num = originator->insn_num + 1;
6781 curr_state->cost = originator->cost;
6782 curr_state->originator = originator;
6783 curr_state->before_nops_num = before_nops_num;
6784 curr_state->after_nops_num = 0;
6785 curr_state->accumulated_insns_num
6786 = originator->accumulated_insns_num + before_nops_num;
6787 curr_state->branch_deviation = originator->branch_deviation;
6788 gcc_assert (insn);
6789 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6790 {
6791 gcc_assert (GET_MODE (insn) != TImode);
6792 if (!try_issue_nops (curr_state, before_nops_num))
6793 return;
6794 if (!try_issue_insn (curr_state, insn))
6795 return;
6796 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6797 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6798 && curr_state->accumulated_insns_num % 3 != 0)
6799 {
6800 free_bundle_state (curr_state);
6801 return;
6802 }
6803 }
6804 else if (GET_MODE (insn) != TImode)
6805 {
6806 if (!try_issue_nops (curr_state, before_nops_num))
6807 return;
6808 if (!try_issue_insn (curr_state, insn))
6809 return;
6810 curr_state->accumulated_insns_num++;
6811 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
6812 && asm_noperands (PATTERN (insn)) < 0);
6813
6814 if (ia64_safe_type (insn) == TYPE_L)
6815 curr_state->accumulated_insns_num++;
6816 }
6817 else
6818 {
6819 /* If this is an insn that must be first in a group, then don't allow
6820 nops to be emitted before it. Currently, alloc is the only such
6821 supported instruction. */
6822 /* ??? The bundling automatons should handle this for us, but they do
6823 not yet have support for the first_insn attribute. */
6824 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
6825 {
6826 free_bundle_state (curr_state);
6827 return;
6828 }
6829
6830 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6831 state_transition (curr_state->dfa_state, NULL);
6832 curr_state->cost++;
6833 if (!try_issue_nops (curr_state, before_nops_num))
6834 return;
6835 if (!try_issue_insn (curr_state, insn))
6836 return;
6837 curr_state->accumulated_insns_num++;
6838 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6839 || asm_noperands (PATTERN (insn)) >= 0)
6840 {
6841 /* Finish bundle containing asm insn. */
6842 curr_state->after_nops_num
6843 = 3 - curr_state->accumulated_insns_num % 3;
6844 curr_state->accumulated_insns_num
6845 += 3 - curr_state->accumulated_insns_num % 3;
6846 }
6847 else if (ia64_safe_type (insn) == TYPE_L)
6848 curr_state->accumulated_insns_num++;
6849 }
6850 if (ia64_safe_type (insn) == TYPE_B)
6851 curr_state->branch_deviation
6852 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6853 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6854 {
6855 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6856 {
6857 state_t dfa_state;
6858 struct bundle_state *curr_state1;
6859 struct bundle_state *allocated_states_chain;
6860
6861 curr_state1 = get_free_bundle_state ();
6862 dfa_state = curr_state1->dfa_state;
6863 allocated_states_chain = curr_state1->allocated_states_chain;
6864 *curr_state1 = *curr_state;
6865 curr_state1->dfa_state = dfa_state;
6866 curr_state1->allocated_states_chain = allocated_states_chain;
6867 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6868 dfa_state_size);
6869 curr_state = curr_state1;
6870 }
6871 if (!try_issue_nops (curr_state,
6872 3 - curr_state->accumulated_insns_num % 3))
6873 return;
6874 curr_state->after_nops_num
6875 = 3 - curr_state->accumulated_insns_num % 3;
6876 curr_state->accumulated_insns_num
6877 += 3 - curr_state->accumulated_insns_num % 3;
6878 }
6879 if (!insert_bundle_state (curr_state))
6880 free_bundle_state (curr_state);
6881 return;
6882 }
6883
6884 /* The following function returns the position in the two-bundle window
6885 for the given STATE. */
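/* Positions 1-3 correspond to the slots of the first bundle in the
   window and positions 4-6 to the slots of the second bundle; 0 means
   that no slot of the window has been filled yet. */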
6886
6887 static int
6888 get_max_pos (state_t state)
6889 {
6890 if (cpu_unit_reservation_p (state, pos_6))
6891 return 6;
6892 else if (cpu_unit_reservation_p (state, pos_5))
6893 return 5;
6894 else if (cpu_unit_reservation_p (state, pos_4))
6895 return 4;
6896 else if (cpu_unit_reservation_p (state, pos_3))
6897 return 3;
6898 else if (cpu_unit_reservation_p (state, pos_2))
6899 return 2;
6900 else if (cpu_unit_reservation_p (state, pos_1))
6901 return 1;
6902 else
6903 return 0;
6904 }
6905
6906 /* The function returns the code of a possible template for the given
6907 position and state. The function should be called only with the two
6908 position values 3 or 6. We avoid generating F NOPs by putting
6909 templates containing F insns at the end of the template search,
6910 because of an undocumented anomaly in McKinley-derived cores which can
6911 cause stalls if an F-unit insn (including a NOP) is issued within a
6912 six-cycle window after reading certain application registers (such
6913 as ar.bsp). Furthermore, power considerations also argue against
6914 the use of F-unit instructions unless they're really needed. */
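/* The returned code is an index into bundle_name[] above; e.g. a
   return value of 1 selects the ".mmi" template and 9 selects ".mlx". */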
6915
6916 static int
6917 get_template (state_t state, int pos)
6918 {
6919 switch (pos)
6920 {
6921 case 3:
6922 if (cpu_unit_reservation_p (state, _0mmi_))
6923 return 1;
6924 else if (cpu_unit_reservation_p (state, _0mii_))
6925 return 0;
6926 else if (cpu_unit_reservation_p (state, _0mmb_))
6927 return 7;
6928 else if (cpu_unit_reservation_p (state, _0mib_))
6929 return 6;
6930 else if (cpu_unit_reservation_p (state, _0mbb_))
6931 return 5;
6932 else if (cpu_unit_reservation_p (state, _0bbb_))
6933 return 4;
6934 else if (cpu_unit_reservation_p (state, _0mmf_))
6935 return 3;
6936 else if (cpu_unit_reservation_p (state, _0mfi_))
6937 return 2;
6938 else if (cpu_unit_reservation_p (state, _0mfb_))
6939 return 8;
6940 else if (cpu_unit_reservation_p (state, _0mlx_))
6941 return 9;
6942 else
6943 gcc_unreachable ();
6944 case 6:
6945 if (cpu_unit_reservation_p (state, _1mmi_))
6946 return 1;
6947 else if (cpu_unit_reservation_p (state, _1mii_))
6948 return 0;
6949 else if (cpu_unit_reservation_p (state, _1mmb_))
6950 return 7;
6951 else if (cpu_unit_reservation_p (state, _1mib_))
6952 return 6;
6953 else if (cpu_unit_reservation_p (state, _1mbb_))
6954 return 5;
6955 else if (cpu_unit_reservation_p (state, _1bbb_))
6956 return 4;
6957 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6958 return 3;
6959 else if (cpu_unit_reservation_p (state, _1mfi_))
6960 return 2;
6961 else if (cpu_unit_reservation_p (state, _1mfb_))
6962 return 8;
6963 else if (cpu_unit_reservation_p (state, _1mlx_))
6964 return 9;
6965 else
6966 gcc_unreachable ();
6967 default:
6968 gcc_unreachable ();
6969 }
6970 }
6971
6972 /* The following function returns the first insn important for insn
6973 bundling, starting at INSN and before TAIL. */
6974
6975 static rtx
6976 get_next_important_insn (rtx insn, rtx tail)
6977 {
6978 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6979 if (INSN_P (insn)
6980 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6981 && GET_CODE (PATTERN (insn)) != USE
6982 && GET_CODE (PATTERN (insn)) != CLOBBER)
6983 return insn;
6984 return NULL_RTX;
6985 }
6986
6987 /* The following function does insn bundling. Bundling means
6988 inserting templates and nop insns to fit insn groups into permitted
6989 templates. Instruction scheduling uses an NDFA (non-deterministic
6990 finite automaton) encoding information about the templates and the
6991 inserted nops. The nondeterminism of the automaton permits following
6992 all possible insn sequences very quickly.
6993
6994 Unfortunately it is not possible to get information about inserting
6995 nop insns and used templates from the automaton states. The
6996 automaton only says that we can issue an insn, possibly inserting
6997 some nops before it and using some template. Therefore insn
6998 bundling in this function is implemented by using a DFA
6999 (deterministic finite automaton). We follow all possible insn
7000 sequences by inserting 0-2 nops (which is what the NDFA describes for
7001 insn scheduling) before/after each insn being bundled. We know the
7002 start of a simulated processor cycle from insn scheduling (an insn
7003 starting a new cycle has TImode).
7004
7005 A simple implementation of insn bundling would create an enormous
7006 number of possible insn sequences satisfying information about new
7007 cycle ticks taken from the insn scheduling. To make the algorithm
7008 practical we use dynamic programming. Each decision (about
7009 inserting nops and implicitly about previous decisions) is described
7010 by the structure bundle_state (see above). If we generate the same
7011 bundle state (the key is the automaton state after issuing the insns and
7012 nops for it), we reuse the already generated one. As a consequence we
7013 reject some decisions which cannot improve the solution and
7014 reduce the memory used by the algorithm.
7015
7016 When we reach the end of the EBB (extended basic block), we choose the
7017 best sequence and then, moving back through the EBB, insert templates for
7018 the best alternative. The templates are taken by querying the
7019 automaton state for each insn in the chosen bundle states.
7020
7021 So the algorithm makes two (forward and backward) passes through the
7022 EBB. There is an additional forward pass through the EBB for the Itanium1
7023 processor. This pass inserts more nops to make the dependency between
7024 a producer insn and MMMUL/MMSHF at least 4 cycles long. */
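/* Schematically, the first (forward) pass below does, for each
   important insn I with index i:

       for each state S in index_to_bundle_states[i - 1]
         for nops in { 2, 1, 0 }   (2 only for some insn types)
           try to issue `nops' nop insns and then I starting from S,
           possibly also filling up the current bundle, and insert the
           resulting state into index_to_bundle_states[i], keeping only
           the best state for each (DFA state, insn number) key.

   The second (backward) pass then walks the originator links of the
   best final state and materializes the chosen nops and templates. */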
7025
7026 static void
7027 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7028 {
7029 struct bundle_state *curr_state, *next_state, *best_state;
7030 rtx insn, next_insn;
7031 int insn_num;
7032 int i, bundle_end_p, only_bundle_end_p, asm_p;
7033 int pos = 0, max_pos, template0, template1;
7034 rtx b;
7035 rtx nop;
7036 enum attr_type type;
7037
7038 insn_num = 0;
7039 /* Count insns in the EBB. */
7040 for (insn = NEXT_INSN (prev_head_insn);
7041 insn && insn != tail;
7042 insn = NEXT_INSN (insn))
7043 if (INSN_P (insn))
7044 insn_num++;
7045 if (insn_num == 0)
7046 return;
7047 bundling_p = 1;
7048 dfa_clean_insn_cache ();
7049 initiate_bundle_state_table ();
7050 index_to_bundle_states = xmalloc ((insn_num + 2)
7051 * sizeof (struct bundle_state *));
7052 /* First (forward) pass -- generation of bundle states. */
7053 curr_state = get_free_bundle_state ();
7054 curr_state->insn = NULL;
7055 curr_state->before_nops_num = 0;
7056 curr_state->after_nops_num = 0;
7057 curr_state->insn_num = 0;
7058 curr_state->cost = 0;
7059 curr_state->accumulated_insns_num = 0;
7060 curr_state->branch_deviation = 0;
7061 curr_state->next = NULL;
7062 curr_state->originator = NULL;
7063 state_reset (curr_state->dfa_state);
7064 index_to_bundle_states [0] = curr_state;
7065 insn_num = 0;
7066 /* Shift cycle mark if it is put on insn which could be ignored. */
7067 for (insn = NEXT_INSN (prev_head_insn);
7068 insn != tail;
7069 insn = NEXT_INSN (insn))
7070 if (INSN_P (insn)
7071 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7072 || GET_CODE (PATTERN (insn)) == USE
7073 || GET_CODE (PATTERN (insn)) == CLOBBER)
7074 && GET_MODE (insn) == TImode)
7075 {
7076 PUT_MODE (insn, VOIDmode);
7077 for (next_insn = NEXT_INSN (insn);
7078 next_insn != tail;
7079 next_insn = NEXT_INSN (next_insn))
7080 if (INSN_P (next_insn)
7081 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7082 && GET_CODE (PATTERN (next_insn)) != USE
7083 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7084 {
7085 PUT_MODE (next_insn, TImode);
7086 break;
7087 }
7088 }
7089 /* Forward pass: generation of bundle states. */
7090 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7091 insn != NULL_RTX;
7092 insn = next_insn)
7093 {
7094 gcc_assert (INSN_P (insn)
7095 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7096 && GET_CODE (PATTERN (insn)) != USE
7097 && GET_CODE (PATTERN (insn)) != CLOBBER);
7098 type = ia64_safe_type (insn);
7099 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7100 insn_num++;
7101 index_to_bundle_states [insn_num] = NULL;
7102 for (curr_state = index_to_bundle_states [insn_num - 1];
7103 curr_state != NULL;
7104 curr_state = next_state)
7105 {
7106 pos = curr_state->accumulated_insns_num % 3;
7107 next_state = curr_state->next;
7108 /* We must fill up the current bundle in order to start a
7109 subsequent asm insn in a new bundle. An asm insn is always
7110 placed in a separate bundle. */
7111 only_bundle_end_p
7112 = (next_insn != NULL_RTX
7113 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7114 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7115 /* We may fill up the current bundle if it is the cycle end
7116 without a group barrier. */
7117 bundle_end_p
7118 = (only_bundle_end_p || next_insn == NULL_RTX
7119 || (GET_MODE (next_insn) == TImode
7120 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7121 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7122 || type == TYPE_S
7123 /* We need to insert 2 nops for cases like M_MII. To
7124 guarantee issuing all insns on the same cycle for
7125 Itanium 1, we need to issue 2 nops after the first M
7126 insn (MnnMII where n is a nop insn). */
7127 || ((type == TYPE_M || type == TYPE_A)
7128 && ia64_tune == PROCESSOR_ITANIUM
7129 && !bundle_end_p && pos == 1))
7130 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7131 only_bundle_end_p);
7132 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7133 only_bundle_end_p);
7134 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7135 only_bundle_end_p);
7136 }
7137 gcc_assert (index_to_bundle_states [insn_num]);
7138 for (curr_state = index_to_bundle_states [insn_num];
7139 curr_state != NULL;
7140 curr_state = curr_state->next)
7141 if (verbose >= 2 && dump)
7142 {
7143 /* This structure is taken from generated code of the
7144 pipeline hazard recognizer (see file insn-attrtab.c).
7145 Please don't forget to change the structure if a new
7146 automaton is added to .md file. */
7147 struct DFA_chip
7148 {
7149 unsigned short one_automaton_state;
7150 unsigned short oneb_automaton_state;
7151 unsigned short two_automaton_state;
7152 unsigned short twob_automaton_state;
7153 };
7154
7155 fprintf
7156 (dump,
7157 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7158 curr_state->unique_num,
7159 (curr_state->originator == NULL
7160 ? -1 : curr_state->originator->unique_num),
7161 curr_state->cost,
7162 curr_state->before_nops_num, curr_state->after_nops_num,
7163 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7164 (ia64_tune == PROCESSOR_ITANIUM
7165 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7166 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7167 INSN_UID (insn));
7168 }
7169 }
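/* A rough sketch of what the forward pass explores (illustrative only;
   the real slot/template legality is checked by the DFA through
   issue_nops_and_insn above).  If a state's last bundle so far holds
   "M I _", the three candidates tried for the next insn X are:

       0 nops:   M I X
       1 nop:    M I nop   | X . .
       2 nops:   M I nop   | nop X .

   where "|" marks a bundle boundary and "." an as yet unfilled slot.  */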
7170
7171 /* We should find a solution because the 2nd insn scheduling has
7172 found one. */
7173 gcc_assert (index_to_bundle_states [insn_num]);
7174 /* Find a state corresponding to the best insn sequence. */
7175 best_state = NULL;
7176 for (curr_state = index_to_bundle_states [insn_num];
7177 curr_state != NULL;
7178 curr_state = curr_state->next)
7179 /* We consider only states whose last bundle is completely filled.
7180 Among them we prefer insn sequences with minimal cost, then with
7181 the fewest inserted nops, and finally with branch insns placed
7182 in the 3rd slots.  */
7183 if (curr_state->accumulated_insns_num % 3 == 0
7184 && (best_state == NULL || best_state->cost > curr_state->cost
7185 || (best_state->cost == curr_state->cost
7186 && (curr_state->accumulated_insns_num
7187 < best_state->accumulated_insns_num
7188 || (curr_state->accumulated_insns_num
7189 == best_state->accumulated_insns_num
7190 && curr_state->branch_deviation
7191 < best_state->branch_deviation)))))
7192 best_state = curr_state;
7193 /* Second (backward) pass: adding nops and templates. */
7194 insn_num = best_state->before_nops_num;
7195 template0 = template1 = -1;
7196 for (curr_state = best_state;
7197 curr_state->originator != NULL;
7198 curr_state = curr_state->originator)
7199 {
7200 insn = curr_state->insn;
7201 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7202 || asm_noperands (PATTERN (insn)) >= 0);
7203 insn_num++;
7204 if (verbose >= 2 && dump)
7205 {
7206 struct DFA_chip
7207 {
7208 unsigned short one_automaton_state;
7209 unsigned short oneb_automaton_state;
7210 unsigned short two_automaton_state;
7211 unsigned short twob_automaton_state;
7212 };
7213
7214 fprintf
7215 (dump,
7216 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7217 curr_state->unique_num,
7218 (curr_state->originator == NULL
7219 ? -1 : curr_state->originator->unique_num),
7220 curr_state->cost,
7221 curr_state->before_nops_num, curr_state->after_nops_num,
7222 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7223 (ia64_tune == PROCESSOR_ITANIUM
7224 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7225 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7226 INSN_UID (insn));
7227 }
7228 /* Find the position in the current bundle window.  The window can
7229 contain at most two bundles.  A two-bundle window means that
7230 the processor will make two bundle rotations.  */
7231 max_pos = get_max_pos (curr_state->dfa_state);
7232 if (max_pos == 6
7233 /* The following (negative template number) means that the
7234 processor did one bundle rotation. */
7235 || (max_pos == 3 && template0 < 0))
7236 {
7237 /* We are at the end of the window -- find template(s) for
7238 its bundle(s). */
7239 pos = max_pos;
7240 if (max_pos == 3)
7241 template0 = get_template (curr_state->dfa_state, 3);
7242 else
7243 {
7244 template1 = get_template (curr_state->dfa_state, 3);
7245 template0 = get_template (curr_state->dfa_state, 6);
7246 }
7247 }
7248 if (max_pos > 3 && template1 < 0)
7249 /* This may happen when there is a stop inside a bundle.  */
7250 {
7251 gcc_assert (pos <= 3);
7252 template1 = get_template (curr_state->dfa_state, 3);
7253 pos += 3;
7254 }
7255 if (!asm_p)
7256 /* Emit nops after the current insn. */
7257 for (i = 0; i < curr_state->after_nops_num; i++)
7258 {
7259 nop = gen_nop ();
7260 emit_insn_after (nop, insn);
7261 pos--;
7262 gcc_assert (pos >= 0);
7263 if (pos % 3 == 0)
7264 {
7265 /* We are at the start of a bundle: emit the template
7266 (it should be defined). */
7267 gcc_assert (template0 >= 0);
7268 b = gen_bundle_selector (GEN_INT (template0));
7269 ia64_emit_insn_before (b, nop);
7270 /* If we have a two-bundle window, we make one bundle
7271 rotation.  Otherwise template0 will be undefined
7272 (a negative value). */
7273 template0 = template1;
7274 template1 = -1;
7275 }
7276 }
7277 /* Move the position backward in the window.  A group barrier has
7278 no slot.  An asm insn takes a whole bundle.  */
7279 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7280 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7281 && asm_noperands (PATTERN (insn)) < 0)
7282 pos--;
7283 /* Long insn takes 2 slots. */
7284 if (ia64_safe_type (insn) == TYPE_L)
7285 pos--;
7286 gcc_assert (pos >= 0);
7287 if (pos % 3 == 0
7288 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7289 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7290 && asm_noperands (PATTERN (insn)) < 0)
7291 {
7292 /* The current insn is at the bundle start: emit the
7293 template. */
7294 gcc_assert (template0 >= 0);
7295 b = gen_bundle_selector (GEN_INT (template0));
7296 ia64_emit_insn_before (b, insn);
7297 b = PREV_INSN (insn);
7298 insn = b;
7299 /* See comment above in analogous place for emitting nops
7300 after the insn. */
7301 template0 = template1;
7302 template1 = -1;
7303 }
7304 /* Emit nops before the current insn.  */
7305 for (i = 0; i < curr_state->before_nops_num; i++)
7306 {
7307 nop = gen_nop ();
7308 ia64_emit_insn_before (nop, insn);
7309 nop = PREV_INSN (insn);
7310 insn = nop;
7311 pos--;
7312 gcc_assert (pos >= 0);
7313 if (pos % 3 == 0)
7314 {
7315 /* See comment above in analogous place for emitting nops
7316 after the insn. */
7317 gcc_assert (template0 >= 0);
7318 b = gen_bundle_selector (GEN_INT (template0));
7319 ia64_emit_insn_before (b, insn);
7320 b = PREV_INSN (insn);
7321 insn = b;
7322 template0 = template1;
7323 template1 = -1;
7324 }
7325 }
7326 }
7327 if (ia64_tune == PROCESSOR_ITANIUM)
7328 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7329 Itanium 1 has a quirk: if the distance between an insn and a
7330 dependent MM-insn is less than 4 cycles, we get an additional
7331 6-cycle stall.  So we pad the distance out to 4 cycles if it
7332 is shorter.  */
7333 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7334 insn != NULL_RTX;
7335 insn = next_insn)
7336 {
7337 gcc_assert (INSN_P (insn)
7338 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7339 && GET_CODE (PATTERN (insn)) != USE
7340 && GET_CODE (PATTERN (insn)) != CLOBBER);
7341 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7342 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
7343 /* We found a MM-insn which needs additional cycles. */
7344 {
7345 rtx last;
7346 int i, j, n;
7347 int pred_stop_p;
7348
7349 /* Now we are searching for the template of the bundle in
7350 which the MM-insn is placed and for the position of the
7351 insn in the bundle (0, 1, 2).  We also check whether
7352 there is a stop before the insn.  */
7353 last = prev_active_insn (insn);
7354 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7355 if (pred_stop_p)
7356 last = prev_active_insn (last);
7357 n = 0;
7358 for (;; last = prev_active_insn (last))
7359 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7360 {
7361 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
7362 if (template0 == 9)
7363 /* The insn is in an MLX bundle.  Change the template
7364 to MFI because we will add nops before the
7365 insn.  This simplifies the subsequent code a lot.  */
7366 PATTERN (last)
7367 = gen_bundle_selector (const2_rtx); /* -> MFI */
7368 break;
7369 }
7370 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
7371 && (ia64_safe_itanium_class (last)
7372 != ITANIUM_CLASS_IGNORE))
7373 n++;
7374 /* Sanity checks: the stop is not at the bundle start,
7375 there are no more than 3 insns in the bundle, and the
7376 MM-insn is not at the start of a bundle with
7377 template MLX.  */
7378 gcc_assert ((!pred_stop_p || n)
7379 && n <= 2
7380 && (template0 != 9 || !n));
7381 /* Put nops after the insn in the bundle. */
7382 for (j = 3 - n; j > 0; j --)
7383 ia64_emit_insn_before (gen_nop (), insn);
7384 /* This takes into account that we will add N more nops
7385 before the insn later on -- please see the code below.  */
7386 add_cycles [INSN_UID (insn)]--;
7387 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7388 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7389 insn);
7390 if (pred_stop_p)
7391 add_cycles [INSN_UID (insn)]--;
7392 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7393 {
7394 /* Insert "MII;" template. */
7395 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7396 insn);
7397 ia64_emit_insn_before (gen_nop (), insn);
7398 ia64_emit_insn_before (gen_nop (), insn);
7399 if (i > 1)
7400 {
7401 /* To decrease code size, we use "MI;I;"
7402 template. */
7403 ia64_emit_insn_before
7404 (gen_insn_group_barrier (GEN_INT (3)), insn);
7405 i--;
7406 }
7407 ia64_emit_insn_before (gen_nop (), insn);
7408 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7409 insn);
7410 }
7411 /* Put the MM-insn in the same slot of a bundle with the
7412 same template as the original one. */
7413 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7414 insn);
7415 /* To put the insn in the same slot, add necessary number
7416 of nops. */
7417 for (j = n; j > 0; j --)
7418 ia64_emit_insn_before (gen_nop (), insn);
7419 /* Emit the stop bit if the original bundle had one.  */
7420 if (pred_stop_p)
7421 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7422 insn);
7423 }
7424 }
7425 free (index_to_bundle_states);
7426 finish_bundle_state_table ();
7427 bundling_p = 0;
7428 dfa_clean_insn_cache ();
7429 }
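/* As a concrete (and purely illustrative) picture of the result of
   bundling, the insn stream for one issue group might end up as

       bundle_selector <template>   ; e.g. the MII or MFI template
       <M-type insn>                ; slot 0
       <I- or F-type insn>          ; slot 1
       nop                          ; slot 2, inserted above if needed
       insn_group_barrier (3)       ; becomes the stop bit closing the group

   The template numbers referred to above (0 for "MII;", 2 for MFI,
   9 for MLX) are the ones assumed by the comments in this function;
   the mapping from numbers to template names is handled with the
   bundle_selector pattern elsewhere in this port.  */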
7430
7431 /* The following function is called at the end of scheduling a BB or
7432 an EBB.  After reload, it inserts stop bits and does insn bundling.  */
7433
7434 static void
7435 ia64_sched_finish (FILE *dump, int sched_verbose)
7436 {
7437 if (sched_verbose)
7438 fprintf (dump, "// Finishing schedule.\n");
7439 if (!reload_completed)
7440 return;
7441 if (reload_completed)
7442 {
7443 final_emit_insn_group_barriers (dump);
7444 bundling (dump, sched_verbose, current_sched_info->prev_head,
7445 current_sched_info->next_tail);
7446 if (sched_verbose && dump)
7447 fprintf (dump, "// finishing %d-%d\n",
7448 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7449 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7450
7451 return;
7452 }
7453 }
7454
7455 /* The following function inserts stop bits in scheduled BB or EBB. */
7456
7457 static void
7458 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7459 {
7460 rtx insn;
7461 int need_barrier_p = 0;
7462 rtx prev_insn = NULL_RTX;
7463
7464 init_insn_group_barriers ();
7465
7466 for (insn = NEXT_INSN (current_sched_info->prev_head);
7467 insn != current_sched_info->next_tail;
7468 insn = NEXT_INSN (insn))
7469 {
7470 if (GET_CODE (insn) == BARRIER)
7471 {
7472 rtx last = prev_active_insn (insn);
7473
7474 if (! last)
7475 continue;
7476 if (GET_CODE (last) == JUMP_INSN
7477 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7478 last = prev_active_insn (last);
7479 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7480 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7481
7482 init_insn_group_barriers ();
7483 need_barrier_p = 0;
7484 prev_insn = NULL_RTX;
7485 }
7486 else if (INSN_P (insn))
7487 {
7488 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7489 {
7490 init_insn_group_barriers ();
7491 need_barrier_p = 0;
7492 prev_insn = NULL_RTX;
7493 }
7494 else if (need_barrier_p || group_barrier_needed (insn))
7495 {
7496 if (TARGET_EARLY_STOP_BITS)
7497 {
7498 rtx last;
7499
7500 for (last = insn;
7501 last != current_sched_info->prev_head;
7502 last = PREV_INSN (last))
7503 if (INSN_P (last) && GET_MODE (last) == TImode
7504 && stops_p [INSN_UID (last)])
7505 break;
7506 if (last == current_sched_info->prev_head)
7507 last = insn;
7508 last = prev_active_insn (last);
7509 if (last
7510 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7511 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7512 last);
7513 init_insn_group_barriers ();
7514 for (last = NEXT_INSN (last);
7515 last != insn;
7516 last = NEXT_INSN (last))
7517 if (INSN_P (last))
7518 group_barrier_needed (last);
7519 }
7520 else
7521 {
7522 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7523 insn);
7524 init_insn_group_barriers ();
7525 }
7526 group_barrier_needed (insn);
7527 prev_insn = NULL_RTX;
7528 }
7529 else if (recog_memoized (insn) >= 0)
7530 prev_insn = insn;
7531 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7532 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7533 || asm_noperands (PATTERN (insn)) >= 0);
7534 }
7535 }
7536 }
7537
7538 \f
7539
7540 /* The following function returns the depth of the first-cycle
7541 multipass insn lookahead used by the DFA insn scheduler.  */
7542
7543 static int
7544 ia64_first_cycle_multipass_dfa_lookahead (void)
7545 {
7546 return (reload_completed ? 6 : 4);
7547 }
7548
7549 /* The following function initializes the variable `dfa_pre_cycle_insn'.  */
7550
7551 static void
7552 ia64_init_dfa_pre_cycle_insn (void)
7553 {
7554 if (temp_dfa_state == NULL)
7555 {
7556 dfa_state_size = state_size ();
7557 temp_dfa_state = xmalloc (dfa_state_size);
7558 prev_cycle_state = xmalloc (dfa_state_size);
7559 }
7560 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7561 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7562 recog_memoized (dfa_pre_cycle_insn);
7563 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7564 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7565 recog_memoized (dfa_stop_insn);
7566 }
7567
7568 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7569 used by the DFA insn scheduler. */
7570
7571 static rtx
7572 ia64_dfa_pre_cycle_insn (void)
7573 {
7574 return dfa_pre_cycle_insn;
7575 }
7576
7577 /* The following function returns TRUE if PRODUCER (of type ilog or
7578 ld) produces an address for CONSUMER (of type st or stf).  */
7579
7580 int
7581 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7582 {
7583 rtx dest, reg, mem;
7584
7585 gcc_assert (producer && consumer);
7586 dest = ia64_single_set (producer);
7587 gcc_assert (dest);
7588 reg = SET_DEST (dest);
7589 gcc_assert (reg);
7590 if (GET_CODE (reg) == SUBREG)
7591 reg = SUBREG_REG (reg);
7592 gcc_assert (GET_CODE (reg) == REG);
7593
7594 dest = ia64_single_set (consumer);
7595 gcc_assert (dest);
7596 mem = SET_DEST (dest);
7597 gcc_assert (mem && GET_CODE (mem) == MEM);
7598 return reg_mentioned_p (reg, mem);
7599 }
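/* A schematic (hypothetical) producer/consumer pair for which this
   predicate is true:

     producer:  (set (reg:DI r14) (plus:DI (reg:DI r32) (const_int 8)))
     consumer:  (set (mem:DI (reg:DI r14)) (reg:DI r33))

   The producer's destination register r14 is mentioned in the address
   of the consumer's store, so reg_mentioned_p returns true and the
   corresponding scheduler bypass may apply.  */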
7600
7601 /* The following function returns TRUE if PRODUCER (of type ilog or
7602 ld) produces an address for CONSUMER (of type ld or fld).  */
7603
7604 int
7605 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7606 {
7607 rtx dest, src, reg, mem;
7608
7609 gcc_assert (producer && consumer);
7610 dest = ia64_single_set (producer);
7611 gcc_assert (dest);
7612 reg = SET_DEST (dest);
7613 gcc_assert (reg);
7614 if (GET_CODE (reg) == SUBREG)
7615 reg = SUBREG_REG (reg);
7616 gcc_assert (GET_CODE (reg) == REG);
7617
7618 src = ia64_single_set (consumer);
7619 gcc_assert (src);
7620 mem = SET_SRC (src);
7621 gcc_assert (mem);
7622 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7623 mem = XVECEXP (mem, 0, 0);
7624 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7625 mem = XEXP (mem, 0);
7626
7627 /* Note that LO_SUM is used for GOT loads. */
7628 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
7629
7630 return reg_mentioned_p (reg, mem);
7631 }
7632
7633 /* The following function returns TRUE if INSN produces an address
7634 for a load/store insn.  We will place such insns into an M slot
7635 because this decreases their latency.  */
7636
7637 int
7638 ia64_produce_address_p (rtx insn)
7639 {
7640 return insn->call;
7641 }
7642
7643 \f
7644 /* Emit pseudo-ops for the assembler to describe predicate relations.
7645 At present this assumes that we only consider predicate pairs to
7646 be mutex, and that the assembler can deduce proper values from
7647 straight-line code. */
7648
7649 static void
7650 emit_predicate_relation_info (void)
7651 {
7652 basic_block bb;
7653
7654 FOR_EACH_BB_REVERSE (bb)
7655 {
7656 int r;
7657 rtx head = BB_HEAD (bb);
7658
7659 /* We only need such notes at code labels. */
7660 if (GET_CODE (head) != CODE_LABEL)
7661 continue;
7662 if (GET_CODE (NEXT_INSN (head)) == NOTE
7663 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7664 head = NEXT_INSN (head);
7665
7666 /* Skip p0, which may be thought to be live due to (reg:DI p0)
7667 grabbing the entire block of predicate registers. */
7668 for (r = PR_REG (2); r < PR_REG (64); r += 2)
7669 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
7670 {
7671 rtx p = gen_rtx_REG (BImode, r);
7672 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7673 if (head == BB_END (bb))
7674 BB_END (bb) = n;
7675 head = n;
7676 }
7677 }
7678
7679 /* Look for conditional calls that do not return, and protect predicate
7680 relations around them. Otherwise the assembler will assume the call
7681 returns, and complain about uses of call-clobbered predicates after
7682 the call. */
7683 FOR_EACH_BB_REVERSE (bb)
7684 {
7685 rtx insn = BB_HEAD (bb);
7686
7687 while (1)
7688 {
7689 if (GET_CODE (insn) == CALL_INSN
7690 && GET_CODE (PATTERN (insn)) == COND_EXEC
7691 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7692 {
7693 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7694 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7695 if (BB_HEAD (bb) == insn)
7696 BB_HEAD (bb) = b;
7697 if (BB_END (bb) == insn)
7698 BB_END (bb) = a;
7699 }
7700
7701 if (insn == BB_END (bb))
7702 break;
7703 insn = NEXT_INSN (insn);
7704 }
7705 }
7706 }
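/* Sketch of the intended assembler-level effect (register numbers and
   directive spellings are illustrative; the exact text comes from the
   pred_rel_mutex and safe_across_calls_* patterns in ia64.md):

       L17:                                   ; block entry, p6 live
           .pred.rel.mutex p6, p7             ; p6/p7 cannot both be true
           ...
           .pred.safe_across_calls p1-p63     ; "all" form before a noreturn
       (p6) br.call.spnt b0 = abort#          ;   conditional call
           .pred.safe_across_calls p1-p5,p16-p63   ; back to the normal form
*/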
7707
7708 /* Perform machine dependent operations on the rtl chain INSNS. */
7709
7710 static void
7711 ia64_reorg (void)
7712 {
7713 /* We are freeing block_for_insn in the toplev to keep compatibility
7714 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7715 compute_bb_for_insn ();
7716
7717 /* If optimizing, we'll have split before scheduling. */
7718 if (optimize == 0)
7719 split_all_insns (0);
7720
7721 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7722 non-optimizing bootstrap. */
7723 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7724
7725 if (optimize && ia64_flag_schedule_insns2)
7726 {
7727 timevar_push (TV_SCHED2);
7728 ia64_final_schedule = 1;
7729
7730 initiate_bundle_states ();
7731 ia64_nop = make_insn_raw (gen_nop ());
7732 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7733 recog_memoized (ia64_nop);
7734 clocks_length = get_max_uid () + 1;
7735 stops_p = xcalloc (1, clocks_length);
7736 if (ia64_tune == PROCESSOR_ITANIUM)
7737 {
7738 clocks = xcalloc (clocks_length, sizeof (int));
7739 add_cycles = xcalloc (clocks_length, sizeof (int));
7740 }
7741 if (ia64_tune == PROCESSOR_ITANIUM2)
7742 {
7743 pos_1 = get_cpu_unit_code ("2_1");
7744 pos_2 = get_cpu_unit_code ("2_2");
7745 pos_3 = get_cpu_unit_code ("2_3");
7746 pos_4 = get_cpu_unit_code ("2_4");
7747 pos_5 = get_cpu_unit_code ("2_5");
7748 pos_6 = get_cpu_unit_code ("2_6");
7749 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7750 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7751 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7752 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7753 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7754 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7755 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7756 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7757 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7758 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7759 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7760 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7761 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7762 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7763 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7764 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7765 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7766 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7767 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7768 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7769 }
7770 else
7771 {
7772 pos_1 = get_cpu_unit_code ("1_1");
7773 pos_2 = get_cpu_unit_code ("1_2");
7774 pos_3 = get_cpu_unit_code ("1_3");
7775 pos_4 = get_cpu_unit_code ("1_4");
7776 pos_5 = get_cpu_unit_code ("1_5");
7777 pos_6 = get_cpu_unit_code ("1_6");
7778 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7779 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7780 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7781 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7782 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7783 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7784 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7785 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7786 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7787 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7788 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7789 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7790 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7791 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7792 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7793 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7794 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7795 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7796 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7797 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7798 }
7799 schedule_ebbs (dump_file);
7800 finish_bundle_states ();
7801 if (ia64_tune == PROCESSOR_ITANIUM)
7802 {
7803 free (add_cycles);
7804 free (clocks);
7805 }
7806 free (stops_p);
7807 emit_insn_group_barriers (dump_file);
7808
7809 ia64_final_schedule = 0;
7810 timevar_pop (TV_SCHED2);
7811 }
7812 else
7813 emit_all_insn_group_barriers (dump_file);
7814
7815 /* A call must not be the last instruction in a function, so that the
7816 return address is still within the function and unwinding works
7817 properly. Note that IA-64 differs from dwarf2 on this point. */
7818 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7819 {
7820 rtx insn;
7821 int saw_stop = 0;
7822
7823 insn = get_last_insn ();
7824 if (! INSN_P (insn))
7825 insn = prev_active_insn (insn);
7826 /* Skip over insns that expand to nothing. */
7827 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7828 {
7829 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7830 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7831 saw_stop = 1;
7832 insn = prev_active_insn (insn);
7833 }
7834 if (GET_CODE (insn) == CALL_INSN)
7835 {
7836 if (! saw_stop)
7837 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7838 emit_insn (gen_break_f ());
7839 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7840 }
7841 }
7842
7843 emit_predicate_relation_info ();
7844
7845 if (ia64_flag_var_tracking)
7846 {
7847 timevar_push (TV_VAR_TRACKING);
7848 variable_tracking_main ();
7849 timevar_pop (TV_VAR_TRACKING);
7850 }
7851 }
7852 \f
7853 /* Return true if REGNO is used by the epilogue. */
7854
7855 int
7856 ia64_epilogue_uses (int regno)
7857 {
7858 switch (regno)
7859 {
7860 case R_GR (1):
7861 /* With a call to a function in another module, we will write a new
7862 value to "gp". After returning from such a call, we need to make
7863 sure the function restores the original gp-value, even if the
7864 function itself does not use the gp anymore. */
7865 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7866
7867 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7868 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7869 /* For functions defined with the syscall_linkage attribute, all
7870 input registers are marked as live at all function exits. This
7871 prevents the register allocator from using the input registers,
7872 which in turn makes it possible to restart a system call after
7873 an interrupt without having to save/restore the input registers.
7874 This also prevents kernel data from leaking to application code. */
7875 return lookup_attribute ("syscall_linkage",
7876 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7877
7878 case R_BR (0):
7879 /* Conditional return patterns can't represent the use of `b0' as
7880 the return address, so we force the value live this way. */
7881 return 1;
7882
7883 case AR_PFS_REGNUM:
7884 /* Likewise for ar.pfs, which is used by br.ret. */
7885 return 1;
7886
7887 default:
7888 return 0;
7889 }
7890 }
7891
7892 /* Return true if REGNO is used by the frame unwinder. */
7893
7894 int
7895 ia64_eh_uses (int regno)
7896 {
7897 if (! reload_completed)
7898 return 0;
7899
7900 if (current_frame_info.reg_save_b0
7901 && regno == current_frame_info.reg_save_b0)
7902 return 1;
7903 if (current_frame_info.reg_save_pr
7904 && regno == current_frame_info.reg_save_pr)
7905 return 1;
7906 if (current_frame_info.reg_save_ar_pfs
7907 && regno == current_frame_info.reg_save_ar_pfs)
7908 return 1;
7909 if (current_frame_info.reg_save_ar_unat
7910 && regno == current_frame_info.reg_save_ar_unat)
7911 return 1;
7912 if (current_frame_info.reg_save_ar_lc
7913 && regno == current_frame_info.reg_save_ar_lc)
7914 return 1;
7915
7916 return 0;
7917 }
7918 \f
7919 /* Return true if this goes in small data/bss. */
7920
7921 /* ??? We could also support our own long data here, generating movl/add/ld8
7922 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
7923 code faster because there is one less load.  This also includes incomplete
7924 types which can't go in sdata/sbss.  */
7925
7926 static bool
7927 ia64_in_small_data_p (tree exp)
7928 {
7929 if (TARGET_NO_SDATA)
7930 return false;
7931
7932 /* We want to merge strings, so we never consider them small data. */
7933 if (TREE_CODE (exp) == STRING_CST)
7934 return false;
7935
7936 /* Functions are never small data. */
7937 if (TREE_CODE (exp) == FUNCTION_DECL)
7938 return false;
7939
7940 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7941 {
7942 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7943
7944 if (strcmp (section, ".sdata") == 0
7945 || strncmp (section, ".sdata.", 7) == 0
7946 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
7947 || strcmp (section, ".sbss") == 0
7948 || strncmp (section, ".sbss.", 6) == 0
7949 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
7950 return true;
7951 }
7952 else
7953 {
7954 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7955
7956 /* If this is an incomplete type with size 0, then we can't put it
7957 in sdata because it might be too big when completed. */
7958 if (size > 0 && size <= ia64_section_threshold)
7959 return true;
7960 }
7961
7962 return false;
7963 }
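/* Illustrative effect: with the default size threshold, a small global
   such as

     int counter;

   is placed in .sbss and reached through the short gp-relative
   addl,ld8 form mentioned in the ??? comment above, while larger
   objects, strings and incomplete types fall back to the ordinary
   data sections.  */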
7964 \f
7965 /* Output assembly directives for prologue regions. */
7966
7967 /* True if the current basic block is the last one in the function.  */
7968
7969 static bool last_block;
7970
7971 /* True if we need a copy_state command at the start of the next block. */
7972
7973 static bool need_copy_state;
7974
7975 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
7976 # define MAX_ARTIFICIAL_LABEL_BYTES 30
7977 #endif
7978
7979 /* Emit a debugging label after a call-frame-related insn. We'd
7980 rather output the label right away, but we'd have to output it
7981 after, not before, the instruction, and the instruction has not
7982 been output yet. So we emit the label after the insn, delete it to
7983 avoid introducing basic blocks, and mark it as preserved, such that
7984 it is still output, given that it is referenced in debug info. */
7985
7986 static const char *
7987 ia64_emit_deleted_label_after_insn (rtx insn)
7988 {
7989 char label[MAX_ARTIFICIAL_LABEL_BYTES];
7990 rtx lb = gen_label_rtx ();
7991 rtx label_insn = emit_label_after (lb, insn);
7992
7993 LABEL_PRESERVE_P (lb) = 1;
7994
7995 delete_insn (label_insn);
7996
7997 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
7998
7999 return xstrdup (label);
8000 }
8001
8002 /* Define the CFA after INSN with the steady-state definition. */
8003
8004 static void
8005 ia64_dwarf2out_def_steady_cfa (rtx insn)
8006 {
8007 rtx fp = frame_pointer_needed
8008 ? hard_frame_pointer_rtx
8009 : stack_pointer_rtx;
8010
8011 dwarf2out_def_cfa
8012 (ia64_emit_deleted_label_after_insn (insn),
8013 REGNO (fp),
8014 ia64_initial_elimination_offset
8015 (REGNO (arg_pointer_rtx), REGNO (fp))
8016 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8017 }
8018
8019 /* The generic dwarf2 frame debug info generator does not define a
8020 separate region for the very end of the epilogue, so refrain from
8021 doing so in the IA64-specific code as well. */
8022
8023 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8024
8025 /* The function emits unwind directives for the start of an epilogue. */
8026
8027 static void
8028 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8029 {
8030 /* If this isn't the last block of the function, then we need to label the
8031 current state, and copy it back in at the start of the next block. */
8032
8033 if (!last_block)
8034 {
8035 if (unwind)
8036 fprintf (asm_out_file, "\t.label_state %d\n",
8037 ++cfun->machine->state_num);
8038 need_copy_state = true;
8039 }
8040
8041 if (unwind)
8042 fprintf (asm_out_file, "\t.restore sp\n");
8043 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8044 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8045 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8046 }
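/* Concretely, an epilogue that is not in the last block of the function
   produces output along the lines of (state number illustrative)

       .label_state 1
       .restore sp

   and process_for_unwind_directive below then emits ".body" and
   ".copy_state 1" at the start of the next basic block.  */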
8047
8048 /* This function processes a SET pattern looking for specific patterns
8049 which result in emitting an assembly directive required for unwinding. */
8050
8051 static int
8052 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8053 {
8054 rtx src = SET_SRC (pat);
8055 rtx dest = SET_DEST (pat);
8056 int src_regno, dest_regno;
8057
8058 /* Look for the ALLOC insn. */
8059 if (GET_CODE (src) == UNSPEC_VOLATILE
8060 && XINT (src, 1) == UNSPECV_ALLOC
8061 && GET_CODE (dest) == REG)
8062 {
8063 dest_regno = REGNO (dest);
8064
8065 /* If this is the final destination for ar.pfs, then this must
8066 be the alloc in the prologue. */
8067 if (dest_regno == current_frame_info.reg_save_ar_pfs)
8068 {
8069 if (unwind)
8070 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8071 ia64_dbx_register_number (dest_regno));
8072 }
8073 else
8074 {
8075 /* This must be an alloc before a sibcall. We must drop the
8076 old frame info. The easiest way to drop the old frame
8077 info is to ensure we had a ".restore sp" directive
8078 followed by a new prologue. If the procedure doesn't
8079 have a memory-stack frame, we'll issue a dummy ".restore
8080 sp" now. */
8081 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8082 /* If we haven't done process_epilogue () yet, do it now.  */
8083 process_epilogue (asm_out_file, insn, unwind, frame);
8084 if (unwind)
8085 fprintf (asm_out_file, "\t.prologue\n");
8086 }
8087 return 1;
8088 }
8089
8090 /* Look for SP = .... */
8091 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8092 {
8093 if (GET_CODE (src) == PLUS)
8094 {
8095 rtx op0 = XEXP (src, 0);
8096 rtx op1 = XEXP (src, 1);
8097
8098 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8099
8100 if (INTVAL (op1) < 0)
8101 {
8102 gcc_assert (!frame_pointer_needed);
8103 if (unwind)
8104 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8105 -INTVAL (op1));
8106 if (frame)
8107 ia64_dwarf2out_def_steady_cfa (insn);
8108 }
8109 else
8110 process_epilogue (asm_out_file, insn, unwind, frame);
8111 }
8112 else
8113 {
8114 gcc_assert (GET_CODE (src) == REG
8115 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8116 process_epilogue (asm_out_file, insn, unwind, frame);
8117 }
8118
8119 return 1;
8120 }
8121
8122 /* Register move we need to look at. */
8123 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8124 {
8125 src_regno = REGNO (src);
8126 dest_regno = REGNO (dest);
8127
8128 switch (src_regno)
8129 {
8130 case BR_REG (0):
8131 /* Saving return address pointer. */
8132 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
8133 if (unwind)
8134 fprintf (asm_out_file, "\t.save rp, r%d\n",
8135 ia64_dbx_register_number (dest_regno));
8136 return 1;
8137
8138 case PR_REG (0):
8139 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
8140 if (unwind)
8141 fprintf (asm_out_file, "\t.save pr, r%d\n",
8142 ia64_dbx_register_number (dest_regno));
8143 return 1;
8144
8145 case AR_UNAT_REGNUM:
8146 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
8147 if (unwind)
8148 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8149 ia64_dbx_register_number (dest_regno));
8150 return 1;
8151
8152 case AR_LC_REGNUM:
8153 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
8154 if (unwind)
8155 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8156 ia64_dbx_register_number (dest_regno));
8157 return 1;
8158
8159 case STACK_POINTER_REGNUM:
8160 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8161 && frame_pointer_needed);
8162 if (unwind)
8163 fprintf (asm_out_file, "\t.vframe r%d\n",
8164 ia64_dbx_register_number (dest_regno));
8165 if (frame)
8166 ia64_dwarf2out_def_steady_cfa (insn);
8167 return 1;
8168
8169 default:
8170 /* Everything else should indicate being stored to memory. */
8171 gcc_unreachable ();
8172 }
8173 }
8174
8175 /* Memory store we need to look at. */
8176 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8177 {
8178 long off;
8179 rtx base;
8180 const char *saveop;
8181
8182 if (GET_CODE (XEXP (dest, 0)) == REG)
8183 {
8184 base = XEXP (dest, 0);
8185 off = 0;
8186 }
8187 else
8188 {
8189 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8190 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8191 base = XEXP (XEXP (dest, 0), 0);
8192 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8193 }
8194
8195 if (base == hard_frame_pointer_rtx)
8196 {
8197 saveop = ".savepsp";
8198 off = - off;
8199 }
8200 else
8201 {
8202 gcc_assert (base == stack_pointer_rtx);
8203 saveop = ".savesp";
8204 }
8205
8206 src_regno = REGNO (src);
8207 switch (src_regno)
8208 {
8209 case BR_REG (0):
8210 gcc_assert (!current_frame_info.reg_save_b0);
8211 if (unwind)
8212 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
8213 return 1;
8214
8215 case PR_REG (0):
8216 gcc_assert (!current_frame_info.reg_save_pr);
8217 if (unwind)
8218 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
8219 return 1;
8220
8221 case AR_LC_REGNUM:
8222 gcc_assert (!current_frame_info.reg_save_ar_lc);
8223 if (unwind)
8224 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
8225 return 1;
8226
8227 case AR_PFS_REGNUM:
8228 gcc_assert (!current_frame_info.reg_save_ar_pfs);
8229 if (unwind)
8230 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
8231 return 1;
8232
8233 case AR_UNAT_REGNUM:
8234 gcc_assert (!current_frame_info.reg_save_ar_unat);
8235 if (unwind)
8236 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
8237 return 1;
8238
8239 case GR_REG (4):
8240 case GR_REG (5):
8241 case GR_REG (6):
8242 case GR_REG (7):
8243 if (unwind)
8244 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8245 1 << (src_regno - GR_REG (4)));
8246 return 1;
8247
8248 case BR_REG (1):
8249 case BR_REG (2):
8250 case BR_REG (3):
8251 case BR_REG (4):
8252 case BR_REG (5):
8253 if (unwind)
8254 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8255 1 << (src_regno - BR_REG (1)));
8256 return 1;
8257
8258 case FR_REG (2):
8259 case FR_REG (3):
8260 case FR_REG (4):
8261 case FR_REG (5):
8262 if (unwind)
8263 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8264 1 << (src_regno - FR_REG (2)));
8265 return 1;
8266
8267 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8268 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8269 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8270 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8271 if (unwind)
8272 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8273 1 << (src_regno - FR_REG (12)));
8274 return 1;
8275
8276 default:
8277 return 0;
8278 }
8279 }
8280
8281 return 0;
8282 }
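/* Putting the cases above together, the unwind annotations for a typical
   prologue come out as a sequence such as (register numbers and frame
   size are illustrative only)

       .save ar.pfs, r35
       .fframe 16
       .save rp, r34
       .save pr, r33

   with each directive corresponding to one of the fprintf calls above,
   interleaved with the prologue instructions it describes.  */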
8283
8284
8285 /* This function looks at a single insn and emits any directives
8286 required to unwind this insn. */
8287 void
8288 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8289 {
8290 bool unwind = (flag_unwind_tables
8291 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
8292 bool frame = dwarf2out_do_frame ();
8293
8294 if (unwind || frame)
8295 {
8296 rtx pat;
8297
8298 if (GET_CODE (insn) == NOTE
8299 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8300 {
8301 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8302
8303 /* Restore unwind state from immediately before the epilogue. */
8304 if (need_copy_state)
8305 {
8306 if (unwind)
8307 {
8308 fprintf (asm_out_file, "\t.body\n");
8309 fprintf (asm_out_file, "\t.copy_state %d\n",
8310 cfun->machine->state_num);
8311 }
8312 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8313 ia64_dwarf2out_def_steady_cfa (insn);
8314 need_copy_state = false;
8315 }
8316 }
8317
8318 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8319 return;
8320
8321 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8322 if (pat)
8323 pat = XEXP (pat, 0);
8324 else
8325 pat = PATTERN (insn);
8326
8327 switch (GET_CODE (pat))
8328 {
8329 case SET:
8330 process_set (asm_out_file, pat, insn, unwind, frame);
8331 break;
8332
8333 case PARALLEL:
8334 {
8335 int par_index;
8336 int limit = XVECLEN (pat, 0);
8337 for (par_index = 0; par_index < limit; par_index++)
8338 {
8339 rtx x = XVECEXP (pat, 0, par_index);
8340 if (GET_CODE (x) == SET)
8341 process_set (asm_out_file, x, insn, unwind, frame);
8342 }
8343 break;
8344 }
8345
8346 default:
8347 gcc_unreachable ();
8348 }
8349 }
8350 }
8351
8352 \f
8353 enum ia64_builtins
8354 {
8355 IA64_BUILTIN_BSP,
8356 IA64_BUILTIN_FLUSHRS
8357 };
8358
8359 void
8360 ia64_init_builtins (void)
8361 {
8362 tree fpreg_type;
8363 tree float80_type;
8364
8365 /* The __fpreg type. */
8366 fpreg_type = make_node (REAL_TYPE);
8367 TYPE_PRECISION (fpreg_type) = 82;
8368 layout_type (fpreg_type);
8369 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8370
8371 /* The __float80 type. */
8372 float80_type = make_node (REAL_TYPE);
8373 TYPE_PRECISION (float80_type) = 80;
8374 layout_type (float80_type);
8375 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8376
8377 /* The __float128 type. */
8378 if (!TARGET_HPUX)
8379 {
8380 tree float128_type = make_node (REAL_TYPE);
8381 TYPE_PRECISION (float128_type) = 128;
8382 layout_type (float128_type);
8383 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8384 }
8385 else
8386 /* Under HPUX, this is a synonym for "long double". */
8387 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8388 "__float128");
8389
8390 #define def_builtin(name, type, code) \
8391 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
8392 NULL, NULL_TREE)
8393
8394 def_builtin ("__builtin_ia64_bsp",
8395 build_function_type (ptr_type_node, void_list_node),
8396 IA64_BUILTIN_BSP);
8397
8398 def_builtin ("__builtin_ia64_flushrs",
8399 build_function_type (void_type_node, void_list_node),
8400 IA64_BUILTIN_FLUSHRS);
8401
8402 #undef def_builtin
8403 }
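/* A minimal usage sketch (user code, not part of this file):

     void *bsp_snapshot (void)
     {
       __builtin_ia64_flushrs ();     /* flush dirty RSE regs to memory */
       return __builtin_ia64_bsp ();  /* current backing store pointer  */
     }

   matching the prototypes registered above: bsp takes no arguments and
   returns a pointer, flushrs takes no arguments and returns void.  */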
8404
8405 rtx
8406 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8407 enum machine_mode mode ATTRIBUTE_UNUSED,
8408 int ignore ATTRIBUTE_UNUSED)
8409 {
8410 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8411 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8412
8413 switch (fcode)
8414 {
8415 case IA64_BUILTIN_BSP:
8416 if (! target || ! register_operand (target, DImode))
8417 target = gen_reg_rtx (DImode);
8418 emit_insn (gen_bsp_value (target));
8419 #ifdef POINTERS_EXTEND_UNSIGNED
8420 target = convert_memory_address (ptr_mode, target);
8421 #endif
8422 return target;
8423
8424 case IA64_BUILTIN_FLUSHRS:
8425 emit_insn (gen_flushrs ());
8426 return const0_rtx;
8427
8428 default:
8429 break;
8430 }
8431
8432 return NULL_RTX;
8433 }
8434
8435 /* On HP-UX IA64, aggregate parameters are passed in the
8436 most significant bits of the stack slot.  */
8437
8438 enum direction
8439 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8440 {
8441 /* Exception to normal case for structures/unions/etc. */
8442
8443 if (type && AGGREGATE_TYPE_P (type)
8444 && int_size_in_bytes (type) < UNITS_PER_WORD)
8445 return upward;
8446
8447 /* Fall back to the default. */
8448 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8449 }
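/* For example (illustrative): a 2-byte struct passed on the HP-UX stack
   occupies the most significant end of its 8-byte slot, i.e. it is
   padded "upward", whereas scalars and aggregates of UNITS_PER_WORD
   bytes or more fall through to DEFAULT_FUNCTION_ARG_PADDING.  */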
8450
8451 /* Linked list of all external functions that are to be emitted by GCC.
8452 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8453 order to avoid putting out names that are never really used. */
8454
8455 struct extern_func_list GTY(())
8456 {
8457 struct extern_func_list *next;
8458 tree decl;
8459 };
8460
8461 static GTY(()) struct extern_func_list *extern_func_head;
8462
8463 static void
8464 ia64_hpux_add_extern_decl (tree decl)
8465 {
8466 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8467
8468 p->decl = decl;
8469 p->next = extern_func_head;
8470 extern_func_head = p;
8471 }
8472
8473 /* Print out the list of used global functions. */
8474
8475 static void
8476 ia64_hpux_file_end (void)
8477 {
8478 struct extern_func_list *p;
8479
8480 for (p = extern_func_head; p; p = p->next)
8481 {
8482 tree decl = p->decl;
8483 tree id = DECL_ASSEMBLER_NAME (decl);
8484
8485 gcc_assert (id);
8486
8487 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8488 {
8489 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8490
8491 TREE_ASM_WRITTEN (decl) = 1;
8492 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8493 fputs (TYPE_ASM_OP, asm_out_file);
8494 assemble_name (asm_out_file, name);
8495 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8496 }
8497 }
8498
8499 extern_func_head = 0;
8500 }
8501
8502 /* Set SImode div/mod functions, since init_integral_libfuncs only
8503 initializes modes of word_mode and larger.  Rename the TFmode libfuncs
8504 using the HPUX conventions.  __divtf3 is used for XFmode, so we need to
8505 keep it for backward compatibility.  */
8506
8507 static void
8508 ia64_init_libfuncs (void)
8509 {
8510 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8511 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8512 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8513 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8514
8515 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8516 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8517 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8518 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8519 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8520
8521 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8522 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8523 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8524 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8525 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8526 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8527
8528 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8529 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8530 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
8531 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8532 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8533
8534 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8535 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8536 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
8537 /* HP-UX 11.23 libc does not have a function for unsigned
8538 SImode-to-TFmode conversion. */
8539 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
8540 }
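/* The effect, sketched: a TFmode multiply such as "a * b" is emitted as
   a call to _U_Qfmpy instead of the default __multf3, and converting a
   TFmode value to a signed SImode integer calls _U_Qfcnvfxt_quad_to_sgl,
   matching the set_optab_libfunc / set_conv_libfunc registrations
   above.  */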
8541
8542 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8543
8544 static void
8545 ia64_hpux_init_libfuncs (void)
8546 {
8547 ia64_init_libfuncs ();
8548
8549 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8550 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8551 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8552
8553 /* ia64_expand_compare uses this. */
8554 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8555
8556 /* These should never be used. */
8557 set_optab_libfunc (eq_optab, TFmode, 0);
8558 set_optab_libfunc (ne_optab, TFmode, 0);
8559 set_optab_libfunc (gt_optab, TFmode, 0);
8560 set_optab_libfunc (ge_optab, TFmode, 0);
8561 set_optab_libfunc (lt_optab, TFmode, 0);
8562 set_optab_libfunc (le_optab, TFmode, 0);
8563 }
8564
8565 /* Rename the division and modulus functions in VMS. */
8566
8567 static void
8568 ia64_vms_init_libfuncs (void)
8569 {
8570 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8571 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8572 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8573 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8574 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8575 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8576 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8577 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8578 }
8579
8580 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8581 the HPUX conventions. */
8582
8583 static void
8584 ia64_sysv4_init_libfuncs (void)
8585 {
8586 ia64_init_libfuncs ();
8587
8588 /* These functions are not part of the HPUX TFmode interface. We
8589 use them instead of _U_Qfcmp, which doesn't work the way we
8590 expect. */
8591 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8592 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8593 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8594 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8595 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8596 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8597
8598 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8599 glibc doesn't have them. */
8600 }
8601 \f
8602 /* Return the section to use for X. The only special thing we do here
8603 is to honor small data. */
8604
8605 static section *
8606 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8607 unsigned HOST_WIDE_INT align)
8608 {
8609 if (GET_MODE_SIZE (mode) > 0
8610 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8611 return sdata_section;
8612 else
8613 return default_elf_select_rtx_section (mode, x, align);
8614 }
8615
8616 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8617 Pretend flag_pic is always set. */
8618
8619 static section *
8620 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8621 {
8622 return default_elf_select_section_1 (exp, reloc, align, true);
8623 }
8624
8625 static void
8626 ia64_rwreloc_unique_section (tree decl, int reloc)
8627 {
8628 default_unique_section_1 (decl, reloc, true);
8629 }
8630
8631 static section *
8632 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8633 unsigned HOST_WIDE_INT align)
8634 {
8635 section *sect;
8636 int save_pic = flag_pic;
8637 flag_pic = 1;
8638 sect = ia64_select_rtx_section (mode, x, align);
8639 flag_pic = save_pic;
8640 return sect;
8641 }
8642
8643 #ifndef TARGET_RWRELOC
8644 #define TARGET_RWRELOC flag_pic
8645 #endif
8646
8647 static unsigned int
8648 ia64_section_type_flags (tree decl, const char *name, int reloc)
8649 {
8650 unsigned int flags = 0;
8651
8652 if (strcmp (name, ".sdata") == 0
8653 || strncmp (name, ".sdata.", 7) == 0
8654 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
8655 || strncmp (name, ".sdata2.", 8) == 0
8656 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
8657 || strcmp (name, ".sbss") == 0
8658 || strncmp (name, ".sbss.", 6) == 0
8659 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
8660 flags = SECTION_SMALL;
8661
8662 flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
8663 return flags;
8664 }
8665
8666 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8667 structure type and the address of that structure should be passed
8668 in out0, rather than in r8. */
8669
8670 static bool
8671 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8672 {
8673 tree ret_type = TREE_TYPE (fntype);
8674
8675 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8676 as the structure return address parameter, if the return value
8677 type has a non-trivial copy constructor or destructor. It is not
8678 clear if this same convention should be used for other
8679 programming languages. Until G++ 3.4, we incorrectly used r8 for
8680 these return values. */
8681 return (abi_version_at_least (2)
8682 && ret_type
8683 && TYPE_MODE (ret_type) == BLKmode
8684 && TREE_ADDRESSABLE (ret_type)
8685 && strcmp (lang_hooks.name, "GNU C++") == 0);
8686 }
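/* Illustrative C++ example (hypothetical): for

     struct S { ~S (); };       // non-trivial destructor
     struct S f ();

   the return value is returned in memory (BLKmode, TREE_ADDRESSABLE),
   so under -fabi-version=2 and later the address of the return slot is
   passed in out0 rather than in r8; see also ia64_struct_value_rtx and
   ia64_output_mi_thunk below.  */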
8687
8688 /* Output the assembler code for a thunk function. THUNK_DECL is the
8689 declaration for the thunk function itself, FUNCTION is the decl for
8690 the target function. DELTA is an immediate constant offset to be
8691 added to THIS. If VCALL_OFFSET is nonzero, the word at
8692 *(*this + vcall_offset) should be added to THIS. */
8693
8694 static void
8695 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8696 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8697 tree function)
8698 {
8699 rtx this, insn, funexp;
8700 unsigned int this_parmno;
8701 unsigned int this_regno;
8702
8703 reload_completed = 1;
8704 epilogue_completed = 1;
8705 no_new_pseudos = 1;
8706 reset_block_changes ();
8707
8708 /* Set things up as ia64_expand_prologue might. */
8709 last_scratch_gr_reg = 15;
8710
8711 memset (&current_frame_info, 0, sizeof (current_frame_info));
8712 current_frame_info.spill_cfa_off = -16;
8713 current_frame_info.n_input_regs = 1;
8714 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8715
8716 /* Mark the end of the (empty) prologue. */
8717 emit_note (NOTE_INSN_PROLOGUE_END);
8718
8719 /* Figure out whether "this" will be the first parameter (the
8720 typical case) or the second parameter (as happens when the
8721 virtual function returns certain class objects). */
8722 this_parmno
8723 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8724 ? 1 : 0);
8725 this_regno = IN_REG (this_parmno);
8726 if (!TARGET_REG_NAMES)
8727 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8728
8729 this = gen_rtx_REG (Pmode, this_regno);
8730 if (TARGET_ILP32)
8731 {
8732 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8733 REG_POINTER (tmp) = 1;
8734 if (delta && CONST_OK_FOR_I (delta))
8735 {
8736 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8737 delta = 0;
8738 }
8739 else
8740 emit_insn (gen_ptr_extend (this, tmp));
8741 }
8742
8743 /* Apply the constant offset, if required. */
8744 if (delta)
8745 {
8746 rtx delta_rtx = GEN_INT (delta);
8747
8748 if (!CONST_OK_FOR_I (delta))
8749 {
8750 rtx tmp = gen_rtx_REG (Pmode, 2);
8751 emit_move_insn (tmp, delta_rtx);
8752 delta_rtx = tmp;
8753 }
8754 emit_insn (gen_adddi3 (this, this, delta_rtx));
8755 }
8756
8757 /* Apply the offset from the vtable, if required. */
8758 if (vcall_offset)
8759 {
8760 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8761 rtx tmp = gen_rtx_REG (Pmode, 2);
8762
8763 if (TARGET_ILP32)
8764 {
8765 rtx t = gen_rtx_REG (ptr_mode, 2);
8766 REG_POINTER (t) = 1;
8767 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8768 if (CONST_OK_FOR_I (vcall_offset))
8769 {
8770 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8771 vcall_offset_rtx));
8772 vcall_offset = 0;
8773 }
8774 else
8775 emit_insn (gen_ptr_extend (tmp, t));
8776 }
8777 else
8778 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8779
8780 if (vcall_offset)
8781 {
8782 if (!CONST_OK_FOR_J (vcall_offset))
8783 {
8784 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8785 emit_move_insn (tmp2, vcall_offset_rtx);
8786 vcall_offset_rtx = tmp2;
8787 }
8788 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8789 }
8790
8791 if (TARGET_ILP32)
8792 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8793 gen_rtx_MEM (ptr_mode, tmp));
8794 else
8795 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8796
8797 emit_insn (gen_adddi3 (this, this, tmp));
8798 }
8799
8800 /* Generate a tail call to the target function. */
8801 if (! TREE_USED (function))
8802 {
8803 assemble_external (function);
8804 TREE_USED (function) = 1;
8805 }
8806 funexp = XEXP (DECL_RTL (function), 0);
8807 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8808 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8809 insn = get_last_insn ();
8810 SIBLING_CALL_P (insn) = 1;
8811
8812 /* Code generation for calls relies on splitting. */
8813 reload_completed = 1;
8814 epilogue_completed = 1;
8815 try_split (PATTERN (insn), insn, 0);
8816
8817 emit_barrier ();
8818
8819 /* Run just enough of rest_of_compilation to get the insns emitted.
8820 There's not really enough bulk here to make other passes such as
8821 instruction scheduling worthwhile.  Note that use_thunk calls
8822 assemble_start_function and assemble_end_function. */
8823
8824 insn_locators_initialize ();
8825 emit_all_insn_group_barriers (NULL);
8826 insn = get_insns ();
8827 shorten_branches (insn);
8828 final_start_function (insn, file, 1);
8829 final (insn, file, 1);
8830 final_end_function ();
8831
8832 reload_completed = 0;
8833 epilogue_completed = 0;
8834 no_new_pseudos = 0;
8835 }
8836
8837 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8838
8839 static rtx
8840 ia64_struct_value_rtx (tree fntype,
8841 int incoming ATTRIBUTE_UNUSED)
8842 {
8843 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8844 return NULL_RTX;
8845 return gen_rtx_REG (Pmode, GR_REG (8));
8846 }
8847
8848 static bool
8849 ia64_scalar_mode_supported_p (enum machine_mode mode)
8850 {
8851 switch (mode)
8852 {
8853 case QImode:
8854 case HImode:
8855 case SImode:
8856 case DImode:
8857 case TImode:
8858 return true;
8859
8860 case SFmode:
8861 case DFmode:
8862 case XFmode:
8863 case RFmode:
8864 return true;
8865
8866 case TFmode:
8867 return TARGET_HPUX;
8868
8869 default:
8870 return false;
8871 }
8872 }
8873
8874 static bool
8875 ia64_vector_mode_supported_p (enum machine_mode mode)
8876 {
8877 switch (mode)
8878 {
8879 case V8QImode:
8880 case V4HImode:
8881 case V2SImode:
8882 return true;
8883
8884 case V2SFmode:
8885 return true;
8886
8887 default:
8888 return false;
8889 }
8890 }
8891
8892 /* Implement the FUNCTION_PROFILER macro. */
8893
8894 void
8895 ia64_output_function_profiler (FILE *file, int labelno)
8896 {
8897 bool indirect_call;
8898
8899 /* If the function needs a static chain and the static chain
8900 register is r15, we use an indirect call so as to bypass
8901 the PLT stub in case the executable is dynamically linked,
8902 because the stub clobbers r15 as per 5.3.6 of the psABI.
8903 We don't need to do that in non-canonical PIC mode.  */
8904
8905 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
8906 {
8907 gcc_assert (STATIC_CHAIN_REGNUM == 15);
8908 indirect_call = true;
8909 }
8910 else
8911 indirect_call = false;
8912
8913 if (TARGET_GNU_AS)
8914 fputs ("\t.prologue 4, r40\n", file);
8915 else
8916 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
8917 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
8918
8919 if (NO_PROFILE_COUNTERS)
8920 fputs ("\tmov out3 = r0\n", file);
8921 else
8922 {
8923 char buf[20];
8924 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8925
8926 if (TARGET_AUTO_PIC)
8927 fputs ("\tmovl out3 = @gprel(", file);
8928 else
8929 fputs ("\taddl out3 = @ltoff(", file);
8930 assemble_name (file, buf);
8931 if (TARGET_AUTO_PIC)
8932 fputs (")\n", file);
8933 else
8934 fputs ("), r1\n", file);
8935 }
8936
8937 if (indirect_call)
8938 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
8939 fputs ("\t;;\n", file);
8940
8941 fputs ("\t.save rp, r42\n", file);
8942 fputs ("\tmov out2 = b0\n", file);
8943 if (indirect_call)
8944 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
8945 fputs ("\t.body\n", file);
8946 fputs ("\tmov out1 = r1\n", file);
8947 if (indirect_call)
8948 {
8949 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
8950 fputs ("\tmov b6 = r16\n", file);
8951 fputs ("\tld8 r1 = [r14]\n", file);
8952 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
8953 }
8954 else
8955 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
8956 }
8957
8958 static GTY(()) rtx mcount_func_rtx;
8959 static rtx
8960 gen_mcount_func_rtx (void)
8961 {
8962 if (!mcount_func_rtx)
8963 mcount_func_rtx = init_one_libfunc ("_mcount");
8964 return mcount_func_rtx;
8965 }
8966
8967 void
8968 ia64_profile_hook (int labelno)
8969 {
8970 rtx label, ip;
8971
8972 if (NO_PROFILE_COUNTERS)
8973 label = const0_rtx;
8974 else
8975 {
8976 char buf[30];
8977 const char *label_name;
8978 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8979 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
8980 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
8981 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
8982 }
8983 ip = gen_reg_rtx (Pmode);
8984 emit_insn (gen_ip_value (ip));
8985 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
8986 VOIDmode, 3,
8987 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
8988 ip, Pmode,
8989 label, Pmode);
8990 }
8991
8992 /* Return the mangling of TYPE if it is an extended fundamental type. */
8993
8994 static const char *
8995 ia64_mangle_fundamental_type (tree type)
8996 {
8997 /* On HP-UX, "long double" is mangled as "e", so __float128 gets the
8998 default "e" mangling there; elsewhere mangle it as "g".  */
8999 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9000 return "g";
9001 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9002 an extended mangling. Elsewhere, "e" is available since long
9003 double is 80 bits. */
9004 if (TYPE_MODE (type) == XFmode)
9005 return TARGET_HPUX ? "u9__float80" : "e";
9006 if (TYPE_MODE (type) == RFmode)
9007 return "u7__fpreg";
9008 return NULL;
9009 }
9010
9011 /* Return the diagnostic message string if conversion from FROMTYPE to
9012 TOTYPE is not allowed, NULL otherwise. */
9013 static const char *
9014 ia64_invalid_conversion (tree fromtype, tree totype)
9015 {
9016 /* Reject nontrivial conversion to or from __fpreg. */
9017 if (TYPE_MODE (fromtype) == RFmode
9018 && TYPE_MODE (totype) != RFmode
9019 && TYPE_MODE (totype) != VOIDmode)
9020 return N_("invalid conversion from %<__fpreg%>");
9021 if (TYPE_MODE (totype) == RFmode
9022 && TYPE_MODE (fromtype) != RFmode)
9023 return N_("invalid conversion to %<__fpreg%>");
9024 return NULL;
9025 }
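/* For example (illustrative), with "__fpreg r;" the front end rejects

     double d = r;       // "invalid conversion from __fpreg"
     r = 1.0;            // "invalid conversion to __fpreg"

   while copying one __fpreg to another is still allowed, since both
   sides then have RFmode.  */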
9026
9027 /* Return the diagnostic message string if the unary operation OP is
9028 not permitted on TYPE, NULL otherwise. */
9029 static const char *
9030 ia64_invalid_unary_op (int op, tree type)
9031 {
9032 /* Reject operations on __fpreg other than unary + or &. */
9033 if (TYPE_MODE (type) == RFmode
9034 && op != CONVERT_EXPR
9035 && op != ADDR_EXPR)
9036 return N_("invalid operation on %<__fpreg%>");
9037 return NULL;
9038 }
9039
9040 /* Return the diagnostic message string if the binary operation OP is
9041 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9042 static const char *
9043 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
9044 {
9045 /* Reject operations on __fpreg. */
9046 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9047 return N_("invalid operation on %<__fpreg%>");
9048 return NULL;
9049 }
9050
9051 #include "gt-ia64.h"