gcc/config/ia64/ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "gimple.h"
54 #include "intl.h"
55 #include "df.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "dbgcnt.h"
59 #include "tm-constrs.h"
60
61 /* This is used for communication between ASM_OUTPUT_LABEL and
62 ASM_OUTPUT_LABELREF. */
63 int ia64_asm_output_label = 0;
64
65 /* Define the information needed to generate branch and scc insns. This is
66 stored from the compare operation. */
67 struct rtx_def * ia64_compare_op0;
68 struct rtx_def * ia64_compare_op1;
69
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
84
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105
106 /* Which CPU we are scheduling for. */
107 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
108
109 /* Determines whether we run our final scheduling pass or not. We always
110 avoid the normal second scheduling pass. */
111 static int ia64_flag_schedule_insns2;
112
113 /* Determines whether we run variable tracking in machine dependent
114 reorganization. */
115 static int ia64_flag_var_tracking;
116
117 /* Variables which are this size or smaller are put in the sdata/sbss
118 sections. */
119
120 unsigned int ia64_section_threshold;
121
122 /* The following variable is used by the DFA insn scheduler. The value is
123 TRUE if we do insn bundling instead of insn scheduling. */
124 int bundling_p = 0;
125
126 enum ia64_frame_regs
127 {
128 reg_fp,
129 reg_save_b0,
130 reg_save_pr,
131 reg_save_ar_pfs,
132 reg_save_ar_unat,
133 reg_save_ar_lc,
134 reg_save_gp,
135 number_of_ia64_frame_regs
136 };
137
138 /* Structure to be filled in by ia64_compute_frame_size with register
139 save masks and offsets for the current function. */
140
141 struct ia64_frame_info
142 {
143 HOST_WIDE_INT total_size; /* size of the stack frame, not including
144 the caller's scratch area. */
145 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
146 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
147 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
148 HARD_REG_SET mask; /* mask of saved registers. */
149 unsigned int gr_used_mask; /* mask of registers in use as gr spill
150 registers or long-term scratches. */
151 int n_spilled; /* number of spilled registers. */
152 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
153 int n_input_regs; /* number of input registers used. */
154 int n_local_regs; /* number of local registers used. */
155 int n_output_regs; /* number of output registers used. */
156 int n_rotate_regs; /* number of rotating registers used. */
157
158 char need_regstk; /* true if a .regstk directive needed. */
159 char initialized; /* true if the data is finalized. */
160 };
161
162 /* Current frame information calculated by ia64_compute_frame_size. */
163 static struct ia64_frame_info current_frame_info;
164 /* The actual registers that are emitted. */
165 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
166 \f
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static int ia64_mode_to_int (enum machine_mode);
176 static void ia64_set_sched_flags (spec_info_t);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
179 static bool ia64_needs_block_p (const_rtx);
180 static rtx ia64_gen_check (rtx, rtx, bool);
181 static int ia64_spec_check_p (rtx);
182 static int ia64_spec_check_src_p (rtx);
183 static rtx gen_tls_get_addr (void);
184 static rtx gen_thread_pointer (void);
185 static int find_gr_spill (enum ia64_frame_regs, int);
186 static int next_scratch_gr_reg (void);
187 static void mark_reg_gr_used_mask (rtx, void *);
188 static void ia64_compute_frame_size (HOST_WIDE_INT);
189 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
190 static void finish_spill_pointers (void);
191 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
192 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
193 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
194 static rtx gen_movdi_x (rtx, rtx, rtx);
195 static rtx gen_fr_spill_x (rtx, rtx, rtx);
196 static rtx gen_fr_restore_x (rtx, rtx, rtx);
197
198 static enum machine_mode hfa_element_mode (const_tree, bool);
199 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
200 tree, int *, int);
201 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
202 tree, bool);
203 static bool ia64_function_ok_for_sibcall (tree, tree);
204 static bool ia64_return_in_memory (const_tree, const_tree);
205 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
206 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
207 static void fix_range (const char *);
208 static bool ia64_handle_option (size_t, const char *, int);
209 static struct machine_function * ia64_init_machine_status (void);
210 static void emit_insn_group_barriers (FILE *);
211 static void emit_all_insn_group_barriers (FILE *);
212 static void final_emit_insn_group_barriers (FILE *);
213 static void emit_predicate_relation_info (void);
214 static void ia64_reorg (void);
215 static bool ia64_in_small_data_p (const_tree);
216 static void process_epilogue (FILE *, rtx, bool, bool);
217 static int process_set (FILE *, rtx, rtx, bool, bool);
218
219 static bool ia64_assemble_integer (rtx, unsigned int, int);
220 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
221 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
222 static void ia64_output_function_end_prologue (FILE *);
223
224 static int ia64_issue_rate (void);
225 static int ia64_adjust_cost (rtx, rtx, rtx, int);
226 static void ia64_sched_init (FILE *, int, int);
227 static void ia64_sched_init_global (FILE *, int, int);
228 static void ia64_sched_finish_global (FILE *, int);
229 static void ia64_sched_finish (FILE *, int);
230 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
231 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
232 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
233 static int ia64_variable_issue (FILE *, int, rtx, int);
234
235 static struct bundle_state *get_free_bundle_state (void);
236 static void free_bundle_state (struct bundle_state *);
237 static void initiate_bundle_states (void);
238 static void finish_bundle_states (void);
239 static unsigned bundle_state_hash (const void *);
240 static int bundle_state_eq_p (const void *, const void *);
241 static int insert_bundle_state (struct bundle_state *);
242 static void initiate_bundle_state_table (void);
243 static void finish_bundle_state_table (void);
244 static int try_issue_nops (struct bundle_state *, int);
245 static int try_issue_insn (struct bundle_state *, rtx);
246 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
247 static int get_max_pos (state_t);
248 static int get_template (state_t, int);
249
250 static rtx get_next_important_insn (rtx, rtx);
251 static void bundling (FILE *, int, rtx, rtx);
252
253 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
254 HOST_WIDE_INT, tree);
255 static void ia64_file_start (void);
256 static void ia64_globalize_decl_name (FILE *, tree);
257
258 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
259 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
260 static section *ia64_select_rtx_section (enum machine_mode, rtx,
261 unsigned HOST_WIDE_INT);
262 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
263 ATTRIBUTE_UNUSED;
264 static unsigned int ia64_section_type_flags (tree, const char *, int);
265 static void ia64_init_libfuncs (void)
266 ATTRIBUTE_UNUSED;
267 static void ia64_hpux_init_libfuncs (void)
268 ATTRIBUTE_UNUSED;
269 static void ia64_sysv4_init_libfuncs (void)
270 ATTRIBUTE_UNUSED;
271 static void ia64_vms_init_libfuncs (void)
272 ATTRIBUTE_UNUSED;
273
274 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
275 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
281 static bool ia64_cannot_force_const_mem (rtx);
282 static const char *ia64_mangle_type (const_tree);
283 static const char *ia64_invalid_conversion (const_tree, const_tree);
284 static const char *ia64_invalid_unary_op (int, const_tree);
285 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
286 static enum machine_mode ia64_c_mode_for_suffix (char);
287 \f
288 /* Table of valid machine attributes. */
289 static const struct attribute_spec ia64_attribute_table[] =
290 {
291 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
292 { "syscall_linkage", 0, 0, false, true, true, NULL },
293 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
294 { "version_id", 1, 1, true, false, false,
295 ia64_handle_version_id_attribute },
296 { NULL, 0, 0, false, false, false, NULL }
297 };
298
299 /* Initialize the GCC target structure. */
300 #undef TARGET_ATTRIBUTE_TABLE
301 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
302
303 #undef TARGET_INIT_BUILTINS
304 #define TARGET_INIT_BUILTINS ia64_init_builtins
305
306 #undef TARGET_EXPAND_BUILTIN
307 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
308
309 #undef TARGET_ASM_BYTE_OP
310 #define TARGET_ASM_BYTE_OP "\tdata1\t"
311 #undef TARGET_ASM_ALIGNED_HI_OP
312 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
313 #undef TARGET_ASM_ALIGNED_SI_OP
314 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
315 #undef TARGET_ASM_ALIGNED_DI_OP
316 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
317 #undef TARGET_ASM_UNALIGNED_HI_OP
318 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
319 #undef TARGET_ASM_UNALIGNED_SI_OP
320 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
321 #undef TARGET_ASM_UNALIGNED_DI_OP
322 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
323 #undef TARGET_ASM_INTEGER
324 #define TARGET_ASM_INTEGER ia64_assemble_integer
325
326 #undef TARGET_ASM_FUNCTION_PROLOGUE
327 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
328 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
329 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
330 #undef TARGET_ASM_FUNCTION_EPILOGUE
331 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
332
333 #undef TARGET_IN_SMALL_DATA_P
334 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
335
336 #undef TARGET_SCHED_ADJUST_COST
337 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
338 #undef TARGET_SCHED_ISSUE_RATE
339 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
340 #undef TARGET_SCHED_VARIABLE_ISSUE
341 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
342 #undef TARGET_SCHED_INIT
343 #define TARGET_SCHED_INIT ia64_sched_init
344 #undef TARGET_SCHED_FINISH
345 #define TARGET_SCHED_FINISH ia64_sched_finish
346 #undef TARGET_SCHED_INIT_GLOBAL
347 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
348 #undef TARGET_SCHED_FINISH_GLOBAL
349 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
350 #undef TARGET_SCHED_REORDER
351 #define TARGET_SCHED_REORDER ia64_sched_reorder
352 #undef TARGET_SCHED_REORDER2
353 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
354
355 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
356 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
357
358 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
359 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
360
361 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
362 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
363 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
364 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
365
366 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
367 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
368 ia64_first_cycle_multipass_dfa_lookahead_guard
369
370 #undef TARGET_SCHED_DFA_NEW_CYCLE
371 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
372
373 #undef TARGET_SCHED_H_I_D_EXTENDED
374 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
375
376 #undef TARGET_SCHED_SET_SCHED_FLAGS
377 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
378
379 #undef TARGET_SCHED_SPECULATE_INSN
380 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
381
382 #undef TARGET_SCHED_NEEDS_BLOCK_P
383 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
384
385 #undef TARGET_SCHED_GEN_SPEC_CHECK
386 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_check
387
388 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
389 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
390 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
391
392 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
393 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
394 #undef TARGET_ARG_PARTIAL_BYTES
395 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
396
397 #undef TARGET_ASM_OUTPUT_MI_THUNK
398 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
399 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
400 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
401
402 #undef TARGET_ASM_FILE_START
403 #define TARGET_ASM_FILE_START ia64_file_start
404
405 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
406 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
407
408 #undef TARGET_RTX_COSTS
409 #define TARGET_RTX_COSTS ia64_rtx_costs
410 #undef TARGET_ADDRESS_COST
411 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
412
413 #undef TARGET_UNSPEC_MAY_TRAP_P
414 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
415
416 #undef TARGET_MACHINE_DEPENDENT_REORG
417 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
418
419 #undef TARGET_ENCODE_SECTION_INFO
420 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
421
422 #undef TARGET_SECTION_TYPE_FLAGS
423 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
424
425 #ifdef HAVE_AS_TLS
426 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
427 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
428 #endif
429
430 /* ??? ABI doesn't allow us to define this. */
431 #if 0
432 #undef TARGET_PROMOTE_FUNCTION_ARGS
433 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
434 #endif
435
436 /* ??? ABI doesn't allow us to define this. */
437 #if 0
438 #undef TARGET_PROMOTE_FUNCTION_RETURN
439 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
440 #endif
441
442 /* ??? Investigate. */
443 #if 0
444 #undef TARGET_PROMOTE_PROTOTYPES
445 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
446 #endif
447
448 #undef TARGET_STRUCT_VALUE_RTX
449 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
450 #undef TARGET_RETURN_IN_MEMORY
451 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
452 #undef TARGET_SETUP_INCOMING_VARARGS
453 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
454 #undef TARGET_STRICT_ARGUMENT_NAMING
455 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
456 #undef TARGET_MUST_PASS_IN_STACK
457 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
458
459 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
460 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
461
462 #undef TARGET_UNWIND_EMIT
463 #define TARGET_UNWIND_EMIT process_for_unwind_directive
464
465 #undef TARGET_SCALAR_MODE_SUPPORTED_P
466 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
467 #undef TARGET_VECTOR_MODE_SUPPORTED_P
468 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
469
470 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
471 in an order different from the specified program order. */
472 #undef TARGET_RELAXED_ORDERING
473 #define TARGET_RELAXED_ORDERING true
474
475 #undef TARGET_DEFAULT_TARGET_FLAGS
476 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
477 #undef TARGET_HANDLE_OPTION
478 #define TARGET_HANDLE_OPTION ia64_handle_option
479
480 #undef TARGET_CANNOT_FORCE_CONST_MEM
481 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
482
483 #undef TARGET_MANGLE_TYPE
484 #define TARGET_MANGLE_TYPE ia64_mangle_type
485
486 #undef TARGET_INVALID_CONVERSION
487 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
488 #undef TARGET_INVALID_UNARY_OP
489 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
490 #undef TARGET_INVALID_BINARY_OP
491 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
492
493 #undef TARGET_C_MODE_FOR_SUFFIX
494 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
495
496 struct gcc_target targetm = TARGET_INITIALIZER;
497 \f
498 typedef enum
499 {
500 ADDR_AREA_NORMAL, /* normal address area */
501 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
502 }
503 ia64_addr_area;
504
505 static GTY(()) tree small_ident1;
506 static GTY(()) tree small_ident2;
507
508 static void
509 init_idents (void)
510 {
511 if (small_ident1 == 0)
512 {
513 small_ident1 = get_identifier ("small");
514 small_ident2 = get_identifier ("__small__");
515 }
516 }
517
518 /* Retrieve the address area that has been chosen for the given decl. */
519
520 static ia64_addr_area
521 ia64_get_addr_area (tree decl)
522 {
523 tree model_attr;
524
525 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
526 if (model_attr)
527 {
528 tree id;
529
530 init_idents ();
531 id = TREE_VALUE (TREE_VALUE (model_attr));
532 if (id == small_ident1 || id == small_ident2)
533 return ADDR_AREA_SMALL;
534 }
535 return ADDR_AREA_NORMAL;
536 }
537
538 static tree
539 ia64_handle_model_attribute (tree *node, tree name, tree args,
540 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
541 {
542 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
543 ia64_addr_area area;
544 tree arg, decl = *node;
545
546 init_idents ();
547 arg = TREE_VALUE (args);
548 if (arg == small_ident1 || arg == small_ident2)
549 {
550 addr_area = ADDR_AREA_SMALL;
551 }
552 else
553 {
554 warning (OPT_Wattributes, "invalid argument of %qs attribute",
555 IDENTIFIER_POINTER (name));
556 *no_add_attrs = true;
557 }
558
559 switch (TREE_CODE (decl))
560 {
561 case VAR_DECL:
562 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
563 == FUNCTION_DECL)
564 && !TREE_STATIC (decl))
565 {
566 error ("%Jan address area attribute cannot be specified for "
567 "local variables", decl);
568 *no_add_attrs = true;
569 }
570 area = ia64_get_addr_area (decl);
571 if (area != ADDR_AREA_NORMAL && addr_area != area)
572 {
573 error ("address area of %q+D conflicts with previous "
574 "declaration", decl);
575 *no_add_attrs = true;
576 }
577 break;
578
579 case FUNCTION_DECL:
580 error ("%Jaddress area attribute cannot be specified for functions",
581 decl);
582 *no_add_attrs = true;
583 break;
584
585 default:
586 warning (OPT_Wattributes, "%qs attribute ignored",
587 IDENTIFIER_POINTER (name));
588 *no_add_attrs = true;
589 break;
590 }
591
592 return NULL_TREE;
593 }
594
595 static void
596 ia64_encode_addr_area (tree decl, rtx symbol)
597 {
598 int flags;
599
600 flags = SYMBOL_REF_FLAGS (symbol);
601 switch (ia64_get_addr_area (decl))
602 {
603 case ADDR_AREA_NORMAL: break;
604 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
605 default: gcc_unreachable ();
606 }
607 SYMBOL_REF_FLAGS (symbol) = flags;
608 }
609
610 static void
611 ia64_encode_section_info (tree decl, rtx rtl, int first)
612 {
613 default_encode_section_info (decl, rtl, first);
614
615 /* Careful not to prod global register variables. */
616 if (TREE_CODE (decl) == VAR_DECL
617 && GET_CODE (DECL_RTL (decl)) == MEM
618 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
619 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
620 ia64_encode_addr_area (decl, XEXP (rtl, 0));
621 }
622 \f
623 /* Return 1 if the operands of a move are ok. */
624
625 int
626 ia64_move_ok (rtx dst, rtx src)
627 {
628 /* If we're under init_recog_no_volatile, we'll not be able to use
629 memory_operand. So check the code directly and don't worry about
630 the validity of the underlying address, which should have been
631 checked elsewhere anyway. */
632 if (GET_CODE (dst) != MEM)
633 return 1;
634 if (GET_CODE (src) == MEM)
635 return 0;
636 if (register_operand (src, VOIDmode))
637 return 1;
638
639 /* Otherwise, this must be a constant, and it must be either 0, 0.0 or 1.0. */
640 if (INTEGRAL_MODE_P (GET_MODE (dst)))
641 return src == const0_rtx;
642 else
643 return satisfies_constraint_G (src);
644 }
645
646 /* Return 1 if the operands are ok for a floating point load pair. */
647
648 int
649 ia64_load_pair_ok (rtx dst, rtx src)
650 {
651 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
652 return 0;
653 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
654 return 0;
655 switch (GET_CODE (XEXP (src, 0)))
656 {
657 case REG:
658 case POST_INC:
659 break;
660 case POST_DEC:
661 return 0;
662 case POST_MODIFY:
663 {
664 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
665
666 if (GET_CODE (adjust) != CONST_INT
667 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
668 return 0;
669 }
670 break;
671 default:
672 abort ();
673 }
674 return 1;
675 }
676
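/* Return nonzero if exactly one of OP1 and OP2 is a base register operand.  */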
677 int
678 addp4_optimize_ok (rtx op1, rtx op2)
679 {
680 return (basereg_operand (op1, GET_MODE (op1)) !=
681 basereg_operand (op2, GET_MODE (op2)));
682 }
683
684 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
685 Return the length of the field, or <= 0 on failure. */
686
687 int
688 ia64_depz_field_mask (rtx rop, rtx rshift)
689 {
690 unsigned HOST_WIDE_INT op = INTVAL (rop);
691 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
692
693 /* Get rid of the zero bits we're shifting in. */
694 op >>= shift;
695
696 /* We must now have a solid block of 1's at bit 0. */
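/* For example, a shifted mask of 0xff gives exact_log2 (0x100) == 8, while
   a non-contiguous mask makes exact_log2 return -1, signalling failure.  */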
697 return exact_log2 (op + 1);
698 }
699
700 /* Return the TLS model to use for ADDR. */
701
702 static enum tls_model
703 tls_symbolic_operand_type (rtx addr)
704 {
705 enum tls_model tls_kind = 0;
706
707 if (GET_CODE (addr) == CONST)
708 {
709 if (GET_CODE (XEXP (addr, 0)) == PLUS
710 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
711 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
712 }
713 else if (GET_CODE (addr) == SYMBOL_REF)
714 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
715
716 return tls_kind;
717 }
718
719 /* Return true if X is a constant that is valid for some immediate
720 field in an instruction. */
721
722 bool
723 ia64_legitimate_constant_p (rtx x)
724 {
725 switch (GET_CODE (x))
726 {
727 case CONST_INT:
728 case LABEL_REF:
729 return true;
730
731 case CONST_DOUBLE:
732 if (GET_MODE (x) == VOIDmode)
733 return true;
734 return satisfies_constraint_G (x);
735
736 case CONST:
737 case SYMBOL_REF:
738 /* ??? Short term workaround for PR 28490. We must make the code here
739 match the code in ia64_expand_move and move_operand, even though they
740 are both technically wrong. */
741 if (tls_symbolic_operand_type (x) == 0)
742 {
743 HOST_WIDE_INT addend = 0;
744 rtx op = x;
745
746 if (GET_CODE (op) == CONST
747 && GET_CODE (XEXP (op, 0)) == PLUS
748 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
749 {
750 addend = INTVAL (XEXP (XEXP (op, 0), 1));
751 op = XEXP (XEXP (op, 0), 0);
752 }
753
754 if (any_offset_symbol_operand (op, GET_MODE (op))
755 || function_operand (op, GET_MODE (op)))
756 return true;
757 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
758 return (addend & 0x3fff) == 0;
759 return false;
760 }
761 return false;
762
763 case CONST_VECTOR:
764 {
765 enum machine_mode mode = GET_MODE (x);
766
767 if (mode == V2SFmode)
768 return satisfies_constraint_Y (x);
769
770 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
771 && GET_MODE_SIZE (mode) <= 8);
772 }
773
774 default:
775 return false;
776 }
777 }
778
779 /* Don't allow TLS addresses to get spilled to memory. */
780
781 static bool
782 ia64_cannot_force_const_mem (rtx x)
783 {
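/* RFmode values are never forced into the constant pool.  */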
784 if (GET_MODE (x) == RFmode)
785 return true;
786 return tls_symbolic_operand_type (x) != 0;
787 }
788
789 /* Expand a symbolic constant load. */
790
791 bool
792 ia64_expand_load_address (rtx dest, rtx src)
793 {
794 gcc_assert (GET_CODE (dest) == REG);
795
796 /* ILP32 mode still loads 64 bits of data from the GOT. This avoids
797 having to pointer-extend the value afterward. Other forms of address
798 computation below are also more natural to compute as 64-bit quantities.
799 If we've been given an SImode destination register, change it. */
800 if (GET_MODE (dest) != Pmode)
801 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
802 byte_lowpart_offset (Pmode, GET_MODE (dest)));
803
804 if (TARGET_NO_PIC)
805 return false;
806 if (small_addr_symbolic_operand (src, VOIDmode))
807 return false;
808
809 if (TARGET_AUTO_PIC)
810 emit_insn (gen_load_gprel64 (dest, src));
811 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
812 emit_insn (gen_load_fptr (dest, src));
813 else if (sdata_symbolic_operand (src, VOIDmode))
814 emit_insn (gen_load_gprel (dest, src));
815 else
816 {
817 HOST_WIDE_INT addend = 0;
818 rtx tmp;
819
820 /* We did split constant offsets in ia64_expand_move, and we did try
821 to keep them split in move_operand, but we also allowed reload to
822 rematerialize arbitrary constants rather than spill the value to
823 the stack and reload it. So we have to be prepared here to split
824 them apart again. */
825 if (GET_CODE (src) == CONST)
826 {
827 HOST_WIDE_INT hi, lo;
828
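/* Sign-extend the low 14 bits of the offset into LO and fold the remainder
   back into the symbolic part, mirroring the split done in ia64_expand_move.  */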
829 hi = INTVAL (XEXP (XEXP (src, 0), 1));
830 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
831 hi = hi - lo;
832
833 if (lo != 0)
834 {
835 addend = lo;
836 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
837 }
838 }
839
840 tmp = gen_rtx_HIGH (Pmode, src);
841 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
842 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
843
844 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
845 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
846
847 if (addend)
848 {
849 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
850 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
851 }
852 }
853
854 return true;
855 }
856
857 static GTY(()) rtx gen_tls_tga;
858 static rtx
859 gen_tls_get_addr (void)
860 {
861 if (!gen_tls_tga)
862 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
863 return gen_tls_tga;
864 }
865
866 static GTY(()) rtx thread_pointer_rtx;
867 static rtx
868 gen_thread_pointer (void)
869 {
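/* The thread pointer lives in r13 on IA-64.  */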
870 if (!thread_pointer_rtx)
871 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
872 return thread_pointer_rtx;
873 }
874
875 static rtx
876 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
877 rtx orig_op1, HOST_WIDE_INT addend)
878 {
879 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
880 rtx orig_op0 = op0;
881 HOST_WIDE_INT addend_lo, addend_hi;
882
883 switch (tls_kind)
884 {
885 case TLS_MODEL_GLOBAL_DYNAMIC:
886 start_sequence ();
887
888 tga_op1 = gen_reg_rtx (Pmode);
889 emit_insn (gen_load_dtpmod (tga_op1, op1));
890
891 tga_op2 = gen_reg_rtx (Pmode);
892 emit_insn (gen_load_dtprel (tga_op2, op1));
893
894 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
895 LCT_CONST, Pmode, 2, tga_op1,
896 Pmode, tga_op2, Pmode);
897
898 insns = get_insns ();
899 end_sequence ();
900
901 if (GET_MODE (op0) != Pmode)
902 op0 = tga_ret;
903 emit_libcall_block (insns, op0, tga_ret, op1);
904 break;
905
906 case TLS_MODEL_LOCAL_DYNAMIC:
907 /* ??? This isn't the completely proper way to do local-dynamic.
908 If the call to __tls_get_addr is used only by a single symbol,
909 then we should (somehow) move the dtprel to the second arg
910 to avoid the extra add. */
911 start_sequence ();
912
913 tga_op1 = gen_reg_rtx (Pmode);
914 emit_insn (gen_load_dtpmod (tga_op1, op1));
915
916 tga_op2 = const0_rtx;
917
918 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
919 LCT_CONST, Pmode, 2, tga_op1,
920 Pmode, tga_op2, Pmode);
921
922 insns = get_insns ();
923 end_sequence ();
924
925 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
926 UNSPEC_LD_BASE);
927 tmp = gen_reg_rtx (Pmode);
928 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
929
930 if (!register_operand (op0, Pmode))
931 op0 = gen_reg_rtx (Pmode);
932 if (TARGET_TLS64)
933 {
934 emit_insn (gen_load_dtprel (op0, op1));
935 emit_insn (gen_adddi3 (op0, tmp, op0));
936 }
937 else
938 emit_insn (gen_add_dtprel (op0, op1, tmp));
939 break;
940
941 case TLS_MODEL_INITIAL_EXEC:
942 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
943 addend_hi = addend - addend_lo;
944
945 op1 = plus_constant (op1, addend_hi);
946 addend = addend_lo;
947
948 tmp = gen_reg_rtx (Pmode);
949 emit_insn (gen_load_tprel (tmp, op1));
950
951 if (!register_operand (op0, Pmode))
952 op0 = gen_reg_rtx (Pmode);
953 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
954 break;
955
956 case TLS_MODEL_LOCAL_EXEC:
957 if (!register_operand (op0, Pmode))
958 op0 = gen_reg_rtx (Pmode);
959
960 op1 = orig_op1;
961 addend = 0;
962 if (TARGET_TLS64)
963 {
964 emit_insn (gen_load_tprel (op0, op1));
965 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
966 }
967 else
968 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
969 break;
970
971 default:
972 gcc_unreachable ();
973 }
974
975 if (addend)
976 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
977 orig_op0, 1, OPTAB_DIRECT);
978 if (orig_op0 == op0)
979 return NULL_RTX;
980 if (GET_MODE (orig_op0) == Pmode)
981 return op0;
982 return gen_lowpart (GET_MODE (orig_op0), op0);
983 }
984
985 rtx
986 ia64_expand_move (rtx op0, rtx op1)
987 {
988 enum machine_mode mode = GET_MODE (op0);
989
990 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
991 op1 = force_reg (mode, op1);
992
993 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
994 {
995 HOST_WIDE_INT addend = 0;
996 enum tls_model tls_kind;
997 rtx sym = op1;
998
999 if (GET_CODE (op1) == CONST
1000 && GET_CODE (XEXP (op1, 0)) == PLUS
1001 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1002 {
1003 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1004 sym = XEXP (XEXP (op1, 0), 0);
1005 }
1006
1007 tls_kind = tls_symbolic_operand_type (sym);
1008 if (tls_kind)
1009 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1010
1011 if (any_offset_symbol_operand (sym, mode))
1012 addend = 0;
1013 else if (aligned_offset_symbol_operand (sym, mode))
1014 {
1015 HOST_WIDE_INT addend_lo, addend_hi;
1016
1017 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1018 addend_hi = addend - addend_lo;
1019
1020 if (addend_lo != 0)
1021 {
1022 op1 = plus_constant (sym, addend_hi);
1023 addend = addend_lo;
1024 }
1025 else
1026 addend = 0;
1027 }
1028 else
1029 op1 = sym;
1030
1031 if (reload_completed)
1032 {
1033 /* We really should have taken care of this offset earlier. */
1034 gcc_assert (addend == 0);
1035 if (ia64_expand_load_address (op0, op1))
1036 return NULL_RTX;
1037 }
1038
1039 if (addend)
1040 {
1041 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1042
1043 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1044
1045 op1 = expand_simple_binop (mode, PLUS, subtarget,
1046 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1047 if (op0 == op1)
1048 return NULL_RTX;
1049 }
1050 }
1051
1052 return op1;
1053 }
1054
1055 /* Split a move from OP1 to OP0 conditional on COND. */
1056
1057 void
1058 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1059 {
1060 rtx insn, first = get_last_insn ();
1061
1062 emit_move_insn (op0, op1);
1063
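/* Predicate every instruction that the move expanded into on COND.  */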
1064 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1065 if (INSN_P (insn))
1066 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1067 PATTERN (insn));
1068 }
1069
1070 /* Split a post-reload TImode or TFmode reference into two DImode
1071 components. This is made extra difficult by the fact that we do
1072 not get any scratch registers to work with, because reload cannot
1073 be prevented from giving us a scratch that overlaps the register
1074 pair involved. So instead, when addressing memory, we tweak the
1075 pointer register up and back down with POST_INCs. Or up and not
1076 back down when we can get away with it.
1077
1078 REVERSED is true when the loads must be done in reversed order
1079 (high word first) for correctness. DEAD is true when the pointer
1080 dies with the second insn we generate and therefore the second
1081 address must not carry a postmodify.
1082
1083 May return an insn which is to be emitted after the moves. */
1084
1085 static rtx
1086 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1087 {
1088 rtx fixup = 0;
1089
1090 switch (GET_CODE (in))
1091 {
1092 case REG:
1093 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1094 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1095 break;
1096
1097 case CONST_INT:
1098 case CONST_DOUBLE:
1099 /* Cannot occur reversed. */
1100 gcc_assert (!reversed);
1101
1102 if (GET_MODE (in) != TFmode)
1103 split_double (in, &out[0], &out[1]);
1104 else
1105 /* split_double does not understand how to split a TFmode
1106 quantity into a pair of DImode constants. */
1107 {
1108 REAL_VALUE_TYPE r;
1109 unsigned HOST_WIDE_INT p[2];
1110 long l[4]; /* TFmode is 128 bits */
1111
1112 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1113 real_to_target (l, &r, TFmode);
1114
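/* Glue the four 32-bit words from real_to_target into two 64-bit halves,
   honoring FLOAT_WORDS_BIG_ENDIAN.  */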
1115 if (FLOAT_WORDS_BIG_ENDIAN)
1116 {
1117 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1118 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1119 }
1120 else
1121 {
1122 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1123 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1124 }
1125 out[0] = GEN_INT (p[0]);
1126 out[1] = GEN_INT (p[1]);
1127 }
1128 break;
1129
1130 case MEM:
1131 {
1132 rtx base = XEXP (in, 0);
1133 rtx offset;
1134
1135 switch (GET_CODE (base))
1136 {
1137 case REG:
1138 if (!reversed)
1139 {
1140 out[0] = adjust_automodify_address
1141 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1142 out[1] = adjust_automodify_address
1143 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1144 }
1145 else
1146 {
1147 /* Reversal requires a pre-increment, which can only
1148 be done as a separate insn. */
1149 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1150 out[0] = adjust_automodify_address
1151 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1152 out[1] = adjust_address (in, DImode, 0);
1153 }
1154 break;
1155
1156 case POST_INC:
1157 gcc_assert (!reversed && !dead);
1158
1159 /* Just do the increment in two steps. */
1160 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1161 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1162 break;
1163
1164 case POST_DEC:
1165 gcc_assert (!reversed && !dead);
1166
1167 /* Add 8, subtract 24. */
1168 base = XEXP (base, 0);
1169 out[0] = adjust_automodify_address
1170 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1171 out[1] = adjust_automodify_address
1172 (in, DImode,
1173 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1174 8);
1175 break;
1176
1177 case POST_MODIFY:
1178 gcc_assert (!reversed && !dead);
1179
1180 /* Extract and adjust the modification. This case is
1181 trickier than the others, because we might have an
1182 index register, or we might have a combined offset that
1183 doesn't fit a signed 9-bit displacement field. We can
1184 assume the incoming expression is already legitimate. */
1185 offset = XEXP (base, 1);
1186 base = XEXP (base, 0);
1187
1188 out[0] = adjust_automodify_address
1189 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1190
1191 if (GET_CODE (XEXP (offset, 1)) == REG)
1192 {
1193 /* Can't adjust the postmodify to match. Emit the
1194 original, then a separate addition insn. */
1195 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1196 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1197 }
1198 else
1199 {
1200 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1201 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1202 {
1203 /* Again the postmodify cannot be made to match,
1204 but in this case it's more efficient to get rid
1205 of the postmodify entirely and fix up with an
1206 add insn. */
1207 out[1] = adjust_automodify_address (in, DImode, base, 8);
1208 fixup = gen_adddi3
1209 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1210 }
1211 else
1212 {
1213 /* Combined offset still fits in the displacement field.
1214 (We cannot overflow it at the high end.) */
1215 out[1] = adjust_automodify_address
1216 (in, DImode, gen_rtx_POST_MODIFY
1217 (Pmode, base, gen_rtx_PLUS
1218 (Pmode, base,
1219 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1220 8);
1221 }
1222 }
1223 break;
1224
1225 default:
1226 gcc_unreachable ();
1227 }
1228 break;
1229 }
1230
1231 default:
1232 gcc_unreachable ();
1233 }
1234
1235 return fixup;
1236 }
1237
1238 /* Split a TImode or TFmode move instruction after reload.
1239 This is used by *movtf_internal and *movti_internal. */
1240 void
1241 ia64_split_tmode_move (rtx operands[])
1242 {
1243 rtx in[2], out[2], insn;
1244 rtx fixup[2];
1245 bool dead = false;
1246 bool reversed = false;
1247
1248 /* It is possible for reload to decide to overwrite a pointer with
1249 the value it points to. In that case we have to do the loads in
1250 the appropriate order so that the pointer is not destroyed too
1251 early. Also we must not generate a postmodify for that second
1252 load, or rws_access_regno will die. */
1253 if (GET_CODE (operands[1]) == MEM
1254 && reg_overlap_mentioned_p (operands[0], operands[1]))
1255 {
1256 rtx base = XEXP (operands[1], 0);
1257 while (GET_CODE (base) != REG)
1258 base = XEXP (base, 0);
1259
1260 if (REGNO (base) == REGNO (operands[0]))
1261 reversed = true;
1262 dead = true;
1263 }
1264 /* Another reason to do the moves in reversed order is if the first
1265 element of the target register pair is also the second element of
1266 the source register pair. */
1267 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1268 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1269 reversed = true;
1270
1271 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1272 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1273
1274 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1275 if (GET_CODE (EXP) == MEM \
1276 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1277 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1278 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1279 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1280 XEXP (XEXP (EXP, 0), 0), \
1281 REG_NOTES (INSN))
1282
1283 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1284 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1285 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1286
1287 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1288 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1289 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1290
1291 if (fixup[0])
1292 emit_insn (fixup[0]);
1293 if (fixup[1])
1294 emit_insn (fixup[1]);
1295
1296 #undef MAYBE_ADD_REG_INC_NOTE
1297 }
1298
1299 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1300 through memory plus an extra GR scratch register. Except that you can
1301 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1302 SECONDARY_RELOAD_CLASS, but not both.
1303
1304 We got into problems in the first place by allowing a construct like
1305 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1306 This solution attempts to prevent this situation from occurring. When
1307 we see something like the above, we spill the inner register to memory. */
1308
1309 static rtx
1310 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1311 {
1312 if (GET_CODE (in) == SUBREG
1313 && GET_MODE (SUBREG_REG (in)) == TImode
1314 && GET_CODE (SUBREG_REG (in)) == REG)
1315 {
1316 rtx memt = assign_stack_temp (TImode, 16, 0);
1317 emit_move_insn (memt, SUBREG_REG (in));
1318 return adjust_address (memt, mode, 0);
1319 }
1320 else if (force && GET_CODE (in) == REG)
1321 {
1322 rtx memx = assign_stack_temp (mode, 16, 0);
1323 emit_move_insn (memx, in);
1324 return memx;
1325 }
1326 else
1327 return in;
1328 }
1329
1330 /* Expand the movxf or movrf pattern (MODE says which) with the given
1331 OPERANDS, returning true if the pattern should then invoke
1332 DONE. */
1333
1334 bool
1335 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1336 {
1337 rtx op0 = operands[0];
1338
1339 if (GET_CODE (op0) == SUBREG)
1340 op0 = SUBREG_REG (op0);
1341
1342 /* We must support XFmode loads into general registers for stdarg/vararg,
1343 unprototyped calls, and a rare case where a long double is passed as
1344 an argument after a float HFA fills the FP registers. We split them into
1345 DImode loads for convenience. We also need to support XFmode stores
1346 for the last case. This case does not happen for stdarg/vararg routines,
1347 because we do a block store to memory of unnamed arguments. */
1348
1349 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1350 {
1351 rtx out[2];
1352
1353 /* We're hoping to transform everything that deals with XFmode
1354 quantities and GR registers early in the compiler. */
1355 gcc_assert (can_create_pseudo_p ());
1356
1357 /* Struct to register can just use TImode instead. */
1358 if ((GET_CODE (operands[1]) == SUBREG
1359 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1360 || (GET_CODE (operands[1]) == REG
1361 && GR_REGNO_P (REGNO (operands[1]))))
1362 {
1363 rtx op1 = operands[1];
1364
1365 if (GET_CODE (op1) == SUBREG)
1366 op1 = SUBREG_REG (op1);
1367 else
1368 op1 = gen_rtx_REG (TImode, REGNO (op1));
1369
1370 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1371 return true;
1372 }
1373
1374 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1375 {
1376 /* Don't word-swap when reading in the constant. */
1377 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1378 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1379 0, mode));
1380 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1381 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1382 0, mode));
1383 return true;
1384 }
1385
1386 /* If the quantity is in a register not known to be GR, spill it. */
1387 if (register_operand (operands[1], mode))
1388 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1389
1390 gcc_assert (GET_CODE (operands[1]) == MEM);
1391
1392 /* Don't word-swap when reading in the value. */
1393 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1394 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1395
1396 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1397 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1398 return true;
1399 }
1400
1401 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1402 {
1403 /* We're hoping to transform everything that deals with XFmode
1404 quantities and GR registers early in the compiler. */
1405 gcc_assert (can_create_pseudo_p ());
1406
1407 /* Op0 can't be a GR_REG here, as that case is handled above.
1408 If op0 is a register, then we spill op1, so that we now have a
1409 MEM operand. This requires creating an XFmode subreg of a TImode reg
1410 to force the spill. */
1411 if (register_operand (operands[0], mode))
1412 {
1413 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1414 op1 = gen_rtx_SUBREG (mode, op1, 0);
1415 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1416 }
1417
1418 else
1419 {
1420 rtx in[2];
1421
1422 gcc_assert (GET_CODE (operands[0]) == MEM);
1423
1424 /* Don't word-swap when writing out the value. */
1425 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1426 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1427
1428 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1429 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1430 return true;
1431 }
1432 }
1433
1434 if (!reload_in_progress && !reload_completed)
1435 {
1436 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1437
1438 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1439 {
1440 rtx memt, memx, in = operands[1];
1441 if (CONSTANT_P (in))
1442 in = validize_mem (force_const_mem (mode, in));
1443 if (GET_CODE (in) == MEM)
1444 memt = adjust_address (in, TImode, 0);
1445 else
1446 {
1447 memt = assign_stack_temp (TImode, 16, 0);
1448 memx = adjust_address (memt, mode, 0);
1449 emit_move_insn (memx, in);
1450 }
1451 emit_move_insn (op0, memt);
1452 return true;
1453 }
1454
1455 if (!ia64_move_ok (operands[0], operands[1]))
1456 operands[1] = force_reg (mode, operands[1]);
1457 }
1458
1459 return false;
1460 }
1461
1462 /* Emit comparison instruction if necessary, returning the expression
1463 that holds the compare result in the proper mode. */
1464
1465 static GTY(()) rtx cmptf_libfunc;
1466
1467 rtx
1468 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1469 {
1470 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1471 rtx cmp;
1472
1473 /* If we have a BImode input, then we already have a compare result, and
1474 do not need to emit another comparison. */
1475 if (GET_MODE (op0) == BImode)
1476 {
1477 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1478 cmp = op0;
1479 }
1480 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1481 magic number as its third argument, which indicates what to do.
1482 The return value is an integer to be compared against zero. */
1483 else if (GET_MODE (op0) == TFmode)
1484 {
1485 enum qfcmp_magic {
1486 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1487 QCMP_UNORD = 2,
1488 QCMP_EQ = 4,
1489 QCMP_LT = 8,
1490 QCMP_GT = 16
1491 } magic;
1492 enum rtx_code ncode;
1493 rtx ret, insns;
1494
1495 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1496 switch (code)
1497 {
1498 /* 1 = equal, 0 = not equal. Equality operators do
1499 not raise FP_INVALID when given an SNaN operand. */
1500 case EQ: magic = QCMP_EQ; ncode = NE; break;
1501 case NE: magic = QCMP_EQ; ncode = EQ; break;
1502 /* isunordered() from C99. */
1503 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1504 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1505 /* Relational operators raise FP_INVALID when given
1506 an SNaN operand. */
1507 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1508 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1509 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1510 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1511 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1512 Expanders for buneq etc. would have to be added to ia64.md
1513 for this to be useful. */
1514 default: gcc_unreachable ();
1515 }
1516
1517 start_sequence ();
1518
1519 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1520 op0, TFmode, op1, TFmode,
1521 GEN_INT (magic), DImode);
1522 cmp = gen_reg_rtx (BImode);
1523 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1524 gen_rtx_fmt_ee (ncode, BImode,
1525 ret, const0_rtx)));
1526
1527 insns = get_insns ();
1528 end_sequence ();
1529
1530 emit_libcall_block (insns, cmp, cmp,
1531 gen_rtx_fmt_ee (code, BImode, op0, op1));
1532 code = NE;
1533 }
1534 else
1535 {
1536 cmp = gen_reg_rtx (BImode);
1537 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1538 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1539 code = NE;
1540 }
1541
1542 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1543 }
1544
1545 /* Generate an integral vector comparison. Return true if the condition has
1546 been reversed, and so the sense of the comparison should be inverted. */
1547
1548 static bool
1549 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1550 rtx dest, rtx op0, rtx op1)
1551 {
1552 bool negate = false;
1553 rtx x;
1554
1555 /* Canonicalize the comparison to EQ, GT, GTU. */
1556 switch (code)
1557 {
1558 case EQ:
1559 case GT:
1560 case GTU:
1561 break;
1562
1563 case NE:
1564 case LE:
1565 case LEU:
1566 code = reverse_condition (code);
1567 negate = true;
1568 break;
1569
1570 case GE:
1571 case GEU:
1572 code = reverse_condition (code);
1573 negate = true;
1574 /* FALLTHRU */
1575
1576 case LT:
1577 case LTU:
1578 code = swap_condition (code);
1579 x = op0, op0 = op1, op1 = x;
1580 break;
1581
1582 default:
1583 gcc_unreachable ();
1584 }
1585
1586 /* Unsigned parallel compare is not supported by the hardware. Play some
1587 tricks to turn this into a signed comparison against 0. */
1588 if (code == GTU)
1589 {
1590 switch (mode)
1591 {
1592 case V2SImode:
1593 {
1594 rtx t1, t2, mask;
1595
1596 /* Perform a parallel modulo subtraction. */
1597 t1 = gen_reg_rtx (V2SImode);
1598 emit_insn (gen_subv2si3 (t1, op0, op1));
1599
1600 /* Extract the original sign bit of op0. */
1601 mask = GEN_INT (-0x80000000);
1602 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1603 mask = force_reg (V2SImode, mask);
1604 t2 = gen_reg_rtx (V2SImode);
1605 emit_insn (gen_andv2si3 (t2, op0, mask));
1606
1607 /* XOR it back into the result of the subtraction. This results
1608 in the sign bit set iff we saw unsigned underflow. */
1609 x = gen_reg_rtx (V2SImode);
1610 emit_insn (gen_xorv2si3 (x, t1, t2));
1611
1612 code = GT;
1613 op0 = x;
1614 op1 = CONST0_RTX (mode);
1615 }
1616 break;
1617
1618 case V8QImode:
1619 case V4HImode:
1620 /* Perform a parallel unsigned saturating subtraction. */
1621 x = gen_reg_rtx (mode);
1622 emit_insn (gen_rtx_SET (VOIDmode, x,
1623 gen_rtx_US_MINUS (mode, op0, op1)));
1624
1625 code = EQ;
1626 op0 = x;
1627 op1 = CONST0_RTX (mode);
1628 negate = !negate;
1629 break;
1630
1631 default:
1632 gcc_unreachable ();
1633 }
1634 }
1635
1636 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1637 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1638
1639 return negate;
1640 }
1641
1642 /* Emit an integral vector conditional move. */
1643
1644 void
1645 ia64_expand_vecint_cmov (rtx operands[])
1646 {
1647 enum machine_mode mode = GET_MODE (operands[0]);
1648 enum rtx_code code = GET_CODE (operands[3]);
1649 bool negate;
1650 rtx cmp, x, ot, of;
1651
1652 cmp = gen_reg_rtx (mode);
1653 negate = ia64_expand_vecint_compare (code, mode, cmp,
1654 operands[4], operands[5]);
1655
1656 ot = operands[1+negate];
1657 of = operands[2-negate];
1658
1659 if (ot == CONST0_RTX (mode))
1660 {
1661 if (of == CONST0_RTX (mode))
1662 {
1663 emit_move_insn (operands[0], ot);
1664 return;
1665 }
1666
1667 x = gen_rtx_NOT (mode, cmp);
1668 x = gen_rtx_AND (mode, x, of);
1669 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1670 }
1671 else if (of == CONST0_RTX (mode))
1672 {
1673 x = gen_rtx_AND (mode, cmp, ot);
1674 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1675 }
1676 else
1677 {
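/* Build (cmp & on-true) | (~cmp & on-false).  */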
1678 rtx t, f;
1679
1680 t = gen_reg_rtx (mode);
1681 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1682 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1683
1684 f = gen_reg_rtx (mode);
1685 x = gen_rtx_NOT (mode, cmp);
1686 x = gen_rtx_AND (mode, x, operands[2-negate]);
1687 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1688
1689 x = gen_rtx_IOR (mode, t, f);
1690 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1691 }
1692 }
1693
1694 /* Emit an integral vector min or max operation. Return true if all done. */
1695
1696 bool
1697 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1698 rtx operands[])
1699 {
1700 rtx xops[6];
1701
1702 /* These four combinations are supported directly. */
1703 if (mode == V8QImode && (code == UMIN || code == UMAX))
1704 return false;
1705 if (mode == V4HImode && (code == SMIN || code == SMAX))
1706 return false;
1707
1708 /* This combination can be implemented with only saturating subtraction. */
1709 if (mode == V4HImode && code == UMAX)
1710 {
1711 rtx x, tmp = gen_reg_rtx (mode);
1712
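/* Unsigned max is (a - b saturating at zero) + b.  */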
1713 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1714 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1715
1716 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1717 return true;
1718 }
1719
1720 /* Everything else is implemented via vector comparisons. */
1721 xops[0] = operands[0];
1722 xops[4] = xops[1] = operands[1];
1723 xops[5] = xops[2] = operands[2];
1724
1725 switch (code)
1726 {
1727 case UMIN:
1728 code = LTU;
1729 break;
1730 case UMAX:
1731 code = GTU;
1732 break;
1733 case SMIN:
1734 code = LT;
1735 break;
1736 case SMAX:
1737 code = GT;
1738 break;
1739 default:
1740 gcc_unreachable ();
1741 }
1742 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1743
1744 ia64_expand_vecint_cmov (xops);
1745 return true;
1746 }
1747
1748 /* Emit an integral vector widening sum operation. */
1749
1750 void
1751 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1752 {
1753 rtx l, h, x, s;
1754 enum machine_mode wmode, mode;
1755 rtx (*unpack_l) (rtx, rtx, rtx);
1756 rtx (*unpack_h) (rtx, rtx, rtx);
1757 rtx (*plus) (rtx, rtx, rtx);
1758
1759 wmode = GET_MODE (operands[0]);
1760 mode = GET_MODE (operands[1]);
1761
1762 switch (mode)
1763 {
1764 case V8QImode:
1765 unpack_l = gen_unpack1_l;
1766 unpack_h = gen_unpack1_h;
1767 plus = gen_addv4hi3;
1768 break;
1769 case V4HImode:
1770 unpack_l = gen_unpack2_l;
1771 unpack_h = gen_unpack2_h;
1772 plus = gen_addv2si3;
1773 break;
1774 default:
1775 gcc_unreachable ();
1776 }
1777
1778 /* Fill in x with the sign extension of each element in op1. */
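/* For the signed case, the LT compare below leaves, per element, an
all-ones mask for negative elements and zero otherwise; unpacking
OPERANDS[1] against that mask supplies the high half of each widened
element (sign extension), while unpacking against zero gives zero
extension. */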
1779 if (unsignedp)
1780 x = CONST0_RTX (mode);
1781 else
1782 {
1783 bool neg;
1784
1785 x = gen_reg_rtx (mode);
1786
1787 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1788 CONST0_RTX (mode));
1789 gcc_assert (!neg);
1790 }
1791
1792 l = gen_reg_rtx (wmode);
1793 h = gen_reg_rtx (wmode);
1794 s = gen_reg_rtx (wmode);
1795
1796 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1797 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1798 emit_insn (plus (s, l, operands[2]));
1799 emit_insn (plus (operands[0], h, s));
1800 }
1801
1802 /* Emit a signed or unsigned V8QI dot product operation. */
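/* Rough shape of the expansion below: widen each V8QI operand into low and
high V4HI halves (zero- or sign-extended via X1/X2), form four V2SI
partial products with pmpy2, and then fold them, together with the
accumulator in OPERANDS[3], into OPERANDS[0] with four vector adds. */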
1803
1804 void
1805 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1806 {
1807 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1808
1809 /* Fill in x1 and x2 with the sign extension of each element. */
1810 if (unsignedp)
1811 x1 = x2 = CONST0_RTX (V8QImode);
1812 else
1813 {
1814 bool neg;
1815
1816 x1 = gen_reg_rtx (V8QImode);
1817 x2 = gen_reg_rtx (V8QImode);
1818
1819 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1820 CONST0_RTX (V8QImode));
1821 gcc_assert (!neg);
1822 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1823 CONST0_RTX (V8QImode));
1824 gcc_assert (!neg);
1825 }
1826
1827 l1 = gen_reg_rtx (V4HImode);
1828 l2 = gen_reg_rtx (V4HImode);
1829 h1 = gen_reg_rtx (V4HImode);
1830 h2 = gen_reg_rtx (V4HImode);
1831
1832 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1833 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1834 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1835 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1836
1837 p1 = gen_reg_rtx (V2SImode);
1838 p2 = gen_reg_rtx (V2SImode);
1839 p3 = gen_reg_rtx (V2SImode);
1840 p4 = gen_reg_rtx (V2SImode);
1841 emit_insn (gen_pmpy2_r (p1, l1, l2));
1842 emit_insn (gen_pmpy2_l (p2, l1, l2));
1843 emit_insn (gen_pmpy2_r (p3, h1, h2));
1844 emit_insn (gen_pmpy2_l (p4, h1, h2));
1845
1846 s1 = gen_reg_rtx (V2SImode);
1847 s2 = gen_reg_rtx (V2SImode);
1848 s3 = gen_reg_rtx (V2SImode);
1849 emit_insn (gen_addv2si3 (s1, p1, p2));
1850 emit_insn (gen_addv2si3 (s2, p3, p4));
1851 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1852 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1853 }
1854
1855 /* Emit the appropriate sequence for a call. */
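/* Roughly: when the gp is known to be constant (TARGET_NO_PIC or
TARGET_AUTO_PIC) the *_nogp call patterns are used; otherwise the *_gp
patterns are used and the pic register is recorded in
CALL_INSN_FUNCTION_USAGE so the gp setup around the call stays live. */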
1856
1857 void
1858 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1859 int sibcall_p)
1860 {
1861 rtx insn, b0;
1862
1863 addr = XEXP (addr, 0);
1864 addr = convert_memory_address (DImode, addr);
1865 b0 = gen_rtx_REG (DImode, R_BR (0));
1866
1867 /* ??? Should do this for functions known to bind local too. */
1868 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1869 {
1870 if (sibcall_p)
1871 insn = gen_sibcall_nogp (addr);
1872 else if (! retval)
1873 insn = gen_call_nogp (addr, b0);
1874 else
1875 insn = gen_call_value_nogp (retval, addr, b0);
1876 insn = emit_call_insn (insn);
1877 }
1878 else
1879 {
1880 if (sibcall_p)
1881 insn = gen_sibcall_gp (addr);
1882 else if (! retval)
1883 insn = gen_call_gp (addr, b0);
1884 else
1885 insn = gen_call_value_gp (retval, addr, b0);
1886 insn = emit_call_insn (insn);
1887
1888 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1889 }
1890
1891 if (sibcall_p)
1892 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1893 }
1894
1895 static void
1896 reg_emitted (enum ia64_frame_regs r)
1897 {
1898 if (emitted_frame_related_regs[r] == 0)
1899 emitted_frame_related_regs[r] = current_frame_info.r[r];
1900 else
1901 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1902 }
1903
1904 static int
1905 get_reg (enum ia64_frame_regs r)
1906 {
1907 reg_emitted (r);
1908 return current_frame_info.r[r];
1909 }
1910
1911 static bool
1912 is_emitted (int regno)
1913 {
1914 enum ia64_frame_regs r;
1915
1916 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1917 if (emitted_frame_related_regs[r] == regno)
1918 return true;
1919 return false;
1920 }
1921
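/* Reload the global pointer (gp) after it may have been clobbered, e.g. by
a call through a function descriptor: use the prologue's save register if
one was allocated, otherwise fetch the value from its spill slot relative
to the frame or stack pointer. */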
1922 void
1923 ia64_reload_gp (void)
1924 {
1925 rtx tmp;
1926
1927 if (current_frame_info.r[reg_save_gp])
1928 {
1929 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1930 }
1931 else
1932 {
1933 HOST_WIDE_INT offset;
1934 rtx offset_r;
1935
1936 offset = (current_frame_info.spill_cfa_off
1937 + current_frame_info.spill_size);
1938 if (frame_pointer_needed)
1939 {
1940 tmp = hard_frame_pointer_rtx;
1941 offset = -offset;
1942 }
1943 else
1944 {
1945 tmp = stack_pointer_rtx;
1946 offset = current_frame_info.total_size - offset;
1947 }
1948
1949 offset_r = GEN_INT (offset);
1950 if (satisfies_constraint_I (offset_r))
1951 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1952 else
1953 {
1954 emit_move_insn (pic_offset_table_rtx, offset_r);
1955 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1956 pic_offset_table_rtx, tmp));
1957 }
1958
1959 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1960 }
1961
1962 emit_move_insn (pic_offset_table_rtx, tmp);
1963 }
1964
1965 void
1966 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1967 rtx scratch_b, int noreturn_p, int sibcall_p)
1968 {
1969 rtx insn;
1970 bool is_desc = false;
1971
1972 /* If we find we're calling through a register, then we're actually
1973 calling through a descriptor, so load up the values. */
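/* In the usual IA-64 software conventions a function descriptor is a pair
of 8-byte words: the entry address followed by the gp value; the two
post-increment loads below fetch exactly those two words. */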
1974 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1975 {
1976 rtx tmp;
1977 bool addr_dead_p;
1978
1979 /* ??? We are currently constrained to *not* use peep2, because
1980 we can legitimately change the global lifetime of the GP
1981 (in the form of killing where previously live). This is
1982 because a call through a descriptor doesn't use the previous
1983 value of the GP, while a direct call does, and we do not
1984 commit to either form until the split here.
1985
1986 That said, this means that we lack precise life info for
1987 whether ADDR is dead after this call. This is not terribly
1988 important, since we can fix things up essentially for free
1989 with the POST_DEC below, but it's nice to not use it when we
1990 can immediately tell it's not necessary. */
1991 addr_dead_p = ((noreturn_p || sibcall_p
1992 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1993 REGNO (addr)))
1994 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1995
1996 /* Load the code address into scratch_b. */
1997 tmp = gen_rtx_POST_INC (Pmode, addr);
1998 tmp = gen_rtx_MEM (Pmode, tmp);
1999 emit_move_insn (scratch_r, tmp);
2000 emit_move_insn (scratch_b, scratch_r);
2001
2002 /* Load the GP address. If ADDR is not dead here, then we must
2003 revert the change made above via the POST_INCREMENT. */
2004 if (!addr_dead_p)
2005 tmp = gen_rtx_POST_DEC (Pmode, addr);
2006 else
2007 tmp = addr;
2008 tmp = gen_rtx_MEM (Pmode, tmp);
2009 emit_move_insn (pic_offset_table_rtx, tmp);
2010
2011 is_desc = true;
2012 addr = scratch_b;
2013 }
2014
2015 if (sibcall_p)
2016 insn = gen_sibcall_nogp (addr);
2017 else if (retval)
2018 insn = gen_call_value_nogp (retval, addr, retaddr);
2019 else
2020 insn = gen_call_nogp (addr, retaddr);
2021 emit_call_insn (insn);
2022
2023 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2024 ia64_reload_gp ();
2025 }
2026
2027 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2028
2029 This differs from the generic code in that we know about the zero-extending
2030 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2031 also know that ld.acq+cmpxchg.rel equals a full barrier.
2032
2033 The loop we want to generate looks like
2034
2035 cmp_reg = mem;
2036 label:
2037 old_reg = cmp_reg;
2038 new_reg = cmp_reg op val;
2039 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2040 if (cmp_reg != old_reg)
2041 goto label;
2042
2043 Note that we only do the plain load from memory once. Subsequent
2044 iterations use the value loaded by the compare-and-swap pattern. */
2045
2046 void
2047 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2048 rtx old_dst, rtx new_dst)
2049 {
2050 enum machine_mode mode = GET_MODE (mem);
2051 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2052 enum insn_code icode;
2053
2054 /* Special case for using fetchadd. */
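/* fetchadd_operand only accepts the immediates the fetchadd instruction
can encode (+-1, +-4, +-8, +-16), so this fast path is limited to those
increments; everything else falls through to the cmpxchg loop below. */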
2055 if ((mode == SImode || mode == DImode)
2056 && (code == PLUS || code == MINUS)
2057 && fetchadd_operand (val, mode))
2058 {
2059 if (code == MINUS)
2060 val = GEN_INT (-INTVAL (val));
2061
2062 if (!old_dst)
2063 old_dst = gen_reg_rtx (mode);
2064
2065 emit_insn (gen_memory_barrier ());
2066
2067 if (mode == SImode)
2068 icode = CODE_FOR_fetchadd_acq_si;
2069 else
2070 icode = CODE_FOR_fetchadd_acq_di;
2071 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2072
2073 if (new_dst)
2074 {
2075 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2076 true, OPTAB_WIDEN);
2077 if (new_reg != new_dst)
2078 emit_move_insn (new_dst, new_reg);
2079 }
2080 return;
2081 }
2082
2083 /* Because of the volatile mem read, we get an ld.acq, which is the
2084 front half of the full barrier. The end half is the cmpxchg.rel. */
2085 gcc_assert (MEM_VOLATILE_P (mem));
2086
2087 old_reg = gen_reg_rtx (DImode);
2088 cmp_reg = gen_reg_rtx (DImode);
2089 label = gen_label_rtx ();
2090
2091 if (mode != DImode)
2092 {
2093 val = simplify_gen_subreg (DImode, val, mode, 0);
2094 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2095 }
2096 else
2097 emit_move_insn (cmp_reg, mem);
2098
2099 emit_label (label);
2100
2101 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2102 emit_move_insn (old_reg, cmp_reg);
2103 emit_move_insn (ar_ccv, cmp_reg);
2104
2105 if (old_dst)
2106 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2107
2108 new_reg = cmp_reg;
2109 if (code == NOT)
2110 {
2111 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2112 code = AND;
2113 }
2114 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2115 true, OPTAB_DIRECT);
2116
2117 if (mode != DImode)
2118 new_reg = gen_lowpart (mode, new_reg);
2119 if (new_dst)
2120 emit_move_insn (new_dst, new_reg);
2121
2122 switch (mode)
2123 {
2124 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2125 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2126 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2127 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2128 default:
2129 gcc_unreachable ();
2130 }
2131
2132 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2133
2134 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2135 }
2136 \f
2137 /* Begin the assembly file. */
2138
2139 static void
2140 ia64_file_start (void)
2141 {
2142 /* Variable tracking should be run after all optimizations which change order
2143 of insns. It also needs a valid CFG. This can't be done in
2144 ia64_override_options, because flag_var_tracking is finalized after
2145 that. */
2146 ia64_flag_var_tracking = flag_var_tracking;
2147 flag_var_tracking = 0;
2148
2149 default_file_start ();
2150 emit_safe_across_calls ();
2151 }
2152
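/* Emit a .pred.safe_across_calls directive listing the ranges of predicate
registers that are preserved across calls, so the assembler need not
assume they are clobbered. With the usual call-used set this comes out
as something like p1-p5,p16-p63 (illustrative; the exact ranges depend
on call_used_regs). */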
2153 void
2154 emit_safe_across_calls (void)
2155 {
2156 unsigned int rs, re;
2157 int out_state;
2158
2159 rs = 1;
2160 out_state = 0;
2161 while (1)
2162 {
2163 while (rs < 64 && call_used_regs[PR_REG (rs)])
2164 rs++;
2165 if (rs >= 64)
2166 break;
2167 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2168 continue;
2169 if (out_state == 0)
2170 {
2171 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2172 out_state = 1;
2173 }
2174 else
2175 fputc (',', asm_out_file);
2176 if (re == rs + 1)
2177 fprintf (asm_out_file, "p%u", rs);
2178 else
2179 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2180 rs = re + 1;
2181 }
2182 if (out_state)
2183 fputc ('\n', asm_out_file);
2184 }
2185
2186 /* Globalize a declaration. */
2187
2188 static void
2189 ia64_globalize_decl_name (FILE * stream, tree decl)
2190 {
2191 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2192 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2193 if (version_attr)
2194 {
2195 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2196 const char *p = TREE_STRING_POINTER (v);
2197 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2198 }
2199 targetm.asm_out.globalize_label (stream, name);
2200 if (TREE_CODE (decl) == FUNCTION_DECL)
2201 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2202 }
2203
2204 /* Helper function for ia64_compute_frame_size: find an appropriate general
2205 register to spill the special register R to. Registers in GR0 to GR31
2206 already allocated by this routine are recorded in
2207 current_frame_info.gr_used_mask. TRY_LOCALS is true if we should attempt to locate a local regnum. */
2208
2209 static int
2210 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2211 {
2212 int regno;
2213
2214 if (emitted_frame_related_regs[r] != 0)
2215 {
2216 regno = emitted_frame_related_regs[r];
2217 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2218 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2219 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2220 else if (current_function_is_leaf
2221 && regno >= GR_REG (1) && regno <= GR_REG (31))
2222 current_frame_info.gr_used_mask |= 1 << regno;
2223
2224 return regno;
2225 }
2226
2227 /* If this is a leaf function, first try an otherwise unused
2228 call-clobbered register. */
2229 if (current_function_is_leaf)
2230 {
2231 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2232 if (! df_regs_ever_live_p (regno)
2233 && call_used_regs[regno]
2234 && ! fixed_regs[regno]
2235 && ! global_regs[regno]
2236 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2237 && ! is_emitted (regno))
2238 {
2239 current_frame_info.gr_used_mask |= 1 << regno;
2240 return regno;
2241 }
2242 }
2243
2244 if (try_locals)
2245 {
2246 regno = current_frame_info.n_local_regs;
2247 /* If there is a frame pointer, then we can't use loc79, because
2248 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2249 reg_name switching code in ia64_expand_prologue. */
2250 while (regno < (80 - frame_pointer_needed))
2251 if (! is_emitted (LOC_REG (regno++)))
2252 {
2253 current_frame_info.n_local_regs = regno;
2254 return LOC_REG (regno - 1);
2255 }
2256 }
2257
2258 /* Failed to find a general register to spill to. Must use stack. */
2259 return 0;
2260 }
2261
2262 /* In order to make for nice schedules, we try to allocate every temporary
2263 to a different register. We must of course stay away from call-saved,
2264 fixed, and global registers. We must also stay away from registers
2265 allocated in current_frame_info.gr_used_mask, since those include regs
2266 used all through the prologue.
2267
2268 Any register allocated here must be used immediately. The idea is to
2269 aid scheduling, not to solve data flow problems. */
2270
2271 static int last_scratch_gr_reg;
2272
2273 static int
2274 next_scratch_gr_reg (void)
2275 {
2276 int i, regno;
2277
2278 for (i = 0; i < 32; ++i)
2279 {
2280 regno = (last_scratch_gr_reg + i + 1) & 31;
2281 if (call_used_regs[regno]
2282 && ! fixed_regs[regno]
2283 && ! global_regs[regno]
2284 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2285 {
2286 last_scratch_gr_reg = regno;
2287 return regno;
2288 }
2289 }
2290
2291 /* There must be _something_ available. */
2292 gcc_unreachable ();
2293 }
2294
2295 /* Helper function for ia64_compute_frame_size, called through
2296 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2297
2298 static void
2299 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2300 {
2301 unsigned int regno = REGNO (reg);
2302 if (regno < 32)
2303 {
2304 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2305 for (i = 0; i < n; ++i)
2306 current_frame_info.gr_used_mask |= 1 << (regno + i);
2307 }
2308 }
2309
2310
2311 /* Compute the frame layout for the current function and record the results
2312 in current_frame_info. SIZE is the number of bytes of space needed for
2313 local variables. */
2314
2315 static void
2316 ia64_compute_frame_size (HOST_WIDE_INT size)
2317 {
2318 HOST_WIDE_INT total_size;
2319 HOST_WIDE_INT spill_size = 0;
2320 HOST_WIDE_INT extra_spill_size = 0;
2321 HOST_WIDE_INT pretend_args_size;
2322 HARD_REG_SET mask;
2323 int n_spilled = 0;
2324 int spilled_gr_p = 0;
2325 int spilled_fr_p = 0;
2326 unsigned int regno;
2327 int min_regno;
2328 int max_regno;
2329 int i;
2330
2331 if (current_frame_info.initialized)
2332 return;
2333
2334 memset (&current_frame_info, 0, sizeof current_frame_info);
2335 CLEAR_HARD_REG_SET (mask);
2336
2337 /* Don't allocate scratches to the return register. */
2338 diddle_return_value (mark_reg_gr_used_mask, NULL);
2339
2340 /* Don't allocate scratches to the EH scratch registers. */
2341 if (cfun->machine->ia64_eh_epilogue_sp)
2342 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2343 if (cfun->machine->ia64_eh_epilogue_bsp)
2344 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2345
2346 /* Find the size of the register stack frame. We have only 80 local
2347 registers, because we reserve 8 for the inputs and 8 for the
2348 outputs. */
2349
2350 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2351 since we'll be adjusting that down later. */
2352 regno = LOC_REG (78) + ! frame_pointer_needed;
2353 for (; regno >= LOC_REG (0); regno--)
2354 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2355 break;
2356 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2357
2358 /* For functions marked with the syscall_linkage attribute, we must mark
2359 all eight input registers as in use, so that locals aren't visible to
2360 the caller. */
2361
2362 if (cfun->machine->n_varargs > 0
2363 || lookup_attribute ("syscall_linkage",
2364 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2365 current_frame_info.n_input_regs = 8;
2366 else
2367 {
2368 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2369 if (df_regs_ever_live_p (regno))
2370 break;
2371 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2372 }
2373
2374 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2375 if (df_regs_ever_live_p (regno))
2376 break;
2377 i = regno - OUT_REG (0) + 1;
2378
2379 #ifndef PROFILE_HOOK
2380 /* When -p profiling, we need one output register for the mcount argument.
2381 Likewise for -a profiling for the bb_init_func argument. For -ax
2382 profiling, we need two output registers for the two bb_init_trace_func
2383 arguments. */
2384 if (crtl->profile)
2385 i = MAX (i, 1);
2386 #endif
2387 current_frame_info.n_output_regs = i;
2388
2389 /* ??? No rotating register support yet. */
2390 current_frame_info.n_rotate_regs = 0;
2391
2392 /* Discover which registers need spilling, and how much room that
2393 will take. Begin with floating point and general registers,
2394 which will always wind up on the stack. */
2395
2396 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2397 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2398 {
2399 SET_HARD_REG_BIT (mask, regno);
2400 spill_size += 16;
2401 n_spilled += 1;
2402 spilled_fr_p = 1;
2403 }
2404
2405 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2406 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2407 {
2408 SET_HARD_REG_BIT (mask, regno);
2409 spill_size += 8;
2410 n_spilled += 1;
2411 spilled_gr_p = 1;
2412 }
2413
2414 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2415 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2416 {
2417 SET_HARD_REG_BIT (mask, regno);
2418 spill_size += 8;
2419 n_spilled += 1;
2420 }
2421
2422 /* Now come all special registers that might get saved in other
2423 general registers. */
2424
2425 if (frame_pointer_needed)
2426 {
2427 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2428 /* If we did not get a register, then we take LOC79. This is guaranteed
2429 to be free, even if regs_ever_live is already set, because this is
2430 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2431 as we don't count loc79 above. */
2432 if (current_frame_info.r[reg_fp] == 0)
2433 {
2434 current_frame_info.r[reg_fp] = LOC_REG (79);
2435 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2436 }
2437 }
2438
2439 if (! current_function_is_leaf)
2440 {
2441 /* Emit a save of BR0 if we call other functions. Do this even
2442 if this function doesn't return, as EH depends on this to be
2443 able to unwind the stack. */
2444 SET_HARD_REG_BIT (mask, BR_REG (0));
2445
2446 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2447 if (current_frame_info.r[reg_save_b0] == 0)
2448 {
2449 extra_spill_size += 8;
2450 n_spilled += 1;
2451 }
2452
2453 /* Similarly for ar.pfs. */
2454 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2455 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2456 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2457 {
2458 extra_spill_size += 8;
2459 n_spilled += 1;
2460 }
2461
2462 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2463 registers are clobbered, so we fall back to the stack. */
2464 current_frame_info.r[reg_save_gp]
2465 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2466 if (current_frame_info.r[reg_save_gp] == 0)
2467 {
2468 SET_HARD_REG_BIT (mask, GR_REG (1));
2469 spill_size += 8;
2470 n_spilled += 1;
2471 }
2472 }
2473 else
2474 {
2475 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2476 {
2477 SET_HARD_REG_BIT (mask, BR_REG (0));
2478 extra_spill_size += 8;
2479 n_spilled += 1;
2480 }
2481
2482 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2483 {
2484 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2485 current_frame_info.r[reg_save_ar_pfs]
2486 = find_gr_spill (reg_save_ar_pfs, 1);
2487 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2488 {
2489 extra_spill_size += 8;
2490 n_spilled += 1;
2491 }
2492 }
2493 }
2494
2495 /* Unwind descriptor hackery: things are most efficient if we allocate
2496 consecutive GR save registers for RP, PFS, FP in that order. However,
2497 it is absolutely critical that FP get the only hard register that's
2498 guaranteed to be free, so we allocate it first. If all three did
2499 happen to be allocated hard regs, and are consecutive, rearrange them
2500 into the preferred order now.
2501
2502 If we have already emitted code for any of those registers,
2503 then it's already too late to change. */
2504 min_regno = MIN (current_frame_info.r[reg_fp],
2505 MIN (current_frame_info.r[reg_save_b0],
2506 current_frame_info.r[reg_save_ar_pfs]));
2507 max_regno = MAX (current_frame_info.r[reg_fp],
2508 MAX (current_frame_info.r[reg_save_b0],
2509 current_frame_info.r[reg_save_ar_pfs]));
2510 if (min_regno > 0
2511 && min_regno + 2 == max_regno
2512 && (current_frame_info.r[reg_fp] == min_regno + 1
2513 || current_frame_info.r[reg_save_b0] == min_regno + 1
2514 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2515 && (emitted_frame_related_regs[reg_save_b0] == 0
2516 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2517 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2518 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2519 && (emitted_frame_related_regs[reg_fp] == 0
2520 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2521 {
2522 current_frame_info.r[reg_save_b0] = min_regno;
2523 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2524 current_frame_info.r[reg_fp] = min_regno + 2;
2525 }
2526
2527 /* See if we need to store the predicate register block. */
2528 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2529 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2530 break;
2531 if (regno <= PR_REG (63))
2532 {
2533 SET_HARD_REG_BIT (mask, PR_REG (0));
2534 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2535 if (current_frame_info.r[reg_save_pr] == 0)
2536 {
2537 extra_spill_size += 8;
2538 n_spilled += 1;
2539 }
2540
2541 /* ??? Mark them all as used so that register renaming and such
2542 are free to use them. */
2543 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2544 df_set_regs_ever_live (regno, true);
2545 }
2546
2547 /* If we're forced to use st8.spill, we're forced to save and restore
2548 ar.unat as well. The check for existing liveness allows inline asm
2549 to touch ar.unat. */
2550 if (spilled_gr_p || cfun->machine->n_varargs
2551 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2552 {
2553 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2554 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2555 current_frame_info.r[reg_save_ar_unat]
2556 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2557 if (current_frame_info.r[reg_save_ar_unat] == 0)
2558 {
2559 extra_spill_size += 8;
2560 n_spilled += 1;
2561 }
2562 }
2563
2564 if (df_regs_ever_live_p (AR_LC_REGNUM))
2565 {
2566 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2567 current_frame_info.r[reg_save_ar_lc]
2568 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2569 if (current_frame_info.r[reg_save_ar_lc] == 0)
2570 {
2571 extra_spill_size += 8;
2572 n_spilled += 1;
2573 }
2574 }
2575
2576 /* If we have an odd number of words of pretend arguments written to
2577 the stack, then the FR save area will be unaligned. We round the
2578 size of this area up to keep things 16 byte aligned. */
2579 if (spilled_fr_p)
2580 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2581 else
2582 pretend_args_size = crtl->args.pretend_args_size;
2583
2584 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2585 + crtl->outgoing_args_size);
2586 total_size = IA64_STACK_ALIGN (total_size);
2587
2588 /* We always use the 16-byte scratch area provided by the caller, but
2589 if we are a leaf function, there's no one to which we need to provide
2590 a scratch area. */
2591 if (current_function_is_leaf)
2592 total_size = MAX (0, total_size - 16);
2593
2594 current_frame_info.total_size = total_size;
2595 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2596 current_frame_info.spill_size = spill_size;
2597 current_frame_info.extra_spill_size = extra_spill_size;
2598 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2599 current_frame_info.n_spilled = n_spilled;
2600 current_frame_info.initialized = reload_completed;
2601 }
2602
2603 /* Compute the initial difference between the specified pair of registers. */
2604
2605 HOST_WIDE_INT
2606 ia64_initial_elimination_offset (int from, int to)
2607 {
2608 HOST_WIDE_INT offset;
2609
2610 ia64_compute_frame_size (get_frame_size ());
2611 switch (from)
2612 {
2613 case FRAME_POINTER_REGNUM:
2614 switch (to)
2615 {
2616 case HARD_FRAME_POINTER_REGNUM:
2617 if (current_function_is_leaf)
2618 offset = -current_frame_info.total_size;
2619 else
2620 offset = -(current_frame_info.total_size
2621 - crtl->outgoing_args_size - 16);
2622 break;
2623
2624 case STACK_POINTER_REGNUM:
2625 if (current_function_is_leaf)
2626 offset = 0;
2627 else
2628 offset = 16 + crtl->outgoing_args_size;
2629 break;
2630
2631 default:
2632 gcc_unreachable ();
2633 }
2634 break;
2635
2636 case ARG_POINTER_REGNUM:
2637 /* Arguments start above the 16 byte save area, unless this is a stdarg
2638 function, in which case we store through the 16 byte save area. */
2639 switch (to)
2640 {
2641 case HARD_FRAME_POINTER_REGNUM:
2642 offset = 16 - crtl->args.pretend_args_size;
2643 break;
2644
2645 case STACK_POINTER_REGNUM:
2646 offset = (current_frame_info.total_size
2647 + 16 - crtl->args.pretend_args_size);
2648 break;
2649
2650 default:
2651 gcc_unreachable ();
2652 }
2653 break;
2654
2655 default:
2656 gcc_unreachable ();
2657 }
2658
2659 return offset;
2660 }
2661
2662 /* If there are more than a trivial number of register spills, we use
2663 two interleaved iterators so that we can get two memory references
2664 per insn group.
2665
2666 In order to simplify things in the prologue and epilogue expanders,
2667 we use helper functions to fix up the memory references after the
2668 fact with the appropriate offsets to a POST_MODIFY memory mode.
2669 The following data structure tracks the state of the two iterators
2670 while insns are being emitted. */
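/* As an illustration: with two iterators walking alternating 8-byte save
slots, each iterator's successive references are typically 16 bytes
apart, and spill_restore_mem below rewrites the previous reference into
a POST_MODIFY that steps the iterator forward, giving two independent
address streams. */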
2671
2672 struct spill_fill_data
2673 {
2674 rtx init_after; /* point at which to emit initializations */
2675 rtx init_reg[2]; /* initial base register */
2676 rtx iter_reg[2]; /* the iterator registers */
2677 rtx *prev_addr[2]; /* address of last memory use */
2678 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2679 HOST_WIDE_INT prev_off[2]; /* last offset */
2680 int n_iter; /* number of iterators in use */
2681 int next_iter; /* next iterator to use */
2682 unsigned int save_gr_used_mask;
2683 };
2684
2685 static struct spill_fill_data spill_fill_data;
2686
2687 static void
2688 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2689 {
2690 int i;
2691
2692 spill_fill_data.init_after = get_last_insn ();
2693 spill_fill_data.init_reg[0] = init_reg;
2694 spill_fill_data.init_reg[1] = init_reg;
2695 spill_fill_data.prev_addr[0] = NULL;
2696 spill_fill_data.prev_addr[1] = NULL;
2697 spill_fill_data.prev_insn[0] = NULL;
2698 spill_fill_data.prev_insn[1] = NULL;
2699 spill_fill_data.prev_off[0] = cfa_off;
2700 spill_fill_data.prev_off[1] = cfa_off;
2701 spill_fill_data.next_iter = 0;
2702 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2703
2704 spill_fill_data.n_iter = 1 + (n_spills > 2);
2705 for (i = 0; i < spill_fill_data.n_iter; ++i)
2706 {
2707 int regno = next_scratch_gr_reg ();
2708 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2709 current_frame_info.gr_used_mask |= 1 << regno;
2710 }
2711 }
2712
2713 static void
2714 finish_spill_pointers (void)
2715 {
2716 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2717 }
2718
2719 static rtx
2720 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2721 {
2722 int iter = spill_fill_data.next_iter;
2723 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2724 rtx disp_rtx = GEN_INT (disp);
2725 rtx mem;
2726
2727 if (spill_fill_data.prev_addr[iter])
2728 {
2729 if (satisfies_constraint_N (disp_rtx))
2730 {
2731 *spill_fill_data.prev_addr[iter]
2732 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2733 gen_rtx_PLUS (DImode,
2734 spill_fill_data.iter_reg[iter],
2735 disp_rtx));
2736 REG_NOTES (spill_fill_data.prev_insn[iter])
2737 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2738 REG_NOTES (spill_fill_data.prev_insn[iter]));
2739 }
2740 else
2741 {
2742 /* ??? Could use register post_modify for loads. */
2743 if (!satisfies_constraint_I (disp_rtx))
2744 {
2745 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2746 emit_move_insn (tmp, disp_rtx);
2747 disp_rtx = tmp;
2748 }
2749 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2750 spill_fill_data.iter_reg[iter], disp_rtx));
2751 }
2752 }
2753 /* Micro-optimization: if we've created a frame pointer, it's at
2754 CFA 0, which may allow the real iterator to be initialized lower,
2755 slightly increasing parallelism. Also, if there are few saves
2756 it may eliminate the iterator entirely. */
2757 else if (disp == 0
2758 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2759 && frame_pointer_needed)
2760 {
2761 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2762 set_mem_alias_set (mem, get_varargs_alias_set ());
2763 return mem;
2764 }
2765 else
2766 {
2767 rtx seq, insn;
2768
2769 if (disp == 0)
2770 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2771 spill_fill_data.init_reg[iter]);
2772 else
2773 {
2774 start_sequence ();
2775
2776 if (!satisfies_constraint_I (disp_rtx))
2777 {
2778 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2779 emit_move_insn (tmp, disp_rtx);
2780 disp_rtx = tmp;
2781 }
2782
2783 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2784 spill_fill_data.init_reg[iter],
2785 disp_rtx));
2786
2787 seq = get_insns ();
2788 end_sequence ();
2789 }
2790
2791 /* Be careful in case this would be the first insn in the sequence. */
2792 if (spill_fill_data.init_after)
2793 insn = emit_insn_after (seq, spill_fill_data.init_after);
2794 else
2795 {
2796 rtx first = get_insns ();
2797 if (first)
2798 insn = emit_insn_before (seq, first);
2799 else
2800 insn = emit_insn (seq);
2801 }
2802 spill_fill_data.init_after = insn;
2803 }
2804
2805 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2806
2807 /* ??? Not all of the spills are for varargs, but some of them are.
2808 The rest of the spills belong in an alias set of their own. But
2809 it doesn't actually hurt to include them here. */
2810 set_mem_alias_set (mem, get_varargs_alias_set ());
2811
2812 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2813 spill_fill_data.prev_off[iter] = cfa_off;
2814
2815 if (++iter >= spill_fill_data.n_iter)
2816 iter = 0;
2817 spill_fill_data.next_iter = iter;
2818
2819 return mem;
2820 }
2821
2822 static void
2823 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2824 rtx frame_reg)
2825 {
2826 int iter = spill_fill_data.next_iter;
2827 rtx mem, insn;
2828
2829 mem = spill_restore_mem (reg, cfa_off);
2830 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2831 spill_fill_data.prev_insn[iter] = insn;
2832
2833 if (frame_reg)
2834 {
2835 rtx base;
2836 HOST_WIDE_INT off;
2837
2838 RTX_FRAME_RELATED_P (insn) = 1;
2839
2840 /* Don't even pretend that the unwind code can intuit its way
2841 through a pair of interleaved post_modify iterators. Just
2842 provide the correct answer. */
2843
2844 if (frame_pointer_needed)
2845 {
2846 base = hard_frame_pointer_rtx;
2847 off = - cfa_off;
2848 }
2849 else
2850 {
2851 base = stack_pointer_rtx;
2852 off = current_frame_info.total_size - cfa_off;
2853 }
2854
2855 REG_NOTES (insn)
2856 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2857 gen_rtx_SET (VOIDmode,
2858 gen_rtx_MEM (GET_MODE (reg),
2859 plus_constant (base, off)),
2860 frame_reg),
2861 REG_NOTES (insn));
2862 }
2863 }
2864
2865 static void
2866 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2867 {
2868 int iter = spill_fill_data.next_iter;
2869 rtx insn;
2870
2871 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2872 GEN_INT (cfa_off)));
2873 spill_fill_data.prev_insn[iter] = insn;
2874 }
2875
2876 /* Wrapper functions that discard the CONST_INT spill offset. These
2877 exist so that we can give gr_spill/gr_fill the offset they need and
2878 use a consistent function interface. */
2879
2880 static rtx
2881 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2882 {
2883 return gen_movdi (dest, src);
2884 }
2885
2886 static rtx
2887 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2888 {
2889 return gen_fr_spill (dest, src);
2890 }
2891
2892 static rtx
2893 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2894 {
2895 return gen_fr_restore (dest, src);
2896 }
2897
2898 /* Called after register allocation to add any instructions needed for the
2899 prologue. Using a prologue insn is preferred to putting all of the
2900 instructions in output_function_prologue(), since it allows the scheduler
2901 to intermix instructions with the saves of the caller saved registers. In
2902 some cases, it might be necessary to emit a barrier instruction as the last
2903 insn to prevent such scheduling.
2904
2905 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2906 so that the debug info generation code can handle them properly.
2907
2908 The register save area is laid out like so:
2909 cfa+16
2910 [ varargs spill area ]
2911 [ fr register spill area ]
2912 [ br register spill area ]
2913 [ ar register spill area ]
2914 [ pr register spill area ]
2915 [ gr register spill area ] */
2916
2917 /* ??? Get inefficient code when the frame size is larger than can fit in an
2918 adds instruction. */
2919
2920 void
2921 ia64_expand_prologue (void)
2922 {
2923 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2924 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2925 rtx reg, alt_reg;
2926
2927 ia64_compute_frame_size (get_frame_size ());
2928 last_scratch_gr_reg = 15;
2929
2930 if (dump_file)
2931 {
2932 fprintf (dump_file, "ia64 frame related registers "
2933 "recorded in current_frame_info.r[]:\n");
2934 #define PRINTREG(a) if (current_frame_info.r[a]) \
2935 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2936 PRINTREG(reg_fp);
2937 PRINTREG(reg_save_b0);
2938 PRINTREG(reg_save_pr);
2939 PRINTREG(reg_save_ar_pfs);
2940 PRINTREG(reg_save_ar_unat);
2941 PRINTREG(reg_save_ar_lc);
2942 PRINTREG(reg_save_gp);
2943 #undef PRINTREG
2944 }
2945
2946 /* If there is no epilogue, then we don't need some prologue insns.
2947 We need to avoid emitting the dead prologue insns, because flow
2948 will complain about them. */
2949 if (optimize)
2950 {
2951 edge e;
2952 edge_iterator ei;
2953
2954 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2955 if ((e->flags & EDGE_FAKE) == 0
2956 && (e->flags & EDGE_FALLTHRU) != 0)
2957 break;
2958 epilogue_p = (e != NULL);
2959 }
2960 else
2961 epilogue_p = 1;
2962
2963 /* Set the local, input, and output register names. We need to do this
2964 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2965 half. If we use in/loc/out register names, then we get assembler errors
2966 in crtn.S because there is no alloc insn or regstk directive in there. */
2967 if (! TARGET_REG_NAMES)
2968 {
2969 int inputs = current_frame_info.n_input_regs;
2970 int locals = current_frame_info.n_local_regs;
2971 int outputs = current_frame_info.n_output_regs;
2972
2973 for (i = 0; i < inputs; i++)
2974 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2975 for (i = 0; i < locals; i++)
2976 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2977 for (i = 0; i < outputs; i++)
2978 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2979 }
2980
2981 /* Set the frame pointer register name. The regnum is logically loc79,
2982 but of course we'll not have allocated that many locals. Rather than
2983 worrying about renumbering the existing rtxs, we adjust the name. */
2984 /* ??? This code means that we can never use one local register when
2985 there is a frame pointer. loc79 gets wasted in this case, as it is
2986 renamed to a register that will never be used. See also the try_locals
2987 code in find_gr_spill. */
2988 if (current_frame_info.r[reg_fp])
2989 {
2990 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2991 reg_names[HARD_FRAME_POINTER_REGNUM]
2992 = reg_names[current_frame_info.r[reg_fp]];
2993 reg_names[current_frame_info.r[reg_fp]] = tmp;
2994 }
2995
2996 /* We don't need an alloc instruction if we've used no outputs or locals. */
2997 if (current_frame_info.n_local_regs == 0
2998 && current_frame_info.n_output_regs == 0
2999 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3000 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3001 {
3002 /* If there is no alloc, but there are input registers used, then we
3003 need a .regstk directive. */
3004 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3005 ar_pfs_save_reg = NULL_RTX;
3006 }
3007 else
3008 {
3009 current_frame_info.need_regstk = 0;
3010
3011 if (current_frame_info.r[reg_save_ar_pfs])
3012 {
3013 regno = current_frame_info.r[reg_save_ar_pfs];
3014 reg_emitted (reg_save_ar_pfs);
3015 }
3016 else
3017 regno = next_scratch_gr_reg ();
3018 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3019
3020 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3021 GEN_INT (current_frame_info.n_input_regs),
3022 GEN_INT (current_frame_info.n_local_regs),
3023 GEN_INT (current_frame_info.n_output_regs),
3024 GEN_INT (current_frame_info.n_rotate_regs)));
3025 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3026 }
3027
3028 /* Set up frame pointer, stack pointer, and spill iterators. */
3029
3030 n_varargs = cfun->machine->n_varargs;
3031 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3032 stack_pointer_rtx, 0);
3033
3034 if (frame_pointer_needed)
3035 {
3036 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3037 RTX_FRAME_RELATED_P (insn) = 1;
3038 }
3039
3040 if (current_frame_info.total_size != 0)
3041 {
3042 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3043 rtx offset;
3044
3045 if (satisfies_constraint_I (frame_size_rtx))
3046 offset = frame_size_rtx;
3047 else
3048 {
3049 regno = next_scratch_gr_reg ();
3050 offset = gen_rtx_REG (DImode, regno);
3051 emit_move_insn (offset, frame_size_rtx);
3052 }
3053
3054 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3055 stack_pointer_rtx, offset));
3056
3057 if (! frame_pointer_needed)
3058 {
3059 RTX_FRAME_RELATED_P (insn) = 1;
3060 if (GET_CODE (offset) != CONST_INT)
3061 {
3062 REG_NOTES (insn)
3063 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3064 gen_rtx_SET (VOIDmode,
3065 stack_pointer_rtx,
3066 gen_rtx_PLUS (DImode,
3067 stack_pointer_rtx,
3068 frame_size_rtx)),
3069 REG_NOTES (insn));
3070 }
3071 }
3072
3073 /* ??? At this point we must generate a magic insn that appears to
3074 modify the stack pointer, the frame pointer, and all spill
3075 iterators. This would allow the most scheduling freedom. For
3076 now, just hard stop. */
3077 emit_insn (gen_blockage ());
3078 }
3079
3080 /* Must copy out ar.unat before doing any integer spills. */
3081 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3082 {
3083 if (current_frame_info.r[reg_save_ar_unat])
3084 {
3085 ar_unat_save_reg
3086 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3087 reg_emitted (reg_save_ar_unat);
3088 }
3089 else
3090 {
3091 alt_regno = next_scratch_gr_reg ();
3092 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3093 current_frame_info.gr_used_mask |= 1 << alt_regno;
3094 }
3095
3096 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3097 insn = emit_move_insn (ar_unat_save_reg, reg);
3098 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3099
3100 /* Even if we're not going to generate an epilogue, we still
3101 need to save the register so that EH works. */
3102 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3103 emit_insn (gen_prologue_use (ar_unat_save_reg));
3104 }
3105 else
3106 ar_unat_save_reg = NULL_RTX;
3107
3108 /* Spill all varargs registers. Do this before spilling any GR registers,
3109 since we want the UNAT bits for the GR registers to override the UNAT
3110 bits from varargs, which we don't care about. */
3111
3112 cfa_off = -16;
3113 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3114 {
3115 reg = gen_rtx_REG (DImode, regno);
3116 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3117 }
3118
3119 /* Locate the bottom of the register save area. */
3120 cfa_off = (current_frame_info.spill_cfa_off
3121 + current_frame_info.spill_size
3122 + current_frame_info.extra_spill_size);
3123
3124 /* Save the predicate register block either in a register or in memory. */
3125 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3126 {
3127 reg = gen_rtx_REG (DImode, PR_REG (0));
3128 if (current_frame_info.r[reg_save_pr] != 0)
3129 {
3130 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3131 reg_emitted (reg_save_pr);
3132 insn = emit_move_insn (alt_reg, reg);
3133
3134 /* ??? Denote pr spill/fill by a DImode move that modifies all
3135 64 hard registers. */
3136 RTX_FRAME_RELATED_P (insn) = 1;
3137 REG_NOTES (insn)
3138 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3139 gen_rtx_SET (VOIDmode, alt_reg, reg),
3140 REG_NOTES (insn));
3141
3142 /* Even if we're not going to generate an epilogue, we still
3143 need to save the register so that EH works. */
3144 if (! epilogue_p)
3145 emit_insn (gen_prologue_use (alt_reg));
3146 }
3147 else
3148 {
3149 alt_regno = next_scratch_gr_reg ();
3150 alt_reg = gen_rtx_REG (DImode, alt_regno);
3151 insn = emit_move_insn (alt_reg, reg);
3152 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3153 cfa_off -= 8;
3154 }
3155 }
3156
3157 /* Handle AR regs in numerical order. All of them get special handling. */
3158 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3159 && current_frame_info.r[reg_save_ar_unat] == 0)
3160 {
3161 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3162 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3163 cfa_off -= 8;
3164 }
3165
3166 /* The alloc insn already copied ar.pfs into a general register. The
3167 only thing we have to do now is copy that register to a stack slot
3168 if we'd not allocated a local register for the job. */
3169 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3170 && current_frame_info.r[reg_save_ar_pfs] == 0)
3171 {
3172 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3173 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3174 cfa_off -= 8;
3175 }
3176
3177 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3178 {
3179 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3180 if (current_frame_info.r[reg_save_ar_lc] != 0)
3181 {
3182 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3183 reg_emitted (reg_save_ar_lc);
3184 insn = emit_move_insn (alt_reg, reg);
3185 RTX_FRAME_RELATED_P (insn) = 1;
3186
3187 /* Even if we're not going to generate an epilogue, we still
3188 need to save the register so that EH works. */
3189 if (! epilogue_p)
3190 emit_insn (gen_prologue_use (alt_reg));
3191 }
3192 else
3193 {
3194 alt_regno = next_scratch_gr_reg ();
3195 alt_reg = gen_rtx_REG (DImode, alt_regno);
3196 emit_move_insn (alt_reg, reg);
3197 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3198 cfa_off -= 8;
3199 }
3200 }
3201
3202 /* Save the return pointer. */
3203 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3204 {
3205 reg = gen_rtx_REG (DImode, BR_REG (0));
3206 if (current_frame_info.r[reg_save_b0] != 0)
3207 {
3208 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3209 reg_emitted (reg_save_b0);
3210 insn = emit_move_insn (alt_reg, reg);
3211 RTX_FRAME_RELATED_P (insn) = 1;
3212
3213 /* Even if we're not going to generate an epilogue, we still
3214 need to save the register so that EH works. */
3215 if (! epilogue_p)
3216 emit_insn (gen_prologue_use (alt_reg));
3217 }
3218 else
3219 {
3220 alt_regno = next_scratch_gr_reg ();
3221 alt_reg = gen_rtx_REG (DImode, alt_regno);
3222 emit_move_insn (alt_reg, reg);
3223 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3224 cfa_off -= 8;
3225 }
3226 }
3227
3228 if (current_frame_info.r[reg_save_gp])
3229 {
3230 reg_emitted (reg_save_gp);
3231 insn = emit_move_insn (gen_rtx_REG (DImode,
3232 current_frame_info.r[reg_save_gp]),
3233 pic_offset_table_rtx);
3234 }
3235
3236 /* We should now be at the base of the gr/br/fr spill area. */
3237 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3238 + current_frame_info.spill_size));
3239
3240 /* Spill all general registers. */
3241 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3242 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3243 {
3244 reg = gen_rtx_REG (DImode, regno);
3245 do_spill (gen_gr_spill, reg, cfa_off, reg);
3246 cfa_off -= 8;
3247 }
3248
3249 /* Spill the rest of the BR registers. */
3250 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3251 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3252 {
3253 alt_regno = next_scratch_gr_reg ();
3254 alt_reg = gen_rtx_REG (DImode, alt_regno);
3255 reg = gen_rtx_REG (DImode, regno);
3256 emit_move_insn (alt_reg, reg);
3257 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3258 cfa_off -= 8;
3259 }
3260
3261 /* Align the frame and spill all FR registers. */
3262 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3263 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3264 {
3265 gcc_assert (!(cfa_off & 15));
3266 reg = gen_rtx_REG (XFmode, regno);
3267 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3268 cfa_off -= 16;
3269 }
3270
3271 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3272
3273 finish_spill_pointers ();
3274 }
3275
3276 /* Called after register allocation to add any instructions needed for the
3277 epilogue. Using an epilogue insn is favored compared to putting all of the
3278 instructions in output_function_prologue(), since it allows the scheduler
3279 to intermix instructions with the saves of the caller saved registers. In
3280 some cases, it might be necessary to emit a barrier instruction as the last
3281 insn to prevent such scheduling. */
3282
3283 void
3284 ia64_expand_epilogue (int sibcall_p)
3285 {
3286 rtx insn, reg, alt_reg, ar_unat_save_reg;
3287 int regno, alt_regno, cfa_off;
3288
3289 ia64_compute_frame_size (get_frame_size ());
3290
3291 /* If there is a frame pointer, then we use it instead of the stack
3292 pointer, so that the stack pointer does not need to be valid when
3293 the epilogue starts. See EXIT_IGNORE_STACK. */
3294 if (frame_pointer_needed)
3295 setup_spill_pointers (current_frame_info.n_spilled,
3296 hard_frame_pointer_rtx, 0);
3297 else
3298 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3299 current_frame_info.total_size);
3300
3301 if (current_frame_info.total_size != 0)
3302 {
3303 /* ??? At this point we must generate a magic insn that appears to
3304 modify the spill iterators and the frame pointer. This would
3305 allow the most scheduling freedom. For now, just hard stop. */
3306 emit_insn (gen_blockage ());
3307 }
3308
3309 /* Locate the bottom of the register save area. */
3310 cfa_off = (current_frame_info.spill_cfa_off
3311 + current_frame_info.spill_size
3312 + current_frame_info.extra_spill_size);
3313
3314 /* Restore the predicate registers. */
3315 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3316 {
3317 if (current_frame_info.r[reg_save_pr] != 0)
3318 {
3319 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3320 reg_emitted (reg_save_pr);
3321 }
3322 else
3323 {
3324 alt_regno = next_scratch_gr_reg ();
3325 alt_reg = gen_rtx_REG (DImode, alt_regno);
3326 do_restore (gen_movdi_x, alt_reg, cfa_off);
3327 cfa_off -= 8;
3328 }
3329 reg = gen_rtx_REG (DImode, PR_REG (0));
3330 emit_move_insn (reg, alt_reg);
3331 }
3332
3333 /* Restore the application registers. */
3334
3335 /* Load the saved unat from the stack, but do not restore it until
3336 after the GRs have been restored. */
3337 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3338 {
3339 if (current_frame_info.r[reg_save_ar_unat] != 0)
3340 {
3341 ar_unat_save_reg
3342 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3343 reg_emitted (reg_save_ar_unat);
3344 }
3345 else
3346 {
3347 alt_regno = next_scratch_gr_reg ();
3348 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3349 current_frame_info.gr_used_mask |= 1 << alt_regno;
3350 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3351 cfa_off -= 8;
3352 }
3353 }
3354 else
3355 ar_unat_save_reg = NULL_RTX;
3356
3357 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3358 {
3359 reg_emitted (reg_save_ar_pfs);
3360 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3361 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3362 emit_move_insn (reg, alt_reg);
3363 }
3364 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3365 {
3366 alt_regno = next_scratch_gr_reg ();
3367 alt_reg = gen_rtx_REG (DImode, alt_regno);
3368 do_restore (gen_movdi_x, alt_reg, cfa_off);
3369 cfa_off -= 8;
3370 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3371 emit_move_insn (reg, alt_reg);
3372 }
3373
3374 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3375 {
3376 if (current_frame_info.r[reg_save_ar_lc] != 0)
3377 {
3378 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3379 reg_emitted (reg_save_ar_lc);
3380 }
3381 else
3382 {
3383 alt_regno = next_scratch_gr_reg ();
3384 alt_reg = gen_rtx_REG (DImode, alt_regno);
3385 do_restore (gen_movdi_x, alt_reg, cfa_off);
3386 cfa_off -= 8;
3387 }
3388 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3389 emit_move_insn (reg, alt_reg);
3390 }
3391
3392 /* Restore the return pointer. */
3393 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3394 {
3395 if (current_frame_info.r[reg_save_b0] != 0)
3396 {
3397 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3398 reg_emitted (reg_save_b0);
3399 }
3400 else
3401 {
3402 alt_regno = next_scratch_gr_reg ();
3403 alt_reg = gen_rtx_REG (DImode, alt_regno);
3404 do_restore (gen_movdi_x, alt_reg, cfa_off);
3405 cfa_off -= 8;
3406 }
3407 reg = gen_rtx_REG (DImode, BR_REG (0));
3408 emit_move_insn (reg, alt_reg);
3409 }
3410
3411 /* We should now be at the base of the gr/br/fr spill area. */
3412 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3413 + current_frame_info.spill_size));
3414
3415 /* The GP may be stored on the stack in the prologue, but it's
3416 never restored in the epilogue. Skip the stack slot. */
3417 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3418 cfa_off -= 8;
3419
3420 /* Restore all general registers. */
3421 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3422 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3423 {
3424 reg = gen_rtx_REG (DImode, regno);
3425 do_restore (gen_gr_restore, reg, cfa_off);
3426 cfa_off -= 8;
3427 }
3428
3429 /* Restore the branch registers. */
3430 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3431 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3432 {
3433 alt_regno = next_scratch_gr_reg ();
3434 alt_reg = gen_rtx_REG (DImode, alt_regno);
3435 do_restore (gen_movdi_x, alt_reg, cfa_off);
3436 cfa_off -= 8;
3437 reg = gen_rtx_REG (DImode, regno);
3438 emit_move_insn (reg, alt_reg);
3439 }
3440
3441 /* Restore floating point registers. */
3442 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3443 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3444 {
3445 gcc_assert (!(cfa_off & 15));
3446 reg = gen_rtx_REG (XFmode, regno);
3447 do_restore (gen_fr_restore_x, reg, cfa_off);
3448 cfa_off -= 16;
3449 }
3450
3451 /* Restore ar.unat for real. */
3452 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3453 {
3454 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3455 emit_move_insn (reg, ar_unat_save_reg);
3456 }
3457
3458 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3459
3460 finish_spill_pointers ();
3461
3462 if (current_frame_info.total_size
3463 || cfun->machine->ia64_eh_epilogue_sp
3464 || frame_pointer_needed)
3465 {
3466 /* ??? At this point we must generate a magic insn that appears to
3467 modify the spill iterators, the stack pointer, and the frame
3468 pointer. This would allow the most scheduling freedom. For now,
3469 just hard stop. */
3470 emit_insn (gen_blockage ());
3471 }
3472
3473 if (cfun->machine->ia64_eh_epilogue_sp)
3474 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3475 else if (frame_pointer_needed)
3476 {
3477 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3478 RTX_FRAME_RELATED_P (insn) = 1;
3479 }
3480 else if (current_frame_info.total_size)
3481 {
3482 rtx offset, frame_size_rtx;
3483
3484 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3485 if (satisfies_constraint_I (frame_size_rtx))
3486 offset = frame_size_rtx;
3487 else
3488 {
3489 regno = next_scratch_gr_reg ();
3490 offset = gen_rtx_REG (DImode, regno);
3491 emit_move_insn (offset, frame_size_rtx);
3492 }
3493
3494 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3495 offset));
3496
3497 RTX_FRAME_RELATED_P (insn) = 1;
3498 if (GET_CODE (offset) != CONST_INT)
3499 {
3500 REG_NOTES (insn)
3501 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3502 gen_rtx_SET (VOIDmode,
3503 stack_pointer_rtx,
3504 gen_rtx_PLUS (DImode,
3505 stack_pointer_rtx,
3506 frame_size_rtx)),
3507 REG_NOTES (insn));
3508 }
3509 }
3510
3511 if (cfun->machine->ia64_eh_epilogue_bsp)
3512 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3513
3514 if (! sibcall_p)
3515 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3516 else
3517 {
3518 int fp = GR_REG (2);
3519 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3520 first available call-clobbered register. If there was a frame_pointer
3521 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3522 so we have to make sure we're using the string "r2" when emitting
3523 the register name for the assembler. */
3524 if (current_frame_info.r[reg_fp]
3525 && current_frame_info.r[reg_fp] == GR_REG (2))
3526 fp = HARD_FRAME_POINTER_REGNUM;
3527
3528 /* We must emit an alloc to force the input registers to become output
3529 registers. Otherwise, if the callee tries to pass its parameters
3530 through to another call without an intervening alloc, then these
3531 values get lost. */
3532 /* ??? We don't need to preserve all input registers. We only need to
3533 preserve those input registers used as arguments to the sibling call.
3534 It is unclear how to compute that number here. */
3535 if (current_frame_info.n_input_regs != 0)
3536 {
3537 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3538 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3539 const0_rtx, const0_rtx,
3540 n_inputs, const0_rtx));
3541 RTX_FRAME_RELATED_P (insn) = 1;
3542 }
3543 }
3544 }
3545
3546 /* Return 1 if br.ret can do all the work required to return from a
3547 function. */
3548
3549 int
3550 ia64_direct_return (void)
3551 {
3552 if (reload_completed && ! frame_pointer_needed)
3553 {
3554 ia64_compute_frame_size (get_frame_size ());
3555
3556 return (current_frame_info.total_size == 0
3557 && current_frame_info.n_spilled == 0
3558 && current_frame_info.r[reg_save_b0] == 0
3559 && current_frame_info.r[reg_save_pr] == 0
3560 && current_frame_info.r[reg_save_ar_pfs] == 0
3561 && current_frame_info.r[reg_save_ar_unat] == 0
3562 && current_frame_info.r[reg_save_ar_lc] == 0);
3563 }
3564 return 0;
3565 }
3566
3567 /* Return the magic cookie that we use to hold the return address
3568 during early compilation. */
3569
3570 rtx
3571 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3572 {
3573 if (count != 0)
3574 return NULL;
3575 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3576 }
3577
3578 /* Split this value after reload, now that we know where the return
3579 address is saved. */
3580
3581 void
3582 ia64_split_return_addr_rtx (rtx dest)
3583 {
3584 rtx src;
3585
3586 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3587 {
3588 if (current_frame_info.r[reg_save_b0] != 0)
3589 {
3590 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3591 reg_emitted (reg_save_b0);
3592 }
3593 else
3594 {
3595 HOST_WIDE_INT off;
3596 unsigned int regno;
3597 rtx off_r;
3598
3599 /* Compute offset from CFA for BR0. */
3600 /* ??? Must be kept in sync with ia64_expand_prologue. */
3601 off = (current_frame_info.spill_cfa_off
3602 + current_frame_info.spill_size);
3603 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3604 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3605 off -= 8;
3606
3607 /* Convert CFA offset to a register based offset. */
3608 if (frame_pointer_needed)
3609 src = hard_frame_pointer_rtx;
3610 else
3611 {
3612 src = stack_pointer_rtx;
3613 off += current_frame_info.total_size;
3614 }
3615
3616 /* Load address into scratch register. */
3617 off_r = GEN_INT (off);
3618 if (satisfies_constraint_I (off_r))
3619 emit_insn (gen_adddi3 (dest, src, off_r));
3620 else
3621 {
3622 emit_move_insn (dest, off_r);
3623 emit_insn (gen_adddi3 (dest, src, dest));
3624 }
3625
3626 src = gen_rtx_MEM (Pmode, dest);
3627 }
3628 }
3629 else
3630 src = gen_rtx_REG (DImode, BR_REG (0));
3631
3632 emit_move_insn (dest, src);
3633 }
3634
3635 int
3636 ia64_hard_regno_rename_ok (int from, int to)
3637 {
3638 /* Don't clobber any of the registers we reserved for the prologue. */
3639 enum ia64_frame_regs r;
3640
3641 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3642 if (to == current_frame_info.r[r]
3643 || from == current_frame_info.r[r]
3644 || to == emitted_frame_related_regs[r]
3645 || from == emitted_frame_related_regs[r])
3646 return 0;
3647
3648 /* Don't use output registers outside the register frame. */
3649 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3650 return 0;
3651
3652 /* Retain even/oddness on predicate register pairs. */
3653 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3654 return (from & 1) == (to & 1);
3655
3656 return 1;
3657 }
3658
3659 /* Target hook for assembling integer objects. Handle word-sized
3660 aligned objects and detect the cases when @fptr is needed. */
3661
3662 static bool
3663 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3664 {
3665 if (size == POINTER_SIZE / BITS_PER_UNIT
3666 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3667 && GET_CODE (x) == SYMBOL_REF
3668 && SYMBOL_REF_FUNCTION_P (x))
3669 {
3670 static const char * const directive[2][2] = {
3671 /* 64-bit pointer */ /* 32-bit pointer */
3672 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3673 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3674 };
3675 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3676 output_addr_const (asm_out_file, x);
3677 fputs (")\n", asm_out_file);
3678 return true;
3679 }
3680 return default_assemble_integer (x, size, aligned_p);
3681 }
3682
3683 /* Emit the function prologue. */
3684
3685 static void
3686 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3687 {
3688 int mask, grsave, grsave_prev;
3689
3690 if (current_frame_info.need_regstk)
3691 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3692 current_frame_info.n_input_regs,
3693 current_frame_info.n_local_regs,
3694 current_frame_info.n_output_regs,
3695 current_frame_info.n_rotate_regs);
3696
3697 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3698 return;
3699
3700 /* Emit the .prologue directive. */
3701
3702 mask = 0;
3703 grsave = grsave_prev = 0;
3704 if (current_frame_info.r[reg_save_b0] != 0)
3705 {
3706 mask |= 8;
3707 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3708 }
3709 if (current_frame_info.r[reg_save_ar_pfs] != 0
3710 && (grsave_prev == 0
3711 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3712 {
3713 mask |= 4;
3714 if (grsave_prev == 0)
3715 grsave = current_frame_info.r[reg_save_ar_pfs];
3716 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3717 }
3718 if (current_frame_info.r[reg_fp] != 0
3719 && (grsave_prev == 0
3720 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3721 {
3722 mask |= 2;
3723 if (grsave_prev == 0)
3724 grsave = HARD_FRAME_POINTER_REGNUM;
3725 grsave_prev = current_frame_info.r[reg_fp];
3726 }
3727 if (current_frame_info.r[reg_save_pr] != 0
3728 && (grsave_prev == 0
3729 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3730 {
3731 mask |= 1;
3732 if (grsave_prev == 0)
3733 grsave = current_frame_info.r[reg_save_pr];
3734 }
3735
3736 if (mask && TARGET_GNU_AS)
3737 fprintf (file, "\t.prologue %d, %d\n", mask,
3738 ia64_dbx_register_number (grsave));
3739 else
3740 fputs ("\t.prologue\n", file);
3741
3742 /* Emit a .spill directive, if necessary, to relocate the base of
3743 the register spill area. */
3744 if (current_frame_info.spill_cfa_off != -16)
3745 fprintf (file, "\t.spill %ld\n",
3746 (long) (current_frame_info.spill_cfa_off
3747 + current_frame_info.spill_size));
3748 }
3749
3750 /* Emit the .body directive at the scheduled end of the prologue. */
3751
3752 static void
3753 ia64_output_function_end_prologue (FILE *file)
3754 {
3755 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3756 return;
3757
3758 fputs ("\t.body\n", file);
3759 }
3760
3761 /* Emit the function epilogue. */
3762
3763 static void
3764 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3765 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3766 {
3767 int i;
3768
3769 if (current_frame_info.r[reg_fp])
3770 {
3771 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3772 reg_names[HARD_FRAME_POINTER_REGNUM]
3773 = reg_names[current_frame_info.r[reg_fp]];
3774 reg_names[current_frame_info.r[reg_fp]] = tmp;
3775 reg_emitted (reg_fp);
3776 }
3777 if (! TARGET_REG_NAMES)
3778 {
3779 for (i = 0; i < current_frame_info.n_input_regs; i++)
3780 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3781 for (i = 0; i < current_frame_info.n_local_regs; i++)
3782 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3783 for (i = 0; i < current_frame_info.n_output_regs; i++)
3784 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3785 }
3786
3787 current_frame_info.initialized = 0;
3788 }
3789
3790 int
3791 ia64_dbx_register_number (int regno)
3792 {
3793 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3794 from its home at loc79 to something inside the register frame. We
3795 must perform the same renumbering here for the debug info. */
3796 if (current_frame_info.r[reg_fp])
3797 {
3798 if (regno == HARD_FRAME_POINTER_REGNUM)
3799 regno = current_frame_info.r[reg_fp];
3800 else if (regno == current_frame_info.r[reg_fp])
3801 regno = HARD_FRAME_POINTER_REGNUM;
3802 }
3803
3804 if (IN_REGNO_P (regno))
3805 return 32 + regno - IN_REG (0);
3806 else if (LOC_REGNO_P (regno))
3807 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3808 else if (OUT_REGNO_P (regno))
3809 return (32 + current_frame_info.n_input_regs
3810 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3811 else
3812 return regno;
3813 }
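       /* For example (a sketch of the renumbering above), with 3 input and
          2 local registers: in0-in2 map to debug register numbers 32-34,
          loc0-loc1 to 35-36, and out0 to 37.  */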
3814
3815 void
3816 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3817 {
3818 rtx addr_reg, eight = GEN_INT (8);
3819
3820 /* The Intel assembler requires that the global __ia64_trampoline symbol
3821 be declared explicitly */
3822 if (!TARGET_GNU_AS)
3823 {
3824 static bool declared_ia64_trampoline = false;
3825
3826 if (!declared_ia64_trampoline)
3827 {
3828 declared_ia64_trampoline = true;
3829 (*targetm.asm_out.globalize_label) (asm_out_file,
3830 "__ia64_trampoline");
3831 }
3832 }
3833
3834 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3835 addr = convert_memory_address (Pmode, addr);
3836 fnaddr = convert_memory_address (Pmode, fnaddr);
3837 static_chain = convert_memory_address (Pmode, static_chain);
3838
3839 /* Load up our iterator. */
3840 addr_reg = gen_reg_rtx (Pmode);
3841 emit_move_insn (addr_reg, addr);
3842
3843 /* The first two words are the fake descriptor:
3844 __ia64_trampoline, ADDR+16. */
3845 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3846 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3847 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3848
3849 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3850 copy_to_reg (plus_constant (addr, 16)));
3851 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3852
3853 /* The third word is the target descriptor. */
3854 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3855 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3856
3857 /* The fourth word is the static chain. */
3858 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3859 }
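       /* The 32-byte trampoline written above therefore looks like this
          (a sketch, offsets relative to ADDR):
            [ADDR+ 0]  __ia64_trampoline     (fake descriptor, entry point)
            [ADDR+ 8]  ADDR + 16             (fake descriptor, second word)
            [ADDR+16]  FNADDR                (target function descriptor)
            [ADDR+24]  STATIC_CHAIN                                        */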
3860 \f
3861 /* Do any needed setup for a variadic function. CUM has not been updated
3862 for the last named argument which has type TYPE and mode MODE.
3863
3864 We generate the actual spill instructions during prologue generation. */
3865
3866 static void
3867 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3868 tree type, int * pretend_size,
3869 int second_time ATTRIBUTE_UNUSED)
3870 {
3871 CUMULATIVE_ARGS next_cum = *cum;
3872
3873 /* Skip the current argument. */
3874 ia64_function_arg_advance (&next_cum, mode, type, 1);
3875
3876 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3877 {
3878 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3879 *pretend_size = n * UNITS_PER_WORD;
3880 cfun->machine->n_varargs = n;
3881 }
3882 }
3883
3884 /* Check whether TYPE is a homogeneous floating point aggregate. If
3885 it is, return the mode of the floating point type that appears
3886    in all leaves. If it is not, return VOIDmode.
3887
3888    An aggregate is a homogeneous floating point aggregate if all
3889    fields/elements in it have the same floating point type (e.g.,
3890 SFmode). 128-bit quad-precision floats are excluded.
3891
3892 Variable sized aggregates should never arrive here, since we should
3893 have already decided to pass them by reference. Top-level zero-sized
3894 aggregates are excluded because our parallels crash the middle-end. */
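       /* Illustrative examples (a rough sketch, not quoted from the ABI):
            struct { float x, y, z; }      -> SFmode HFA
            struct { double d[4]; }        -> DFmode HFA
            struct { float f; double d; }  -> not an HFA (mixed leaf types), VOIDmode
            struct { __float128 q; }       -> not an HFA (TFmode is excluded), VOIDmode  */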
3895
3896 static enum machine_mode
3897 hfa_element_mode (const_tree type, bool nested)
3898 {
3899 enum machine_mode element_mode = VOIDmode;
3900 enum machine_mode mode;
3901 enum tree_code code = TREE_CODE (type);
3902 int know_element_mode = 0;
3903 tree t;
3904
3905 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3906 return VOIDmode;
3907
3908 switch (code)
3909 {
3910 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3911 case BOOLEAN_TYPE: case POINTER_TYPE:
3912 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3913 case LANG_TYPE: case FUNCTION_TYPE:
3914 return VOIDmode;
3915
3916 /* Fortran complex types are supposed to be HFAs, so we need to handle
3917 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3918 types though. */
3919 case COMPLEX_TYPE:
3920 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3921 && TYPE_MODE (type) != TCmode)
3922 return GET_MODE_INNER (TYPE_MODE (type));
3923 else
3924 return VOIDmode;
3925
3926 case REAL_TYPE:
3927 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3928 mode if this is contained within an aggregate. */
3929 if (nested && TYPE_MODE (type) != TFmode)
3930 return TYPE_MODE (type);
3931 else
3932 return VOIDmode;
3933
3934 case ARRAY_TYPE:
3935 return hfa_element_mode (TREE_TYPE (type), 1);
3936
3937 case RECORD_TYPE:
3938 case UNION_TYPE:
3939 case QUAL_UNION_TYPE:
3940 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3941 {
3942 if (TREE_CODE (t) != FIELD_DECL)
3943 continue;
3944
3945 mode = hfa_element_mode (TREE_TYPE (t), 1);
3946 if (know_element_mode)
3947 {
3948 if (mode != element_mode)
3949 return VOIDmode;
3950 }
3951 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3952 return VOIDmode;
3953 else
3954 {
3955 know_element_mode = 1;
3956 element_mode = mode;
3957 }
3958 }
3959 return element_mode;
3960
3961 default:
3962 /* If we reach here, we probably have some front-end specific type
3963 that the backend doesn't know about. This can happen via the
3964 aggregate_value_p call in init_function_start. All we can do is
3965 ignore unknown tree types. */
3966 return VOIDmode;
3967 }
3968
3969 return VOIDmode;
3970 }
3971
3972 /* Return the number of words required to hold a quantity of TYPE and MODE
3973 when passed as an argument. */
3974 static int
3975 ia64_function_arg_words (tree type, enum machine_mode mode)
3976 {
3977 int words;
3978
3979 if (mode == BLKmode)
3980 words = int_size_in_bytes (type);
3981 else
3982 words = GET_MODE_SIZE (mode);
3983
3984 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3985 }
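       /* For example, with 8-byte words a 12-byte BLKmode aggregate needs
          (12 + 8 - 1) / 8 = 2 argument words, while an SImode argument needs 1.  */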
3986
3987 /* Return the number of registers that should be skipped so the current
3988 argument (described by TYPE and WORDS) will be properly aligned.
3989
3990 Integer and float arguments larger than 8 bytes start at the next
3991 even boundary. Aggregates larger than 8 bytes start at the next
3992 even boundary if the aggregate has 16 byte alignment. Note that
3993 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3994 but are still to be aligned in registers.
3995
3996 ??? The ABI does not specify how to handle aggregates with
3997 alignment from 9 to 15 bytes, or greater than 16. We handle them
3998 all as if they had 16 byte alignment. Such aggregates can occur
3999 only if gcc extensions are used. */
4000 static int
4001 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4002 {
4003 if ((cum->words & 1) == 0)
4004 return 0;
4005
4006 if (type
4007 && TREE_CODE (type) != INTEGER_TYPE
4008 && TREE_CODE (type) != REAL_TYPE)
4009 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4010 else
4011 return words > 1;
4012 }
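       /* For example, if cum->words is odd and the argument is an aggregate
          aligned to more than 8 bytes (or any integer/FP value wider than
          8 bytes), one slot is skipped so that the argument starts on an
          even, 16-byte-aligned slot.  */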
4013
4014 /* Return rtx for register where argument is passed, or zero if it is passed
4015 on the stack. */
4016 /* ??? 128-bit quad-precision floats are always passed in general
4017 registers. */
4018
4019 rtx
4020 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4021 int named, int incoming)
4022 {
4023 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4024 int words = ia64_function_arg_words (type, mode);
4025 int offset = ia64_function_arg_offset (cum, type, words);
4026 enum machine_mode hfa_mode = VOIDmode;
4027
4028 /* If all argument slots are used, then it must go on the stack. */
4029 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4030 return 0;
4031
4032 /* Check for and handle homogeneous FP aggregates. */
4033 if (type)
4034 hfa_mode = hfa_element_mode (type, 0);
4035
4036 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4037 and unprototyped hfas are passed specially. */
4038 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4039 {
4040 rtx loc[16];
4041 int i = 0;
4042 int fp_regs = cum->fp_regs;
4043 int int_regs = cum->words + offset;
4044 int hfa_size = GET_MODE_SIZE (hfa_mode);
4045 int byte_size;
4046 int args_byte_size;
4047
4048 /* If prototyped, pass it in FR regs then GR regs.
4049 If not prototyped, pass it in both FR and GR regs.
4050
4051 If this is an SFmode aggregate, then it is possible to run out of
4052 FR regs while GR regs are still left. In that case, we pass the
4053 remaining part in the GR regs. */
4054
4055 /* Fill the FP regs. We do this always. We stop if we reach the end
4056 of the argument, the last FP register, or the last argument slot. */
4057
4058 byte_size = ((mode == BLKmode)
4059 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4060 args_byte_size = int_regs * UNITS_PER_WORD;
4061 offset = 0;
4062 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4063 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4064 {
4065 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4066 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4067 + fp_regs)),
4068 GEN_INT (offset));
4069 offset += hfa_size;
4070 args_byte_size += hfa_size;
4071 fp_regs++;
4072 }
4073
4074 /* If no prototype, then the whole thing must go in GR regs. */
4075 if (! cum->prototype)
4076 offset = 0;
4077 /* If this is an SFmode aggregate, then we might have some left over
4078 that needs to go in GR regs. */
4079 else if (byte_size != offset)
4080 int_regs += offset / UNITS_PER_WORD;
4081
4082 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4083
4084 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4085 {
4086 enum machine_mode gr_mode = DImode;
4087 unsigned int gr_size;
4088
4089 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4090 then this goes in a GR reg left adjusted/little endian, right
4091 adjusted/big endian. */
4092 /* ??? Currently this is handled wrong, because 4-byte hunks are
4093 always right adjusted/little endian. */
4094 if (offset & 0x4)
4095 gr_mode = SImode;
4096 /* If we have an even 4 byte hunk because the aggregate is a
4097 multiple of 4 bytes in size, then this goes in a GR reg right
4098 adjusted/little endian. */
4099 else if (byte_size - offset == 4)
4100 gr_mode = SImode;
4101
4102 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4103 gen_rtx_REG (gr_mode, (basereg
4104 + int_regs)),
4105 GEN_INT (offset));
4106
4107 gr_size = GET_MODE_SIZE (gr_mode);
4108 offset += gr_size;
4109 if (gr_size == UNITS_PER_WORD
4110 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4111 int_regs++;
4112 else if (gr_size > UNITS_PER_WORD)
4113 int_regs += gr_size / UNITS_PER_WORD;
4114 }
4115 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4116 }
4117
4118 /* Integral and aggregates go in general registers. If we have run out of
4119 FR registers, then FP values must also go in general registers. This can
4120 happen when we have a SFmode HFA. */
4121 else if (mode == TFmode || mode == TCmode
4122 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4123 {
4124 int byte_size = ((mode == BLKmode)
4125 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4126 if (BYTES_BIG_ENDIAN
4127 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4128 && byte_size < UNITS_PER_WORD
4129 && byte_size > 0)
4130 {
4131 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4132 gen_rtx_REG (DImode,
4133 (basereg + cum->words
4134 + offset)),
4135 const0_rtx);
4136 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4137 }
4138 else
4139 return gen_rtx_REG (mode, basereg + cum->words + offset);
4140
4141 }
4142
4143 /* If there is a prototype, then FP values go in a FR register when
4144 named, and in a GR register when unnamed. */
4145 else if (cum->prototype)
4146 {
4147 if (named)
4148 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4149 /* In big-endian mode, an anonymous SFmode value must be represented
4150 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4151 the value into the high half of the general register. */
4152 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4153 return gen_rtx_PARALLEL (mode,
4154 gen_rtvec (1,
4155 gen_rtx_EXPR_LIST (VOIDmode,
4156 gen_rtx_REG (DImode, basereg + cum->words + offset),
4157 const0_rtx)));
4158 else
4159 return gen_rtx_REG (mode, basereg + cum->words + offset);
4160 }
4161 /* If there is no prototype, then FP values go in both FR and GR
4162 registers. */
4163 else
4164 {
4165 /* See comment above. */
4166 enum machine_mode inner_mode =
4167 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4168
4169 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4170 gen_rtx_REG (mode, (FR_ARG_FIRST
4171 + cum->fp_regs)),
4172 const0_rtx);
4173 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4174 gen_rtx_REG (inner_mode,
4175 (basereg + cum->words
4176 + offset)),
4177 const0_rtx);
4178
4179 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4180 }
4181 }
4182
4183 /* Return number of bytes, at the beginning of the argument, that must be
4184    put in registers. 0 if the argument is entirely in registers or entirely
4185 in memory. */
4186
4187 static int
4188 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4189 tree type, bool named ATTRIBUTE_UNUSED)
4190 {
4191 int words = ia64_function_arg_words (type, mode);
4192 int offset = ia64_function_arg_offset (cum, type, words);
4193
4194 /* If all argument slots are used, then it must go on the stack. */
4195 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4196 return 0;
4197
4198 /* It doesn't matter whether the argument goes in FR or GR regs. If
4199 it fits within the 8 argument slots, then it goes entirely in
4200 registers. If it extends past the last argument slot, then the rest
4201 goes on the stack. */
4202
4203 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4204 return 0;
4205
4206 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4207 }
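       /* For example, a 3-word argument starting at slot 6 (cum->words + offset
          == 6) has (8 - 6) * 8 = 16 bytes passed in registers and the remaining
          8 bytes passed on the stack.  */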
4208
4209 /* Update CUM to point after this argument. This is patterned after
4210 ia64_function_arg. */
4211
4212 void
4213 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4214 tree type, int named)
4215 {
4216 int words = ia64_function_arg_words (type, mode);
4217 int offset = ia64_function_arg_offset (cum, type, words);
4218 enum machine_mode hfa_mode = VOIDmode;
4219
4220 /* If all arg slots are already full, then there is nothing to do. */
4221 if (cum->words >= MAX_ARGUMENT_SLOTS)
4222 return;
4223
4224 cum->words += words + offset;
4225
4226 /* Check for and handle homogeneous FP aggregates. */
4227 if (type)
4228 hfa_mode = hfa_element_mode (type, 0);
4229
4230 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4231 and unprototyped hfas are passed specially. */
4232 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4233 {
4234 int fp_regs = cum->fp_regs;
4235 /* This is the original value of cum->words + offset. */
4236 int int_regs = cum->words - words;
4237 int hfa_size = GET_MODE_SIZE (hfa_mode);
4238 int byte_size;
4239 int args_byte_size;
4240
4241 /* If prototyped, pass it in FR regs then GR regs.
4242 If not prototyped, pass it in both FR and GR regs.
4243
4244 If this is an SFmode aggregate, then it is possible to run out of
4245 FR regs while GR regs are still left. In that case, we pass the
4246 remaining part in the GR regs. */
4247
4248 /* Fill the FP regs. We do this always. We stop if we reach the end
4249 of the argument, the last FP register, or the last argument slot. */
4250
4251 byte_size = ((mode == BLKmode)
4252 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4253 args_byte_size = int_regs * UNITS_PER_WORD;
4254 offset = 0;
4255 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4256 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4257 {
4258 offset += hfa_size;
4259 args_byte_size += hfa_size;
4260 fp_regs++;
4261 }
4262
4263 cum->fp_regs = fp_regs;
4264 }
4265
4266 /* Integral and aggregates go in general registers. So do TFmode FP values.
4267 If we have run out of FR registers, then other FP values must also go in
4268 general registers. This can happen when we have a SFmode HFA. */
4269 else if (mode == TFmode || mode == TCmode
4270 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4271 cum->int_regs = cum->words;
4272
4273 /* If there is a prototype, then FP values go in a FR register when
4274 named, and in a GR register when unnamed. */
4275 else if (cum->prototype)
4276 {
4277 if (! named)
4278 cum->int_regs = cum->words;
4279 else
4280 /* ??? Complex types should not reach here. */
4281 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4282 }
4283 /* If there is no prototype, then FP values go in both FR and GR
4284 registers. */
4285 else
4286 {
4287 /* ??? Complex types should not reach here. */
4288 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4289 cum->int_regs = cum->words;
4290 }
4291 }
4292
4293 /* Arguments with alignment larger than 8 bytes start at the next even
4294 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
4295 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4296
4297 int
4298 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4299 {
4300
4301 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4302 return PARM_BOUNDARY * 2;
4303
4304 if (type)
4305 {
4306 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4307 return PARM_BOUNDARY * 2;
4308 else
4309 return PARM_BOUNDARY;
4310 }
4311
4312 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4313 return PARM_BOUNDARY * 2;
4314 else
4315 return PARM_BOUNDARY;
4316 }
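       /* For example, assuming the usual 64-bit PARM_BOUNDARY: an int or a
          double keeps the 64-bit boundary, while a 16-byte-aligned struct
          (or TFmode on ILP32 HP-UX) gets a 128-bit boundary.  */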
4317
4318 /* True if it is OK to do sibling call optimization for the specified
4319 call expression EXP. DECL will be the called function, or NULL if
4320 this is an indirect call. */
4321 static bool
4322 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4323 {
4324 /* We can't perform a sibcall if the current function has the syscall_linkage
4325 attribute. */
4326 if (lookup_attribute ("syscall_linkage",
4327 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4328 return false;
4329
4330 /* We must always return with our current GP. This means we can
4331 only sibcall to functions defined in the current module. */
4332 return decl && (*targetm.binds_local_p) (decl);
4333 }
4334 \f
4335
4336 /* Implement va_arg. */
4337
4338 static tree
4339 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4340 gimple_seq *post_p)
4341 {
4342 /* Variable sized types are passed by reference. */
4343 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4344 {
4345 tree ptrtype = build_pointer_type (type);
4346 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4347 return build_va_arg_indirect_ref (addr);
4348 }
4349
4350 /* Aggregate arguments with alignment larger than 8 bytes start at
4351 the next even boundary. Integer and floating point arguments
4352 do so if they are larger than 8 bytes, whether or not they are
4353 also aligned larger than 8 bytes. */
4354 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4355 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4356 {
4357 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4358 size_int (2 * UNITS_PER_WORD - 1));
4359 t = fold_convert (sizetype, t);
4360 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4361 size_int (-2 * UNITS_PER_WORD));
4362 t = fold_convert (TREE_TYPE (valist), t);
4363 gimplify_assign (unshare_expr (valist), t, pre_p);
4364 }
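         /* With the usual 8-byte UNITS_PER_WORD, the computation above amounts
            to valist = (valist + 15) & -16, i.e. rounding the address up to a
            16-byte boundary.  */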
4365
4366 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4367 }
4368 \f
4369 /* Return 1 if the function return value is returned in memory. Return 0 if
4370    it is in a register. */
4371
4372 static bool
4373 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4374 {
4375 enum machine_mode mode;
4376 enum machine_mode hfa_mode;
4377 HOST_WIDE_INT byte_size;
4378
4379 mode = TYPE_MODE (valtype);
4380 byte_size = GET_MODE_SIZE (mode);
4381 if (mode == BLKmode)
4382 {
4383 byte_size = int_size_in_bytes (valtype);
4384 if (byte_size < 0)
4385 return true;
4386 }
4387
4388   /* HFAs with up to 8 elements are returned in the FP argument registers. */
4389
4390 hfa_mode = hfa_element_mode (valtype, 0);
4391 if (hfa_mode != VOIDmode)
4392 {
4393 int hfa_size = GET_MODE_SIZE (hfa_mode);
4394
4395 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4396 return true;
4397 else
4398 return false;
4399 }
4400 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4401 return true;
4402 else
4403 return false;
4404 }
4405
4406 /* Return rtx for register that holds the function return value. */
4407
4408 rtx
4409 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4410 {
4411 enum machine_mode mode;
4412 enum machine_mode hfa_mode;
4413
4414 mode = TYPE_MODE (valtype);
4415 hfa_mode = hfa_element_mode (valtype, 0);
4416
4417 if (hfa_mode != VOIDmode)
4418 {
4419 rtx loc[8];
4420 int i;
4421 int hfa_size;
4422 int byte_size;
4423 int offset;
4424
4425 hfa_size = GET_MODE_SIZE (hfa_mode);
4426 byte_size = ((mode == BLKmode)
4427 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4428 offset = 0;
4429 for (i = 0; offset < byte_size; i++)
4430 {
4431 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4432 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4433 GEN_INT (offset));
4434 offset += hfa_size;
4435 }
4436 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4437 }
4438 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4439 return gen_rtx_REG (mode, FR_ARG_FIRST);
4440 else
4441 {
4442 bool need_parallel = false;
4443
4444 /* In big-endian mode, we need to manage the layout of aggregates
4445 in the registers so that we get the bits properly aligned in
4446 the highpart of the registers. */
4447 if (BYTES_BIG_ENDIAN
4448 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4449 need_parallel = true;
4450
4451 /* Something like struct S { long double x; char a[0] } is not an
4452 HFA structure, and therefore doesn't go in fp registers. But
4453 the middle-end will give it XFmode anyway, and XFmode values
4454 don't normally fit in integer registers. So we need to smuggle
4455 the value inside a parallel. */
4456 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4457 need_parallel = true;
4458
4459 if (need_parallel)
4460 {
4461 rtx loc[8];
4462 int offset;
4463 int bytesize;
4464 int i;
4465
4466 offset = 0;
4467 bytesize = int_size_in_bytes (valtype);
4468 /* An empty PARALLEL is invalid here, but the return value
4469 doesn't matter for empty structs. */
4470 if (bytesize == 0)
4471 return gen_rtx_REG (mode, GR_RET_FIRST);
4472 for (i = 0; offset < bytesize; i++)
4473 {
4474 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4475 gen_rtx_REG (DImode,
4476 GR_RET_FIRST + i),
4477 GEN_INT (offset));
4478 offset += UNITS_PER_WORD;
4479 }
4480 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4481 }
4482
4483 return gen_rtx_REG (mode, GR_RET_FIRST);
4484 }
4485 }
4486
4487 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4488 We need to emit DTP-relative relocations. */
4489
4490 static void
4491 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4492 {
4493 gcc_assert (size == 4 || size == 8);
4494 if (size == 4)
4495 fputs ("\tdata4.ua\t@dtprel(", file);
4496 else
4497 fputs ("\tdata8.ua\t@dtprel(", file);
4498 output_addr_const (file, x);
4499 fputs (")", file);
4500 }
4501
4502 /* Print a memory address as an operand to reference that memory location. */
4503
4504 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4505 also call this from ia64_print_operand for memory addresses. */
4506
4507 void
4508 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4509 rtx address ATTRIBUTE_UNUSED)
4510 {
4511 }
4512
4513 /* Print an operand to an assembler instruction.
4514 C Swap and print a comparison operator.
4515 D Print an FP comparison operator.
4516 E Print 32 - constant, for SImode shifts as extract.
4517 e Print 64 - constant, for DImode rotates.
4518 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4519 a floating point register emitted normally.
4520 I Invert a predicate register by adding 1.
4521 J Select the proper predicate register for a condition.
4522 j Select the inverse predicate register for a condition.
4523 O Append .acq for volatile load.
4524 P Postincrement of a MEM.
4525 Q Append .rel for volatile store.
4526 R Print .s .d or nothing for a single, double or no truncation.
4527 S Shift amount for shladd instruction.
4528 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4529 for Intel assembler.
4530 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4531 for Intel assembler.
4532 X A pair of floating point registers.
4533 r Print register name, or constant 0 as r0. HP compatibility for
4534 Linux kernel.
4535 v Print vector constant value as an 8-byte integer value. */
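       /* For example (illustrative), for an operand such as
          (mem:DI (post_inc (reg r14))), the 'P' code appends ", 8" -- the byte
          size of the post-increment -- while plain operand output of the MEM
          prints "[r14]".  */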
4536
4537 void
4538 ia64_print_operand (FILE * file, rtx x, int code)
4539 {
4540 const char *str;
4541
4542 switch (code)
4543 {
4544 case 0:
4545 /* Handled below. */
4546 break;
4547
4548 case 'C':
4549 {
4550 enum rtx_code c = swap_condition (GET_CODE (x));
4551 fputs (GET_RTX_NAME (c), file);
4552 return;
4553 }
4554
4555 case 'D':
4556 switch (GET_CODE (x))
4557 {
4558 case NE:
4559 str = "neq";
4560 break;
4561 case UNORDERED:
4562 str = "unord";
4563 break;
4564 case ORDERED:
4565 str = "ord";
4566 break;
4567 case UNLT:
4568 str = "nge";
4569 break;
4570 case UNLE:
4571 str = "ngt";
4572 break;
4573 case UNGT:
4574 str = "nle";
4575 break;
4576 case UNGE:
4577 str = "nlt";
4578 break;
4579 default:
4580 str = GET_RTX_NAME (GET_CODE (x));
4581 break;
4582 }
4583 fputs (str, file);
4584 return;
4585
4586 case 'E':
4587 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4588 return;
4589
4590 case 'e':
4591 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4592 return;
4593
4594 case 'F':
4595 if (x == CONST0_RTX (GET_MODE (x)))
4596 str = reg_names [FR_REG (0)];
4597 else if (x == CONST1_RTX (GET_MODE (x)))
4598 str = reg_names [FR_REG (1)];
4599 else
4600 {
4601 gcc_assert (GET_CODE (x) == REG);
4602 str = reg_names [REGNO (x)];
4603 }
4604 fputs (str, file);
4605 return;
4606
4607 case 'I':
4608 fputs (reg_names [REGNO (x) + 1], file);
4609 return;
4610
4611 case 'J':
4612 case 'j':
4613 {
4614 unsigned int regno = REGNO (XEXP (x, 0));
4615 if (GET_CODE (x) == EQ)
4616 regno += 1;
4617 if (code == 'j')
4618 regno ^= 1;
4619 fputs (reg_names [regno], file);
4620 }
4621 return;
4622
4623 case 'O':
4624 if (MEM_VOLATILE_P (x))
4625 fputs(".acq", file);
4626 return;
4627
4628 case 'P':
4629 {
4630 HOST_WIDE_INT value;
4631
4632 switch (GET_CODE (XEXP (x, 0)))
4633 {
4634 default:
4635 return;
4636
4637 case POST_MODIFY:
4638 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4639 if (GET_CODE (x) == CONST_INT)
4640 value = INTVAL (x);
4641 else
4642 {
4643 gcc_assert (GET_CODE (x) == REG);
4644 fprintf (file, ", %s", reg_names[REGNO (x)]);
4645 return;
4646 }
4647 break;
4648
4649 case POST_INC:
4650 value = GET_MODE_SIZE (GET_MODE (x));
4651 break;
4652
4653 case POST_DEC:
4654 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4655 break;
4656 }
4657
4658 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4659 return;
4660 }
4661
4662 case 'Q':
4663 if (MEM_VOLATILE_P (x))
4664 fputs(".rel", file);
4665 return;
4666
4667 case 'R':
4668 if (x == CONST0_RTX (GET_MODE (x)))
4669 fputs(".s", file);
4670 else if (x == CONST1_RTX (GET_MODE (x)))
4671 fputs(".d", file);
4672 else if (x == CONST2_RTX (GET_MODE (x)))
4673 ;
4674 else
4675 output_operand_lossage ("invalid %%R value");
4676 return;
4677
4678 case 'S':
4679 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4680 return;
4681
4682 case 'T':
4683 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4684 {
4685 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4686 return;
4687 }
4688 break;
4689
4690 case 'U':
4691 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4692 {
4693 const char *prefix = "0x";
4694 if (INTVAL (x) & 0x80000000)
4695 {
4696 fprintf (file, "0xffffffff");
4697 prefix = "";
4698 }
4699 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4700 return;
4701 }
4702 break;
4703
4704 case 'X':
4705 {
4706 unsigned int regno = REGNO (x);
4707 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4708 }
4709 return;
4710
4711 case 'r':
4712 /* If this operand is the constant zero, write it as register zero.
4713 Any register, zero, or CONST_INT value is OK here. */
4714 if (GET_CODE (x) == REG)
4715 fputs (reg_names[REGNO (x)], file);
4716 else if (x == CONST0_RTX (GET_MODE (x)))
4717 fputs ("r0", file);
4718 else if (GET_CODE (x) == CONST_INT)
4719 output_addr_const (file, x);
4720 else
4721 output_operand_lossage ("invalid %%r value");
4722 return;
4723
4724 case 'v':
4725 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4726 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4727 break;
4728
4729 case '+':
4730 {
4731 const char *which;
4732
4733 /* For conditional branches, returns or calls, substitute
4734 sptk, dptk, dpnt, or spnt for %s. */
4735 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4736 if (x)
4737 {
4738 int pred_val = INTVAL (XEXP (x, 0));
4739
4740 /* Guess top and bottom 10% statically predicted. */
4741 if (pred_val < REG_BR_PROB_BASE / 50
4742 && br_prob_note_reliable_p (x))
4743 which = ".spnt";
4744 else if (pred_val < REG_BR_PROB_BASE / 2)
4745 which = ".dpnt";
4746 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4747 || !br_prob_note_reliable_p (x))
4748 which = ".dptk";
4749 else
4750 which = ".sptk";
4751 }
4752 else if (GET_CODE (current_output_insn) == CALL_INSN)
4753 which = ".sptk";
4754 else
4755 which = ".dptk";
4756
4757 fputs (which, file);
4758 return;
4759 }
4760
4761 case ',':
4762 x = current_insn_predicate;
4763 if (x)
4764 {
4765 unsigned int regno = REGNO (XEXP (x, 0));
4766 if (GET_CODE (x) == EQ)
4767 regno += 1;
4768 fprintf (file, "(%s) ", reg_names [regno]);
4769 }
4770 return;
4771
4772 default:
4773 output_operand_lossage ("ia64_print_operand: unknown code");
4774 return;
4775 }
4776
4777 switch (GET_CODE (x))
4778 {
4779 /* This happens for the spill/restore instructions. */
4780 case POST_INC:
4781 case POST_DEC:
4782 case POST_MODIFY:
4783 x = XEXP (x, 0);
4784 /* ... fall through ... */
4785
4786 case REG:
4787 fputs (reg_names [REGNO (x)], file);
4788 break;
4789
4790 case MEM:
4791 {
4792 rtx addr = XEXP (x, 0);
4793 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4794 addr = XEXP (addr, 0);
4795 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4796 break;
4797 }
4798
4799 default:
4800 output_addr_const (file, x);
4801 break;
4802 }
4803
4804 return;
4805 }
4806 \f
4807 /* Compute a (partial) cost for rtx X. Return true if the complete
4808 cost has been computed, and false if subexpressions should be
4809 scanned. In either case, *TOTAL contains the cost result. */
4810 /* ??? This is incomplete. */
4811
4812 static bool
4813 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
4814 bool speed ATTRIBUTE_UNUSED)
4815 {
4816 switch (code)
4817 {
4818 case CONST_INT:
4819 switch (outer_code)
4820 {
4821 case SET:
4822 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4823 return true;
4824 case PLUS:
4825 if (satisfies_constraint_I (x))
4826 *total = 0;
4827 else if (satisfies_constraint_J (x))
4828 *total = 1;
4829 else
4830 *total = COSTS_N_INSNS (1);
4831 return true;
4832 default:
4833 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4834 *total = 0;
4835 else
4836 *total = COSTS_N_INSNS (1);
4837 return true;
4838 }
4839
4840 case CONST_DOUBLE:
4841 *total = COSTS_N_INSNS (1);
4842 return true;
4843
4844 case CONST:
4845 case SYMBOL_REF:
4846 case LABEL_REF:
4847 *total = COSTS_N_INSNS (3);
4848 return true;
4849
4850 case MULT:
4851 /* For multiplies wider than HImode, we have to go to the FPU,
4852 which normally involves copies. Plus there's the latency
4853 of the multiply itself, and the latency of the instructions to
4854 transfer integer regs to FP regs. */
4855 /* ??? Check for FP mode. */
4856 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4857 *total = COSTS_N_INSNS (10);
4858 else
4859 *total = COSTS_N_INSNS (2);
4860 return true;
4861
4862 case PLUS:
4863 case MINUS:
4864 case ASHIFT:
4865 case ASHIFTRT:
4866 case LSHIFTRT:
4867 *total = COSTS_N_INSNS (1);
4868 return true;
4869
4870 case DIV:
4871 case UDIV:
4872 case MOD:
4873 case UMOD:
4874 /* We make divide expensive, so that divide-by-constant will be
4875 optimized to a multiply. */
4876 *total = COSTS_N_INSNS (60);
4877 return true;
4878
4879 default:
4880 return false;
4881 }
4882 }
4883
4884 /* Calculate the cost of moving data from a register in class FROM to
4885 one in class TO, using MODE. */
4886
4887 int
4888 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4889 enum reg_class to)
4890 {
4891 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4892 if (to == ADDL_REGS)
4893 to = GR_REGS;
4894 if (from == ADDL_REGS)
4895 from = GR_REGS;
4896
4897 /* All costs are symmetric, so reduce cases by putting the
4898 lower number class as the destination. */
4899 if (from < to)
4900 {
4901 enum reg_class tmp = to;
4902 to = from, from = tmp;
4903 }
4904
4905 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4906 so that we get secondary memory reloads. Between FR_REGS,
4907 we have to make this at least as expensive as MEMORY_MOVE_COST
4908 to avoid spectacularly poor register class preferencing. */
4909 if (mode == XFmode || mode == RFmode)
4910 {
4911 if (to != GR_REGS || from != GR_REGS)
4912 return MEMORY_MOVE_COST (mode, to, 0);
4913 else
4914 return 3;
4915 }
4916
4917 switch (to)
4918 {
4919 case PR_REGS:
4920 /* Moving between PR registers takes two insns. */
4921 if (from == PR_REGS)
4922 return 3;
4923 /* Moving between PR and anything but GR is impossible. */
4924 if (from != GR_REGS)
4925 return MEMORY_MOVE_COST (mode, to, 0);
4926 break;
4927
4928 case BR_REGS:
4929 /* Moving between BR and anything but GR is impossible. */
4930 if (from != GR_REGS && from != GR_AND_BR_REGS)
4931 return MEMORY_MOVE_COST (mode, to, 0);
4932 break;
4933
4934 case AR_I_REGS:
4935 case AR_M_REGS:
4936 /* Moving between AR and anything but GR is impossible. */
4937 if (from != GR_REGS)
4938 return MEMORY_MOVE_COST (mode, to, 0);
4939 break;
4940
4941 case GR_REGS:
4942 case FR_REGS:
4943 case FP_REGS:
4944 case GR_AND_FR_REGS:
4945 case GR_AND_BR_REGS:
4946 case ALL_REGS:
4947 break;
4948
4949 default:
4950 gcc_unreachable ();
4951 }
4952
4953 return 2;
4954 }
4955
4956 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on RCLASS
4957 to use when copying X into that class. */
4958
4959 enum reg_class
4960 ia64_preferred_reload_class (rtx x, enum reg_class rclass)
4961 {
4962 switch (rclass)
4963 {
4964 case FR_REGS:
4965 case FP_REGS:
4966 /* Don't allow volatile mem reloads into floating point registers.
4967 This is defined to force reload to choose the r/m case instead
4968 of the f/f case when reloading (set (reg fX) (mem/v)). */
4969 if (MEM_P (x) && MEM_VOLATILE_P (x))
4970 return NO_REGS;
4971
4972 /* Force all unrecognized constants into the constant pool. */
4973 if (CONSTANT_P (x))
4974 return NO_REGS;
4975 break;
4976
4977 case AR_M_REGS:
4978 case AR_I_REGS:
4979 if (!OBJECT_P (x))
4980 return NO_REGS;
4981 break;
4982
4983 default:
4984 break;
4985 }
4986
4987 return rclass;
4988 }
4989
4990 /* This function returns the register class required for a secondary
4991 register when copying between one of the registers in RCLASS, and X,
4992 using MODE. A return value of NO_REGS means that no secondary register
4993 is required. */
4994
4995 enum reg_class
4996 ia64_secondary_reload_class (enum reg_class rclass,
4997 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4998 {
4999 int regno = -1;
5000
5001 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5002 regno = true_regnum (x);
5003
5004 switch (rclass)
5005 {
5006 case BR_REGS:
5007 case AR_M_REGS:
5008 case AR_I_REGS:
5009 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5010 interaction. We end up with two pseudos with overlapping lifetimes
5011          both of which are equiv to the same constant, and both of which need
5012 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5013 changes depending on the path length, which means the qty_first_reg
5014 check in make_regs_eqv can give different answers at different times.
5015 At some point I'll probably need a reload_indi pattern to handle
5016 this.
5017
5018 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5019 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5020 non-general registers for good measure. */
5021 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5022 return GR_REGS;
5023
5024 /* This is needed if a pseudo used as a call_operand gets spilled to a
5025 stack slot. */
5026 if (GET_CODE (x) == MEM)
5027 return GR_REGS;
5028 break;
5029
5030 case FR_REGS:
5031 case FP_REGS:
5032 /* Need to go through general registers to get to other class regs. */
5033 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5034 return GR_REGS;
5035
5036 /* This can happen when a paradoxical subreg is an operand to the
5037 muldi3 pattern. */
5038 /* ??? This shouldn't be necessary after instruction scheduling is
5039 enabled, because paradoxical subregs are not accepted by
5040 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5041 stop the paradoxical subreg stupidity in the *_operand functions
5042 in recog.c. */
5043 if (GET_CODE (x) == MEM
5044 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5045 || GET_MODE (x) == QImode))
5046 return GR_REGS;
5047
5048 /* This can happen because of the ior/and/etc patterns that accept FP
5049 registers as operands. If the third operand is a constant, then it
5050 needs to be reloaded into a FP register. */
5051 if (GET_CODE (x) == CONST_INT)
5052 return GR_REGS;
5053
5054 /* This can happen because of register elimination in a muldi3 insn.
5055 E.g. `26107 * (unsigned long)&u'. */
5056 if (GET_CODE (x) == PLUS)
5057 return GR_REGS;
5058 break;
5059
5060 case PR_REGS:
5061 /* ??? This happens if we cse/gcse a BImode value across a call,
5062 and the function has a nonlocal goto. This is because global
5063 does not allocate call crossing pseudos to hard registers when
5064 crtl->has_nonlocal_goto is true. This is relatively
5065 common for C++ programs that use exceptions. To reproduce,
5066 return NO_REGS and compile libstdc++. */
5067 if (GET_CODE (x) == MEM)
5068 return GR_REGS;
5069
5070 /* This can happen when we take a BImode subreg of a DImode value,
5071 and that DImode value winds up in some non-GR register. */
5072 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5073 return GR_REGS;
5074 break;
5075
5076 default:
5077 break;
5078 }
5079
5080 return NO_REGS;
5081 }
5082
5083 \f
5084 /* Implement targetm.unspec_may_trap_p hook. */
5085 static int
5086 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5087 {
5088 if (GET_CODE (x) == UNSPEC)
5089 {
5090 switch (XINT (x, 1))
5091 {
5092 case UNSPEC_LDA:
5093 case UNSPEC_LDS:
5094 case UNSPEC_LDSA:
5095 case UNSPEC_LDCCLR:
5096 case UNSPEC_CHKACLR:
5097 case UNSPEC_CHKS:
5098 /* These unspecs are just wrappers. */
5099 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5100 }
5101 }
5102
5103 return default_unspec_may_trap_p (x, flags);
5104 }
5105
5106 \f
5107 /* Parse the -mfixed-range= option string. */
5108
5109 static void
5110 fix_range (const char *const_str)
5111 {
5112 int i, first, last;
5113 char *str, *dash, *comma;
5114
5115   /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5116 REG2 are either register names or register numbers. The effect
5117 of this option is to mark the registers in the range from REG1 to
5118 REG2 as ``fixed'' so they won't be used by the compiler. This is
5119 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
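       /* For example, -mfixed-range=f32-f127 fixes f32 through f127; several
          ranges may be given separated by commas, e.g.
          -mfixed-range=f32-f127,f8-f15 (a purely illustrative combination).  */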
5120
5121 i = strlen (const_str);
5122 str = (char *) alloca (i + 1);
5123 memcpy (str, const_str, i + 1);
5124
5125 while (1)
5126 {
5127 dash = strchr (str, '-');
5128 if (!dash)
5129 {
5130 warning (0, "value of -mfixed-range must have form REG1-REG2");
5131 return;
5132 }
5133 *dash = '\0';
5134
5135 comma = strchr (dash + 1, ',');
5136 if (comma)
5137 *comma = '\0';
5138
5139 first = decode_reg_name (str);
5140 if (first < 0)
5141 {
5142 warning (0, "unknown register name: %s", str);
5143 return;
5144 }
5145
5146 last = decode_reg_name (dash + 1);
5147 if (last < 0)
5148 {
5149 warning (0, "unknown register name: %s", dash + 1);
5150 return;
5151 }
5152
5153 *dash = '-';
5154
5155 if (first > last)
5156 {
5157 warning (0, "%s-%s is an empty range", str, dash + 1);
5158 return;
5159 }
5160
5161 for (i = first; i <= last; ++i)
5162 fixed_regs[i] = call_used_regs[i] = 1;
5163
5164 if (!comma)
5165 break;
5166
5167 *comma = ',';
5168 str = comma + 1;
5169 }
5170 }
5171
5172 /* Implement TARGET_HANDLE_OPTION. */
5173
5174 static bool
5175 ia64_handle_option (size_t code, const char *arg, int value)
5176 {
5177 switch (code)
5178 {
5179 case OPT_mfixed_range_:
5180 fix_range (arg);
5181 return true;
5182
5183 case OPT_mtls_size_:
5184 if (value != 14 && value != 22 && value != 64)
5185 error ("bad value %<%s%> for -mtls-size= switch", arg);
5186 return true;
5187
5188 case OPT_mtune_:
5189 {
5190 static struct pta
5191 {
5192 const char *name; /* processor name or nickname. */
5193 enum processor_type processor;
5194 }
5195 const processor_alias_table[] =
5196 {
5197 {"itanium", PROCESSOR_ITANIUM},
5198 {"itanium1", PROCESSOR_ITANIUM},
5199 {"merced", PROCESSOR_ITANIUM},
5200 {"itanium2", PROCESSOR_ITANIUM2},
5201 {"mckinley", PROCESSOR_ITANIUM2},
5202 };
5203 int const pta_size = ARRAY_SIZE (processor_alias_table);
5204 int i;
5205
5206 for (i = 0; i < pta_size; i++)
5207 if (!strcmp (arg, processor_alias_table[i].name))
5208 {
5209 ia64_tune = processor_alias_table[i].processor;
5210 break;
5211 }
5212 if (i == pta_size)
5213 error ("bad value %<%s%> for -mtune= switch", arg);
5214 return true;
5215 }
5216
5217 default:
5218 return true;
5219 }
5220 }
5221
5222 /* Implement OVERRIDE_OPTIONS. */
5223
5224 void
5225 ia64_override_options (void)
5226 {
5227 if (TARGET_AUTO_PIC)
5228 target_flags |= MASK_CONST_GP;
5229
5230 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5231 {
5232 warning (0, "not yet implemented: latency-optimized inline square root");
5233 TARGET_INLINE_SQRT = INL_MAX_THR;
5234 }
5235
5236 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5237
5238 init_machine_status = ia64_init_machine_status;
5239 }
5240
5241 /* Initialize the record of emitted frame related registers. */
5242
5243 void ia64_init_expanders (void)
5244 {
5245 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5246 }
5247
5248 static struct machine_function *
5249 ia64_init_machine_status (void)
5250 {
5251 return GGC_CNEW (struct machine_function);
5252 }
5253 \f
5254 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5255 static enum attr_type ia64_safe_type (rtx);
5256
5257 static enum attr_itanium_class
5258 ia64_safe_itanium_class (rtx insn)
5259 {
5260 if (recog_memoized (insn) >= 0)
5261 return get_attr_itanium_class (insn);
5262 else
5263 return ITANIUM_CLASS_UNKNOWN;
5264 }
5265
5266 static enum attr_type
5267 ia64_safe_type (rtx insn)
5268 {
5269 if (recog_memoized (insn) >= 0)
5270 return get_attr_type (insn);
5271 else
5272 return TYPE_UNKNOWN;
5273 }
5274 \f
5275 /* The following collection of routines emit instruction group stop bits as
5276 necessary to avoid dependencies. */
5277
5278 /* Need to track some additional registers as far as serialization is
5279 concerned so we can properly handle br.call and br.ret. We could
5280 make these registers visible to gcc, but since these registers are
5281 never explicitly used in gcc generated code, it seems wasteful to
5282 do so (plus it would make the call and return patterns needlessly
5283 complex). */
5284 #define REG_RP (BR_REG (0))
5285 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5286 /* This is used for volatile asms which may require a stop bit immediately
5287 before and after them. */
5288 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5289 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5290 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5291
5292 /* For each register, we keep track of how it has been written in the
5293 current instruction group.
5294
5295 If a register is written unconditionally (no qualifying predicate),
5296 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5297
5298 If a register is written if its qualifying predicate P is true, we
5299 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5300 may be written again by the complement of P (P^1) and when this happens,
5301 WRITE_COUNT gets set to 2.
5302
5303 The result of this is that whenever an insn attempts to write a register
5304 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5305
5306 If a predicate register is written by a floating-point insn, we set
5307 WRITTEN_BY_FP to true.
5308
5309 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5310 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
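       /* For example (illustrative): "(p6) mov r8 = 1" records WRITE_COUNT = 1
          with FIRST_PRED = p6 for r8; a following "(p7) mov r8 = 2" in the same
          group is allowed, since p7 is taken to be p6's complement, and bumps
          WRITE_COUNT to 2; any further write to r8 in this group then needs a
          stop bit first.  */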
5311
5312 #if GCC_VERSION >= 4000
5313 #define RWS_FIELD_TYPE __extension__ unsigned short
5314 #else
5315 #define RWS_FIELD_TYPE unsigned int
5316 #endif
5317 struct reg_write_state
5318 {
5319 RWS_FIELD_TYPE write_count : 2;
5320 RWS_FIELD_TYPE first_pred : 10;
5321 RWS_FIELD_TYPE written_by_fp : 1;
5322 RWS_FIELD_TYPE written_by_and : 1;
5323 RWS_FIELD_TYPE written_by_or : 1;
5324 };
5325
5326 /* Cumulative info for the current instruction group. */
5327 struct reg_write_state rws_sum[NUM_REGS];
5328 #ifdef ENABLE_CHECKING
5329 /* Bitmap whether a register has been written in the current insn. */
5330 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5331 / HOST_BITS_PER_WIDEST_FAST_INT];
5332
5333 static inline void
5334 rws_insn_set (int regno)
5335 {
5336 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5337 SET_HARD_REG_BIT (rws_insn, regno);
5338 }
5339
5340 static inline int
5341 rws_insn_test (int regno)
5342 {
5343 return TEST_HARD_REG_BIT (rws_insn, regno);
5344 }
5345 #else
5346 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5347 unsigned char rws_insn[2];
5348
5349 static inline void
5350 rws_insn_set (int regno)
5351 {
5352 if (regno == REG_AR_CFM)
5353 rws_insn[0] = 1;
5354 else if (regno == REG_VOLATILE)
5355 rws_insn[1] = 1;
5356 }
5357
5358 static inline int
5359 rws_insn_test (int regno)
5360 {
5361 if (regno == REG_AR_CFM)
5362 return rws_insn[0];
5363 if (regno == REG_VOLATILE)
5364 return rws_insn[1];
5365 return 0;
5366 }
5367 #endif
5368
5369 /* Indicates whether this is the first instruction after a stop bit,
5370 in which case we don't need another stop bit. Without this,
5371 ia64_variable_issue will die when scheduling an alloc. */
5372 static int first_instruction;
5373
5374 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5375 RTL for one instruction. */
5376 struct reg_flags
5377 {
5378 unsigned int is_write : 1; /* Is register being written? */
5379 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5380 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5381 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5382 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5383 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5384 };
5385
5386 static void rws_update (int, struct reg_flags, int);
5387 static int rws_access_regno (int, struct reg_flags, int);
5388 static int rws_access_reg (rtx, struct reg_flags, int);
5389 static void update_set_flags (rtx, struct reg_flags *);
5390 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5391 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5392 static void init_insn_group_barriers (void);
5393 static int group_barrier_needed (rtx);
5394 static int safe_group_barrier_needed (rtx);
5395 static int in_safe_group_barrier;
5396
5397 /* Update the global register-write state (rws_sum) for REGNO, which is being
5398 written by the current instruction with predicate PRED and associated register flags FLAGS. */
5399
5400 static void
5401 rws_update (int regno, struct reg_flags flags, int pred)
5402 {
5403 if (pred)
5404 rws_sum[regno].write_count++;
5405 else
5406 rws_sum[regno].write_count = 2;
5407 rws_sum[regno].written_by_fp |= flags.is_fp;
5408 /* ??? Not tracking and/or across differing predicates. */
5409 rws_sum[regno].written_by_and = flags.is_and;
5410 rws_sum[regno].written_by_or = flags.is_or;
5411 rws_sum[regno].first_pred = pred;
5412 }
5413
5414 /* Handle an access to register REGNO of type FLAGS using predicate register
5415 PRED. Update rws_sum array. Return 1 if this access creates
5416 a dependency with an earlier instruction in the same group. */
5417
5418 static int
5419 rws_access_regno (int regno, struct reg_flags flags, int pred)
5420 {
5421 int need_barrier = 0;
5422
5423 gcc_assert (regno < NUM_REGS);
5424
5425 if (! PR_REGNO_P (regno))
5426 flags.is_and = flags.is_or = 0;
5427
5428 if (flags.is_write)
5429 {
5430 int write_count;
5431
5432 rws_insn_set (regno);
5433 write_count = rws_sum[regno].write_count;
5434
5435 switch (write_count)
5436 {
5437 case 0:
5438 /* The register has not been written yet. */
5439 if (!in_safe_group_barrier)
5440 rws_update (regno, flags, pred);
5441 break;
5442
5443 case 1:
5444 /* The register has been written via a predicate. If this is
5445 not a complementary predicate, then we need a barrier. */
5446 /* ??? This assumes that P and P+1 are always complementary
5447 predicates for P even. */
5448 if (flags.is_and && rws_sum[regno].written_by_and)
5449 ;
5450 else if (flags.is_or && rws_sum[regno].written_by_or)
5451 ;
5452 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5453 need_barrier = 1;
5454 if (!in_safe_group_barrier)
5455 rws_update (regno, flags, pred);
5456 break;
5457
5458 case 2:
5459 /* The register has been unconditionally written already. We
5460 need a barrier. */
5461 if (flags.is_and && rws_sum[regno].written_by_and)
5462 ;
5463 else if (flags.is_or && rws_sum[regno].written_by_or)
5464 ;
5465 else
5466 need_barrier = 1;
5467 if (!in_safe_group_barrier)
5468 {
5469 rws_sum[regno].written_by_and = flags.is_and;
5470 rws_sum[regno].written_by_or = flags.is_or;
5471 }
5472 break;
5473
5474 default:
5475 gcc_unreachable ();
5476 }
5477 }
5478 else
5479 {
5480 if (flags.is_branch)
5481 {
5482 /* Branches have several RAW exceptions that allow us to avoid
5483 barriers. */
5484
5485 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5486 /* RAW dependencies on branch regs are permissible as long
5487 as the writer is a non-branch instruction. Since we
5488 never generate code that uses a branch register written
5489 by a branch instruction, handling this case is
5490 easy. */
5491 return 0;
5492
5493 if (REGNO_REG_CLASS (regno) == PR_REGS
5494 && ! rws_sum[regno].written_by_fp)
5495 /* The predicates of a branch are available within the
5496 same insn group as long as the predicate was written by
5497 something other than a floating-point instruction. */
5498 return 0;
5499 }
5500
5501 if (flags.is_and && rws_sum[regno].written_by_and)
5502 return 0;
5503 if (flags.is_or && rws_sum[regno].written_by_or)
5504 return 0;
5505
5506 switch (rws_sum[regno].write_count)
5507 {
5508 case 0:
5509 /* The register has not been written yet. */
5510 break;
5511
5512 case 1:
5513 /* The register has been written via a predicate. If this is
5514 not a complementary predicate, then we need a barrier. */
5515 /* ??? This assumes that P and P+1 are always complementary
5516 predicates for P even. */
5517 if ((rws_sum[regno].first_pred ^ 1) != pred)
5518 need_barrier = 1;
5519 break;
5520
5521 case 2:
5522 /* The register has been unconditionally written already. We
5523 need a barrier. */
5524 need_barrier = 1;
5525 break;
5526
5527 default:
5528 gcc_unreachable ();
5529 }
5530 }
5531
5532 return need_barrier;
5533 }
5534
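/* Like rws_access_regno, but for a REG rtx, which may span several hard
   registers; accumulate the result over all of them. */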
5535 static int
5536 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5537 {
5538 int regno = REGNO (reg);
5539 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5540
5541 if (n == 1)
5542 return rws_access_regno (regno, flags, pred);
5543 else
5544 {
5545 int need_barrier = 0;
5546 while (--n >= 0)
5547 need_barrier |= rws_access_regno (regno + n, flags, pred);
5548 return need_barrier;
5549 }
5550 }
5551
5552 /* Examine X, which is a SET rtx, and update the register flags stored
5553 in *PFLAGS accordingly. */
5554
5555 static void
5556 update_set_flags (rtx x, struct reg_flags *pflags)
5557 {
5558 rtx src = SET_SRC (x);
5559
5560 switch (GET_CODE (src))
5561 {
5562 case CALL:
5563 return;
5564
5565 case IF_THEN_ELSE:
5566 /* There are four cases here:
5567 (1) The destination is (pc), in which case this is a branch,
5568 nothing here applies.
5569 (2) The destination is ar.lc, in which case this is a
5570 doloop_end_internal,
5571 (3) The destination is an fp register, in which case this is
5572 an fselect instruction.
5573 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5574 this is a check load.
5575 In all cases, nothing we do in this function applies. */
5576 return;
5577
5578 default:
5579 if (COMPARISON_P (src)
5580 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5581 /* Set pflags->is_fp to 1 so that we know we're dealing
5582 with a floating point comparison when processing the
5583 destination of the SET. */
5584 pflags->is_fp = 1;
5585
5586 /* Discover if this is a parallel comparison. We only handle
5587 and.orcm and or.andcm at present, since we must retain a
5588 strict inverse on the predicate pair. */
5589 else if (GET_CODE (src) == AND)
5590 pflags->is_and = 1;
5591 else if (GET_CODE (src) == IOR)
5592 pflags->is_or = 1;
5593
5594 break;
5595 }
5596 }
5597
5598 /* Subroutine of rtx_needs_barrier; this function determines whether the
5599 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5600 are as in rtx_needs_barrier. */
5602
5603 static int
5604 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5605 {
5606 int need_barrier = 0;
5607 rtx dst;
5608 rtx src = SET_SRC (x);
5609
5610 if (GET_CODE (src) == CALL)
5611 /* We don't need to worry about the result registers that
5612 get written by a subroutine call. */
5613 return rtx_needs_barrier (src, flags, pred);
5614 else if (SET_DEST (x) == pc_rtx)
5615 {
5616 /* X is a conditional branch. */
5617 /* ??? This seems redundant, as the caller sets this bit for
5618 all JUMP_INSNs. */
5619 if (!ia64_spec_check_src_p (src))
5620 flags.is_branch = 1;
5621 return rtx_needs_barrier (src, flags, pred);
5622 }
5623
5624 if (ia64_spec_check_src_p (src))
5625 /* Avoid checking one register twice (in condition
5626 and in 'then' section) for ldc pattern. */
5627 {
5628 gcc_assert (REG_P (XEXP (src, 2)));
5629 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5630
5631 /* We process MEM below. */
5632 src = XEXP (src, 1);
5633 }
5634
5635 need_barrier |= rtx_needs_barrier (src, flags, pred);
5636
5637 dst = SET_DEST (x);
5638 if (GET_CODE (dst) == ZERO_EXTRACT)
5639 {
5640 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5641 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5642 }
5643 return need_barrier;
5644 }
5645
5646 /* Handle an access to rtx X of type FLAGS using predicate register
5647 PRED. Return 1 if this access creates a dependency with an earlier
5648 instruction in the same group. */
5649
5650 static int
5651 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5652 {
5653 int i, j;
5654 int is_complemented = 0;
5655 int need_barrier = 0;
5656 const char *format_ptr;
5657 struct reg_flags new_flags;
5658 rtx cond;
5659
5660 if (! x)
5661 return 0;
5662
5663 new_flags = flags;
5664
5665 switch (GET_CODE (x))
5666 {
5667 case SET:
5668 update_set_flags (x, &new_flags);
5669 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5670 if (GET_CODE (SET_SRC (x)) != CALL)
5671 {
5672 new_flags.is_write = 1;
5673 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5674 }
5675 break;
5676
5677 case CALL:
5678 new_flags.is_write = 0;
5679 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5680
5681 /* Avoid multiple register writes, in case this is a pattern with
5682 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5683 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
5684 {
5685 new_flags.is_write = 1;
5686 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5687 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5688 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5689 }
5690 break;
5691
5692 case COND_EXEC:
5693 /* X is a predicated instruction. */
5694
5695 cond = COND_EXEC_TEST (x);
5696 gcc_assert (!pred);
5697 need_barrier = rtx_needs_barrier (cond, flags, 0);
5698
5699 if (GET_CODE (cond) == EQ)
5700 is_complemented = 1;
5701 cond = XEXP (cond, 0);
5702 gcc_assert (GET_CODE (cond) == REG
5703 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5704 pred = REGNO (cond);
5705 if (is_complemented)
5706 ++pred;
5707
5708 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5709 return need_barrier;
5710
5711 case CLOBBER:
5712 case USE:
5713 /* Clobber & use are for earlier compiler-phases only. */
5714 break;
5715
5716 case ASM_OPERANDS:
5717 case ASM_INPUT:
5718 /* We always emit stop bits for traditional asms. We emit stop bits
5719 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5720 if (GET_CODE (x) != ASM_OPERANDS
5721 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5722 {
5723 /* Avoid writing the register multiple times if we have multiple
5724 asm outputs. This avoids a failure in rws_access_reg. */
5725 if (! rws_insn_test (REG_VOLATILE))
5726 {
5727 new_flags.is_write = 1;
5728 rws_access_regno (REG_VOLATILE, new_flags, pred);
5729 }
5730 return 1;
5731 }
5732
5733 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5734 We cannot just fall through here since then we would be confused
5735 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5736 a traditional asm, unlike its normal usage. */
5737
5738 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5739 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5740 need_barrier = 1;
5741 break;
5742
5743 case PARALLEL:
5744 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5745 {
5746 rtx pat = XVECEXP (x, 0, i);
5747 switch (GET_CODE (pat))
5748 {
5749 case SET:
5750 update_set_flags (pat, &new_flags);
5751 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5752 break;
5753
5754 case USE:
5755 case CALL:
5756 case ASM_OPERANDS:
5757 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5758 break;
5759
5760 case CLOBBER:
5761 case RETURN:
5762 break;
5763
5764 default:
5765 gcc_unreachable ();
5766 }
5767 }
5768 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5769 {
5770 rtx pat = XVECEXP (x, 0, i);
5771 if (GET_CODE (pat) == SET)
5772 {
5773 if (GET_CODE (SET_SRC (pat)) != CALL)
5774 {
5775 new_flags.is_write = 1;
5776 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5777 pred);
5778 }
5779 }
5780 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5781 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5782 }
5783 break;
5784
5785 case SUBREG:
5786 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5787 break;
5788 case REG:
5789 if (REGNO (x) == AR_UNAT_REGNUM)
5790 {
5791 for (i = 0; i < 64; ++i)
5792 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5793 }
5794 else
5795 need_barrier = rws_access_reg (x, flags, pred);
5796 break;
5797
5798 case MEM:
5799 /* Find the regs used in memory address computation. */
5800 new_flags.is_write = 0;
5801 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5802 break;
5803
5804 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5805 case SYMBOL_REF: case LABEL_REF: case CONST:
5806 break;
5807
5808 /* Operators with side-effects. */
5809 case POST_INC: case POST_DEC:
5810 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5811
5812 new_flags.is_write = 0;
5813 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5814 new_flags.is_write = 1;
5815 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5816 break;
5817
5818 case POST_MODIFY:
5819 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5820
5821 new_flags.is_write = 0;
5822 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5823 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5824 new_flags.is_write = 1;
5825 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5826 break;
5827
5828 /* Handle common unary and binary ops for efficiency. */
5829 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5830 case MOD: case UDIV: case UMOD: case AND: case IOR:
5831 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5832 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5833 case NE: case EQ: case GE: case GT: case LE:
5834 case LT: case GEU: case GTU: case LEU: case LTU:
5835 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5836 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5837 break;
5838
5839 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5840 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5841 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5842 case SQRT: case FFS: case POPCOUNT:
5843 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5844 break;
5845
5846 case VEC_SELECT:
5847 /* VEC_SELECT's second argument is a PARALLEL with integers that
5848 describe the elements selected. On ia64, those integers are
5849 always constants. Avoid walking the PARALLEL so that we don't
5850 get confused with "normal" parallels and then die. */
5851 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5852 break;
5853
5854 case UNSPEC:
5855 switch (XINT (x, 1))
5856 {
5857 case UNSPEC_LTOFF_DTPMOD:
5858 case UNSPEC_LTOFF_DTPREL:
5859 case UNSPEC_DTPREL:
5860 case UNSPEC_LTOFF_TPREL:
5861 case UNSPEC_TPREL:
5862 case UNSPEC_PRED_REL_MUTEX:
5863 case UNSPEC_PIC_CALL:
5864 case UNSPEC_MF:
5865 case UNSPEC_FETCHADD_ACQ:
5866 case UNSPEC_BSP_VALUE:
5867 case UNSPEC_FLUSHRS:
5868 case UNSPEC_BUNDLE_SELECTOR:
5869 break;
5870
5871 case UNSPEC_GR_SPILL:
5872 case UNSPEC_GR_RESTORE:
5873 {
5874 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5875 HOST_WIDE_INT bit = (offset >> 3) & 63;
5876
5877 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5878 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5879 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5880 new_flags, pred);
5881 break;
5882 }
5883
5884 case UNSPEC_FR_SPILL:
5885 case UNSPEC_FR_RESTORE:
5886 case UNSPEC_GETF_EXP:
5887 case UNSPEC_SETF_EXP:
5888 case UNSPEC_ADDP4:
5889 case UNSPEC_FR_SQRT_RECIP_APPROX:
5890 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
5891 case UNSPEC_LDA:
5892 case UNSPEC_LDS:
5893 case UNSPEC_LDSA:
5894 case UNSPEC_CHKACLR:
5895 case UNSPEC_CHKS:
5896 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5897 break;
5898
5899 case UNSPEC_FR_RECIP_APPROX:
5900 case UNSPEC_SHRP:
5901 case UNSPEC_COPYSIGN:
5902 case UNSPEC_FR_RECIP_APPROX_RES:
5903 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5904 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5905 break;
5906
5907 case UNSPEC_CMPXCHG_ACQ:
5908 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5909 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5910 break;
5911
5912 default:
5913 gcc_unreachable ();
5914 }
5915 break;
5916
5917 case UNSPEC_VOLATILE:
5918 switch (XINT (x, 1))
5919 {
5920 case UNSPECV_ALLOC:
5921 /* Alloc must always be the first instruction of a group.
5922 We force this by always returning true. */
5923 /* ??? We might get better scheduling if we explicitly check for
5924 input/local/output register dependencies, and modify the
5925 scheduler so that alloc is always reordered to the start of
5926 the current group. We could then eliminate all of the
5927 first_instruction code. */
5928 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5929
5930 new_flags.is_write = 1;
5931 rws_access_regno (REG_AR_CFM, new_flags, pred);
5932 return 1;
5933
5934 case UNSPECV_SET_BSP:
5935 need_barrier = 1;
5936 break;
5937
5938 case UNSPECV_BLOCKAGE:
5939 case UNSPECV_INSN_GROUP_BARRIER:
5940 case UNSPECV_BREAK:
5941 case UNSPECV_PSAC_ALL:
5942 case UNSPECV_PSAC_NORMAL:
5943 return 0;
5944
5945 default:
5946 gcc_unreachable ();
5947 }
5948 break;
5949
5950 case RETURN:
5951 new_flags.is_write = 0;
5952 need_barrier = rws_access_regno (REG_RP, flags, pred);
5953 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5954
5955 new_flags.is_write = 1;
5956 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5957 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5958 break;
5959
5960 default:
5961 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5962 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5963 switch (format_ptr[i])
5964 {
5965 case '0': /* unused field */
5966 case 'i': /* integer */
5967 case 'n': /* note */
5968 case 'w': /* wide integer */
5969 case 's': /* pointer to string */
5970 case 'S': /* optional pointer to string */
5971 break;
5972
5973 case 'e':
5974 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5975 need_barrier = 1;
5976 break;
5977
5978 case 'E':
5979 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5980 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5981 need_barrier = 1;
5982 break;
5983
5984 default:
5985 gcc_unreachable ();
5986 }
5987 break;
5988 }
5989 return need_barrier;
5990 }
5991
5992 /* Clear out the state for group_barrier_needed at the start of a
5993 sequence of insns. */
5994
5995 static void
5996 init_insn_group_barriers (void)
5997 {
5998 memset (rws_sum, 0, sizeof (rws_sum));
5999 first_instruction = 1;
6000 }
6001
6002 /* Given the current state, determine whether a group barrier (a stop bit) is
6003 necessary before INSN. Return nonzero if so. This modifies the state to
6004 include the effects of INSN as a side-effect. */
6005
6006 static int
6007 group_barrier_needed (rtx insn)
6008 {
6009 rtx pat;
6010 int need_barrier = 0;
6011 struct reg_flags flags;
6012
6013 memset (&flags, 0, sizeof (flags));
6014 switch (GET_CODE (insn))
6015 {
6016 case NOTE:
6017 break;
6018
6019 case BARRIER:
6020 /* A barrier doesn't imply an instruction group boundary. */
6021 break;
6022
6023 case CODE_LABEL:
6024 memset (rws_insn, 0, sizeof (rws_insn));
6025 return 1;
6026
6027 case CALL_INSN:
6028 flags.is_branch = 1;
6029 flags.is_sibcall = SIBLING_CALL_P (insn);
6030 memset (rws_insn, 0, sizeof (rws_insn));
6031
6032 /* Don't bundle a call following another call. */
6033 if ((pat = prev_active_insn (insn))
6034 && GET_CODE (pat) == CALL_INSN)
6035 {
6036 need_barrier = 1;
6037 break;
6038 }
6039
6040 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6041 break;
6042
6043 case JUMP_INSN:
6044 if (!ia64_spec_check_p (insn))
6045 flags.is_branch = 1;
6046
6047 /* Don't bundle a jump following a call. */
6048 if ((pat = prev_active_insn (insn))
6049 && GET_CODE (pat) == CALL_INSN)
6050 {
6051 need_barrier = 1;
6052 break;
6053 }
6054 /* FALLTHRU */
6055
6056 case INSN:
6057 if (GET_CODE (PATTERN (insn)) == USE
6058 || GET_CODE (PATTERN (insn)) == CLOBBER)
6059 /* Don't care about USE and CLOBBER "insns"---those are used to
6060 indicate to the optimizer that it shouldn't get rid of
6061 certain operations. */
6062 break;
6063
6064 pat = PATTERN (insn);
6065
6066 /* Ug. Hack hacks hacked elsewhere. */
6067 switch (recog_memoized (insn))
6068 {
6069 /* We play dependency tricks with the epilogue in order
6070 to get proper schedules. Undo this for dv analysis. */
6071 case CODE_FOR_epilogue_deallocate_stack:
6072 case CODE_FOR_prologue_allocate_stack:
6073 pat = XVECEXP (pat, 0, 0);
6074 break;
6075
6076 /* The pattern we use for br.cloop confuses the code above.
6077 The second element of the vector is representative. */
6078 case CODE_FOR_doloop_end_internal:
6079 pat = XVECEXP (pat, 0, 1);
6080 break;
6081
6082 /* Doesn't generate code. */
6083 case CODE_FOR_pred_rel_mutex:
6084 case CODE_FOR_prologue_use:
6085 return 0;
6086
6087 default:
6088 break;
6089 }
6090
6091 memset (rws_insn, 0, sizeof (rws_insn));
6092 need_barrier = rtx_needs_barrier (pat, flags, 0);
6093
6094 /* Check to see if the previous instruction was a volatile
6095 asm. */
6096 if (! need_barrier)
6097 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6098 break;
6099
6100 default:
6101 gcc_unreachable ();
6102 }
6103
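  /* The first real insn after a stop bit never needs another stop bit
     (see the comment on first_instruction above); ignorable insns, USEs
     and CLOBBERs do not end this grace period. */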
6104 if (first_instruction && INSN_P (insn)
6105 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6106 && GET_CODE (PATTERN (insn)) != USE
6107 && GET_CODE (PATTERN (insn)) != CLOBBER)
6108 {
6109 need_barrier = 0;
6110 first_instruction = 0;
6111 }
6112
6113 return need_barrier;
6114 }
6115
6116 /* Like group_barrier_needed, but do not clobber the current state. */
6117
6118 static int
6119 safe_group_barrier_needed (rtx insn)
6120 {
6121 int saved_first_instruction;
6122 int t;
6123
6124 saved_first_instruction = first_instruction;
6125 in_safe_group_barrier = 1;
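  /* With in_safe_group_barrier set, rws_access_regno skips its calls to
     rws_update, so rws_sum is left intact; only first_instruction must be
     saved and restored explicitly. */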
6126
6127 t = group_barrier_needed (insn);
6128
6129 first_instruction = saved_first_instruction;
6130 in_safe_group_barrier = 0;
6131
6132 return t;
6133 }
6134
6135 /* Scan the current function and insert stop bits as necessary to
6136 eliminate dependencies. This function assumes that a final
6137 instruction scheduling pass has been run which has already
6138 inserted most of the necessary stop bits. This function only
6139 inserts new ones at basic block boundaries, since these are
6140 invisible to the scheduler. */
6141
6142 static void
6143 emit_insn_group_barriers (FILE *dump)
6144 {
6145 rtx insn;
6146 rtx last_label = 0;
6147 int insns_since_last_label = 0;
6148
6149 init_insn_group_barriers ();
6150
6151 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6152 {
6153 if (GET_CODE (insn) == CODE_LABEL)
6154 {
6155 if (insns_since_last_label)
6156 last_label = insn;
6157 insns_since_last_label = 0;
6158 }
6159 else if (GET_CODE (insn) == NOTE
6160 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6161 {
6162 if (insns_since_last_label)
6163 last_label = insn;
6164 insns_since_last_label = 0;
6165 }
6166 else if (GET_CODE (insn) == INSN
6167 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6168 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6169 {
6170 init_insn_group_barriers ();
6171 last_label = 0;
6172 }
6173 else if (INSN_P (insn))
6174 {
6175 insns_since_last_label = 1;
6176
6177 if (group_barrier_needed (insn))
6178 {
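	      /* Insert the stop bit before the most recent label or basic
		 block boundary rather than directly before INSN; stops
		 inside a block were already placed by the scheduler (see
		 the function comment above). */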
6179 if (last_label)
6180 {
6181 if (dump)
6182 fprintf (dump, "Emitting stop before label %d\n",
6183 INSN_UID (last_label));
6184 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6185 insn = last_label;
6186
6187 init_insn_group_barriers ();
6188 last_label = 0;
6189 }
6190 }
6191 }
6192 }
6193 }
6194
6195 /* Like emit_insn_group_barriers, but used when no final scheduling pass has
6196 been run. This function has to emit all necessary group barriers. */
6197
6198 static void
6199 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6200 {
6201 rtx insn;
6202
6203 init_insn_group_barriers ();
6204
6205 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6206 {
6207 if (GET_CODE (insn) == BARRIER)
6208 {
6209 rtx last = prev_active_insn (insn);
6210
6211 if (! last)
6212 continue;
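	  /* Skip over a jump table (a JUMP_INSN holding an ADDR_DIFF_VEC);
	     it emits data rather than an instruction. */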
6213 if (GET_CODE (last) == JUMP_INSN
6214 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6215 last = prev_active_insn (last);
6216 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6217 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6218
6219 init_insn_group_barriers ();
6220 }
6221 else if (INSN_P (insn))
6222 {
6223 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6224 init_insn_group_barriers ();
6225 else if (group_barrier_needed (insn))
6226 {
6227 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6228 init_insn_group_barriers ();
6229 group_barrier_needed (insn);
6230 }
6231 }
6232 }
6233 }
6234
6235 \f
6236
6237 /* Instruction scheduling support. */
6238
6239 #define NR_BUNDLES 10
6240
6241 /* A list of names of all available bundles. */
6242
6243 static const char *bundle_name [NR_BUNDLES] =
6244 {
6245 ".mii",
6246 ".mmi",
6247 ".mfi",
6248 ".mmf",
6249 #if NR_BUNDLES == 10
6250 ".bbb",
6251 ".mbb",
6252 #endif
6253 ".mib",
6254 ".mmb",
6255 ".mfb",
6256 ".mlx"
6257 };
6258
6259 /* Nonzero if we should insert stop bits into the schedule. */
6260
6261 int ia64_final_schedule = 0;
6262
6263 /* Codes of the corresponding queried units: */
6264
6265 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6266 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6267
6268 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6269 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6270
6271 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6272
6273 /* The following variable value is an insn group barrier. */
6274
6275 static rtx dfa_stop_insn;
6276
6277 /* The following variable value is the last issued insn. */
6278
6279 static rtx last_scheduled_insn;
6280
6281 /* The following variable is a pointer to a DFA state used as a
6282 temporary variable. */
6283
6284 static state_t temp_dfa_state = NULL;
6285
6286 /* The following variable is the DFA state after issuing the last
6287 insn. */
6288
6289 static state_t prev_cycle_state = NULL;
6290
6291 /* The following array element values are TRUE if the corresponding
6292 insn requires stop bits to be added before it. */
6293
6294 static char *stops_p = NULL;
6295
6296 /* The following array element values are ZERO for non-speculative
6297 instructions and hold the corresponding speculation check number for
6298 speculative instructions. */
6299 static int *spec_check_no = NULL;
6300
6301 /* Size of spec_check_no array. */
6302 static int max_uid = 0;
6303
6304 /* The following variable is used to set up the array mentioned above. */
6305
6306 static int stop_before_p = 0;
6307
6308 /* The following variable is the length of the arrays `clocks' and
6309 `add_cycles'. */
6310
6311 static int clocks_length;
6312
6313 /* The following array element values are cycles on which the
6314 corresponding insn will be issued. The array is used only for
6315 Itanium1. */
6316
6317 static int *clocks;
6318
6319 /* The following array element values are the numbers of cycles that should
6320 be added to improve insn scheduling for MM insns for Itanium1. */
6321
6322 static int *add_cycles;
6323
6324 /* The following variable is the number of data speculations in progress. */
6325 static int pending_data_specs = 0;
6326
6327 static rtx ia64_single_set (rtx);
6328 static void ia64_emit_insn_before (rtx, rtx);
6329
6330 /* Map a bundle number to its pseudo-op. */
6331
6332 const char *
6333 get_bundle_name (int b)
6334 {
6335 return bundle_name[b];
6336 }
6337
6338
6339 /* Return the maximum number of instructions a cpu can issue. */
6340
6341 static int
6342 ia64_issue_rate (void)
6343 {
6344 return 6;
6345 }
6346
6347 /* Helper function - like single_set, but look inside COND_EXEC. */
6348
6349 static rtx
6350 ia64_single_set (rtx insn)
6351 {
6352 rtx x = PATTERN (insn), ret;
6353 if (GET_CODE (x) == COND_EXEC)
6354 x = COND_EXEC_CODE (x);
6355 if (GET_CODE (x) == SET)
6356 return x;
6357
6358 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6359 Although they are not a classical single set, the second set is there just
6360 to protect the first from being moved past FP-relative stack accesses. */
6361 switch (recog_memoized (insn))
6362 {
6363 case CODE_FOR_prologue_allocate_stack:
6364 case CODE_FOR_epilogue_deallocate_stack:
6365 ret = XVECEXP (x, 0, 0);
6366 break;
6367
6368 default:
6369 ret = single_set_2 (insn, x);
6370 break;
6371 }
6372
6373 return ret;
6374 }
6375
6376 /* Adjust the cost of a scheduling dependency. Return the new cost of
6377 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
6378
6379 static int
6380 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6381 {
6382 enum attr_itanium_class dep_class;
6383 enum attr_itanium_class insn_class;
6384
6385 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6386 return cost;
6387
6388 insn_class = ia64_safe_itanium_class (insn);
6389 dep_class = ia64_safe_itanium_class (dep_insn);
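  /* An output dependency in which either insn is a store carries no
     latency; report a cost of zero. */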
6390 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6391 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6392 return 0;
6393
6394 return cost;
6395 }
6396
6397 /* Like emit_insn_before, but skip cycle_display notes.
6398 ??? When cycle display notes are implemented, update this. */
6399
6400 static void
6401 ia64_emit_insn_before (rtx insn, rtx before)
6402 {
6403 emit_insn_before (insn, before);
6404 }
6405
6406 /* The following function marks insns that produce addresses for load
6407 and store insns. Such insns will be placed into M slots because that
6408 decreases latency time for Itanium1 (see function
6409 `ia64_produce_address_p' and the DFA descriptions). */
6410
6411 static void
6412 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6413 {
6414 rtx insn, next, next_tail;
6415
6416 /* Before reload, which_alternative is not set, which means that
6417 ia64_safe_itanium_class will produce wrong results for (at least)
6418 move instructions. */
6419 if (!reload_completed)
6420 return;
6421
6422 next_tail = NEXT_INSN (tail);
6423 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6424 if (INSN_P (insn))
6425 insn->call = 0;
6426 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6427 if (INSN_P (insn)
6428 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6429 {
6430 sd_iterator_def sd_it;
6431 dep_t dep;
6432 bool has_mem_op_consumer_p = false;
6433
6434 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6435 {
6436 enum attr_itanium_class c;
6437
6438 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6439 continue;
6440
6441 next = DEP_CON (dep);
6442 c = ia64_safe_itanium_class (next);
6443 if ((c == ITANIUM_CLASS_ST
6444 || c == ITANIUM_CLASS_STF)
6445 && ia64_st_address_bypass_p (insn, next))
6446 {
6447 has_mem_op_consumer_p = true;
6448 break;
6449 }
6450 else if ((c == ITANIUM_CLASS_LD
6451 || c == ITANIUM_CLASS_FLD
6452 || c == ITANIUM_CLASS_FLDP)
6453 && ia64_ld_address_bypass_p (insn, next))
6454 {
6455 has_mem_op_consumer_p = true;
6456 break;
6457 }
6458 }
6459
6460 insn->call = has_mem_op_consumer_p;
6461 }
6462 }
6463
6464 /* We're beginning a new block. Initialize data structures as necessary. */
6465
6466 static void
6467 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6468 int sched_verbose ATTRIBUTE_UNUSED,
6469 int max_ready ATTRIBUTE_UNUSED)
6470 {
6471 #ifdef ENABLE_CHECKING
6472 rtx insn;
6473
6474 if (reload_completed)
6475 for (insn = NEXT_INSN (current_sched_info->prev_head);
6476 insn != current_sched_info->next_tail;
6477 insn = NEXT_INSN (insn))
6478 gcc_assert (!SCHED_GROUP_P (insn));
6479 #endif
6480 last_scheduled_insn = NULL_RTX;
6481 init_insn_group_barriers ();
6482 }
6483
6484 /* We're beginning a scheduling pass. Check assertion. */
6485
6486 static void
6487 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6488 int sched_verbose ATTRIBUTE_UNUSED,
6489 int max_ready ATTRIBUTE_UNUSED)
6490 {
6491 gcc_assert (!pending_data_specs);
6492 }
6493
6494 /* Scheduling pass is now finished. Free/reset static variables. */
6495 static void
6496 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6497 int sched_verbose ATTRIBUTE_UNUSED)
6498 {
6499 free (spec_check_no);
6500 spec_check_no = 0;
6501 max_uid = 0;
6502 }
6503
6504 /* We are about to begin issuing insns for this clock cycle.
6505 Override the default sort algorithm to better slot instructions. */
6506
6507 static int
6508 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6509 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6510 int reorder_type)
6511 {
6512 int n_asms;
6513 int n_ready = *pn_ready;
6514 rtx *e_ready = ready + n_ready;
6515 rtx *insnp;
6516
6517 if (sched_verbose)
6518 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6519
6520 if (reorder_type == 0)
6521 {
6522 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6523 n_asms = 0;
6524 for (insnp = ready; insnp < e_ready; insnp++)
6525 if (insnp < e_ready)
6526 {
6527 rtx insn = *insnp;
6528 enum attr_type t = ia64_safe_type (insn);
6529 if (t == TYPE_UNKNOWN)
6530 {
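	    /* Asms are collected at the front of the ready list; any other
	       insn of unknown type is moved to the end of the list (the
	       position issued first) and the cycle is limited to that
	       single insn. */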
6531 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6532 || asm_noperands (PATTERN (insn)) >= 0)
6533 {
6534 rtx lowest = ready[n_asms];
6535 ready[n_asms] = insn;
6536 *insnp = lowest;
6537 n_asms++;
6538 }
6539 else
6540 {
6541 rtx highest = ready[n_ready - 1];
6542 ready[n_ready - 1] = insn;
6543 *insnp = highest;
6544 return 1;
6545 }
6546 }
6547 }
6548
6549 if (n_asms < n_ready)
6550 {
6551 /* Some normal insns to process. Skip the asms. */
6552 ready += n_asms;
6553 n_ready -= n_asms;
6554 }
6555 else if (n_ready > 0)
6556 return 1;
6557 }
6558
6559 if (ia64_final_schedule)
6560 {
6561 int deleted = 0;
6562 int nr_need_stop = 0;
6563
6564 for (insnp = ready; insnp < e_ready; insnp++)
6565 if (safe_group_barrier_needed (*insnp))
6566 nr_need_stop++;
6567
6568 if (reorder_type == 1 && n_ready == nr_need_stop)
6569 return 0;
6570 if (reorder_type == 0)
6571 return 1;
6572 insnp = e_ready;
6573 /* Move down everything that needs a stop bit, preserving
6574 relative order. */
6575 while (insnp-- > ready + deleted)
6576 while (insnp >= ready + deleted)
6577 {
6578 rtx insn = *insnp;
6579 if (! safe_group_barrier_needed (insn))
6580 break;
6581 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6582 *ready = insn;
6583 deleted++;
6584 }
6585 n_ready -= deleted;
6586 ready += deleted;
6587 }
6588
6589 return 1;
6590 }
6591
6592 /* We are about to begin issuing insns for this clock cycle. Override
6593 the default sort algorithm to better slot instructions. */
6594
6595 static int
6596 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6597 int clock_var)
6598 {
6599 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6600 pn_ready, clock_var, 0);
6601 }
6602
6603 /* Like ia64_sched_reorder, but called after issuing each insn.
6604 Override the default sort algorithm to better slot instructions. */
6605
6606 static int
6607 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6608 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6609 int *pn_ready, int clock_var)
6610 {
6611 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6612 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6613 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6614 clock_var, 1);
6615 }
6616
6617 /* We are about to issue INSN. Return the number of insns left on the
6618 ready queue that can be issued this cycle. */
6619
6620 static int
6621 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6622 int sched_verbose ATTRIBUTE_UNUSED,
6623 rtx insn ATTRIBUTE_UNUSED,
6624 int can_issue_more ATTRIBUTE_UNUSED)
6625 {
6626 if (current_sched_info->flags & DO_SPECULATION)
6627 /* Modulo scheduling does not extend h_i_d when emitting
6628 new instructions. Deal with it. */
6629 {
6630 if (DONE_SPEC (insn) & BEGIN_DATA)
6631 pending_data_specs++;
6632 if (CHECK_SPEC (insn) & BEGIN_DATA)
6633 pending_data_specs--;
6634 }
6635
6636 last_scheduled_insn = insn;
6637 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6638 if (reload_completed)
6639 {
6640 int needed = group_barrier_needed (insn);
6641
6642 gcc_assert (!needed);
6643 if (GET_CODE (insn) == CALL_INSN)
6644 init_insn_group_barriers ();
6645 stops_p [INSN_UID (insn)] = stop_before_p;
6646 stop_before_p = 0;
6647 }
6648 return 1;
6649 }
6650
6651 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6652 can be chosen. */
6653
6654 static int
6655 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6656 {
6657 gcc_assert (insn && INSN_P (insn));
6658 return ((!reload_completed
6659 || !safe_group_barrier_needed (insn))
6660 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6661 }
6662
6663 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6664 can be chosen. */
6665
6666 static bool
6667 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
6668 {
6669 gcc_assert (insn && INSN_P (insn));
6670 /* The size of the ALAT is 32. Since we perform conservative data
6671 speculation, we keep the ALAT half-empty. */
6672 return (pending_data_specs < 16
6673 || !(TODO_SPEC (insn) & BEGIN_DATA));
6674 }
6675
6676 /* The following variable is a pseudo-insn used by the DFA insn
6677 scheduler to change the DFA state when the simulated clock is
6678 increased. */
6679
6680 static rtx dfa_pre_cycle_insn;
6681
6682 /* We are about to begin issuing INSN. Return nonzero if we cannot
6683 issue it on the given cycle CLOCK, and return zero if we should not sort
6684 the ready queue on the next clock start. */
6685
6686 static int
6687 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6688 int clock, int *sort_p)
6689 {
6690 int setup_clocks_p = FALSE;
6691
6692 gcc_assert (insn && INSN_P (insn));
6693 if ((reload_completed && safe_group_barrier_needed (insn))
6694 || (last_scheduled_insn
6695 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6696 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6697 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6698 {
6699 init_insn_group_barriers ();
6700 if (verbose && dump)
6701 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6702 last_clock == clock ? " + cycle advance" : "");
6703 stop_before_p = 1;
6704 if (last_clock == clock)
6705 {
6706 state_transition (curr_state, dfa_stop_insn);
6707 if (TARGET_EARLY_STOP_BITS)
6708 *sort_p = (last_scheduled_insn == NULL_RTX
6709 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6710 else
6711 *sort_p = 0;
6712 return 1;
6713 }
6714 else if (reload_completed)
6715 setup_clocks_p = TRUE;
6716 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6717 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6718 state_reset (curr_state);
6719 else
6720 {
6721 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6722 state_transition (curr_state, dfa_stop_insn);
6723 state_transition (curr_state, dfa_pre_cycle_insn);
6724 state_transition (curr_state, NULL);
6725 }
6726 }
6727 else if (reload_completed)
6728 setup_clocks_p = TRUE;
6729 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6730 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6731 && asm_noperands (PATTERN (insn)) < 0)
6732 {
6733 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6734
6735 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6736 {
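	  /* Look for a true dependence on an MMMUL/MMSHF result produced
	     fewer than four cycles ago; record in add_cycles how many extra
	     cycles the bundling pass should insert for Itanium1. */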
6737 sd_iterator_def sd_it;
6738 dep_t dep;
6739 int d = -1;
6740
6741 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6742 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6743 {
6744 enum attr_itanium_class dep_class;
6745 rtx dep_insn = DEP_PRO (dep);
6746
6747 dep_class = ia64_safe_itanium_class (dep_insn);
6748 if ((dep_class == ITANIUM_CLASS_MMMUL
6749 || dep_class == ITANIUM_CLASS_MMSHF)
6750 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6751 && (d < 0
6752 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6753 d = last_clock - clocks [INSN_UID (dep_insn)];
6754 }
6755 if (d >= 0)
6756 add_cycles [INSN_UID (insn)] = 3 - d;
6757 }
6758 }
6759 return 0;
6760 }
6761
6762 /* Implement targetm.sched.h_i_d_extended hook.
6763 Extend internal data structures. */
6764 static void
6765 ia64_h_i_d_extended (void)
6766 {
6767 if (current_sched_info->flags & DO_SPECULATION)
6768 {
6769 int new_max_uid = get_max_uid () + 1;
6770
6771 spec_check_no = (int *) xrecalloc (spec_check_no, new_max_uid,
6772 max_uid, sizeof (*spec_check_no));
6773 max_uid = new_max_uid;
6774 }
6775
6776 if (stops_p != NULL)
6777 {
6778 int new_clocks_length = get_max_uid () + 1;
6779
6780 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6781
6782 if (ia64_tune == PROCESSOR_ITANIUM)
6783 {
6784 clocks = (int *) xrecalloc (clocks, new_clocks_length, clocks_length,
6785 sizeof (int));
6786 add_cycles = (int *) xrecalloc (add_cycles, new_clocks_length,
6787 clocks_length, sizeof (int));
6788 }
6789
6790 clocks_length = new_clocks_length;
6791 }
6792 }
6793
6794 /* Constants that help map 'enum machine_mode' to int. */
6795 enum SPEC_MODES
6796 {
6797 SPEC_MODE_INVALID = -1,
6798 SPEC_MODE_FIRST = 0,
6799 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6800 SPEC_MODE_FOR_EXTEND_LAST = 3,
6801 SPEC_MODE_LAST = 8
6802 };
6803
6804 /* Return index of the MODE. */
6805 static int
6806 ia64_mode_to_int (enum machine_mode mode)
6807 {
6808 switch (mode)
6809 {
6810 case BImode: return 0; /* SPEC_MODE_FIRST */
6811 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6812 case HImode: return 2;
6813 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6814 case DImode: return 4;
6815 case SFmode: return 5;
6816 case DFmode: return 6;
6817 case XFmode: return 7;
6818 case TImode:
6819 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6820 mentioned in itanium[12].md. Predicate fp_register_operand also
6821 needs to be defined. Bottom line: better disable for now. */
6822 return SPEC_MODE_INVALID;
6823 default: return SPEC_MODE_INVALID;
6824 }
6825 }
6826
6827 /* Provide information about speculation capabilities. */
6828 static void
6829 ia64_set_sched_flags (spec_info_t spec_info)
6830 {
6831 unsigned int *flags = &(current_sched_info->flags);
6832
6833 if (*flags & SCHED_RGN
6834 || *flags & SCHED_EBB)
6835 {
6836 int mask = 0;
6837
6838 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6839 || (mflag_sched_ar_data_spec && reload_completed))
6840 {
6841 mask |= BEGIN_DATA;
6842
6843 if ((mflag_sched_br_in_data_spec && !reload_completed)
6844 || (mflag_sched_ar_in_data_spec && reload_completed))
6845 mask |= BE_IN_DATA;
6846 }
6847
6848 if (mflag_sched_control_spec)
6849 {
6850 mask |= BEGIN_CONTROL;
6851
6852 if (mflag_sched_in_control_spec)
6853 mask |= BE_IN_CONTROL;
6854 }
6855
6856 spec_info->mask = mask;
6857
6858 if (mask)
6859 {
6860 *flags |= USE_DEPS_LIST | DO_SPECULATION;
6861
6862 if (mask & BE_IN_SPEC)
6863 *flags |= NEW_BBS;
6864
6865 spec_info->flags = 0;
6866
6867 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6868 spec_info->flags |= PREFER_NON_DATA_SPEC;
6869
6870 if ((mask & CONTROL_SPEC)
6871 && mflag_sched_prefer_non_control_spec_insns)
6872 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6873
6874 if (mflag_sched_spec_verbose)
6875 {
6876 if (sched_verbose >= 1)
6877 spec_info->dump = sched_dump;
6878 else
6879 spec_info->dump = stderr;
6880 }
6881 else
6882 spec_info->dump = 0;
6883
6884 if (mflag_sched_count_spec_in_critical_path)
6885 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6886 }
6887 }
6888 }
6889
6890 /* Implement targetm.sched.speculate_insn hook.
6891 Check if the INSN can be TS speculative.
6892 If 'no' - return -1.
6893 If 'yes' - generate the speculative pattern in NEW_PAT and return 1.
6894 If the current pattern of INSN already provides TS speculation, return 0. */
6895 static int
6896 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6897 {
6898 rtx pat, reg, mem, mem_reg;
6899 int mode_no, gen_p = 1;
6900 bool extend_p;
6901
6902 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6903
6904 pat = PATTERN (insn);
6905
6906 if (GET_CODE (pat) == COND_EXEC)
6907 pat = COND_EXEC_CODE (pat);
6908
6909 /* This should be a SET ... */
6910 if (GET_CODE (pat) != SET)
6911 return -1;
6912
6913 reg = SET_DEST (pat);
6914 /* ... to the general/fp register ... */
6915 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6916 return -1;
6917
6918 /* ... from the mem ... */
6919 mem = SET_SRC (pat);
6920
6921 /* ... that can, possibly, be a zero_extend ... */
6922 if (GET_CODE (mem) == ZERO_EXTEND)
6923 {
6924 mem = XEXP (mem, 0);
6925 extend_p = true;
6926 }
6927 else
6928 extend_p = false;
6929
6930 /* ... or a speculative load. */
6931 if (GET_CODE (mem) == UNSPEC)
6932 {
6933 int code;
6934
6935 code = XINT (mem, 1);
6936 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6937 return -1;
6938
6939 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6940 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6941 || code == UNSPEC_LDSA)
6942 gen_p = 0;
6943
6944 mem = XVECEXP (mem, 0, 0);
6945 gcc_assert (MEM_P (mem));
6946 }
6947
6948 /* Source should be a mem ... */
6949 if (!MEM_P (mem))
6950 return -1;
6951
6952 /* ... addressed by a register. */
6953 mem_reg = XEXP (mem, 0);
6954 if (!REG_P (mem_reg))
6955 return -1;
6956
6957 /* We should use MEM's mode since REG's mode in the presence of ZERO_EXTEND
6958 will always be DImode. */
6959 mode_no = ia64_mode_to_int (GET_MODE (mem));
6960
6961 if (mode_no == SPEC_MODE_INVALID
6962 || (extend_p
6963 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6964 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6965 return -1;
6966
6967 extract_insn_cached (insn);
6968 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6969
6970 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6971
6972 return gen_p;
6973 }
6974
6975 enum
6976 {
6977 /* Offset to reach ZERO_EXTEND patterns. */
6978 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6979 /* Number of patterns for each speculation mode. */
6980 SPEC_N = (SPEC_MODE_LAST
6981 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6982 };
6983
6984 enum SPEC_GEN_LD_MAP
6985 {
6986 /* Offset to ld.a patterns. */
6987 SPEC_GEN_A = 0 * SPEC_N,
6988 /* Offset to ld.s patterns. */
6989 SPEC_GEN_S = 1 * SPEC_N,
6990 /* Offset to ld.sa patterns. */
6991 SPEC_GEN_SA = 2 * SPEC_N,
6992 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c will
6993 mutate to chk.s. */
6994 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6995 };
6996
6997 /* These offsets are used to get (4 * SPEC_N). */
6998 enum SPEC_GEN_CHECK_OFFSET
6999 {
7000 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
7001 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
7002 };
7003
7004 /* If GEN_P is true, calculate the index of the needed speculation check and
7005 return the speculative pattern for INSN with speculation mode TS, machine
7006 mode MODE_NO, and with ZERO_EXTEND (if EXTEND_P is true).
7007 If GEN_P is false, just calculate the index of the needed speculation check. */
7008 static rtx
7009 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
7010 {
7011 rtx pat, new_pat;
7012 int load_no;
7013 int shift = 0;
7014
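  /* Four groups of SPEC_N entries, indexed via SPEC_GEN_LD_MAP:
     ld.a (advanced), ld.s (speculative), ld.sa (speculative advanced),
     and ld.sa standing in for ld.s (SPEC_GEN_SA_FOR_S). */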
7015 static rtx (* const gen_load[]) (rtx, rtx) = {
7016 gen_movbi_advanced,
7017 gen_movqi_advanced,
7018 gen_movhi_advanced,
7019 gen_movsi_advanced,
7020 gen_movdi_advanced,
7021 gen_movsf_advanced,
7022 gen_movdf_advanced,
7023 gen_movxf_advanced,
7024 gen_movti_advanced,
7025 gen_zero_extendqidi2_advanced,
7026 gen_zero_extendhidi2_advanced,
7027 gen_zero_extendsidi2_advanced,
7028
7029 gen_movbi_speculative,
7030 gen_movqi_speculative,
7031 gen_movhi_speculative,
7032 gen_movsi_speculative,
7033 gen_movdi_speculative,
7034 gen_movsf_speculative,
7035 gen_movdf_speculative,
7036 gen_movxf_speculative,
7037 gen_movti_speculative,
7038 gen_zero_extendqidi2_speculative,
7039 gen_zero_extendhidi2_speculative,
7040 gen_zero_extendsidi2_speculative,
7041
7042 gen_movbi_speculative_advanced,
7043 gen_movqi_speculative_advanced,
7044 gen_movhi_speculative_advanced,
7045 gen_movsi_speculative_advanced,
7046 gen_movdi_speculative_advanced,
7047 gen_movsf_speculative_advanced,
7048 gen_movdf_speculative_advanced,
7049 gen_movxf_speculative_advanced,
7050 gen_movti_speculative_advanced,
7051 gen_zero_extendqidi2_speculative_advanced,
7052 gen_zero_extendhidi2_speculative_advanced,
7053 gen_zero_extendsidi2_speculative_advanced,
7054
7055 gen_movbi_speculative_advanced,
7056 gen_movqi_speculative_advanced,
7057 gen_movhi_speculative_advanced,
7058 gen_movsi_speculative_advanced,
7059 gen_movdi_speculative_advanced,
7060 gen_movsf_speculative_advanced,
7061 gen_movdf_speculative_advanced,
7062 gen_movxf_speculative_advanced,
7063 gen_movti_speculative_advanced,
7064 gen_zero_extendqidi2_speculative_advanced,
7065 gen_zero_extendhidi2_speculative_advanced,
7066 gen_zero_extendsidi2_speculative_advanced
7067 };
7068
7069 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
7070
7071 if (ts & BEGIN_DATA)
7072 {
7073 /* We don't need recovery because even if this is an ld.sa, the
7074 ALAT entry will be allocated only if the NAT bit is set to zero.
7075 So it is enough to use ld.c here. */
7076
7077 if (ts & BEGIN_CONTROL)
7078 {
7079 load_no += SPEC_GEN_SA;
7080
7081 if (!mflag_sched_ldc)
7082 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
7083 }
7084 else
7085 {
7086 load_no += SPEC_GEN_A;
7087
7088 if (!mflag_sched_ldc)
7089 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
7090 }
7091 }
7092 else if (ts & BEGIN_CONTROL)
7093 {
7094 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
7095 if (!mflag_control_ldc)
7096 load_no += SPEC_GEN_S;
7097 else
7098 {
7099 gcc_assert (mflag_sched_ldc);
7100 load_no += SPEC_GEN_SA_FOR_S;
7101 }
7102 }
7103 else
7104 gcc_unreachable ();
7105
7106 /* Set the desired check index. We add '1' because a zero element in this
7107 array means that the instruction with such a uid is non-speculative. */
7108 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
7109
7110 if (!gen_p)
7111 return 0;
7112
7113 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7114 copy_rtx (recog_data.operand[1]));
7115
7116 pat = PATTERN (insn);
7117 if (GET_CODE (pat) == COND_EXEC)
7118 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7119 (COND_EXEC_TEST (pat)), new_pat);
7120
7121 return new_pat;
7122 }
7123
7124 /* Offset to branchy checks. */
7125 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
7126
7127 /* Return nonzero if INSN needs a branchy recovery check. */
7128 static bool
7129 ia64_needs_block_p (const_rtx insn)
7130 {
7131 int check_no;
7132
7133 check_no = spec_check_no[INSN_UID(insn)] - 1;
7134 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7135
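  /* chk.s and chk.a checks branch to recovery code and therefore need a
     recovery block; plain ld.c checks do not. */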
7136 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7137 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7138 }
7139
7140 /* Generate (or regenerate, if MUTATE_P) a recovery check for INSN.
7141 If (LABEL != 0 || MUTATE_P), generate a branchy recovery check.
7142 Otherwise, generate a simple check. */
7143 static rtx
7144 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7145 {
7146 rtx op1, pat, check_pat;
7147
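  /* Indexed by spec_check_no[uid] - 1: ld.c.clr patterns for ld.a, chk.s
     patterns for ld.s, ld.c.clr patterns for the two ld.sa groups, then
     chk.a.clr patterns, followed by the entries used after mutation
     (offset SPEC_GEN_CHECK_MUTATION_OFFSET). */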
7148 static rtx (* const gen_check[]) (rtx, rtx) = {
7149 gen_movbi_clr,
7150 gen_movqi_clr,
7151 gen_movhi_clr,
7152 gen_movsi_clr,
7153 gen_movdi_clr,
7154 gen_movsf_clr,
7155 gen_movdf_clr,
7156 gen_movxf_clr,
7157 gen_movti_clr,
7158 gen_zero_extendqidi2_clr,
7159 gen_zero_extendhidi2_clr,
7160 gen_zero_extendsidi2_clr,
7161
7162 gen_speculation_check_bi,
7163 gen_speculation_check_qi,
7164 gen_speculation_check_hi,
7165 gen_speculation_check_si,
7166 gen_speculation_check_di,
7167 gen_speculation_check_sf,
7168 gen_speculation_check_df,
7169 gen_speculation_check_xf,
7170 gen_speculation_check_ti,
7171 gen_speculation_check_di,
7172 gen_speculation_check_di,
7173 gen_speculation_check_di,
7174
7175 gen_movbi_clr,
7176 gen_movqi_clr,
7177 gen_movhi_clr,
7178 gen_movsi_clr,
7179 gen_movdi_clr,
7180 gen_movsf_clr,
7181 gen_movdf_clr,
7182 gen_movxf_clr,
7183 gen_movti_clr,
7184 gen_zero_extendqidi2_clr,
7185 gen_zero_extendhidi2_clr,
7186 gen_zero_extendsidi2_clr,
7187
7188 gen_movbi_clr,
7189 gen_movqi_clr,
7190 gen_movhi_clr,
7191 gen_movsi_clr,
7192 gen_movdi_clr,
7193 gen_movsf_clr,
7194 gen_movdf_clr,
7195 gen_movxf_clr,
7196 gen_movti_clr,
7197 gen_zero_extendqidi2_clr,
7198 gen_zero_extendhidi2_clr,
7199 gen_zero_extendsidi2_clr,
7200
7201 gen_advanced_load_check_clr_bi,
7202 gen_advanced_load_check_clr_qi,
7203 gen_advanced_load_check_clr_hi,
7204 gen_advanced_load_check_clr_si,
7205 gen_advanced_load_check_clr_di,
7206 gen_advanced_load_check_clr_sf,
7207 gen_advanced_load_check_clr_df,
7208 gen_advanced_load_check_clr_xf,
7209 gen_advanced_load_check_clr_ti,
7210 gen_advanced_load_check_clr_di,
7211 gen_advanced_load_check_clr_di,
7212 gen_advanced_load_check_clr_di,
7213
7214 /* Following checks are generated during mutation. */
7215 gen_advanced_load_check_clr_bi,
7216 gen_advanced_load_check_clr_qi,
7217 gen_advanced_load_check_clr_hi,
7218 gen_advanced_load_check_clr_si,
7219 gen_advanced_load_check_clr_di,
7220 gen_advanced_load_check_clr_sf,
7221 gen_advanced_load_check_clr_df,
7222 gen_advanced_load_check_clr_xf,
7223 gen_advanced_load_check_clr_ti,
7224 gen_advanced_load_check_clr_di,
7225 gen_advanced_load_check_clr_di,
7226 gen_advanced_load_check_clr_di,
7227
7228 0,0,0,0,0,0,0,0,0,0,0,0,
7229
7230 gen_advanced_load_check_clr_bi,
7231 gen_advanced_load_check_clr_qi,
7232 gen_advanced_load_check_clr_hi,
7233 gen_advanced_load_check_clr_si,
7234 gen_advanced_load_check_clr_di,
7235 gen_advanced_load_check_clr_sf,
7236 gen_advanced_load_check_clr_df,
7237 gen_advanced_load_check_clr_xf,
7238 gen_advanced_load_check_clr_ti,
7239 gen_advanced_load_check_clr_di,
7240 gen_advanced_load_check_clr_di,
7241 gen_advanced_load_check_clr_di,
7242
7243 gen_speculation_check_bi,
7244 gen_speculation_check_qi,
7245 gen_speculation_check_hi,
7246 gen_speculation_check_si,
7247 gen_speculation_check_di,
7248 gen_speculation_check_sf,
7249 gen_speculation_check_df,
7250 gen_speculation_check_xf,
7251 gen_speculation_check_ti,
7252 gen_speculation_check_di,
7253 gen_speculation_check_di,
7254 gen_speculation_check_di
7255 };
7256
7257 extract_insn_cached (insn);
7258
7259 if (label)
7260 {
7261 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7262 op1 = label;
7263 }
7264 else
7265 {
7266 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7267 op1 = copy_rtx (recog_data.operand[1]);
7268 }
7269
7270 if (mutate_p)
7271 /* INSN is an ld.c.
7272 Find the speculation check number by searching for the original
7273 speculative load in the list of resolved dependencies of INSN.
7274 As long as patterns are unique for each instruction, this can be
7275 accomplished by matching ORIG_PAT fields. */
7276 {
7277 sd_iterator_def sd_it;
7278 dep_t dep;
7279 int check_no = 0;
7280 rtx orig_pat = ORIG_PAT (insn);
7281
7282 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7283 {
7284 rtx x = DEP_PRO (dep);
7285
7286 if (ORIG_PAT (x) == orig_pat)
7287 check_no = spec_check_no[INSN_UID (x)];
7288 }
7289 gcc_assert (check_no);
7290
7291 spec_check_no[INSN_UID (insn)] = (check_no
7292 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7293 }
7294
7295 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7296 (copy_rtx (recog_data.operand[0]), op1));
7297
7298 pat = PATTERN (insn);
7299 if (GET_CODE (pat) == COND_EXEC)
7300 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7301 check_pat);
7302
7303 return check_pat;
7304 }
7305
7306 /* Return nonzero if X is a branchy recovery check. */
7307 static int
7308 ia64_spec_check_p (rtx x)
7309 {
7310 x = PATTERN (x);
7311 if (GET_CODE (x) == COND_EXEC)
7312 x = COND_EXEC_CODE (x);
7313 if (GET_CODE (x) == SET)
7314 return ia64_spec_check_src_p (SET_SRC (x));
7315 return 0;
7316 }
7317
7318 /* Return nonzero if SRC belongs to a recovery check. */
7319 static int
7320 ia64_spec_check_src_p (rtx src)
7321 {
7322 if (GET_CODE (src) == IF_THEN_ELSE)
7323 {
7324 rtx t;
7325
7326 t = XEXP (src, 0);
7327 if (GET_CODE (t) == NE)
7328 {
7329 t = XEXP (t, 0);
7330
7331 if (GET_CODE (t) == UNSPEC)
7332 {
7333 int code;
7334
7335 code = XINT (t, 1);
7336
7337 if (code == UNSPEC_CHKACLR
7338 || code == UNSPEC_CHKS
7339 || code == UNSPEC_LDCCLR)
7340 {
7341 gcc_assert (code != 0);
7342 return code;
7343 }
7344 }
7345 }
7346 }
7347 return 0;
7348 }
7349 \f
7350
7351 /* The following page contains abstract data `bundle states' which are
7352 used for bundling insns (inserting nops and template generation). */
7353
7354 /* The following describes state of insn bundling. */
7355
7356 struct bundle_state
7357 {
7358 /* Unique bundle state number to identify them in the debugging
7359 output */
7360 int unique_num;
7361 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7362 /* number of nops before and after the insn */
7363 short before_nops_num, after_nops_num;
7364 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7365 insn) */
7366 int cost; /* cost of the state in cycles */
7367 int accumulated_insns_num; /* number of all previous insns including
7368 nops. An L insn is counted as 2 insns */
7369 int branch_deviation; /* deviation of previous branches from 3rd slots */
7370 struct bundle_state *next; /* next state with the same insn_num */
7371 struct bundle_state *originator; /* originator (previous insn state) */
7372 /* All bundle states are in the following chain. */
7373 struct bundle_state *allocated_states_chain;
7374 /* The DFA State after issuing the insn and the nops. */
7375 state_t dfa_state;
7376 };
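/* Editorial sketch (not part of the original sources): after the forward
   pass of bundling () below, all states generated for a given insn number
   hang off `index_to_bundle_states' through the `next' field, and the chosen
   sequence can be walked backwards through `originator', e.g.

     struct bundle_state *s;
     for (s = best_state; s != NULL; s = s->originator)
       fprintf (stderr, "insn %d: %d nops before, %d after, cost %d\n",
                s->insn_num, s->before_nops_num, s->after_nops_num, s->cost);
*/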
7377
7378 /* The following maps an insn number to the corresponding bundle state. */
7379
7380 static struct bundle_state **index_to_bundle_states;
7381
7382 /* The unique number of next bundle state. */
7383
7384 static int bundle_states_num;
7385
7386 /* All allocated bundle states are in the following chain. */
7387
7388 static struct bundle_state *allocated_bundle_states_chain;
7389
7390 /* All allocated but not used bundle states are in the following
7391 chain. */
7392
7393 static struct bundle_state *free_bundle_state_chain;
7394
7395
7396 /* The following function returns a free bundle state. */
7397
7398 static struct bundle_state *
7399 get_free_bundle_state (void)
7400 {
7401 struct bundle_state *result;
7402
7403 if (free_bundle_state_chain != NULL)
7404 {
7405 result = free_bundle_state_chain;
7406 free_bundle_state_chain = result->next;
7407 }
7408 else
7409 {
7410 result = XNEW (struct bundle_state);
7411 result->dfa_state = xmalloc (dfa_state_size);
7412 result->allocated_states_chain = allocated_bundle_states_chain;
7413 allocated_bundle_states_chain = result;
7414 }
7415 result->unique_num = bundle_states_num++;
7416 return result;
7417
7418 }
7419
7420 /* The following function frees the given bundle state. */
7421
7422 static void
7423 free_bundle_state (struct bundle_state *state)
7424 {
7425 state->next = free_bundle_state_chain;
7426 free_bundle_state_chain = state;
7427 }
7428
7429 /* Start work with abstract data `bundle states'. */
7430
7431 static void
7432 initiate_bundle_states (void)
7433 {
7434 bundle_states_num = 0;
7435 free_bundle_state_chain = NULL;
7436 allocated_bundle_states_chain = NULL;
7437 }
7438
7439 /* Finish work with abstract data `bundle states'. */
7440
7441 static void
7442 finish_bundle_states (void)
7443 {
7444 struct bundle_state *curr_state, *next_state;
7445
7446 for (curr_state = allocated_bundle_states_chain;
7447 curr_state != NULL;
7448 curr_state = next_state)
7449 {
7450 next_state = curr_state->allocated_states_chain;
7451 free (curr_state->dfa_state);
7452 free (curr_state);
7453 }
7454 }
7455
7456 /* Hash table of the bundle states. The key is dfa_state and insn_num
7457 of the bundle states. */
7458
7459 static htab_t bundle_state_table;
7460
7461 /* The function returns hash of BUNDLE_STATE. */
7462
7463 static unsigned
7464 bundle_state_hash (const void *bundle_state)
7465 {
7466 const struct bundle_state *const state
7467 = (const struct bundle_state *) bundle_state;
7468 unsigned result, i;
7469
7470 for (result = i = 0; i < dfa_state_size; i++)
7471 result += (((unsigned char *) state->dfa_state) [i]
7472 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7473 return result + state->insn_num;
7474 }
7475
7476 /* The function returns nonzero if the bundle state keys are equal. */
7477
7478 static int
7479 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7480 {
7481 const struct bundle_state *const state1
7482 = (const struct bundle_state *) bundle_state_1;
7483 const struct bundle_state *const state2
7484 = (const struct bundle_state *) bundle_state_2;
7485
7486 return (state1->insn_num == state2->insn_num
7487 && memcmp (state1->dfa_state, state2->dfa_state,
7488 dfa_state_size) == 0);
7489 }
7490
7491 /* The function inserts the BUNDLE_STATE into the hash table. The
7492 function returns nonzero if the bundle has been inserted into the
7493 table. The table contains the best bundle state with given key. */
7494
7495 static int
7496 insert_bundle_state (struct bundle_state *bundle_state)
7497 {
7498 void **entry_ptr;
7499
7500 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7501 if (*entry_ptr == NULL)
7502 {
7503 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7504 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7505 *entry_ptr = (void *) bundle_state;
7506 return TRUE;
7507 }
7508 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7509 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7510 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7511 > bundle_state->accumulated_insns_num
7512 || (((struct bundle_state *)
7513 *entry_ptr)->accumulated_insns_num
7514 == bundle_state->accumulated_insns_num
7515 && ((struct bundle_state *)
7516 *entry_ptr)->branch_deviation
7517 > bundle_state->branch_deviation))))
7518
7519 {
7520 struct bundle_state temp;
7521
7522 temp = *(struct bundle_state *) *entry_ptr;
7523 *(struct bundle_state *) *entry_ptr = *bundle_state;
7524 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7525 *bundle_state = temp;
7526 }
7527 return FALSE;
7528 }
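/* Editorial note: the comparison above implements a lexicographic
   preference -- lower cost first, then fewer accumulated insns (i.e. fewer
   nops), then smaller branch deviation.  For example, of two states with
   equal cost but 5 vs. 4 accumulated insns, the 4-insn state overwrites the
   existing hash table entry, keeping that entry's original `next' chaining.  */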
7529
7530 /* Start work with the hash table. */
7531
7532 static void
7533 initiate_bundle_state_table (void)
7534 {
7535 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7536 (htab_del) 0);
7537 }
7538
7539 /* Finish work with the hash table. */
7540
7541 static void
7542 finish_bundle_state_table (void)
7543 {
7544 htab_delete (bundle_state_table);
7545 }
7546
7547 \f
7548
7549 /* The following variable is an insn `nop' used to check bundle states
7550 with different numbers of inserted nops. */
7551
7552 static rtx ia64_nop;
7553
7554 /* The following function tries to issue NOPS_NUM nops for the current
7555 state without advancing the processor cycle. If it fails, the
7556 function returns FALSE and frees the current state. */
7557
7558 static int
7559 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7560 {
7561 int i;
7562
7563 for (i = 0; i < nops_num; i++)
7564 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7565 {
7566 free_bundle_state (curr_state);
7567 return FALSE;
7568 }
7569 return TRUE;
7570 }
7571
7572 /* The following function tries to issue INSN for the current
7573 state without advancing the processor cycle. If it fails, the
7574 function returns FALSE and frees the current state. */
7575
7576 static int
7577 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7578 {
7579 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7580 {
7581 free_bundle_state (curr_state);
7582 return FALSE;
7583 }
7584 return TRUE;
7585 }
7586
7587 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7588 starting with ORIGINATOR without advancing the processor cycle. If
7589 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7590 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7591 If it is successful, the function creates a new bundle state and
7592 inserts it into the hash table and into `index_to_bundle_states'. */
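/* Illustrative usage (editorial; mirrors the forward pass in bundling ()
   below): every important insn is tried with 0, 1 and 2 nops in front of
   it, roughly

     issue_nops_and_insn (curr_state, 2, insn, bundle_end_p, only_bundle_end_p);
     issue_nops_and_insn (curr_state, 1, insn, bundle_end_p, only_bundle_end_p);
     issue_nops_and_insn (curr_state, 0, insn, bundle_end_p, only_bundle_end_p);

   where the 2-nop variant is attempted only for insn types that can need
   it (F, B, L, S, and M/A on Itanium 1 in some positions).  */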
7593
7594 static void
7595 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7596 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7597 {
7598 struct bundle_state *curr_state;
7599
7600 curr_state = get_free_bundle_state ();
7601 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7602 curr_state->insn = insn;
7603 curr_state->insn_num = originator->insn_num + 1;
7604 curr_state->cost = originator->cost;
7605 curr_state->originator = originator;
7606 curr_state->before_nops_num = before_nops_num;
7607 curr_state->after_nops_num = 0;
7608 curr_state->accumulated_insns_num
7609 = originator->accumulated_insns_num + before_nops_num;
7610 curr_state->branch_deviation = originator->branch_deviation;
7611 gcc_assert (insn);
7612 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7613 {
7614 gcc_assert (GET_MODE (insn) != TImode);
7615 if (!try_issue_nops (curr_state, before_nops_num))
7616 return;
7617 if (!try_issue_insn (curr_state, insn))
7618 return;
7619 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7620 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7621 && curr_state->accumulated_insns_num % 3 != 0)
7622 {
7623 free_bundle_state (curr_state);
7624 return;
7625 }
7626 }
7627 else if (GET_MODE (insn) != TImode)
7628 {
7629 if (!try_issue_nops (curr_state, before_nops_num))
7630 return;
7631 if (!try_issue_insn (curr_state, insn))
7632 return;
7633 curr_state->accumulated_insns_num++;
7634 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7635 && asm_noperands (PATTERN (insn)) < 0);
7636
7637 if (ia64_safe_type (insn) == TYPE_L)
7638 curr_state->accumulated_insns_num++;
7639 }
7640 else
7641 {
7642 /* If this is an insn that must be first in a group, then don't allow
7643 nops to be emitted before it. Currently, alloc is the only such
7644 supported instruction. */
7645 /* ??? The bundling automatons should handle this for us, but they do
7646 not yet have support for the first_insn attribute. */
7647 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7648 {
7649 free_bundle_state (curr_state);
7650 return;
7651 }
7652
7653 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7654 state_transition (curr_state->dfa_state, NULL);
7655 curr_state->cost++;
7656 if (!try_issue_nops (curr_state, before_nops_num))
7657 return;
7658 if (!try_issue_insn (curr_state, insn))
7659 return;
7660 curr_state->accumulated_insns_num++;
7661 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7662 || asm_noperands (PATTERN (insn)) >= 0)
7663 {
7664 /* Finish bundle containing asm insn. */
7665 curr_state->after_nops_num
7666 = 3 - curr_state->accumulated_insns_num % 3;
7667 curr_state->accumulated_insns_num
7668 += 3 - curr_state->accumulated_insns_num % 3;
7669 }
7670 else if (ia64_safe_type (insn) == TYPE_L)
7671 curr_state->accumulated_insns_num++;
7672 }
7673 if (ia64_safe_type (insn) == TYPE_B)
7674 curr_state->branch_deviation
7675 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7676 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7677 {
7678 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7679 {
7680 state_t dfa_state;
7681 struct bundle_state *curr_state1;
7682 struct bundle_state *allocated_states_chain;
7683
7684 curr_state1 = get_free_bundle_state ();
7685 dfa_state = curr_state1->dfa_state;
7686 allocated_states_chain = curr_state1->allocated_states_chain;
7687 *curr_state1 = *curr_state;
7688 curr_state1->dfa_state = dfa_state;
7689 curr_state1->allocated_states_chain = allocated_states_chain;
7690 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7691 dfa_state_size);
7692 curr_state = curr_state1;
7693 }
7694 if (!try_issue_nops (curr_state,
7695 3 - curr_state->accumulated_insns_num % 3))
7696 return;
7697 curr_state->after_nops_num
7698 = 3 - curr_state->accumulated_insns_num % 3;
7699 curr_state->accumulated_insns_num
7700 += 3 - curr_state->accumulated_insns_num % 3;
7701 }
7702 if (!insert_bundle_state (curr_state))
7703 free_bundle_state (curr_state);
7704 return;
7705 }
7706
7707 /* The following function returns the position in the two-bundle window
7708 for the given STATE. */
7709
7710 static int
7711 get_max_pos (state_t state)
7712 {
7713 if (cpu_unit_reservation_p (state, pos_6))
7714 return 6;
7715 else if (cpu_unit_reservation_p (state, pos_5))
7716 return 5;
7717 else if (cpu_unit_reservation_p (state, pos_4))
7718 return 4;
7719 else if (cpu_unit_reservation_p (state, pos_3))
7720 return 3;
7721 else if (cpu_unit_reservation_p (state, pos_2))
7722 return 2;
7723 else if (cpu_unit_reservation_p (state, pos_1))
7724 return 1;
7725 else
7726 return 0;
7727 }
7728
7729 /* The function returns the code of a possible template for the given
7730 position and state. The function should be called only with the two
7731 position values 3 or 6. We avoid generating F NOPs by putting
7732 templates containing F insns at the end of the template search,
7733 because an undocumented anomaly in McKinley-derived cores can
7734 cause stalls if an F-unit insn (including a NOP) is issued within a
7735 six-cycle window after reading certain application registers (such
7736 as ar.bsp). Furthermore, power considerations also argue against
7737 the use of F-unit instructions unless they're really needed. */
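/* Editorial note (inferred from the uses elsewhere in this file, not part
   of the original sources): the returned template numbers appear to encode
   the bundle types as

       0  .mii      5  .mbb
       1  .mmi      6  .mib
       2  .mfi      7  .mmb
       3  .mmf      8  .mfb
       4  .bbb      9  .mlx

   e.g. the Itanium 1 MM-insn pass below rewrites template 9 (.mlx) into
   template 2 (.mfi), and ia64_add_bundle_selector_before special-cases
   templates 4 and 5 as the .bbb/.mbb bundles.  */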
7738
7739 static int
7740 get_template (state_t state, int pos)
7741 {
7742 switch (pos)
7743 {
7744 case 3:
7745 if (cpu_unit_reservation_p (state, _0mmi_))
7746 return 1;
7747 else if (cpu_unit_reservation_p (state, _0mii_))
7748 return 0;
7749 else if (cpu_unit_reservation_p (state, _0mmb_))
7750 return 7;
7751 else if (cpu_unit_reservation_p (state, _0mib_))
7752 return 6;
7753 else if (cpu_unit_reservation_p (state, _0mbb_))
7754 return 5;
7755 else if (cpu_unit_reservation_p (state, _0bbb_))
7756 return 4;
7757 else if (cpu_unit_reservation_p (state, _0mmf_))
7758 return 3;
7759 else if (cpu_unit_reservation_p (state, _0mfi_))
7760 return 2;
7761 else if (cpu_unit_reservation_p (state, _0mfb_))
7762 return 8;
7763 else if (cpu_unit_reservation_p (state, _0mlx_))
7764 return 9;
7765 else
7766 gcc_unreachable ();
7767 case 6:
7768 if (cpu_unit_reservation_p (state, _1mmi_))
7769 return 1;
7770 else if (cpu_unit_reservation_p (state, _1mii_))
7771 return 0;
7772 else if (cpu_unit_reservation_p (state, _1mmb_))
7773 return 7;
7774 else if (cpu_unit_reservation_p (state, _1mib_))
7775 return 6;
7776 else if (cpu_unit_reservation_p (state, _1mbb_))
7777 return 5;
7778 else if (cpu_unit_reservation_p (state, _1bbb_))
7779 return 4;
7780 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7781 return 3;
7782 else if (cpu_unit_reservation_p (state, _1mfi_))
7783 return 2;
7784 else if (cpu_unit_reservation_p (state, _1mfb_))
7785 return 8;
7786 else if (cpu_unit_reservation_p (state, _1mlx_))
7787 return 9;
7788 else
7789 gcc_unreachable ();
7790 default:
7791 gcc_unreachable ();
7792 }
7793 }
7794
7795 /* The following function returns the first insn important for insn
7796 bundling, starting the search at INSN and stopping before TAIL. */
7797
7798 static rtx
7799 get_next_important_insn (rtx insn, rtx tail)
7800 {
7801 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7802 if (INSN_P (insn)
7803 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7804 && GET_CODE (PATTERN (insn)) != USE
7805 && GET_CODE (PATTERN (insn)) != CLOBBER)
7806 return insn;
7807 return NULL_RTX;
7808 }
7809
7810 /* Add a bundle selector TEMPLATE0 before INSN. */
7811
7812 static void
7813 ia64_add_bundle_selector_before (int template0, rtx insn)
7814 {
7815 rtx b = gen_bundle_selector (GEN_INT (template0));
7816
7817 ia64_emit_insn_before (b, insn);
7818 #if NR_BUNDLES == 10
7819 if ((template0 == 4 || template0 == 5)
7820 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7821 {
7822 int i;
7823 rtx note = NULL_RTX;
7824
7825 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
7826 first or second slot. If it is and has a REG_EH_REGION note set, copy it
7827 to following nops, as br.call sets rp to the address of following
7828 bundle and therefore an EH region end must be on a bundle
7829 boundary. */
7830 insn = PREV_INSN (insn);
7831 for (i = 0; i < 3; i++)
7832 {
7833 do
7834 insn = next_active_insn (insn);
7835 while (GET_CODE (insn) == INSN
7836 && get_attr_empty (insn) == EMPTY_YES);
7837 if (GET_CODE (insn) == CALL_INSN)
7838 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7839 else if (note)
7840 {
7841 int code;
7842
7843 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7844 || code == CODE_FOR_nop_b);
7845 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7846 note = NULL_RTX;
7847 else
7848 REG_NOTES (insn)
7849 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7850 REG_NOTES (insn));
7851 }
7852 }
7853 }
7854 #endif
7855 }
7856
7857 /* The following function does insn bundling. Bundling means
7858 inserting templates and nop insns to fit insn groups into permitted
7859 templates. Instruction scheduling uses NDFA (non-deterministic
7860 finite automata) encoding information about the templates and the
7861 inserted nops. Nondeterminism of the automata permits following
7862 all possible insn sequences very quickly.
7863
7864 Unfortunately it is not possible to get information about inserting
7865 nop insns and used templates from the automaton states. The
7866 automaton only says that we can issue an insn, possibly inserting
7867 some nops before it and using some template. Therefore insn
7868 bundling in this function is implemented by using a DFA
7869 (deterministic finite automaton). We follow all possible insn
7870 sequences by inserting 0-2 nops (that is what the NDFA describes for
7871 insn scheduling) before/after each insn being bundled. We know the
7872 start of a simulated processor cycle from insn scheduling (an insn
7873 starting a new cycle has TImode).
7874
7875 A simple implementation of insn bundling would create an enormous
7876 number of possible insn sequences satisfying the information about new
7877 cycle ticks taken from the insn scheduling. To make the algorithm
7878 practical we use dynamic programming. Each decision (about
7879 inserting nops and implicitly about previous decisions) is described
7880 by the structure bundle_state (see above). If we generate the same
7881 bundle state (the key is the automaton state after issuing the insns
7882 and nops for it), we reuse the already generated one. As a consequence
7883 we reject some decisions which cannot improve the solution and
7884 reduce memory for the algorithm.
7885
7886 When we reach the end of the EBB (extended basic block), we choose
7887 the best sequence and then, moving back through the EBB, insert
7888 templates for the best alternative. The templates are obtained by
7889 querying the automaton state for each insn in the chosen bundle states.
7890
7891 So the algorithm makes two (forward and backward) passes through the
7892 EBB. There is an additional forward pass through the EBB for the
7893 Itanium1 processor. This pass inserts more nops to make the dependency
7894 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
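/* Editorial sketch (not from the original sources): if the second
   scheduling pass issued, say, two insns in one cycle, the forward pass
   below explores finishing the bundle with a trailing nop, inserting nops
   between the two insns, starting a fresh bundle, etc.; insert_bundle_state
   keeps only the best state per (DFA state, insn number) key, so the
   backward pass simply follows the `originator' links from the best final
   state and emits the recorded nops and bundle selectors.  */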
7895
7896 static void
7897 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7898 {
7899 struct bundle_state *curr_state, *next_state, *best_state;
7900 rtx insn, next_insn;
7901 int insn_num;
7902 int i, bundle_end_p, only_bundle_end_p, asm_p;
7903 int pos = 0, max_pos, template0, template1;
7904 rtx b;
7905 rtx nop;
7906 enum attr_type type;
7907
7908 insn_num = 0;
7909 /* Count insns in the EBB. */
7910 for (insn = NEXT_INSN (prev_head_insn);
7911 insn && insn != tail;
7912 insn = NEXT_INSN (insn))
7913 if (INSN_P (insn))
7914 insn_num++;
7915 if (insn_num == 0)
7916 return;
7917 bundling_p = 1;
7918 dfa_clean_insn_cache ();
7919 initiate_bundle_state_table ();
7920 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
7921 /* First (forward) pass -- generation of bundle states. */
7922 curr_state = get_free_bundle_state ();
7923 curr_state->insn = NULL;
7924 curr_state->before_nops_num = 0;
7925 curr_state->after_nops_num = 0;
7926 curr_state->insn_num = 0;
7927 curr_state->cost = 0;
7928 curr_state->accumulated_insns_num = 0;
7929 curr_state->branch_deviation = 0;
7930 curr_state->next = NULL;
7931 curr_state->originator = NULL;
7932 state_reset (curr_state->dfa_state);
7933 index_to_bundle_states [0] = curr_state;
7934 insn_num = 0;
7935 /* Shift the cycle mark if it is put on an insn which could be ignored. */
7936 for (insn = NEXT_INSN (prev_head_insn);
7937 insn != tail;
7938 insn = NEXT_INSN (insn))
7939 if (INSN_P (insn)
7940 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7941 || GET_CODE (PATTERN (insn)) == USE
7942 || GET_CODE (PATTERN (insn)) == CLOBBER)
7943 && GET_MODE (insn) == TImode)
7944 {
7945 PUT_MODE (insn, VOIDmode);
7946 for (next_insn = NEXT_INSN (insn);
7947 next_insn != tail;
7948 next_insn = NEXT_INSN (next_insn))
7949 if (INSN_P (next_insn)
7950 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7951 && GET_CODE (PATTERN (next_insn)) != USE
7952 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7953 {
7954 PUT_MODE (next_insn, TImode);
7955 break;
7956 }
7957 }
7958 /* Forward pass: generation of bundle states. */
7959 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7960 insn != NULL_RTX;
7961 insn = next_insn)
7962 {
7963 gcc_assert (INSN_P (insn)
7964 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7965 && GET_CODE (PATTERN (insn)) != USE
7966 && GET_CODE (PATTERN (insn)) != CLOBBER);
7967 type = ia64_safe_type (insn);
7968 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7969 insn_num++;
7970 index_to_bundle_states [insn_num] = NULL;
7971 for (curr_state = index_to_bundle_states [insn_num - 1];
7972 curr_state != NULL;
7973 curr_state = next_state)
7974 {
7975 pos = curr_state->accumulated_insns_num % 3;
7976 next_state = curr_state->next;
7977 /* We must fill up the current bundle in order to start a
7978 subsequent asm insn in a new bundle. An asm insn is always
7979 placed in a separate bundle. */
7980 only_bundle_end_p
7981 = (next_insn != NULL_RTX
7982 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7983 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7984 /* We may fill up the current bundle if it is the cycle end
7985 without a group barrier. */
7986 bundle_end_p
7987 = (only_bundle_end_p || next_insn == NULL_RTX
7988 || (GET_MODE (next_insn) == TImode
7989 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7990 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7991 || type == TYPE_S
7992 /* We need to insert 2 nops for cases like M_MII. To
7993 guarantee issuing all insns on the same cycle for
7994 Itanium 1, we need to issue 2 nops after the first M
7995 insn (MnnMII where n is a nop insn). */
7996 || ((type == TYPE_M || type == TYPE_A)
7997 && ia64_tune == PROCESSOR_ITANIUM
7998 && !bundle_end_p && pos == 1))
7999 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8000 only_bundle_end_p);
8001 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8002 only_bundle_end_p);
8003 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8004 only_bundle_end_p);
8005 }
8006 gcc_assert (index_to_bundle_states [insn_num]);
8007 for (curr_state = index_to_bundle_states [insn_num];
8008 curr_state != NULL;
8009 curr_state = curr_state->next)
8010 if (verbose >= 2 && dump)
8011 {
8012 /* This structure is taken from generated code of the
8013 pipeline hazard recognizer (see file insn-attrtab.c).
8014 Please don't forget to change the structure if a new
8015 automaton is added to .md file. */
8016 struct DFA_chip
8017 {
8018 unsigned short one_automaton_state;
8019 unsigned short oneb_automaton_state;
8020 unsigned short two_automaton_state;
8021 unsigned short twob_automaton_state;
8022 };
8023
8024 fprintf
8025 (dump,
8026 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8027 curr_state->unique_num,
8028 (curr_state->originator == NULL
8029 ? -1 : curr_state->originator->unique_num),
8030 curr_state->cost,
8031 curr_state->before_nops_num, curr_state->after_nops_num,
8032 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8033 (ia64_tune == PROCESSOR_ITANIUM
8034 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8035 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8036 INSN_UID (insn));
8037 }
8038 }
8039
8040 /* We should find a solution because the 2nd insn scheduling has
8041 found one. */
8042 gcc_assert (index_to_bundle_states [insn_num]);
8043 /* Find a state corresponding to the best insn sequence. */
8044 best_state = NULL;
8045 for (curr_state = index_to_bundle_states [insn_num];
8046 curr_state != NULL;
8047 curr_state = curr_state->next)
8048 /* We are only looking at the states with a fully filled-up last
8049 bundle. First we prefer insn sequences with minimal cost,
8050 then with minimal inserted nops, and finally with branch insns
8051 placed in the 3rd slots. */
8052 if (curr_state->accumulated_insns_num % 3 == 0
8053 && (best_state == NULL || best_state->cost > curr_state->cost
8054 || (best_state->cost == curr_state->cost
8055 && (curr_state->accumulated_insns_num
8056 < best_state->accumulated_insns_num
8057 || (curr_state->accumulated_insns_num
8058 == best_state->accumulated_insns_num
8059 && curr_state->branch_deviation
8060 < best_state->branch_deviation)))))
8061 best_state = curr_state;
8062 /* Second (backward) pass: adding nops and templates. */
8063 insn_num = best_state->before_nops_num;
8064 template0 = template1 = -1;
8065 for (curr_state = best_state;
8066 curr_state->originator != NULL;
8067 curr_state = curr_state->originator)
8068 {
8069 insn = curr_state->insn;
8070 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8071 || asm_noperands (PATTERN (insn)) >= 0);
8072 insn_num++;
8073 if (verbose >= 2 && dump)
8074 {
8075 struct DFA_chip
8076 {
8077 unsigned short one_automaton_state;
8078 unsigned short oneb_automaton_state;
8079 unsigned short two_automaton_state;
8080 unsigned short twob_automaton_state;
8081 };
8082
8083 fprintf
8084 (dump,
8085 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8086 curr_state->unique_num,
8087 (curr_state->originator == NULL
8088 ? -1 : curr_state->originator->unique_num),
8089 curr_state->cost,
8090 curr_state->before_nops_num, curr_state->after_nops_num,
8091 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8092 (ia64_tune == PROCESSOR_ITANIUM
8093 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8094 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8095 INSN_UID (insn));
8096 }
8097 /* Find the position in the current bundle window. The window can
8098 contain at most two bundles. A two-bundle window means that
8099 the processor will make two bundle rotations. */
8100 max_pos = get_max_pos (curr_state->dfa_state);
8101 if (max_pos == 6
8102 /* The following (negative template number) means that the
8103 processor did one bundle rotation. */
8104 || (max_pos == 3 && template0 < 0))
8105 {
8106 /* We are at the end of the window -- find template(s) for
8107 its bundle(s). */
8108 pos = max_pos;
8109 if (max_pos == 3)
8110 template0 = get_template (curr_state->dfa_state, 3);
8111 else
8112 {
8113 template1 = get_template (curr_state->dfa_state, 3);
8114 template0 = get_template (curr_state->dfa_state, 6);
8115 }
8116 }
8117 if (max_pos > 3 && template1 < 0)
8118 /* This may happen when we have a stop inside a bundle. */
8119 {
8120 gcc_assert (pos <= 3);
8121 template1 = get_template (curr_state->dfa_state, 3);
8122 pos += 3;
8123 }
8124 if (!asm_p)
8125 /* Emit nops after the current insn. */
8126 for (i = 0; i < curr_state->after_nops_num; i++)
8127 {
8128 nop = gen_nop ();
8129 emit_insn_after (nop, insn);
8130 pos--;
8131 gcc_assert (pos >= 0);
8132 if (pos % 3 == 0)
8133 {
8134 /* We are at the start of a bundle: emit the template
8135 (it should be defined). */
8136 gcc_assert (template0 >= 0);
8137 ia64_add_bundle_selector_before (template0, nop);
8138 /* If we have a two-bundle window, we make one bundle
8139 rotation. Otherwise template0 will be undefined
8140 (negative value). */
8141 template0 = template1;
8142 template1 = -1;
8143 }
8144 }
8145 /* Move the position backward in the window. A group barrier has
8146 no slot. An asm insn takes a whole bundle. */
8147 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8148 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8149 && asm_noperands (PATTERN (insn)) < 0)
8150 pos--;
8151 /* Long insn takes 2 slots. */
8152 if (ia64_safe_type (insn) == TYPE_L)
8153 pos--;
8154 gcc_assert (pos >= 0);
8155 if (pos % 3 == 0
8156 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8157 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8158 && asm_noperands (PATTERN (insn)) < 0)
8159 {
8160 /* The current insn is at the bundle start: emit the
8161 template. */
8162 gcc_assert (template0 >= 0);
8163 ia64_add_bundle_selector_before (template0, insn);
8164 b = PREV_INSN (insn);
8165 insn = b;
8166 /* See comment above in analogous place for emitting nops
8167 after the insn. */
8168 template0 = template1;
8169 template1 = -1;
8170 }
8171 /* Emit nops before the current insn. */
8172 for (i = 0; i < curr_state->before_nops_num; i++)
8173 {
8174 nop = gen_nop ();
8175 ia64_emit_insn_before (nop, insn);
8176 nop = PREV_INSN (insn);
8177 insn = nop;
8178 pos--;
8179 gcc_assert (pos >= 0);
8180 if (pos % 3 == 0)
8181 {
8182 /* See comment above in analogous place for emitting nops
8183 after the insn. */
8184 gcc_assert (template0 >= 0);
8185 ia64_add_bundle_selector_before (template0, insn);
8186 b = PREV_INSN (insn);
8187 insn = b;
8188 template0 = template1;
8189 template1 = -1;
8190 }
8191 }
8192 }
8193 if (ia64_tune == PROCESSOR_ITANIUM)
8194 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8195 Itanium1 has a strange design: if the distance between an insn
8196 and a dependent MM-insn is less than 4 cycles then we get an
8197 additional 6-cycle stall. So we make the distance equal to 4 cycles
8198 if it is less. */
8199 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8200 insn != NULL_RTX;
8201 insn = next_insn)
8202 {
8203 gcc_assert (INSN_P (insn)
8204 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8205 && GET_CODE (PATTERN (insn)) != USE
8206 && GET_CODE (PATTERN (insn)) != CLOBBER);
8207 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8208 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8209 /* We found an MM-insn which needs additional cycles. */
8210 {
8211 rtx last;
8212 int i, j, n;
8213 int pred_stop_p;
8214
8215 /* Now we are searching for the template of the bundle in
8216 which the MM-insn is placed and for the position of the
8217 insn in the bundle (0, 1, 2). We are also checking
8218 whether there is a stop before the insn. */
8219 last = prev_active_insn (insn);
8220 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8221 if (pred_stop_p)
8222 last = prev_active_insn (last);
8223 n = 0;
8224 for (;; last = prev_active_insn (last))
8225 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8226 {
8227 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8228 if (template0 == 9)
8229 /* The insn is in an MLX bundle. Change the template
8230 to MFI because we will add nops before the
8231 insn. It simplifies the subsequent code a lot. */
8232 PATTERN (last)
8233 = gen_bundle_selector (const2_rtx); /* -> MFI */
8234 break;
8235 }
8236 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8237 && (ia64_safe_itanium_class (last)
8238 != ITANIUM_CLASS_IGNORE))
8239 n++;
8240 /* Sanity checks: the stop is not at the
8241 bundle start, there are no more than 3 insns in the bundle,
8242 and the MM-insn is not at the start of a bundle with
8243 template MLX. */
8244 gcc_assert ((!pred_stop_p || n)
8245 && n <= 2
8246 && (template0 != 9 || !n));
8247 /* Fill the rest of the original bundle with nops, emitted before the insn. */
8248 for (j = 3 - n; j > 0; j --)
8249 ia64_emit_insn_before (gen_nop (), insn);
8250 /* This takes into account that we will add N more nops
8251 before the insn later -- please see the code below. */
8252 add_cycles [INSN_UID (insn)]--;
8253 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8254 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8255 insn);
8256 if (pred_stop_p)
8257 add_cycles [INSN_UID (insn)]--;
8258 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8259 {
8260 /* Insert "MII;" template. */
8261 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8262 insn);
8263 ia64_emit_insn_before (gen_nop (), insn);
8264 ia64_emit_insn_before (gen_nop (), insn);
8265 if (i > 1)
8266 {
8267 /* To decrease code size, we use "MI;I;"
8268 template. */
8269 ia64_emit_insn_before
8270 (gen_insn_group_barrier (GEN_INT (3)), insn);
8271 i--;
8272 }
8273 ia64_emit_insn_before (gen_nop (), insn);
8274 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8275 insn);
8276 }
8277 /* Put the MM-insn in the same slot of a bundle with the
8278 same template as the original one. */
8279 ia64_add_bundle_selector_before (template0, insn);
8280 /* To put the insn in the same slot, add the necessary number
8281 of nops. */
8282 for (j = n; j > 0; j --)
8283 ia64_emit_insn_before (gen_nop (), insn);
8284 /* Put the stop if the original bundle had it. */
8285 if (pred_stop_p)
8286 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8287 insn);
8288 }
8289 }
8290 free (index_to_bundle_states);
8291 finish_bundle_state_table ();
8292 bundling_p = 0;
8293 dfa_clean_insn_cache ();
8294 }
8295
8296 /* The following function is called at the end of scheduling BB or
8297 EBB. After reload, it inserts stop bits and does insn bundling. */
8298
8299 static void
8300 ia64_sched_finish (FILE *dump, int sched_verbose)
8301 {
8302 if (sched_verbose)
8303 fprintf (dump, "// Finishing schedule.\n");
8304 if (!reload_completed)
8305 return;
8306 if (reload_completed)
8307 {
8308 final_emit_insn_group_barriers (dump);
8309 bundling (dump, sched_verbose, current_sched_info->prev_head,
8310 current_sched_info->next_tail);
8311 if (sched_verbose && dump)
8312 fprintf (dump, "// finishing %d-%d\n",
8313 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8314 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8315
8316 return;
8317 }
8318 }
8319
8320 /* The following function inserts stop bits in scheduled BB or EBB. */
8321
8322 static void
8323 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8324 {
8325 rtx insn;
8326 int need_barrier_p = 0;
8327 rtx prev_insn = NULL_RTX;
8328
8329 init_insn_group_barriers ();
8330
8331 for (insn = NEXT_INSN (current_sched_info->prev_head);
8332 insn != current_sched_info->next_tail;
8333 insn = NEXT_INSN (insn))
8334 {
8335 if (GET_CODE (insn) == BARRIER)
8336 {
8337 rtx last = prev_active_insn (insn);
8338
8339 if (! last)
8340 continue;
8341 if (GET_CODE (last) == JUMP_INSN
8342 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8343 last = prev_active_insn (last);
8344 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8345 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8346
8347 init_insn_group_barriers ();
8348 need_barrier_p = 0;
8349 prev_insn = NULL_RTX;
8350 }
8351 else if (INSN_P (insn))
8352 {
8353 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8354 {
8355 init_insn_group_barriers ();
8356 need_barrier_p = 0;
8357 prev_insn = NULL_RTX;
8358 }
8359 else if (need_barrier_p || group_barrier_needed (insn))
8360 {
8361 if (TARGET_EARLY_STOP_BITS)
8362 {
8363 rtx last;
8364
8365 for (last = insn;
8366 last != current_sched_info->prev_head;
8367 last = PREV_INSN (last))
8368 if (INSN_P (last) && GET_MODE (last) == TImode
8369 && stops_p [INSN_UID (last)])
8370 break;
8371 if (last == current_sched_info->prev_head)
8372 last = insn;
8373 last = prev_active_insn (last);
8374 if (last
8375 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8376 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8377 last);
8378 init_insn_group_barriers ();
8379 for (last = NEXT_INSN (last);
8380 last != insn;
8381 last = NEXT_INSN (last))
8382 if (INSN_P (last))
8383 group_barrier_needed (last);
8384 }
8385 else
8386 {
8387 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8388 insn);
8389 init_insn_group_barriers ();
8390 }
8391 group_barrier_needed (insn);
8392 prev_insn = NULL_RTX;
8393 }
8394 else if (recog_memoized (insn) >= 0)
8395 prev_insn = insn;
8396 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8397 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8398 || asm_noperands (PATTERN (insn)) >= 0);
8399 }
8400 }
8401 }
8402
8403 \f
8404
8405 /* The following function returns the number of insns the DFA insn
8406 scheduler may look ahead when choosing an insn to issue. */
8407
8408 static int
8409 ia64_first_cycle_multipass_dfa_lookahead (void)
8410 {
8411 return (reload_completed ? 6 : 4);
8412 }
8413
8414 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8415
8416 static void
8417 ia64_init_dfa_pre_cycle_insn (void)
8418 {
8419 if (temp_dfa_state == NULL)
8420 {
8421 dfa_state_size = state_size ();
8422 temp_dfa_state = xmalloc (dfa_state_size);
8423 prev_cycle_state = xmalloc (dfa_state_size);
8424 }
8425 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8426 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8427 recog_memoized (dfa_pre_cycle_insn);
8428 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8429 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8430 recog_memoized (dfa_stop_insn);
8431 }
8432
8433 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8434 used by the DFA insn scheduler. */
8435
8436 static rtx
8437 ia64_dfa_pre_cycle_insn (void)
8438 {
8439 return dfa_pre_cycle_insn;
8440 }
8441
8442 /* The following function returns TRUE if PRODUCER (of type ilog or
8443 ld) produces an address for CONSUMER (of type st or stf). */
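/* Illustrative example (editorial, not from the original sources): with

     producer:  (set (reg r14) (plus (reg r15) (reg r16)))
     consumer:  (set (mem (reg r14)) (reg r17))

   the producer's destination register r14 is mentioned in the consumer's
   store address, so the function returns nonzero.  */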
8444
8445 int
8446 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8447 {
8448 rtx dest, reg, mem;
8449
8450 gcc_assert (producer && consumer);
8451 dest = ia64_single_set (producer);
8452 gcc_assert (dest);
8453 reg = SET_DEST (dest);
8454 gcc_assert (reg);
8455 if (GET_CODE (reg) == SUBREG)
8456 reg = SUBREG_REG (reg);
8457 gcc_assert (GET_CODE (reg) == REG);
8458
8459 dest = ia64_single_set (consumer);
8460 gcc_assert (dest);
8461 mem = SET_DEST (dest);
8462 gcc_assert (mem && GET_CODE (mem) == MEM);
8463 return reg_mentioned_p (reg, mem);
8464 }
8465
8466 /* The following function returns TRUE if PRODUCER (of type ilog or
8467 ld) produces an address for CONSUMER (of type ld or fld). */
8468
8469 int
8470 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8471 {
8472 rtx dest, src, reg, mem;
8473
8474 gcc_assert (producer && consumer);
8475 dest = ia64_single_set (producer);
8476 gcc_assert (dest);
8477 reg = SET_DEST (dest);
8478 gcc_assert (reg);
8479 if (GET_CODE (reg) == SUBREG)
8480 reg = SUBREG_REG (reg);
8481 gcc_assert (GET_CODE (reg) == REG);
8482
8483 src = ia64_single_set (consumer);
8484 gcc_assert (src);
8485 mem = SET_SRC (src);
8486 gcc_assert (mem);
8487
8488 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8489 mem = XVECEXP (mem, 0, 0);
8490 else if (GET_CODE (mem) == IF_THEN_ELSE)
8491 /* ??? Is this bypass necessary for ld.c? */
8492 {
8493 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8494 mem = XEXP (mem, 1);
8495 }
8496
8497 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8498 mem = XEXP (mem, 0);
8499
8500 if (GET_CODE (mem) == UNSPEC)
8501 {
8502 int c = XINT (mem, 1);
8503
8504 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8505 mem = XVECEXP (mem, 0, 0);
8506 }
8507
8508 /* Note that LO_SUM is used for GOT loads. */
8509 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8510
8511 return reg_mentioned_p (reg, mem);
8512 }
8513
8514 /* The following function returns TRUE if INSN produces an address for a
8515 load/store insn. We will place such insns into an M slot because it
8516 decreases their latency. */
8517
8518 int
8519 ia64_produce_address_p (rtx insn)
8520 {
8521 return insn->call;
8522 }
8523
8524 \f
8525 /* Emit pseudo-ops for the assembler to describe predicate relations.
8526 At present this assumes that we only consider predicate pairs to
8527 be mutex, and that the assembler can deduce proper values from
8528 straight-line code. */
8529
8530 static void
8531 emit_predicate_relation_info (void)
8532 {
8533 basic_block bb;
8534
8535 FOR_EACH_BB_REVERSE (bb)
8536 {
8537 int r;
8538 rtx head = BB_HEAD (bb);
8539
8540 /* We only need such notes at code labels. */
8541 if (GET_CODE (head) != CODE_LABEL)
8542 continue;
8543 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8544 head = NEXT_INSN (head);
8545
8546 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8547 grabbing the entire block of predicate registers. */
8548 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8549 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
8550 {
8551 rtx p = gen_rtx_REG (BImode, r);
8552 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8553 if (head == BB_END (bb))
8554 BB_END (bb) = n;
8555 head = n;
8556 }
8557 }
8558
8559 /* Look for conditional calls that do not return, and protect predicate
8560 relations around them. Otherwise the assembler will assume the call
8561 returns, and complain about uses of call-clobbered predicates after
8562 the call. */
8563 FOR_EACH_BB_REVERSE (bb)
8564 {
8565 rtx insn = BB_HEAD (bb);
8566
8567 while (1)
8568 {
8569 if (GET_CODE (insn) == CALL_INSN
8570 && GET_CODE (PATTERN (insn)) == COND_EXEC
8571 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8572 {
8573 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8574 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8575 if (BB_HEAD (bb) == insn)
8576 BB_HEAD (bb) = b;
8577 if (BB_END (bb) == insn)
8578 BB_END (bb) = a;
8579 }
8580
8581 if (insn == BB_END (bb))
8582 break;
8583 insn = NEXT_INSN (insn);
8584 }
8585 }
8586 }
8587
8588 /* Perform machine dependent operations on the rtl chain INSNS. */
8589
8590 static void
8591 ia64_reorg (void)
8592 {
8593 /* We are freeing block_for_insn in the toplev to keep compatibility
8594 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8595 compute_bb_for_insn ();
8596
8597 /* If optimizing, we'll have split before scheduling. */
8598 if (optimize == 0)
8599 split_all_insns ();
8600
8601 if (optimize && ia64_flag_schedule_insns2 && dbg_cnt (ia64_sched2))
8602 {
8603 timevar_push (TV_SCHED2);
8604 ia64_final_schedule = 1;
8605
8606 initiate_bundle_states ();
8607 ia64_nop = make_insn_raw (gen_nop ());
8608 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8609 recog_memoized (ia64_nop);
8610 clocks_length = get_max_uid () + 1;
8611 stops_p = XCNEWVEC (char, clocks_length);
8612 if (ia64_tune == PROCESSOR_ITANIUM)
8613 {
8614 clocks = XCNEWVEC (int, clocks_length);
8615 add_cycles = XCNEWVEC (int, clocks_length);
8616 }
8617 if (ia64_tune == PROCESSOR_ITANIUM2)
8618 {
8619 pos_1 = get_cpu_unit_code ("2_1");
8620 pos_2 = get_cpu_unit_code ("2_2");
8621 pos_3 = get_cpu_unit_code ("2_3");
8622 pos_4 = get_cpu_unit_code ("2_4");
8623 pos_5 = get_cpu_unit_code ("2_5");
8624 pos_6 = get_cpu_unit_code ("2_6");
8625 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8626 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8627 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8628 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8629 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8630 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8631 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8632 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8633 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8634 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8635 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8636 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8637 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8638 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8639 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8640 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8641 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8642 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8643 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8644 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8645 }
8646 else
8647 {
8648 pos_1 = get_cpu_unit_code ("1_1");
8649 pos_2 = get_cpu_unit_code ("1_2");
8650 pos_3 = get_cpu_unit_code ("1_3");
8651 pos_4 = get_cpu_unit_code ("1_4");
8652 pos_5 = get_cpu_unit_code ("1_5");
8653 pos_6 = get_cpu_unit_code ("1_6");
8654 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8655 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8656 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8657 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8658 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8659 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8660 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8661 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8662 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8663 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8664 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8665 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8666 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8667 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8668 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8669 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8670 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8671 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8672 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8673 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8674 }
8675 schedule_ebbs ();
8676 /* We cannot reuse this one because it has been corrupted by the
8677 evil glat. */
8678 finish_bundle_states ();
8679 if (ia64_tune == PROCESSOR_ITANIUM)
8680 {
8681 free (add_cycles);
8682 free (clocks);
8683 }
8684 free (stops_p);
8685 stops_p = NULL;
8686 emit_insn_group_barriers (dump_file);
8687
8688 ia64_final_schedule = 0;
8689 timevar_pop (TV_SCHED2);
8690 }
8691 else
8692 emit_all_insn_group_barriers (dump_file);
8693
8694 df_analyze ();
8695
8696 /* A call must not be the last instruction in a function, so that the
8697 return address stays within the function and unwinding works
8698 properly. Note that IA-64 differs from dwarf2 on this point. */
8699 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8700 {
8701 rtx insn;
8702 int saw_stop = 0;
8703
8704 insn = get_last_insn ();
8705 if (! INSN_P (insn))
8706 insn = prev_active_insn (insn);
8707 /* Skip over insns that expand to nothing. */
8708 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8709 {
8710 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8711 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8712 saw_stop = 1;
8713 insn = prev_active_insn (insn);
8714 }
8715 if (GET_CODE (insn) == CALL_INSN)
8716 {
8717 if (! saw_stop)
8718 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8719 emit_insn (gen_break_f ());
8720 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8721 }
8722 }
8723
8724 emit_predicate_relation_info ();
8725
8726 if (ia64_flag_var_tracking)
8727 {
8728 timevar_push (TV_VAR_TRACKING);
8729 variable_tracking_main ();
8730 timevar_pop (TV_VAR_TRACKING);
8731 }
8732 df_finish_pass (false);
8733 }
8734 \f
8735 /* Return true if REGNO is used by the epilogue. */
8736
8737 int
8738 ia64_epilogue_uses (int regno)
8739 {
8740 switch (regno)
8741 {
8742 case R_GR (1):
8743 /* With a call to a function in another module, we will write a new
8744 value to "gp". After returning from such a call, we need to make
8745 sure the function restores the original gp-value, even if the
8746 function itself does not use the gp anymore. */
8747 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8748
8749 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8750 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8751 /* For functions defined with the syscall_linkage attribute, all
8752 input registers are marked as live at all function exits. This
8753 prevents the register allocator from using the input registers,
8754 which in turn makes it possible to restart a system call after
8755 an interrupt without having to save/restore the input registers.
8756 This also prevents kernel data from leaking to application code. */
8757 return lookup_attribute ("syscall_linkage",
8758 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8759
8760 case R_BR (0):
8761 /* Conditional return patterns can't represent the use of `b0' as
8762 the return address, so we force the value live this way. */
8763 return 1;
8764
8765 case AR_PFS_REGNUM:
8766 /* Likewise for ar.pfs, which is used by br.ret. */
8767 return 1;
8768
8769 default:
8770 return 0;
8771 }
8772 }
8773
8774 /* Return true if REGNO is used by the frame unwinder. */
8775
8776 int
8777 ia64_eh_uses (int regno)
8778 {
8779 enum ia64_frame_regs r;
8780
8781 if (! reload_completed)
8782 return 0;
8783
8784 if (regno == 0)
8785 return 0;
8786
8787 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
8788 if (regno == current_frame_info.r[r]
8789 || regno == emitted_frame_related_regs[r])
8790 return 1;
8791
8792 return 0;
8793 }
8794 \f
8795 /* Return true if this goes in small data/bss. */
8796
8797 /* ??? We could also support our own long data here, generating movl/add/ld8
8798 instead of addl,ld8/ld8. This makes the code bigger, but should make the
8799 code faster because there is one less load. This also includes incomplete
8800 types which can't go in sdata/sbss. */
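/* Editorial note: in practice this means that an object either explicitly
   placed in one of the .sdata/.sbss style sections, or whose size is known
   and does not exceed ia64_section_threshold, is treated as small data and
   can be reached gp-relative with the short addl/ld8 sequence mentioned
   above.  */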
8801
8802 static bool
8803 ia64_in_small_data_p (const_tree exp)
8804 {
8805 if (TARGET_NO_SDATA)
8806 return false;
8807
8808 /* We want to merge strings, so we never consider them small data. */
8809 if (TREE_CODE (exp) == STRING_CST)
8810 return false;
8811
8812 /* Functions are never small data. */
8813 if (TREE_CODE (exp) == FUNCTION_DECL)
8814 return false;
8815
8816 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8817 {
8818 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8819
8820 if (strcmp (section, ".sdata") == 0
8821 || strncmp (section, ".sdata.", 7) == 0
8822 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8823 || strcmp (section, ".sbss") == 0
8824 || strncmp (section, ".sbss.", 6) == 0
8825 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8826 return true;
8827 }
8828 else
8829 {
8830 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8831
8832 /* If this is an incomplete type with size 0, then we can't put it
8833 in sdata because it might be too big when completed. */
8834 if (size > 0 && size <= ia64_section_threshold)
8835 return true;
8836 }
8837
8838 return false;
8839 }
8840 \f
8841 /* Output assembly directives for prologue regions. */
8842
8843 /* The current basic block number. */
8844
8845 static bool last_block;
8846
8847 /* True if we need a copy_state command at the start of the next block. */
8848
8849 static bool need_copy_state;
8850
8851 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8852 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8853 #endif
8854
8855 /* Emit a debugging label after a call-frame-related insn. We'd
8856 rather output the label right away, but we'd have to output it
8857 after, not before, the instruction, and the instruction has not
8858 been output yet. So we emit the label after the insn, delete it to
8859 avoid introducing basic blocks, and mark it as preserved, such that
8860 it is still output, given that it is referenced in debug info. */
8861
8862 static const char *
8863 ia64_emit_deleted_label_after_insn (rtx insn)
8864 {
8865 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8866 rtx lb = gen_label_rtx ();
8867 rtx label_insn = emit_label_after (lb, insn);
8868
8869 LABEL_PRESERVE_P (lb) = 1;
8870
8871 delete_insn (label_insn);
8872
8873 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8874
8875 return xstrdup (label);
8876 }
8877
8878 /* Define the CFA after INSN with the steady-state definition. */
8879
8880 static void
8881 ia64_dwarf2out_def_steady_cfa (rtx insn)
8882 {
8883 rtx fp = frame_pointer_needed
8884 ? hard_frame_pointer_rtx
8885 : stack_pointer_rtx;
8886
8887 dwarf2out_def_cfa
8888 (ia64_emit_deleted_label_after_insn (insn),
8889 REGNO (fp),
8890 ia64_initial_elimination_offset
8891 (REGNO (arg_pointer_rtx), REGNO (fp))
8892 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8893 }
8894
8895 /* The generic dwarf2 frame debug info generator does not define a
8896 separate region for the very end of the epilogue, so refrain from
8897 doing so in the IA64-specific code as well. */
8898
8899 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8900
8901 /* The function emits unwind directives for the start of an epilogue. */
8902
8903 static void
8904 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8905 {
8906 /* If this isn't the last block of the function, then we need to label the
8907 current state, and copy it back in at the start of the next block. */
8908
8909 if (!last_block)
8910 {
8911 if (unwind)
8912 fprintf (asm_out_file, "\t.label_state %d\n",
8913 ++cfun->machine->state_num);
8914 need_copy_state = true;
8915 }
8916
8917 if (unwind)
8918 fprintf (asm_out_file, "\t.restore sp\n");
8919 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8920 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8921 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8922 }
8923
8924 /* This function processes a SET pattern looking for specific patterns
8925 which result in emitting an assembly directive required for unwinding. */
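/* Illustrative example (editorial, not from the original sources): a
   prologue insn of the form

     (set (reg sp) (plus (reg sp) (const_int -32)))

   goes through the PLUS branch below and emits ".fframe 32", while the
   matching restore of sp in the epilogue is routed through
   process_epilogue () and emits ".restore sp".  */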
8926
8927 static int
8928 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8929 {
8930 rtx src = SET_SRC (pat);
8931 rtx dest = SET_DEST (pat);
8932 int src_regno, dest_regno;
8933
8934 /* Look for the ALLOC insn. */
8935 if (GET_CODE (src) == UNSPEC_VOLATILE
8936 && XINT (src, 1) == UNSPECV_ALLOC
8937 && GET_CODE (dest) == REG)
8938 {
8939 dest_regno = REGNO (dest);
8940
8941 /* If this is the final destination for ar.pfs, then this must
8942 be the alloc in the prologue. */
8943 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
8944 {
8945 if (unwind)
8946 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8947 ia64_dbx_register_number (dest_regno));
8948 }
8949 else
8950 {
8951 /* This must be an alloc before a sibcall. We must drop the
8952 old frame info. The easiest way to drop the old frame
8953 info is to ensure we had a ".restore sp" directive
8954 followed by a new prologue. If the procedure doesn't
8955 have a memory-stack frame, we'll issue a dummy ".restore
8956 sp" now. */
8957 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8958 /* If we haven't done process_epilogue () yet, do it now. */
8959 process_epilogue (asm_out_file, insn, unwind, frame);
8960 if (unwind)
8961 fprintf (asm_out_file, "\t.prologue\n");
8962 }
8963 return 1;
8964 }
8965
8966 /* Look for SP = .... */
8967 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8968 {
8969 if (GET_CODE (src) == PLUS)
8970 {
8971 rtx op0 = XEXP (src, 0);
8972 rtx op1 = XEXP (src, 1);
8973
8974 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8975
8976 if (INTVAL (op1) < 0)
8977 {
8978 gcc_assert (!frame_pointer_needed);
8979 if (unwind)
8980 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8981 -INTVAL (op1));
8982 if (frame)
8983 ia64_dwarf2out_def_steady_cfa (insn);
8984 }
8985 else
8986 process_epilogue (asm_out_file, insn, unwind, frame);
8987 }
8988 else
8989 {
8990 gcc_assert (GET_CODE (src) == REG
8991 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8992 process_epilogue (asm_out_file, insn, unwind, frame);
8993 }
8994
8995 return 1;
8996 }
8997
8998 /* Register move we need to look at. */
8999 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9000 {
9001 src_regno = REGNO (src);
9002 dest_regno = REGNO (dest);
9003
9004 switch (src_regno)
9005 {
9006 case BR_REG (0):
9007 /* Saving return address pointer. */
9008 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9009 if (unwind)
9010 fprintf (asm_out_file, "\t.save rp, r%d\n",
9011 ia64_dbx_register_number (dest_regno));
9012 return 1;
9013
9014 case PR_REG (0):
9015 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9016 if (unwind)
9017 fprintf (asm_out_file, "\t.save pr, r%d\n",
9018 ia64_dbx_register_number (dest_regno));
9019 return 1;
9020
9021 case AR_UNAT_REGNUM:
9022 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9023 if (unwind)
9024 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9025 ia64_dbx_register_number (dest_regno));
9026 return 1;
9027
9028 case AR_LC_REGNUM:
9029 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9030 if (unwind)
9031 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9032 ia64_dbx_register_number (dest_regno));
9033 return 1;
9034
9035 case STACK_POINTER_REGNUM:
9036 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9037 && frame_pointer_needed);
9038 if (unwind)
9039 fprintf (asm_out_file, "\t.vframe r%d\n",
9040 ia64_dbx_register_number (dest_regno));
9041 if (frame)
9042 ia64_dwarf2out_def_steady_cfa (insn);
9043 return 1;
9044
9045 default:
9046 /* Everything else should indicate being stored to memory. */
9047 gcc_unreachable ();
9048 }
9049 }
9050
9051 /* Memory store we need to look at. */
9052 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9053 {
9054 long off;
9055 rtx base;
9056 const char *saveop;
9057
9058 if (GET_CODE (XEXP (dest, 0)) == REG)
9059 {
9060 base = XEXP (dest, 0);
9061 off = 0;
9062 }
9063 else
9064 {
9065 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9066 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9067 base = XEXP (XEXP (dest, 0), 0);
9068 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9069 }
9070
9071 if (base == hard_frame_pointer_rtx)
9072 {
9073 saveop = ".savepsp";
9074 off = - off;
9075 }
9076 else
9077 {
9078 gcc_assert (base == stack_pointer_rtx);
9079 saveop = ".savesp";
9080 }
9081
9082 src_regno = REGNO (src);
9083 switch (src_regno)
9084 {
9085 case BR_REG (0):
9086 gcc_assert (!current_frame_info.r[reg_save_b0]);
9087 if (unwind)
9088 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9089 return 1;
9090
9091 case PR_REG (0):
9092 gcc_assert (!current_frame_info.r[reg_save_pr]);
9093 if (unwind)
9094 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9095 return 1;
9096
9097 case AR_LC_REGNUM:
9098 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9099 if (unwind)
9100 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9101 return 1;
9102
9103 case AR_PFS_REGNUM:
9104 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9105 if (unwind)
9106 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9107 return 1;
9108
9109 case AR_UNAT_REGNUM:
9110 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9111 if (unwind)
9112 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9113 return 1;
9114
9115 case GR_REG (4):
9116 case GR_REG (5):
9117 case GR_REG (6):
9118 case GR_REG (7):
9119 if (unwind)
9120 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9121 1 << (src_regno - GR_REG (4)));
9122 return 1;
9123
9124 case BR_REG (1):
9125 case BR_REG (2):
9126 case BR_REG (3):
9127 case BR_REG (4):
9128 case BR_REG (5):
9129 if (unwind)
9130 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9131 1 << (src_regno - BR_REG (1)));
9132 return 1;
9133
9134 case FR_REG (2):
9135 case FR_REG (3):
9136 case FR_REG (4):
9137 case FR_REG (5):
9138 if (unwind)
9139 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9140 1 << (src_regno - FR_REG (2)));
9141 return 1;
9142
9143 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9144 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9145 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9146 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9147 if (unwind)
9148 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9149 1 << (src_regno - FR_REG (12)));
9150 return 1;
9151
9152 default:
9153 return 0;
9154 }
9155 }
9156
9157 return 0;
9158 }
9159
9160
9161 /* This function looks at a single insn and emits any directives
9162 required to unwind this insn. */
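/* Depending on the insn, this emits directives such as .prologue, .body,
   .fframe, .vframe, .save, .savesp/.savepsp, .save.g/.save.b/.save.f/.save.gf
   and .copy_state; see process_set above for the individual cases
   (.restore sp is emitted by process_epilogue).  */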
9163 void
9164 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9165 {
9166 bool unwind = (flag_unwind_tables
9167 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9168 bool frame = dwarf2out_do_frame ();
9169
9170 if (unwind || frame)
9171 {
9172 rtx pat;
9173
9174 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9175 {
9176 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9177
9178 /* Restore unwind state from immediately before the epilogue. */
9179 if (need_copy_state)
9180 {
9181 if (unwind)
9182 {
9183 fprintf (asm_out_file, "\t.body\n");
9184 fprintf (asm_out_file, "\t.copy_state %d\n",
9185 cfun->machine->state_num);
9186 }
9187 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9188 ia64_dwarf2out_def_steady_cfa (insn);
9189 need_copy_state = false;
9190 }
9191 }
9192
9193 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9194 return;
9195
9196 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9197 if (pat)
9198 pat = XEXP (pat, 0);
9199 else
9200 pat = PATTERN (insn);
9201
9202 switch (GET_CODE (pat))
9203 {
9204 case SET:
9205 process_set (asm_out_file, pat, insn, unwind, frame);
9206 break;
9207
9208 case PARALLEL:
9209 {
9210 int par_index;
9211 int limit = XVECLEN (pat, 0);
9212 for (par_index = 0; par_index < limit; par_index++)
9213 {
9214 rtx x = XVECEXP (pat, 0, par_index);
9215 if (GET_CODE (x) == SET)
9216 process_set (asm_out_file, x, insn, unwind, frame);
9217 }
9218 break;
9219 }
9220
9221 default:
9222 gcc_unreachable ();
9223 }
9224 }
9225 }
9226
9227 \f
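/* Codes for the IA-64 specific builtin functions; see ia64_init_builtins
   below.  */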
9228 enum ia64_builtins
9229 {
9230 IA64_BUILTIN_BSP,
9231 IA64_BUILTIN_FLUSHRS
9232 };
9233
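/* Create the target-specific __fpreg, __float80 and __float128 types and
   declare the __builtin_ia64_bsp and __builtin_ia64_flushrs builtins.  */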
9234 void
9235 ia64_init_builtins (void)
9236 {
9237 tree fpreg_type;
9238 tree float80_type;
9239
9240 /* The __fpreg type. */
9241 fpreg_type = make_node (REAL_TYPE);
9242 TYPE_PRECISION (fpreg_type) = 82;
9243 layout_type (fpreg_type);
9244 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9245
9246 /* The __float80 type. */
9247 float80_type = make_node (REAL_TYPE);
9248 TYPE_PRECISION (float80_type) = 80;
9249 layout_type (float80_type);
9250 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9251
9252 /* The __float128 type. */
9253 if (!TARGET_HPUX)
9254 {
9255 tree float128_type = make_node (REAL_TYPE);
9256 TYPE_PRECISION (float128_type) = 128;
9257 layout_type (float128_type);
9258 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9259 }
9260 else
9261 /* Under HPUX, this is a synonym for "long double". */
9262 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9263 "__float128");
9264
9265 #define def_builtin(name, type, code) \
9266 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9267 NULL, NULL_TREE)
9268
9269 def_builtin ("__builtin_ia64_bsp",
9270 build_function_type (ptr_type_node, void_list_node),
9271 IA64_BUILTIN_BSP);
9272
9273 def_builtin ("__builtin_ia64_flushrs",
9274 build_function_type (void_type_node, void_list_node),
9275 IA64_BUILTIN_FLUSHRS);
9276
9277 #undef def_builtin
9278
9279 if (TARGET_HPUX)
9280 {
9281 if (built_in_decls [BUILT_IN_FINITE])
9282 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9283 "_Isfinite");
9284 if (built_in_decls [BUILT_IN_FINITEF])
9285 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9286 "_Isfinitef");
9287 if (built_in_decls [BUILT_IN_FINITEL])
9288 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9289 "_Isfinitef128");
9290 }
9291 }
9292
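/* Expand a call to one of the builtins declared above.  EXP is the
   CALL_EXPR and TARGET, if nonnull, suggests where to put the result.  */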
9293 rtx
9294 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9295 enum machine_mode mode ATTRIBUTE_UNUSED,
9296 int ignore ATTRIBUTE_UNUSED)
9297 {
9298 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9299 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9300
9301 switch (fcode)
9302 {
9303 case IA64_BUILTIN_BSP:
9304 if (! target || ! register_operand (target, DImode))
9305 target = gen_reg_rtx (DImode);
9306 emit_insn (gen_bsp_value (target));
9307 #ifdef POINTERS_EXTEND_UNSIGNED
9308 target = convert_memory_address (ptr_mode, target);
9309 #endif
9310 return target;
9311
9312 case IA64_BUILTIN_FLUSHRS:
9313 emit_insn (gen_flushrs ());
9314 return const0_rtx;
9315
9316 default:
9317 break;
9318 }
9319
9320 return NULL_RTX;
9321 }
9322
9323 /* On HP-UX IA64, aggregate parameters smaller than a word are passed
9324 in the most significant bits of the stack slot. */
9325
9326 enum direction
9327 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9328 {
9329 /* Exception to normal case for structures/unions/etc. */
9330
9331 if (type && AGGREGATE_TYPE_P (type)
9332 && int_size_in_bytes (type) < UNITS_PER_WORD)
9333 return upward;
9334
9335 /* Fall back to the default. */
9336 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9337 }
9338
9339 /* Emit text to declare externally defined variables and functions, because
9340 the Intel assembler does not support undefined externals. */
9341
9342 void
9343 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9344 {
9345 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9346 set in order to avoid putting out names that are never really
9347 used. */
9348 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9349 {
9350 /* maybe_assemble_visibility will return 1 if the assembler
9351 visibility directive is output. */
9352 int need_visibility = ((*targetm.binds_local_p) (decl)
9353 && maybe_assemble_visibility (decl));
9354
9355 /* GNU as does not need anything here, but the HP linker does
9356 need something for external functions. */
9357 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9358 && TREE_CODE (decl) == FUNCTION_DECL)
9359 (*targetm.asm_out.globalize_decl_name) (file, decl);
9360 else if (need_visibility && !TARGET_GNU_AS)
9361 (*targetm.asm_out.globalize_label) (file, name);
9362 }
9363 }
9364
9365 /* Set the SImode div/mod functions explicitly, since init_integral_libfuncs
9366 only initializes modes of word_mode and larger. Rename the TFmode libfuncs
9367 using the HPUX conventions. __divtf3 is used for XFmode, so we keep it for
9368 backward compatibility. */
9369
9370 static void
9371 ia64_init_libfuncs (void)
9372 {
9373 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9374 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9375 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9376 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9377
9378 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9379 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9380 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9381 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9382 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9383
9384 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9385 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9386 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9387 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9388 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9389 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9390
9391 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9392 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9393 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9394 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9395 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9396
9397 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9398 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9399 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9400 /* HP-UX 11.23 libc does not have a function for unsigned
9401 SImode-to-TFmode conversion. */
9402 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
9403 }
9404
9405 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9406
9407 static void
9408 ia64_hpux_init_libfuncs (void)
9409 {
9410 ia64_init_libfuncs ();
9411
9412 /* The HP SI millicode division and mod functions expect DI arguments.
9413 By turning them off completely we avoid using both libgcc and the
9414 non-standard millicode routines and use the HP DI millicode routines
9415 instead. */
9416
9417 set_optab_libfunc (sdiv_optab, SImode, 0);
9418 set_optab_libfunc (udiv_optab, SImode, 0);
9419 set_optab_libfunc (smod_optab, SImode, 0);
9420 set_optab_libfunc (umod_optab, SImode, 0);
9421
9422 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9423 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9424 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9425 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9426
9427 /* HP-UX libc has TF min/max/abs routines in it. */
9428 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9429 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9430 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9431
9432 /* ia64_expand_compare uses this. */
9433 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9434
9435 /* These should never be used. */
9436 set_optab_libfunc (eq_optab, TFmode, 0);
9437 set_optab_libfunc (ne_optab, TFmode, 0);
9438 set_optab_libfunc (gt_optab, TFmode, 0);
9439 set_optab_libfunc (ge_optab, TFmode, 0);
9440 set_optab_libfunc (lt_optab, TFmode, 0);
9441 set_optab_libfunc (le_optab, TFmode, 0);
9442 }
9443
9444 /* Rename the division and modulus functions in VMS. */
9445
9446 static void
9447 ia64_vms_init_libfuncs (void)
9448 {
9449 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9450 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9451 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9452 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9453 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9454 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9455 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9456 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9457 }
9458
9459 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9460 the HPUX conventions. */
9461
9462 static void
9463 ia64_sysv4_init_libfuncs (void)
9464 {
9465 ia64_init_libfuncs ();
9466
9467 /* These functions are not part of the HPUX TFmode interface. We
9468 use them instead of _U_Qfcmp, which doesn't work the way we
9469 expect. */
9470 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9471 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9472 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9473 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9474 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9475 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9476
9477 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9478 glibc doesn't have them. */
9479 }
9480 \f
9481 /* For HPUX, it is illegal to have relocations in shared segments. */
9482
9483 static int
9484 ia64_hpux_reloc_rw_mask (void)
9485 {
9486 return 3;
9487 }
9488
9489 /* For other targets, relax this so that relocations to local data go in
9490 read-only segments, but we still cannot allow global relocations
9491 in read-only segments. */
9492
9493 static int
9494 ia64_reloc_rw_mask (void)
9495 {
9496 return flag_pic ? 3 : 2;
9497 }
9498
9499 /* Return the section to use for X. The only special thing we do here
9500 is to honor small data. */
9501
9502 static section *
9503 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9504 unsigned HOST_WIDE_INT align)
9505 {
9506 if (GET_MODE_SIZE (mode) > 0
9507 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9508 && !TARGET_NO_SDATA)
9509 return sdata_section;
9510 else
9511 return default_elf_select_rtx_section (mode, x, align);
9512 }
9513
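/* Return the SECTION_* flags for the section named NAME, marking the small
   data and small bss sections with SECTION_SMALL in addition to the default
   flags.  */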
9514 static unsigned int
9515 ia64_section_type_flags (tree decl, const char *name, int reloc)
9516 {
9517 unsigned int flags = 0;
9518
9519 if (strcmp (name, ".sdata") == 0
9520 || strncmp (name, ".sdata.", 7) == 0
9521 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9522 || strncmp (name, ".sdata2.", 8) == 0
9523 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9524 || strcmp (name, ".sbss") == 0
9525 || strncmp (name, ".sbss.", 6) == 0
9526 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9527 flags = SECTION_SMALL;
9528
9529 flags |= default_section_type_flags (decl, name, reloc);
9530 return flags;
9531 }
9532
9533 /* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9534 structure type and the address of the returned object should be passed
9535 in out0, rather than in r8. */
9536
9537 static bool
9538 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9539 {
9540 tree ret_type = TREE_TYPE (fntype);
9541
9542 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9543 as the structure return address parameter, if the return value
9544 type has a non-trivial copy constructor or destructor. It is not
9545 clear if this same convention should be used for other
9546 programming languages. Until G++ 3.4, we incorrectly used r8 for
9547 these return values. */
9548 return (abi_version_at_least (2)
9549 && ret_type
9550 && TYPE_MODE (ret_type) == BLKmode
9551 && TREE_ADDRESSABLE (ret_type)
9552 && strcmp (lang_hooks.name, "GNU C++") == 0);
9553 }
9554
9555 /* Output the assembler code for a thunk function. THUNK is the
9556 declaration for the thunk function itself, FUNCTION is the decl for
9557 the target function. DELTA is an immediate constant offset to be
9558 added to THIS. If VCALL_OFFSET is nonzero, the word at
9559 *(*this + vcall_offset) should be added to THIS. */
9560
9561 static void
9562 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9563 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9564 tree function)
9565 {
9566 rtx this_rtx, insn, funexp;
9567 unsigned int this_parmno;
9568 unsigned int this_regno;
9569 rtx delta_rtx;
9570
9571 reload_completed = 1;
9572 epilogue_completed = 1;
9573
9574 /* Set things up as ia64_expand_prologue might. */
9575 last_scratch_gr_reg = 15;
9576
9577 memset (&current_frame_info, 0, sizeof (current_frame_info));
9578 current_frame_info.spill_cfa_off = -16;
9579 current_frame_info.n_input_regs = 1;
9580 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9581
9582 /* Mark the end of the (empty) prologue. */
9583 emit_note (NOTE_INSN_PROLOGUE_END);
9584
9585 /* Figure out whether "this" will be the first parameter (the
9586 typical case) or the second parameter (as happens when the
9587 virtual function returns certain class objects). */
9588 this_parmno
9589 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9590 ? 1 : 0);
9591 this_regno = IN_REG (this_parmno);
9592 if (!TARGET_REG_NAMES)
9593 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9594
9595 this_rtx = gen_rtx_REG (Pmode, this_regno);
9596
9597 /* Apply the constant offset, if required. */
9598 delta_rtx = GEN_INT (delta);
9599 if (TARGET_ILP32)
9600 {
9601 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9602 REG_POINTER (tmp) = 1;
9603 if (delta && satisfies_constraint_I (delta_rtx))
9604 {
9605 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
9606 delta = 0;
9607 }
9608 else
9609 emit_insn (gen_ptr_extend (this_rtx, tmp));
9610 }
9611 if (delta)
9612 {
9613 if (!satisfies_constraint_I (delta_rtx))
9614 {
9615 rtx tmp = gen_rtx_REG (Pmode, 2);
9616 emit_move_insn (tmp, delta_rtx);
9617 delta_rtx = tmp;
9618 }
9619 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
9620 }
9621
9622 /* Apply the offset from the vtable, if required. */
9623 if (vcall_offset)
9624 {
9625 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9626 rtx tmp = gen_rtx_REG (Pmode, 2);
9627
9628 if (TARGET_ILP32)
9629 {
9630 rtx t = gen_rtx_REG (ptr_mode, 2);
9631 REG_POINTER (t) = 1;
9632 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
9633 if (satisfies_constraint_I (vcall_offset_rtx))
9634 {
9635 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9636 vcall_offset = 0;
9637 }
9638 else
9639 emit_insn (gen_ptr_extend (tmp, t));
9640 }
9641 else
9642 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
9643
9644 if (vcall_offset)
9645 {
9646 if (!satisfies_constraint_J (vcall_offset_rtx))
9647 {
9648 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9649 emit_move_insn (tmp2, vcall_offset_rtx);
9650 vcall_offset_rtx = tmp2;
9651 }
9652 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9653 }
9654
9655 if (TARGET_ILP32)
9656 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9657 else
9658 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9659
9660 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
9661 }
9662
9663 /* Generate a tail call to the target function. */
9664 if (! TREE_USED (function))
9665 {
9666 assemble_external (function);
9667 TREE_USED (function) = 1;
9668 }
9669 funexp = XEXP (DECL_RTL (function), 0);
9670 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9671 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9672 insn = get_last_insn ();
9673 SIBLING_CALL_P (insn) = 1;
9674
9675 /* Code generation for calls relies on splitting. */
9676 reload_completed = 1;
9677 epilogue_completed = 1;
9678 try_split (PATTERN (insn), insn, 0);
9679
9680 emit_barrier ();
9681
9682 /* Run just enough of rest_of_compilation to get the insns emitted.
9683 There's not really enough bulk here to make other passes such as
9684 instruction scheduling worth while. Note that use_thunk calls
9685 assemble_start_function and assemble_end_function. */
9686
9687 insn_locators_alloc ();
9688 emit_all_insn_group_barriers (NULL);
9689 insn = get_insns ();
9690 shorten_branches (insn);
9691 final_start_function (insn, file, 1);
9692 final (insn, file, 1);
9693 final_end_function ();
9694 free_after_compilation (cfun);
9695
9696 reload_completed = 0;
9697 epilogue_completed = 0;
9698 }
9699
9700 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9701
9702 static rtx
9703 ia64_struct_value_rtx (tree fntype,
9704 int incoming ATTRIBUTE_UNUSED)
9705 {
9706 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9707 return NULL_RTX;
9708 return gen_rtx_REG (Pmode, GR_REG (8));
9709 }
9710
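/* Return true if MODE can be used as a scalar type.  TFmode is only
   supported on HP-UX, where __float128 is "long double".  */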
9711 static bool
9712 ia64_scalar_mode_supported_p (enum machine_mode mode)
9713 {
9714 switch (mode)
9715 {
9716 case QImode:
9717 case HImode:
9718 case SImode:
9719 case DImode:
9720 case TImode:
9721 return true;
9722
9723 case SFmode:
9724 case DFmode:
9725 case XFmode:
9726 case RFmode:
9727 return true;
9728
9729 case TFmode:
9730 return TARGET_HPUX;
9731
9732 default:
9733 return false;
9734 }
9735 }
9736
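/* Return true if MODE is one of the vector modes (V8QImode, V4HImode,
   V2SImode and V2SFmode) supported by the IA-64 vector patterns.  */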
9737 static bool
9738 ia64_vector_mode_supported_p (enum machine_mode mode)
9739 {
9740 switch (mode)
9741 {
9742 case V8QImode:
9743 case V4HImode:
9744 case V2SImode:
9745 return true;
9746
9747 case V2SFmode:
9748 return true;
9749
9750 default:
9751 return false;
9752 }
9753 }
9754
9755 /* Implement the FUNCTION_PROFILER macro. */
9756
9757 void
9758 ia64_output_function_profiler (FILE *file, int labelno)
9759 {
9760 bool indirect_call;
9761
9762 /* If the function needs a static chain and the static chain
9763 register is r15, we use an indirect call so as to bypass
9764 the PLT stub in case the executable is dynamically linked,
9765 because the stub clobbers r15 as per 5.3.6 of the psABI.
9766 We don't need to do that in non-canonical PIC mode. */
9767
9768 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9769 {
9770 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9771 indirect_call = true;
9772 }
9773 else
9774 indirect_call = false;
9775
9776 if (TARGET_GNU_AS)
9777 fputs ("\t.prologue 4, r40\n", file);
9778 else
9779 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9780 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9781
9782 if (NO_PROFILE_COUNTERS)
9783 fputs ("\tmov out3 = r0\n", file);
9784 else
9785 {
9786 char buf[20];
9787 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9788
9789 if (TARGET_AUTO_PIC)
9790 fputs ("\tmovl out3 = @gprel(", file);
9791 else
9792 fputs ("\taddl out3 = @ltoff(", file);
9793 assemble_name (file, buf);
9794 if (TARGET_AUTO_PIC)
9795 fputs (")\n", file);
9796 else
9797 fputs ("), r1\n", file);
9798 }
9799
9800 if (indirect_call)
9801 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9802 fputs ("\t;;\n", file);
9803
9804 fputs ("\t.save rp, r42\n", file);
9805 fputs ("\tmov out2 = b0\n", file);
9806 if (indirect_call)
9807 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9808 fputs ("\t.body\n", file);
9809 fputs ("\tmov out1 = r1\n", file);
9810 if (indirect_call)
9811 {
9812 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9813 fputs ("\tmov b6 = r16\n", file);
9814 fputs ("\tld8 r1 = [r14]\n", file);
9815 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9816 }
9817 else
9818 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
9819 }
9820
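/* A cached SYMBOL_REF for the _mcount profiling routine; gen_mcount_func_rtx
   below creates it on first use.  */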
9821 static GTY(()) rtx mcount_func_rtx;
9822 static rtx
9823 gen_mcount_func_rtx (void)
9824 {
9825 if (!mcount_func_rtx)
9826 mcount_func_rtx = init_one_libfunc ("_mcount");
9827 return mcount_func_rtx;
9828 }
9829
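/* Emit rtl to call _mcount, passing the caller's return address (b0), the
   current IP, and the address of the per-call-site counter label built from
   LABELNO (const0_rtx when NO_PROFILE_COUNTERS).  */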
9830 void
9831 ia64_profile_hook (int labelno)
9832 {
9833 rtx label, ip;
9834
9835 if (NO_PROFILE_COUNTERS)
9836 label = const0_rtx;
9837 else
9838 {
9839 char buf[30];
9840 const char *label_name;
9841 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9842 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9843 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9844 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9845 }
9846 ip = gen_reg_rtx (Pmode);
9847 emit_insn (gen_ip_value (ip));
9848 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9849 VOIDmode, 3,
9850 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9851 ip, Pmode,
9852 label, Pmode);
9853 }
9854
9855 /* Return the mangling of TYPE if it is an extended fundamental type. */
9856
9857 static const char *
9858 ia64_mangle_type (const_tree type)
9859 {
9860 type = TYPE_MAIN_VARIANT (type);
9861
9862 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
9863 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
9864 return NULL;
9865
9866 /* On HP-UX, __float128 is "long double", which is already mangled as
9867 "e"; elsewhere mangle it as "g". */
9868 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9869 return "g";
9870 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9871 an extended mangling. Elsewhere, "e" is available since long
9872 double is 80 bits. */
9873 if (TYPE_MODE (type) == XFmode)
9874 return TARGET_HPUX ? "u9__float80" : "e";
9875 if (TYPE_MODE (type) == RFmode)
9876 return "u7__fpreg";
9877 return NULL;
9878 }
9879
9880 /* Return the diagnostic message string if conversion from FROMTYPE to
9881 TOTYPE is not allowed, NULL otherwise. */
9882 static const char *
9883 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
9884 {
9885 /* Reject nontrivial conversion to or from __fpreg. */
9886 if (TYPE_MODE (fromtype) == RFmode
9887 && TYPE_MODE (totype) != RFmode
9888 && TYPE_MODE (totype) != VOIDmode)
9889 return N_("invalid conversion from %<__fpreg%>");
9890 if (TYPE_MODE (totype) == RFmode
9891 && TYPE_MODE (fromtype) != RFmode)
9892 return N_("invalid conversion to %<__fpreg%>");
9893 return NULL;
9894 }
9895
9896 /* Return the diagnostic message string if the unary operation OP is
9897 not permitted on TYPE, NULL otherwise. */
9898 static const char *
9899 ia64_invalid_unary_op (int op, const_tree type)
9900 {
9901 /* Reject operations on __fpreg other than unary + or &. */
9902 if (TYPE_MODE (type) == RFmode
9903 && op != CONVERT_EXPR
9904 && op != ADDR_EXPR)
9905 return N_("invalid operation on %<__fpreg%>");
9906 return NULL;
9907 }
9908
9909 /* Return the diagnostic message string if the binary operation OP is
9910 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9911 static const char *
9912 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
9913 {
9914 /* Reject operations on __fpreg. */
9915 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9916 return N_("invalid operation on %<__fpreg%>");
9917 return NULL;
9918 }
9919
9920 /* Implement overriding of the optimization options. */
9921 void
9922 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9923 int size ATTRIBUTE_UNUSED)
9924 {
9925 /* Disable the second machine-independent scheduling pass and use the IA-64
9926 specific one instead. This needs to be here instead of in OVERRIDE_OPTIONS
9927 because this is done whenever the optimization options are changed via
9928 #pragma GCC optimize or attribute((optimize(...))). */
9929 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
9930 flag_schedule_insns_after_reload = 0;
9931
9932 /* Let the scheduler form additional regions. */
9933 set_param_value ("max-sched-extend-regions-iters", 2);
9934
9935 /* Set the default values for cache-related parameters. */
9936 set_param_value ("simultaneous-prefetches", 6);
9937 set_param_value ("l1-cache-line-size", 32);
9938
9939 }
9940
9941 /* HP-UX version_id attribute.
9942 For an object foo, if the version_id is set to 1234, put out an alias
9943 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
9944 other than an alias statement because it is an illegal symbol name. */
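/* For instance (illustrative only), a declaration such as
     extern int foo __attribute__((version_id ("1234")));
   is expected to result in the '.alias foo "foo{1234}"' directive
   described above being emitted for foo.  */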
9945
9946 static tree
9947 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9948 tree name ATTRIBUTE_UNUSED,
9949 tree args,
9950 int flags ATTRIBUTE_UNUSED,
9951 bool *no_add_attrs)
9952 {
9953 tree arg = TREE_VALUE (args);
9954
9955 if (TREE_CODE (arg) != STRING_CST)
9956 {
9957 error("version attribute is not a string");
9958 *no_add_attrs = true;
9959 return NULL_TREE;
9960 }
9961 return NULL_TREE;
9962 }
9963
9964 /* Target hook for c_mode_for_suffix. */
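/* Floating constants written with a 'q' suffix (e.g. 1.0q) get TFmode
   (__float128), and constants with a 'w' suffix (e.g. 1.0w) get XFmode
   (__float80).  */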
9965
9966 static enum machine_mode
9967 ia64_c_mode_for_suffix (char suffix)
9968 {
9969 if (suffix == 'q')
9970 return TFmode;
9971 if (suffix == 'w')
9972 return XFmode;
9973
9974 return VOIDmode;
9975 }
9976
9977 #include "gt-ia64.h"